-rw-r--r--  .editorconfig | 32
-rw-r--r--  .gitignore | 1
-rw-r--r--  .mailmap | 13
-rw-r--r--  CREDITS | 13
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net-queues | 45
-rw-r--r--  Documentation/ABI/testing/sysfs-class-net-statistics | 48
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon | 14
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon | 14
-rw-r--r--  Documentation/ABI/testing/sysfs-nvmem-cells | 16
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-silicom | 1
-rw-r--r--  Documentation/accel/introduction.rst | 4
-rw-r--r--  Documentation/admin-guide/cifs/todo.rst | 44
-rw-r--r--  Documentation/admin-guide/cifs/usage.rst | 8
-rw-r--r--  Documentation/admin-guide/kernel-parameters.rst | 5
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 24
-rw-r--r--  Documentation/admin-guide/kernel-per-CPU-kthreads.rst | 16
-rw-r--r--  Documentation/admin-guide/sysctl/net.rst | 5
-rw-r--r--  Documentation/arch/arm64/silicon-errata.rst | 11
-rw-r--r--  Documentation/arch/x86/mds.rst | 38
-rw-r--r--  Documentation/block/ioprio.rst | 13
-rw-r--r--  Documentation/bpf/kfuncs.rst | 8
-rw-r--r--  Documentation/bpf/map_lpm_trie.rst | 2
-rw-r--r--  Documentation/bpf/standardization/instruction-set.rst | 185
-rw-r--r--  Documentation/bpf/verifier.rst | 2
-rw-r--r--  Documentation/conf.py | 6
-rw-r--r--  Documentation/dev-tools/checkuapi.rst | 477
-rw-r--r--  Documentation/dev-tools/index.rst | 1
-rw-r--r--  Documentation/dev-tools/kselftest.rst | 12
-rw-r--r--  Documentation/dev-tools/kunit/usage.rst | 19
-rw-r--r--  Documentation/devicetree/bindings/Makefile | 5
-rw-r--r--  Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/clock/google,gs101-clock.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/nxp,tda998x.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/dma/dma-controller.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/dma/dma-router.yaml | 11
-rw-r--r--  Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml | 62
-rw-r--r--  Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/dma/qcom,gpi.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml | 39
-rw-r--r--  Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml | 26
-rw-r--r--  Documentation/devicetree/bindings/dma/ti/k3-udma.yaml | 20
-rw-r--r--  Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml | 18
-rw-r--r--  Documentation/devicetree/bindings/leds/common.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-bcm63138.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-bcm6328.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-bcm6358.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-pwm.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/loongarch/cpus.yaml | 61
-rw-r--r--  Documentation/devicetree/bindings/loongarch/loongson.yaml | 34
-rw-r--r--  Documentation/devicetree/bindings/media/cnm,wave521c.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/can/tcan4x5x.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/net/can/xilinx,can.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/cdns,macb.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/ar9331.txt | 147
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml | 161
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/realtek.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-controller.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-phy-package.yaml | 52
-rw-r--r--  Documentation/devicetree/bindings/net/fsl,fec.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/net/marvell,prestera.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/qca,qca808x.yaml | 54
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ethqos.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ipa.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,qca807x.yaml | 184
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,etheravb.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ethertsn.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/snps,dwmac.yaml | 17
-rw-r--r--  Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml | 72
-rw-r--r--  Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83822.yaml | 34
-rw-r--r--  Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/power/reset/qcom,pon.yaml | 13
-rw-r--r--  Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/power/supply/bq24190.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/riscv/cpus.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/riscv/extensions.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/sound/google,sc7280-herobrine.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/tas2562.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/ti,tas2781.yaml | 78
-rw-r--r--  Documentation/devicetree/bindings/timer/sifive,clint.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/tpm/tpm-common.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/usb/microchip,usb5744.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/usb/xlnx,usb2.yaml | 3
-rw-r--r--  Documentation/driver-api/dpll.rst | 2
-rw-r--r--  Documentation/features/vm/TLB/arch-support.txt | 2
-rw-r--r--  Documentation/filesystems/netfs_library.rst | 23
-rw-r--r--  Documentation/filesystems/overlayfs.rst | 16
-rw-r--r--  Documentation/filesystems/smb/ksmbd.rst | 9
-rw-r--r--  Documentation/kbuild/Kconfig.recursion-issue-01 | 6
-rw-r--r--  Documentation/netlink/genetlink-c.yaml | 15
-rw-r--r--  Documentation/netlink/genetlink-legacy.yaml | 15
-rw-r--r--  Documentation/netlink/netlink-raw.yaml | 30
-rw-r--r--  Documentation/netlink/specs/dpll.yaml | 44
-rw-r--r--  Documentation/netlink/specs/mptcp_pm.yaml | 3
-rw-r--r--  Documentation/netlink/specs/netdev.yaml | 91
-rw-r--r--  Documentation/netlink/specs/nlctrl.yaml | 206
-rw-r--r--  Documentation/netlink/specs/rt_link.yaml | 10
-rw-r--r--  Documentation/netlink/specs/tc.yaml | 2119
-rw-r--r--  Documentation/networking/af_xdp.rst | 33
-rw-r--r--  Documentation/networking/bonding.rst | 12
-rw-r--r--  Documentation/networking/can.rst | 34
-rw-r--r--  Documentation/networking/device_drivers/ethernet/amazon/ena.rst | 6
-rw-r--r--  Documentation/networking/device_drivers/ethernet/index.rst | 1
-rw-r--r--  Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst | 24
-rw-r--r--  Documentation/networking/device_drivers/wwan/t7xx.rst | 46
-rw-r--r--  Documentation/networking/devlink/devlink-port.rst | 2
-rw-r--r--  Documentation/networking/devlink/mlx5.rst | 9
-rw-r--r--  Documentation/networking/ip-sysctl.rst | 14
-rw-r--r--  Documentation/networking/l2tp.rst | 135
-rw-r--r--  Documentation/networking/net_cachelines/inet_sock.rst | 6
-rw-r--r--  Documentation/networking/net_cachelines/net_device.rst | 4
-rw-r--r--  Documentation/networking/net_cachelines/tcp_sock.rst | 4
-rw-r--r--  Documentation/networking/netconsole.rst | 66
-rw-r--r--  Documentation/networking/netdevices.rst | 4
-rw-r--r--  Documentation/networking/sfp-phylink.rst | 147
-rw-r--r--  Documentation/networking/statistics.rst | 15
-rw-r--r--  Documentation/networking/xfrm_device.rst | 4
-rw-r--r--  Documentation/process/4.Coding.rst | 4
-rw-r--r--  Documentation/process/coding-style.rst | 4
-rw-r--r--  Documentation/process/cve.rst | 121
-rw-r--r--  Documentation/process/index.rst | 1
-rw-r--r--  Documentation/process/maintainer-netdev.rst | 2
-rw-r--r--  Documentation/process/security-bugs.rst | 5
-rw-r--r--  Documentation/rust/arch-support.rst | 13
-rw-r--r--  Documentation/sphinx/kernel_feat.py | 2
-rw-r--r--  Documentation/sphinx/templates/kernel-toc.html | 4
-rw-r--r--  Documentation/sphinx/translations.py | 10
-rw-r--r--  Documentation/usb/gadget-testing.rst | 22
-rw-r--r--  Documentation/userspace-api/ioctl/ioctl-number.rst | 1
-rw-r--r--  Documentation/userspace-api/netlink/netlink-raw.rst | 42
-rw-r--r--  Documentation/virt/hyperv/index.rst | 1
-rw-r--r--  Documentation/virt/hyperv/vpci.rst | 316
-rw-r--r--  MAINTAINERS | 214
-rw-r--r--  Makefile | 45
-rw-r--r--  arch/Kconfig | 1
-rw-r--r--  arch/arc/include/asm/jump_label.h | 4
-rw-r--r--  arch/arm/boot/dts/amazon/alpine.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts | 4
-rw-r--r--  arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts | 4
-rw-r--r--  arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts | 2
-rw-r--r--  arch/arm/boot/dts/aspeed/aspeed-g4.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/aspeed/aspeed-g5.dtsi | 15
-rw-r--r--  arch/arm/boot/dts/aspeed/aspeed-g6.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm-hr2.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm-nsp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts | 2
-rw-r--r--  arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts | 2
-rw-r--r--  arch/arm/boot/dts/marvell/kirkwood-l-50.dts | 2
-rw-r--r--  arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts | 3
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts | 2
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts | 1
-rw-r--r--  arch/arm/boot/dts/nxp/imx/imx7s.dtsi | 26
-rw-r--r--  arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts | 1
-rw-r--r--  arch/arm/boot/dts/qcom/qcom-sdx55.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7790-lager.dts | 1
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7790-stout.dts | 1
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7791-koelsch.dts | 1
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7791-porter.dts | 1
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7792-blanche.dts | 1
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7793-gose.dts | 1
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7794-alt.dts | 1
-rw-r--r--  arch/arm/boot/dts/renesas/r8a7794-silk.dts | 1
-rw-r--r--  arch/arm/boot/dts/rockchip/rv1108.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/st/stm32429i-eval.dts | 1
-rw-r--r--  arch/arm/boot/dts/st/stm32mp157c-dk2.dts | 1
-rw-r--r--  arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts | 1
-rw-r--r--  arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/mxs_defconfig | 3
-rw-r--r--  arch/arm/include/asm/jump_label.h | 4
-rw-r--r--  arch/arm/mach-ep93xx/core.c | 1
-rw-r--r--  arch/arm/mm/fault.c | 2
-rw-r--r--  arch/arm64/Kconfig | 18
-rw-r--r--  arch/arm64/Makefile | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/Makefile | 1
-rw-r--r--  arch/arm64/boot/dts/amazon/alpine-v2.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/amazon/alpine-v3.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/exynos/google/gs101.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/Makefile | 19
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts | 11
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts | 12
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts | 9
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts | 2
-rw-r--r--  arch/arm64/boot/dts/lg/lg1312.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/lg/lg1313.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-ap80x.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195-demo.dts | 1
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts | 2
-rw-r--r--  arch/arm64/boot/dts/nvidia/tegra234.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/ipq6018.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/qcom/ipq8074.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8996.dtsi | 39
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp-crd.dts | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sm6115.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8650-mtp.dts | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8650-qrd.dts | 2
-rw-r--r--  arch/arm64/boot/dts/renesas/ulcb-kf.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/px30.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts | 8
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts | 8
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts | 10
-rw-r--r--  arch/arm64/crypto/aes-neonbs-glue.c | 11
-rw-r--r--  arch/arm64/include/asm/alternative-macros.h | 4
-rw-r--r--  arch/arm64/include/asm/assembler.h | 25
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 2
-rw-r--r--  arch/arm64/include/asm/cputype.h | 4
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 14
-rw-r--r--  arch/arm64/include/asm/irq.h | 2
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 4
-rw-r--r--  arch/arm64/include/asm/patching.h | 2
-rw-r--r--  arch/arm64/include/asm/vdso.h | 3
-rw-r--r--  arch/arm64/kernel/Makefile | 6
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 24
-rw-r--r--  arch/arm64/kernel/entry.S | 25
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 30
-rw-r--r--  arch/arm64/kernel/patching.c | 75
-rw-r--r--  arch/arm64/kernel/ptrace.c | 16
-rw-r--r--  arch/arm64/kernel/signal.c | 4
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 26
-rw-r--r--  arch/arm64/kernel/suspend.c | 3
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 9
-rw-r--r--  arch/arm64/kvm/Kconfig | 1
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c | 2
-rw-r--r--  arch/arm64/kvm/pkvm.c | 27
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c | 5
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 231
-rw-r--r--  arch/arm64/tools/cpucaps | 2
-rw-r--r--  arch/csky/configs/defconfig | 3
-rw-r--r--  arch/csky/include/asm/jump_label.h | 4
-rw-r--r--  arch/loongarch/Kbuild | 1
-rw-r--r--  arch/loongarch/Kconfig | 45
-rw-r--r--  arch/loongarch/Makefile | 6
-rw-r--r--  arch/loongarch/boot/dts/Makefile | 5
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k0500-ref.dts | 88
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k0500.dtsi | 266
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k1000-ref.dts | 183
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k1000.dtsi | 492
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k2000-ref.dts | 72
-rw-r--r--  arch/loongarch/boot/dts/loongson-2k2000.dtsi | 300
-rw-r--r--  arch/loongarch/configs/loongson3_defconfig | 55
-rw-r--r--  arch/loongarch/include/asm/acpi.h | 4
-rw-r--r--  arch/loongarch/include/asm/bootinfo.h | 6
-rw-r--r--  arch/loongarch/include/asm/crash_core.h | 12
-rw-r--r--  arch/loongarch/include/asm/elf.h | 5
-rw-r--r--  arch/loongarch/include/asm/ftrace.h | 2
-rw-r--r--  arch/loongarch/include/asm/jump_label.h | 4
-rw-r--r--  arch/loongarch/include/asm/kvm_vcpu.h | 4
-rw-r--r--  arch/loongarch/include/asm/shmparam.h | 12
-rw-r--r--  arch/loongarch/kernel/acpi.c | 6
-rw-r--r--  arch/loongarch/kernel/efi.c | 2
-rw-r--r--  arch/loongarch/kernel/elf.c | 5
-rw-r--r--  arch/loongarch/kernel/env.c | 34
-rw-r--r--  arch/loongarch/kernel/head.S | 10
-rw-r--r--  arch/loongarch/kernel/process.c | 1
-rw-r--r--  arch/loongarch/kernel/setup.c | 60
-rw-r--r--  arch/loongarch/kernel/smp.c | 128
-rw-r--r--  arch/loongarch/kvm/mmu.c | 4
-rw-r--r--  arch/loongarch/kvm/vcpu.c | 81
-rw-r--r--  arch/loongarch/mm/kasan_init.c | 3
-rw-r--r--  arch/loongarch/mm/tlb.c | 16
-rw-r--r--  arch/loongarch/net/bpf_jit.c | 10
-rw-r--r--  arch/loongarch/vdso/Makefile | 1
-rw-r--r--  arch/m68k/Makefile | 4
-rw-r--r--  arch/mips/alchemy/common/prom.c | 1
-rw-r--r--  arch/mips/alchemy/common/setup.c | 4
-rw-r--r--  arch/mips/bcm63xx/boards/board_bcm963xx.c | 2
-rw-r--r--  arch/mips/bcm63xx/dev-rng.c | 2
-rw-r--r--  arch/mips/bcm63xx/dev-uart.c | 1
-rw-r--r--  arch/mips/bcm63xx/dev-wdt.c | 2
-rw-r--r--  arch/mips/bcm63xx/irq.c | 2
-rw-r--r--  arch/mips/bcm63xx/setup.c | 2
-rw-r--r--  arch/mips/bcm63xx/timer.c | 2
-rw-r--r--  arch/mips/cobalt/setup.c | 3
-rw-r--r--  arch/mips/configs/ip27_defconfig | 3
-rw-r--r--  arch/mips/configs/lemote2f_defconfig | 3
-rw-r--r--  arch/mips/configs/loongson3_defconfig | 3
-rw-r--r--  arch/mips/configs/pic32mzda_defconfig | 3
-rw-r--r--  arch/mips/fw/arc/memory.c | 2
-rw-r--r--  arch/mips/include/asm/checksum.h | 3
-rw-r--r--  arch/mips/include/asm/jump_label.h | 4
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000.h | 3
-rw-r--r--  arch/mips/include/asm/mach-cobalt/cobalt.h | 3
-rw-r--r--  arch/mips/include/asm/ptrace.h | 3
-rw-r--r--  arch/mips/kernel/elf.c | 6
-rw-r--r--  arch/mips/kernel/ptrace.c | 7
-rw-r--r--  arch/mips/kernel/traps.c | 8
-rw-r--r--  arch/mips/lantiq/prom.c | 7
-rw-r--r--  arch/mips/loongson64/init.c | 3
-rw-r--r--  arch/mips/loongson64/numa.c | 2
-rw-r--r--  arch/mips/sgi-ip27/Makefile | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-berr.c | 4
-rw-r--r--  arch/mips/sgi-ip27/ip27-common.h | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-hubio.c | 185
-rw-r--r--  arch/mips/sgi-ip27/ip27-irq.c | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 1
-rw-r--r--  arch/mips/sgi-ip27/ip27-nmi.c | 25
-rw-r--r--  arch/mips/sgi-ip30/ip30-console.c | 1
-rw-r--r--  arch/mips/sgi-ip30/ip30-setup.c | 1
-rw-r--r--  arch/mips/sgi-ip32/crime.c | 6
-rw-r--r--  arch/mips/sgi-ip32/ip32-berr.c | 2
-rw-r--r--  arch/mips/sgi-ip32/ip32-common.h | 15
-rw-r--r--  arch/mips/sgi-ip32/ip32-irq.c | 6
-rw-r--r--  arch/mips/sgi-ip32/ip32-memory.c | 1
-rw-r--r--  arch/mips/sgi-ip32/ip32-reset.c | 2
-rw-r--r--  arch/mips/sgi-ip32/ip32-setup.c | 3
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/Makefile | 4
-rw-r--r--  arch/parisc/include/asm/assembly.h | 1
-rw-r--r--  arch/parisc/include/asm/extable.h | 64
-rw-r--r--  arch/parisc/include/asm/jump_label.h | 4
-rw-r--r--  arch/parisc/include/asm/kprobes.h | 3
-rw-r--r--  arch/parisc/include/asm/special_insns.h | 6
-rw-r--r--  arch/parisc/include/asm/uaccess.h | 48
-rw-r--r--  arch/parisc/kernel/cache.c | 10
-rw-r--r--  arch/parisc/kernel/drivers.c | 5
-rw-r--r--  arch/parisc/kernel/ftrace.c | 2
-rw-r--r--  arch/parisc/kernel/processor.c | 8
-rw-r--r--  arch/parisc/kernel/unaligned.c | 44
-rw-r--r--  arch/parisc/kernel/unwind.c | 14
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/parisc/mm/fault.c | 11
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/ftrace.h | 10
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 4
-rw-r--r--  arch/powerpc/include/asm/papr-sysparm.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-pci.h | 10
-rw-r--r--  arch/powerpc/include/asm/reg.h | 2
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 4
-rw-r--r--  arch/powerpc/include/asm/sections.h | 1
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 2
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 12
-rw-r--r--  arch/powerpc/include/uapi/asm/papr-sysparm.h | 2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S | 20
-rw-r--r--  arch/powerpc/kernel/cpu_specs_e500mc.h | 3
-rw-r--r--  arch/powerpc/kernel/interrupt_64.S | 4
-rw-r--r--  arch/powerpc/kernel/iommu.c | 64
-rw-r--r--  arch/powerpc/kernel/irq_64.c | 2
-rw-r--r--  arch/powerpc/kernel/rtas.c | 9
-rw-r--r--  arch/powerpc/kernel/trace/ftrace.c | 12
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_64_pg.c | 5
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 26
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nestedv2.c | 20
-rw-r--r--  arch/powerpc/mm/kasan/init_32.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc8536_ds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mvme2500.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/p1010rdb.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 6
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_rdk.c | 6
-rw-r--r--  arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/xes_mpc85xx.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 156
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 8
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 4
-rw-r--r--  arch/powerpc/sysdev/udbg_memcons.c | 6
-rw-r--r--  arch/riscv/Kconfig | 58
-rw-r--r--  arch/riscv/Kconfig.errata | 1
-rw-r--r--  arch/riscv/Makefile | 8
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts | 1
-rw-r--r--  arch/riscv/boot/dts/sophgo/sg2042.dtsi | 80
-rw-r--r--  arch/riscv/boot/dts/starfive/jh7100.dtsi | 12
-rw-r--r--  arch/riscv/boot/dts/starfive/jh7110.dtsi | 4
-rw-r--r--  arch/riscv/configs/defconfig | 1
-rw-r--r--  arch/riscv/errata/thead/errata.c | 69
-rw-r--r--  arch/riscv/include/asm/arch_hweight.h | 78
-rw-r--r--  arch/riscv/include/asm/archrandom.h | 72
-rw-r--r--  arch/riscv/include/asm/asm-extable.h | 15
-rw-r--r--  arch/riscv/include/asm/asm-prototypes.h | 27
-rw-r--r--  arch/riscv/include/asm/bitops.h | 12
-rw-r--r--  arch/riscv/include/asm/checksum.h | 93
-rw-r--r--  arch/riscv/include/asm/cpufeature.h | 6
-rw-r--r--  arch/riscv/include/asm/csr.h | 11
-rw-r--r--  arch/riscv/include/asm/entry-common.h | 17
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 50
-rw-r--r--  arch/riscv/include/asm/ftrace.h | 23
-rw-r--r--  arch/riscv/include/asm/hugetlb.h | 5
-rw-r--r--  arch/riscv/include/asm/hwcap.h | 2
-rw-r--r--  arch/riscv/include/asm/jump_label.h | 4
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 20
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h | 2
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 8
-rw-r--r--  arch/riscv/include/asm/processor.h | 43
-rw-r--r--  arch/riscv/include/asm/sbi.h | 19
-rw-r--r--  arch/riscv/include/asm/simd.h | 64
-rw-r--r--  arch/riscv/include/asm/stacktrace.h | 5
-rw-r--r--  arch/riscv/include/asm/suspend.h | 1
-rw-r--r--  arch/riscv/include/asm/switch_to.h | 3
-rw-r--r--  arch/riscv/include/asm/thread_info.h | 2
-rw-r--r--  arch/riscv/include/asm/tlb.h | 2
-rw-r--r--  arch/riscv/include/asm/tlbbatch.h | 15
-rw-r--r--  arch/riscv/include/asm/tlbflush.h | 9
-rw-r--r--  arch/riscv/include/asm/vector.h | 90
-rw-r--r--  arch/riscv/include/asm/vmalloc.h | 61
-rw-r--r--  arch/riscv/include/asm/word-at-a-time.h | 27
-rw-r--r--  arch/riscv/include/asm/xor.h | 68
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h | 27
-rw-r--r--  arch/riscv/kernel/Makefile | 3
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 121
-rw-r--r--  arch/riscv/kernel/entry.S | 8
-rw-r--r--  arch/riscv/kernel/ftrace.c | 30
-rw-r--r--  arch/riscv/kernel/kernel_mode_vector.c | 247
-rw-r--r--  arch/riscv/kernel/mcount-dyn.S | 198
-rw-r--r--  arch/riscv/kernel/module.c | 34
-rw-r--r--  arch/riscv/kernel/paravirt.c | 6
-rw-r--r--  arch/riscv/kernel/pi/cmdline_early.c | 3
-rw-r--r--  arch/riscv/kernel/process.c | 13
-rw-r--r--  arch/riscv/kernel/ptrace.c | 7
-rw-r--r--  arch/riscv/kernel/return_address.c | 48
-rw-r--r--  arch/riscv/kernel/sbi.c | 66
-rw-r--r--  arch/riscv/kernel/signal.c | 7
-rw-r--r--  arch/riscv/kernel/suspend.c | 48
-rw-r--r--  arch/riscv/kernel/vector.c | 53
-rw-r--r--  arch/riscv/kvm/vcpu_onereg.c | 54
-rw-r--r--  arch/riscv/kvm/vcpu_sbi_sta.c | 20
-rw-r--r--  arch/riscv/lib/Makefile | 6
-rw-r--r--  arch/riscv/lib/csum.c | 328
-rw-r--r--  arch/riscv/lib/riscv_v_helpers.c | 45
-rw-r--r--  arch/riscv/lib/uaccess.S | 10
-rw-r--r--  arch/riscv/lib/uaccess_vector.S | 53
-rw-r--r--  arch/riscv/lib/xor.S | 81
-rw-r--r--  arch/riscv/mm/extable.c | 31
-rw-r--r--  arch/riscv/mm/hugetlbpage.c | 80
-rw-r--r--  arch/riscv/mm/init.c | 29
-rw-r--r--  arch/riscv/mm/tlbflush.c | 73
-rw-r--r--  arch/riscv/net/bpf_jit.h | 134
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 220
-rw-r--r--  arch/s390/configs/compat.config | 3
-rw-r--r--  arch/s390/configs/debug_defconfig | 9
-rw-r--r--  arch/s390/configs/defconfig | 10
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 2
-rw-r--r--  arch/s390/include/asm/jump_label.h | 4
-rw-r--r--  arch/s390/kvm/priv.c | 8
-rw-r--r--  arch/s390/kvm/vsie.c | 1
-rw-r--r--  arch/s390/mm/gmap.c | 1
-rw-r--r--  arch/s390/pci/pci.c | 2
-rw-r--r--  arch/sh/boards/mach-ecovec24/setup.c | 2
-rw-r--r--  arch/sh/configs/sdk7786_defconfig | 3
-rw-r--r--  arch/sh/kernel/vsyscall/Makefile | 5
-rw-r--r--  arch/sparc/Makefile | 2
-rw-r--r--  arch/sparc/include/asm/jump_label.h | 4
-rw-r--r--  arch/sparc/kernel/pci_sabre.c | 9
-rw-r--r--  arch/sparc/kernel/pci_schizo.c | 13
-rw-r--r--  arch/sparc/vdso/Makefile | 18
-rw-r--r--  arch/sparc/video/Makefile | 2
-rw-r--r--  arch/um/Makefile | 4
-rw-r--r--  arch/um/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/Kconfig.cpu | 2
-rw-r--r--  arch/x86/Makefile | 10
-rw-r--r--  arch/x86/boot/header.S | 14
-rw-r--r--  arch/x86/boot/setup.ld | 6
-rw-r--r--  arch/x86/entry/entry.S | 23
-rw-r--r--  arch/x86/entry/entry_32.S | 3
-rw-r--r--  arch/x86/entry/entry_64.S | 11
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 1
-rw-r--r--  arch/x86/hyperv/hv_vtl.c | 7
-rw-r--r--  arch/x86/hyperv/ivm.c | 65
-rw-r--r--  arch/x86/include/asm/coco.h | 5
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 6
-rw-r--r--  arch/x86/include/asm/entry-common.h | 1
-rw-r--r--  arch/x86/include/asm/intel-family.h | 2
-rw-r--r--  arch/x86/include/asm/jump_label.h | 6
-rw-r--r--  arch/x86/include/asm/kmsan.h | 17
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 2
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 25
-rw-r--r--  arch/x86/include/asm/rmwcc.h | 2
-rw-r--r--  arch/x86/include/asm/set_memory.h | 1
-rw-r--r--  arch/x86/include/asm/special_insns.h | 2
-rw-r--r--  arch/x86/include/asm/syscall_wrapper.h | 25
-rw-r--r--  arch/x86/include/asm/uaccess.h | 10
-rw-r--r--  arch/x86/include/asm/vsyscall.h | 10
-rw-r--r--  arch/x86/kernel/alternative.c | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 28
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 15
-rw-r--r--  arch/x86/kernel/cpu/common.c | 4
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 178
-rw-r--r--  arch/x86/kernel/e820.c | 8
-rw-r--r--  arch/x86/kernel/fpu/signal.c | 13
-rw-r--r--  arch/x86/kernel/kvm.c | 3
-rw-r--r--  arch/x86/kernel/nmi.c | 3
-rw-r--r--  arch/x86/kvm/hyperv.c | 50
-rw-r--r--  arch/x86/kvm/hyperv.h | 3
-rw-r--r--  arch/x86/kvm/svm/svm_ops.h | 6
-rw-r--r--  arch/x86/kvm/vmx/pmu_intel.c | 2
-rw-r--r--  arch/x86/kvm/vmx/run_flags.h | 7
-rw-r--r--  arch/x86/kvm/vmx/vmenter.S | 9
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 24
-rw-r--r--  arch/x86/kvm/vmx/vmx_ops.h | 6
-rw-r--r--  arch/x86/kvm/x86.c | 27
-rw-r--r--  arch/x86/lib/getuser.S | 24
-rw-r--r--  arch/x86/lib/putuser.S | 20
-rw-r--r--  arch/x86/mm/fault.c | 9
-rw-r--r--  arch/x86/mm/ident_map.c | 23
-rw-r--r--  arch/x86/mm/maccess.c | 10
-rw-r--r--  arch/x86/mm/numa.c | 21
-rw-r--r--  arch/x86/mm/pat/set_memory.c | 24
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 5
-rw-r--r--  arch/x86/xen/smp.c | 12
-rw-r--r--  arch/xtensa/include/asm/jump_label.h | 4
-rw-r--r--  block/bio-integrity.c | 2
-rw-r--r--  block/blk-cgroup.c | 2
-rw-r--r--  block/blk-core.c | 10
-rw-r--r--  block/blk-iocost.c | 9
-rw-r--r--  block/blk-map.c | 13
-rw-r--r--  block/blk-mq-debugfs.c | 18
-rw-r--r--  block/blk-mq-sched.c | 2
-rw-r--r--  block/blk-mq.c | 60
-rw-r--r--  block/blk-wbt.c | 4
-rw-r--r--  block/ioctl.c | 2
-rw-r--r--  block/ioprio.c | 26
-rw-r--r--  block/opal_proto.h | 1
-rw-r--r--  block/partitions/core.c | 9
-rw-r--r--  block/sed-opal.c | 6
-rw-r--r--  crypto/algif_hash.c | 5
-rw-r--r--  crypto/cbc.c | 3
-rw-r--r--  crypto/lskcipher.c | 6
-rw-r--r--  drivers/accel/ivpu/ivpu_debugfs.c | 20
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 131
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h | 5
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.c | 142
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.h | 3
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c | 60
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c | 38
-rw-r--r--  drivers/accel/ivpu/ivpu_ipc.c | 6
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c | 162
-rw-r--r--  drivers/accel/ivpu/ivpu_job.h | 3
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.c | 58
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.h | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.c | 9
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 91
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.h | 6
-rw-r--r--  drivers/acpi/apei/ghes.c | 89
-rw-r--r--  drivers/acpi/ec.c | 112
-rw-r--r--  drivers/android/binder.c | 10
-rw-r--r--  drivers/ata/ahci.c | 52
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/ahci_ceva.c | 125
-rw-r--r--  drivers/ata/libata-core.c | 59
-rw-r--r--  drivers/ata/libata-sata.c | 2
-rw-r--r--  drivers/atm/fore200e.c | 6
-rw-r--r--  drivers/atm/idt77252.c | 2
-rw-r--r--  drivers/base/arch_topology.c | 13
-rw-r--r--  drivers/base/core.c | 26
-rw-r--r--  drivers/base/regmap/regmap-kunit.c | 57
-rw-r--r--  drivers/bcma/main.c | 2
-rw-r--r--  drivers/block/aoe/aoeblk.c | 5
-rw-r--r--  drivers/block/loop.c | 52
-rw-r--r--  drivers/block/nbd.c | 6
-rw-r--r--  drivers/block/null_blk/main.c | 4
-rw-r--r--  drivers/block/rbd.c | 32
-rw-r--r--  drivers/block/virtio_blk.c | 9
-rw-r--r--  drivers/bluetooth/btqca.c | 2
-rw-r--r--  drivers/bluetooth/hci_bcm4377.c | 3
-rw-r--r--  drivers/bluetooth/hci_qca.c | 22
-rw-r--r--  drivers/bus/imx-weim.c | 2
-rw-r--r--  drivers/cache/ax45mp_cache.c | 4
-rw-r--r--  drivers/clk/qcom/gcc-x1e80100.c | 3
-rw-r--r--  drivers/clk/samsung/clk-gs101.c | 2
-rw-r--r--  drivers/clocksource/timer-cadence-ttc.c | 22
-rw-r--r--  drivers/clocksource/timer-ep93xx.c | 5
-rw-r--r--  drivers/clocksource/timer-riscv.c | 7
-rw-r--r--  drivers/clocksource/timer-ti-dm.c | 4
-rw-r--r--  drivers/connector/cn_proc.c | 5
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 7
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 58
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 34
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 7
-rw-r--r--  drivers/crypto/caam/caamhash.c | 7
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c | 4
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 5
-rw-r--r--  drivers/cxl/acpi.c | 46
-rw-r--r--  drivers/cxl/core/cdat.c | 86
-rw-r--r--  drivers/cxl/core/mbox.c | 4
-rw-r--r--  drivers/cxl/core/memdev.c | 63
-rw-r--r--  drivers/cxl/core/pci.c | 49
-rw-r--r--  drivers/cxl/core/region.c | 66
-rw-r--r--  drivers/cxl/core/trace.h | 6
-rw-r--r--  drivers/cxl/cxl.h | 2
-rw-r--r--  drivers/cxl/cxlmem.h | 10
-rw-r--r--  drivers/cxl/mem.c | 56
-rw-r--r--  drivers/cxl/pci.c | 83
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c | 7
-rw-r--r--  drivers/dma/Kconfig | 14
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/apple-admac.c | 5
-rw-r--r--  drivers/dma/at_hdmac.c | 32
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 280
-rw-r--r--  drivers/dma/dmaengine.c | 3
-rw-r--r--  drivers/dma/dmatest.c | 17
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-core.c | 17
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 4
-rw-r--r--  drivers/dma/dw-edma/dw-hdma-v0-core.c | 39
-rw-r--r--  drivers/dma/dw-edma/dw-hdma-v0-debugfs.c | 4
-rw-r--r--  drivers/dma/dw-edma/dw-hdma-v0-regs.h | 2
-rw-r--r--  drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 10
-rw-r--r--  drivers/dma/fsl-edma-common.c | 2
-rw-r--r--  drivers/dma/fsl-edma-common.h | 5
-rw-r--r--  drivers/dma/fsl-edma-main.c | 21
-rw-r--r--  drivers/dma/fsl-qdma.c | 73
-rw-r--r--  drivers/dma/idxd/cdev.c | 6
-rw-r--r--  drivers/dma/idxd/debugfs.c | 2
-rw-r--r--  drivers/dma/idxd/device.c | 9
-rw-r--r--  drivers/dma/idxd/idxd.h | 1
-rw-r--r--  drivers/dma/idxd/init.c | 15
-rw-r--r--  drivers/dma/idxd/irq.c | 3
-rw-r--r--  drivers/dma/imx-sdma.c | 4
-rw-r--r--  drivers/dma/ls2x-apb-dma.c | 705
-rw-r--r--  drivers/dma/milbeaut-hdmac.c | 17
-rw-r--r--  drivers/dma/milbeaut-xdmac.c | 17
-rw-r--r--  drivers/dma/pl330.c | 3
-rw-r--r--  drivers/dma/ptdma/ptdma-dmaengine.c | 2
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c | 44
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.h | 8
-rw-r--r--  drivers/dma/sh/rz-dmac.c | 8
-rw-r--r--  drivers/dma/sh/shdma.h | 2
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 10
-rw-r--r--  drivers/dma/ste_dma40.c | 12
-rw-r--r--  drivers/dma/tegra210-adma.c | 35
-rw-r--r--  drivers/dma/ti/Makefile | 3
-rw-r--r--  drivers/dma/ti/edma.c | 10
-rw-r--r--  drivers/dma/ti/k3-psil-am62p.c | 325
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h | 1
-rw-r--r--  drivers/dma/ti/k3-psil.c | 2
-rw-r--r--  drivers/dma/ti/k3-udma.c | 12
-rw-r--r--  drivers/dma/uniphier-mdmac.c | 17
-rw-r--r--  drivers/dma/uniphier-xdmac.c | 17
-rw-r--r--  drivers/dma/xilinx/xdma-regs.h | 30
-rw-r--r--  drivers/dma/xilinx/xdma.c | 349
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 2
-rw-r--r--  drivers/dpll/dpll_core.c | 93
-rw-r--r--  drivers/dpll/dpll_core.h | 6
-rw-r--r--  drivers/dpll/dpll_netlink.c | 124
-rw-r--r--  drivers/dpll/dpll_nl.c | 4
-rw-r--r--  drivers/dpll/dpll_nl.h | 2
-rw-r--r--  drivers/firewire/core-card.c | 32
-rw-r--r--  drivers/firewire/core-device.c | 18
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 85
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 5
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 1
-rw-r--r--  drivers/firmware/arm_scmi/mailbox.c | 14
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 23
-rw-r--r--  drivers/firmware/arm_scmi/raw_mode.c | 12
-rw-r--r--  drivers/firmware/arm_scmi/shmem.c | 8
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 2
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 2
-rw-r--r--  drivers/firmware/efi/cper.c | 19
-rw-r--r--  drivers/firmware/efi/efi-init.c | 19
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 4
-rw-r--r--  drivers/firmware/efi/libstub/alignedmem.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 3
-rw-r--r--  drivers/firmware/efi/libstub/kaslr.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c | 12
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 25
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.h | 4
-rw-r--r--  drivers/firmware/efi/libstub/zboot.c | 2
-rw-r--r--  drivers/firmware/efi/riscv-runtime.c | 2
-rw-r--r--  drivers/firmware/microchip/mpfs-auto-update.c | 5
-rw-r--r--  drivers/firmware/sysfb.c | 16
-rw-r--r--  drivers/gpio/gpio-74x164.c | 4
-rw-r--r--  drivers/gpio/gpio-eic-sprd.c | 32
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 14
-rw-r--r--  drivers/gpio/gpiolib.c | 23
-rw-r--r--  drivers/gpu/drm/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v3_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 113
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c | 62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 47
-rw-r--r--  drivers/gpu/drm/amd/display/include/audio_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/amdgpu_reg_state.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 29
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 47
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 73
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 19
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 71
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.h | 2
-rw-r--r--  drivers/gpu/drm/bridge/aux-hpd-bridge.c | 70
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8640.c | 23
-rw-r--r--  drivers/gpu/drm/bridge/samsung-dsim.c | 32
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 42
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 2
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 22
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 2
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc_regs.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.c | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_dsi.c | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 20
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.c | 22
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_reg.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 32
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_exec.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vmm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 85
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 4
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 26
-rw-r--r--  drivers/gpu/drm/tests/drm_buddy_test.c | 306
-rw-r--r--  drivers/gpu/drm/tests/drm_mm_test.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c | 21
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_submit.c | 35
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/xe/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 1
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_abi.h | 4
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h | 4
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h | 8
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 6
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_messages_abi.h | 20
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h | 13
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo.c | 5
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs_test.c | 1
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_wa_test.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 27
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 43
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_display.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_dma_buf.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 134
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h | 26
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_freq.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_idle.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.c | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_fence.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.c | 26
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 163
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c | 82
-rw-r--r--  drivers/gpu/drm/xe/xe_pt_walk.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pt_walk.h | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_query.c | 50
-rw-r--r--  drivers/gpu/drm/xe/xe_range_fence.c | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c | 60
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_sync_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_trace.h | 59
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 368
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.h | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h | 33
-rw-r--r--  drivers/gpu/host1x/dev.c | 15
-rw-r--r--  drivers/gpu/host1x/dev.h | 6
-rw-r--r--  drivers/hid/bpf/hid_bpf_dispatch.c | 109
-rw-r--r--  drivers/hid/bpf/hid_bpf_dispatch.h | 4
-rw-r--r--  drivers/hid/bpf/hid_bpf_jmp_table.c | 40
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 15
-rw-r--r--  drivers/hid/hid-multitouch.c | 4
-rw-r--r--  drivers/hid/hid-nvidia-shield.c | 4
-rw-r--r--  drivers/hid/hid-steam.c | 36
-rw-r--r--  drivers/hid/hidraw.c | 7
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 6
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-of.c | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client.c | 4
-rw-r--r--  drivers/hid/wacom_sys.c | 63
-rw-r--r--  drivers/hid/wacom_wac.c | 9
-rw-r--r--  drivers/hv/channel.c | 176
-rw-r--r--  drivers/hv/hv_util.c | 31
-rw-r--r--  drivers/hv/vmbus_drv.c | 2
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c | 7
-rw-r--r--  drivers/hwmon/coretemp.c | 42
-rw-r--r--  drivers/hwmon/gigabyte_waterforce.c | 2
-rw-r--r--  drivers/hwmon/nct6775-core.c | 14
-rw-r--r--  drivers/hwmon/pmbus/mp2975.c | 16
-rw-r--r--  drivers/i2c/busses/Makefile | 6
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-core.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 14
-rw-r--r--  drivers/iio/accel/Kconfig | 2
-rw-r--r--  drivers/iio/adc/ad4130.c | 12
-rw-r--r--  drivers/iio/adc/ad7091r8.c | 2
-rw-r--r--  drivers/iio/humidity/Kconfig | 12
-rw-r--r--  drivers/iio/humidity/Makefile | 1
-rw-r--r--  drivers/iio/humidity/hdc3020.c | 2
-rw-r--r--  drivers/iio/imu/bno055/Kconfig | 1
-rw-r--r--  drivers/iio/industrialio-core.c | 5
-rw-r--r--  drivers/iio/light/hid-sensor-als.c | 1
-rw-r--r--  drivers/iio/magnetometer/rm3100-core.c | 10
-rw-r--r--  drivers/iio/pressure/bmp280-spi.c | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 43
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 2
-rw-r--r--  drivers/infiniband/hw/irdma/defs.h | 1
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 8
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/cong.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.c | 2
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 11
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 4
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 17
-rw-r--r--  drivers/input/joystick/xpad.c | 2
-rw-r--r--  drivers/input/keyboard/atkbd.c | 14
-rw-r--r--  drivers/input/mouse/bcm5974.c | 20
-rw-r--r--  drivers/input/serio/i8042-acpipnpio.h | 14
-rw-r--r--  drivers/input/touchscreen/goodix.c | 3
-rw-r--r--  drivers/interconnect/qcom/sc8180x.c | 1
-rw-r--r--  drivers/interconnect/qcom/sm8550.c | 1
-rw-r--r--  drivers/interconnect/qcom/sm8650.c | 2
-rw-r--r--  drivers/interconnect/qcom/x1e80100.c | 1
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 38
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c | 17
-rw-r--r--  drivers/iommu/intel/iommu.c | 214
-rw-r--r--  drivers/iommu/intel/iommu.h | 7
-rw-r--r--  drivers/iommu/intel/nested.c | 16
-rw-r--r--  drivers/iommu/intel/pasid.c | 5
-rw-r--r--  drivers/iommu/intel/pasid.h | 1
-rw-r--r--  drivers/iommu/iommu-sva.c | 17
-rw-r--r--  drivers/iommu/iommu.c | 18
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c | 3
-rw-r--r--  drivers/iommu/iommufd/io_pagetable.c | 9
-rw-r--r--  drivers/iommu/iommufd/iommufd_test.h | 1
-rw-r--r--  drivers/iommu/iommufd/iova_bitmap.c | 68
-rw-r--r--  drivers/iommu/iommufd/selftest.c | 148
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 5
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 64
-rw-r--r--  drivers/irqchip/irq-loongson-eiointc.c | 2
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 8
-rw-r--r--  drivers/irqchip/irq-qcom-mpm.c | 4
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 8
-rw-r--r--  drivers/isdn/capi/capi.c | 21
-rw-r--r--  drivers/isdn/mISDN/dsp_pipeline.c | 16
-rw-r--r--  drivers/md/dm-core.h | 2
-rw-r--r--  drivers/md/dm-crypt.c | 137
-rw-r--r--  drivers/md/dm-integrity.c | 93
-rw-r--r--  drivers/md/dm-ioctl.c | 3
-rw-r--r--  drivers/md/dm-stats.c | 9
-rw-r--r--  drivers/md/dm-table.c | 9
-rw-r--r--  drivers/md/dm-verity-target.c | 114
-rw-r--r--  drivers/md/dm-verity.h | 11
-rw-r--r--  drivers/md/dm-writecache.c | 8
-rw-r--r--  drivers/md/md.c | 110
-rw-r--r--  drivers/md/raid1.c | 14
-rw-r--r--  drivers/md/raid10.c | 16
-rw-r--r--  drivers/md/raid5.c | 29
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c | 2
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-v4l2.c | 55
-rw-r--r--  drivers/media/pci/solo6x10/solo6x10-offsets.h | 10
-rw-r--r--  drivers/media/platform/chips-media/wave5/wave5-vpu.c | 2
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c | 3
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-common.h | 2
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c | 3
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c | 24
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c | 3
-rw-r--r--  drivers/media/rc/Kconfig | 1
-rw-r--r--  drivers/media/rc/bpf-lirc.c | 8
-rw-r--r--  drivers/media/rc/ir_toy.c | 2
-rw-r--r--  drivers/media/rc/lirc_dev.c | 5
-rw-r--r--  drivers/media/rc/rc-core-priv.h | 2
-rw-r--r--  drivers/misc/fastrpc.c | 2
-rw-r--r--  drivers/misc/open-dice.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 6
-rw-r--r--  drivers/mmc/host/mmci_stm32_sdmmc.c | 24
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c | 30
-rw-r--r--  drivers/mmc/host/sdhci-xenon-phy.c | 48
-rw-r--r--  drivers/mtd/mtdcore.c | 1
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c | 13
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c | 6
-rw-r--r--  drivers/net/arcnet/arc-rawmode.c | 1
-rw-r--r--  drivers/net/arcnet/arc-rimi.c | 1
-rw-r--r--  drivers/net/arcnet/arcnet.c | 1
-rw-r--r--  drivers/net/arcnet/capmode.c | 1
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 1
-rw-r--r--  drivers/net/arcnet/com20020.c | 1
-rw-r--r--  drivers/net/arcnet/com20020_cs.c | 1
-rw-r--r--  drivers/net/arcnet/com90io.c | 1
-rw-r--r--  drivers/net/arcnet/com90xx.c | 1
-rw-r--r--  drivers/net/arcnet/rfc1051.c | 1
-rw-r--r--  drivers/net/arcnet/rfc1201.c | 1
-rw-r--r--  drivers/net/bareudp.c | 25
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 165
-rw-r--r--  drivers/net/bonding/bond_main.c | 63
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 16
-rw-r--r--  drivers/net/bonding/bond_options.c | 28
-rw-r--r--  drivers/net/can/Kconfig | 3
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 13
-rw-r--r--  drivers/net/can/dev/netlink.c | 2
-rw-r--r--  drivers/net/can/esd/Kconfig | 12
-rw-r--r--  drivers/net/can/esd/Makefile | 7
-rw-r--r--  drivers/net/can/esd/esd_402_pci-core.c | 514
-rw-r--r--  drivers/net/can/esd/esdacc.c | 764
-rw-r--r--  drivers/net/can/esd/esdacc.h | 356
-rw-r--r--  drivers/net/can/flexcan/flexcan-core.c | 12
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 62
-rw-r--r--  drivers/net/can/m_can/m_can.c | 579
-rw-r--r--  drivers/net/can/m_can/m_can.h | 35
-rw-r--r--  drivers/net/can/m_can/m_can_pci.c | 1
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 5
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c | 33
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 8
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 2
-rw-r--r--  drivers/net/can/usb/Kconfig | 1
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 3
-rw-r--r--  drivers/net/can/vxcan.c | 2
-rw-r--r--  drivers/net/can/xilinx_can.c | 178
-rw-r--r--  drivers/net/dsa/Kconfig | 2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 42
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 7
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 2
-rw-r--r--  drivers/net/dsa/dsa_loop_bdinfo.c | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 404
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 102
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c | 5
-rw-r--r--  drivers/net/dsa/mt7530-mdio.c | 7
-rw-r--r--  drivers/net/dsa/mt7530.c | 565
-rw-r--r--  drivers/net/dsa/mt7530.h | 38
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_scratch.c | 35
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6185.c | 3
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c | 22
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c | 4
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 4
-rw-r--r--  drivers/net/dsa/realtek/Kconfig | 20
-rw-r--r--  drivers/net/dsa/realtek/Makefile | 13
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 205
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.h | 48
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 279
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.h | 48
-rw-r--r--  drivers/net/dsa/realtek/realtek.h | 14
-rw-r--r--  drivers/net/dsa/realtek/rtl8365mb.c | 132
-rw-r--r--  drivers/net/dsa/realtek/rtl8366-core.c | 22
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c | 119
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.c | 335
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.h | 24
-rw-r--r--  drivers/net/dummy.c | 11
-rw-r--r--  drivers/net/ethernet/8390/8390.c | 1
-rw-r--r--  drivers/net/ethernet/8390/8390p.c | 1
-rw-r--r--  drivers/net/ethernet/8390/apne.c | 1
-rw-r--r--  drivers/net/ethernet/8390/hydra.c | 1
-rw-r--r--  drivers/net/ethernet/8390/stnic.c | 1
-rw-r--r--  drivers/net/ethernet/8390/zorro8390.c | 1
-rw-r--r--  drivers/net/ethernet/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/adi/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 10
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 323
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 7
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 49
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 39
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 181
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 1
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.c | 1
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c | 74
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c | 30
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c | 133
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h | 6
-rw-r--r--  drivers/net/ethernet/amd/pds_core/debugfs.c | 12
-rw-r--r--  drivers/net/ethernet/amd/pds_core/dev.c | 30
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c | 3
-rw-r--r--  drivers/net/ethernet/amd/pds_core/fw.c | 3
-rw-r--r--  drivers/net/ethernet/amd/pds_core/main.c | 79
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 13
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 96
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 25
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 212
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 50
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 958
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 75
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 471
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c11
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c54
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c14
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c3
-rw-r--r--drivers/net/ethernet/ec_bhf.c1
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c53
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c5
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c151
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c18
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h27
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c50
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h20
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c62
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c76
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c27
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c68
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c17
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h20
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h93
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c97
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c638
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c38
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c183
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c99
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c272
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c252
-rw-r--r--drivers/net/ethernet/intel/ice/ice_osdep.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c229
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c35
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h146
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c7
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c41
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c2280
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h70
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c43
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c39
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/Makefile1
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h10
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c25
-rw-r--r--drivers/net/ethernet/intel/igc/igc_leds.c280
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c42
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c70
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c155
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c262
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h112
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c70
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c64
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c242
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h189
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c66
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c297
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c1
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c27
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig19
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c489
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c500
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h160
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c273
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c1231
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h334
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c430
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h166
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h154
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h162
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c510
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h224
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c330
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h276
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h617
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c186
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c75
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c734
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c160
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c8
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c5
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_lag.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c88
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c46
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c19
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h15
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_fw.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c214
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h15
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c18
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c663
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c64
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.h16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k_common.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k_common.h29
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c21
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.h15
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c71
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h22
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c17
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169_leds.c145
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c268
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c7
-rw-r--r--drivers/net/ethernet/renesas/Kconfig1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h60
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1205
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h1
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c1
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.c5
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c5
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h60
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c137
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c87
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c4
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h4
-rw-r--r--drivers/net/ethernet/ti/cpts.c17
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c269
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h7
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c141
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c82
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h17
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c3
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c4
-rw-r--r--drivers/net/fddi/skfp/skfddi.c1
-rw-r--r--drivers/net/fjes/fjes_hw.c37
-rw-r--r--drivers/net/geneve.c58
-rw-r--r--drivers/net/gtp.c53
-rw-r--r--drivers/net/hyperv/netvsc.c5
-rw-r--r--drivers/net/hyperv/netvsc_drv.c86
-rw-r--r--drivers/net/ieee802154/fakelb.c1
-rw-r--r--drivers/net/ipa/ipa.h5
-rw-r--r--drivers/net/ipa/ipa_cmd.c6
-rw-r--r--drivers/net/ipa/ipa_endpoint.c29
-rw-r--r--drivers/net/ipa/ipa_interrupt.c119
-rw-r--r--drivers/net/ipa/ipa_interrupt.h30
-rw-r--r--drivers/net/ipa/ipa_main.c60
-rw-r--r--drivers/net/ipa/ipa_mem.c37
-rw-r--r--drivers/net/ipa/ipa_mem.h5
-rw-r--r--drivers/net/ipa/ipa_modem.c110
-rw-r--r--drivers/net/ipa/ipa_power.c108
-rw-r--r--drivers/net/ipa/ipa_power.h29
-rw-r--r--drivers/net/ipa/ipa_qmi.c10
-rw-r--r--drivers/net/ipa/ipa_reg.c8
-rw-r--r--drivers/net/ipa/ipa_reg.h4
-rw-r--r--drivers/net/ipa/ipa_smp2p.c33
-rw-r--r--drivers/net/ipa/ipa_smp2p.h7
-rw-r--r--drivers/net/ipa/ipa_table.c18
-rw-r--r--drivers/net/ipa/ipa_uc.c9
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c2
-rw-r--r--drivers/net/ipvlan/ipvtap.c1
-rw-r--r--drivers/net/loopback.c1
-rw-r--r--drivers/net/macsec.c37
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c94
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c109
-rw-r--r--drivers/net/mdio/of_mdio.c79
-rw-r--r--drivers/net/netconsole.c359
-rw-r--r--drivers/net/netdevsim/bus.c149
-rw-r--r--drivers/net/netdevsim/dev.c8
-rw-r--r--drivers/net/netdevsim/netdev.c53
-rw-r--r--drivers/net/netdevsim/netdevsim.h3
-rw-r--r--drivers/net/netkit.c2
-rw-r--r--drivers/net/nlmon.c24
-rw-r--r--drivers/net/pcs/pcs-lynx.c1
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c1
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c5
-rw-r--r--drivers/net/pcs/pcs-xpcs.c18
-rw-r--r--drivers/net/phy/Kconfig8
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/adin1100.c55
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c103
-rw-r--r--drivers/net/phy/at803x.c2432
-rw-r--r--drivers/net/phy/broadcom.c3
-rw-r--r--drivers/net/phy/dp83822.c174
-rw-r--r--drivers/net/phy/dp83867.c22
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c640
-rw-r--r--drivers/net/phy/marvell.c7
-rw-r--r--drivers/net/phy/mdio_bus.c46
-rw-r--r--drivers/net/phy/mdio_devres.c1
-rw-r--r--drivers/net/phy/mediatek-ge-soc.c147
-rw-r--r--drivers/net/phy/micrel.c120
-rw-r--r--drivers/net/phy/mxl-gpy.c20
-rw-r--r--drivers/net/phy/phy-c45.c137
-rw-r--r--drivers/net/phy/phy.c61
-rw-r--r--drivers/net/phy/phy_device.c206
-rw-r--r--drivers/net/phy/phylink.c8
-rw-r--r--drivers/net/phy/qcom/Kconfig30
-rw-r--r--drivers/net/phy/qcom/Makefile6
-rw-r--r--drivers/net/phy/qcom/at803x.c1106
-rw-r--r--drivers/net/phy/qcom/qca807x.c849
-rw-r--r--drivers/net/phy/qcom/qca808x.c663
-rw-r--r--drivers/net/phy/qcom/qca83xx.c275
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c676
-rw-r--r--drivers/net/phy/qcom/qcom.h243
-rw-r--r--drivers/net/phy/realtek.c48
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c2
-rw-r--r--drivers/net/plip/plip.c1
-rw-r--r--drivers/net/ppp/bsd_comp.c1
-rw-r--r--drivers/net/ppp/ppp_async.c5
-rw-r--r--drivers/net/ppp/ppp_deflate.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c21
-rw-r--r--drivers/net/ppp/ppp_synctty.c1
-rw-r--r--drivers/net/ppp/pppoe.c23
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c45
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/ax88179_178a.c20
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/lan78xx.c12
-rw-r--r--drivers/net/usb/r8152.c47
-rw-r--r--drivers/net/usb/smsc95xx.c5
-rw-r--r--drivers/net/usb/sr9800.c4
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/veth.c117
-rw-r--r--drivers/net/vsockmon.c19
-rw-r--r--drivers/net/vxlan/vxlan_core.c53
-rw-r--r--drivers/net/wan/framer/framer-core.c30
-rw-r--r--drivers/net/wireguard/receive.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h62
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c100
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h44
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c20
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1214
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c62
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c267
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c300
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h151
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c270
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h84
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c25
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c166
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c30
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c171
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h33
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c415
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c15
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c33
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h55
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1309
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c52
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c142
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h23
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c94
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c429
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h35
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c13
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h116
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h29
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c327
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h202
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_aic.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c5
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h16
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c72
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c46
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c154
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h60
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h127
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c632
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c500
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c427
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h210
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c132
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c147
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c340
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c230
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c151
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/module.c10
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c2
-rw-r--r--drivers/net/wireless/intersil/p54/main.c4
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c13
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c16
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c110
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c94
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c82
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c40
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h11
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h28
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c33
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c588
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c109
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c195
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h42
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c61
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h109
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c646
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c382
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h263
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2382
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1478
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c292
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h59
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c197
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c136
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c1105
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h113
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c331
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h488
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c72
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c1660
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c378
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c12
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c19
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c42
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c77
-rw-r--r--drivers/net/wireless/st/cw1200/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c147
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h5
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c47
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h18
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c103
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c110
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h10
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c115
-rw-r--r--drivers/net/wwan/t7xx/t7xx_reg.h24
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c132
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h1
-rw-r--r--drivers/net/wwan/wwan_core.c36
-rw-r--r--drivers/net/wwan/wwan_hwsim.c16
-rw-r--r--drivers/net/xen-netback/netback.c107
-rw-r--r--drivers/nvme/common/auth.c1
-rw-r--r--drivers/nvme/common/keyring.c3
-rw-r--r--drivers/nvme/host/apple.c13
-rw-r--r--drivers/nvme/host/auth.c19
-rw-r--r--drivers/nvme/host/constants.c10
-rw-r--r--drivers/nvme/host/core.c136
-rw-r--r--drivers/nvme/host/fabrics.c20
-rw-r--r--drivers/nvme/host/fabrics.h8
-rw-r--r--drivers/nvme/host/fc.c60
-rw-r--r--drivers/nvme/host/ioctl.c2
-rw-r--r--drivers/nvme/host/multipath.c15
-rw-r--r--drivers/nvme/host/nvme.h78
-rw-r--r--drivers/nvme/host/pci.c28
-rw-r--r--drivers/nvme/host/pr.c2
-rw-r--r--drivers/nvme/host/rdma.c24
-rw-r--r--drivers/nvme/host/sysfs.c72
-rw-r--r--drivers/nvme/host/tcp.c25
-rw-r--r--drivers/nvme/target/core.c5
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c4
-rw-r--r--drivers/nvme/target/fc.c188
-rw-r--r--drivers/nvme/target/fcloop.c14
-rw-r--r--drivers/nvme/target/loop.c9
-rw-r--r--drivers/nvme/target/rdma.c20
-rw-r--r--drivers/nvme/target/tcp.c54
-rw-r--r--drivers/nvme/target/trace.c6
-rw-r--r--drivers/nvme/target/trace.h33
-rw-r--r--drivers/nvmem/core.c5
-rw-r--r--drivers/of/property.c65
-rw-r--r--drivers/of/unittest.c12
-rw-r--r--drivers/pci/bus.c49
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c10
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c2
-rw-r--r--drivers/pci/msi/irqdomain.c2
-rw-r--r--drivers/pci/pci.c115
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/pcie/aspm.c13
-rw-r--r--drivers/perf/arm-cmn.c11
-rw-r--r--drivers/perf/cxl_pmu.c12
-rw-r--r--drivers/perf/riscv_pmu.c18
-rw-r--r--drivers/perf/riscv_pmu_legacy.c10
-rw-r--r--drivers/perf/riscv_pmu_sbi.c8
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c2
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c166
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c32
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c4
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c4
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c67
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig1
-rw-r--r--drivers/platform/x86/amd/pmf/core.c17
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h5
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c36
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c82
-rw-r--r--drivers/platform/x86/intel/ifs/load.c3
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c82
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h32
-rw-r--r--drivers/platform/x86/intel/vbtn.c3
-rw-r--r--drivers/platform/x86/intel/wmi/sbl-fw-update.c4
-rw-r--r--drivers/platform/x86/p2sb.c177
-rw-r--r--drivers/platform/x86/serdev_helpers.h80
-rw-r--r--drivers/platform/x86/think-lmi.c20
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c64
-rw-r--r--drivers/platform/x86/wmi.c181
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c38
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c1
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h1
-rw-r--r--drivers/pmdomain/arm/scmi_perf_domain.c3
-rw-r--r--drivers/pmdomain/core.c2
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c15
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c7
-rw-r--r--drivers/pmdomain/renesas/r8a77980-sysc.c3
-rw-r--r--drivers/power/reset/as3722-poweroff.c6
-rw-r--r--drivers/power/reset/at91-poweroff.c13
-rw-r--r--drivers/power/reset/at91-reset.c11
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c13
-rw-r--r--drivers/power/reset/atc260x-poweroff.c6
-rw-r--r--drivers/power/reset/gpio-restart.c34
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c5
-rw-r--r--drivers/power/reset/mt6323-poweroff.c6
-rw-r--r--drivers/power/reset/pwr-mlxbf.c20
-rw-r--r--drivers/power/reset/qnap-poweroff.c5
-rw-r--r--drivers/power/reset/regulator-poweroff.c6
-rw-r--r--drivers/power/reset/restart-poweroff.c6
-rw-r--r--drivers/power/reset/rmobile-reset.c5
-rw-r--r--drivers/power/reset/syscon-poweroff.c6
-rw-r--r--drivers/power/reset/tps65086-restart.c12
-rw-r--r--drivers/power/supply/Kconfig1
-rw-r--r--drivers/power/supply/bq24190_charger.c457
-rw-r--r--drivers/power/supply/bq256xx_charger.c5
-rw-r--r--drivers/power/supply/bq27xxx_battery.c22
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c5
-rw-r--r--drivers/power/supply/cw2015_battery.c2
-rw-r--r--drivers/power/supply/power_supply_core.c167
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c4
-rw-r--r--drivers/ptp/Kconfig12
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/ptp_clock.c16
-rw-r--r--drivers/ptp/ptp_fc3.c1014
-rw-r--r--drivers/ptp/ptp_fc3.h45
-rw-r--r--drivers/ptp/ptp_ocp.c311
-rw-r--r--drivers/ptp/ptp_sysfs.c13
-rw-r--r--drivers/regulator/max5970-regulator.c10
-rw-r--r--drivers/regulator/pwm-regulator.c43
-rw-r--r--drivers/regulator/ti-abb-regulator.c22
-rw-r--r--drivers/s390/cio/device_ops.c6
-rw-r--r--drivers/s390/net/qeth_l3_main.c9
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c20
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c6
-rw-r--r--drivers/scsi/fnic/fnic.h3
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c5
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c5
-rw-r--r--drivers/scsi/initio.c3
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c6
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/scsi.c22
-rw-r--r--drivers/scsi/scsi_error.c18
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/sd.c26
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h1
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c94
-rw-r--r--drivers/scsi/storvsc_drv.c12
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/soc/apple/mailbox.c6
-rw-r--r--drivers/soc/microchip/Kconfig2
-rw-r--r--drivers/soc/qcom/pmic_glink.c21
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c16
-rw-r--r--drivers/spi/spi-bcm-qspi.c4
-rw-r--r--drivers/spi/spi-cadence-quadspi.c33
-rw-r--r--drivers/spi/spi-cadence.c17
-rw-r--r--drivers/spi/spi-coldfire-qspi.c1
-rw-r--r--drivers/spi/spi-cs42l43.c5
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c5
-rw-r--r--drivers/spi/spi-imx.c13
-rw-r--r--drivers/spi/spi-intel-pci.c3
-rw-r--r--drivers/spi/spi-mxs.c3
-rw-r--r--drivers/spi/spi-omap2-mcspi.c137
-rw-r--r--drivers/spi/spi-ppc4xx.c7
-rw-r--r--drivers/spi/spi-sh-msiof.c16
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/ssb/main.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c58
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c59
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/target/target_core_configfs.c48
-rw-r--r--drivers/target/target_core_device.c5
-rw-r--r--drivers/target/target_core_pscsi.c9
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/tee/optee/device.c3
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c32
-rw-r--r--drivers/thermal/loongson2_thermal.c3
-rw-r--r--drivers/thunderbolt/tb_regs.h2
-rw-r--r--drivers/thunderbolt/usb4.c2
-rw-r--r--drivers/tty/hvc/Kconfig8
-rw-r--r--drivers/tty/hvc/hvc_riscv_sbi.c37
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c2
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/amba-pl011.c60
-rw-r--r--drivers/tty/serial/earlycon-riscv-sbi.c27
-rw-r--r--drivers/tty/serial/max310x.c53
-rw-r--r--drivers/tty/serial/mxs-auart.c5
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c4
-rw-r--r--drivers/ufs/core/ufshcd.c21
-rw-r--r--drivers/ufs/host/ufs-qcom.c7
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c8
-rw-r--r--drivers/usb/cdns3/core.c1
-rw-r--r--drivers/usb/cdns3/drd.c13
-rw-r--r--drivers/usb/cdns3/drd.h6
-rw-r--r--drivers/usb/cdns3/host.c16
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/core.c44
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/hub.c46
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c17
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/dwc3/host.c4
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c20
-rw-r--r--drivers/usb/gadget/function/f_ncm.c18
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c3
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/host/uhci-grlib.c1
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci-ring.c80
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/roles/class.c29
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/option.c1
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/storage/scsiglue.c7
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c6
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c88
-rw-r--r--drivers/vhost/net.c91
-rw-r--r--drivers/video/fbdev/core/fbcon.c11
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c3
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/video/fbdev/stifb.c2
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c1
-rw-r--r--drivers/xen/events/events_base.c8
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/privcmd.c15
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c15
-rw-r--r--fs/9p/v9fs_vfs.h1
-rw-r--r--fs/9p/vfs_addr.c353
-rw-r--r--fs/9p/vfs_file.c89
-rw-r--r--fs/9p/vfs_inode.c16
-rw-r--r--fs/9p/vfs_inode_dotl.c8
-rw-r--r--fs/9p/vfs_super.c14
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Makefile1
-rw-r--r--fs/affs/affs.h1
-rw-r--r--fs/affs/super.c2
-rw-r--r--fs/afs/dir.c32
-rw-r--r--fs/afs/dynroot.c11
-rw-r--r--fs/afs/file.c221
-rw-r--r--fs/afs/inode.c28
-rw-r--r--fs/afs/internal.h78
-rw-r--r--fs/afs/main.c3
-rw-r--r--fs/afs/proc.c5
-rw-r--r--fs/afs/server.c14
-rw-r--r--fs/afs/super.c2
-rw-r--r--fs/afs/volume.c4
-rw-r--r--fs/afs/write.c826
-rw-r--r--fs/aio.c17
-rw-r--r--fs/bcachefs/Makefile2
-rw-r--r--fs/bcachefs/alloc_background.c91
-rw-r--r--fs/bcachefs/alloc_background_format.h92
-rw-r--r--fs/bcachefs/alloc_foreground.c7
-rw-r--r--fs/bcachefs/backpointers.c108
-rw-r--r--fs/bcachefs/backpointers.h1
-rw-r--r--fs/bcachefs/bcachefs.h17
-rw-r--r--fs/bcachefs/bcachefs_format.h888
-rw-r--r--fs/bcachefs/bkey.c2
-rw-r--r--fs/bcachefs/bkey_methods.c9
-rw-r--r--fs/bcachefs/bkey_methods.h10
-rw-r--r--fs/bcachefs/bset.c7
-rw-r--r--fs/bcachefs/bset.h3
-rw-r--r--fs/bcachefs/btree_cache.c12
-rw-r--r--fs/bcachefs/btree_cache.h19
-rw-r--r--fs/bcachefs/btree_gc.c36
-rw-r--r--fs/bcachefs/btree_io.c38
-rw-r--r--fs/bcachefs/btree_iter.c6
-rw-r--r--fs/bcachefs/btree_iter.h5
-rw-r--r--fs/bcachefs/btree_locking.c44
-rw-r--r--fs/bcachefs/btree_locking.h9
-rw-r--r--fs/bcachefs/btree_trans_commit.c35
-rw-r--r--fs/bcachefs/btree_types.h12
-rw-r--r--fs/bcachefs/btree_update_interior.c11
-rw-r--r--fs/bcachefs/btree_update_interior.h42
-rw-r--r--fs/bcachefs/btree_write_buffer.c7
-rw-r--r--fs/bcachefs/buckets.c148
-rw-r--r--fs/bcachefs/buckets.h17
-rw-r--r--fs/bcachefs/buckets_types.h15
-rw-r--r--fs/bcachefs/clock.c4
-rw-r--r--fs/bcachefs/compress.h8
-rw-r--r--fs/bcachefs/data_update.c6
-rw-r--r--fs/bcachefs/debug.c18
-rw-r--r--fs/bcachefs/dirent_format.h42
-rw-r--r--fs/bcachefs/ec.c6
-rw-r--r--fs/bcachefs/ec_format.h19
-rw-r--r--fs/bcachefs/extents.c11
-rw-r--r--fs/bcachefs/extents.h2
-rw-r--r--fs/bcachefs/extents_format.h295
-rw-r--r--fs/bcachefs/eytzinger.h4
-rw-r--r--fs/bcachefs/fs-io-buffered.c21
-rw-r--r--fs/bcachefs/fs-io-direct.c6
-rw-r--r--fs/bcachefs/fs-io-pagecache.c37
-rw-r--r--fs/bcachefs/fs-io-pagecache.h2
-rw-r--r--fs/bcachefs/fs-io.c9
-rw-r--r--fs/bcachefs/fs-ioctl.c15
-rw-r--r--fs/bcachefs/fs.c9
-rw-r--r--fs/bcachefs/fsck.c23
-rw-r--r--fs/bcachefs/inode.c29
-rw-r--r--fs/bcachefs/inode_format.h166
-rw-r--r--fs/bcachefs/io_misc.c4
-rw-r--r--fs/bcachefs/io_write.c14
-rw-r--r--fs/bcachefs/journal.c111
-rw-r--r--fs/bcachefs/journal_io.c12
-rw-r--r--fs/bcachefs/journal_reclaim.c10
-rw-r--r--fs/bcachefs/logged_ops_format.h30
-rw-r--r--fs/bcachefs/mean_and_variance.h2
-rw-r--r--fs/bcachefs/move.c65
-rw-r--r--fs/bcachefs/opts.c4
-rw-r--r--fs/bcachefs/opts.h9
-rw-r--r--fs/bcachefs/printbuf.c1
-rw-r--r--fs/bcachefs/quota_format.h47
-rw-r--r--fs/bcachefs/rebalance.c13
-rw-r--r--fs/bcachefs/recovery.c13
-rw-r--r--fs/bcachefs/reflink.c21
-rw-r--r--fs/bcachefs/reflink.h8
-rw-r--r--fs/bcachefs/reflink_format.h33
-rw-r--r--fs/bcachefs/replicas.c28
-rw-r--r--fs/bcachefs/sb-clean.c2
-rw-r--r--fs/bcachefs/sb-counters.c (renamed from fs/bcachefs/counters.c)2
-rw-r--r--fs/bcachefs/sb-counters.h (renamed from fs/bcachefs/counters.h)7
-rw-r--r--fs/bcachefs/sb-counters_format.h98
-rw-r--r--fs/bcachefs/sb-members.c6
-rw-r--r--fs/bcachefs/snapshot.c6
-rw-r--r--fs/bcachefs/snapshot_format.h36
-rw-r--r--fs/bcachefs/str_hash.h22
-rw-r--r--fs/bcachefs/subvolume_format.h35
-rw-r--r--fs/bcachefs/super-io.c8
-rw-r--r--fs/bcachefs/super.c14
-rw-r--r--fs/bcachefs/sysfs.c15
-rw-r--r--fs/bcachefs/thread_with_file.c2
-rw-r--r--fs/bcachefs/trace.h76
-rw-r--r--fs/bcachefs/util.c32
-rw-r--r--fs/bcachefs/util.h7
-rw-r--r--fs/bcachefs/xattr.c5
-rw-r--r--fs/bcachefs/xattr_format.h19
-rw-r--r--fs/btrfs/block-group.c80
-rw-r--r--fs/btrfs/block-group.h7
-rw-r--r--fs/btrfs/block-rsv.c2
-rw-r--r--fs/btrfs/block-rsv.h32
-rw-r--r--fs/btrfs/compression.c23
-rw-r--r--fs/btrfs/compression.h4
-rw-r--r--fs/btrfs/defrag.c2
-rw-r--r--fs/btrfs/delalloc-space.c29
-rw-r--r--fs/btrfs/dev-replace.c24
-rw-r--r--fs/btrfs/disk-io.c29
-rw-r--r--fs/btrfs/disk-io.h2
-rw-r--r--fs/btrfs/extent-tree.c53
-rw-r--r--fs/btrfs/extent_io.c186
-rw-r--r--fs/btrfs/inode.c70
-rw-r--r--fs/btrfs/ioctl.c14
-rw-r--r--fs/btrfs/lzo.c34
-rw-r--r--fs/btrfs/qgroup.c14
-rw-r--r--fs/btrfs/ref-verify.c6
-rw-r--r--fs/btrfs/scrub.c36
-rw-r--r--fs/btrfs/send.c23
-rw-r--r--fs/btrfs/space-info.c26
-rw-r--r--fs/btrfs/subpage.c3
-rw-r--r--fs/btrfs/super.c8
-rw-r--r--fs/btrfs/transaction.c40
-rw-r--r--fs/btrfs/tree-checker.c2
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/btrfs/zlib.c73
-rw-r--r--fs/btrfs/zoned.c18
-rw-r--r--fs/cachefiles/Kconfig2
-rw-r--r--fs/cachefiles/cache.c2
-rw-r--r--fs/cachefiles/daemon.c1
-rw-r--r--fs/cachefiles/internal.h2
-rw-r--r--fs/cachefiles/io.c34
-rw-r--r--fs/cachefiles/ondemand.c5
-rw-r--r--fs/ceph/Kconfig1
-rw-r--r--fs/ceph/addr.c33
-rw-r--r--fs/ceph/cache.h45
-rw-r--r--fs/ceph/caps.c83
-rw-r--r--fs/ceph/dir.c21
-rw-r--r--fs/ceph/export.c2
-rw-r--r--fs/ceph/file.c8
-rw-r--r--fs/ceph/inode.c6
-rw-r--r--fs/ceph/mds_client.c92
-rw-r--r--fs/ceph/mds_client.h7
-rw-r--r--fs/ceph/mdsmap.c7
-rw-r--r--fs/ceph/mdsmap.h6
-rw-r--r--fs/ceph/quota.c39
-rw-r--r--fs/ceph/super.h16
-rw-r--r--fs/coredump.c45
-rw-r--r--fs/dcache.c5
-rw-r--r--fs/efivarfs/internal.h2
-rw-r--r--fs/efivarfs/super.c7
-rw-r--r--fs/efivarfs/vars.c23
-rw-r--r--fs/erofs/Kconfig7
-rw-r--r--fs/erofs/compress.h5
-rw-r--r--fs/erofs/decompressor.c7
-rw-r--r--fs/erofs/decompressor_deflate.c19
-rw-r--r--fs/erofs/decompressor_lzma.c17
-rw-r--r--fs/erofs/fscache.c15
-rw-r--r--fs/erofs/inode.c2
-rw-r--r--fs/erofs/namei.c28
-rw-r--r--fs/erofs/utils.c2
-rw-r--r--fs/erofs/zdata.c98
-rw-r--r--fs/erofs/zmap.c23
-rw-r--r--fs/eventpoll.c131
-rw-r--r--fs/exec.c106
-rw-r--r--fs/exfat/exfat_fs.h1
-rw-r--r--fs/exfat/file.c37
-rw-r--r--fs/exfat/inode.c7
-rw-r--r--fs/exfat/nls.c14
-rw-r--r--fs/exfat/super.c20
-rw-r--r--fs/ext4/ext4.h8
-rw-r--r--fs/ext4/extents.c124
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/indirect.c2
-rw-r--r--fs/ext4/inode.c90
-rw-r--r--fs/ext4/ioctl.c2
-rw-r--r--fs/ext4/mballoc.c140
-rw-r--r--fs/ext4/mballoc.h1
-rw-r--r--fs/ext4/move_extent.c10
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/ext4/symlink.c8
-rw-r--r--fs/fs-writeback.c10
-rw-r--r--fs/fscache/Kconfig40
-rw-r--r--fs/fscache/Makefile16
-rw-r--r--fs/fscache/internal.h277
-rw-r--r--fs/fuse/cuse.c3
-rw-r--r--fs/fuse/fuse_i.h1
-rw-r--r--fs/fuse/inode.c15
-rw-r--r--fs/gfs2/dentry.c23
-rw-r--r--fs/gfs2/inode.c8
-rw-r--r--fs/hfsplus/hfsplus_fs.h1
-rw-r--r--fs/hfsplus/super.c12
-rw-r--r--fs/hugetlbfs/inode.c21
-rw-r--r--fs/jfs/jfs_dmap.c8
-rw-r--r--fs/namei.c6
-rw-r--r--fs/namespace.c11
-rw-r--r--fs/netfs/Kconfig39
-rw-r--r--fs/netfs/Makefile22
-rw-r--r--fs/netfs/buffered_read.c237
-rw-r--r--fs/netfs/buffered_write.c1257
-rw-r--r--fs/netfs/direct_read.c125
-rw-r--r--fs/netfs/direct_write.c174
-rw-r--r--fs/netfs/fscache_cache.c (renamed from fs/fscache/cache.c)3
-rw-r--r--fs/netfs/fscache_cookie.c (renamed from fs/fscache/cookie.c)0
-rw-r--r--fs/netfs/fscache_internal.h14
-rw-r--r--fs/netfs/fscache_io.c (renamed from fs/fscache/io.c)42
-rw-r--r--fs/netfs/fscache_main.c (renamed from fs/fscache/main.c)25
-rw-r--r--fs/netfs/fscache_proc.c (renamed from fs/fscache/proc.c)23
-rw-r--r--fs/netfs/fscache_stats.c (renamed from fs/fscache/stats.c)13
-rw-r--r--fs/netfs/fscache_volume.c (renamed from fs/fscache/volume.c)0
-rw-r--r--fs/netfs/internal.h284
-rw-r--r--fs/netfs/io.c217
-rw-r--r--fs/netfs/iterator.c97
-rw-r--r--fs/netfs/locking.c216
-rw-r--r--fs/netfs/main.c109
-rw-r--r--fs/netfs/misc.c260
-rw-r--r--fs/netfs/objects.c59
-rw-r--r--fs/netfs/output.c478
-rw-r--r--fs/netfs/stats.c42
-rw-r--r--fs/nfs/Kconfig4
-rw-r--r--fs/nfs/client.c13
-rw-r--r--fs/nfs/dir.c4
-rw-r--r--fs/nfs/fscache.c7
-rw-r--r--fs/nfs/fscache.h2
-rw-r--r--fs/nfsd/nfs4state.c37
-rw-r--r--fs/nilfs2/file.c8
-rw-r--r--fs/nilfs2/recovery.c7
-rw-r--r--fs/nilfs2/segment.c8
-rw-r--r--fs/ntfs3/attrib.c45
-rw-r--r--fs/ntfs3/attrlist.c12
-rw-r--r--fs/ntfs3/bitmap.c4
-rw-r--r--fs/ntfs3/dir.c48
-rw-r--r--fs/ntfs3/file.c76
-rw-r--r--fs/ntfs3/frecord.c19
-rw-r--r--fs/ntfs3/fslog.c232
-rw-r--r--fs/ntfs3/fsntfs.c29
-rw-r--r--fs/ntfs3/index.c8
-rw-r--r--fs/ntfs3/inode.c32
-rw-r--r--fs/ntfs3/namei.c12
-rw-r--r--fs/ntfs3/ntfs.h4
-rw-r--r--fs/ntfs3/ntfs_fs.h29
-rw-r--r--fs/ntfs3/record.c18
-rw-r--r--fs/ntfs3/super.c54
-rw-r--r--fs/ntfs3/xattr.c6
-rw-r--r--fs/overlayfs/copy_up.c14
-rw-r--r--fs/overlayfs/namei.c43
-rw-r--r--fs/overlayfs/overlayfs.h23
-rw-r--r--fs/overlayfs/ovl_entry.h4
-rw-r--r--fs/overlayfs/readdir.c7
-rw-r--r--fs/overlayfs/super.c15
-rw-r--r--fs/overlayfs/util.c53
-rw-r--r--fs/proc/array.c66
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/inode.c19
-rw-r--r--fs/proc/root.c2
-rw-r--r--fs/remap_range.c31
-rw-r--r--fs/smb/client/cached_dir.c25
-rw-r--r--fs/smb/client/cifs_debug.c6
-rw-r--r--fs/smb/client/cifsencrypt.c2
-rw-r--r--fs/smb/client/cifsfs.c31
-rw-r--r--fs/smb/client/cifsglob.h24
-rw-r--r--fs/smb/client/connect.c29
-rw-r--r--fs/smb/client/dfs.c7
-rw-r--r--fs/smb/client/file.c40
-rw-r--r--fs/smb/client/fs_context.c19
-rw-r--r--fs/smb/client/fs_context.h2
-rw-r--r--fs/smb/client/fscache.c2
-rw-r--r--fs/smb/client/inode.c37
-rw-r--r--fs/smb/client/misc.c1
-rw-r--r--fs/smb/client/namespace.c16
-rw-r--r--fs/smb/client/readdir.c29
-rw-r--r--fs/smb/client/sess.c7
-rw-r--r--fs/smb/client/smb2inode.c267
-rw-r--r--fs/smb/client/smb2maperror.c2
-rw-r--r--fs/smb/client/smb2ops.c165
-rw-r--r--fs/smb/client/smb2pdu.c422
-rw-r--r--fs/smb/client/smb2proto.h9
-rw-r--r--fs/smb/client/smb2status.h2
-rw-r--r--fs/smb/client/smbencrypt.c7
-rw-r--r--fs/smb/client/transport.c18
-rw-r--r--fs/smb/server/asn1.c5
-rw-r--r--fs/smb/server/connection.c6
-rw-r--r--fs/smb/server/connection.h2
-rw-r--r--fs/smb/server/ksmbd_netlink.h3
-rw-r--r--fs/smb/server/misc.c1
-rw-r--r--fs/smb/server/oplock.c6
-rw-r--r--fs/smb/server/smb2pdu.c30
-rw-r--r--fs/smb/server/transport_ipc.c4
-rw-r--r--fs/smb/server/transport_rdma.c11
-rw-r--r--fs/smb/server/transport_tcp.c15
-rw-r--r--fs/super.c13
-rw-r--r--fs/tracefs/event_inode.c620
-rw-r--r--fs/tracefs/inode.c102
-rw-r--r--fs/tracefs/internal.h45
-rw-r--r--fs/verity/measure.c4
-rw-r--r--fs/xfs/libxfs/xfs_attr.c6
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c2
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c14
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.h16
-rw-r--r--fs/xfs/libxfs/xfs_sb.c14
-rw-r--r--fs/xfs/libxfs/xfs_sb.h2
-rw-r--r--fs/xfs/libxfs/xfs_types.h12
-rw-r--r--fs/xfs/scrub/rtbitmap.c1
-rw-r--r--fs/xfs/scrub/rtsummary.c1
-rw-r--r--fs/xfs/xfs_super.c28
-rw-r--r--fs/zonefs/file.c42
-rw-r--r--fs/zonefs/super.c66
-rw-r--r--include/asm-generic/checksum.h6
-rw-r--r--include/drm/bridge/aux-bridge.h15
-rw-r--r--include/dt-bindings/dma/fsl-edma.h21
-rw-r--r--include/linux/backing-dev-defs.h7
-rw-r--r--include/linux/bio.h9
-rw-r--r--include/linux/bitmap.h12
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/bpf-cgroup.h3
-rw-r--r--include/linux/bpf.h177
-rw-r--r--include/linux/bpf_local_storage.h30
-rw-r--r--include/linux/bpf_verifier.h13
-rw-r--r--include/linux/btf.h36
-rw-r--r--include/linux/btf_ids.h21
-rw-r--r--include/linux/bvec.h2
-rw-r--r--include/linux/ceph/messenger.h2
-rw-r--r--include/linux/ceph/osd_client.h10
-rw-r--r--include/linux/compiler-gcc.h20
-rw-r--r--include/linux/compiler_types.h11
-rw-r--r--include/linux/cper.h23
-rw-r--r--include/linux/cpumask.h16
-rw-r--r--include/linux/cxl-event.h18
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/dmaengine.h3
-rw-r--r--include/linux/dpll.h20
-rw-r--r--include/linux/dynamic_queue_limits.h45
-rw-r--r--include/linux/ethtool.h14
-rw-r--r--include/linux/export.h18
-rw-r--r--include/linux/filter.h24
-rw-r--r--include/linux/fortify-string.h51
-rw-r--r--include/linux/framer/framer-provider.h15
-rw-r--r--include/linux/fs.h7
-rw-r--r--include/linux/fscache-cache.h3
-rw-r--r--include/linux/fscache.h45
-rw-r--r--include/linux/gfp.h16
-rw-r--r--include/linux/gpio/driver.h18
-rw-r--r--include/linux/hid_bpf.h11
-rw-r--r--include/linux/hrtimer.h4
-rw-r--r--include/linux/hyperv.h22
-rw-r--r--include/linux/ieee80211.h169
-rw-r--r--include/linux/if_tun.h16
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h4
-rw-r--r--include/linux/iio/common/st_sensors.h4
-rw-r--r--include/linux/iio/imu/adis.h3
-rw-r--r--include/linux/inetdevice.h14
-rw-r--r--include/linux/init.h7
-rw-r--r--include/linux/iommu.h3
-rw-r--r--include/linux/ioprio.h25
-rw-r--r--include/linux/ipv6.h14
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/lsm_hook_defs.h19
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/mdio.h63
-rw-r--r--include/linux/memblock.h2
-rw-r--r--include/linux/mfd/idtRC38xxx_reg.h273
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/fs.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h27
-rw-r--r--include/linux/mlx5/qp.h5
-rw-r--r--include/linux/mlx5/vport.h1
-rw-r--r--include/linux/mman.h1
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/net.h5
-rw-r--r--include/linux/netdevice.h165
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/linux/netfs.h181
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/nvme.h12
-rw-r--r--include/linux/of_device.h5
-rw-r--r--include/linux/of_platform.h4
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/phy.h53
-rw-r--r--include/linux/phylink.h7
-rw-r--r--include/linux/platform_data/brcmfmac.h2
-rw-r--r--include/linux/platform_data/mdio-bcm-unimac.h3
-rw-r--r--include/linux/platform_data/microchip-ksz.h1
-rw-r--r--include/linux/platform_data/net-cw1200.h4
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/proc_fs.h1
-rw-r--r--include/linux/ptp_clock_kernel.h3
-rw-r--r--include/linux/ptrace.h4
-rw-r--r--include/linux/rtnetlink.h3
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/security.h43
-rw-r--r--include/linux/seq_buf.h17
-rw-r--r--include/linux/serial_core.h32
-rw-r--r--include/linux/skbuff.h123
-rw-r--r--include/linux/skmsg.h6
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/spinlock.h12
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/swap.h5
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/tcp.h16
-rw-r--r--include/linux/udp.h10
-rw-r--r--include/linux/uio.h16
-rw-r--r--include/linux/units.h5
-rw-r--r--include/linux/usb/gadget.h1
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/linux/wwan.h2
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/addrconf.h7
-rw-r--r--include/net/af_unix.h42
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bond_options.h1
-rw-r--r--include/net/bonding.h23
-rw-r--r--include/net/busy_poll.h4
-rw-r--r--include/net/cfg80211.h106
-rw-r--r--include/net/dropreason-core.h26
-rw-r--r--include/net/dsa.h4
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/eee.h38
-rw-r--r--include/net/genetlink.h6
-rw-r--r--include/net/gro.h39
-rw-r--r--include/net/hotdata.h52
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/inet_connection_sock.h8
-rw-r--r--include/net/inet_sock.h6
-rw-r--r--include/net/ioam6.h4
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip6_fib.h46
-rw-r--r--include/net/ip6_route.h5
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/ip_tunnels.h3
-rw-r--r--include/net/ipv6.h8
-rw-r--r--include/net/llc_pdu.h6
-rw-r--r--include/net/mac80211.h127
-rw-r--r--include/net/mctp.h7
-rw-r--r--include/net/net_namespace.h5
-rw-r--r--include/net/netdev_queues.h56
-rw-r--r--include/net/netfilter/nf_flow_table.h2
-rw-r--r--include/net/netfilter/nf_queue.h1
-rw-r--r--include/net/netfilter/nf_tables.h73
-rw-r--r--include/net/netlabel.h7
-rw-r--r--include/net/netmem.h41
-rw-r--r--include/net/nexthop.h32
-rw-r--r--include/net/nfc/nfc.h2
-rw-r--r--include/net/page_pool/types.h13
-rw-r--r--include/net/pkt_cls.h2
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/protocol.h3
-rw-r--r--include/net/request_sock.h39
-rw-r--r--include/net/route.h7
-rw-r--r--include/net/rps.h125
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/sch_generic.h11
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/sctp/structs.h5
-rw-r--r--include/net/sock.h191
-rw-r--r--include/net/switchdev.h3
-rw-r--r--include/net/tcp.h53
-rw-r--r--include/net/tls.h5
-rw-r--r--include/net/xdp_sock_drv.h27
-rw-r--r--include/net/xfrm.h14
-rw-r--r--include/scsi/scsi_device.h5
-rw-r--r--include/sound/cs35l56.h7
-rw-r--r--include/sound/soc-card.h2
-rw-r--r--include/sound/tas2781.h10
-rw-r--r--include/trace/events/afs.h56
-rw-r--r--include/trace/events/ext4.h11
-rw-r--r--include/trace/events/napi.h33
-rw-r--r--include/trace/events/netfs.h155
-rw-r--r--include/trace/events/qdisc.h20
-rw-r--r--include/trace/events/rxrpc.h206
-rw-r--r--include/trace/events/tcp.h16
-rw-r--r--include/uapi/drm/ivpu_accel.h26
-rw-r--r--include/uapi/drm/nouveau_drm.h14
-rw-r--r--include/uapi/drm/xe_drm.h22
-rw-r--r--include/uapi/linux/bpf.h103
-rw-r--r--include/uapi/linux/btrfs.h3
-rw-r--r--include/uapi/linux/can.h9
-rw-r--r--include/uapi/linux/can/isotp.h1
-rw-r--r--include/uapi/linux/can/raw.h16
-rw-r--r--include/uapi/linux/dpll.h30
-rw-r--r--include/uapi/linux/eventpoll.h13
-rw-r--r--include/uapi/linux/if_link.h1
-rw-r--r--include/uapi/linux/iio/types.h4
-rw-r--r--include/uapi/linux/in6.h2
-rw-r--r--include/uapi/linux/ioam6_genl.h20
-rw-r--r--include/uapi/linux/mctp.h32
-rw-r--r--include/uapi/linux/mdio.h4
-rw-r--r--include/uapi/linux/netdev.h20
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h8
-rw-r--r--include/uapi/linux/nexthop.h45
-rw-r--r--include/uapi/linux/nl80211.h71
-rw-r--r--include/uapi/linux/ptp_clock.h13
-rw-r--r--include/uapi/linux/serial.h13
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h2
-rw-r--r--include/uapi/sound/asound.h4
-rw-r--r--include/uapi/xen/gntalloc.h5
-rw-r--r--init/Kconfig26
-rw-r--r--io_uring/io_uring.c63
-rw-r--r--io_uring/io_uring.h8
-rw-r--r--io_uring/net.c55
-rw-r--r--io_uring/opdef.c1
-rw-r--r--io_uring/openclose.c4
-rw-r--r--io_uring/poll.c49
-rw-r--r--io_uring/poll.h9
-rw-r--r--io_uring/register.c8
-rw-r--r--io_uring/rsrc.h14
-rw-r--r--io_uring/rw.c58
-rw-r--r--kernel/bpf/Kconfig1
-rw-r--r--kernel/bpf/Makefile2
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/bpf_local_storage.c52
-rw-r--r--kernel/bpf/bpf_lsm.c21
-rw-r--r--kernel/bpf/bpf_struct_ops.c604
-rw-r--r--kernel/bpf/bpf_struct_ops_types.h12
-rw-r--r--kernel/bpf/btf.c539
-rw-r--r--kernel/bpf/cgroup.c11
-rw-r--r--kernel/bpf/core.c13
-rw-r--r--kernel/bpf/cpumap.c6
-rw-r--r--kernel/bpf/cpumask.c4
-rw-r--r--kernel/bpf/helpers.c28
-rw-r--r--kernel/bpf/inode.c276
-rw-r--r--kernel/bpf/log.c62
-rw-r--r--kernel/bpf/lpm_trie.c20
-rw-r--r--kernel/bpf/map_iter.c4
-rw-r--r--kernel/bpf/syscall.c234
-rw-r--r--kernel/bpf/task_iter.c2
-rw-r--r--kernel/bpf/token.c278
-rw-r--r--kernel/bpf/verifier.c389
-rw-r--r--kernel/cgroup/cpuset.c8
-rw-r--r--kernel/cgroup/rstat.c4
-rw-r--r--kernel/configs/debug.config6
-rw-r--r--kernel/debug/kdb/kdb_main.c2
-rw-r--r--kernel/events/core.c6
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/exit.c10
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/futex/core.c15
-rw-r--r--kernel/futex/pi.c11
-rw-r--r--kernel/irq/irqdesc.c2
-rw-r--r--kernel/kprobes.c4
-rw-r--r--kernel/rcu/tree.c34
-rw-r--r--kernel/rcu/tree_exp.h3
-rw-r--r--kernel/sched/membarrier.c6
-rw-r--r--kernel/sys.c50
-rw-r--r--kernel/time/clocksource.c25
-rw-r--r--kernel/time/hrtimer.c3
-rw-r--r--kernel/time/tick-sched.c10
-rw-r--r--kernel/trace/bpf_trace.c25
-rw-r--r--kernel/trace/fprobe.c14
-rw-r--r--kernel/trace/ftrace.c10
-rw-r--r--kernel/trace/ring_buffer.c6
-rw-r--r--kernel/trace/trace.c80
-rw-r--r--kernel/trace/trace_btf.c4
-rw-r--r--kernel/trace/trace_events_synth.c3
-rw-r--r--kernel/trace/trace_events_trigger.c6
-rw-r--r--kernel/trace/trace_osnoise.c6
-rw-r--r--kernel/trace/trace_probe.c32
-rw-r--r--kernel/trace/trace_probe.h3
-rw-r--r--kernel/trace/tracing_map.c7
-rw-r--r--kernel/workqueue.c8
-rw-r--r--lib/Kconfig.debug13
-rw-r--r--lib/bitmap.c7
-rw-r--r--lib/checksum_kunit.c287
-rw-r--r--lib/dynamic_queue_limits.c74
-rw-r--r--lib/iov_iter.c23
-rw-r--r--lib/kobject.c24
-rw-r--r--lib/kunit/device-impl.h2
-rw-r--r--lib/kunit/device.c18
-rw-r--r--lib/kunit/executor.c4
-rw-r--r--lib/kunit/kunit-test.c2
-rw-r--r--lib/kunit/test.c17
-rw-r--r--lib/nlattr.c6
-rw-r--r--lib/sbitmap.c5
-rw-r--r--lib/seq_buf.c49
-rw-r--r--lib/stackdepot.c523
-rw-r--r--lib/string.c15
-rw-r--r--lib/test_blackhole_dev.c3
-rw-r--r--lib/test_fortify/write_overflow-strlcpy-src.c5
-rw-r--r--lib/test_fortify/write_overflow-strlcpy.c5
-rw-r--r--mm/backing-dev.c2
-rw-r--r--mm/damon/core.c15
-rw-r--r--mm/damon/lru_sort.c43
-rw-r--r--mm/damon/reclaim.c18
-rw-r--r--mm/damon/sysfs-schemes.c6
-rw-r--r--mm/debug_vm_pgtable.c8
-rw-r--r--mm/filemap.c53
-rw-r--r--mm/huge_memory.c18
-rw-r--r--mm/kasan/common.c8
-rw-r--r--mm/kasan/generic.c71
-rw-r--r--mm/kasan/kasan.h10
-rw-r--r--mm/kasan/quarantine.c5
-rw-r--r--mm/madvise.c1
-rw-r--r--mm/memblock.c9
-rw-r--r--mm/memcontrol.c95
-rw-r--r--mm/memory-failure.c5
-rw-r--r--mm/memory.c26
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/mmap.c6
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c22
-rw-r--r--mm/readahead.c4
-rw-r--r--mm/swap.h5
-rw-r--r--mm/swap_state.c10
-rw-r--r--mm/swapfile.c13
-rw-r--r--mm/userfaultfd.c29
-rw-r--r--mm/zswap.c27
-rw-r--r--net/6lowpan/core.c1
-rw-r--r--net/8021q/vlan_dev.c30
-rw-r--r--net/8021q/vlan_netlink.c4
-rw-r--r--net/8021q/vlanproc.c46
-rw-r--r--net/Kconfig1
-rw-r--r--net/Makefile2
-rw-r--r--net/atm/mpc.c1
-rw-r--r--net/batman-adv/distributed-arp-table.c3
-rw-r--r--net/batman-adv/main.c14
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/multicast.c3
-rw-r--r--net/batman-adv/netlink.c1
-rw-r--r--net/bluetooth/hci_core.c7
-rw-r--r--net/bluetooth/hci_event.c13
-rw-r--r--net/bluetooth/hci_sync.c7
-rw-r--r--net/bluetooth/l2cap_core.c8
-rw-r--r--net/bluetooth/mgmt.c4
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bpf/bpf_dummy_struct_ops.c22
-rw-r--r--net/bpf/test_run.c12
-rw-r--r--net/bridge/br.c15
-rw-r--r--net/bridge/br_device.c27
-rw-r--r--net/bridge/br_fdb.c5
-rw-r--r--net/bridge/br_multicast.c20
-rw-r--r--net/bridge/br_netfilter_hooks.c96
-rw-r--r--net/bridge/br_netlink.c3
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/br_switchdev.c84
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/bridge/netfilter/Kconfig7
-rw-r--r--net/bridge/netfilter/Makefile2
-rw-r--r--net/bridge/netfilter/nf_conntrack_bridge.c30
-rw-r--r--net/can/af_can.c2
-rw-r--r--net/can/bcm.c69
-rw-r--r--net/can/isotp.c5
-rw-r--r--net/can/j1939/j1939-priv.h3
-rw-r--r--net/can/j1939/main.c2
-rw-r--r--net/can/j1939/socket.c46
-rw-r--r--net/can/raw.c104
-rw-r--r--net/ceph/messenger_v1.c33
-rw-r--r--net/ceph/messenger_v2.c4
-rw-r--r--net/ceph/osd_client.c44
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c447
-rw-r--r--net/core/dev.h10
-rw-r--r--net/core/dst.c6
-rw-r--r--net/core/filter.c217
-rw-r--r--net/core/gro.c40
-rw-r--r--net/core/gro_cells.c3
-rw-r--r--net/core/gso.c4
-rw-r--r--net/core/hotdata.c22
-rw-r--r--net/core/link_watch.c13
-rw-r--r--net/core/net-procfs.c55
-rw-r--r--net/core/net-sysfs.c137
-rw-r--r--net/core/net_namespace.c33
-rw-r--r--net/core/netdev-genl-gen.c12
-rw-r--r--net/core/netdev-genl-gen.h2
-rw-r--r--net/core/netdev-genl.c227
-rw-r--r--net/core/page_pool.c55
-rw-r--r--net/core/page_pool_user.c5
-rw-r--r--net/core/request_sock.c3
-rw-r--r--net/core/rtnetlink.c160
-rw-r--r--net/core/scm.c5
-rw-r--r--net/core/skbuff.c192
-rw-r--r--net/core/skmsg.c7
-rw-r--r--net/core/sock.c116
-rw-r--r--net/core/sysctl_net_core.c34
-rw-r--r--net/core/xdp.c15
-rw-r--r--net/dccp/ackvec.c8
-rw-r--r--net/devlink/core.c24
-rw-r--r--net/devlink/port.c4
-rw-r--r--net/dsa/dsa.c7
-rw-r--r--net/dsa/tag_sja1105.c4
-rw-r--r--net/dsa/user.c28
-rw-r--r--net/ethtool/eee.c62
-rw-r--r--net/ethtool/ioctl.c60
-rw-r--r--net/ethtool/netlink.c14
-rw-r--r--net/handshake/handshake-test.c5
-rw-r--r--net/hsr/hsr_device.c34
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/ieee802154/6lowpan/core.c3
-rw-r--r--net/ieee802154/socket.c1
-rw-r--r--net/ipv4/af_inet.c62
-rw-r--r--net/ipv4/ah4.c1
-rw-r--r--net/ipv4/arp.c3
-rw-r--r--net/ipv4/bpf_tcp_ca.c26
-rw-r--r--net/ipv4/cipso_ipv4.c5
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c324
-rw-r--r--net/ipv4/esp4.c1
-rw-r--r--net/ipv4/fib_frontend.c51
-rw-r--r--net/ipv4/fib_trie.c6
-rw-r--r--net/ipv4/fou_bpf.c4
-rw-r--r--net/ipv4/fou_core.c2
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c11
-rw-r--r--net/ipv4/inet_hashtables.c28
-rw-r--r--net/ipv4/inetpeer.c5
-rw-r--r--net/ipv4/ip_gre.c25
-rw-r--r--net/ipv4/ip_output.c28
-rw-r--r--net/ipv4/ip_sockglue.c19
-rw-r--r--net/ipv4/ip_tunnel.c67
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/ip_vti.c9
-rw-r--r--net/ipv4/ipip.c9
-rw-r--r--net/ipv4/ipmr.c11
-rw-r--r--net/ipv4/netfilter/Kconfig44
-rw-r--r--net/ipv4/netfilter/Makefile2
-rw-r--r--net/ipv4/nexthop.c367
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/raw.c15
-rw-r--r--net/ipv4/route.c9
-rw-r--r--net/ipv4/syncookies.c61
-rw-r--r--net/ipv4/tcp.c19
-rw-r--r--net/ipv4/tcp_ao.c2
-rw-r--r--net/ipv4/tcp_bbr.c4
-rw-r--r--net/ipv4/tcp_cubic.c4
-rw-r--r--net/ipv4/tcp_dctcp.c4
-rw-r--r--net/ipv4/tcp_input.c51
-rw-r--r--net/ipv4/tcp_ipv4.c17
-rw-r--r--net/ipv4/tcp_minisocks.c10
-rw-r--r--net/ipv4/tcp_offload.c36
-rw-r--r--net/ipv4/tunnel4.c1
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv4/udp_offload.c17
-rw-r--r--net/ipv4/udp_tunnel_core.c1
-rw-r--r--net/ipv4/xfrm4_input.c2
-rw-r--r--net/ipv4/xfrm4_tunnel.c1
-rw-r--r--net/ipv6/addrconf.c640
-rw-r--r--net/ipv6/addrconf_core.c21
-rw-r--r--net/ipv6/af_inet6.c6
-rw-r--r--net/ipv6/ah6.c1
-rw-r--r--net/ipv6/anycast.c61
-rw-r--r--net/ipv6/calipso.c5
-rw-r--r--net/ipv6/esp6.c1
-rw-r--r--net/ipv6/exthdrs.c34
-rw-r--r--net/ipv6/inet6_hashtables.c8
-rw-r--r--net/ipv6/ioam6.c72
-rw-r--r--net/ipv6/ip6_fib.c73
-rw-r--r--net/ipv6/ip6_gre.c14
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c18
-rw-r--r--net/ipv6/ip6_output.c25
-rw-r--r--net/ipv6/ip6_tunnel.c46
-rw-r--r--net/ipv6/ip6_udp_tunnel.c1
-rw-r--r--net/ipv6/ip6_vti.c13
-rw-r--r--net/ipv6/ip6mr.c9
-rw-r--r--net/ipv6/ipv6_sockglue.c8
-rw-r--r--net/ipv6/mcast.c15
-rw-r--r--net/ipv6/mip6.c1
-rw-r--r--net/ipv6/ndisc.c84
-rw-r--r--net/ipv6/netfilter/Kconfig20
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c4
-rw-r--r--net/ipv6/output_core.c4
-rw-r--r--net/ipv6/raw.c15
-rw-r--r--net/ipv6/reassembly.c4
-rw-r--r--net/ipv6/route.c60
-rw-r--r--net/ipv6/seg6.c20
-rw-r--r--net/ipv6/seg6_hmac.c8
-rw-r--r--net/ipv6/sit.c28
-rw-r--r--net/ipv6/syncookies.c31
-rw-r--r--net/ipv6/tcp_ipv6.c39
-rw-r--r--net/ipv6/tcpv6_offload.c16
-rw-r--r--net/ipv6/tunnel6.c1
-rw-r--r--net/ipv6/udp.c19
-rw-r--r--net/ipv6/udp_offload.c21
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c1
-rw-r--r--net/iucv/af_iucv.c10
-rw-r--r--net/iucv/iucv.c19
-rw-r--r--net/kcm/kcmsock.c15
-rw-r--r--net/key/af_key.c1
-rw-r--r--net/l2tp/l2tp_eth.c2
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/llc/af_llc.c26
-rw-r--r--net/llc/llc_core.c7
-rw-r--r--net/mac80211/Kconfig1
-rw-r--r--net/mac80211/Makefile2
-rw-r--r--net/mac80211/agg-tx.c2
-rw-r--r--net/mac80211/cfg.c248
-rw-r--r--net/mac80211/chan.c705
-rw-r--r--net/mac80211/debug.h18
-rw-r--r--net/mac80211/debugfs_netdev.c4
-rw-r--r--net/mac80211/debugfs_netdev.h5
-rw-r--r--net/mac80211/driver-ops.h19
-rw-r--r--net/mac80211/ht.c6
-rw-r--r--net/mac80211/ibss.c55
-rw-r--r--net/mac80211/ieee80211_i.h184
-rw-r--r--net/mac80211/iface.c32
-rw-r--r--net/mac80211/key.c4
-rw-r--r--net/mac80211/link.c13
-rw-r--r--net/mac80211/main.c231
-rw-r--r--net/mac80211/mesh.c162
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_pathtbl.c6
-rw-r--r--net/mac80211/mesh_plink.c28
-rw-r--r--net/mac80211/mlme.c2973
-rw-r--r--net/mac80211/ocb.c5
-rw-r--r--net/mac80211/offchannel.c21
-rw-r--r--net/mac80211/parse.c926
-rw-r--r--net/mac80211/rate.c15
-rw-r--r--net/mac80211/rx.c53
-rw-r--r--net/mac80211/scan.c72
-rw-r--r--net/mac80211/spectmgmt.c337
-rw-r--r--net/mac80211/sta_info.c28
-rw-r--r--net/mac80211/sta_info.h14
-rw-r--r--net/mac80211/tdls.c67
-rw-r--r--net/mac80211/tests/elems.c1
-rw-r--r--net/mac80211/trace.h119
-rw-r--r--net/mac80211/trace_msg.h2
-rw-r--r--net/mac80211/tx.c74
-rw-r--r--net/mac80211/util.c1791
-rw-r--r--net/mac80211/vht.c6
-rw-r--r--net/mac80211/wbrf.c2
-rw-r--r--net/mac80211/wpa.c33
-rw-r--r--net/mctp/Kconfig1
-rw-r--r--net/mctp/af_mctp.c117
-rw-r--r--net/mctp/route.c117
-rw-r--r--net/mctp/test/route-test.c413
-rw-r--r--net/mctp/test/utils.c2
-rw-r--r--net/mpls/af_mpls.c4
-rw-r--r--net/mpls/mpls_gso.c3
-rw-r--r--net/mpls/mpls_iptunnel.c2
-rw-r--r--net/mptcp/diag.c12
-rw-r--r--net/mptcp/fastopen.c6
-rw-r--r--net/mptcp/mptcp_diag.c1
-rw-r--r--net/mptcp/mptcp_pm_gen.c7
-rw-r--r--net/mptcp/mptcp_pm_gen.h2
-rw-r--r--net/mptcp/options.c31
-rw-r--r--net/mptcp/pm.c29
-rw-r--r--net/mptcp/pm_netlink.c184
-rw-r--r--net/mptcp/pm_userspace.c259
-rw-r--r--net/mptcp/protocol.c193
-rw-r--r--net/mptcp/protocol.h131
-rw-r--r--net/mptcp/sockopt.c73
-rw-r--r--net/mptcp/subflow.c98
-rw-r--r--net/mptcp/token_test.c7
-rw-r--r--net/netfilter/Kconfig12
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h14
-rw-r--r--net/netfilter/ipset/ip_set_core.c39
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h19
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c13
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/nf_bpf_link.c2
-rw-r--r--net/netfilter/nf_conncount.c8
-rw-r--r--net/netfilter/nf_conntrack_bpf.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c12
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c10
-rw-r--r--net/netfilter/nf_flow_table_core.c17
-rw-r--r--net/netfilter/nf_log.c16
-rw-r--r--net/netfilter/nf_nat_bpf.c4
-rw-r--r--net/netfilter/nf_nat_core.c5
-rw-r--r--net/netfilter/nf_queue.c106
-rw-r--r--net/netfilter/nf_synproxy_core.c2
-rw-r--r--net/netfilter/nf_tables_api.c161
-rw-r--r--net/netfilter/nfnetlink_queue.c155
-rw-r--r--net/netfilter/nft_chain_filter.c11
-rw-r--r--net/netfilter/nft_compat.c49
-rw-r--r--net/netfilter/nft_ct.c26
-rw-r--r--net/netfilter/nft_flow_offload.c6
-rw-r--r--net/netfilter/nft_limit.c23
-rw-r--r--net/netfilter/nft_nat.c5
-rw-r--r--net/netfilter/nft_osf.c11
-rw-r--r--net/netfilter/nft_rt.c5
-rw-r--r--net/netfilter/nft_set_hash.c8
-rw-r--r--net/netfilter/nft_set_pipapo.c311
-rw-r--r--net/netfilter/nft_set_pipapo.h59
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c78
-rw-r--r--net/netfilter/nft_set_rbtree.c17
-rw-r--r--net/netfilter/nft_socket.c5
-rw-r--r--net/netfilter/nft_synproxy.c7
-rw-r--r--net/netfilter/nft_tproxy.c5
-rw-r--r--net/netfilter/nft_tunnel.c1
-rw-r--r--net/netfilter/nft_xfrm.c5
-rw-r--r--net/netfilter/utils.c37
-rw-r--r--net/netfilter/x_tables.c3
-rw-r--r--net/netlabel/netlabel_kapi.c10
-rw-r--r--net/netlink/af_netlink.c77
-rw-r--r--net/netlink/af_netlink.h5
-rw-r--r--net/netlink/diag.c2
-rw-r--r--net/netlink/genetlink.c42
-rw-r--r--net/netrom/af_netrom.c14
-rw-r--r--net/netrom/nr_dev.c2
-rw-r--r--net/netrom/nr_in.c6
-rw-r--r--net/netrom/nr_out.c2
-rw-r--r--net/netrom/nr_route.c8
-rw-r--r--net/netrom/nr_subr.c5
-rw-r--r--net/nfc/core.c2
-rw-r--r--net/nfc/hci/llc.c20
-rw-r--r--net/nfc/nci/core.c4
-rw-r--r--net/openvswitch/flow_netlink.c49
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/phonet/datagram.c4
-rw-r--r--net/phonet/pep.c41
-rw-r--r--net/rds/af_rds.c2
-rw-r--r--net/rds/connection.c4
-rw-r--r--net/rds/rdma.c3
-rw-r--r--net/rds/recv.c13
-rw-r--r--net/rds/send.c6
-rw-r--r--net/rxrpc/af_rxrpc.c12
-rw-r--r--net/rxrpc/ar-internal.h123
-rw-r--r--net/rxrpc/call_event.c335
-rw-r--r--net/rxrpc/call_object.c57
-rw-r--r--net/rxrpc/conn_client.c4
-rw-r--r--net/rxrpc/conn_event.c26
-rw-r--r--net/rxrpc/conn_object.c4
-rw-r--r--net/rxrpc/input.c201
-rw-r--r--net/rxrpc/insecure.c11
-rw-r--r--net/rxrpc/io_thread.c11
-rw-r--r--net/rxrpc/local_object.c3
-rw-r--r--net/rxrpc/misc.c8
-rw-r--r--net/rxrpc/output.c445
-rw-r--r--net/rxrpc/proc.c12
-rw-r--r--net/rxrpc/protocol.h6
-rw-r--r--net/rxrpc/rtt.c36
-rw-r--r--net/rxrpc/rxkad.c61
-rw-r--r--net/rxrpc/sendmsg.c63
-rw-r--r--net/rxrpc/sysctl.c16
-rw-r--r--net/rxrpc/txbuf.c174
-rw-r--r--net/sched/Kconfig10
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/act_bpf.c1
-rw-r--r--net/sched/act_connmark.c1
-rw-r--r--net/sched/act_csum.c1
-rw-r--r--net/sched/act_ct.c1
-rw-r--r--net/sched/act_ctinfo.c1
-rw-r--r--net/sched/act_gact.c1
-rw-r--r--net/sched/act_gate.c1
-rw-r--r--net/sched/act_ife.c1
-rw-r--r--net/sched/act_mirred.c39
-rw-r--r--net/sched/act_mpls.c1
-rw-r--r--net/sched/act_nat.c1
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c1
-rw-r--r--net/sched/act_sample.c1
-rw-r--r--net/sched/act_simple.c1
-rw-r--r--net/sched/act_skbedit.c1
-rw-r--r--net/sched/act_skbmod.c1
-rw-r--r--net/sched/act_tunnel_key.c1
-rw-r--r--net/sched/act_vlan.c1
-rw-r--r--net/sched/cls_api.c11
-rw-r--r--net/sched/cls_basic.c1
-rw-r--r--net/sched/cls_bpf.c1
-rw-r--r--net/sched/cls_cgroup.c1
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_flower.c29
-rw-r--r--net/sched/cls_fw.c1
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sched/cls_route.c1
-rw-r--r--net/sched/cls_u32.c1
-rw-r--r--net/sched/em_canid.c1
-rw-r--r--net/sched/em_cmp.c1
-rw-r--r--net/sched/em_meta.c1
-rw-r--r--net/sched/em_nbyte.c1
-rw-r--r--net/sched/em_text.c1
-rw-r--r--net/sched/em_u32.c1
-rw-r--r--net/sched/sch_api.c4
-rw-r--r--net/sched/sch_cake.c1
-rw-r--r--net/sched/sch_cbs.c1
-rw-r--r--net/sched/sch_choke.c1
-rw-r--r--net/sched/sch_codel.c33
-rw-r--r--net/sched/sch_drr.c1
-rw-r--r--net/sched/sch_etf.c1
-rw-r--r--net/sched/sch_ets.c1
-rw-r--r--net/sched/sch_fq.c1
-rw-r--r--net/sched/sch_fq_codel.c1
-rw-r--r--net/sched/sch_generic.c3
-rw-r--r--net/sched/sch_gred.c1
-rw-r--r--net/sched/sch_hfsc.c1
-rw-r--r--net/sched/sch_hhf.c1
-rw-r--r--net/sched/sch_htb.c1
-rw-r--r--net/sched/sch_ingress.c3
-rw-r--r--net/sched/sch_mqprio.c1
-rw-r--r--net/sched/sch_multiq.c1
-rw-r--r--net/sched/sch_netem.c1
-rw-r--r--net/sched/sch_pie.c1
-rw-r--r--net/sched/sch_plug.c1
-rw-r--r--net/sched/sch_prio.c1
-rw-r--r--net/sched/sch_qfq.c1
-rw-r--r--net/sched/sch_red.c1
-rw-r--r--net/sched/sch_sfb.c1
-rw-r--r--net/sched/sch_sfq.c1
-rw-r--r--net/sched/sch_skbprio.c1
-rw-r--r--net/sched/sch_taprio.c73
-rw-r--r--net/sched/sch_tbf.c1
-rw-r--r--net/sctp/inqueue.c14
-rw-r--r--net/sctp/protocol.c10
-rw-r--r--net/sctp/socket.c1
-rw-r--r--net/smc/af_smc.c23
-rw-r--r--net/smc/smc.h4
-rw-r--r--net/smc/smc_clc.c6
-rw-r--r--net/smc/smc_clc.h2
-rw-r--r--net/smc/smc_core.c14
-rw-r--r--net/smc/smc_diag.c2
-rw-r--r--net/smc/smc_ism.h10
-rw-r--r--net/smc/smc_pnet.c10
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/svc.c4
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/switchdev/switchdev.c73
-rw-r--r--net/tipc/Kconfig7
-rw-r--r--net/tipc/Makefile4
-rw-r--r--net/tipc/bearer.c17
-rw-r--r--net/tls/tls_main.c2
-rw-r--r--net/tls/tls_sw.c193
-rw-r--r--net/unix/Kconfig5
-rw-r--r--net/unix/Makefile2
-rw-r--r--net/unix/af_unix.c106
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/unix/garbage.c210
-rw-r--r--net/unix/scm.c159
-rw-r--r--net/unix/scm.h10
-rw-r--r--net/wireless/Kconfig1
-rw-r--r--net/wireless/chan.c377
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/core.h52
-rw-r--r--net/wireless/mlme.c146
-rw-r--r--net/wireless/nl80211.c392
-rw-r--r--net/wireless/reg.c17
-rw-r--r--net/wireless/scan.c826
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/wireless/tests/Makefile2
-rw-r--r--net/wireless/tests/chan.c228
-rw-r--r--net/wireless/tests/scan.c277
-rw-r--r--net/wireless/trace.h52
-rw-r--r--net/wireless/util.c76
-rw-r--r--net/x25/Kconfig2
-rw-r--r--net/xdp/xsk.c20
-rw-r--r--net/xdp/xsk_buff_pool.c1
-rw-r--r--net/xfrm/espintcp.c4
-rw-r--r--net/xfrm/xfrm_algo.c1
-rw-r--r--net/xfrm/xfrm_device.c2
-rw-r--r--net/xfrm/xfrm_input.c3
-rw-r--r--net/xfrm/xfrm_interface_bpf.c4
-rw-r--r--net/xfrm/xfrm_interface_core.c16
-rw-r--r--net/xfrm/xfrm_output.c6
-rw-r--r--net/xfrm/xfrm_policy.c6
-rw-r--r--net/xfrm/xfrm_proc.c1
-rw-r--r--net/xfrm/xfrm_state.c17
-rw-r--r--net/xfrm/xfrm_state_bpf.c4
-rw-r--r--net/xfrm/xfrm_user.c6
-rw-r--r--rust/kernel/net/phy.rs24
-rw-r--r--samples/bpf/asm_goto_workaround.h8
-rw-r--r--samples/bpf/map_perf_test_user.c2
-rw-r--r--samples/bpf/xdp_router_ipv4_user.c2
-rw-r--r--samples/cgroup/.gitignore3
-rw-r--r--samples/ftrace/ftrace-direct-modify.c35
-rw-r--r--samples/ftrace/ftrace-direct-multi-modify.c41
-rw-r--r--samples/ftrace/ftrace-direct-multi.c25
-rw-r--r--samples/ftrace/ftrace-direct-too.c28
-rw-r--r--samples/ftrace/ftrace-direct.c24
-rw-r--r--scripts/Kconfig.include2
-rw-r--r--scripts/Makefile.compiler2
-rw-r--r--scripts/Makefile.defconf8
-rw-r--r--scripts/Makefile.extrawarn11
-rw-r--r--scripts/Makefile.lib4
-rw-r--r--scripts/Makefile.package28
-rwxr-xr-xscripts/bpf_doc.py4
-rwxr-xr-xscripts/check-uapi.sh573
-rwxr-xr-xscripts/clang-tools/gen_compile_commands.py2
-rw-r--r--scripts/coccinelle/api/device_attr_show.cocci17
-rw-r--r--scripts/gdb/linux/tasks.py15
-rw-r--r--scripts/generate_rust_target.rs7
-rw-r--r--scripts/genksyms/genksyms.c22
-rw-r--r--scripts/git.orderFile42
-rw-r--r--scripts/head-object-list.txt1
-rw-r--r--scripts/kconfig/Makefile14
-rw-r--r--scripts/kconfig/conf.c6
-rw-r--r--scripts/kconfig/confdata.c161
-rw-r--r--scripts/kconfig/expr.c1
-rw-r--r--scripts/kconfig/lkc.h2
-rw-r--r--scripts/kconfig/lkc_proto.h7
-rw-r--r--scripts/kconfig/mconf.c56
-rw-r--r--scripts/kconfig/menu.c17
-rw-r--r--scripts/kconfig/mnconf-common.c53
-rw-r--r--scripts/kconfig/mnconf-common.h18
-rw-r--r--scripts/kconfig/nconf.c55
-rw-r--r--scripts/kconfig/symbol.c41
-rw-r--r--scripts/kconfig/util.c3
-rwxr-xr-xscripts/link-vmlinux.sh9
-rwxr-xr-xscripts/min-tool-version.sh2
-rwxr-xr-xscripts/mksysmap13
-rw-r--r--scripts/mod/modpost.c45
-rw-r--r--scripts/mod/modpost.h6
-rw-r--r--scripts/mod/sumversion.c7
-rwxr-xr-xscripts/package/builddeb106
-rwxr-xr-xscripts/package/buildtar8
-rwxr-xr-xscripts/package/deb-build-option14
-rw-r--r--scripts/package/debian/copyright16
-rwxr-xr-xscripts/package/debian/rules35
-rwxr-xr-xscripts/package/install-extmod-build2
-rw-r--r--scripts/package/kernel.spec28
-rwxr-xr-xscripts/package/mkdebian28
-rw-r--r--scripts/package/snapcraft.template2
-rw-r--r--scripts/recordmcount.c1
-rwxr-xr-xscripts/recordmcount.pl7
-rwxr-xr-xscripts/xz_wrap.sh1
-rw-r--r--security/apparmor/Kconfig12
-rw-r--r--security/apparmor/apparmorfs.c16
-rw-r--r--security/apparmor/crypto.c6
-rw-r--r--security/apparmor/domain.c2
-rw-r--r--security/apparmor/lib.c1
-rw-r--r--security/apparmor/lsm.c69
-rw-r--r--security/apparmor/policy.c13
-rw-r--r--security/apparmor/policy_unpack.c13
-rw-r--r--security/apparmor/task.c2
-rw-r--r--security/integrity/digsig.c3
-rw-r--r--security/keys/encrypted-keys/encrypted.c4
-rw-r--r--security/landlock/fs.c4
-rw-r--r--security/security.c153
-rw-r--r--security/selinux/hooks.c49
-rw-r--r--security/tomoyo/common.c3
-rw-r--r--security/tomoyo/tomoyo.c3
-rw-r--r--sound/core/Makefile1
-rw-r--r--sound/core/pcm.c4
-rw-r--r--sound/core/pcm_native.c5
-rw-r--r--sound/core/ump.c4
-rw-r--r--sound/drivers/aloop.c23
-rw-r--r--sound/firewire/amdtp-stream.c2
-rw-r--r--sound/pci/hda/Kconfig4
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c8
-rw-r--r--sound/pci/hda/cs35l56_hda.c136
-rw-r--r--sound/pci/hda/hda_generic.c1
-rw-r--r--sound/pci/hda/hda_intel.c6
-rw-r--r--sound/pci/hda/patch_conexant.c18
-rw-r--r--sound/pci/hda/patch_cs8409.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c6
-rw-r--r--sound/pci/hda/patch_realtek.c65
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c2
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c2
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c16
-rw-r--r--sound/soc/amd/acp/acp-sof-mach.c4
-rw-r--r--sound/soc/amd/acp/acp3x-es83xx/acp3x-es83xx.c8
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c35
-rw-r--r--sound/soc/amd/yc/pci-acp6x.c1
-rw-r--r--sound/soc/codecs/cs35l45.c2
-rw-r--r--sound/soc/codecs/cs35l56-shared.c142
-rw-r--r--sound/soc/codecs/cs35l56.c383
-rw-r--r--sound/soc/codecs/cs35l56.h2
-rw-r--r--sound/soc/codecs/cs42l43.c48
-rw-r--r--[-rwxr-xr-x]sound/soc/codecs/es8326.c186
-rw-r--r--sound/soc/codecs/es8326.h3
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c7
-rw-r--r--sound/soc/codecs/rt5645.c26
-rw-r--r--sound/soc/codecs/rtq9128.c73
-rw-r--r--sound/soc/codecs/tas2562.c3
-rw-r--r--sound/soc/codecs/tas2781-comlib.c3
-rw-r--r--sound/soc/codecs/tas2781-i2c.c10
-rw-r--r--sound/soc/codecs/wcd9335.c4
-rw-r--r--sound/soc/codecs/wcd934x.c1
-rw-r--r--sound/soc/codecs/wcd938x.c8
-rw-r--r--sound/soc/codecs/wm_adsp.c73
-rw-r--r--sound/soc/codecs/wsa883x.c6
-rw-r--r--sound/soc/fsl/fsl_xcvr.c12
-rw-r--r--sound/soc/generic/audio-graph-card2.c2
-rw-r--r--sound/soc/intel/avs/core.c3
-rw-r--r--sound/soc/intel/avs/topology.c2
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c6
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c3
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c3
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c3
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c3
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c3
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c3
-rw-r--r--sound/soc/intel/boards/bytcr_wm5102.c3
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c7
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c3
-rw-r--r--sound/soc/mediatek/common/mtk-dsp-sof-common.c2
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c3
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-afe-pcm.c33
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359.c41
-rw-r--r--sound/soc/qcom/lpass-cdc-dma.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-dai.c8
-rw-r--r--sound/soc/qcom/sc8280xp.c12
-rw-r--r--sound/soc/soc-card.c24
-rw-r--r--sound/soc/soc-core.c5
-rw-r--r--sound/soc/sof/amd/acp-ipc.c2
-rw-r--r--sound/soc/sof/amd/acp.c17
-rw-r--r--sound/soc/sof/intel/pci-lnl.c2
-rw-r--r--sound/soc/sof/intel/pci-tgl.c64
-rw-r--r--sound/soc/sof/ipc3-dtrace.c3
-rw-r--r--sound/soc/sof/ipc3-topology.c55
-rw-r--r--sound/soc/sof/ipc3.c2
-rw-r--r--sound/soc/sof/ipc4-loader.c11
-rw-r--r--sound/soc/sof/ipc4-pcm.c17
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c5
-rw-r--r--sound/usb/clock.c34
-rw-r--r--sound/usb/format.c20
-rw-r--r--sound/usb/midi.c73
-rw-r--r--sound/usb/midi2.c2
-rw-r--r--sound/usb/mixer_scarlett2.c42
-rw-r--r--sound/usb/quirks.c38
-rw-r--r--sound/virtio/virtio_card.c2
-rw-r--r--sound/virtio/virtio_ctl_msg.c2
-rw-r--r--sound/virtio/virtio_pcm_msg.c2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h8
-rw-r--r--tools/arch/x86/include/asm/msr-index.h8
-rw-r--r--tools/arch/x86/include/asm/rmwcc.h2
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h3
-rw-r--r--tools/arch/x86/lib/memcpy_64.S4
-rw-r--r--tools/arch/x86/lib/memset_64.S4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst58
-rw-r--r--tools/bpf/bpftool/gen.c246
-rw-r--r--tools/bpf/bpftool/link.c94
-rw-r--r--tools/bpf/bpftool/prog.c2
-rw-r--r--tools/bpf/resolve_btfids/main.c70
-rw-r--r--tools/build/Makefile.feature1
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-dwarf_getcfi.c9
-rw-r--r--tools/build/feature/test-libopencsd.c4
-rw-r--r--tools/include/asm-generic/unaligned.h24
-rw-r--r--tools/include/linux/btf_ids.h9
-rw-r--r--tools/include/linux/compiler_types.h4
-rw-r--r--tools/include/uapi/asm-generic/unistd.h15
-rw-r--r--tools/include/uapi/drm/drm.h72
-rw-r--r--tools/include/uapi/drm/i915_drm.h12
-rw-r--r--tools/include/uapi/linux/bpf.h104
-rw-r--r--tools/include/uapi/linux/fcntl.h3
-rw-r--r--tools/include/uapi/linux/if_link.h1
-rw-r--r--tools/include/uapi/linux/kvm.h140
-rw-r--r--tools/include/uapi/linux/mount.h70
-rw-r--r--tools/include/uapi/linux/netdev.h20
-rw-r--r--tools/include/uapi/linux/perf_event.h13
-rw-r--r--tools/include/uapi/linux/stat.h1
-rw-r--r--tools/lib/api/fs/fs.c80
-rw-r--r--tools/lib/api/io.h12
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/bpf.c42
-rw-r--r--tools/lib/bpf/bpf.h79
-rw-r--r--tools/lib/bpf/bpf_core_read.h60
-rw-r--r--tools/lib/bpf/bpf_helpers.h2
-rw-r--r--tools/lib/bpf/btf.c43
-rw-r--r--tools/lib/bpf/elf.c2
-rw-r--r--tools/lib/bpf/features.c561
-rw-r--r--tools/lib/bpf/libbpf.c746
-rw-r--r--tools/lib/bpf/libbpf.h21
-rw-r--r--tools/lib/bpf/libbpf.map6
-rw-r--r--tools/lib/bpf/libbpf_internal.h66
-rw-r--r--tools/lib/bpf/libbpf_probes.c12
-rw-r--r--tools/lib/bpf/linker.c2
-rw-r--r--tools/lib/bpf/netlink.c4
-rw-r--r--tools/lib/bpf/str_error.h3
-rw-r--r--tools/lib/perf/Documentation/examples/sampling.c2
-rw-r--r--tools/lib/perf/Documentation/libperf-sampling.txt2
-rw-r--r--tools/lib/perf/Documentation/libperf.txt4
-rw-r--r--tools/lib/perf/cpumap.c65
-rw-r--r--tools/lib/perf/evlist.c6
-rw-r--r--tools/lib/perf/evsel.c2
-rw-r--r--tools/lib/perf/include/internal/mmap.h3
-rw-r--r--tools/lib/perf/include/perf/cpumap.h40
-rw-r--r--tools/lib/perf/libperf.map6
-rw-r--r--tools/lib/perf/mmap.c20
-rw-r--r--tools/lib/perf/tests/test-cpumap.c4
-rw-r--r--tools/lib/perf/tests/test-evlist.c6
-rw-r--r--tools/lib/perf/tests/test-evsel.c2
-rw-r--r--tools/lib/subcmd/help.c18
-rw-r--r--tools/net/ynl/Makefile4
-rw-r--r--tools/net/ynl/Makefile.deps5
-rwxr-xr-xtools/net/ynl/cli.py43
-rw-r--r--tools/net/ynl/generated/Makefile9
-rw-r--r--tools/net/ynl/lib/Makefile5
-rw-r--r--tools/net/ynl/lib/__init__.py4
-rw-r--r--tools/net/ynl/lib/nlspec.py9
-rw-r--r--tools/net/ynl/lib/ynl-priv.h359
-rw-r--r--tools/net/ynl/lib/ynl.c407
-rw-r--r--tools/net/ynl/lib/ynl.h5
-rw-r--r--tools/net/ynl/lib/ynl.py311
-rw-r--r--tools/net/ynl/samples/.gitignore1
-rw-r--r--tools/net/ynl/samples/Makefile6
-rw-r--r--tools/net/ynl/samples/ovs.c60
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py112
-rwxr-xr-xtools/net/ynl/ynl-gen-rst.py9
-rw-r--r--tools/perf/.gitignore4
-rw-r--r--tools/perf/Documentation/itrace.txt1
-rw-r--r--tools/perf/Documentation/perf-annotate.txt11
-rw-r--r--tools/perf/Documentation/perf-config.txt4
-rw-r--r--tools/perf/Documentation/perf-list.txt16
-rw-r--r--tools/perf/Documentation/perf-lock.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-report.txt3
-rw-r--r--tools/perf/Documentation/perf-stat.txt52
-rw-r--r--tools/perf/Documentation/perf.txt3
-rw-r--r--tools/perf/Makefile.config13
-rw-r--r--tools/perf/Makefile.perf23
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c16
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c4
-rw-r--r--tools/perf/arch/arm64/util/header.c2
-rw-r--r--tools/perf/arch/loongarch/annotate/instructions.c6
-rw-r--r--tools/perf/arch/x86/tests/hybrid.c37
-rw-r--r--tools/perf/arch/x86/util/dwarf-regs.c38
-rw-r--r--tools/perf/arch/x86/util/event.c103
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c4
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c10
-rw-r--r--tools/perf/bench/epoll-ctl.c2
-rw-r--r--tools/perf/bench/epoll-wait.c2
-rw-r--r--tools/perf/bench/futex-hash.c2
-rw-r--r--tools/perf/bench/futex-lock-pi.c2
-rw-r--r--tools/perf/bench/futex-requeue.c2
-rw-r--r--tools/perf/bench/futex-wake-parallel.c2
-rw-r--r--tools/perf/bench/futex-wake.c2
-rw-r--r--tools/perf/bench/sched-seccomp-notify.c2
-rw-r--r--tools/perf/builtin-annotate.c300
-rw-r--r--tools/perf/builtin-c2c.c2
-rw-r--r--tools/perf/builtin-ftrace.c2
-rw-r--r--tools/perf/builtin-inject.c6
-rw-r--r--tools/perf/builtin-list.c211
-rw-r--r--tools/perf/builtin-lock.c4
-rw-r--r--tools/perf/builtin-record.c97
-rw-r--r--tools/perf/builtin-report.c113
-rw-r--r--tools/perf/builtin-stat.c33
-rw-r--r--tools/perf/builtin-top.c54
-rw-r--r--tools/perf/builtin-trace.c6
-rwxr-xr-x[-rw-r--r--]tools/perf/perf-archive.sh86
-rw-r--r--tools/perf/perf.c30
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json125
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json206
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json464
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json128
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json41
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json442
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json170
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json41
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json2
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv3
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/datasource.json18
-rw-r--r--tools/perf/pmu-events/arch/riscv/mapfile.csv2
-rw-r--r--tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json172
-rw-r--r--tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json68
-rw-r--r--tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json67
-rw-r--r--tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json68
-rw-r--r--tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json72
-rw-r--r--tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json80
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json269
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json101
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/recommended.json84
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/other.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv6
-rw-r--r--tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json31
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json6
-rwxr-xr-xtools/perf/pmu-events/jevents.py4
-rwxr-xr-xtools/perf/scripts/python/arm-cs-trace-disasm.py30
-rw-r--r--tools/perf/scripts/python/compaction-times.py2
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py4
-rw-r--r--tools/perf/tests/Build14
-rw-r--r--tools/perf/tests/attr.c2
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/test-record-user-regs-no-sve-aarch642
-rw-r--r--tools/perf/tests/attr/test-record-user-regs-sve-aarch642
-rw-r--r--tools/perf/tests/builtin-test.c17
-rw-r--r--tools/perf/tests/code-reading.c80
-rw-r--r--tools/perf/tests/cpumap.c2
-rw-r--r--tools/perf/tests/dso-data.c15
-rw-r--r--tools/perf/tests/keep-tracking.c2
-rw-r--r--tools/perf/tests/make2
-rw-r--r--tools/perf/tests/maps.c61
-rw-r--r--tools/perf/tests/mmap-basic.c2
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c2
-rw-r--r--tools/perf/tests/parse-events.c260
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c2
-rw-r--r--tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c1
-rw-r--r--tools/perf/tests/shell/coresight/thread_loop/thread_loop.c1
-rw-r--r--tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c1
-rwxr-xr-xtools/perf/tests/shell/daemon.sh34
-rwxr-xr-xtools/perf/tests/shell/diff.sh108
-rw-r--r--tools/perf/tests/shell/lib/perf_has_symbol.sh21
-rw-r--r--tools/perf/tests/shell/lib/setup_python.sh16
-rwxr-xr-xtools/perf/tests/shell/list.sh34
-rwxr-xr-xtools/perf/tests/shell/pipe_test.sh9
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh5
-rwxr-xr-xtools/perf/tests/shell/record.sh38
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh4
-rwxr-xr-xtools/perf/tests/shell/script.sh72
-rwxr-xr-xtools/perf/tests/shell/stat+json_output.sh16
-rwxr-xr-xtools/perf/tests/shell/stat_all_pmu.sh2
-rwxr-xr-xtools/perf/tests/shell/stat_metrics_values.sh14
-rwxr-xr-xtools/perf/tests/shell/test_arm_callgraph_fp.sh6
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh6
-rwxr-xr-xtools/perf/tests/shell/test_data_symbol.sh16
-rwxr-xr-xtools/perf/tests/shell/test_perf_data_converter_json.sh13
-rw-r--r--tools/perf/tests/sigtrap.c106
-rw-r--r--tools/perf/tests/sw-clock.c2
-rw-r--r--tools/perf/tests/switch-tracking.c2
-rw-r--r--tools/perf/tests/task-exit.c2
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/tests/topology.c2
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c181
-rw-r--r--tools/perf/tests/workloads/thloop.c4
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh10
-rw-r--r--tools/perf/trace/beauty/beauty.h2
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh4
-rwxr-xr-xtools/perf/trace/beauty/socket.sh4
-rw-r--r--tools/perf/trace/beauty/statx.c1
-rw-r--r--tools/perf/ui/browsers/annotate.c104
-rw-r--r--tools/perf/ui/browsers/hists.c36
-rw-r--r--tools/perf/ui/browsers/hists.h2
-rw-r--r--tools/perf/ui/browsers/scripts.c1
-rw-r--r--tools/perf/ui/gtk/annotate.c6
-rw-r--r--tools/perf/ui/gtk/gtk.h2
-rw-r--r--tools/perf/ui/tui/setup.c22
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/annotate-data.c405
-rw-r--r--tools/perf/util/annotate-data.h143
-rw-r--r--tools/perf/util/annotate.c687
-rw-r--r--tools/perf/util/annotate.h144
-rw-r--r--tools/perf/util/auxtrace.c7
-rw-r--r--tools/perf/util/auxtrace.h3
-rw-r--r--tools/perf/util/block-info.c10
-rw-r--r--tools/perf/util/block-info.h3
-rw-r--r--tools/perf/util/block-range.c7
-rw-r--r--tools/perf/util/bpf-event.c11
-rw-r--r--tools/perf/util/bpf-event.h12
-rw-r--r--tools/perf/util/bpf_counter.c2
-rw-r--r--tools/perf/util/bpf_lock_contention.c2
-rw-r--r--tools/perf/util/compress.h7
-rw-r--r--tools/perf/util/cpumap.c2
-rw-r--r--tools/perf/util/cputopo.c2
-rw-r--r--tools/perf/util/cs-etm.c21
-rw-r--r--tools/perf/util/db-export.c4
-rw-r--r--tools/perf/util/debug.c22
-rw-r--r--tools/perf/util/debug.h1
-rw-r--r--tools/perf/util/debuginfo.c205
-rw-r--r--tools/perf/util/debuginfo.h64
-rw-r--r--tools/perf/util/dso.c4
-rw-r--r--tools/perf/util/dso.h2
-rw-r--r--tools/perf/util/dwarf-aux.c249
-rw-r--r--tools/perf/util/dwarf-aux.h51
-rw-r--r--tools/perf/util/dwarf-regs.c34
-rw-r--r--tools/perf/util/env.c82
-rw-r--r--tools/perf/util/env.h17
-rw-r--r--tools/perf/util/event.c4
-rw-r--r--tools/perf/util/evlist.c43
-rw-r--r--tools/perf/util/evlist.h1
-rw-r--r--tools/perf/util/evsel.c55
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/genelf.c6
-rw-r--r--tools/perf/util/header.c43
-rw-r--r--tools/perf/util/hisi-ptt.c1
-rw-r--r--tools/perf/util/hist.c4
-rw-r--r--tools/perf/util/hist.h28
-rw-r--r--tools/perf/util/include/dwarf-regs.h19
-rw-r--r--tools/perf/util/include/linux/linkage.h4
-rw-r--r--tools/perf/util/machine.c75
-rw-r--r--tools/perf/util/map.c20
-rw-r--r--tools/perf/util/map.h83
-rw-r--r--tools/perf/util/maps.c528
-rw-r--r--tools/perf/util/maps.h48
-rw-r--r--tools/perf/util/mem-events.c25
-rw-r--r--tools/perf/util/metricgroup.c2
-rw-r--r--tools/perf/util/mmap.c5
-rw-r--r--tools/perf/util/mmap.h1
-rw-r--r--tools/perf/util/parse-branch-options.c1
-rw-r--r--tools/perf/util/parse-events.c52
-rw-r--r--tools/perf/util/perf_api_probe.c4
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c1
-rw-r--r--tools/perf/util/pmu.c8
-rw-r--r--tools/perf/util/pmu.h3
-rw-r--r--tools/perf/util/print-events.c2
-rw-r--r--tools/perf/util/probe-event.c40
-rw-r--r--tools/perf/util/probe-finder.c201
-rw-r--r--tools/perf/util/probe-finder.h19
-rw-r--r--tools/perf/util/record.c4
-rw-r--r--tools/perf/util/s390-cpumcf-kernel.h2
-rw-r--r--tools/perf/util/s390-sample-raw.c114
-rw-r--r--tools/perf/util/sample.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c3
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c17
-rw-r--r--tools/perf/util/session.c20
-rw-r--r--tools/perf/util/sort.c216
-rw-r--r--tools/perf/util/sort.h7
-rw-r--r--tools/perf/util/stat-display.c2
-rw-r--r--tools/perf/util/stat-shadow.c2
-rw-r--r--tools/perf/util/stat.c4
-rw-r--r--tools/perf/util/stat.h1
-rw-r--r--tools/perf/util/symbol-elf.c6
-rw-r--r--tools/perf/util/symbol-minimal.c10
-rw-r--r--tools/perf/util/symbol.c306
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/symbol_conf.h6
-rw-r--r--tools/perf/util/synthetic-events.c122
-rw-r--r--tools/perf/util/thread.c42
-rw-r--r--tools/perf/util/thread.h14
-rw-r--r--tools/perf/util/top.c9
-rw-r--r--tools/perf/util/top.h1
-rw-r--r--tools/perf/util/unwind-libdw.c21
-rw-r--r--tools/perf/util/unwind-libunwind-local.c34
-rw-r--r--tools/perf/util/vdso.c35
-rw-r--r--tools/perf/util/zstd.c63
-rw-r--r--tools/power/cpupower/bench/Makefile2
-rw-r--r--tools/testing/cxl/Kbuild3
-rw-r--r--tools/testing/cxl/test/Kbuild2
-rw-r--r--tools/testing/cxl/test/cxl.c63
-rw-r--r--tools/testing/cxl/test/mock.c14
-rw-r--r--tools/testing/cxl/test/mock.h1
-rw-r--r--tools/testing/kunit/configs/all_tests.config6
-rw-r--r--tools/testing/nvdimm/Kbuild2
-rw-r--r--tools/testing/selftests/Makefile7
-rw-r--r--tools/testing/selftests/alsa/test-pcmtest-driver.c4
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch641
-rw-r--r--tools/testing/selftests/bpf/Makefile49
-rw-r--r--tools/testing/selftests/bpf/README.rst32
-rw-r--r--tools/testing/selftests/bpf/bench.c12
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h21
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h30
-rw-r--r--tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile19
-rw-r--r--tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c84
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c103
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h17
-rw-r--r--tools/testing/selftests/bpf/config1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cpumask.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/decap_sanity.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fib_lookup.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fill_link_info.c114
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/iters.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c52
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_probes.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_str.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_fixup.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_redirect.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_reroute.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/read_vsyscall.c57
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reg_bounds.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_destroy.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spin_lock.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_storage.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_tunnel.c18
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/token.c1052
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tracing_failure.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bonding.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdpwall.c2
-rw-r--r--tools/testing/selftests/bpf/progs/async_stack_depth.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_compiler.h33
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h16
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c26
-rw-r--r--tools/testing/selftests/bpf/progs/connect_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_common.h57
-rw-r--r--tools/testing/selftests/bpf/progs/getpeername_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/getsockname_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c9
-rw-r--r--tools/testing/selftests/bpf/progs/iters_task.c12
-rw-r--r--tools/testing/selftests/bpf/progs/kptr_xchg_inline.c48
-rw-r--r--tools/testing/selftests/bpf/progs/loop4.c4
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/priv_map.c13
-rw-r--r--tools/testing/selftests/bpf/progs/priv_prog.c13
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h17
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h7
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_read_lock.c120
-rw-r--r--tools/testing/selftests/bpf/progs/read_vsyscall.c45
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c3
-rw-r--r--tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c4
-rw-r--r--tools/testing/selftests/bpf/progs/sock_iter_batch.c4
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h18
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c29
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c24
-rw-r--r--tools/testing/selftests/bpf/progs/struct_ops_module.c37
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_recursion.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_fill_link_info.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_seg6local.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_ptr_untrusted.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_seg6_loop.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_siphash.h64
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c65
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock_fail.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c5
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c595
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h140
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_dynptr.c10
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_loop.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c5
-rw-r--r--tools/testing/selftests/bpf/progs/timer.c34
-rw-r--r--tools/testing/selftests/bpf/progs/token_lsm.c32
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_failure.c20
-rw-r--r--tools/testing/selftests/bpf/progs/type_cast.c13
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c2
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c182
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c29
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c70
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_loops1.c24
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spill_fill.c553
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spin_lock.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c6
-rw-r--r--tools/testing/selftests/bpf/progs/xdping_kern.c3
-rw-r--r--tools/testing/selftests/bpf/test_loader.c4
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c18
-rw-r--r--tools/testing/selftests/bpf/test_maps.c6
-rw-r--r--tools/testing/selftests/bpf/test_progs.c18
-rw-r--r--tools/testing/selftests/bpf/test_progs.h7
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c3
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c60
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c96
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.h10
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_loop_inline.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c6
-rw-r--r--tools/testing/selftests/core/close_range_test.c1
-rw-r--r--tools/testing/selftests/drivers/net/bonding/Makefile7
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh19
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh21
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_options.sh48
-rw-r--r--tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh8
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh2
-rw-r--r--tools/testing/selftests/drivers/net/bonding/lag_lib.sh18
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh2
l---------tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh1
-rw-r--r--tools/testing/selftests/drivers/net/bonding/settings2
-rw-r--r--tools/testing/selftests/drivers/net/dsa/Makefile18
l---------tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_mld.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh2
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh2
l---------tools/testing/selftests/drivers/net/dsa/lib.sh1
l---------tools/testing/selftests/drivers/net/dsa/local_termination.sh2
l---------tools/testing/selftests/drivers/net/dsa/no_forwarding.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh9
l---------tools/testing/selftests/drivers/net/dsa/tc_actions.sh2
l---------tools/testing/selftests/drivers/net/dsa/tc_common.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh2
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/Makefile18
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/peer.sh143
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh47
-rw-r--r--tools/testing/selftests/drivers/net/team/Makefile7
-rw-r--r--tools/testing/selftests/drivers/net/team/config4
-rwxr-xr-xtools/testing/selftests/drivers/net/team/dev_addr_lists.sh4
l---------tools/testing/selftests/drivers/net/team/lag_lib.sh1
l---------tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh1
-rwxr-xr-xtools/testing/selftests/dt/test_unprobed_devices.sh13
-rw-r--r--tools/testing/selftests/hid/tests/test_wacom_generic.py8
-rw-r--r--tools/testing/selftests/iommu/config5
-rw-r--r--tools/testing/selftests/iommu/iommufd.c78
-rw-r--r--tools/testing/selftests/iommu/iommufd_utils.h39
-rw-r--r--tools/testing/selftests/kselftest.h45
-rw-r--r--tools/testing/selftests/kselftest_harness.h198
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c12
-rw-r--r--tools/testing/selftests/kvm/aarch64/hypercalls.c16
-rw-r--r--tools/testing/selftests/kvm/aarch64/page_fault_test.c6
-rw-r--r--tools/testing/selftests/kvm/aarch64/smccc_filter.c2
-rw-r--r--tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c12
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c4
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c4
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c54
-rw-r--r--tools/testing/selftests/kvm/get-reg-list.c2
-rw-r--r--tools/testing/selftests/kvm/guest_print_test.c8
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c6
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h2
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h2
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c2
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c4
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c2
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/vgic.c4
-rw-r--r--tools/testing/selftests/kvm/lib/elf.c2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c19
-rw-r--r--tools/testing/selftests/kvm/lib/memstress.c2
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c2
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/processor.c2
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c25
-rw-r--r--tools/testing/selftests/kvm/lib/userfaultfd_util.c2
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c21
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c6
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c2
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c6
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c110
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/resets.c4
-rw-r--r--tools/testing/selftests/kvm/s390x/sync_regs_test.c20
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c6
-rw-r--r--tools/testing/selftests/kvm/system_counter_offset_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/amx_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/cpuid_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c21
-rw-r--r--tools/testing/selftests/kvm/x86_64/flds_emulation.h2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_clock.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_ipi.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_clock_test.c42
-rw-r--r--tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c28
-rw-r--r--tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86_64/ucna_injection_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_io_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c16
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c19
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/xss_msr_test.c2
-rw-r--r--tools/testing/selftests/landlock/base_test.c2
-rw-r--r--tools/testing/selftests/landlock/common.h106
-rw-r--r--tools/testing/selftests/landlock/fs_test.c37
-rw-r--r--tools/testing/selftests/landlock/net_test.c17
-rw-r--r--tools/testing/selftests/landlock/ptrace_test.c7
-rw-r--r--tools/testing/selftests/lib.mk19
-rw-r--r--tools/testing/selftests/livepatch/functions.sh37
-rwxr-xr-xtools/testing/selftests/mm/charge_reserved_hugetlb.sh2
-rw-r--r--tools/testing/selftests/mm/hmm-tests.c4
-rw-r--r--tools/testing/selftests/mm/ksm_tests.c2
-rw-r--r--tools/testing/selftests/mm/map_hugetlb.c7
-rw-r--r--tools/testing/selftests/mm/mremap_test.c27
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c6
-rwxr-xr-xtools/testing/selftests/mm/va_high_addr_switch.sh6
-rwxr-xr-xtools/testing/selftests/mm/write_hugetlb_memory.sh2
-rw-r--r--tools/testing/selftests/net/Makefile11
-rwxr-xr-xtools/testing/selftests/net/big_tcp.sh4
-rwxr-xr-xtools/testing/selftests/net/cmsg_ipv6.sh4
-rw-r--r--tools/testing/selftests/net/config50
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh34
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh148
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile5
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_locked_port.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb.sh14
-rw-r--r--tools/testing/selftests/net/forwarding/config35
-rwxr-xr-xtools/testing/selftests/net/forwarding/custom_multipath_hash.sh16
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample2
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh16
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath_nh.sh41
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh42
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh16
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh6
-rw-r--r--tools/testing/selftests/net/forwarding/ip6gre_lib.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh31
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_lib.sh2
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh.sh39
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh_res.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh43
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_actions.sh3
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh8
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_police.sh16
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh10
-rwxr-xr-xtools/testing/selftests/net/fq_band_pktlimit.sh14
-rwxr-xr-xtools/testing/selftests/net/gro.sh5
-rwxr-xr-xtools/testing/selftests/net/ioam6.sh38
-rw-r--r--tools/testing/selftests/net/ioam6_parser.c95
-rw-r--r--tools/testing/selftests/net/ip_local_port_range.c10
-rw-r--r--tools/testing/selftests/net/lib.sh5
-rw-r--r--tools/testing/selftests/net/mptcp/config3
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh111
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh111
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh223
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_lib.sh118
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh55
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh58
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c39
-rw-r--r--tools/testing/selftests/net/mptcp/settings2
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh55
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh60
-rw-r--r--[-rwxr-xr-x]tools/testing/selftests/net/net_helper.sh11
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh75
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py71
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh43
-rwxr-xr-xtools/testing/selftests/net/rps_default_mask.sh6
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh8
-rw-r--r--[-rwxr-xr-x]tools/testing/selftests/net/setup_loopback.sh0
-rw-r--r--tools/testing/selftests/net/setup_veth.sh2
-rw-r--r--tools/testing/selftests/net/so_incoming_cpu.c68
-rw-r--r--tools/testing/selftests/net/so_txtime.c7
-rwxr-xr-xtools/testing/selftests/net/so_txtime.sh29
-rw-r--r--tools/testing/selftests/net/tcp_ao/config10
-rw-r--r--tools/testing/selftests/net/tcp_ao/key-management.c46
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/sock.c12
-rw-r--r--tools/testing/selftests/net/tcp_ao/rst.c138
-rw-r--r--tools/testing/selftests/net/tcp_ao/settings1
-rw-r--r--tools/testing/selftests/net/tcp_ao/unsigned-md5.c12
-rwxr-xr-xtools/testing/selftests/net/test_bridge_backup_port.sh23
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_mdb.sh36
-rw-r--r--tools/testing/selftests/net/tls.c59
-rw-r--r--tools/testing/selftests/net/txtimestamp.c3
-rwxr-xr-xtools/testing/selftests/net/txtimestamp.sh12
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh4
-rwxr-xr-xtools/testing/selftests/net/udpgro_bench.sh4
-rwxr-xr-xtools/testing/selftests/net/udpgro_frglist.sh6
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh22
-rw-r--r--tools/testing/selftests/net/udpgso.c134
-rwxr-xr-xtools/testing/selftests/net/udpgso.sh49
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c2
-rwxr-xr-xtools/testing/selftests/net/veth.sh18
-rw-r--r--tools/testing/selftests/net/xdp_dummy.c13
-rw-r--r--tools/testing/selftests/netfilter/Makefile3
-rw-r--r--tools/testing/selftests/netfilter/bridge_netfilter.sh188
-rw-r--r--tools/testing/selftests/netfilter/conntrack_dump_flush.c43
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_signal.c16
-rw-r--r--tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c4
-rw-r--r--tools/testing/selftests/riscv/hwprobe/cbo.c24
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.c4
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_test.h3
-rw-r--r--tools/testing/selftests/riscv/vector/v_initval_nolibc.c2
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c3
-rw-r--r--tools/testing/selftests/riscv/vector/vstate_prctl.c4
-rw-r--r--tools/testing/selftests/rseq/basic_percpu_ops_test.c14
-rw-r--r--tools/testing/selftests/rseq/param_test.c22
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c104
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c9
-rw-r--r--tools/testing/selftests/tc-testing/config1
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json403
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json2
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py2
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh3
-rw-r--r--tools/testing/vsock/vsock_diag_test.c2
-rw-r--r--tools/tracing/rtla/Makefile7
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c9
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c6
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c9
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c6
-rw-r--r--tools/tracing/rtla/src/utils.c14
-rw-r--r--tools/tracing/rtla/src/utils.h2
-rw-r--r--tools/verification/rv/Makefile7
-rw-r--r--tools/verification/rv/src/in_kernel.c2
-rw-r--r--tools/virtio/.gitignore1
-rw-r--r--tools/virtio/Makefile8
-rw-r--r--tools/virtio/linux/virtio_config.h4
-rw-r--r--tools/virtio/vhost_net_test.c532
-rw-r--r--usr/gen_init_cpio.c33
4194 files changed, 124832 insertions, 49702 deletions
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 000000000000..854773350cc5
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+root = true
+
+[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
+charset = utf-8
+end_of_line = lf
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = tab
+indent_size = 8
+
+[*.{json,py,rs}]
+charset = utf-8
+end_of_line = lf
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = space
+indent_size = 4
+
+# this must be below the general *.py to override it
+[tools/{perf,power,rcu,testing/kunit}/**.py,]
+indent_style = tab
+indent_size = 8
+
+[*.yaml]
+charset = utf-8
+end_of_line = lf
+trim_trailing_whitespace = unset
+insert_final_newline = true
+indent_style = space
+indent_size = 2
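
For context on the new file above: each bracketed section name is a glob, brace groups expand to alternatives, and later sections override earlier ones. A rough Python sketch of that resolution logic (simplified and illustrative only — it hand-expands braces since fnmatch has none, and it skips per-directory files and the finer points of ** in the real EditorConfig spec)::

    # Sketch: resolve which EditorConfig properties apply to a path.
    import configparser, fnmatch, re

    def expand_braces(pattern):
        # Expand one innermost {a,b,c} group, then recurse.
        m = re.search(r'\{([^{}]*)\}', pattern)
        if not m:
            return [pattern]
        head, tail = pattern[:m.start()], pattern[m.end():]
        return [p for alt in m.group(1).split(',')
                  for p in expand_braces(head + alt + tail)]

    def properties_for(path, config='.editorconfig'):
        cp = configparser.ConfigParser()
        # The top-level "root = true" has no section header, so wrap it.
        with open(config) as f:
            cp.read_string('[__root__]\n' + f.read())
        props = {}
        for section in cp.sections():
            if section == '__root__':
                continue
            for glob in expand_braces(section):
                # Globs without a '/' match the file name in any directory.
                if fnmatch.fnmatch(path, glob) or fnmatch.fnmatch(path, '*/' + glob):
                    props.update(cp[section])    # later sections win
                    break
        return props

    print(properties_for('kernel/sched/core.c'))  # expect tab, width 8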
diff --git a/.gitignore b/.gitignore
index 98274e1160d7..689a4fa3f547 100644
--- a/.gitignore
+++ b/.gitignore
@@ -96,6 +96,7 @@ modules.order
#
!.clang-format
!.cocciconfig
+!.editorconfig
!.get_maintainer.ignore
!.gitattributes
!.gitignore
diff --git a/.mailmap b/.mailmap
index 04998f7bda81..6ac0d469c7fa 100644
--- a/.mailmap
+++ b/.mailmap
@@ -191,10 +191,11 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
-Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
-Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
-Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
-Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
+Geliang Tang <geliang@kernel.org> <geliang.tang@linux.dev>
+Geliang Tang <geliang@kernel.org> <geliang.tang@suse.com>
+Geliang Tang <geliang@kernel.org> <geliangtang@xiaomi.com>
+Geliang Tang <geliang@kernel.org> <geliangtang@gmail.com>
+Geliang Tang <geliang@kernel.org> <geliangtang@163.com>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
@@ -289,6 +290,7 @@ Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
John Fastabend <john.fastabend@gmail.com> <john.r.fastabend@intel.com>
John Keeping <john@keeping.me.uk> <john@metanate.com>
+John Moon <john@jmoon.dev> <quic_johmoo@quicinc.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
<jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
@@ -344,6 +346,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
Leon Romanovsky <leon@kernel.org> <leonro@nvidia.com>
+Leo Yan <leo.yan@linux.dev> <leo.yan@linaro.org>
Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
@@ -550,6 +553,7 @@ Senthilkumar N L <quic_snlakshm@quicinc.com> <snlakshm@codeaurora.org>
Serge Hallyn <sergeh@kernel.org> <serge.hallyn@canonical.com>
Serge Hallyn <sergeh@kernel.org> <serue@us.ibm.com>
Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
+Shakeel Butt <shakeel.butt@linux.dev> <shakeelb@google.com>
Shannon Nelson <shannon.nelson@amd.com> <snelson@pensando.io>
Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@intel.com>
Shannon Nelson <shannon.nelson@amd.com> <shannon.nelson@oracle.com>
@@ -568,6 +572,7 @@ Simon Kelley <simon@thekelleys.org.uk>
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
+Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@osdl.org>
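
As an aside on the format being edited here: each .mailmap line maps an old commit identity to a canonical one. A minimal, hedged sketch of a loader for the common two-address form shown above (name-only and single-address variants are deliberately skipped; git's own mailmap handling also supports name-based matching)::

    # Sketch: load ".mailmap" entries of the form
    #   Canonical Name <canonical@email> <old@email>
    # into a dict keyed by the old address. Illustrative only.
    import re

    MAP_RE = re.compile(r'^(.*<[^<>]+>)\s+<([^<>]+)>\s*$')

    def load_mailmap(path='.mailmap'):
        mapping = {}
        with open(path, encoding='utf-8') as f:
            for line in f:
                line = line.split('#', 1)[0].strip()
                m = MAP_RE.match(line)
                if m:
                    mapping[m.group(2).lower()] = m.group(1)
        return mapping

    mm = load_mailmap()
    print(mm.get('geliang.tang@suse.com'))
    # expected, per the hunk above: Geliang Tang <geliang@kernel.org>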
diff --git a/CREDITS b/CREDITS
index 5797e8f7e92b..df8d6946739f 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2161,6 +2161,19 @@ N: Mike Kravetz
E: mike.kravetz@oracle.com
D: Maintenance and development of the hugetlb subsystem
+N: Seth Jennings
+E: sjenning@redhat.com
+D: Creation and maintenance of zswap
+
+N: Dan Streetman
+E: ddstreet@ieee.org
+D: Maintenance and development of zswap
+D: Creation and maintenance of the zpool API
+
+N: Vitaly Wool
+E: vitaly.wool@konsulko.com
+D: Maintenance and development of zswap
+
N: Andreas S. Krebs
E: akrebs@altavista.net
D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards
diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues
index 906ff3ca928a..84aa25e0d14d 100644
--- a/Documentation/ABI/testing/sysfs-class-net-queues
+++ b/Documentation/ABI/testing/sysfs-class-net-queues
@@ -1,4 +1,4 @@
-What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
+What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
Date: March 2010
KernelVersion: 2.6.35
Contact: netdev@vger.kernel.org
@@ -8,7 +8,7 @@ Description:
network device queue. Possible values depend on the number
of available CPU(s) in the system.
-What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
+What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
Date: April 2010
KernelVersion: 2.6.35
Contact: netdev@vger.kernel.org
@@ -16,7 +16,7 @@ Description:
Number of Receive Packet Steering flows being currently
processed by this particular network device receive queue.
-What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
+What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -24,7 +24,7 @@ Description:
Indicates the number of transmit timeout events seen by this
network interface transmit queue.
-What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
+What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
Date: March 2015
KernelVersion: 4.1
Contact: netdev@vger.kernel.org
@@ -32,7 +32,7 @@ Description:
A max-rate in Mbps set for the queue; a value of zero means
the rate limit is disabled, which is the default.
-What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
+What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
Date: November 2010
KernelVersion: 2.6.38
Contact: netdev@vger.kernel.org
@@ -42,7 +42,7 @@ Description:
network device transmit queue. Possible values depend on the
number of available CPU(s) in the system.
-What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
+What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
Date: June 2018
KernelVersion: 4.18.0
Contact: netdev@vger.kernel.org
@@ -53,7 +53,7 @@ Description:
number of available receive queue(s) in the network device.
Default is disabled.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -62,7 +62,7 @@ Description:
of this particular network device transmit queue.
Default value is 1000.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -70,7 +70,7 @@ Description:
Indicates the number of bytes (objects) in flight on this
network device transmit queue.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -79,7 +79,7 @@ Description:
on this network device transmit queue. This value is clamped
to be within the bounds defined by limit_max and limit_min.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -88,7 +88,7 @@ Description:
queued on this network device transmit queue. See
include/linux/dynamic_queue_limits.h for the default value.
-What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
Date: November 2011
KernelVersion: 3.3
Contact: netdev@vger.kernel.org
@@ -96,3 +96,26 @@ Description:
Indicates the absolute minimum limit of bytes allowed to be
queued on this network device transmit queue. Default value is
0.
+
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/stall_thrs
+Date: Jan 2024
+KernelVersion: 6.9
+Contact: netdev@vger.kernel.org
+Description:
+ Tx completion stall detection threshold in ms. The kernel
+ guarantees detection of all stalls longer than this threshold,
+ but may also detect stalls longer than half of it.
+
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/stall_cnt
+Date: Jan 2024
+KernelVersion: 6.9
+Contact: netdev@vger.kernel.org
+Description:
+ Number of detected Tx completion stalls.
+
+What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/stall_max
+Date: Jan 2024
+KernelVersion: 6.9
+Contact: netdev@vger.kernel.org
+Description:
+ Longest detected Tx completion stall. Write 0 to clear.
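
To make the three new attributes concrete, a small hedged sketch that reads them back for one queue; the interface name and queue index are placeholders, and the files only appear on kernels that carry this change (6.9+, with BQL in use)::

    # Sketch: read the BQL stall attributes documented above.
    from pathlib import Path

    bql = Path('/sys/class/net/eth0/queues/tx-0/byte_queue_limits')
    for name in ('stall_thrs', 'stall_cnt', 'stall_max'):
        f = bql / name
        if f.exists():
            print(name, '=', f.read_text().strip())

    # stall_max is writable; writing 0 clears the high-water mark:
    # (bql / 'stall_max').write_text('0')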
diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics
index 55db27815361..53e508c6936a 100644
--- a/Documentation/ABI/testing/sysfs-class-net-statistics
+++ b/Documentation/ABI/testing/sysfs-class-net-statistics
@@ -1,4 +1,4 @@
-What: /sys/class/<iface>/statistics/collisions
+What: /sys/class/net/<iface>/statistics/collisions
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -6,7 +6,7 @@ Description:
Indicates the number of collisions seen by this network device.
This value might not be relevant with all MAC layers.
-What: /sys/class/<iface>/statistics/multicast
+What: /sys/class/net/<iface>/statistics/multicast
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -14,7 +14,7 @@ Description:
Indicates the number of multicast packets received by this
network device.
-What: /sys/class/<iface>/statistics/rx_bytes
+What: /sys/class/net/<iface>/statistics/rx_bytes
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -23,7 +23,7 @@ Description:
See the network driver for the exact meaning of when this
value is incremented.
-What: /sys/class/<iface>/statistics/rx_compressed
+What: /sys/class/net/<iface>/statistics/rx_compressed
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -32,7 +32,7 @@ Description:
network device. This value might only be relevant for interfaces
that support packet compression (e.g: PPP).
-What: /sys/class/<iface>/statistics/rx_crc_errors
+What: /sys/class/net/<iface>/statistics/rx_crc_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -41,7 +41,7 @@ Description:
by this network device. Note that the specific meaning might
depend on the MAC layer used by the interface.
-What: /sys/class/<iface>/statistics/rx_dropped
+What: /sys/class/net/<iface>/statistics/rx_dropped
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -51,7 +51,7 @@ Description:
packet processing. See the network driver for the exact
meaning of this value.
-What: /sys/class/<iface>/statistics/rx_errors
+What: /sys/class/net/<iface>/statistics/rx_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -59,7 +59,7 @@ Description:
Indicates the number of receive errors on this network device.
See the network driver for the exact meaning of this value.
-What: /sys/class/<iface>/statistics/rx_fifo_errors
+What: /sys/class/net/<iface>/statistics/rx_fifo_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -68,7 +68,7 @@ Description:
network device. See the network driver for the exact
meaning of this value.
-What: /sys/class/<iface>/statistics/rx_frame_errors
+What: /sys/class/net/<iface>/statistics/rx_frame_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -78,7 +78,7 @@ Description:
on the MAC layer protocol used. See the network driver for
the exact meaning of this value.
-What: /sys/class/<iface>/statistics/rx_length_errors
+What: /sys/class/net/<iface>/statistics/rx_length_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -87,7 +87,7 @@ Description:
error, oversized or undersized. See the network driver for the
exact meaning of this value.
-What: /sys/class/<iface>/statistics/rx_missed_errors
+What: /sys/class/net/<iface>/statistics/rx_missed_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -96,7 +96,7 @@ Description:
due to lack of capacity in the receive side. See the network
driver for the exact meaning of this value.
-What: /sys/class/<iface>/statistics/rx_nohandler
+What: /sys/class/net/<iface>/statistics/rx_nohandler
Date: February 2016
KernelVersion: 4.6
Contact: netdev@vger.kernel.org
@@ -104,7 +104,7 @@ Description:
Indicates the number of received packets that were dropped on
an inactive device by the network core.
-What: /sys/class/<iface>/statistics/rx_over_errors
+What: /sys/class/net/<iface>/statistics/rx_over_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -114,7 +114,7 @@ Description:
(e.g: larger than MTU). See the network driver for the exact
meaning of this value.
-What: /sys/class/<iface>/statistics/rx_packets
+What: /sys/class/net/<iface>/statistics/rx_packets
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -122,7 +122,7 @@ Description:
Indicates the total number of good packets received by this
network device.
-What: /sys/class/<iface>/statistics/tx_aborted_errors
+What: /sys/class/net/<iface>/statistics/tx_aborted_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -132,7 +132,7 @@ Description:
a medium collision). See the network driver for the exact
meaning of this value.
-What: /sys/class/<iface>/statistics/tx_bytes
+What: /sys/class/net/<iface>/statistics/tx_bytes
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -143,7 +143,7 @@ Description:
transmitted packets or all packets that have been queued for
transmission.
-What: /sys/class/<iface>/statistics/tx_carrier_errors
+What: /sys/class/net/<iface>/statistics/tx_carrier_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -152,7 +152,7 @@ Description:
because of carrier errors (e.g: physical link down). See the
network driver for the exact meaning of this value.
-What: /sys/class/<iface>/statistics/tx_compressed
+What: /sys/class/net/<iface>/statistics/tx_compressed
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -161,7 +161,7 @@ Description:
this might only be relevant for devices that support
compression (e.g: PPP).
-What: /sys/class/<iface>/statistics/tx_dropped
+What: /sys/class/net/<iface>/statistics/tx_dropped
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -170,7 +170,7 @@ Description:
See the driver for the exact reasons as to why the packets were
dropped.
-What: /sys/class/<iface>/statistics/tx_errors
+What: /sys/class/net/<iface>/statistics/tx_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -179,7 +179,7 @@ Description:
a network device. See the driver for the exact reasons as to
why the packets were dropped.
-What: /sys/class/<iface>/statistics/tx_fifo_errors
+What: /sys/class/net/<iface>/statistics/tx_fifo_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -188,7 +188,7 @@ Description:
FIFO error. See the driver for the exact reasons as to why the
packets were dropped.
-What: /sys/class/<iface>/statistics/tx_heartbeat_errors
+What: /sys/class/net/<iface>/statistics/tx_heartbeat_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -197,7 +197,7 @@ Description:
reported as heartbeat errors. See the driver for the exact
reasons as to why the packets were dropped.
-What: /sys/class/<iface>/statistics/tx_packets
+What: /sys/class/net/<iface>/statistics/tx_packets
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
@@ -206,7 +206,7 @@ Description:
device. See the driver for whether this reports the number of all
attempted or successful transmissions.
-What: /sys/class/<iface>/statistics/tx_window_errors
+What: /sys/class/net/<iface>/statistics/tx_window_errors
Date: April 2005
KernelVersion: 2.6.12
Contact: netdev@vger.kernel.org
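
These statistics files are monotonic counters, so a throughput estimate only needs two reads a known interval apart. A brief sketch (the interface name is a placeholder)::

    # Sketch: estimate receive throughput from the rx_bytes counter
    # documented above. 'eth0' is a placeholder interface name.
    import time

    def read_stat(iface, name):
        with open(f'/sys/class/net/{iface}/statistics/{name}') as f:
            return int(f.read())

    before = read_stat('eth0', 'rx_bytes')
    time.sleep(1.0)
    after = read_stat('eth0', 'rx_bytes')
    print(f'~{(after - before) / 1e6:.2f} MB/s received')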
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
index 8d7d8f05f6cd..92fe7c5c5ac1 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
@@ -1,4 +1,4 @@
-What: /sys/devices/.../hwmon/hwmon<i>/in0_input
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/in0_input
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@@ -6,7 +6,7 @@ Description: RO. Current Voltage in millivolt.
Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@@ -20,7 +20,7 @@ Description: RW. Card reactive sustained (PL1/Tau) power limit in microwatts.
Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_rated_max
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@@ -28,7 +28,7 @@ Description: RO. Card default power limit (default TDP setting).
Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_max_interval
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@@ -37,7 +37,7 @@ Description: RW. Sustained power limit interval (Tau in PL1/Tau) in
Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/power1_crit
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@@ -50,7 +50,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/curr1_crit
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
@@ -63,7 +63,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
Only supported for particular Intel i915 graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
+What: /sys/bus/pci/drivers/i915/.../hwmon/hwmon<i>/energy1_input
Date: February 2023
KernelVersion: 6.2
Contact: intel-gfx@lists.freedesktop.org
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
index 8c321bc9dc04..023fd82de3f7 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
+++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
@@ -1,4 +1,4 @@
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@@ -12,7 +12,7 @@ Description: RW. Card reactive sustained (PL1) power limit in microwatts.
Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_rated_max
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_rated_max
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@@ -20,7 +20,7 @@ Description: RO. Card default power limit (default TDP setting).
Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_crit
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_crit
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@@ -33,7 +33,7 @@ Description: RW. Card reactive critical (I1) power limit in microwatts.
Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/curr1_crit
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/curr1_crit
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@@ -44,7 +44,7 @@ Description: RW. Card reactive critical (I1) power limit in milliamperes.
the operating frequency if the power averaged over a window
exceeds this limit.
-What: /sys/devices/.../hwmon/hwmon<i>/in0_input
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/in0_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@@ -52,7 +52,7 @@ Description: RO. Current Voltage in millivolt.
Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/energy1_input
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/energy1_input
Date: September 2023
KernelVersion: 6.5
Contact: intel-xe@lists.freedesktop.org
@@ -60,7 +60,7 @@ Description: RO. Energy input of device in microjoules.
Only supported for particular Intel xe graphics platforms.
-What: /sys/devices/.../hwmon/hwmon<i>/power1_max_interval
+What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/power1_max_interval
Date: October 2023
KernelVersion: 6.6
Contact: intel-xe@lists.freedesktop.org
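
Both the i915 and xe entries above follow the usual hwmon units (power in microwatts, voltage in millivolts). A hedged sketch that locates either driver's hwmon directory and prints power1_max in watts — the 'i915'/'xe' name values are an assumption, and which attributes exist is platform-dependent::

    # Sketch: print power1_max for any i915/xe hwmon device, converting
    # the documented microwatt values to watts.
    from pathlib import Path

    for hwmon in Path('/sys/class/hwmon').glob('hwmon*'):
        try:
            name = (hwmon / 'name').read_text().strip()
            p = hwmon / 'power1_max'
            if name in ('i915', 'xe') and p.exists():
                print(name, f'{int(p.read_text()) / 1e6:.1f} W')
        except OSError:
            continue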
diff --git a/Documentation/ABI/testing/sysfs-nvmem-cells b/Documentation/ABI/testing/sysfs-nvmem-cells
index 7af70adf3690..c7c9444f92a8 100644
--- a/Documentation/ABI/testing/sysfs-nvmem-cells
+++ b/Documentation/ABI/testing/sysfs-nvmem-cells
@@ -4,18 +4,18 @@ KernelVersion: 6.5
Contact: Miquel Raynal <miquel.raynal@bootlin.com>
Description:
The "cells" folder contains one file per cell exposed by the
- NVMEM device. The name of the file is: <name>@<where>, with
- <name> being the cell name and <where> its location in the NVMEM
- device, in hexadecimal (without the '0x' prefix, to mimic device
- tree node names). The length of the file is the size of the cell
- (when known). The content of the file is the binary content of
- the cell (may sometimes be ASCII, likely without trailing
- character).
+ NVMEM device. The name of the file is: "<name>@<byte>,<bit>",
+ with <name> being the cell name and <byte>,<bit> its location in
+ the NVMEM device, in hexadecimal bytes and bits (without the
+ '0x' prefix, to mimic device tree node names). The length of
+ the file is the size of the cell (when known). The content of
+ the file is the binary content of the cell (may sometimes be
+ ASCII, likely without a trailing character).
Note: This file is only present if CONFIG_NVMEM_SYSFS
is enabled.
Example::
- hexdump -C /sys/bus/nvmem/devices/1-00563/cells/product-name@d
+ hexdump -C /sys/bus/nvmem/devices/1-00563/cells/product-name@d,0
00000000 54 4e 34 38 4d 2d 50 2d 44 4e |TN48M-P-DN|
0000000a
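
A tiny sketch of taking the "<name>@<byte>,<bit>" file name back apart, matching the hexdump example above (the helper name is hypothetical)::

    # Sketch: split an nvmem cell file name of the form <name>@<byte>,<bit>
    # into its parts. Offsets are hexadecimal without a '0x' prefix,
    # per the ABI text above.
    def parse_cell_name(fname):
        name, _, loc = fname.rpartition('@')
        byte_s, _, bit_s = loc.partition(',')
        return name, int(byte_s, 16), int(bit_s or '0', 16)

    print(parse_cell_name('product-name@d,0'))  # ('product-name', 13, 0)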
diff --git a/Documentation/ABI/testing/sysfs-platform-silicom b/Documentation/ABI/testing/sysfs-platform-silicom
index 2288b3665d16..4d1cc5bdbcc5 100644
--- a/Documentation/ABI/testing/sysfs-platform-silicom
+++ b/Documentation/ABI/testing/sysfs-platform-silicom
@@ -10,6 +10,7 @@ What: /sys/devices/platform/silicom-platform/power_cycle
Date: November 2023
KernelVersion: 6.7
Contact: Henry Shi <henrys@silicom-usa.com>
+Description:
This file allows the user to power cycle the platform.
Default value is 0; when set to 1, it powers down
the platform, waits 5 seconds, then powers on the
diff --git a/Documentation/accel/introduction.rst b/Documentation/accel/introduction.rst
index 89984dfececf..ae3030136637 100644
--- a/Documentation/accel/introduction.rst
+++ b/Documentation/accel/introduction.rst
@@ -101,8 +101,8 @@ External References
email threads
-------------
-* `Initial discussion on the New subsystem for acceleration devices <https://lkml.org/lkml/2022/7/31/83>`_ - Oded Gabbay (2022)
-* `patch-set to add the new subsystem <https://lkml.org/lkml/2022/10/22/544>`_ - Oded Gabbay (2022)
+* `Initial discussion on the New subsystem for acceleration devices <https://lore.kernel.org/lkml/CAFCwf11=9qpNAepL7NL+YAV_QO=Wv6pnWPhKHKAepK3fNn+2Dg@mail.gmail.com/>`_ - Oded Gabbay (2022)
+* `patch-set to add the new subsystem <https://lore.kernel.org/lkml/20221022214622.18042-1-ogabbay@kernel.org/>`_ - Oded Gabbay (2022)
Conference talks
----------------
diff --git a/Documentation/admin-guide/cifs/todo.rst b/Documentation/admin-guide/cifs/todo.rst
index 2646ed2e2d3e..9a65c670774e 100644
--- a/Documentation/admin-guide/cifs/todo.rst
+++ b/Documentation/admin-guide/cifs/todo.rst
@@ -2,7 +2,8 @@
TODO
====
-Version 2.14 December 21, 2018
+As of the 6.7 kernel. See https://wiki.samba.org/index.php/LinuxCIFSKernel
+for a list of features added by release.
A Partial List of Missing Features
==================================
@@ -12,22 +13,22 @@ for visible, important contributions to this module. Here
is a partial list of the known problems and missing features:
a) SMB3 (and SMB3.1.1) missing optional features:
+ multichannel performance optimizations, algorithmic channel selection,
+ directory leases optimizations,
+ support for faster packet signing (GMAC),
+ support for compression over the network,
+ T10 copy offload, i.e. "ODX" (copy chunk and "Duplicate Extents" ioctl
+ are currently the only two server side copy mechanisms supported)
- - multichannel (partially integrated), integration of multichannel with RDMA
- - directory leases (improved metadata caching). Currently only implemented for root dir
- - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
- currently the only two server side copy mechanisms supported)
+b) Better optimized compounding and error handling for sparse file support,
+ perhaps addition of new optional SMB3.1.1 fsctls to make collapse range
+ and insert range more atomic
-b) improved sparse file support (fiemap and SEEK_HOLE are implemented
- but additional features would be supportable by the protocol such
- as FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE)
-
-c) Directory entry caching relies on a 1 second timer, rather than
- using Directory Leases, currently only the root file handle is cached longer
- by leveraging Directory Leases
+c) Support for SMB3.1.1 over QUIC (and perhaps other socket based protocols
+ like SCTP)
d) quota support (needs minor kernel change since quota calls otherwise
- won't make it to network filesystems or deviceless filesystems).
+ won't make it to network filesystems or deviceless filesystems).
e) Additional use cases can be optimized to use "compounding" (e.g.
open/query/close and open/setinfo/close) to reduce the number of
@@ -92,23 +93,20 @@ t) split cifs and smb3 support into separate modules so legacy (and less
v) Additional testing of POSIX Extensions for SMB3.1.1
-w) Add support for additional strong encryption types, and additional spnego
- authentication mechanisms (see MS-SMB2). GCM-256 is now partially implemented.
+w) Support for the Mac SMB3.1.1 extensions to improve interop with Apple servers
+
+x) Support for additional authentication options (e.g. IAKERB, peer-to-peer
+ Kerberos, SCRAM and others supported by existing servers)
-x) Finish support for SMB3.1.1 compression
+y) Improved tracing, more eBPF trace points, better scripts for performance
+ analysis
Known Bugs
==========
See https://bugzilla.samba.org - search on product "CifsVFS" for
current bug list. Also check http://bugzilla.kernel.org (Product = File System, Component = CIFS)
-
-1) existing symbolic links (Windows reparse points) are recognized but
- can not be created remotely. They are implemented for Samba and those that
- support the CIFS Unix extensions, although earlier versions of Samba
- overly restrict the pathnames.
-2) follow_link and readdir code does not follow dfs junctions
- but recognizes them
+and xfstest results e.g. https://wiki.samba.org/index.php/Xfstest-results-smb3
Misc testing to do
==================
diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst
index 5f936b4b6018..aa8290a29dc8 100644
--- a/Documentation/admin-guide/cifs/usage.rst
+++ b/Documentation/admin-guide/cifs/usage.rst
@@ -81,7 +81,7 @@ much older and less secure than the default dialect SMB3 which includes
many advanced security features such as downgrade attack detection
and encrypted shares and stronger signing and authentication algorithms.
There are additional mount options that may be helpful for SMB3 to get
-improved POSIX behavior (NB: can use vers=3.0 to force only SMB3, never 2.1):
+improved POSIX behavior (NB: can use vers=3 to force SMB3 or later, never 2.1):
``mfsymlinks`` and either ``cifsacl`` or ``modefromsid`` (usually with ``idsfromsid``)
@@ -715,6 +715,7 @@ DebugData Displays information about active CIFS sessions and
Stats Lists summary resource usage information as well as per
share statistics.
open_files List all the open file handles on all active SMB sessions.
+mount_params Lists all mount parameters available for the module
======================= =======================================================
Configuration pseudo-files:
@@ -864,6 +865,11 @@ i.e.::
echo "value" > /sys/module/cifs/parameters/<param>
+More detailed descriptions of the available module parameters and their values
+can be seen by doing:
+
+ modinfo cifs (or modinfo smb3)
+
================= ==========================================================
1. enable_oplocks Enable or disable oplocks. Oplocks are enabled by default.
[Y/y/1]. To disable use any of [N/n/0].
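
The same parameters discussed above can also be enumerated at runtime; a short hedged sketch that walks /sys/module/cifs/parameters (the module must be loaded, and some parameters may be write-only)::

    # Sketch: list cifs module parameters and their current values via
    # the sysfs interface described above.
    from pathlib import Path

    params = Path('/sys/module/cifs/parameters')
    for p in sorted(params.iterdir()):
        try:
            print(f'{p.name} = {p.read_text().strip()}')
        except OSError:
            print(f'{p.name} = <write-only or unreadable>')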
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index 102937bc8443..4410384596a9 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -218,8 +218,3 @@ bytes respectively. Such letter suffixes can also be entirely omitted:
.. include:: kernel-parameters.txt
:literal:
-
-Todo
-----
-
- Add more DRM drivers.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2e76c3476e2a..31b3a25680d0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -888,9 +888,9 @@
memory region [offset, offset + size] for that kernel
image. If '@offset' is omitted, then a suitable offset
is selected automatically.
- [KNL, X86-64, ARM64, RISCV] Select a region under 4G first, and
- fall back to reserve region above 4G when '@offset'
- hasn't been specified.
+ [KNL, X86-64, ARM64, RISCV, LoongArch] Select a region
+ under 4G first, and fall back to reserve region above
+ 4G when '@offset' hasn't been specified.
See Documentation/admin-guide/kdump/kdump.rst for further details.
crashkernel=range1:size1[,range2:size2,...][@offset]
@@ -901,25 +901,27 @@
Documentation/admin-guide/kdump/kdump.rst for an example.
crashkernel=size[KMG],high
- [KNL, X86-64, ARM64, RISCV] range could be above 4G.
+ [KNL, X86-64, ARM64, RISCV, LoongArch] range could be
+ above 4G.
Allow the kernel to allocate the physical memory region from the
top, so it could be above 4G if the system has more than 4G of
RAM installed. Otherwise, the memory region will be allocated
below 4G, if available.
It will be ignored if crashkernel=X is specified.
crashkernel=size[KMG],low
- [KNL, X86-64, ARM64, RISCV] range under 4G. When crashkernel=X,high
- is passed, kernel could allocate physical memory region
- above 4G, that cause second kernel crash on system
- that require some amount of low memory, e.g. swiotlb
- requires at least 64M+32K low memory, also enough extra
- low memory is needed to make sure DMA buffers for 32-bit
- devices won't run out. Kernel would try to allocate
+ [KNL, X86-64, ARM64, RISCV, LoongArch] range under 4G.
+ When crashkernel=X,high is passed, the kernel could allocate
+ the physical memory region above 4G, which causes a second
+ kernel crash on systems that require some amount of low
+ memory, e.g. swiotlb requires at least 64M+32K of low memory,
+ and enough extra low memory is needed to make sure DMA buffers
+ for 32-bit devices won't run out. The kernel will try to allocate a
default size of memory below 4G automatically. The default
size is platform dependent.
--> x86: max(swiotlb_size_or_default() + 8MiB, 256MiB)
--> arm64: 128MiB
--> riscv: 128MiB
+ --> loongarch: 128MiB
This one lets the user specify their own low range under 4G
for the second kernel instead.
0: to disable low allocation.
diff --git a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
index 993c2a05f5ee..b6aeae3327ce 100644
--- a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
+++ b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
@@ -243,13 +243,9 @@ To reduce its OS jitter, do any of the following:
3. Do any of the following needed to avoid jitter that your
application cannot tolerate:
- a. Build your kernel with CONFIG_SLUB=y rather than
- CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
- use of each CPU's workqueues to run its cache_reap()
- function.
- b. Avoid using oprofile, thus avoiding OS jitter from
+ a. Avoid using oprofile, thus avoiding OS jitter from
wq_sync_buffer().
- c. Limit your CPU frequency so that a CPU-frequency
+ b. Limit your CPU frequency so that a CPU-frequency
governor is not required, possibly enlisting the aid of
special heatsinks or other cooling technologies. If done
correctly, and if your CPU architecture permits, you should
@@ -259,7 +255,7 @@ To reduce its OS jitter, do any of the following:
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
- d. As of v3.18, Christoph Lameter's on-demand vmstat workers
+ c. As of v3.18, Christoph Lameter's on-demand vmstat workers
commit prevents OS jitter due to vmstat_update() on
CONFIG_SMP=y systems. Before v3.18, it is not possible
to entirely get rid of the OS jitter, but you can
@@ -274,7 +270,7 @@ To reduce its OS jitter, do any of the following:
(based on an earlier one from Gilad Ben-Yossef) that
reduces or even eliminates vmstat overhead for some
workloads at https://lore.kernel.org/r/00000140e9dfd6bd-40db3d4f-c1be-434f-8132-7820f81bb586-000000@email.amazonses.com.
- e. If running on high-end powerpc servers, build with
+ d. If running on high-end powerpc servers, build with
CONFIG_PPC_RTAS_DAEMON=n. This prevents the RTAS
daemon from running on each CPU every second or so.
(This will require editing Kconfig files and will defeat
@@ -282,12 +278,12 @@ To reduce its OS jitter, do any of the following:
due to the rtas_event_scan() function.
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
- f. If running on Cell Processor, build your kernel with
+ e. If running on Cell Processor, build your kernel with
CBE_CPUFREQ_SPU_GOVERNOR=n to avoid OS jitter from
spu_gov_work().
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
- g. If running on PowerMAC, build your kernel with
+ f. If running on PowerMAC, build your kernel with
CONFIG_PMAC_RACKMETER=n to disable the CPU-meter,
avoiding OS jitter from rackmeter_do_timer().
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 396091651955..7250c0542828 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -206,6 +206,11 @@ Will increase power usage.
Default: 0 (off)
+mem_pcpu_rsv
+------------
+
+Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
+
rmem_default
------------
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index bfdf236e2af3..45a7f4932fe0 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -71,6 +71,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #3117295 | ARM64_ERRATUM_3117295 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A520 | #2966298 | ARM64_ERRATUM_2966298 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
@@ -235,11 +237,16 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Rockchip | RK3588 | #3588001 | ROCKCHIP_ERRATUM_3588001 |
+----------------+-----------------+-----------------+-----------------------------+
-
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+----------------+-----------------+-----------------+-----------------------------+
-
+----------------+-----------------+-----------------+-----------------------------+
| ASR | ASR8601 | #8601001 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
+| Microsoft | Azure Cobalt 100| #2139208 | ARM64_ERRATUM_2139208 |
++----------------+-----------------+-----------------+-----------------------------+
+| Microsoft | Azure Cobalt 100| #2067961 | ARM64_ERRATUM_2067961 |
++----------------+-----------------+-----------------+-----------------------------+
+| Microsoft | Azure Cobalt 100| #2253138 | ARM64_ERRATUM_2253138 |
++----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/arch/x86/mds.rst b/Documentation/arch/x86/mds.rst
index e73fdff62c0a..c58c72362911 100644
--- a/Documentation/arch/x86/mds.rst
+++ b/Documentation/arch/x86/mds.rst
@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
mds_clear_cpu_buffers()
+The macro CLEAR_CPU_BUFFERS can also be used in ASM late in the exit-to-user
+path. Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+
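+A sketch of how a transition point can use this helper (the static key name
+below is illustrative, not the kernel's)::
+
+    static inline void clear_cpu_buffers_on_transition(void)
+    {
+            if (static_branch_likely(&clear_cpu_buf_key))  /* assumed key */
+                    mds_clear_cpu_buffers();  /* executes the VERW sequence */
+    }
+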
The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
(idle) transitions.
@@ -138,17 +141,30 @@ Mitigation points
When transitioning from kernel to user space the CPU buffers are flushed
on affected CPUs when the mitigation is not disabled on the kernel
- command line. The migitation is enabled through the static key
- mds_user_clear.
-
- The mitigation is invoked in prepare_exit_to_usermode() which covers
- all but one of the kernel to user space transitions. The exception
- is when we return from a Non Maskable Interrupt (NMI), which is
- handled directly in do_nmi().
-
- (The reason that NMI is special is that prepare_exit_to_usermode() can
- enable IRQs. In NMI context, NMIs are blocked, and we don't want to
- enable IRQs with NMIs blocked.)
+ command line. The mitigation is enabled through the feature flag
+ X86_FEATURE_CLEAR_CPU_BUF.
+
+ The mitigation is invoked just before transitioning to userspace after
+ user registers are restored. This is done to minimize the window in
+ which kernel data could be accessed after VERW, e.g. via an NMI that
+ arrives after VERW.
+
+ **Corner case not handled**
+ Interrupts returning to the kernel don't clear CPU buffers since the
+ exit-to-user path is expected to do that anyway. But there could be
+ a case when an NMI is generated in the kernel after the exit-to-user
+ path has cleared the buffers. This case is not handled, and NMIs
+ returning to the kernel don't clear CPU buffers, because:
+
+ 1. It is rare to get an NMI after VERW, but before returning to userspace.
+ 2. For an unprivileged user, there is no known way to make that NMI
+ less rare or target it.
+ 3. It would take a large number of these precisely-timed NMIs to mount
+ an actual attack. There's presumably not enough bandwidth.
+ 4. The NMI in question occurs after a VERW, i.e. when user state is
+ restored and most interesting data is already scrubbed. What's left
+ is only the data that the NMI touches, and that may or may not be of
+ any interest.
2. C-State transition
diff --git a/Documentation/block/ioprio.rst b/Documentation/block/ioprio.rst
index a25c6d5df87b..4662e1ff3d81 100644
--- a/Documentation/block/ioprio.rst
+++ b/Documentation/block/ioprio.rst
@@ -6,17 +6,16 @@ Block io priorities
Intro
-----
-With the introduction of cfq v3 (aka cfq-ts or time sliced cfq), basic io
-priorities are supported for reads on files. This enables users to io nice
-processes or process groups, similar to what has been possible with cpu
-scheduling for ages. This document mainly details the current possibilities
-with cfq; other io schedulers do not support io priorities thus far.
+The io priority feature enables users to io nice processes or process groups,
+similar to what has been possible with cpu scheduling for ages. Support for io
+priorities is io scheduler dependent and currently supported by bfq and
+mq-deadline.
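+
+For example, a process can place itself in a class at a given priority level
+with the ioprio_set() system call. A minimal sketch (raw syscall since glibc
+provides no wrapper; constants abridged from include/uapi/linux/ioprio.h)::
+
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    #define IOPRIO_CLASS_SHIFT  13
+    #define IOPRIO_PRIO_VALUE(class, data)  (((class) << IOPRIO_CLASS_SHIFT) | (data))
+    #define IOPRIO_WHO_PROCESS  1
+    #define IOPRIO_CLASS_BE     2
+
+    int main(void)
+    {
+            /* best-effort class, level 4, for the calling process (who == 0) */
+            return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
+                           IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
+    }
+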
Scheduling classes
------------------
-CFQ implements three generic scheduling classes that determine how io is
-served for a process.
+Three generic scheduling classes are implemented for io priorities that
+determine how io is served for a process.
IOPRIO_CLASS_RT: This is the realtime io class. This scheduling class is given
higher priority than any other in the system, processes from this class are
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index 7985c6615f3c..a8f5782bd833 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -177,10 +177,10 @@ In addition to kfuncs' arguments, verifier may need more information about the
type of kfunc(s) being registered with the BPF subsystem. To do so, we define
flags on a set of kfuncs as follows::
- BTF_SET8_START(bpf_task_set)
+ BTF_KFUNCS_START(bpf_task_set)
BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE)
- BTF_SET8_END(bpf_task_set)
+ BTF_KFUNCS_END(bpf_task_set)
This set encodes the BTF ID of each kfunc listed above, and encodes the flags
along with it. Of course, it is also allowed to specify no flags.
@@ -347,10 +347,10 @@ Once the kfunc is prepared for use, the final step to making it visible is
registering it with the BPF subsystem. Registration is done per BPF program
type. An example is shown below::
- BTF_SET8_START(bpf_task_set)
+ BTF_KFUNCS_START(bpf_task_set)
BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE)
- BTF_SET8_END(bpf_task_set)
+ BTF_KFUNCS_END(bpf_task_set)
static const struct btf_kfunc_id_set bpf_task_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/Documentation/bpf/map_lpm_trie.rst b/Documentation/bpf/map_lpm_trie.rst
index 74d64a30f500..f9cd579496c9 100644
--- a/Documentation/bpf/map_lpm_trie.rst
+++ b/Documentation/bpf/map_lpm_trie.rst
@@ -17,7 +17,7 @@ significant byte.
LPM tries may be created with a maximum prefix length that is a multiple
of 8, in the range from 8 to 2048. The key used for lookup and update
-operations is a ``struct bpf_lpm_trie_key``, extended by
+operations is a ``struct bpf_lpm_trie_key_u8``, extended by
``max_prefixlen/8`` bytes.
- For IPv4 addresses the data length is 4 bytes
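+
+For example, a key for an IPv4 trie can be laid out as the prefix length
+followed by ``max_prefixlen/8 = 4`` data bytes; the struct name below is just
+for illustration::
+
+    struct ipv4_lpm_key {
+            __u32 prefixlen;  /* up to 32 for IPv4 */
+            __u8  data[4];    /* address, most significant byte first */
+    };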
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index 245b6defc298..f3269d6dd5e0 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -1,11 +1,11 @@
.. contents::
.. sectnum::
-=======================================
-BPF Instruction Set Specification, v1.0
-=======================================
+======================================
+BPF Instruction Set Architecture (ISA)
+======================================
-This document specifies version 1.0 of the BPF instruction set.
+This document specifies the BPF instruction set architecture (ISA).
Documentation conventions
=========================
@@ -97,6 +97,38 @@ Definitions
A: 10000110
B: 11111111 10000110
+Conformance groups
+------------------
+
+An implementation does not need to support all instructions specified in this
+document (e.g., deprecated instructions). Instead, a number of conformance
+groups are specified. An implementation must support the base32 conformance
+group and may support additional conformance groups, where supporting a
+conformance group means it must support all instructions in that conformance
+group.
+
+The use of named conformance groups enables interoperability between a runtime
+that executes instructions and tools such as compilers that generate
+instructions for the runtime. Thus, capability discovery in terms of
+conformance groups might be done manually by users or automatically by tools
+(see the sketch after the list below).
+
+Each conformance group has a short ASCII label (e.g., "base32") that
+corresponds to a set of instructions that are mandatory. That is, each
+instruction has one or more conformance groups of which it is a member.
+
+This document defines the following conformance groups:
+
+* base32: includes all instructions defined in this
+ specification unless otherwise noted.
+* base64: includes base32, plus instructions explicitly noted
+ as being in the base64 conformance group.
+* atomic32: includes 32-bit atomic operation instructions (see `Atomic operations`_).
+* atomic64: includes atomic32, plus 64-bit atomic operation instructions.
+* divmul32: includes 32-bit division, multiplication, and modulo instructions.
+* divmul64: includes divmul32, plus 64-bit division, multiplication,
+ and modulo instructions.
+* legacy: deprecated packet access instructions.
+
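+A tool might gate code generation on the groups a runtime advertises. A
+hypothetical sketch in C (this document does not define a discovery API)::
+
+    #include <stdbool.h>
+    #include <string.h>
+
+    /* 'groups' holds labels obtained from the runtime by other means */
+    static bool supports(const char *const *groups, size_t n, const char *label)
+    {
+            for (size_t i = 0; i < n; i++)
+                    if (strcmp(groups[i], label) == 0)
+                            return true;
+            return false;
+    }
+
+    /* e.g., emit BPF_ALU64 forms only if supports(groups, n, "base64") */
+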
Instruction encoding
====================
@@ -144,28 +176,23 @@ Note that most instructions do not use all of the fields.
Unused fields shall be cleared to zero.
As discussed below in `64-bit immediate instructions`_, a 64-bit immediate
-instruction uses a 64-bit immediate value that is constructed as follows.
+instruction uses two 32-bit immediate values that are constructed as follows.
The 64 bits following the basic instruction contain a pseudo instruction
-using the same format but with opcode, dst_reg, src_reg, and offset all set to zero,
-and imm containing the high 32 bits of the immediate value.
+using the same format but with 'opcode', 'dst_reg', 'src_reg', and 'offset' all
+set to zero, and imm containing the high 32 bits of the immediate value.
This is depicted in the following figure::
basic_instruction
- .-----------------------------.
- | |
- code:8 regs:8 offset:16 imm:32 unused:32 imm:32
- | |
- '--------------'
- pseudo instruction
-
-Thus the 64-bit immediate value is constructed as follows:
-
- imm64 = (next_imm << 32) | imm
+ .------------------------------.
+ | |
+ opcode:8 regs:8 offset:16 imm:32 unused:32 imm:32
+ | |
+ '--------------'
+ pseudo instruction
-where 'next_imm' refers to the imm value of the pseudo instruction
-following the basic instruction. The unused bytes in the pseudo
-instruction are reserved and shall be cleared to zero.
+Here, the imm value of the pseudo instruction is called 'next_imm'. The unused
+bytes in the pseudo instruction are reserved and shall be cleared to zero.
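+
+A loader thus reconstructs the 64-bit immediate as
+``imm64 = (next_imm << 32) | imm`` (see `64-bit immediate instructions`_). A
+minimal sketch, assuming a struct layout matching the encoding above::
+
+    #include <stdint.h>
+
+    struct insn {                 /* one 64-bit instruction unit */
+            uint8_t  opcode;
+            uint8_t  regs;        /* dst_reg:4, src_reg:4 */
+            int16_t  offset;
+            int32_t  imm;
+    };
+
+    static uint64_t imm64(const struct insn *basic, const struct insn *pseudo)
+    {
+            /* pseudo->imm is 'next_imm'; both halves are taken as unsigned */
+            return ((uint64_t)(uint32_t)pseudo->imm << 32) | (uint32_t)basic->imm;
+    }
+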
Instruction classes
-------------------
@@ -217,7 +244,8 @@ Arithmetic instructions
-----------------------
``BPF_ALU`` uses 32-bit wide operands while ``BPF_ALU64`` uses 64-bit wide operands for
-otherwise identical operations.
+otherwise identical operations. ``BPF_ALU64`` instructions belong to the
+base64 conformance group unless noted otherwise.
The 'code' field encodes the operation as below, where 'src' and 'dst' refer
to the values of the source and destination registers, respectively.
@@ -262,15 +290,19 @@ where '(u32)' indicates that the upper 32 bits are zeroed.
``BPF_XOR | BPF_K | BPF_ALU`` means::
- dst = (u32) dst ^ (u32) imm32
+ dst = (u32) dst ^ (u32) imm
``BPF_XOR | BPF_K | BPF_ALU64`` means::
- dst = dst ^ imm32
+ dst = dst ^ imm
Note that most instructions have instruction offset of 0. Only three instructions
(``BPF_SDIV``, ``BPF_SMOD``, ``BPF_MOVSX``) have a non-zero offset.
+Division, multiplication, and modulo operations for ``BPF_ALU`` are part
+of the "divmul32" conformance group, and division, multiplication, and
+modulo operations for ``BPF_ALU64`` are part of the "divmul64" conformance
+group.
The division and modulo operations support both unsigned and signed flavors.
For unsigned operations (``BPF_DIV`` and ``BPF_MOD``), for ``BPF_ALU``,
@@ -295,7 +327,11 @@ The ``BPF_MOVSX`` instruction does a move operation with sign extension.
``BPF_ALU | BPF_MOVSX`` :term:`sign extends<Sign Extend>` 8-bit and 16-bit operands into 32
bit operands, and zeroes the remaining upper 32 bits.
``BPF_ALU64 | BPF_MOVSX`` :term:`sign extends<Sign Extend>` 8-bit, 16-bit, and 32-bit
-operands into 64 bit operands.
+operands into 64 bit operands. Unlike other arithmetic instructions,
+``BPF_MOVSX`` is only defined for register source operands (``BPF_X``).
+
+The ``BPF_NEG`` instruction is only defined when the source bit is clear
+(``BPF_K``).
Shift operations use a mask of 0x3F (63) for 64-bit operations and 0x1F (31)
for 32-bit operations.
@@ -323,7 +359,9 @@ BPF_ALU64 Reserved 0x00 do byte swap unconditionally
========= ========= ===== =================================================
The 'imm' field encodes the width of the swap operations. The following widths
-are supported: 16, 32 and 64.
+are supported: 16, 32 and 64. Width 64 operations belong to the base64
+conformance group and other swap operations belong to the base32
+conformance group.
Examples:
@@ -348,31 +386,33 @@ Examples:
Jump instructions
-----------------
-``BPF_JMP32`` uses 32-bit wide operands while ``BPF_JMP`` uses 64-bit wide operands for
-otherwise identical operations.
+``BPF_JMP32`` uses 32-bit wide operands and indicates the base32
+conformance group, while ``BPF_JMP`` uses 64-bit wide operands for
+otherwise identical operations, and indicates the base64 conformance
+group unless otherwise specified.
The 'code' field encodes the operation as below:
-======== ===== === =========================================== =========================================
-code value src description notes
-======== ===== === =========================================== =========================================
-BPF_JA 0x0 0x0 PC += offset BPF_JMP class
-BPF_JA 0x0 0x0 PC += imm BPF_JMP32 class
-BPF_JEQ 0x1 any PC += offset if dst == src
-BPF_JGT 0x2 any PC += offset if dst > src unsigned
-BPF_JGE 0x3 any PC += offset if dst >= src unsigned
-BPF_JSET 0x4 any PC += offset if dst & src
-BPF_JNE 0x5 any PC += offset if dst != src
-BPF_JSGT 0x6 any PC += offset if dst > src signed
-BPF_JSGE 0x7 any PC += offset if dst >= src signed
-BPF_CALL 0x8 0x0 call helper function by address see `Helper functions`_
-BPF_CALL 0x8 0x1 call PC += imm see `Program-local functions`_
-BPF_CALL 0x8 0x2 call helper function by BTF ID see `Helper functions`_
-BPF_EXIT 0x9 0x0 return BPF_JMP only
-BPF_JLT 0xa any PC += offset if dst < src unsigned
-BPF_JLE 0xb any PC += offset if dst <= src unsigned
-BPF_JSLT 0xc any PC += offset if dst < src signed
-BPF_JSLE 0xd any PC += offset if dst <= src signed
-======== ===== === =========================================== =========================================
+======== ===== ======= =============================== =============================================
+code value src_reg description notes
+======== ===== ======= =============================== =============================================
+BPF_JA 0x0 0x0 PC += offset BPF_JMP | BPF_K only
+BPF_JA 0x0 0x0 PC += imm BPF_JMP32 | BPF_K only
+BPF_JEQ 0x1 any PC += offset if dst == src
+BPF_JGT 0x2 any PC += offset if dst > src unsigned
+BPF_JGE 0x3 any PC += offset if dst >= src unsigned
+BPF_JSET 0x4 any PC += offset if dst & src
+BPF_JNE 0x5 any PC += offset if dst != src
+BPF_JSGT 0x6 any PC += offset if dst > src signed
+BPF_JSGE 0x7 any PC += offset if dst >= src signed
+BPF_CALL 0x8 0x0 call helper function by address BPF_JMP | BPF_K only, see `Helper functions`_
+BPF_CALL 0x8 0x1 call PC += imm BPF_JMP | BPF_K only, see `Program-local functions`_
+BPF_CALL 0x8 0x2 call helper function by BTF ID BPF_JMP | BPF_K only, see `Helper functions`_
+BPF_EXIT 0x9 0x0 return BPF_JMP | BPF_K only
+BPF_JLT 0xa any PC += offset if dst < src unsigned
+BPF_JLE 0xb any PC += offset if dst <= src unsigned
+BPF_JSLT 0xc any PC += offset if dst < src signed
+BPF_JSLE 0xd any PC += offset if dst <= src signed
+======== ===== ======= =============================== =============================================
The BPF program needs to store the return value into register R0 before doing a
``BPF_EXIT``.
@@ -398,6 +438,9 @@ specified by the 'imm' field. A > 16-bit conditional jump may be
converted to a < 16-bit conditional jump plus a 32-bit unconditional
jump.
+All ``BPF_CALL`` and ``BPF_JA`` instructions belong to the
+base32 conformance group.
+
Helper functions
~~~~~~~~~~~~~~~~
@@ -455,6 +498,8 @@ The size modifier is one of:
BPF_DW 0x18 double word (8 bytes)
============= ===== =====================
+Instructions using ``BPF_DW`` belong to the base64 conformance group.
+
Regular load and store operations
---------------------------------
@@ -467,7 +512,7 @@ instructions that transfer data between a register and memory.
``BPF_MEM | <size> | BPF_ST`` means::
- *(size *) (dst + offset) = imm32
+ *(size *) (dst + offset) = imm
``BPF_MEM | <size> | BPF_LDX`` means::
@@ -499,8 +544,10 @@ by other BPF programs or means outside of this specification.
All atomic operations supported by BPF are encoded as store operations
that use the ``BPF_ATOMIC`` mode modifier as follows:
-* ``BPF_ATOMIC | BPF_W | BPF_STX`` for 32-bit operations
-* ``BPF_ATOMIC | BPF_DW | BPF_STX`` for 64-bit operations
+* ``BPF_ATOMIC | BPF_W | BPF_STX`` for 32-bit operations, which are
+ part of the "atomic32" conformance group.
+* ``BPF_ATOMIC | BPF_DW | BPF_STX`` for 64-bit operations, which are
+ part of the "atomic64" conformance group.
* 8-bit and 16-bit wide atomic operations are not supported.
The 'imm' field is used to encode the actual atomic operation.
@@ -521,7 +568,7 @@ BPF_XOR 0xa0 atomic xor
*(u32 *)(dst + offset) += src
-``BPF_ATOMIC | BPF_DW | BPF_STX`` with 'imm' = BPF ADD means::
+``BPF_ATOMIC | BPF_DW | BPF_STX`` with 'imm' = BPF_ADD means::
*(u64 *)(dst + offset) += src
@@ -554,24 +601,24 @@ and loaded back to ``R0``.
-----------------------------
Instructions with the ``BPF_IMM`` 'mode' modifier use the wide instruction
-encoding defined in `Instruction encoding`_, and use the 'src' field of the
+encoding defined in `Instruction encoding`_, and use the 'src_reg' field of the
basic instruction to hold an opcode subtype.
The following table defines a set of ``BPF_IMM | BPF_DW | BPF_LD`` instructions
-with opcode subtypes in the 'src' field, using new terms such as "map"
+with opcode subtypes in the 'src_reg' field, using new terms such as "map"
defined further below:
-========================= ====== === ========================================= =========== ==============
-opcode construction opcode src pseudocode imm type dst type
-========================= ====== === ========================================= =========== ==============
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x0 dst = imm64 integer integer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x1 dst = map_by_fd(imm) map fd map
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data pointer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x3 dst = var_addr(imm) variable id data pointer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x4 dst = code_addr(imm) integer code pointer
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x5 dst = map_by_idx(imm) map index map
-BPF_IMM | BPF_DW | BPF_LD 0x18 0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data pointer
-========================= ====== === ========================================= =========== ==============
+========================= ====== ======= ========================================= =========== ==============
+opcode construction opcode src_reg pseudocode imm type dst type
+========================= ====== ======= ========================================= =========== ==============
+BPF_IMM | BPF_DW | BPF_LD 0x18 0x0 dst = (next_imm << 32) | imm integer integer
+BPF_IMM | BPF_DW | BPF_LD 0x18 0x1 dst = map_by_fd(imm) map fd map
+BPF_IMM | BPF_DW | BPF_LD 0x18 0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data pointer
+BPF_IMM | BPF_DW | BPF_LD 0x18 0x3 dst = var_addr(imm) variable id data pointer
+BPF_IMM | BPF_DW | BPF_LD 0x18 0x4 dst = code_addr(imm) integer code pointer
+BPF_IMM | BPF_DW | BPF_LD 0x18 0x5 dst = map_by_idx(imm) map index map
+BPF_IMM | BPF_DW | BPF_LD 0x18 0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data pointer
+========================= ====== ======= ========================================= =========== ==============
where
@@ -609,5 +656,9 @@ Legacy BPF Packet access instructions
-------------------------------------
BPF previously introduced special instructions for access to packet data that were
-carried over from classic BPF. However, these instructions are
-deprecated and should no longer be used.
+carried over from classic BPF. These instructions used an instruction
+class of BPF_LD, a size modifier of BPF_W, BPF_H, or BPF_B, and a
+mode modifier of BPF_ABS or BPF_IND. The 'dst_reg' and 'offset' fields were
+set to zero, and 'src_reg' was set to zero for BPF_ABS. However, these
+instructions are deprecated and should no longer be used. All legacy packet
+access instructions belong to the "legacy" conformance group.
diff --git a/Documentation/bpf/verifier.rst b/Documentation/bpf/verifier.rst
index f0ec19db301c..356894399fbf 100644
--- a/Documentation/bpf/verifier.rst
+++ b/Documentation/bpf/verifier.rst
@@ -562,7 +562,7 @@ works::
* ``checkpoint[0].r1`` is marked as read;
* At instruction #5 exit is reached and ``checkpoint[0]`` can now be processed
- by ``clean_live_states()``. After this processing ``checkpoint[0].r0`` has a
+ by ``clean_live_states()``. After this processing ``checkpoint[0].r1`` has a
read mark and all other registers and stack slots are marked as ``NOT_INIT``
or ``STACK_INVALID``
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 5830b01c5642..da64c9fb7e07 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -388,6 +388,12 @@ latex_elements = {
verbatimhintsturnover=false,
''',
+ #
+ # Some of our authors are fond of deep nesting; tell latex to
+ # cope.
+ #
+ 'maxlistdepth': '10',
+
# For CJK One-half spacing, need to be in front of hyperref
'extrapackages': r'\usepackage{setspace}',
diff --git a/Documentation/dev-tools/checkuapi.rst b/Documentation/dev-tools/checkuapi.rst
new file mode 100644
index 000000000000..9072f21b50b0
--- /dev/null
+++ b/Documentation/dev-tools/checkuapi.rst
@@ -0,0 +1,477 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+============
+UAPI Checker
+============
+
+The UAPI checker (``scripts/check-uapi.sh``) is a shell script which
+checks UAPI header files for userspace backwards-compatibility across
+the git tree.
+
+Options
+=======
+
+This section will describe the options with which ``check-uapi.sh``
+can be run.
+
+Usage::
+
+ check-uapi.sh [-b BASE_REF] [-p PAST_REF] [-j N] [-l ERROR_LOG] [-i] [-q] [-v]
+
+Available options::
+
+ -b BASE_REF Base git reference to use for comparison. If unspecified or empty,
+ will use any dirty changes in tree to UAPI files. If there are no
+ dirty changes, HEAD will be used.
+ -p PAST_REF Compare BASE_REF to PAST_REF (e.g. -p v6.1). If unspecified or empty,
+ will use BASE_REF^1. Must be an ancestor of BASE_REF. Only headers
+ that exist on PAST_REF will be checked for compatibility.
+ -j N Number of checks to run in parallel (default: number of CPU cores).
+ -l ERROR_LOG Write error log to file (default: no error log is generated).
+ -i Ignore ambiguous changes that may or may not break UAPI compatibility.
+ -q Quiet operation.
+ -v Verbose operation (print more information about each header being checked).
+
+Environmental args::
+
+ ABIDIFF Custom path to abidiff binary
+ CC C compiler (default is "gcc")
+ ARCH Target architecture of C compiler (default is host arch)
+
+Exit codes::
+
+ 0) Success
+ 1) ABI difference detected
+ 2) Prerequisite not met
+
+Examples
+========
+
+Basic Usage
+-----------
+
+First, let's try making a change to a UAPI header file that obviously
+won't break userspace::
+
+ cat << 'EOF' | patch -l -p1
+ --- a/include/uapi/linux/acct.h
+ +++ b/include/uapi/linux/acct.h
+ @@ -21,7 +21,9 @@
+ #include <asm/param.h>
+ #include <asm/byteorder.h>
+
+ -/*
+ +#define FOO
+ +
+ +/*
+ * comp_t is a 16-bit "floating" point number with a 3-bit base 8
+ * exponent and a 13-bit fraction.
+ * comp2_t is 24-bit with 5-bit base 2 exponent and 20 bit fraction
+ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+ EOF
+
+Now, let's use the script to validate::
+
+ % ./scripts/check-uapi.sh
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ Checking changes to UAPI headers between HEAD and dirty tree...
+ All 912 UAPI headers compatible with x86 appear to be backwards compatible
+
+Let's add another change that *might* break userspace::
+
+ cat << 'EOF' | patch -l -p1
+ --- a/include/uapi/linux/bpf.h
+ +++ b/include/uapi/linux/bpf.h
+ @@ -74,7 +74,7 @@ struct bpf_insn {
+ __u8 dst_reg:4; /* dest register */
+ __u8 src_reg:4; /* source register */
+ __s16 off; /* signed offset */
+ - __s32 imm; /* signed immediate constant */
+ + __u32 imm; /* unsigned immediate constant */
+ };
+
+ /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
+ EOF
+
+The script will catch this::
+
+ % ./scripts/check-uapi.sh
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ Checking changes to UAPI headers between HEAD and dirty tree...
+ ==== ABI differences detected in include/linux/bpf.h from HEAD -> dirty tree ====
+ [C] 'struct bpf_insn' changed:
+ type size hasn't changed
+ 1 data member change:
+ type of '__s32 imm' changed:
+ typedef name changed from __s32 to __u32 at int-ll64.h:27:1
+ underlying type 'int' changed:
+ type name changed from 'int' to 'unsigned int'
+ type size hasn't changed
+ ==================================================================================
+
+ error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+In this case, the script is reporting the type change because it could
+break a userspace program that passes in a negative number. Now, let's
+say you know that no userspace program could possibly be using a negative
+value in ``imm``, so changing to an unsigned type there shouldn't hurt
+anything. You can pass the ``-i`` flag to the script to ignore changes
+in which the userspace backwards compatibility is ambiguous::
+
+ % ./scripts/check-uapi.sh -i
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ Checking changes to UAPI headers between HEAD and dirty tree...
+ All 912 UAPI headers compatible with x86 appear to be backwards compatible
+
+Now, let's make a similar change that *will* break userspace::
+
+ cat << 'EOF' | patch -l -p1
+ --- a/include/uapi/linux/bpf.h
+ +++ b/include/uapi/linux/bpf.h
+ @@ -71,8 +71,8 @@ enum {
+
+ struct bpf_insn {
+ __u8 code; /* opcode */
+ - __u8 dst_reg:4; /* dest register */
+ __u8 src_reg:4; /* source register */
+ + __u8 dst_reg:4; /* dest register */
+ __s16 off; /* signed offset */
+ __s32 imm; /* signed immediate constant */
+ };
+ EOF
+
+Since we're re-ordering an existing struct member, there's no ambiguity,
+and the script will report the breakage even if you pass ``-i``::
+
+ % ./scripts/check-uapi.sh -i
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ Checking changes to UAPI headers between HEAD and dirty tree...
+ ==== ABI differences detected in include/linux/bpf.h from HEAD -> dirty tree ====
+ [C] 'struct bpf_insn' changed:
+ type size hasn't changed
+ 2 data member changes:
+ '__u8 dst_reg' offset changed from 8 to 12 (in bits) (by +4 bits)
+ '__u8 src_reg' offset changed from 12 to 8 (in bits) (by -4 bits)
+ ==================================================================================
+
+ error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Let's commit the breaking change, then commit the innocuous change::
+
+ % git commit -m 'Breaking UAPI change' include/uapi/linux/bpf.h
+ [detached HEAD f758e574663a] Breaking UAPI change
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+ % git commit -m 'Innocuous UAPI change' include/uapi/linux/acct.h
+ [detached HEAD 2e87df769081] Innocuous UAPI change
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+Now, let's run the script again with no arguments::
+
+ % ./scripts/check-uapi.sh
+ Installing user-facing UAPI headers from HEAD... OK
+ Installing user-facing UAPI headers from HEAD^1... OK
+ Checking changes to UAPI headers between HEAD^1 and HEAD...
+ All 912 UAPI headers compatible with x86 appear to be backwards compatible
+
+It doesn't catch any breaking change because, by default, it only
+compares ``HEAD`` to ``HEAD^1``. The breaking change was committed at
+``HEAD~1``, on top of ``HEAD~2``. If we wanted the search scope to go back
+further, we'd have to use the ``-p`` option to pass a different past
+reference. In this case, let's pass ``-p HEAD~2`` to the script so it checks
+UAPI changes between ``HEAD~2`` and ``HEAD``::
+
+ % ./scripts/check-uapi.sh -p HEAD~2
+ Installing user-facing UAPI headers from HEAD... OK
+ Installing user-facing UAPI headers from HEAD~2... OK
+ Checking changes to UAPI headers between HEAD~2 and HEAD...
+ ==== ABI differences detected in include/linux/bpf.h from HEAD~2 -> HEAD ====
+ [C] 'struct bpf_insn' changed:
+ type size hasn't changed
+ 2 data member changes:
+ '__u8 dst_reg' offset changed from 8 to 12 (in bits) (by +4 bits)
+ '__u8 src_reg' offset changed from 12 to 8 (in bits) (by -4 bits)
+ ==============================================================================
+
+ error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Alternatively, we could have also run with ``-b HEAD~``. This would set the
+base reference to ``HEAD~``, so the script would compare it to ``HEAD~^1``.
+
+Architecture-specific Headers
+-----------------------------
+
+Consider this change::
+
+ cat << 'EOF' | patch -l -p1
+ --- a/arch/arm64/include/uapi/asm/sigcontext.h
+ +++ b/arch/arm64/include/uapi/asm/sigcontext.h
+ @@ -70,6 +70,7 @@ struct sigcontext {
+ struct _aarch64_ctx {
+ __u32 magic;
+ __u32 size;
+ + __u32 new_var;
+ };
+
+ #define FPSIMD_MAGIC 0x46508001
+ EOF
+
+This is a change to an arm64-specific UAPI header file. In this example, I'm
+running the script from an x86 machine with an x86 compiler, so, by default,
+the script only checks x86-compatible UAPI header files::
+
+ % ./scripts/check-uapi.sh
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ No changes to UAPI headers were applied between HEAD and dirty tree
+
+With an x86 compiler, we can't check header files in ``arch/arm64``, so the
+script doesn't even try.
+
+If we want to check the header file, we'll have to use an arm64 compiler and
+set ``ARCH`` accordingly::
+
+ % CC=aarch64-linux-gnu-gcc ARCH=arm64 ./scripts/check-uapi.sh
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ Checking changes to UAPI headers between HEAD and dirty tree...
+ ==== ABI differences detected in include/asm/sigcontext.h from HEAD -> dirty tree ====
+ [C] 'struct _aarch64_ctx' changed:
+ type size changed from 64 to 96 (in bits)
+ 1 data member insertion:
+ '__u32 new_var', at offset 64 (in bits) at sigcontext.h:73:1
+ -- snip --
+ [C] 'struct zt_context' changed:
+ type size changed from 128 to 160 (in bits)
+ 2 data member changes (1 filtered):
+ '__u16 nregs' offset changed from 64 to 96 (in bits) (by +32 bits)
+ '__u16 __reserved[3]' offset changed from 80 to 112 (in bits) (by +32 bits)
+ =======================================================================================
+
+ error - 1/884 UAPI headers compatible with arm64 appear _not_ to be backwards compatible
+
+We can see that with ``ARCH`` and ``CC`` set properly for the file, the ABI
+change is reported correctly. Also notice that the total number of UAPI
+header files checked by the script changes. This is because the number
+of headers installed for arm64 platforms differs from that for x86.
+
+Cross-Dependency Breakages
+--------------------------
+
+Consider this change::
+
+ cat << 'EOF' | patch -l -p1
+ --- a/include/uapi/linux/types.h
+ +++ b/include/uapi/linux/types.h
+ @@ -52,7 +52,7 @@ typedef __u32 __bitwise __wsum;
+ #define __aligned_be64 __be64 __attribute__((aligned(8)))
+ #define __aligned_le64 __le64 __attribute__((aligned(8)))
+
+ -typedef unsigned __bitwise __poll_t;
+ +typedef unsigned short __bitwise __poll_t;
+
+ #endif /* __ASSEMBLY__ */
+ #endif /* _UAPI_LINUX_TYPES_H */
+ EOF
+
+Here, we're changing a ``typedef`` in ``types.h``. This doesn't break
+a UAPI in ``types.h``, but other UAPIs in the tree may break due to
+this change::
+
+ % ./scripts/check-uapi.sh
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ Checking changes to UAPI headers between HEAD and dirty tree...
+ ==== ABI differences detected in include/linux/eventpoll.h from HEAD -> dirty tree ====
+ [C] 'struct epoll_event' changed:
+ type size changed from 96 to 80 (in bits)
+ 2 data member changes:
+ type of '__poll_t events' changed:
+ underlying type 'unsigned int' changed:
+ type name changed from 'unsigned int' to 'unsigned short int'
+ type size changed from 32 to 16 (in bits)
+ '__u64 data' offset changed from 32 to 16 (in bits) (by -16 bits)
+ ========================================================================================
+ include/linux/eventpoll.h did not change between HEAD and dirty tree...
+ It's possible a change to one of the headers it includes caused this error:
+ #include <linux/fcntl.h>
+ #include <linux/types.h>
+
+Note that the script noticed the failing header file did not change,
+so it assumes one of its includes must have caused the breakage. Indeed,
+we can see ``linux/types.h`` is used from ``eventpoll.h``.
+
+UAPI Header Removals
+--------------------
+
+Consider this change::
+
+ cat << 'EOF' | patch -l -p1
+ diff --git a/include/uapi/asm-generic/Kbuild b/include/uapi/asm-generic/Kbuild
+ index ebb180aac74e..a9c88b0a8b3b 100644
+ --- a/include/uapi/asm-generic/Kbuild
+ +++ b/include/uapi/asm-generic/Kbuild
+ @@ -31,6 +31,6 @@ mandatory-y += stat.h
+ mandatory-y += statfs.h
+ mandatory-y += swab.h
+ mandatory-y += termbits.h
+ -mandatory-y += termios.h
+ +#mandatory-y += termios.h
+ mandatory-y += types.h
+ mandatory-y += unistd.h
+ EOF
+
+This change removes a UAPI header file from the install list. Let's run
+the script::
+
+ % ./scripts/check-uapi.sh
+ Installing user-facing UAPI headers from dirty tree... OK
+ Installing user-facing UAPI headers from HEAD... OK
+ Checking changes to UAPI headers between HEAD and dirty tree...
+ ==== UAPI header include/asm/termios.h was removed between HEAD and dirty tree ====
+
+ error - 1/912 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Removing a UAPI header is considered a breaking change, and the script
+will flag it as such.
+
+Checking Historic UAPI Compatibility
+------------------------------------
+
+You can use the ``-b`` and ``-p`` options to examine different chunks of your
+git tree. For example, to check all changed UAPI header files between tags
+v6.0 and v6.1, you'd run::
+
+ % ./scripts/check-uapi.sh -b v6.1 -p v6.0
+ Installing user-facing UAPI headers from v6.1... OK
+ Installing user-facing UAPI headers from v6.0... OK
+ Checking changes to UAPI headers between v6.0 and v6.1...
+
+ --- snip ---
+ error - 37/907 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+Note: Before v5.3, a header file needed by the script is not present,
+so the script is unable to check changes before then.
+
+You'll notice that the script detected many UAPI changes that are not
+backwards compatible. Knowing that kernel UAPIs are supposed to be stable
+forever, this is an alarming result. This brings us to the next section:
+caveats.
+
+Caveats
+=======
+
+The UAPI checker makes no assumptions about the author's intention, so some
+types of changes may be flagged even though they intentionally break UAPI.
+
+Removals For Refactoring or Deprecation
+---------------------------------------
+
+Sometimes drivers for very old hardware are removed, such as in this example::
+
+ % ./scripts/check-uapi.sh -b ba47652ba655
+ Installing user-facing UAPI headers from ba47652ba655... OK
+ Installing user-facing UAPI headers from ba47652ba655^1... OK
+ Checking changes to UAPI headers between ba47652ba655^1 and ba47652ba655...
+ ==== UAPI header include/linux/meye.h was removed between ba47652ba655^1 and ba47652ba655 ====
+
+ error - 1/910 UAPI headers compatible with x86 appear _not_ to be backwards compatible
+
+The script will always flag removals (even if they're intentional).
+
+Struct Expansions
+-----------------
+
+Depending on how a structure is handled in kernelspace, a change which
+expands a struct could be non-breaking.
+
+If a struct is used as the argument to an ioctl, then the kernel driver
+must be able to handle ioctl commands of any size. Beyond that, you need
+to be careful when copying data from the user. Say, for example, that
+``struct foo`` is changed like this::
+
+ struct foo {
+ __u64 a; /* added in version 1 */
+ + __u32 b; /* added in version 2 */
+ + __u32 c; /* added in version 2 */
+ }
+
+By default, the script will flag this kind of change for further review::
+
+ [C] 'struct foo' changed:
+ type size changed from 64 to 128 (in bits)
+ 2 data member insertions:
+ '__u32 b', at offset 64 (in bits)
+ '__u32 c', at offset 96 (in bits)
+
+However, it is possible that this change was made safely.
+
+If a userspace program was built with version 1, it will think
+``sizeof(struct foo)`` is 8. That size will be encoded in the
+ioctl value that gets sent to the kernel. If the kernel is built
+with version 2, it will think ``sizeof(struct foo)`` is 16.
+
+The kernel can use the ``_IOC_SIZE`` macro to get the size encoded
+in the ioctl code that the user passed in and then use
+``copy_struct_from_user()`` to safely copy the value::
+
+    int handle_ioctl(unsigned long cmd, unsigned long arg)
+    {
+            int ret;
+
+            switch (_IOC_NR(cmd)) {
+            case 0x01: {
+                    struct foo my_cmd;  /* size 16 in the kernel */
+
+                    ret = copy_struct_from_user(&my_cmd, sizeof(my_cmd),
+                                                (void __user *)arg, _IOC_SIZE(cmd));
+                    ...
+
+``copy_struct_from_user`` will zero the struct in the kernel and then copy
+only the bytes passed in from the user (leaving new members zeroized).
+If the user passed in a larger struct, the extra trailing bytes must be
+zero; otherwise ``copy_struct_from_user`` fails with ``-E2BIG``.
+
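+From the userspace side, nothing changes for a binary built against version 1;
+a sketch (the ioctl code and names here are hypothetical)::
+
+    #include <linux/types.h>
+    #include <sys/ioctl.h>
+
+    struct foo { __u64 a; };                       /* version-1 layout */
+    #define FOO_CMD _IOWR('F', 0x01, struct foo)   /* hypothetical code */
+
+    int send_foo(int fd)
+    {
+            struct foo arg = { .a = 42 };
+            /* _IOC_SIZE(FOO_CMD) encodes sizeof(struct foo) == 8 here; a
+             * version-2 kernel zero-fills its extra members b and c. */
+            return ioctl(fd, FOO_CMD, &arg);
+    }
+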
+If you know this situation is accounted for in the kernel code, you can
+pass ``-i`` to the script, and struct expansions like this will be ignored.
+
+Flex Array Migration
+--------------------
+
+While the script handles expansion into an existing flex array, it does
+still flag initial migration to flex arrays from 1-element fake flex
+arrays. For example::
+
+ struct foo {
+ __u32 x;
+ - __u32 flex[1]; /* fake flex */
+ + __u32 flex[]; /* real flex */
+ };
+
+This change would be flagged by the script::
+
+ [C] 'struct foo' changed:
+ type size changed from 64 to 32 (in bits)
+ 1 data member change:
+ type of '__u32 flex[1]' changed:
+ type name changed from '__u32[1]' to '__u32[]'
+ array type size changed from 32 to 'unknown'
+ array type subrange 1 changed length from 1 to 'unknown'
+
+At this time, there's no way to filter these types of changes, so be
+aware of this possible false positive.
+
+Summary
+-------
+
+While many types of false positives are filtered out by the script,
+it's possible there are some cases where the script flags a change
+which does not break UAPI. It's also possible a change which *does*
+break userspace would not be flagged by this script. While the script
+has been run on much of the kernel history, there could still be corner
+cases that are not accounted for.
+
+The intention is for this script to be used as a quick check for
+maintainers or automated tooling, not as the end-all authority on
+patch compatibility. It's best to remember: use your best judgment
+(and ideally a unit test in userspace) to make sure your UAPI changes
+are backwards-compatible!
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index 3d2286c683bc..efa49cdc8e2e 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -31,6 +31,7 @@ Documentation/dev-tools/testing-overview.rst
kselftest
kunit/index
ktap
+ checkuapi
.. only:: subproject and html
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index ab376b316c36..522214c7b43b 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -255,9 +255,21 @@ Contributing new tests (details)
TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
executable which is not tested by default.
+
TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
test.
+ TEST_INCLUDES is similar to TEST_FILES; it lists files which should be
+ included when exporting or installing the tests, with the following
+ differences:
+
+ * symlinks to files in other directories are preserved
+ * the part of paths below tools/testing/selftests/ is preserved when
+ copying the files to the output directory
+
+ TEST_INCLUDES is meant to list dependencies located in other directories of
+ the selftests hierarchy.
+
* First use the headers inside the kernel source and/or git repo, and then the
system headers. Headers for the kernel release as opposed to headers
installed by the distro on the system should be the primary focus to be able
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index a9efab50eed8..22955d56b379 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -671,8 +671,23 @@ Testing Static Functions
------------------------
If we do not want to expose functions or variables for testing, one option is to
-conditionally ``#include`` the test file at the end of your .c file. For
-example:
+conditionally export the used symbol. For example:
+
+.. code-block:: c
+
+ /* In my_file.c */
+
+ VISIBLE_IF_KUNIT int do_interesting_thing();
+ EXPORT_SYMBOL_IF_KUNIT(do_interesting_thing);
+
+ /* In my_file.h */
+
+ #if IS_ENABLED(CONFIG_KUNIT)
+ int do_interesting_thing(void);
+ #endif
+
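+Note that a test calling such a conditionally exported symbol may also need to
+import the KUnit export namespace (see include/kunit/visibility.h):
+
+.. code-block:: c
+
+	/* In my_file_test.c */
+	MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+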
+Alternatively, you could conditionally ``#include`` the test file at the end of
+your .c file. For example:
.. code-block:: c
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 2323fd5b7cda..129cf698fa8a 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -28,7 +28,10 @@ $(obj)/%.example.dts: $(src)/%.yaml check_dtschema_version FORCE
find_all_cmd = find $(srctree)/$(src) \( -name '*.yaml' ! \
-name 'processed-schema*' \)
-find_cmd = $(find_all_cmd) | sed 's|^$(srctree)/$(src)/||' | grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | sed 's|^|$(srctree)/$(src)/|'
+find_cmd = $(find_all_cmd) | \
+ sed 's|^$(srctree)/||' | \
+ grep -F -e "$(subst :," -e ",$(DT_SCHEMA_FILES))" | \
+ sed 's|^|$(srctree)/|'
CHK_DT_DOCS := $(shell $(find_cmd))
quiet_cmd_yamllint = LINT $(src)
diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
index b29ce598f9aa..9952e0ef7767 100644
--- a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
+++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ceva AHCI SATA Controller
maintainers:
- - Piyush Mehta <piyush.mehta@amd.com>
+ - Mubin Sayyed <mubin.sayyed@amd.com>
+ - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
The Ceva SATA controller mostly conforms to the AHCI interface with some
diff --git a/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml b/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
index 3eebc03a309b..ca7fdada3ff2 100644
--- a/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/google,gs101-clock.yaml
@@ -85,8 +85,8 @@ allOf:
clock-names:
items:
- - const: dout_cmu_misc_bus
- - const: dout_cmu_misc_sss
+ - const: bus
+ - const: sss
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/display/bridge/nxp,tda998x.yaml b/Documentation/devicetree/bindings/display/bridge/nxp,tda998x.yaml
index 21d995f29a1e..b8e9cf6ce4e6 100644
--- a/Documentation/devicetree/bindings/display/bridge/nxp,tda998x.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/nxp,tda998x.yaml
@@ -29,19 +29,22 @@ properties:
audio-ports:
description:
- Array of 8-bit values, 2 values per DAI (Documentation/sound/soc/dai.rst).
+ Array of 2 values per DAI (Documentation/sound/soc/dai.rst).
The implementation allows one or two DAIs.
If two DAIs are defined, they must be of different type.
$ref: /schemas/types.yaml#/definitions/uint32-matrix
+ minItems: 1
+ maxItems: 2
items:
- minItems: 1
items:
- description: |
The first value defines the DAI type: TDA998x_SPDIF or TDA998x_I2S
(see include/dt-bindings/display/tda998x.h).
+ enum: [ 1, 2 ]
- description:
The second value defines the tda998x AP_ENA reg content when the
DAI in question is used.
+ maximum: 0xff
'#sound-dai-cells':
enum: [ 0, 1 ]
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
index 25d53fde92e1..597c9cc6a312 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
@@ -85,7 +85,7 @@ allOf:
clocks:
minItems: 6
maxItems: 6
- regs:
+ reg:
minItems: 2
maxItems: 2
@@ -99,7 +99,7 @@ allOf:
clocks:
minItems: 4
maxItems: 4
- regs:
+ reg:
minItems: 2
maxItems: 2
@@ -116,7 +116,7 @@ allOf:
clocks:
minItems: 3
maxItems: 3
- regs:
+ reg:
minItems: 1
maxItems: 1
diff --git a/Documentation/devicetree/bindings/dma/dma-controller.yaml b/Documentation/devicetree/bindings/dma/dma-controller.yaml
index 04d150d4d15d..e6afca558c2d 100644
--- a/Documentation/devicetree/bindings/dma/dma-controller.yaml
+++ b/Documentation/devicetree/bindings/dma/dma-controller.yaml
@@ -19,19 +19,4 @@ properties:
additionalProperties: true
-examples:
- - |
- dma: dma-controller@48000000 {
- compatible = "ti,omap-sdma";
- reg = <0x48000000 0x1000>;
- interrupts = <0 12 0x4>,
- <0 13 0x4>,
- <0 14 0x4>,
- <0 15 0x4>;
- #dma-cells = <1>;
- dma-channels = <32>;
- dma-requests = <127>;
- dma-channel-mask = <0xfffe>;
- };
-
...
diff --git a/Documentation/devicetree/bindings/dma/dma-router.yaml b/Documentation/devicetree/bindings/dma/dma-router.yaml
index 346fe0fa4460..5ad2febc581e 100644
--- a/Documentation/devicetree/bindings/dma/dma-router.yaml
+++ b/Documentation/devicetree/bindings/dma/dma-router.yaml
@@ -40,15 +40,4 @@ required:
additionalProperties: true
-examples:
- - |
- sdma_xbar: dma-router@4a002b78 {
- compatible = "ti,dra7-dma-crossbar";
- reg = <0x4a002b78 0xfc>;
- #dma-cells = <1>;
- dma-requests = <205>;
- ti,dma-safe-map = <0>;
- dma-masters = <&sdma>;
- };
-
...
diff --git a/Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml b/Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml
new file mode 100644
index 000000000000..6a1b49a49a64
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/loongson,ls2x-apbdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson LS2X APB DMA controller
+
+description:
+ The Loongson LS2X APB DMA controller is used for transferring data
+ between system memory and the peripherals on the APB bus.
+
+maintainers:
+ - Binbin Zhou <zhoubinbin@loongson.cn>
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: loongson,ls2k1000-apbdma
+ - items:
+ - const: loongson,ls2k0500-apbdma
+ - const: loongson,ls2k1000-apbdma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#dma-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - '#dma-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/loongson,ls2k-clk.h>
+
+ dma-controller@1fe00c00 {
+ compatible = "loongson,ls2k1000-apbdma";
+ reg = <0x1fe00c00 0x8>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
index 4003dbe94940..877147e95ecc 100644
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
@@ -53,6 +53,9 @@ properties:
ADMA_CHn_CTRL register.
const: 1
+ dma-channel-mask:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index 88d0de3d1b46..deb64cb9ca3e 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -32,6 +32,8 @@ properties:
- qcom,sm8350-gpi-dma
- qcom,sm8450-gpi-dma
- qcom,sm8550-gpi-dma
+ - qcom,sm8650-gpi-dma
+ - qcom,x1e80100-gpi-dma
- const: qcom,sm6350-gpi-dma
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
index c284abc6784a..a42b6a26a6d3 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
@@ -16,7 +16,7 @@ properties:
compatible:
items:
- enum:
- - renesas,r9a07g043-dmac # RZ/G2UL
+ - renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- renesas,r9a07g054-dmac # RZ/V2L
- const: renesas,rz-dmac
diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
index a1af0b906365..3b22183a1a37 100644
--- a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
+++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
@@ -29,6 +29,7 @@ properties:
compatible:
items:
- enum:
+ - microchip,mpfs-pdma
- sifive,fu540-c000-pdma
- const: sifive,pdma0
description:
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
index 4ca300a42a99..27b8e1636560 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
@@ -37,11 +37,11 @@ properties:
reg:
minItems: 3
- maxItems: 5
+ maxItems: 9
reg-names:
minItems: 3
- maxItems: 5
+ maxItems: 9
"#dma-cells":
const: 3
@@ -141,7 +141,10 @@ allOf:
ti,sci-rm-range-tchan: false
reg:
- maxItems: 3
+ items:
+      - description: BCDMA Control/Status Registers region
+ - description: RX Channel Realtime Registers region
+ - description: Ring Realtime Registers region
reg-names:
items:
@@ -161,14 +164,29 @@ allOf:
properties:
reg:
minItems: 5
+ items:
+      - description: BCDMA Control/Status Registers region
+ - description: Block Copy Channel Realtime Registers region
+ - description: RX Channel Realtime Registers region
+ - description: TX Channel Realtime Registers region
+ - description: Ring Realtime Registers region
+ - description: Ring Configuration Registers region
+ - description: TX Channel Configuration Registers region
+ - description: RX Channel Configuration Registers region
+ - description: Block Copy Channel Configuration Registers region
reg-names:
+ minItems: 5
items:
- const: gcfg
- const: bchanrt
- const: rchanrt
- const: tchanrt
- const: ringrt
+ - const: ring
+ - const: tchan
+ - const: rchan
+ - const: bchan
required:
- ti,sci-rm-range-bchan
@@ -184,7 +202,11 @@ allOf:
ti,sci-rm-range-bchan: false
reg:
- maxItems: 4
+ items:
+      - description: BCDMA Control/Status Registers region
+ - description: RX Channel Realtime Registers region
+ - description: TX Channel Realtime Registers region
+ - description: Ring Realtime Registers region
reg-names:
items:
@@ -220,8 +242,13 @@ examples:
<0x0 0x4c000000 0x0 0x20000>,
<0x0 0x4a820000 0x0 0x20000>,
<0x0 0x4aa40000 0x0 0x20000>,
- <0x0 0x4bc00000 0x0 0x100000>;
- reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt";
+ <0x0 0x4bc00000 0x0 0x100000>,
+ <0x0 0x48600000 0x0 0x8000>,
+ <0x0 0x484a4000 0x0 0x2000>,
+ <0x0 0x484c2000 0x0 0x2000>,
+ <0x0 0x48420000 0x0 0x2000>;
+ reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt",
+ "ring", "tchan", "rchan", "bchan";
msi-parent = <&inta_main_dmss>;
#dma-cells = <3>;
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
index a69f62f854d8..11e064c02994 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
@@ -45,14 +45,28 @@ properties:
The second cell is the ASEL value for the channel
reg:
- maxItems: 4
+ minItems: 4
+ items:
+      - description: Packet DMA Control/Status Registers region
+ - description: RX Channel Realtime Registers region
+ - description: TX Channel Realtime Registers region
+ - description: Ring Realtime Registers region
+ - description: Ring Configuration Registers region
+ - description: TX Configuration Registers region
+ - description: RX Configuration Registers region
+ - description: RX Flow Configuration Registers region
reg-names:
+ minItems: 4
items:
- const: gcfg
- const: rchanrt
- const: tchanrt
- const: ringrt
+ - const: ring
+ - const: tchan
+ - const: rchan
+ - const: rflow
msi-parent: true
@@ -136,8 +150,14 @@ examples:
reg = <0x0 0x485c0000 0x0 0x100>,
<0x0 0x4a800000 0x0 0x20000>,
<0x0 0x4aa00000 0x0 0x40000>,
- <0x0 0x4b800000 0x0 0x400000>;
- reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
+ <0x0 0x4b800000 0x0 0x400000>,
+ <0x0 0x485e0000 0x0 0x20000>,
+ <0x0 0x484a0000 0x0 0x4000>,
+ <0x0 0x484c0000 0x0 0x2000>,
+ <0x0 0x48430000 0x0 0x4000>;
+ reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt",
+ "ring", "tchan", "rchan", "rflow";
+
msi-parent = <&inta_main_dmss>;
#dma-cells = <2>;
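Given the two-cell specifier described above (PSI-L thread ID plus the ASEL value for the channel), a PKTDMA consumer node would look roughly like this; the node name, compatible and thread IDs are illustrative assumptions, not values defined by this binding:

    /* Hypothetical PKTDMA consumer sketch. */
    crypto@4e00000 {
        compatible = "ti,example-crypto";      /* hypothetical */
        reg = <0x4e00000 0x1000>;
        dmas = <&main_pktdma 0xc500 0>,        /* <thread ID, ASEL> */
               <&main_pktdma 0x4500 0>;
        dma-names = "tx", "rx";
    };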
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 22f6c5e2f7f4..b18cf2bfdb5b 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
@@ -69,13 +69,24 @@ properties:
- ti,j721e-navss-mcu-udmap
reg:
- maxItems: 3
+ minItems: 3
+ items:
+      - description: UDMA-P Control/Status Registers region
+ - description: RX Channel Realtime Registers region
+ - description: TX Channel Realtime Registers region
+ - description: TX Configuration Registers region
+ - description: RX Configuration Registers region
+ - description: RX Flow Configuration Registers region
reg-names:
+ minItems: 3
items:
- const: gcfg
- const: rchanrt
- const: tchanrt
+ - const: tchan
+ - const: rchan
+ - const: rflow
msi-parent: true
@@ -158,8 +169,11 @@ examples:
compatible = "ti,am654-navss-main-udmap";
reg = <0x0 0x31150000 0x0 0x100>,
<0x0 0x34000000 0x0 0x100000>,
- <0x0 0x35000000 0x0 0x100000>;
- reg-names = "gcfg", "rchanrt", "tchanrt";
+ <0x0 0x35000000 0x0 0x100000>,
+ <0x0 0x30b00000 0x0 0x20000>,
+ <0x0 0x30c00000 0x0 0x8000>,
+ <0x0 0x30d00000 0x0 0x4000>;
+ reg-names = "gcfg", "rchanrt", "tchanrt", "tchan", "rchan", "rflow";
#dma-cells = <1>;
ti,ringacc = <&ringacc>;
diff --git a/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml b/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
index b1fd632718d4..bb93baa88879 100644
--- a/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
+++ b/Documentation/devicetree/bindings/gpio/xlnx,zynqmp-gpio-modepin.yaml
@@ -12,7 +12,8 @@ description:
PS_MODE). Every pin can be configured as input/output.
maintainers:
- - Piyush Mehta <piyush.mehta@amd.com>
+ - Mubin Sayyed <mubin.sayyed@amd.com>
+ - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
index 00b570c82903..60441f0c5d72 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
@@ -11,8 +11,13 @@ maintainers:
description: |
This interrupt controller is found in the Loongson-3 family of chips and
- Loongson-2K1000 chip, as the primary package interrupt controller which
+ Loongson-2K series chips, as the primary package interrupt controller which
can route local I/O interrupt to interrupt lines of cores.
+  Be aware of the following points:
+  1. The Loongson-2K0500 is a single-core CPU;
+  2. The Loongson-2K0500/2K1000 has 64 device interrupt sources as inputs, so
+  two nodes need to be defined in the dts/dtsi to describe the "0-31" and
+  "32-61" interrupt sources respectively (see the sketch below).
allOf:
- $ref: /schemas/interrupt-controller.yaml#
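A rough sketch of point 2 above, with one controller node per bank of interrupt sources. The compatible string, register addresses and parent wiring here are assumptions for illustration only, not values mandated by this schema:

    liointc0: interrupt-controller@1fe01400 { /* sources 0-31 */
        compatible = "loongson,liointc-2.0";
        reg = <0x1fe01400 0x40>,
              <0x1fe01040 0x8>,
              <0x1fe01140 0x8>;
        reg-names = "main", "isr0", "isr1";
        interrupt-controller;
        #interrupt-cells = <2>;
        interrupt-parent = <&cpuintc>;
        interrupts = <2>;
        interrupt-names = "int0";
        loongson,parent_int_map = <0xffffffff>, <0x00000000>,
                                  <0x00000000>, <0x00000000>;
    };

    liointc1: interrupt-controller@1fe01440 { /* sources 32-61 */
        compatible = "loongson,liointc-2.0";
        reg = <0x1fe01440 0x40>,
              <0x1fe01048 0x8>,
              <0x1fe01148 0x8>;
        reg-names = "main", "isr0", "isr1";
        interrupt-controller;
        #interrupt-cells = <2>;
        interrupt-parent = <&cpuintc>;
        interrupts = <3>;
        interrupt-names = "int1";
        loongson,parent_int_map = <0xffffffff>, <0x00000000>,
                                  <0x00000000>, <0x00000000>;
    };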
@@ -33,6 +38,7 @@ properties:
- const: main
- const: isr0
- const: isr1
+ minItems: 2
interrupt-controller: true
@@ -45,11 +51,9 @@ properties:
interrupt-names:
description: List of names for the parent interrupts.
items:
- - const: int0
- - const: int1
- - const: int2
- - const: int3
+ pattern: int[0-3]
minItems: 1
+ maxItems: 4
'#interrupt-cells':
const: 2
@@ -69,6 +73,7 @@ required:
- compatible
- reg
- interrupts
+ - interrupt-names
- interrupt-controller
- '#interrupt-cells'
- loongson,parent_int_map
@@ -86,7 +91,8 @@ if:
then:
properties:
reg:
- minItems: 3
+ minItems: 2
+ maxItems: 3
required:
- reg-names
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 55a8d1385e21..8a3c2398b10c 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -200,6 +200,18 @@ properties:
#trigger-source-cells property in the source node.
$ref: /schemas/types.yaml#/definitions/phandle-array
+ active-low:
+ type: boolean
+ description:
+      Makes the LED active low. To turn the LED ON, the line needs to be
+      driven to a low voltage instead of a high one.
+
+ inactive-high-impedance:
+ type: boolean
+ description:
+      Sets the LED to high-impedance mode to turn it OFF. Datasheets may
+      also describe this mode as tristate.
+
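As a usage sketch, a child node of an LED controller whose binding references this common schema (for example the bcm63138 controller, which drops its private active-low elsewhere in this series) might combine the two new flags like this; the pin number, color and function are illustrative assumptions:

    led@0 {
        reg = <0>;                    /* LED pin number */
        color = <LED_COLOR_ID_GREEN>;
        function = LED_FUNCTION_STATUS;
        active-low;                   /* ON = line driven low */
        inactive-high-impedance;      /* OFF = pin tristated */
    };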
# Required properties for flash LED child nodes:
flash-max-microamp:
description:
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
index 52252fb6bb32..bb20394fca5c 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-bcm63138.yaml
@@ -52,10 +52,6 @@ patternProperties:
maxItems: 1
description: LED pin number
- active-low:
- type: boolean
- description: Makes LED active low
-
required:
- reg
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml b/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
index 51cc0d82c12e..f3a3ef992929 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
@@ -78,10 +78,6 @@ patternProperties:
- maximum: 23
description: LED pin number (only LEDs 0 to 23 are valid).
- active-low:
- type: boolean
- description: Makes LED active low.
-
brcm,hardware-controlled:
type: boolean
description: Makes this LED hardware controlled.
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
index 6e51c6b91ee5..211ffc3c4a20 100644
--- a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt
@@ -25,8 +25,6 @@ LED sub-node required properties:
LED sub-node optional properties:
- label : see Documentation/devicetree/bindings/leds/common.txt
- - active-low : Boolean, makes LED active low.
- Default : false
- default-state : see
Documentation/devicetree/bindings/leds/common.txt
- linux,default-trigger : see
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
index bd6ec04a8727..a31a202afe5c 100644
--- a/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-pwm-multicolor.yaml
@@ -41,9 +41,7 @@ properties:
pwm-names: true
- active-low:
- description: For PWMs where the LED is wired to supply rather than ground.
- type: boolean
+ active-low: true
color: true
diff --git a/Documentation/devicetree/bindings/leds/leds-pwm.yaml b/Documentation/devicetree/bindings/leds/leds-pwm.yaml
index 7de6da58be3c..113b7c218303 100644
--- a/Documentation/devicetree/bindings/leds/leds-pwm.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-pwm.yaml
@@ -34,11 +34,6 @@ patternProperties:
Maximum brightness possible for the LED
$ref: /schemas/types.yaml#/definitions/uint32
- active-low:
- description:
- For PWMs where the LED is wired to supply rather than ground.
- type: boolean
-
required:
- pwms
- max-brightness
diff --git a/Documentation/devicetree/bindings/loongarch/cpus.yaml b/Documentation/devicetree/bindings/loongarch/cpus.yaml
new file mode 100644
index 000000000000..f175872995e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/loongarch/cpus.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/loongarch/cpus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LoongArch CPUs
+
+maintainers:
+ - Binbin Zhou <zhoubinbin@loongson.cn>
+
+description:
+  This document describes the list of LoongArch CPU cores that support FDT
+  and the layout of CPUs in a system through the "cpus" node.
+
+allOf:
+ - $ref: /schemas/cpu.yaml#
+
+properties:
+ compatible:
+ enum:
+ - loongson,la264
+ - loongson,la364
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/loongson,ls2k-clk.h>
+
+ cpus {
+ #size-cells = <0>;
+ #address-cells = <1>;
+
+ cpu@0 {
+ compatible = "loongson,la264";
+ device_type = "cpu";
+ reg = <0>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
+ };
+
+ cpu@1 {
+ compatible = "loongson,la264";
+ device_type = "cpu";
+ reg = <1>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/loongarch/loongson.yaml b/Documentation/devicetree/bindings/loongarch/loongson.yaml
new file mode 100644
index 000000000000..e1a4a97b7576
--- /dev/null
+++ b/Documentation/devicetree/bindings/loongarch/loongson.yaml
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/loongarch/loongson.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson SoC-based boards
+
+maintainers:
+ - Binbin Zhou <zhoubinbin@loongson.cn>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: Loongson-2K0500 processor based boards
+ items:
+ - const: loongson,ls2k0500-ref
+ - const: loongson,ls2k0500
+
+ - description: Loongson-2K1000 processor based boards
+ items:
+ - const: loongson,ls2k1000-ref
+ - const: loongson,ls2k1000
+
+ - description: Loongson-2K2000 processor based boards
+ items:
+ - const: loongson,ls2k2000-ref
+ - const: loongson,ls2k2000
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/media/cnm,wave521c.yaml b/Documentation/devicetree/bindings/media/cnm,wave521c.yaml
index 6d5569e77b7a..6a11c1d11fb5 100644
--- a/Documentation/devicetree/bindings/media/cnm,wave521c.yaml
+++ b/Documentation/devicetree/bindings/media/cnm,wave521c.yaml
@@ -17,7 +17,7 @@ properties:
compatible:
items:
- enum:
- - ti,k3-j721s2-wave521c
+ - ti,j721s2-wave521c
- const: cnm,wave521c
reg:
@@ -53,7 +53,7 @@ additionalProperties: false
examples:
- |
vpu: video-codec@12345678 {
- compatible = "ti,k3-j721s2-wave521c", "cnm,wave521c";
+ compatible = "ti,j721s2-wave521c", "cnm,wave521c";
reg = <0x12345678 0x1000>;
clocks = <&clks 42>;
interrupts = <42>;
diff --git a/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml b/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
index 75d8138298fb..660e2ca42daf 100644
--- a/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,asp-v2.0.yaml
@@ -17,6 +17,10 @@ properties:
oneOf:
- items:
- enum:
+ - brcm,bcm74165b0-asp
+ - const: brcm,asp-v2.2
+ - items:
+ - enum:
- brcm,bcm74165-asp
- const: brcm,asp-v2.1
- items:
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
index 6684810fcbf0..23dfe0838dca 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
@@ -24,6 +24,7 @@ properties:
- brcm,genet-mdio-v5
- brcm,asp-v2.0-mdio
- brcm,asp-v2.1-mdio
+ - brcm,asp-v2.2-mdio
- brcm,unimac-mdio
reg:
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index 170e23f0610d..20c0572c9853 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -28,6 +28,8 @@ Optional properties:
available with tcan4552/4553.
- device-wake-gpios: Wake up GPIO to wake up the TCAN device. Not
available with tcan4552/4553.
+ - wakeup-source: Leave the chip running when suspended, and configure
+ the RX interrupt to wake up the device.
Example:
tcan4x5x: tcan4x5x@0 {
@@ -42,4 +44,5 @@ tcan4x5x: tcan4x5x@0 {
device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+ wakeup-source;
};
diff --git a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
index 64d57c343e6f..8d4e5af6fd6c 100644
--- a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
+++ b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml
@@ -49,6 +49,10 @@ properties:
resets:
maxItems: 1
+ xlnx,has-ecc:
+ $ref: /schemas/types.yaml#/definitions/flag
+    description: CAN TX_OL, TX_TL and RX FIFOs have ECC support (AXI CAN)
+
required:
- compatible
- reg
@@ -137,6 +141,7 @@ examples:
interrupts = <GIC_SPI 59 IRQ_TYPE_EDGE_RISING>;
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
+ xlnx,has-ecc;
};
- |
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index bf8894a0257e..2c71e2cf3a2f 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -59,6 +59,11 @@ properties:
- cdns,gem # Generic
- cdns,macb # Generic
+ - items:
+ - enum:
+ - microchip,sam9x7-gem # Microchip SAM9X7 gigabit ethernet interface
+ - const: microchip,sama7g5-gem # Microchip SAMA7G5 gigabit ethernet interface
+
reg:
minItems: 1
items:
diff --git a/Documentation/devicetree/bindings/net/dsa/ar9331.txt b/Documentation/devicetree/bindings/net/dsa/ar9331.txt
deleted file mode 100644
index f824fdae0da2..000000000000
--- a/Documentation/devicetree/bindings/net/dsa/ar9331.txt
+++ /dev/null
@@ -1,147 +0,0 @@
-Atheros AR9331 built-in switch
-=============================
-
-It is a switch built-in to Atheros AR9331 WiSoC and addressable over internal
-MDIO bus. All PHYs are built-in as well.
-
-Required properties:
-
- - compatible: should be: "qca,ar9331-switch"
- - reg: Address on the MII bus for the switch.
- - resets : Must contain an entry for each entry in reset-names.
- - reset-names : Must include the following entries: "switch"
- - interrupt-parent: Phandle to the parent interrupt controller
- - interrupts: IRQ line for the switch
- - interrupt-controller: Indicates the switch is itself an interrupt
- controller. This is used for the PHY interrupts.
- - #interrupt-cells: must be 1
- - mdio: Container of PHY and devices on the switches MDIO bus.
-
-See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
-required and optional properties.
-Examples:
-
-eth0: ethernet@19000000 {
- compatible = "qca,ar9330-eth";
- reg = <0x19000000 0x200>;
- interrupts = <4>;
-
- resets = <&rst 9>, <&rst 22>;
- reset-names = "mac", "mdio";
- clocks = <&pll ATH79_CLK_AHB>, <&pll ATH79_CLK_AHB>;
- clock-names = "eth", "mdio";
-
- phy-mode = "mii";
- phy-handle = <&phy_port4>;
-};
-
-eth1: ethernet@1a000000 {
- compatible = "qca,ar9330-eth";
- reg = <0x1a000000 0x200>;
- interrupts = <5>;
- resets = <&rst 13>, <&rst 23>;
- reset-names = "mac", "mdio";
- clocks = <&pll ATH79_CLK_AHB>, <&pll ATH79_CLK_AHB>;
- clock-names = "eth", "mdio";
-
- phy-mode = "gmii";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- switch10: switch@10 {
- #address-cells = <1>;
- #size-cells = <0>;
-
- compatible = "qca,ar9331-switch";
- reg = <0x10>;
- resets = <&rst 8>;
- reset-names = "switch";
-
- interrupt-parent = <&miscintc>;
- interrupts = <12>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- switch_port0: port@0 {
- reg = <0x0>;
- ethernet = <&eth1>;
-
- phy-mode = "gmii";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
-
- switch_port1: port@1 {
- reg = <0x1>;
- phy-handle = <&phy_port0>;
- phy-mode = "internal";
- };
-
- switch_port2: port@2 {
- reg = <0x2>;
- phy-handle = <&phy_port1>;
- phy-mode = "internal";
- };
-
- switch_port3: port@3 {
- reg = <0x3>;
- phy-handle = <&phy_port2>;
- phy-mode = "internal";
- };
-
- switch_port4: port@4 {
- reg = <0x4>;
- phy-handle = <&phy_port3>;
- phy-mode = "internal";
- };
- };
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- interrupt-parent = <&switch10>;
-
- phy_port0: phy@0 {
- reg = <0x0>;
- interrupts = <0>;
- };
-
- phy_port1: phy@1 {
- reg = <0x1>;
- interrupts = <0>;
- };
-
- phy_port2: phy@2 {
- reg = <0x2>;
- interrupts = <0>;
- };
-
- phy_port3: phy@3 {
- reg = <0x3>;
- interrupts = <0>;
- };
-
- phy_port4: phy@4 {
- reg = <0x4>;
- interrupts = <0>;
- };
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index c963dc09e8e1..52acc15ebcbf 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -31,6 +31,7 @@ properties:
- microchip,ksz9893
- microchip,ksz9563
- microchip,ksz8563
+ - microchip,ksz8567
reset-gpios:
description:
diff --git a/Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml b/Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml
new file mode 100644
index 000000000000..fd9ddc59d38c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/qca,ar9331.yaml
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/qca,ar9331.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros AR9331 built-in switch
+
+maintainers:
+ - Oleksij Rempel <o.rempel@pengutronix.de>
+
+description:
+  The Qualcomm Atheros AR9331 is a switch built into the Atheros AR9331 WiSoC
+  and addressable over the internal MDIO bus. All PHYs are built-in as well.
+
+properties:
+ compatible:
+ const: qca,ar9331-switch
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 1
+
+ mdio:
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
+ properties:
+ interrupt-parent: true
+
+ patternProperties:
+ '(ethernet-)?phy@[0-4]+$':
+ type: object
+ unevaluatedProperties: false
+
+ properties:
+ reg: true
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: switch
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - mdio
+ - ports
+ - resets
+ - reset-names
+
+allOf:
+ - $ref: dsa.yaml#/$defs/ethernet-ports
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch10: switch@10 {
+ compatible = "qca,ar9331-switch";
+ reg = <0x10>;
+
+ interrupt-parent = <&miscintc>;
+ interrupts = <12>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ resets = <&rst 8>;
+ reset-names = "switch";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0x0>;
+ ethernet = <&eth1>;
+
+ phy-mode = "gmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+ phy-handle = <&phy_port0>;
+ phy-mode = "internal";
+ };
+
+ port@2 {
+ reg = <0x2>;
+ phy-handle = <&phy_port1>;
+ phy-mode = "internal";
+ };
+
+ port@3 {
+ reg = <0x3>;
+ phy-handle = <&phy_port2>;
+ phy-mode = "internal";
+ };
+
+ port@4 {
+ reg = <0x4>;
+ phy-handle = <&phy_port3>;
+ phy-mode = "internal";
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&switch10>;
+
+ phy_port0: ethernet-phy@0 {
+ reg = <0x0>;
+ interrupts = <0>;
+ };
+
+ phy_port1: ethernet-phy@1 {
+ reg = <0x1>;
+ interrupts = <0>;
+ };
+
+ phy_port2: ethernet-phy@2 {
+ reg = <0x2>;
+ interrupts = <0>;
+ };
+
+ phy_port3: ethernet-phy@3 {
+ reg = <0x3>;
+ interrupts = <0>;
+ };
+
+ phy_port4: ethernet-phy@4 {
+ reg = <0x4>;
+ interrupts = <0>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek.yaml b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
index cce692f57b08..70b6bda3cf98 100644
--- a/Documentation/devicetree/bindings/net/dsa/realtek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
@@ -59,6 +59,9 @@ properties:
description: GPIO to be used to reset the whole device
maxItems: 1
+ resets:
+ maxItems: 1
+
realtek,disable-leds:
type: boolean
description: |
@@ -127,7 +130,6 @@ else:
- mdc-gpios
- mdio-gpios
- mdio
- - reset-gpios
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index d14d123ad7a0..b2785b03139f 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -14,7 +14,6 @@ properties:
pattern: "^ethernet(@.*)?$"
label:
- $ref: /schemas/types.yaml#/definitions/string
description: Human readable label on a port of a box.
local-mac-address:
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy-package.yaml b/Documentation/devicetree/bindings/net/ethernet-phy-package.yaml
new file mode 100644
index 000000000000..e567101e6f38
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ethernet-phy-package.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ethernet-phy-package.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ethernet PHY Package Common Properties
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description:
+  PHY packages are multi-port Ethernet PHYs of the same family,
+  and each Ethernet PHY is affected by the global configuration
+  of the PHY package.
+
+  Each reg of the PHYs defined in the PHY package node is
+  absolute and describes the real address of the Ethernet PHY on
+  the MDIO bus.
+
+properties:
+ $nodename:
+ pattern: "^ethernet-phy-package@[a-f0-9]+$"
+
+ reg:
+ minimum: 0
+ maximum: 31
+ description:
+ The base ID number for the PHY package.
+ Commonly the ID of the first PHY in the PHY package.
+
+      Some PHYs in the PHY package might not be defined but
+      still occupy an ID on the device (just not attached to
+      anything); hence the PHY package reg might correspond
+      to an unattached PHY (offset 0).
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ ^ethernet-phy@[a-f0-9]+$:
+ $ref: ethernet-phy.yaml#
+
+required:
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: true
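The new schema ships without an example of its own; a minimal sketch of the intended layout, with illustrative MDIO addresses, could look like this:

    mdio {
        #address-cells = <1>;
        #size-cells = <0>;

        ethernet-phy-package@0 {
            #address-cells = <1>;
            #size-cells = <0>;
            reg = <0>;          /* base ID of the package */

            ethernet-phy@1 {
                reg = <1>;      /* absolute address on the MDIO bus */
            };

            ethernet-phy@2 {
                reg = <2>;
            };
        };
    };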
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
index 8948a11c994e..5536c06139ca 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -224,6 +224,9 @@ properties:
Can be omitted thus no delay is observed. Delay is in range of 1ms to 1000ms.
Other delays are invalid.
+ iommus:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/marvell,prestera.yaml b/Documentation/devicetree/bindings/net/marvell,prestera.yaml
index 5ea8b73663a5..16ff892f7bbd 100644
--- a/Documentation/devicetree/bindings/net/marvell,prestera.yaml
+++ b/Documentation/devicetree/bindings/net/marvell,prestera.yaml
@@ -78,8 +78,8 @@ examples:
pcie@0 {
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x0 0x0 0x0 0x0 0x0 0x0>;
- reg = <0x0 0x0 0x0 0x0 0x0 0x0>;
+ ranges = <0x02000000 0x0 0x100000 0x10000000 0x0 0x0>;
+ reg = <0x0 0x1000>;
device_type = "pci";
switch@0,0 {
diff --git a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
index 9cc236ec42f2..d0332eb76ad2 100644
--- a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
@@ -73,7 +73,7 @@ examples:
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
- i2c {
+ spi {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/qca,qca808x.yaml b/Documentation/devicetree/bindings/net/qca,qca808x.yaml
new file mode 100644
index 000000000000..e2552655902a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca,qca808x.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qca,qca808x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros QCA808X PHY
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description:
+ QCA808X PHYs can have up to 3 LEDs attached.
+ All 3 LEDs are disabled by default.
+  2 LEDs have dedicated pins, while the 3rd LED shares its pin
+  with the interrupt/GPIO function or serves as an additional LED.
+
+  By default this shared pin is set to the LED function.
+
+allOf:
+ - $ref: ethernet-phy.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ethernet-phy-id004d.d101
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@0 {
+ compatible = "ethernet-phy-id004d.d101";
+ reg = <0>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WAN;
+ default-state = "keep";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
index 7bdb412a0185..69a337c7e345 100644
--- a/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ethqos.yaml
@@ -37,12 +37,14 @@ properties:
items:
- description: Combined signal for various interrupt events
- description: The interrupt that occurs when Rx exits the LPI state
+      - description: The interrupt that occurs when a HW safety error is triggered
interrupt-names:
minItems: 1
items:
- const: macirq
- - const: eth_lpi
+ - enum: [eth_lpi, sfty]
+ - const: sfty
clocks:
maxItems: 4
@@ -89,8 +91,9 @@ examples:
<&gcc GCC_ETH_PTP_CLK>,
<&gcc GCC_ETH_RGMII_CLK>;
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "macirq", "eth_lpi";
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 782 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi", "sfty";
rx-fifo-depth = <4096>;
tx-fifo-depth = <4096>;
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index c30218684cfe..53cae71d9957 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -159,7 +159,7 @@ properties:
when the AP (not the modem) performs early initialization.
firmware-name:
- $ref: /schemas/types.yaml#/definitions/string
+ maxItems: 1
description:
If present, name (or relative path) of the file within the
firmware search path containing the firmware image used when
diff --git a/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml b/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
index 3407e909e8a7..0029e197a825 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipq4019-mdio.yaml
@@ -44,6 +44,21 @@ properties:
items:
- const: gcc_mdio_ahb_clk
+ clock-frequency:
+ description:
+      The MDIO bus clock that must be output by the MDIO bus hardware. If
+      absent, the default hardware values are used.
+
+      The MDC rate is fed by an external clock (fixed 100MHz) that is divided
+      internally. The default divider is /256, resulting in a default rate
+      of 390kHz.
+
+      To follow the 802.3 standard, which allows up to 2.5MHz by default,
+      1.5625MHz is selected by default if this property is not declared
+      and the divider is set to /256.
+ enum: [ 390625, 781250, 1562500, 3125000, 6250000, 12500000 ]
+ default: 1562500
+
required:
- compatible
- reg
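For example, picking one of the allowed rates from the enum explicitly; the unit address and reg value below are illustrative assumptions:

    mdio@90000 {
        compatible = "qcom,ipq4019-mdio";
        reg = <0x90000 0x64>;
        #address-cells = <1>;
        #size-cells = <0>;
        clock-frequency = <6250000>; /* one of the enum rates */
    };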
diff --git a/Documentation/devicetree/bindings/net/qcom,qca807x.yaml b/Documentation/devicetree/bindings/net/qcom,qca807x.yaml
new file mode 100644
index 000000000000..7290024024f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,qca807x.yaml
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qcom,qca807x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QCA807x Ethernet PHY
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+ - Robert Marko <robert.marko@sartura.hr>
+
+description: |
+  The Qualcomm QCA8072/5 Ethernet PHY is a PHY package of 2 or 5
+  IEEE 802.3 clause 22 compliant 10BASE-Te, 100BASE-TX and
+  1000BASE-T PHYs.
+
+  They feature 2 SerDes, one for a PSGMII or QSGMII connection with
+  the MAC, while the second one is SGMII for connection to a MAC or
+  fiber.
+
+  Both models have a combo port that supports 1000BASE-X and
+  100BASE-FX fiber.
+
+  Each PHY inside the QCA807x series has 4 digitally controlled
+  output-only pins that natively drive LEDs, for up to 2 attached
+  LEDs. Some vendors also use these 4 outputs as GPIOs without
+  attaching LEDs.
+
+  Note that the output pins can be set to drive either LEDs or
+  GPIOs; mixed definitions are not accepted.
+
+$ref: ethernet-phy-package.yaml#
+
+properties:
+ compatible:
+ enum:
+ - qcom,qca8072-package
+ - qcom,qca8075-package
+
+ qcom,package-mode:
+ description: |
+    The PHY package can be configured in 3 modes following this table:
+
+ First Serdes mode Second Serdes mode
+ Option 1 PSGMII for copper Disabled
+ ports 0-4
+ Option 2 PSGMII for copper 1000BASE-X / 100BASE-FX
+ ports 0-4
+ Option 3 QSGMII for copper SGMII for
+ ports 0-3 copper port 4
+
+ PSGMII mode (option 1 or 2) is configured dynamically based on
+ the presence of a connected SFP device.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - qsgmii
+ - psgmii
+ default: psgmii
+
+ qcom,tx-drive-strength-milliwatt:
+    description: Set the TX amplifier value in milliwatts.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [140, 160, 180, 200, 220,
+ 240, 260, 280, 300, 320,
+ 400, 500, 600]
+ default: 600
+
+patternProperties:
+ ^ethernet-phy@[a-f0-9]+$:
+ $ref: ethernet-phy.yaml#
+
+ properties:
+      qcom,dac-full-amplitude:
+        description:
+          Set the Analog MDI driver amplitude to FULL.
+
+          With this not defined, the amplitude is set to DSP
+          (the amplitude is adjusted based on cable length).
+
+          With this enabled and both qcom,dac-full-bias-current
+          and qcom,dac-disable-bias-current-tweak disabled,
+          the bias current is halved.
+        type: boolean
+
+      qcom,dac-full-bias-current:
+        description:
+          Set the Analog MDI driver bias current to FULL.
+
+          With this not defined, the bias current is set to DSP
+          (the bias current is adjusted based on cable length).
+
+          The actual bias current might differ with
+          qcom,dac-disable-bias-current-tweak disabled.
+        type: boolean
+
+      qcom,dac-disable-bias-current-tweak:
+        description: |
+          Disable the tweak to the Analog MDI driver bias
+          current.
+
+          With this not defined, bias current tweaks are enabled
+          by default.
+
+          With this enabled the following tweaks are NOT applied:
+          - With both FULL amplitude and FULL bias current: the bias
+            current is set to half.
+          - With only DSP amplitude: the bias current is set to half and
+            to 1/4 with cable < 10m.
+          - With DSP bias current (including both DSP amplitude and
+            DSP bias current): the bias current is half the detected
+            current with cable < 10m.
+        type: boolean
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ if:
+ required:
+ - gpio-controller
+ then:
+ properties:
+ leds: false
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy-package@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "qcom,qca8075-package";
+ reg = <0>;
+
+ qcom,package-mode = "qsgmii";
+
+ ethernet-phy@0 {
+ reg = <0>;
+
+ leds {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ default-state = "keep";
+ };
+ };
+ };
+
+ ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethernet-phy@2 {
+ reg = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ ethernet-phy@3 {
+ reg = <3>;
+ };
+
+ ethernet-phy@4 {
+ reg = <4>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index 890f7858d0dc..de7ba7f345a9 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -46,6 +46,7 @@ properties:
- enum:
- renesas,etheravb-r8a779a0 # R-Car V3U
- renesas,etheravb-r8a779g0 # R-Car V4H
+ - renesas,etheravb-r8a779h0 # R-Car V4M
- const: renesas,etheravb-rcar-gen4 # R-Car Gen4
- items:
diff --git a/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml b/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
index 475aff7714d6..ea35d19be829 100644
--- a/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,ethertsn.yaml
@@ -65,9 +65,11 @@ properties:
rx-internal-delay-ps:
enum: [0, 1800]
+ default: 0
tx-internal-delay-ps:
enum: [0, 2000]
+ default: 0
'#address-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 5c2769dc689a..6b0341a8e0ea 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -95,6 +95,7 @@ properties:
- snps,dwmac-5.20
- snps,dwxgmac
- snps,dwxgmac-2.10
+ - starfive,jh7100-dwmac
- starfive,jh7110-dwmac
reg:
@@ -107,13 +108,15 @@ properties:
- description: Combined signal for various interrupt events
- description: The interrupt to manage the remote wake-up packet detection
- description: The interrupt that occurs when Rx exits the LPI state
+      - description: The interrupt that occurs when a HW safety error is triggered
interrupt-names:
minItems: 1
items:
- const: macirq
- - enum: [eth_wake_irq, eth_lpi]
- - const: eth_lpi
+ - enum: [eth_wake_irq, eth_lpi, sfty]
+ - enum: [eth_wake_irq, eth_lpi, sfty]
+ - enum: [eth_wake_irq, eth_lpi, sfty]
clocks:
minItems: 1
@@ -144,10 +147,12 @@ properties:
- description: AHB reset
reset-names:
- minItems: 1
- items:
- - const: stmmaceth
- - const: ahb
+ oneOf:
+ - items:
+ - enum: [stmmaceth, ahb]
+ - items:
+ - const: stmmaceth
+ - const: ahb
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml b/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
index 5e7cfbbebce6..0d1962980f57 100644
--- a/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
@@ -16,16 +16,20 @@ select:
compatible:
contains:
enum:
+ - starfive,jh7100-dwmac
- starfive,jh7110-dwmac
required:
- compatible
properties:
compatible:
- items:
- - enum:
- - starfive,jh7110-dwmac
- - const: snps,dwmac-5.20
+ oneOf:
+ - items:
+ - const: starfive,jh7100-dwmac
+ - const: snps,dwmac
+ - items:
+ - const: starfive,jh7110-dwmac
+ - const: snps,dwmac-5.20
reg:
maxItems: 1
@@ -46,24 +50,6 @@ properties:
- const: tx
- const: gtx
- interrupts:
- minItems: 3
- maxItems: 3
-
- interrupt-names:
- minItems: 3
- maxItems: 3
-
- resets:
- items:
- - description: MAC Reset signal.
- - description: AHB Reset signal.
-
- reset-names:
- items:
- - const: stmmaceth
- - const: ahb
-
starfive,tx-use-rgmii-clk:
description:
Tx clock is provided by external rgmii clock.
@@ -94,6 +80,48 @@ required:
allOf:
- $ref: snps,dwmac.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: starfive,jh7100-dwmac
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 2
+ maxItems: 2
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: ahb
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: starfive,jh7110-dwmac
+ then:
+ properties:
+ interrupts:
+ minItems: 3
+ maxItems: 3
+
+ interrupt-names:
+ minItems: 3
+ maxItems: 3
+
+ resets:
+ minItems: 2
+
+ reset-names:
+ minItems: 2
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
index f07ae3173b03..d5bd93ee4dbb 100644
--- a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -7,8 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI SoC Ethernet Switch Controller (CPSW)
maintainers:
- - Grygorii Strashko <grygorii.strashko@ti.com>
- - Sekhar Nori <nsekhar@ti.com>
+ - Siddharth Vadapalli <s-vadapalli@ti.com>
+ - Ravi Gunasekaran <r-gunasekaran@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
description:
The 3-port switch gigabit ethernet subsystem provides ethernet packet
diff --git a/Documentation/devicetree/bindings/net/ti,dp83822.yaml b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
index db74474207ed..784866ea392b 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83822.yaml
+++ b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
@@ -62,6 +62,40 @@ properties:
for the PHY. The internal delay for the PHY is fixed to 3.5ns relative
to transmit data.
+ ti,cfg-dac-minus-one-bp:
+ description: |
+ DP83826 PHY only.
+ Sets the voltage ratio (with respect to the nominal value)
+ of the logical level -1 for the MLT-3 encoded TX data.
+ enum: [5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375, 10000,
+ 10625, 11250, 11875, 12500, 13125, 13750, 14375, 15000]
+ default: 10000
+
+ ti,cfg-dac-plus-one-bp:
+ description: |
+ DP83826 PHY only.
+ Sets the voltage ratio (with respect to the nominal value)
+ of the logical level +1 for the MLT-3 encoded TX data.
+ enum: [5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375, 10000,
+ 10625, 11250, 11875, 12500, 13125, 13750, 14375, 15000]
+ default: 10000
+
+ ti,rmii-mode:
+ description: |
+ If present, select the RMII operation mode. Two modes are
+ available:
+ - RMII master, where the PHY outputs a 50MHz reference clock which can
+ be connected to the MAC.
+ - RMII slave, where the PHY expects a 50MHz reference clock input
+ shared with the MAC.
+ The RMII operation mode can also be configured by its straps.
+ If the strap pin is not set correctly or not set at all, then this can be
+ used to configure it.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - master
+ - slave
+
required:
- reg
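A short sketch of forcing the new ti,rmii-mode from the PHY node when the straps are not set correctly; the MDIO address is an illustrative assumption:

    ethernet-phy@0 {
        reg = <0>;
        ti,rmii-mode = "master"; /* PHY outputs the 50MHz reference clock */
    };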
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index c9c25132d154..73ed5951d296 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -7,8 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: The TI AM654x/J721E/AM642x SoC Gigabit Ethernet MAC (Media Access Controller)
maintainers:
- - Grygorii Strashko <grygorii.strashko@ti.com>
- - Sekhar Nori <nsekhar@ti.com>
+ - Siddharth Vadapalli <s-vadapalli@ti.com>
+ - Ravi Gunasekaran <r-gunasekaran@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
description:
The TI AM654x/J721E SoC Gigabit Ethernet MAC (CPSW2G NUSS) has two ports
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
index 3e910d3b24a0..b1c875325776 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
@@ -7,8 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: The TI AM654x/J721E Common Platform Time Sync (CPTS) module
maintainers:
- - Grygorii Strashko <grygorii.strashko@ti.com>
- - Sekhar Nori <nsekhar@ti.com>
+ - Siddharth Vadapalli <s-vadapalli@ti.com>
+ - Ravi Gunasekaran <r-gunasekaran@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
description: |+
The TI AM654x/J721E CPTS module is used to facilitate host control of time
diff --git a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
index 14a262bcbf7c..627f8a6078c2 100644
--- a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
+++ b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
@@ -28,17 +28,15 @@ properties:
items:
- const: reboot-mode
-patternProperties:
- "^mode-.+":
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Vendor-specific mode value written to the mode register
+allOf:
+ - $ref: reboot-mode.yaml#
required:
- compatible
- nvmem-cells
- nvmem-cell-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
index 5e460128b0d1..fc8105a7b9b2 100644
--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
@@ -111,21 +111,24 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/input/linux-event-codes.h>
#include <dt-bindings/spmi/spmi.h>
- spmi_bus: spmi@c440000 {
+
+ spmi@c440000 {
reg = <0x0c440000 0x1100>;
#address-cells = <2>;
#size-cells = <0>;
- pmk8350: pmic@0 {
+
+ pmic@0 {
reg = <0x0 SPMI_USID>;
#address-cells = <1>;
#size-cells = <0>;
- pmk8350_pon: pon_hlos@1300 {
- reg = <0x1300>;
+
+ pon@800 {
compatible = "qcom,pm8998-pon";
+ reg = <0x800>;
pwrkey {
compatible = "qcom,pm8941-pwrkey";
- interrupts = < 0x0 0x8 0 IRQ_TYPE_EDGE_BOTH >;
+ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
debounce = <15625>;
bias-pull-up;
linux,code = <KEY_POWER>;
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml
index 9b1ffceefe3d..b6acff199cde 100644
--- a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml
@@ -29,12 +29,10 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
description: Offset in the register map for the mode register (in bytes)
-patternProperties:
- "^mode-.+":
- $ref: /schemas/types.yaml#/definitions/uint32
- description: Vendor-specific mode value written to the mode register
+allOf:
+ - $ref: reboot-mode.yaml#
-additionalProperties: false
+unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml
index 45792e216981..799831636194 100644
--- a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml
+++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.yaml
@@ -57,7 +57,7 @@ examples:
firmware {
zynqmp-firmware {
- zynqmp-power {
+ power-management {
compatible = "xlnx,zynqmp-power";
interrupts = <0 35 4>;
};
@@ -70,7 +70,7 @@ examples:
firmware {
zynqmp-firmware {
- zynqmp-power {
+ power-management {
compatible = "xlnx,zynqmp-power";
interrupt-parent = <&gic>;
interrupts = <0 35 4>;
diff --git a/Documentation/devicetree/bindings/power/supply/bq24190.yaml b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
index d3ebc9de8c0b..131b7e57d22f 100644
--- a/Documentation/devicetree/bindings/power/supply/bq24190.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq24190.yaml
@@ -20,6 +20,7 @@ properties:
- ti,bq24192
- ti,bq24192i
- ti,bq24196
+ - ti,bq24296
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
index 49db66801429..1f1b42dde94d 100644
--- a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.yaml
@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Zynq UltraScale+ MPSoC and Versal reset
maintainers:
- - Piyush Mehta <piyush.mehta@amd.com>
+ - Mubin Sayyed <mubin.sayyed@amd.com>
+ - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
description: |
The Zynq UltraScale+ MPSoC and Versal has several different resets.
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index 23646b684ea2..9d8670c00e3b 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -63,8 +63,8 @@ properties:
mmu-type:
description:
- Identifies the MMU address translation mode used on this
- hart. These values originate from the RISC-V Privileged
+ Identifies the largest MMU address translation mode supported by
+ this hart. These values originate from the RISC-V Privileged
Specification document, available from
https://riscv.org/specifications/
$ref: /schemas/types.yaml#/definitions/string
@@ -80,6 +80,11 @@ properties:
description:
The blocksize in bytes for the Zicbom cache operations.
+ riscv,cbop-block-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The blocksize in bytes for the Zicbop cache operations.
+
riscv,cboz-block-size:
$ref: /schemas/types.yaml#/definitions/uint32
description:
diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
index 27beedb98198..63d81dc895e5 100644
--- a/Documentation/devicetree/bindings/riscv/extensions.yaml
+++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
@@ -48,7 +48,7 @@ properties:
insensitive, letters in the riscv,isa string must be all
lowercase.
$ref: /schemas/types.yaml#/definitions/string
- pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$
+ pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[0-9a-z])+)?(?:_[hsxz](?:[0-9a-z])+)*$
deprecated: true
riscv,isa-base:
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
index 8108c564dd78..aa32dc950e72 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-spdif.yaml
@@ -22,6 +22,7 @@ properties:
- const: allwinner,sun6i-a31-spdif
- const: allwinner,sun8i-h3-spdif
- const: allwinner,sun50i-h6-spdif
+ - const: allwinner,sun50i-h616-spdif
- items:
- const: allwinner,sun8i-a83t-spdif
- const: allwinner,sun8i-h3-spdif
@@ -62,6 +63,8 @@ allOf:
enum:
- allwinner,sun6i-a31-spdif
- allwinner,sun8i-h3-spdif
+ - allwinner,sun50i-h6-spdif
+ - allwinner,sun50i-h616-spdif
then:
required:
@@ -73,7 +76,7 @@ allOf:
contains:
enum:
- allwinner,sun8i-h3-spdif
- - allwinner,sun50i-h6-spdif
+ - allwinner,sun50i-h616-spdif
then:
properties:
diff --git a/Documentation/devicetree/bindings/sound/google,sc7280-herobrine.yaml b/Documentation/devicetree/bindings/sound/google,sc7280-herobrine.yaml
index ec4b6e547ca6..cdcd7c6f21eb 100644
--- a/Documentation/devicetree/bindings/sound/google,sc7280-herobrine.yaml
+++ b/Documentation/devicetree/bindings/sound/google,sc7280-herobrine.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Google SC7280-Herobrine ASoC sound card driver
maintainers:
- - Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
- Judy Hsiao <judyhsiao@chromium.org>
description:
diff --git a/Documentation/devicetree/bindings/sound/tas2562.yaml b/Documentation/devicetree/bindings/sound/tas2562.yaml
index f01c0dde0cf7..d28c102c0ce7 100644
--- a/Documentation/devicetree/bindings/sound/tas2562.yaml
+++ b/Documentation/devicetree/bindings/sound/tas2562.yaml
@@ -18,7 +18,6 @@ description: |
Specifications about the audio amplifier can be found at:
https://www.ti.com/lit/gpn/tas2562
- https://www.ti.com/lit/gpn/tas2563
https://www.ti.com/lit/gpn/tas2564
https://www.ti.com/lit/gpn/tas2110
@@ -29,7 +28,6 @@ properties:
compatible:
enum:
- ti,tas2562
- - ti,tas2563
- ti,tas2564
- ti,tas2110
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
index a69e6c223308..976238689249 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
@@ -5,36 +5,46 @@
$id: http://devicetree.org/schemas/sound/ti,tas2781.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Texas Instruments TAS2781 SmartAMP
+title: Texas Instruments TAS2563/TAS2781 SmartAMP
maintainers:
- Shenghao Ding <shenghao-ding@ti.com>
-description:
- The TAS2781 is a mono, digital input Class-D audio amplifier
- optimized for efficiently driving high peak power into small
- loudspeakers. An integrated on-chip DSP supports Texas Instruments
- Smart Amp speaker protection algorithm. The integrated speaker
- voltage and current sense provides for real time
+description: |
+ The TAS2563/TAS2781 is a mono, digital input Class-D audio
+ amplifier optimized for efficiently driving high peak power into
+ small loudspeakers. An integrated on-chip DSP supports Texas
+ Instruments Smart Amp speaker protection algorithm. The
+ integrated speaker voltage and current sense provides for real time
monitoring of loudspeaker behavior.
-allOf:
- - $ref: dai-common.yaml#
+ Specifications about the audio amplifier can be found at:
+ https://www.ti.com/lit/gpn/tas2563
+ https://www.ti.com/lit/gpn/tas2781
properties:
compatible:
- enum:
- - ti,tas2781
+ description: |
+ ti,tas2563: 6.1-W Boosted Class-D Audio Amplifier With Integrated
+ DSP and IV Sense, 16/20/24/32bit stereo I2S or multichannel TDM.
+
+ ti,tas2781: 24-V Class-D Amplifier with Real Time Integrated Speaker
+ Protection and Audio Processing, 16/20/24/32bit stereo I2S or
+ multichannel TDM.
+ oneOf:
+ - items:
+ - enum:
+ - ti,tas2563
+ - const: ti,tas2781
+ - enum:
+ - ti,tas2781
reg:
description:
- I2C address, in multiple tas2781s case, all the i2c address
+      I2C address, in multiple-AMP case, all the I2C addresses
aggregate as one Audio Device to support multiple audio slots.
maxItems: 8
minItems: 1
- items:
- minimum: 0x38
- maximum: 0x3f
reset-gpios:
maxItems: 1
@@ -49,6 +59,44 @@ required:
- compatible
- reg
+allOf:
+ - $ref: dai-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,tas2563
+ then:
+ properties:
+ reg:
+ description:
+            I2C address, in multiple-AMP case, all the I2C addresses
+ aggregate as one Audio Device to support multiple audio slots.
+ maxItems: 4
+ minItems: 1
+ items:
+ minimum: 0x4c
+ maximum: 0x4f
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,tas2781
+ then:
+ properties:
+ reg:
+ description:
+            I2C address, in multiple-AMP case, all the I2C addresses
+ aggregate as one Audio Device to support multiple audio slots.
+ maxItems: 8
+ minItems: 1
+ items:
+ minimum: 0x38
+ maximum: 0x3f
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
index 4b6c20fc8194..fced6f2d8ecb 100644
--- a/Documentation/devicetree/bindings/timer/sifive,clint.yaml
+++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
@@ -33,6 +33,7 @@ properties:
- sifive,fu540-c000-clint # SiFive FU540
- starfive,jh7100-clint # StarFive JH7100
- starfive,jh7110-clint # StarFive JH7110
+ - starfive,jh8100-clint # StarFive JH8100
- const: sifive,clint0 # SiFive CLINT v0 IP block
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml b/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml
index fbd235650e52..2e92bcdeb423 100644
--- a/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml
+++ b/Documentation/devicetree/bindings/timer/thead,c900-aclint-mtimer.yaml
@@ -17,7 +17,12 @@ properties:
- const: thead,c900-aclint-mtimer
reg:
- maxItems: 1
+ items:
+ - description: MTIMECMP Registers
+
+ reg-names:
+ items:
+ - const: mtimecmp
interrupts-extended:
minItems: 1
@@ -28,6 +33,7 @@ additionalProperties: false
required:
- compatible
- reg
+ - reg-names
- interrupts-extended
examples:
@@ -39,5 +45,6 @@ examples:
<&cpu3intc 7>,
<&cpu4intc 7>;
reg = <0xac000000 0x00010000>;
+ reg-names = "mtimecmp";
};
...
diff --git a/Documentation/devicetree/bindings/tpm/tpm-common.yaml b/Documentation/devicetree/bindings/tpm/tpm-common.yaml
index 90390624a8be..3c1241b2a43f 100644
--- a/Documentation/devicetree/bindings/tpm/tpm-common.yaml
+++ b/Documentation/devicetree/bindings/tpm/tpm-common.yaml
@@ -42,7 +42,7 @@ properties:
resets:
description: Reset controller to reset the TPM
- $ref: /schemas/types.yaml#/definitions/phandle
+ maxItems: 1
reset-gpios:
description: Output GPIO pin to reset the TPM
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
index 88cc1e3a0c88..b2b509b3944d 100644
--- a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -55,9 +55,12 @@ properties:
samsung,sysreg:
$ref: /schemas/types.yaml#/definitions/phandle-array
- description: Should be phandle/offset pair. The phandle to the syscon node
- which indicates the FSYSx sysreg interface and the offset of
- the control register for UFS io coherency setting.
+ items:
+ - items:
+ - description: phandle to FSYSx sysreg node
+ - description: offset of the control register for UFS io coherency setting
+ description:
+ Phandle and offset to the FSYSx sysreg for UFS io coherency setting.
dma-coherent: true
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
index bb373eb025a5..00f87a558c7d 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx SuperSpeed DWC3 USB SoC controller
maintainers:
- - Piyush Mehta <piyush.mehta@amd.com>
+ - Mubin Sayyed <mubin.sayyed@amd.com>
+ - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
index 6d4cfd943f58..445183d9d6db 100644
--- a/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
+++ b/Documentation/devicetree/bindings/usb/microchip,usb5744.yaml
@@ -16,8 +16,9 @@ description:
USB 2.0 traffic.
maintainers:
- - Piyush Mehta <piyush.mehta@amd.com>
- Michal Simek <michal.simek@amd.com>
+ - Mubin Sayyed <mubin.sayyed@amd.com>
+ - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml b/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
index 868dffe314bc..a7f75fe36665 100644
--- a/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/xlnx,usb2.yaml
@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx udc controller
maintainers:
- - Piyush Mehta <piyush.mehta@amd.com>
+ - Mubin Sayyed <mubin.sayyed@amd.com>
+ - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
properties:
compatible:
diff --git a/Documentation/driver-api/dpll.rst b/Documentation/driver-api/dpll.rst
index e3d593841aa7..ea8d16600e16 100644
--- a/Documentation/driver-api/dpll.rst
+++ b/Documentation/driver-api/dpll.rst
@@ -545,7 +545,7 @@ In such scenario, dpll device input signal shall be also configurable
to drive dpll with signal recovered from the PHY netdevice.
This is done by exposing a pin to the netdevice - attaching pin to the
netdevice itself with
-``netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)``.
+``dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)``.
Exposed pin id handle ``DPLL_A_PIN_ID`` is then identifiable by the user
as it is attached to rtnetlink respond to get ``RTM_NEWLINK`` command in
nested attribute ``IFLA_DPLL_PIN``.
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
index 8fd22073a847..d222bd3ee749 100644
--- a/Documentation/features/vm/TLB/arch-support.txt
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -20,7 +20,7 @@
| openrisc: | .. |
| parisc: | TODO |
| powerpc: | TODO |
- | riscv: | TODO |
+ | riscv: | ok |
| s390: | TODO |
| sh: | TODO |
| sparc: | TODO |
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 48b95d04f72d..4cc657d743f7 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -295,7 +295,6 @@ through which it can issue requests and negotiate::
struct netfs_request_ops {
void (*init_request)(struct netfs_io_request *rreq, struct file *file);
void (*free_request)(struct netfs_io_request *rreq);
- int (*begin_cache_operation)(struct netfs_io_request *rreq);
void (*expand_readahead)(struct netfs_io_request *rreq);
bool (*clamp_length)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -317,20 +316,6 @@ The operations are as follows:
[Optional] This is called as the request is being deallocated so that the
filesystem can clean up any state it has attached there.
- * ``begin_cache_operation()``
-
- [Optional] This is called to ask the network filesystem to call into the
- cache (if present) to initialise the caching state for this read. The netfs
- library module cannot access the cache directly, so the cache should call
- something like fscache_begin_read_operation() to do this.
-
- The cache gets to store its state in ->cache_resources and must set a table
- of operations of its own there (though of a different type).
-
- This should return 0 on success and an error code otherwise. If an error is
- reported, the operation may proceed anyway, just without local caching (only
- out of memory and interruption errors cause failure here).
-
* ``expand_readahead()``
[Optional] This is called to allow the filesystem to expand the size of a
@@ -460,14 +445,14 @@ When implementing a local cache to be used by the read helpers, two things are
required: some way for the network filesystem to initialise the caching for a
read request and a table of operations for the helpers to call.
-The network filesystem's ->begin_cache_operation() method is called to set up a
-cache and this must call into the cache to do the work. If using fscache, for
-example, the cache would call::
+To begin a cache operation on an fscache object, the following function is
+called::
int fscache_begin_read_operation(struct netfs_io_request *rreq,
struct fscache_cookie *cookie);
-passing in the request pointer and the cookie corresponding to the file.
+passing in the request pointer and the cookie corresponding to the file. This
+fills in the cache resources mentioned below.
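As a rough illustration, a network filesystem might begin the cache operation
from its ->init_request() hook. This is only a sketch: the myfs naming, the
inode wrapper and the cookie field are assumptions, not part of the netfs API::

	static void myfs_init_request(struct netfs_io_request *rreq,
				      struct file *file)
	{
		/* myfs_i() and ->cookie are hypothetical filesystem-private bits. */
		struct myfs_inode *mi = myfs_i(file_inode(file));

		/* On success this populates rreq->cache_resources; on most
		 * errors the read simply proceeds without local caching.
		 */
		fscache_begin_read_operation(rreq, mi->cookie);
	}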
The netfs_io_request object contains a place for the cache to hang its
state::
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 1c244866041a..165514401441 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -145,7 +145,9 @@ filesystem, an overlay filesystem needs to record in the upper filesystem
that files have been removed. This is done using whiteouts and opaque
directories (non-directories are always opaque).
-A whiteout is created as a character device with 0/0 device number.
+A whiteout is created as a character device with 0/0 device number or
+as a zero-size regular file with the xattr "trusted.overlay.whiteout".
+
When a whiteout is found in the upper level of a merged directory, any
matching name in the lower level is ignored, and the whiteout itself
is also hidden.
@@ -154,6 +156,13 @@ A directory is made opaque by setting the xattr "trusted.overlay.opaque"
to "y". Where the upper filesystem contains an opaque directory, any
directory in the lower filesystem with the same name is ignored.
+An opaque directory should not contain any whiteouts, because they do not
+serve any purpose. A merge directory containing regular files with the xattr
+"trusted.overlay.whiteout" should additionally be marked by setting the xattr
+"trusted.overlay.opaque" to "x" on the merge directory itself.
+This is needed to avoid the overhead of checking for the
+"trusted.overlay.whiteout" xattr on all entries during readdir in the common case.
+
readdir
-------
@@ -534,8 +543,9 @@ A lower dir with a regular whiteout will always be handled by the overlayfs
mount, so to support storing an effective whiteout file in an overlayfs mount an
alternative form of whiteout is supported. This form is a regular, zero-size
file with the "overlay.whiteout" xattr set, inside a directory with the
-"overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs,
-but can be used by userspace tools (like containers) that generate lower layers.
+"overlay.opaque" xattr set to "x" (see `whiteouts and opaque directories`_).
+These alternative whiteouts are never created by overlayfs, but can be used by
+userspace tools (like containers) that generate lower layers.
These alternative whiteouts can be escaped using the standard xattr escape
mechanism in order to properly nest to any depth.
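For userspace layer generators, both whiteout forms can be produced with
ordinary syscalls. A minimal sketch follows; the helper names are illustrative,
the device-node form needs CAP_MKNOD, and the xattr form is assumed here to be
recognized by the xattr's presence rather than its value::

	#include <fcntl.h>
	#include <sys/stat.h>
	#include <sys/sysmacros.h>
	#include <sys/xattr.h>
	#include <unistd.h>

	/* Classic whiteout: 0/0 character device. */
	static int whiteout_chardev(const char *path)
	{
		return mknod(path, S_IFCHR | 0000, makedev(0, 0));
	}

	/* Alternative whiteout: zero-size regular file plus xattr, placed in
	 * a directory whose "overlay.opaque" xattr is set to "x".
	 */
	static int whiteout_xattr(const char *path)
	{
		int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0000);

		if (fd < 0)
			return -1;
		close(fd);
		return setxattr(path, "trusted.overlay.whiteout", "", 0, 0);
	}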
diff --git a/Documentation/filesystems/smb/ksmbd.rst b/Documentation/filesystems/smb/ksmbd.rst
index 7bed96d794fc..6b30e43a0d11 100644
--- a/Documentation/filesystems/smb/ksmbd.rst
+++ b/Documentation/filesystems/smb/ksmbd.rst
@@ -73,15 +73,14 @@ Auto Negotiation Supported.
Compound Request Supported.
Oplock Cache Mechanism Supported.
SMB2 leases(v1 lease) Supported.
-Directory leases(v2 lease) Planned for future.
+Directory leases(v2 lease) Supported.
Multi-credits Supported.
NTLM/NTLMv2 Supported.
HMAC-SHA256 Signing Supported.
Secure negotiate Supported.
Signing Update Supported.
Pre-authentication integrity Supported.
-SMB3 encryption(CCM, GCM) Supported. (CCM and GCM128 supported, GCM256 in
- progress)
+SMB3 encryption(CCM, GCM) Supported. (CCM/GCM128 and CCM/GCM256 supported)
SMB direct(RDMA) Supported.
SMB3 Multi-channel Partially Supported. Planned to implement
replay/retry mechanisms for future.
@@ -112,6 +111,10 @@ DCE/RPC support Partially Supported. a few calls(NetShareEnumAll,
for Witness protocol e.g.)
ksmbd/nfsd interoperability Planned for future. The features that ksmbd
support are Leases, Notify, ACLs and Share modes.
+SMB3.1.1 Compression Planned for future.
+SMB3.1.1 over QUIC Planned for future.
+Signing/Encryption over RDMA Planned for future.
+SMB3.1.1 GMAC signing support Planned for future.
============================== =================================================
diff --git a/Documentation/kbuild/Kconfig.recursion-issue-01 b/Documentation/kbuild/Kconfig.recursion-issue-01
index e8877db0461f..ac49836d8ecf 100644
--- a/Documentation/kbuild/Kconfig.recursion-issue-01
+++ b/Documentation/kbuild/Kconfig.recursion-issue-01
@@ -16,13 +16,13 @@
# that are possible for CORE. So for example if CORE_BELL_A_ADVANCED is 'y',
# CORE must be 'y' too.
#
-# * What influences CORE_BELL_A_ADVANCED ?
+# * What influences CORE_BELL_A_ADVANCED?
#
# As the name implies CORE_BELL_A_ADVANCED is an advanced feature of
# CORE_BELL_A so naturally it depends on CORE_BELL_A. So if CORE_BELL_A is 'y'
# we know CORE_BELL_A_ADVANCED can be 'y' too.
#
-# * What influences CORE_BELL_A ?
+# * What influences CORE_BELL_A?
#
# CORE_BELL_A depends on CORE, so CORE influences CORE_BELL_A.
#
@@ -34,7 +34,7 @@
# the "recursive dependency detected" error.
#
# Reading the Documentation/kbuild/Kconfig.recursion-issue-01 file it may be
-# obvious that an easy to solution to this problem should just be the removal
+# obvious that an easy solution to this problem should just be the removal
# of the "select CORE" from CORE_BELL_A_ADVANCED as that is implicit already
# since CORE_BELL_A depends on CORE. Recursive dependency issues are not always
# so trivial to resolve, we provide another example below of practical
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index c58f7153fcf8..3ebd50d78820 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -126,8 +126,9 @@ properties:
Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
type: string
enum-name:
- description: Name for the enum type of the attribute.
- type: string
+ description: |
+        Name for the enum type of the attribute; if empty, no name will be used.
+ type: [ string, "null" ]
doc:
description: Documentation of the space.
type: string
@@ -261,14 +262,16 @@ properties:
the prefix with the upper case name of the command, with dashes replaced by underscores.
type: string
enum-name:
- description: Name for the enum type with commands.
- type: string
+ description: |
+        Name for the enum type with commands; if empty, no name will be used.
+ type: [ string, "null" ]
async-prefix:
description: Same as name-prefix but used to render notifications and events to separate enum.
type: string
async-enum:
- description: Name for the enum type with notifications/events.
- type: string
+ description: |
+        Name for the enum type with notifications/events; if empty, no name will be used.
+ type: [ string, "null" ]
list:
description: List of commands
type: array
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
index 938703088306..1d3fe3637707 100644
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -168,8 +168,9 @@ properties:
Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
type: string
enum-name:
- description: Name for the enum type of the attribute.
- type: string
+ description: |
+        Name for the enum type of the attribute; if empty, no name will be used.
+ type: [ string, "null" ]
doc:
description: Documentation of the space.
type: string
@@ -304,14 +305,16 @@ properties:
the prefix with the upper case name of the command, with dashes replaced by underscores.
type: string
enum-name:
- description: Name for the enum type with commands.
- type: string
+ description: |
+        Name for the enum type with commands; if empty, no name will be used.
+ type: [ string, "null" ]
async-prefix:
description: Same as name-prefix but used to render notifications and events to separate enum.
type: string
async-enum:
- description: Name for the enum type with notifications/events.
- type: string
+ description: |
+        Name for the enum type with notifications/events; if empty, no name will be used.
+ type: [ string, "null" ]
# Start genetlink-legacy
fixed-header: &fixed-header
description: |
diff --git a/Documentation/netlink/netlink-raw.yaml b/Documentation/netlink/netlink-raw.yaml
index 04b92f1a5cd6..40fc8ab1ee44 100644
--- a/Documentation/netlink/netlink-raw.yaml
+++ b/Documentation/netlink/netlink-raw.yaml
@@ -152,14 +152,23 @@ properties:
the right formatting mechanism when displaying values of this
type.
enum: [ hex, mac, fddi, ipv4, ipv6, uuid ]
+ struct:
+ description: Name of the nested struct type.
+ type: string
if:
properties:
type:
- oneOf:
- - const: binary
- - const: pad
+ const: pad
then:
required: [ len ]
+ if:
+ properties:
+ type:
+ const: binary
+ then:
+ oneOf:
+ - required: [ len ]
+ - required: [ struct ]
# End genetlink-legacy
attribute-sets:
@@ -180,8 +189,9 @@ properties:
Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
type: string
enum-name:
- description: Name for the enum type of the attribute.
- type: string
+ description: |
+        Name for the enum type of the attribute; if empty, no name will be used.
+ type: [ string, "null" ]
doc:
description: Documentation of the space.
type: string
@@ -362,14 +372,16 @@ properties:
the prefix with the upper case name of the command, with dashes replaced by underscores.
type: string
enum-name:
- description: Name for the enum type with commands.
- type: string
+ description: |
+        Name for the enum type with commands; if empty, no name will be used.
+ type: [ string, "null" ]
async-prefix:
description: Same as name-prefix but used to render notifications and events to separate enum.
type: string
async-enum:
- description: Name for the enum type with notifications/events.
- type: string
+ description: |
+        Name for the enum type with notifications/events; if empty, no name will be used.
+ type: [ string, "null" ]
# Start genetlink-legacy
fixed-header: &fixed-header
description: |
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index b14aed18065f..95b0eb1486bf 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -52,6 +52,40 @@ definitions:
dpll's lock-state shall remain DPLL_LOCK_STATUS_UNLOCKED)
render-max: true
-
+ type: enum
+ name: lock-status-error
+ doc: |
+      if the previous status change was caused by a failure, this provides
+      information about the dpll device lock status error.
+      Valid values for the DPLL_A_LOCK_STATUS_ERROR attribute
+ entries:
+ -
+ name: none
+ doc: |
+ dpll device lock status was changed without any error
+ value: 1
+ -
+ name: undefined
+ doc: |
+          dpll device lock status was changed due to an undefined error.
+          The driver fills in this value when it is not able
+          to obtain a more exact error type.
+ -
+ name: media-down
+ doc: |
+          dpll device lock status was changed because the associated
+          media went down.
+ This may happen for example if dpll device was previously
+ locked on an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
+ -
+ name: fractional-frequency-offset-too-high
+ doc: |
+ the FFO (Fractional Frequency Offset) between the RX and TX
+          symbol rates on the media became too high.
+ This may happen for example if dpll device was previously
+ locked on an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
+ render-max: true
+ -
type: const
name: temp-divider
value: 1000
@@ -214,6 +248,10 @@ attribute-sets:
name: type
type: u32
enum: type
+ -
+ name: lock-status-error
+ type: u32
+ enum: lock-status-error
-
name: pin
enum-name: dpll_a_pin
@@ -274,6 +312,7 @@ attribute-sets:
-
name: capabilities
type: u32
+ enum: pin-capabilities
-
name: parent-device
type: nest
@@ -379,13 +418,12 @@ operations:
- mode
- mode-supported
- lock-status
+ - lock-status-error
- temp
- clock-id
- type
dump:
- pre: dpll-lock-dumpit
- post: dpll-unlock-dumpit
reply: *dev-attrs
-
@@ -473,8 +511,6 @@ operations:
- fractional-frequency-offset
dump:
- pre: dpll-lock-dumpit
- post: dpll-unlock-dumpit
request:
attributes:
- id
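For orientation, the new lock-status-error definition renders into the
generated uAPI header along these lines. This is a sketch of the usual ynl
generator output, not a copy of the real header::

	enum dpll_lock_status_error {
		DPLL_LOCK_STATUS_ERROR_NONE = 1,
		DPLL_LOCK_STATUS_ERROR_UNDEFINED,
		DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN,
		DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH,

		/* private: kept past the last entry because of render-max */
		__DPLL_LOCK_STATUS_ERROR_MAX,
		DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1)
	};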
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index 49f90cfb4698..af525ed29792 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -292,13 +292,14 @@ operations:
-
name: get-addr
doc: Get endpoint information
- attribute-set: endpoint
+ attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
do: &get-addr-attrs
request:
attributes:
- addr
+ - token
reply:
attributes:
- addr
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 3addac970680..76352dbd2be4 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -74,6 +74,10 @@ definitions:
name: queue-type
type: enum
entries: [ rx, tx ]
+ -
+ name: qstats-scope
+ type: flags
+ entries: [ queue ]
attribute-sets:
-
@@ -265,6 +269,73 @@ attribute-sets:
doc: ID of the NAPI instance which services this queue.
type: u32
+ -
+ name: qstats
+ doc: |
+ Get device statistics, scoped to a device or a queue.
+ These statistics extend (and partially duplicate) statistics available
+ in struct rtnl_link_stats64.
+ Value of the `scope` attribute determines how statistics are
+        The value of the `scope` attribute determines how statistics are
+ represent the total number of events since last explicit reset of
+ the device (i.e. not a reconfiguration like changing queue count).
+ When reported per-queue, however, the statistics may not add
+ up to the total number of events, will only be reported for currently
+        active objects, and will likely report the number of events since the
+        last reconfiguration.
+ attributes:
+ -
+ name: ifindex
+ doc: ifindex of the netdevice to which stats belong.
+ type: u32
+ checks:
+ min: 1
+ -
+ name: queue-type
+        doc: Queue type (rx or tx) for the given queue-id.
+ type: u32
+ enum: queue-type
+ -
+ name: queue-id
+ doc: Queue ID, if stats are scoped to a single queue instance.
+ type: u32
+ -
+ name: scope
+ doc: |
+ What object type should be used to iterate over the stats.
+ type: uint
+ enum: qstats-scope
+ -
+ name: rx-packets
+ doc: |
+ Number of wire packets successfully received and passed to the stack.
+ For drivers supporting XDP, XDP is considered the first layer
+ of the stack, so packets consumed by XDP are still counted here.
+ type: uint
+ value: 8 # reserve some attr ids in case we need more metadata later
+ -
+ name: rx-bytes
+ doc: Successfully received bytes, see `rx-packets`.
+ type: uint
+ -
+ name: tx-packets
+ doc: |
+ Number of wire packets successfully sent. Packet is considered to be
+ successfully sent once it is in device memory (usually this means
+ the device has issued a DMA completion for the packet).
+ type: uint
+ -
+ name: tx-bytes
+ doc: Successfully sent bytes, see `tx-packets`.
+ type: uint
+ -
+ name: rx-alloc-fail
+ doc: |
+ Number of times skb or buffer allocation failed on the Rx datapath.
+          Allocation failure may or may not result in a packet drop, depending
+          on the driver implementation and whether the system recovers quickly.
+ type: uint
+
operations:
list:
-
@@ -405,6 +476,26 @@ operations:
attributes:
- ifindex
reply: *napi-get-op
+ -
+ name: qstats-get
+ doc: |
+      Get / dump fine-grained statistics. Which statistics are reported
+ depends on the device and the driver, and whether the driver stores
+ software counters per-queue.
+ attribute-set: qstats
+ dump:
+ request:
+ attributes:
+ - scope
+ reply:
+ attributes:
+ - ifindex
+ - queue-type
+ - queue-id
+ - rx-packets
+ - rx-bytes
+ - tx-packets
+ - tx-bytes
mcast-groups:
list:
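To make the scope semantics concrete, here is a hedged C fragment of what a
qstats-get dump request looks like on the wire. The NETDEV_* names follow the
generator's conventions for this spec, encoding the uint scope attribute as 32
bits is an assumption of the sketch, and family id resolution via nlctrl is
sketched after the nlctrl spec below::

	#include <linux/genetlink.h>
	#include <linux/netdev.h>
	#include <linux/netlink.h>
	#include <string.h>

	/* Fill buf with a per-queue qstats dump request; returns its length. */
	static int build_qstats_dump(void *buf, __u16 family_id)
	{
		struct nlmsghdr *nh = buf;
		struct genlmsghdr *gh = NLMSG_DATA(nh);
		struct nlattr *scope = (void *)((char *)gh + GENL_HDRLEN);
		__u32 val = NETDEV_QSTATS_SCOPE_QUEUE;

		nh->nlmsg_type = family_id;
		nh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
		gh->cmd = NETDEV_CMD_QSTATS_GET;
		gh->version = 1;

		scope->nla_type = NETDEV_A_QSTATS_SCOPE;
		scope->nla_len = NLA_HDRLEN + sizeof(val);
		memcpy((char *)scope + NLA_HDRLEN, &val, sizeof(val));

		nh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN) + NLMSG_ALIGN(scope->nla_len);
		return nh->nlmsg_len;
	}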
diff --git a/Documentation/netlink/specs/nlctrl.yaml b/Documentation/netlink/specs/nlctrl.yaml
new file mode 100644
index 000000000000..b1632b95f725
--- /dev/null
+++ b/Documentation/netlink/specs/nlctrl.yaml
@@ -0,0 +1,206 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: nlctrl
+protocol: genetlink-legacy
+uapi-header: linux/genetlink.h
+
+doc: |
+ genetlink meta-family that exposes information about all genetlink
+ families registered in the kernel (including itself).
+
+definitions:
+ -
+ name: op-flags
+ type: flags
+ enum-name:
+ entries:
+ - admin-perm
+ - cmd-cap-do
+ - cmd-cap-dump
+ - cmd-cap-haspol
+ - uns-admin-perm
+ -
+ name: attr-type
+ enum-name: netlink-attribute-type
+ type: enum
+ entries:
+ - invalid
+ - flag
+ - u8
+ - u16
+ - u32
+ - u64
+ - s8
+ - s16
+ - s32
+ - s64
+ - binary
+ - string
+ - nul-string
+ - nested
+ - nested-array
+ - bitfield32
+ - sint
+ - uint
+
+attribute-sets:
+ -
+ name: ctrl-attrs
+ name-prefix: ctrl-attr-
+ attributes:
+ -
+ name: family-id
+ type: u16
+ -
+ name: family-name
+ type: string
+ -
+ name: version
+ type: u32
+ -
+ name: hdrsize
+ type: u32
+ -
+ name: maxattr
+ type: u32
+ -
+ name: ops
+ type: array-nest
+ nested-attributes: op-attrs
+ -
+ name: mcast-groups
+ type: array-nest
+ nested-attributes: mcast-group-attrs
+ -
+ name: policy
+ type: nest-type-value
+ type-value: [ policy-id, attr-id ]
+ nested-attributes: policy-attrs
+ -
+ name: op-policy
+ type: nest-type-value
+ type-value: [ op-id ]
+ nested-attributes: op-policy-attrs
+ -
+ name: op
+ type: u32
+ -
+ name: mcast-group-attrs
+ name-prefix: ctrl-attr-mcast-grp-
+ enum-name:
+ attributes:
+ -
+ name: name
+ type: string
+ -
+ name: id
+ type: u32
+ -
+ name: op-attrs
+ name-prefix: ctrl-attr-op-
+ enum-name:
+ attributes:
+ -
+ name: id
+ type: u32
+ -
+ name: flags
+ type: u32
+ enum: op-flags
+ enum-as-flags: true
+ -
+ name: policy-attrs
+ name-prefix: nl-policy-type-attr-
+ enum-name:
+ attributes:
+ -
+ name: type
+ type: u32
+ enum: attr-type
+ -
+ name: min-value-s
+ type: s64
+ -
+ name: max-value-s
+ type: s64
+ -
+ name: min-value-u
+ type: u64
+ -
+ name: max-value-u
+ type: u64
+ -
+ name: min-length
+ type: u32
+ -
+ name: max-length
+ type: u32
+ -
+ name: policy-idx
+ type: u32
+ -
+ name: policy-maxtype
+ type: u32
+ -
+ name: bitfield32-mask
+ type: u32
+ -
+ name: mask
+ type: u64
+ -
+ name: pad
+ type: pad
+ -
+ name: op-policy-attrs
+ name-prefix: ctrl-attr-policy-
+ enum-name:
+ attributes:
+ -
+ name: do
+ type: u32
+ -
+ name: dump
+ type: u32
+
+operations:
+ enum-model: directional
+ name-prefix: ctrl-cmd-
+ list:
+ -
+ name: getfamily
+ doc: Get / dump genetlink families
+ attribute-set: ctrl-attrs
+ do:
+ request:
+ value: 3
+ attributes:
+ - family-name
+ reply: &all-attrs
+ value: 1
+ attributes:
+ - family-id
+ - family-name
+ - hdrsize
+ - maxattr
+ - mcast-groups
+ - ops
+ - version
+ dump:
+ reply: *all-attrs
+ -
+ name: getpolicy
+ doc: Get / dump genetlink policies
+ attribute-set: ctrl-attrs
+ dump:
+ request:
+ value: 10
+ attributes:
+ - family-name
+ - family-id
+ - op
+ reply:
+ value: 10
+ attributes:
+ - family-id
+ - op-policy
+ - policy
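Since this spec documents the resolver itself, a small self-contained C sketch
of a getfamily request follows; error handling is elided and parsing the
reply's family-id attribute is left as a comment::

	#include <linux/genetlink.h>
	#include <linux/netlink.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	static int resolve_family(const char *name)
	{
		struct {
			struct nlmsghdr nh;
			struct genlmsghdr gh;
			char attrs[64];
		} req = {0};
		struct nlattr *na = (struct nlattr *)req.attrs;
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);

		req.nh.nlmsg_type = GENL_ID_CTRL;	/* nlctrl itself */
		req.nh.nlmsg_flags = NLM_F_REQUEST;
		req.gh.cmd = CTRL_CMD_GETFAMILY;
		req.gh.version = 2;

		na->nla_type = CTRL_ATTR_FAMILY_NAME;
		na->nla_len = NLA_HDRLEN + strlen(name) + 1;
		memcpy((char *)na + NLA_HDRLEN, name, strlen(name) + 1);

		req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN) + NLMSG_ALIGN(na->nla_len);
		send(fd, &req, req.nh.nlmsg_len, 0);
		/* ... recv() and walk the attrs for CTRL_ATTR_FAMILY_ID (u16) ... */
		close(fd);
		return 0;
	}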
diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml
index 1ad01d52a863..8e4d19adee8c 100644
--- a/Documentation/netlink/specs/rt_link.yaml
+++ b/Documentation/netlink/specs/rt_link.yaml
@@ -942,6 +942,10 @@ attribute-sets:
-
name: gro-ipv4-max-size
type: u32
+ -
+ name: dpll-pin
+ type: nest
+ nested-attributes: link-dpll-pin-attrs
-
name: af-spec-attrs
attributes:
@@ -1627,6 +1631,12 @@ attribute-sets:
-
name: used
type: u8
+ -
+ name: link-dpll-pin-attrs
+ attributes:
+ -
+ name: id
+ type: u32
sub-messages:
-
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index 4346fa402fc9..324fa182cd14 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -48,21 +48,28 @@ definitions:
-
name: bytes
type: u64
+ doc: Number of enqueued bytes
-
name: packets
type: u32
+ doc: Number of enqueued packets
-
name: drops
type: u32
+ doc: Packets dropped because of lack of resources
-
name: overlimits
type: u32
+ doc: |
+ Number of throttle events when this flow goes out of allocated bandwidth
-
name: bps
type: u32
+ doc: Current flow byte rate
-
name: pps
type: u32
+ doc: Current flow packet rate
-
name: qlen
type: u32
@@ -112,6 +119,7 @@ definitions:
-
name: limit
type: u32
+ doc: Queue length; bytes for bfifo, packets for pfifo
-
name: tc-htb-opt
type: struct
@@ -119,11 +127,11 @@ definitions:
-
name: rate
type: binary
- len: 12
+ struct: tc-ratespec
-
name: ceil
type: binary
- len: 12
+ struct: tc-ratespec
-
name: buffer
type: u32
@@ -149,15 +157,19 @@ definitions:
-
name: rate2quantum
type: u32
+ doc: bps->quantum divisor
-
name: defcls
type: u32
+ doc: Default class number
-
name: debug
type: u32
+ doc: Debug flags
-
name: direct-pkts
type: u32
+        doc: Count of non-shaped packets
-
name: tc-gred-qopt
type: struct
@@ -165,15 +177,19 @@ definitions:
-
name: limit
type: u32
+ doc: HARD maximal queue length in bytes
-
name: qth-min
type: u32
+ doc: Min average length threshold in bytes
-
name: qth-max
type: u32
+ doc: Max average length threshold in bytes
-
name: DP
type: u32
+ doc: Up to 2^32 DPs
-
name: backlog
type: u32
@@ -195,15 +211,19 @@ definitions:
-
name: Wlog
type: u8
+ doc: log(W)
-
name: Plog
type: u8
+ doc: log(P_max / (qth-max - qth-min))
-
name: Scell_log
type: u8
+        doc: Cell size for idle damping
-
name: prio
type: u8
+ doc: Priority of this VQ
-
name: packets
type: u32
@@ -266,9 +286,11 @@ definitions:
-
name: bands
type: u16
+ doc: Number of bands
-
name: max-bands
type: u16
+ doc: Maximum number of queues
-
name: tc-netem-qopt
type: struct
@@ -276,21 +298,138 @@ definitions:
-
name: latency
type: u32
+ doc: Added delay in microseconds
-
name: limit
type: u32
+        doc: FIFO limit in packets
-
name: loss
type: u32
+ doc: Random packet loss (0=none, ~0=100%)
-
name: gap
type: u32
+ doc: Re-ordering gap (0 for none)
-
name: duplicate
type: u32
+ doc: Random packet duplication (0=none, ~0=100%)
-
name: jitter
type: u32
+ doc: Random jitter latency in microseconds
+ -
+ name: tc-netem-gimodel
+    doc: State transition probabilities for the 4-state model
+ type: struct
+ members:
+ -
+ name: p13
+ type: u32
+ -
+ name: p31
+ type: u32
+ -
+ name: p32
+ type: u32
+ -
+ name: p14
+ type: u32
+ -
+ name: p23
+ type: u32
+ -
+ name: tc-netem-gemodel
+    doc: Gilbert-Elliott models
+ type: struct
+ members:
+ -
+ name: p
+ type: u32
+ -
+ name: r
+ type: u32
+ -
+ name: h
+ type: u32
+ -
+ name: k1
+ type: u32
+ -
+ name: tc-netem-corr
+ type: struct
+ members:
+ -
+ name: delay-corr
+ type: u32
+ doc: Delay correlation
+ -
+ name: loss-corr
+ type: u32
+ doc: Packet loss correlation
+ -
+ name: dup-corr
+ type: u32
+ doc: Duplicate correlation
+ -
+ name: tc-netem-reorder
+ type: struct
+ members:
+ -
+ name: probability
+ type: u32
+ -
+ name: correlation
+ type: u32
+ -
+ name: tc-netem-corrupt
+ type: struct
+ members:
+ -
+ name: probability
+ type: u32
+ -
+ name: correlation
+ type: u32
+ -
+ name: tc-netem-rate
+ type: struct
+ members:
+ -
+ name: rate
+ type: u32
+ -
+ name: packet-overhead
+ type: s32
+ -
+ name: cell-size
+ type: u32
+ -
+ name: cell-overhead
+ type: s32
+ -
+ name: tc-netem-slot
+ type: struct
+ members:
+ -
+ name: min-delay
+ type: s64
+ -
+ name: max-delay
+ type: s64
+ -
+ name: max-packets
+ type: s32
+ -
+ name: max-bytes
+ type: s32
+ -
+ name: dist-delay
+ type: s64
+ -
+ name: dist-jitter
+ type: s64
-
name: tc-plug-qopt
type: struct
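The two loss-model structs above mirror long-standing uAPI layouts; the shape
is copied here for orientation, with linux/pkt_sched.h remaining the
authoritative definition::

	struct tc_netem_gimodel {	/* 4-state model transition probabilities */
		__u32	p13;
		__u32	p31;
		__u32	p32;
		__u32	p14;
		__u32	p23;
	};

	struct tc_netem_gemodel {	/* Gilbert-Elliott model parameters */
		__u32	p;
		__u32	r;
		__u32	h;
		__u32	k1;
	};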
@@ -307,11 +446,13 @@ definitions:
members:
-
name: bands
- type: u16
+ type: u32
+ doc: Number of bands
-
name: priomap
type: binary
len: 16
+ doc: Map of logical priority -> PRIO band
-
name: tc-red-qopt
type: struct
@@ -319,21 +460,27 @@ definitions:
-
name: limit
type: u32
+ doc: Hard queue length in packets
-
name: qth-min
type: u32
+ doc: Min average threshold in packets
-
name: qth-max
type: u32
+ doc: Max average threshold in packets
-
name: Wlog
type: u8
+ doc: log(W)
-
name: Plog
type: u8
+ doc: log(P_max / (qth-max - qth-min))
-
name: Scell-log
type: u8
+ doc: Cell size for idle damping
-
name: flags
type: u8
@@ -369,71 +516,128 @@ definitions:
name: penalty-burst
type: u32
-
- name: tc-sfq-qopt-v1 # TODO nested structs
+ name: tc-sfq-qopt
type: struct
members:
-
name: quantum
type: u32
+ doc: Bytes per round allocated to flow
-
name: perturb-period
type: s32
+ doc: Period of hash perturbation
-
name: limit
type: u32
+ doc: Maximal packets in queue
-
name: divisor
type: u32
+ doc: Hash divisor
-
name: flows
type: u32
+ doc: Maximal number of flows
+ -
+ name: tc-sfqred-stats
+ type: struct
+ members:
+ -
+ name: prob-drop
+ type: u32
+ doc: Early drops, below max threshold
+ -
+ name: forced-drop
+ type: u32
+ doc: Early drops, after max threshold
+ -
+ name: prob-mark
+ type: u32
+ doc: Marked packets, below max threshold
+ -
+ name: forced-mark
+ type: u32
+ doc: Marked packets, after max threshold
+ -
+ name: prob-mark-head
+ type: u32
+ doc: Marked packets, below max threshold
+ -
+ name: forced-mark-head
+ type: u32
+ doc: Marked packets, after max threshold
+ -
+ name: tc-sfq-qopt-v1
+ type: struct
+ members:
+ -
+ name: v0
+ type: binary
+ struct: tc-sfq-qopt
-
name: depth
type: u32
+ doc: Maximum number of packets per flow
-
name: headdrop
type: u32
-
name: limit
type: u32
+ doc: HARD maximal flow queue length in bytes
-
name: qth-min
type: u32
+ doc: Min average length threshold in bytes
-
- name: qth-mac
+ name: qth-max
type: u32
+ doc: Max average length threshold in bytes
-
name: Wlog
type: u8
+ doc: log(W)
-
name: Plog
type: u8
+ doc: log(P_max / (qth-max - qth-min))
-
name: Scell-log
type: u8
+ doc: Cell size for idle damping
-
name: flags
type: u8
-
name: max-P
type: u32
+        doc: Probability, high resolution
-
- name: prob-drop
- type: u32
+ name: stats
+ type: binary
+ struct: tc-sfqred-stats
+ -
+ name: tc-ratespec
+ type: struct
+ members:
-
- name: forced-drop
- type: u32
+ name: cell-log
+ type: u8
-
- name: prob-mark
- type: u32
+ name: linklayer
+ type: u8
-
- name: forced-mark
- type: u32
+ name: overhead
+ type: u8
-
- name: prob-mark-head
- type: u32
+ name: cell-align
+ type: u8
-
- name: forced-mark-head
+ name: mpu
+ type: u8
+ -
+ name: rate
type: u32
-
name: tc-tbf-qopt
@@ -441,12 +645,12 @@ definitions:
members:
-
name: rate
- type: binary # TODO nested struct tc_ratespec
- len: 12
+ type: binary
+ struct: tc-ratespec
-
name: peakrate
- type: binary # TODO nested struct tc_ratespec
- len: 12
+ type: binary
+ struct: tc-ratespec
-
name: limit
type: u32
@@ -491,9 +695,663 @@ definitions:
-
name: interval
type: s8
+ doc: Sampling period
-
name: ewma-log
type: u8
+ doc: The log() of measurement window weight
+ -
+ name: tc-choke-xstats
+ type: struct
+ members:
+ -
+ name: early
+ type: u32
+ doc: Early drops
+ -
+ name: pdrop
+ type: u32
+ doc: Drops due to queue limits
+ -
+ name: other
+ type: u32
+ doc: Drops due to drop() calls
+ -
+ name: marked
+ type: u32
+ doc: Marked packets
+ -
+ name: matched
+ type: u32
+ doc: Drops due to flow match
+ -
+ name: tc-codel-xstats
+ type: struct
+ members:
+ -
+ name: maxpacket
+ type: u32
+ doc: Largest packet we've seen so far
+ -
+ name: count
+ type: u32
+ doc: How many drops we've done since the last time we entered dropping state
+ -
+ name: lastcount
+ type: u32
+ doc: Count at entry to dropping state
+ -
+ name: ldelay
+ type: u32
+        doc: In-queue delay seen by the most recently dequeued packet
+ -
+ name: drop-next
+ type: s32
+ doc: Time to drop next packet
+ -
+ name: drop-overlimit
+ type: u32
+ doc: Number of times max qdisc packet limit was hit
+ -
+ name: ecn-mark
+ type: u32
+ doc: Number of packets we've ECN marked instead of dropped
+ -
+ name: dropping
+ type: u32
+ doc: Are we in a dropping state?
+ -
+ name: ce-mark
+ type: u32
+ doc: Number of CE marked packets because of ce-threshold
+ -
+ name: tc-fq-codel-xstats
+ type: struct
+ members:
+ -
+ name: type
+ type: u32
+ -
+ name: maxpacket
+ type: u32
+ doc: Largest packet we've seen so far
+ -
+ name: drop-overlimit
+ type: u32
+ doc: Number of times max qdisc packet limit was hit
+ -
+ name: ecn-mark
+ type: u32
+ doc: Number of packets we ECN marked instead of being dropped
+ -
+ name: new-flow-count
+ type: u32
+ doc: Number of times packets created a new flow
+ -
+ name: new-flows-len
+ type: u32
+ doc: Count of flows in new list
+ -
+ name: old-flows-len
+ type: u32
+ doc: Count of flows in old list
+ -
+ name: ce-mark
+ type: u32
+ doc: Packets above ce-threshold
+ -
+ name: memory-usage
+ type: u32
+ doc: Memory usage in bytes
+ -
+ name: drop-overmemory
+ type: u32
+ -
+ name: tc-fq-pie-xstats
+ type: struct
+ members:
+ -
+ name: packets-in
+ type: u32
+ doc: Total number of packets enqueued
+ -
+ name: dropped
+ type: u32
+ doc: Packets dropped due to fq_pie_action
+ -
+ name: overlimit
+ type: u32
+ doc: Dropped due to lack of space in queue
+ -
+ name: overmemory
+ type: u32
+ doc: Dropped due to lack of memory in queue
+ -
+ name: ecn-mark
+ type: u32
+ doc: Packets marked with ecn
+ -
+ name: new-flow-count
+ type: u32
+ doc: Count of new flows created by packets
+ -
+ name: new-flows-len
+ type: u32
+ doc: Count of flows in new list
+ -
+ name: old-flows-len
+ type: u32
+ doc: Count of flows in old list
+ -
+ name: memory-usage
+ type: u32
+ doc: Total memory across all queues
+ -
+ name: tc-fq-qd-stats
+ type: struct
+ members:
+ -
+ name: gc-flows
+ type: u64
+ -
+ name: highprio-packets
+ type: u64
+        doc: Obsolete
+ -
+ name: tcp-retrans
+ type: u64
+        doc: Obsolete
+ -
+ name: throttled
+ type: u64
+ -
+ name: flows-plimit
+ type: u64
+ -
+ name: pkts-too-long
+ type: u64
+ -
+ name: allocation-errors
+ type: u64
+ -
+ name: time-next-delayed-flow
+ type: s64
+ -
+ name: flows
+ type: u32
+ -
+ name: inactive-flows
+ type: u32
+ -
+ name: throttled-flows
+ type: u32
+ -
+ name: unthrottle-latency-ns
+ type: u32
+ -
+ name: ce-mark
+ type: u64
+ doc: Packets above ce-threshold
+ -
+ name: horizon-drops
+ type: u64
+ -
+ name: horizon-caps
+ type: u64
+ -
+ name: fastpath-packets
+ type: u64
+ -
+ name: band-drops
+ type: binary
+ len: 24
+ -
+ name: band-pkt-count
+ type: binary
+ len: 12
+ -
+ name: pad
+ type: pad
+ len: 4
+ -
+ name: tc-hhf-xstats
+ type: struct
+ members:
+ -
+ name: drop-overlimit
+ type: u32
+ doc: Number of times max qdisc packet limit was hit
+ -
+ name: hh-overlimit
+ type: u32
+ doc: Number of times max heavy-hitters was hit
+ -
+ name: hh-tot-count
+ type: u32
+ doc: Number of captured heavy-hitters so far
+ -
+ name: hh-cur-count
+ type: u32
+ doc: Number of current heavy-hitters
+ -
+ name: tc-pie-xstats
+ type: struct
+ members:
+ -
+ name: prob
+ type: u64
+ doc: Current probability
+ -
+ name: delay
+ type: u32
+ doc: Current delay in ms
+ -
+ name: avg-dq-rate
+ type: u32
+ doc: Current average dq rate in bits/pie-time
+ -
+ name: dq-rate-estimating
+ type: u32
+ doc: Is avg-dq-rate being calculated?
+ -
+ name: packets-in
+ type: u32
+ doc: Total number of packets enqueued
+ -
+ name: dropped
+ type: u32
+ doc: Packets dropped due to pie action
+ -
+ name: overlimit
+ type: u32
+ doc: Dropped due to lack of space in queue
+ -
+ name: maxq
+ type: u32
+ doc: Maximum queue size
+ -
+ name: ecn-mark
+ type: u32
+ doc: Packets marked with ecn
+ -
+ name: tc-red-xstats
+ type: struct
+ members:
+ -
+ name: early
+ type: u32
+ doc: Early drops
+ -
+ name: pdrop
+ type: u32
+ doc: Drops due to queue limits
+ -
+ name: other
+ type: u32
+ doc: Drops due to drop() calls
+ -
+ name: marked
+ type: u32
+ doc: Marked packets
+ -
+ name: tc-sfb-xstats
+ type: struct
+ members:
+ -
+ name: earlydrop
+ type: u32
+ -
+ name: penaltydrop
+ type: u32
+ -
+ name: bucketdrop
+ type: u32
+ -
+ name: queuedrop
+ type: u32
+ -
+ name: childdrop
+ type: u32
+        doc: Drops in the child qdisc
+ -
+ name: marked
+ type: u32
+ -
+ name: maxqlen
+ type: u32
+ -
+ name: maxprob
+ type: u32
+ -
+ name: avgprob
+ type: u32
+ -
+ name: tc-sfq-xstats
+ type: struct
+ members:
+ -
+ name: allot
+ type: s32
+ -
+ name: gnet-stats-basic
+ type: struct
+ members:
+ -
+ name: bytes
+ type: u64
+ -
+ name: packets
+ type: u32
+ -
+ name: gnet-stats-rate-est
+ type: struct
+ members:
+ -
+ name: bps
+ type: u32
+ -
+ name: pps
+ type: u32
+ -
+ name: gnet-stats-rate-est64
+ type: struct
+ members:
+ -
+ name: bps
+ type: u64
+ -
+ name: pps
+ type: u64
+ -
+ name: gnet-stats-queue
+ type: struct
+ members:
+ -
+ name: qlen
+ type: u32
+ -
+ name: backlog
+ type: u32
+ -
+ name: drops
+ type: u32
+ -
+ name: requeues
+ type: u32
+ -
+ name: overlimits
+ type: u32
+ -
+ name: tc-u32-key
+ type: struct
+ members:
+ -
+ name: mask
+ type: u32
+ byte-order: big-endian
+ -
+ name: val
+ type: u32
+ byte-order: big-endian
+ -
+ name: "off"
+ type: s32
+ -
+ name: offmask
+ type: s32
+ -
+ name: tc-u32-sel
+ type: struct
+ members:
+ -
+ name: flags
+ type: u8
+ -
+ name: offshift
+ type: u8
+ -
+ name: nkeys
+ type: u8
+ -
+ name: offmask
+ type: u16
+ byte-order: big-endian
+ -
+ name: "off"
+ type: u16
+ -
+ name: offoff
+ type: s16
+ -
+ name: hoff
+ type: s16
+ -
+ name: hmask
+ type: u32
+ byte-order: big-endian
+ -
+ name: keys
+ type: binary
+ struct: tc-u32-key # TODO: array
+ -
+ name: tc-u32-pcnt
+ type: struct
+ members:
+ -
+ name: rcnt
+ type: u64
+ -
+ name: rhit
+ type: u64
+ -
+ name: kcnts
+ type: u64 # TODO: array
+ -
+ name: tcf-t
+ type: struct
+ members:
+ -
+ name: install
+ type: u64
+ -
+ name: lastuse
+ type: u64
+ -
+ name: expires
+ type: u64
+ -
+ name: firstuse
+ type: u64
+ -
+ name: tc-gen
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: tc-gact-p
+ type: struct
+ members:
+ -
+ name: ptype
+ type: u16
+ -
+ name: pval
+ type: u16
+ -
+ name: paction
+ type: s32
+ -
+ name: tcf-ematch-tree-hdr
+ type: struct
+ members:
+ -
+ name: nmatches
+ type: u16
+ -
+ name: progid
+ type: u16
+ -
+ name: tc-basic-pcnt
+ type: struct
+ members:
+ -
+ name: rcnt
+ type: u64
+ -
+ name: rhit
+ type: u64
+ -
+ name: tc-matchall-pcnt
+ type: struct
+ members:
+ -
+ name: rhit
+ type: u64
+ -
+ name: tc-mpls
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: m-action
+ type: s32
+ -
+ name: tc-police
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: limit
+ type: u32
+ -
+ name: burst
+ type: u32
+ -
+ name: mtu
+ type: u32
+ -
+ name: rate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: peakrate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: capab
+ type: u32
+ -
+ name: tc-pedit-sel
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: nkeys
+ type: u8
+ -
+ name: flags
+ type: u8
+ -
+ name: keys
+ type: binary
+ struct: tc-pedit-key # TODO: array
+ -
+ name: tc-pedit-key
+ type: struct
+ members:
+ -
+ name: mask
+ type: u32
+ -
+ name: val
+ type: u32
+ -
+ name: "off"
+ type: u32
+ -
+ name: at
+ type: u32
+ -
+ name: offmask
+ type: u32
+ -
+ name: shift
+ type: u32
+ -
+ name: tc-vlan
+ type: struct
+ members:
+ -
+ name: index
+ type: u32
+ -
+ name: capab
+ type: u32
+ -
+ name: action
+ type: s32
+ -
+ name: refcnt
+ type: s32
+ -
+ name: bindcnt
+ type: s32
+ -
+ name: v-action
+ type: s32
attribute-sets:
-
name: tc-attrs
@@ -512,7 +1370,9 @@ attribute-sets:
struct: tc-stats
-
name: xstats
- type: binary
+ type: sub-message
+ sub-message: tca-stats-app-msg
+ selector: kind
-
name: rate
type: binary
@@ -553,6 +1413,582 @@ attribute-sets:
name: ext-warn-msg
type: string
-
+ name: tc-act-attrs
+ attributes:
+ -
+ name: kind
+ type: string
+ -
+ name: options
+ type: sub-message
+ sub-message: tc-act-options-msg
+ selector: kind
+ -
+ name: index
+ type: u32
+ -
+ name: stats
+ type: nest
+ nested-attributes: tc-act-stats-attrs
+ -
+ name: pad
+ type: pad
+ -
+ name: cookie
+ type: binary
+ -
+ name: flags
+ type: bitfield32
+ -
+ name: hw-stats
+ type: bitfield32
+ -
+ name: used-hw-stats
+ type: bitfield32
+ -
+ name: in-hw-count
+ type: u32
+ -
+ name: tc-act-stats-attrs
+ attributes:
+ -
+ name: basic
+ type: binary
+ struct: gnet-stats-basic
+ -
+ name: rate-est
+ type: binary
+ struct: gnet-stats-rate-est
+ -
+ name: queue
+ type: binary
+ struct: gnet-stats-queue
+ -
+ name: app
+ type: binary
+ -
+ name: rate-est64
+ type: binary
+ struct: gnet-stats-rate-est64
+ -
+ name: pad
+ type: pad
+ -
+ name: basic-hw
+ type: binary
+ struct: gnet-stats-basic
+ -
+ name: pkt64
+ type: u64
+ -
+ name: tc-act-bpf-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: ops-len
+ type: u16
+ -
+ name: ops
+ type: binary
+ -
+ name: fd
+ type: u32
+ -
+ name: name
+ type: string
+ -
+ name: pad
+ type: pad
+ -
+ name: tag
+ type: binary
+ -
+ name: id
+ type: binary
+ -
+ name: tc-act-connmark-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-csum-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-ct-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: action
+ type: u16
+ -
+ name: zone
+ type: u16
+ -
+ name: mark
+ type: u32
+ -
+ name: mark-mask
+ type: u32
+ -
+ name: labels
+ type: binary
+ -
+ name: labels-mask
+ type: binary
+ -
+ name: nat-ipv4-min
+ type: u32
+ byte-order: big-endian
+ -
+ name: nat-ipv4-max
+ type: u32
+ byte-order: big-endian
+ -
+ name: nat-ipv6-min
+ type: binary
+ -
+ name: nat-ipv6-max
+ type: binary
+ -
+ name: nat-port-min
+ type: u16
+ byte-order: big-endian
+ -
+ name: nat-port-max
+ type: u16
+ byte-order: big-endian
+ -
+ name: pad
+ type: pad
+ -
+ name: helper-name
+ type: string
+ -
+ name: helper-family
+ type: u8
+ -
+ name: helper-proto
+ type: u8
+ -
+ name: tc-act-ctinfo-attrs
+ attributes:
+ -
+ name: pad
+ type: pad
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: act
+ type: binary
+ -
+ name: zone
+ type: u16
+ -
+ name: parms-dscp-mask
+ type: u32
+ -
+ name: parms-dscp-statemask
+ type: u32
+ -
+ name: parms-cpmark-mask
+ type: u32
+ -
+ name: stats-dscp-set
+ type: u64
+ -
+ name: stats-dscp-error
+ type: u64
+ -
+ name: stats-cpmark-set
+ type: u64
+ -
+ name: tc-act-gate-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: priority
+ type: s32
+ -
+ name: entry-list
+ type: binary
+ -
+ name: base-time
+ type: u64
+ -
+ name: cycle-time
+ type: u64
+ -
+ name: cycle-time-ext
+ type: u64
+ -
+ name: flags
+ type: u32
+ -
+ name: clockid
+ type: s32
+ -
+ name: tc-act-ife-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: dmac
+ type: binary
+ -
+ name: smac
+ type: binary
+ -
+ name: type
+ type: u16
+ -
+ name: metalst
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-mirred-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: blockid
+ type: binary
+ -
+ name: tc-act-mpls-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-mpls
+ -
+ name: pad
+ type: pad
+ -
+ name: proto
+ type: u16
+ byte-order: big-endian
+ -
+ name: label
+ type: u32
+ -
+ name: tc
+ type: u8
+ -
+ name: ttl
+ type: u8
+ -
+ name: bos
+ type: u8
+ -
+ name: tc-act-nat-attrs
+ attributes:
+ -
+ name: parms
+ type: binary
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-pedit-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-pedit-sel
+ -
+ name: pad
+ type: pad
+ -
+ name: parms-ex
+ type: binary
+ -
+ name: keys-ex
+ type: binary
+ -
+ name: key-ex
+ type: binary
+ -
+ name: tc-act-simple-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: data
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-skbedit-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: priority
+ type: u32
+ -
+ name: queue-mapping
+ type: u16
+ -
+ name: mark
+ type: u32
+ -
+ name: pad
+ type: pad
+ -
+ name: ptype
+ type: u16
+ -
+ name: mask
+ type: u32
+ -
+ name: flags
+ type: u64
+ -
+ name: queue-mapping-max
+ type: u16
+ -
+ name: tc-act-skbmod-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: dmac
+ type: binary
+ -
+ name: smac
+ type: binary
+ -
+ name: etype
+ type: binary
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-tunnel-key-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ -
+ name: enc-ipv4-src
+ type: u32
+ byte-order: big-endian
+ -
+ name: enc-ipv4-dst
+ type: u32
+ byte-order: big-endian
+ -
+ name: enc-ipv6-src
+ type: binary
+ -
+ name: enc-ipv6-dst
+ type: binary
+ -
+ name: enc-key-id
+ type: u64
+ byte-order: big-endian
+ -
+ name: pad
+ type: pad
+ -
+ name: enc-dst-port
+ type: u16
+ byte-order: big-endian
+ -
+ name: no-csum
+ type: u8
+ -
+ name: enc-opts
+ type: binary
+ -
+ name: enc-tos
+ type: u8
+ -
+ name: enc-ttl
+ type: u8
+ -
+ name: no-frag
+ type: flag
+ -
+ name: tc-act-vlan-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-vlan
+ -
+ name: push-vlan-id
+ type: u16
+ -
+ name: push-vlan-protocol
+ type: u16
+ -
+ name: pad
+ type: pad
+ -
+ name: push-vlan-priority
+ type: u8
+ -
+ name: push-eth-dst
+ type: binary
+ -
+ name: push-eth-src
+ type: binary
+ -
+ name: tc-basic-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: ematches
+ type: nest
+ nested-attributes: tc-ematch-attrs
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: pcnt
+ type: binary
+ struct: tc-basic-pcnt
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-bpf-attrs
+ attributes:
+ -
+ name: act
+ type: nest
+ nested-attributes: tc-act-attrs
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: classid
+ type: u32
+ -
+ name: ops-len
+ type: u16
+ -
+ name: ops
+ type: binary
+ -
+ name: fd
+ type: u32
+ -
+ name: name
+ type: string
+ -
+ name: flags
+ type: u32
+ -
+ name: flags-gen
+ type: u32
+ -
+ name: tag
+ type: binary
+ -
+ name: id
+ type: u32
+ -
name: tc-cake-attrs
attributes:
-
@@ -641,7 +2077,8 @@ attribute-sets:
type: u32
-
name: tin-stats
- type: binary
+ type: array-nest
+ nested-attributes: tc-cake-tin-stats-attrs
-
name: deficit
type: s32
@@ -661,6 +2098,84 @@ attribute-sets:
name: blue-timer-us
type: s32
-
+ name: tc-cake-tin-stats-attrs
+ attributes:
+ -
+ name: pad
+ type: pad
+ -
+ name: sent-packets
+ type: u32
+ -
+ name: sent-bytes64
+ type: u64
+ -
+ name: dropped-packets
+ type: u32
+ -
+ name: dropped-bytes64
+ type: u64
+ -
+ name: acks-dropped-packets
+ type: u32
+ -
+ name: acks-dropped-bytes64
+ type: u64
+ -
+ name: ecn-marked-packets
+ type: u32
+ -
+ name: ecn-marked-bytes64
+ type: u64
+ -
+ name: backlog-packets
+ type: u32
+ -
+ name: backlog-bytes
+ type: u32
+ -
+ name: threshold-rate64
+ type: u64
+ -
+ name: target-us
+ type: u32
+ -
+ name: interval-us
+ type: u32
+ -
+ name: way-indirect-hits
+ type: u32
+ -
+ name: way-misses
+ type: u32
+ -
+ name: way-collisions
+ type: u32
+ -
+ name: peak-delay-us
+ type: u32
+ -
+ name: avg-delay-us
+ type: u32
+ -
+ name: base-delay-us
+ type: u32
+ -
+ name: sparse-flows
+ type: u32
+ -
+ name: bulk-flows
+ type: u32
+ -
+ name: unresponsive-flows
+ type: u32
+ -
+ name: max-skblen
+ type: u32
+ -
+ name: flow-quantum
+ type: u32
+ -
name: tc-cbs-attrs
attributes:
-
@@ -668,6 +2183,20 @@ attribute-sets:
type: binary
struct: tc-cbs-qopt
-
+ name: tc-cgroup-attrs
+ attributes:
+ -
+ name: act
+ type: nest
+ nested-attributes: tc-act-attrs
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: ematches
+ type: binary
+ -
name: tc-choke-attrs
attributes:
-
@@ -677,6 +2206,9 @@ attribute-sets:
-
name: stab
type: binary
+ checks:
+ min-len: 256
+ max-len: 256
-
name: max-p
type: u32
@@ -705,6 +2237,56 @@ attribute-sets:
name: quantum
type: u32
-
+ name: tc-ematch-attrs
+ attributes:
+ -
+ name: tree-hdr
+ type: binary
+ struct: tcf-ematch-tree-hdr
+ -
+ name: tree-list
+ type: binary
+ -
+ name: tc-flow-attrs
+ attributes:
+ -
+ name: keys
+ type: u32
+ -
+ name: mode
+ type: u32
+ -
+ name: baseclass
+ type: u32
+ -
+ name: rshift
+ type: u32
+ -
+ name: addend
+ type: u32
+ -
+ name: mask
+ type: u32
+ -
+ name: xor
+ type: u32
+ -
+ name: divisor
+ type: u32
+ -
+ name: act
+ type: binary
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: ematches
+ type: binary
+ -
+ name: perturb
+ type: u32
+ -
name: tc-flower-attrs
attributes:
-
@@ -953,15 +2535,19 @@ attribute-sets:
-
name: key-arp-sha
type: binary
+ display-hint: mac
-
name: key-arp-sha-mask
type: binary
+ display-hint: mac
-
name: key-arp-tha
type: binary
+ display-hint: mac
-
name: key-arp-tha-mask
type: binary
+ display-hint: mac
-
name: key-mpls-ttl
type: u8
@@ -1020,10 +2606,12 @@ attribute-sets:
type: u8
-
name: key-enc-opts
- type: binary
+ type: nest
+ nested-attributes: tc-flower-key-enc-opts-attrs
-
name: key-enc-opts-mask
- type: binary
+ type: nest
+ nested-attributes: tc-flower-key-enc-opts-attrs
-
name: in-hw-count
type: u32
@@ -1069,7 +2657,8 @@ attribute-sets:
type: binary
-
name: key-mpls-opts
- type: binary
+ type: nest
+ nested-attributes: tc-flower-key-mpls-opt-attrs
-
name: key-hash
type: u32
@@ -1091,6 +2680,129 @@ attribute-sets:
name: key-l2-tpv3-sid
type: u32
byte-order: big-endian
+ -
+ name: l2-miss
+ type: u8
+ -
+ name: key-cfm
+ type: nest
+ nested-attributes: tc-flower-key-cfm-attrs
+ -
+ name: key-spi
+ type: u32
+ byte-order: big-endian
+ -
+ name: key-spi-mask
+ type: u32
+ byte-order: big-endian
+ -
+ name: tc-flower-key-enc-opts-attrs
+ attributes:
+ -
+ name: geneve
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-geneve-attrs
+ -
+ name: vxlan
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-vxlan-attrs
+ -
+ name: erspan
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-erspan-attrs
+ -
+ name: gtp
+ type: nest
+ nested-attributes: tc-flower-key-enc-opt-gtp-attrs
+ -
+ name: tc-flower-key-enc-opt-geneve-attrs
+ attributes:
+ -
+ name: class
+ type: u16
+ -
+ name: type
+ type: u8
+ -
+ name: data
+ type: binary
+ -
+ name: tc-flower-key-enc-opt-vxlan-attrs
+ attributes:
+ -
+ name: gbp
+ type: u32
+ -
+ name: tc-flower-key-enc-opt-erspan-attrs
+ attributes:
+ -
+ name: ver
+ type: u8
+ -
+ name: index
+ type: u32
+ -
+ name: dir
+ type: u8
+ -
+ name: hwid
+ type: u8
+ -
+ name: tc-flower-key-enc-opt-gtp-attrs
+ attributes:
+ -
+ name: pdu-type
+ type: u8
+ -
+ name: qfi
+ type: u8
+ -
+ name: tc-flower-key-mpls-opt-attrs
+ attributes:
+ -
+ name: lse-depth
+ type: u8
+ -
+ name: lse-ttl
+ type: u8
+ -
+ name: lse-bos
+ type: u8
+ -
+ name: lse-tc
+ type: u8
+ -
+ name: lse-label
+ type: u32
+ -
+ name: tc-flower-key-cfm-attrs
+ attributes:
+ -
+ name: md-level
+ type: u8
+ -
+ name: opcode
+ type: u8
+ -
+ name: tc-fw-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: indev
+ type: string
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
+ name: mask
+ type: u32
-
name: tc-gred-attrs
attributes:
@@ -1135,7 +2847,7 @@ attribute-sets:
type: u32
-
name: stat-bytes
- type: u32
+ type: u64
-
name: stat-packets
type: u32
@@ -1232,40 +2944,25 @@ attribute-sets:
name: offload
type: flag
-
- name: tc-act-attrs
+ name: tc-matchall-attrs
attributes:
-
- name: kind
- type: string
+ name: classid
+ type: u32
-
- name: options
- type: sub-message
- sub-message: tc-act-options-msg
- selector: kind
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
-
- name: index
+ name: flags
type: u32
-
- name: stats
+ name: pcnt
type: binary
+ struct: tc-matchall-pcnt
-
name: pad
type: pad
- -
- name: cookie
- type: binary
- -
- name: flags
- type: bitfield32
- -
- name: hw-stats
- type: bitfield32
- -
- name: used-hw-stats
- type: bitfield32
- -
- name: in-hw-count
- type: u32
-
name: tc-etf-attrs
attributes:
@@ -1304,48 +3001,71 @@ attribute-sets:
-
name: plimit
type: u32
+ doc: Limit of total number of packets in queue
-
name: flow-plimit
type: u32
+ doc: Limit of packets per flow
-
name: quantum
type: u32
+ doc: RR quantum
-
name: initial-quantum
type: u32
+ doc: RR quantum for new flow
-
name: rate-enable
type: u32
+ doc: Enable / disable rate limiting
-
name: flow-default-rate
type: u32
+ doc: Obsolete, do not use
-
name: flow-max-rate
type: u32
+ doc: Per flow max rate
-
name: buckets-log
type: u32
+ doc: log2(number of buckets)
-
name: flow-refill-delay
type: u32
+ doc: Flow credit refill delay in usec
-
name: orphan-mask
type: u32
+ doc: Mask applied to orphaned skb hashes
-
name: low-rate-threshold
type: u32
+ doc: Per packet delay under this rate
-
name: ce-threshold
type: u32
+ doc: DCTCP-like CE marking threshold
-
name: timer-slack
type: u32
-
name: horizon
type: u32
+ doc: Time horizon in usec
-
name: horizon-drop
type: u8
+ doc: Drop packets beyond horizon, or cap their EDT
+ -
+ name: priomap
+ type: binary
+ struct: tc-prio-qopt
+ -
+ name: weights
+ type: binary
+ sub-type: s32
+ doc: Weights for each band
-
name: tc-fq-codel-attrs
attributes:
@@ -1427,6 +3147,7 @@ attribute-sets:
-
name: corr
type: binary
+ struct: tc-netem-corr
-
name: delay-dist
type: binary
@@ -1434,15 +3155,19 @@ attribute-sets:
-
name: reorder
type: binary
+ struct: tc-netem-reorder
-
name: corrupt
type: binary
+ struct: tc-netem-corrupt
-
name: loss
- type: binary
+ type: nest
+ nested-attributes: tc-netem-loss-attrs
-
name: rate
type: binary
+ struct: tc-netem-rate
-
name: ecn
type: u32
@@ -1461,10 +3186,27 @@ attribute-sets:
-
name: slot
type: binary
+ struct: tc-netem-slot
-
name: slot-dist
type: binary
sub-type: s16
+ -
+ name: prng-seed
+ type: u64
+ -
+ name: tc-netem-loss-attrs
+ attributes:
+ -
+ name: gi
+ type: binary
+        doc: General Intuitive 4-state model
+ struct: tc-netem-gimodel
+ -
+ name: ge
+ type: binary
+        doc: Gilbert-Elliott models
+ struct: tc-netem-gemodel
-
name: tc-pie-attrs
attributes:
@@ -1493,6 +3235,44 @@ attribute-sets:
name: dq-rate-estimator
type: u32
-
+ name: tc-police-attrs
+ attributes:
+ -
+ name: tbf
+ type: binary
+ struct: tc-police
+ -
+ name: rate
+ type: binary
+ -
+ name: peakrate
+ type: binary
+ -
+ name: avrate
+ type: u32
+ -
+ name: result
+ type: u32
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: pad
+ type: pad
+ -
+ name: rate64
+ type: u64
+ -
+ name: peakrate64
+ type: u64
+ -
+ name: pktrate64
+ type: u64
+ -
+ name: pktburst64
+ type: u64
+ -
name: tc-qfq-attrs
attributes:
-
@@ -1516,7 +3296,7 @@ attribute-sets:
type: u32
-
name: flags
- type: binary
+ type: bitfield32
-
name: early-drop-block
type: u32
@@ -1524,6 +3304,29 @@ attribute-sets:
name: mark-block
type: u32
-
+ name: tc-route-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: to
+ type: u32
+ -
+ name: from
+ type: u32
+ -
+ name: iif
+ type: u32
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
name: tc-taprio-attrs
attributes:
-
@@ -1573,6 +3376,7 @@ attribute-sets:
name: entry
type: nest
nested-attributes: tc-taprio-sched-entry
+ multi-attr: true
-
name: tc-taprio-sched-entry
attributes:
@@ -1629,17 +3433,43 @@ attribute-sets:
name: pad
type: pad
-
- name: tca-gact-attrs
+ name: tc-act-sample-attrs
attributes:
-
name: tm
type: binary
+ struct: tcf-t
-
name: parms
type: binary
+ struct: tc-gen
+ -
+ name: rate
+ type: u32
+ -
+ name: trunc-size
+ type: u32
+ -
+ name: psample-group
+ type: u32
+ -
+ name: pad
+ type: pad
+ -
+ name: tc-act-gact-attrs
+ attributes:
+ -
+ name: tm
+ type: binary
+ struct: tcf-t
+ -
+ name: parms
+ type: binary
+ struct: tc-gen
-
name: prob
type: binary
+ struct: tc-gact-p
-
name: pad
type: pad
@@ -1659,35 +3489,90 @@ attribute-sets:
-
name: basic
type: binary
+ struct: gnet-stats-basic
-
name: rate-est
type: binary
+ struct: gnet-stats-rate-est
-
name: queue
type: binary
+ struct: gnet-stats-queue
-
name: app
- type: binary # TODO sub-message needs 2+ level deep lookup
+ type: sub-message
sub-message: tca-stats-app-msg
selector: kind
-
name: rate-est64
type: binary
+ struct: gnet-stats-rate-est64
-
name: pad
type: pad
-
name: basic-hw
type: binary
+ struct: gnet-stats-basic
-
name: pkt64
+ type: u64
+ -
+ name: tc-u32-attrs
+ attributes:
+ -
+ name: classid
+ type: u32
+ -
+ name: hash
+ type: u32
+ -
+ name: link
+ type: u32
+ -
+ name: divisor
+ type: u32
+ -
+ name: sel
type: binary
+ struct: tc-u32-sel
+ -
+ name: police
+ type: nest
+ nested-attributes: tc-police-attrs
+ -
+ name: act
+ type: array-nest
+ nested-attributes: tc-act-attrs
+ -
+ name: indev
+ type: string
+ -
+ name: pcnt
+ type: binary
+ struct: tc-u32-pcnt
+ -
+ name: mark
+ type: binary
+ struct: tc-u32-mark
+ -
+ name: flags
+ type: u32
+ -
+ name: pad
+ type: pad
sub-messages:
-
name: tc-options-msg
formats:
-
+ value: basic
+ attribute-set: tc-basic-attrs
+ -
+ value: bpf
+ attribute-set: tc-bpf-attrs
+ -
value: bfifo
fixed-header: tc-fifo-qopt
-
@@ -1697,6 +3582,9 @@ sub-messages:
value: cbs
attribute-set: tc-cbs-attrs
-
+ value: cgroup
+ attribute-set: tc-cgroup-attrs
+ -
value: choke
attribute-set: tc-choke-attrs
-
@@ -1714,6 +3602,12 @@ sub-messages:
value: ets
attribute-set: tc-ets-attrs
-
+ value: flow
+ attribute-set: tc-flow-attrs
+ -
+ value: flower
+ attribute-set: tc-flower-attrs
+ -
value: fq
attribute-set: tc-fq-attrs
-
@@ -1723,8 +3617,8 @@ sub-messages:
value: fq_pie
attribute-set: tc-fq-pie-attrs
-
- value: flower
- attribute-set: tc-flower-attrs
+ value: fw
+ attribute-set: tc-fw-attrs
-
value: gred
attribute-set: tc-gred-attrs
@@ -1740,6 +3634,9 @@ sub-messages:
-
value: ingress # no content
-
+ value: matchall
+ attribute-set: tc-matchall-attrs
+ -
value: mq # no content
-
value: mqprio
@@ -1776,6 +3673,9 @@ sub-messages:
value: red
attribute-set: tc-red-attrs
-
+ value: route
+ attribute-set: tc-route-attrs
+ -
value: sfb
fixed-header: tc-sfb-qopt
-
@@ -1787,88 +3687,105 @@ sub-messages:
-
value: tbf
attribute-set: tc-tbf-attrs
+ -
+ value: u32
+ attribute-set: tc-u32-attrs
-
name: tc-act-options-msg
formats:
-
- value: gact
- attribute-set: tca-gact-attrs
- -
- name: tca-stats-app-msg
- formats:
+ value: bpf
+ attribute-set: tc-act-bpf-attrs
-
- value: bfifo
+ value: connmark
+ attribute-set: tc-act-connmark-attrs
-
- value: blackhole
+ value: csum
+ attribute-set: tc-act-csum-attrs
-
- value: cake
- attribute-set: tc-cake-stats-attrs
+ value: ct
+ attribute-set: tc-act-ct-attrs
-
- value: cbs
+ value: ctinfo
+ attribute-set: tc-act-ctinfo-attrs
-
- value: choke
+ value: gact
+ attribute-set: tc-act-gact-attrs
-
- value: clsact
+ value: gate
+ attribute-set: tc-act-gate-attrs
-
- value: codel
+ value: ife
+ attribute-set: tc-act-ife-attrs
-
- value: drr
+ value: mirred
+ attribute-set: tc-act-mirred-attrs
-
- value: etf
+ value: mpls
+ attribute-set: tc-act-mpls-attrs
-
- value: ets
- -
- value: fq
- -
- value: fq_codel
+ value: nat
+ attribute-set: tc-act-nat-attrs
-
- value: fq_pie
+ value: pedit
+ attribute-set: tc-act-pedit-attrs
-
- value: flower
+ value: police
+ attribute-set: tc-act-police-attrs
-
- value: gred
+ value: sample
+ attribute-set: tc-act-sample-attrs
-
- value: hfsc
+ value: simple
+ attribute-set: tc-act-simple-attrs
-
- value: hhf
+ value: skbedit
+ attribute-set: tc-act-skbedit-attrs
-
- value: htb
+ value: skbmod
+ attribute-set: tc-act-skbmod-attrs
-
- value: ingress
+ value: tunnel_key
+ attribute-set: tc-act-tunnel-key-attrs
-
- value: mq
+ value: vlan
+ attribute-set: tc-act-vlan-attrs
+ -
+ name: tca-stats-app-msg
+ formats:
-
- value: mqprio
+ value: cake
+ attribute-set: tc-cake-stats-attrs
-
- value: multiq
+ value: choke
+ fixed-header: tc-choke-xstats
-
- value: netem
+ value: codel
+ fixed-header: tc-codel-xstats
-
- value: noqueue
+ value: fq
+ fixed-header: tc-fq-qd-stats
-
- value: pfifo
+ value: fq_codel
+ fixed-header: tc-fq-codel-xstats
-
- value: pfifo_fast
+ value: fq_pie
+ fixed-header: tc-fq-pie-xstats
-
- value: pfifo_head_drop
+ value: hhf
+ fixed-header: tc-hhf-xstats
-
value: pie
- -
- value: plug
- -
- value: prio
- -
- value: qfq
+ fixed-header: tc-pie-xstats
-
value: red
+ fixed-header: tc-red-xstats
-
value: sfb
+ fixed-header: tc-sfb-xstats
-
value: sfq
- -
- value: taprio
- -
- value: tbf
+ fixed-header: tc-sfq-xstats
operations:
enum-model: directional
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index dceeb0d763aa..72da7057e4cf 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -329,23 +329,24 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field as you registered the UMEM on that
socket. These two sockets will now share one and the same UMEM.
-There is no need to supply an XDP program like the one in the previous
-case where sockets were bound to the same queue id and
-device. Instead, use the NIC's packet steering capabilities to steer
-the packets to the right queue. In the previous example, there is only
-one queue shared among sockets, so the NIC cannot do this steering. It
-can only steer between queues.
-
-In libbpf, you need to use the xsk_socket__create_shared() API as it
-takes a reference to a FILL ring and a COMPLETION ring that will be
-created for you and bound to the shared UMEM. You can use this
-function for all the sockets you create, or you can use it for the
-second and following ones and use xsk_socket__create() for the first
-one. Both methods yield the same result.
+In this case, it is possible to use the NIC's packet steering
+capabilities to steer the packets to the right queue. This is not
+possible in the previous example as there is only one queue shared
+among sockets, so the NIC cannot do this steering as it can only steer
+between queues.
+
+In libxdp (or libbpf prior to version 1.0), you need to use the
+xsk_socket__create_shared() API as it takes a reference to a FILL ring
+and a COMPLETION ring that will be created for you and bound to the
+shared UMEM. You can use this function for all the sockets you create,
+or you can use it for the second and following ones and use
+xsk_socket__create() for the first one. Both methods yield the same
+result.
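+
+As a hedged sketch (error handling omitted; ``umem`` and the interface
+name are examples, set up by the caller as described above), two sockets
+sharing one UMEM across queues 0 and 1 could be created like this::
+
+   struct xsk_ring_prod fill0, fill1, tx0, tx1;
+   struct xsk_ring_cons comp0, comp1, rx0, rx1;
+   struct xsk_socket *xsk0, *xsk1;
+
+   /* Each socket gets its own FILL and COMPLETION rings,
+    * all backed by the one shared umem. */
+   xsk_socket__create_shared(&xsk0, "eth0", 0, umem, &rx0, &tx0,
+                             &fill0, &comp0, NULL);
+   xsk_socket__create_shared(&xsk1, "eth0", 1, umem, &rx1, &tx1,
+                             &fill1, &comp1, NULL);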
Note that a UMEM can be shared between sockets on the same queue id
and device, as well as between queues on the same device and between
-devices at the same time.
+devices at the same time. It is also possible to redirect to any
+socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@@ -822,6 +823,10 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.
+ Note that if you are using the XDP_SHARED_UMEM option, it is
+ possible to switch traffic between any of the sockets bound to the
+ same umem.
+
Q: My packets are sometimes corrupted. What is wrong?
A: Care has to be taken not to feed the same buffer in the UMEM into
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index f7a73421eb76..e774b48de9f5 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -444,6 +444,18 @@ arp_missed_max
The default value is 2, and the allowable range is 1 - 255.
+coupled_control
+
+ Specifies whether the LACP state machine's MUX in the 802.3ad mode
+ should have separate Collecting and Distributing states.
+
+ This is done by implementing the independent control state machine per
+ IEEE 802.1AX-2008 5.4.15 in addition to the existing coupled control
+ state machine.
+
+ The default value is 1. With this setting the Collecting and
+ Distributing states are not separated, keeping the bond in coupled
+ control.
+
downdelay
Specifies the time, in milliseconds, to wait before disabling
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
index d7e1ada905b2..62519d38c58b 100644
--- a/Documentation/networking/can.rst
+++ b/Documentation/networking/can.rst
@@ -444,6 +444,24 @@ definitions are specified for CAN specific MTUs in include/linux/can.h:
#define CANFD_MTU (sizeof(struct canfd_frame)) == 72 => CAN FD frame
+Returned Message Flags
+----------------------
+
+When using the system call recvmsg(2) on a RAW or a BCM socket, the
+msg->msg_flags field may contain the following flags:
+
+MSG_DONTROUTE:
+ set when the received frame was created on the local host.
+
+MSG_CONFIRM:
+ set when the frame was sent via the socket it is received on.
+ This flag can be interpreted as a 'transmission confirmation' when the
+ CAN driver supports the echo of frames on driver level, see
+ :ref:`socketcan-local-loopback1` and :ref:`socketcan-local-loopback2`.
+ (Note: In order to receive such messages on a RAW socket,
+ CAN_RAW_RECV_OWN_MSGS must be set.)
+
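+As a hedged sketch (assuming a bound RAW socket ``s`` with the
+CAN_RAW_RECV_OWN_MSGS option enabled so that own frames are received),
+these flags can be examined like this::
+
+    struct can_frame frame;
+    struct iovec iov = {
+        .iov_base = &frame,
+        .iov_len = sizeof(frame),
+    };
+    struct msghdr msg = {
+        .msg_iov = &iov,
+        .msg_iovlen = 1,
+    };
+    ssize_t nbytes;
+
+    nbytes = recvmsg(s, &msg, 0);
+    if (nbytes > 0 && (msg.msg_flags & MSG_CONFIRM))
+        printf("own frame echoed back by the driver\n");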
+
.. _socketcan-raw-sockets:
RAW Protocol Sockets with can_filters (SOCK_RAW)
@@ -693,22 +711,6 @@ where the CAN_INV_FILTER flag is set in order to notch single CAN IDs or
CAN ID ranges from the incoming traffic.
-RAW Socket Returned Message Flags
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When using recvmsg() call, the msg->msg_flags may contain following flags:
-
-MSG_DONTROUTE:
- set when the received frame was created on the local host.
-
-MSG_CONFIRM:
- set when the frame was sent via the socket it is received on.
- This flag can be interpreted as a 'transmission confirmation' when the
- CAN driver supports the echo of frames on driver level, see
- :ref:`socketcan-local-loopback1` and :ref:`socketcan-local-loopback2`.
- In order to receive such messages, CAN_RAW_RECV_OWN_MSGS must be set.
-
-
Broadcast Manager Protocol Sockets (SOCK_DGRAM)
-----------------------------------------------
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index b842bcb14255..a4c7d0c65fd7 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -211,10 +211,16 @@ Documentation/networking/net_dim.rst
RX copybreak
============
+
The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
and can be configured by the ETHTOOL_STUNABLE command of the
SIOCETHTOOL ioctl.
+This option controls the maximum packet length for which the RX
+descriptor it was received on is recycled. When a packet smaller than
+RX copybreak bytes is received, it is copied into a new memory buffer
+and the RX descriptor is returned to HW.
+
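+As a hedged illustration (the interface name and value are examples,
+error handling is omitted, and the wrapper struct is only one way of
+laying out the tunable payload), rx_copybreak can be set through the
+ETHTOOL_STUNABLE command roughly as follows::
+
+  struct {
+          struct ethtool_tunable hdr;
+          __u32 value;
+  } tuna = {
+          .hdr = {
+                  .cmd     = ETHTOOL_STUNABLE,
+                  .id      = ETHTOOL_RX_COPYBREAK,
+                  .type_id = ETHTOOL_TUNABLE_U32,
+                  .len     = sizeof(__u32),
+          },
+          .value = 256,
+  };
+  struct ifreq ifr = { .ifr_data = (void *)&tuna };
+  int fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+  strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+  ioctl(fd, SIOCETHTOOL, &ifr);
+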
Statistics
==========
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 43de285b8a92..6932d8c043c2 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -42,6 +42,7 @@ Contents:
intel/ice
marvell/octeontx2
marvell/octeon_ep
+ marvell/octeon_ep_vf
mellanox/mlx5/index
microsoft/netvsc
neterion/s2io
diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst
new file mode 100644
index 000000000000..603133d0b92f
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=======================================================================
+Linux kernel networking driver for Marvell's Octeon PCI Endpoint NIC VF
+=======================================================================
+
+Network driver for Marvell's Octeon PCI EndPoint NIC VF.
+Copyright (c) 2020 Marvell International Ltd.
+
+Overview
+========
+This driver implements networking functionality of Marvell's Octeon PCI
+EndPoint NIC VF.
+
+Supported Devices
+=================
+Currently, this driver supports the following devices:
+ * Network controller: Cavium, Inc. Device b203
+ * Network controller: Cavium, Inc. Device b403
+ * Network controller: Cavium, Inc. Device b103
+ * Network controller: Cavium, Inc. Device b903
+ * Network controller: Cavium, Inc. Device ba03
+ * Network controller: Cavium, Inc. Device bc03
+ * Network controller: Cavium, Inc. Device bd03
diff --git a/Documentation/networking/device_drivers/wwan/t7xx.rst b/Documentation/networking/device_drivers/wwan/t7xx.rst
index dd5b731957ca..f346f5f85f15 100644
--- a/Documentation/networking/device_drivers/wwan/t7xx.rst
+++ b/Documentation/networking/device_drivers/wwan/t7xx.rst
@@ -39,6 +39,34 @@ command and receive response:
- open the AT control channel using a UART tool or a special user tool
+Sysfs
+=====
+The driver provides sysfs interfaces to userspace.
+
+t7xx_mode
+---------
+The sysfs interface provides userspace with access to the device mode. This
+interface supports read and write operations.
+
+Device mode:
+
+- ``unknown`` represents that the device is in an unknown state
+- ``ready`` represents that the device is in the ready state
+- ``reset`` represents that the device is in the reset state
+- ``fastboot_switching`` represents that the device is in the fastboot switching state
+- ``fastboot_download`` represents that the device is in the fastboot download state
+- ``fastboot_dump`` represents that the device is in the fastboot dump state
+
+Read from userspace to get the current device mode.
+
+::
+
+  $ cat /sys/bus/pci/devices/${bdf}/t7xx_mode
+
+Write from userspace to set the device mode.
+
+::
+
+  $ echo fastboot_switching > /sys/bus/pci/devices/${bdf}/t7xx_mode
+
Management application development
==================================
The driver and userspace interfaces are described below. The MBIM protocol is
@@ -97,6 +125,20 @@ The driver exposes an AT port by implementing AT WWAN Port.
The userspace end of the control port is a /dev/wwan0at0 character
device. Application shall use this interface to issue AT commands.
+fastboot port userspace ABI
+---------------------------
+
+/dev/wwan0fastboot0 character device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The driver exposes a fastboot protocol interface by implementing
+fastboot WWAN Port. The userspace end of the fastboot channel pipe is a
+/dev/wwan0fastboot0 character device. Application shall use this interface for
+fastboot protocol communication.
+
+Please note that the driver needs to be reloaded to export the
+/dev/wwan0fastboot0 port, because the device needs a cold reset after
+entering ``fastboot_switching`` mode.
+
The MediaTek's T700 modem supports the 3GPP TS 27.007 [4] specification.
References
@@ -118,3 +160,7 @@ speak the Mobile Interface Broadband Model (MBIM) protocol"*
[4] *Specification # 27.007 - 3GPP*
- https://www.3gpp.org/DynaReport/27007.htm
+
+[5] *fastboot "a mechanism for communicating with bootloaders"*
+
+- https://android.googlesource.com/platform/system/core/+/refs/heads/main/fastboot/README.md
diff --git a/Documentation/networking/devlink/devlink-port.rst b/Documentation/networking/devlink/devlink-port.rst
index e33ad2401ad7..562f46b41274 100644
--- a/Documentation/networking/devlink/devlink-port.rst
+++ b/Documentation/networking/devlink/devlink-port.rst
@@ -126,7 +126,7 @@ Users may also set the RoCE capability of the function using
`devlink port function set roce` command.
Users may also set the function as migratable using
-'devlink port function set migratable' command.
+`devlink port function set migratable` command.
Users may also set the IPsec crypto capability of the function using
`devlink port function set ipsec_crypto` command.
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 702f204a3dbd..456985407475 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -97,6 +97,10 @@ parameters.
When metadata is disabled, the above use cases will fail to initialize if
users try to enable them.
+
+ Note: Setting this parameter does not take effect immediately. The
+ setting must happen in legacy mode, and eswitch port metadata takes
+ effect after switchdev mode is enabled.
* - ``hairpin_num_queues``
- u32
- driverinit
@@ -246,7 +250,7 @@ them in realtime.
Description of the vnic counters:
-- total_q_under_processor_handle
+- total_error_queues
number of queues in an error state due to
an async error or errored command.
- send_queue_priority_update_flow
@@ -255,7 +259,8 @@ Description of the vnic counters:
number of times CQ entered an error state due to an overflow.
- async_eq_overrun
number of times an EQ mapped to async events was overrun.
- comp_eq_overrun number of times an EQ mapped to completion events was
+- comp_eq_overrun
+ number of times an EQ mapped to completion events was
overrun.
- quota_exceeded_command
number of commands issued and failed due to quota exceeded.
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 7afff42612e9..bd50df6a5a42 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2503,7 +2503,7 @@ use_tempaddr - INTEGER
temp_valid_lft - INTEGER
valid lifetime (in seconds) for temporary addresses. If less than the
- minimum required lifetime (typically 5 seconds), temporary addresses
+ minimum required lifetime (typically 5-7 seconds), temporary addresses
will not be created.
Default: 172800 (2 days)
@@ -2511,7 +2511,7 @@ temp_valid_lft - INTEGER
temp_prefered_lft - INTEGER
Preferred lifetime (in seconds) for temporary addresses. If
temp_prefered_lft is less than the minimum required lifetime (typically
- 5 seconds), temporary addresses will not be created. If
+ 5-7 seconds), the preferred lifetime is the minimum required. If
temp_prefered_lft is greater than temp_valid_lft, the preferred lifetime
is temp_valid_lft.
@@ -2535,6 +2535,16 @@ max_desync_factor - INTEGER
Default: 600
+regen_min_advance - INTEGER
+ How far in advance (in seconds), at minimum, to create a new temporary
+ address before the current one is deprecated. This value is added to
+ the amount of time that may be required for duplicate address detection
+ to determine when to create a new address. Linux permits setting this
+ value to less than the default of 2 seconds, but a value less than 2
+ does not conform to RFC 8981.
+
+ Default: 2
+
regen_max_retry - INTEGER
 Number of attempts before giving up on generating
valid temporary addresses.
diff --git a/Documentation/networking/l2tp.rst b/Documentation/networking/l2tp.rst
index 7f383e99dbad..8496b467dea4 100644
--- a/Documentation/networking/l2tp.rst
+++ b/Documentation/networking/l2tp.rst
@@ -386,12 +386,19 @@ Sample userspace code:
- Create session PPPoX data socket::
+ /* Input: the L2TP tunnel UDP socket `tunnel_fd`, which needs to be
+ * bound already (both sockname and peername), otherwise it will not be
+ * ready.
+ */
+
struct sockaddr_pppol2tp sax;
- int fd;
+ int session_fd;
+ int ret;
+
+ session_fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
+ if (session_fd < 0)
+ return -errno;
- /* Note, the tunnel socket must be bound already, else it
- * will not be ready
- */
sax.sa_family = AF_PPPOX;
sax.sa_protocol = PX_PROTO_OL2TP;
sax.pppol2tp.fd = tunnel_fd;
@@ -406,12 +413,128 @@ Sample userspace code:
/* session_fd is the fd of the session's PPPoL2TP socket.
* tunnel_fd is the fd of the tunnel UDP / L2TPIP socket.
*/
- fd = connect(session_fd, (struct sockaddr *)&sax, sizeof(sax));
- if (fd < 0 ) {
+ ret = connect(session_fd, (struct sockaddr *)&sax, sizeof(sax));
+ if (ret < 0 ) {
+ close(session_fd);
+ return -errno;
+ }
+
+ return session_fd;
+
+L2TP control packets will still be available for read on `tunnel_fd`.
+
+ - Create PPP channel::
+
+ /* Input: the session PPPoX data socket `session_fd` which was created
+ * as described above.
+ */
+
+ int ppp_chan_fd;
+ int chindx;
+ int ret;
+
+ ret = ioctl(session_fd, PPPIOCGCHAN, &chindx);
+ if (ret < 0)
+ return -errno;
+
+ ppp_chan_fd = open("/dev/ppp", O_RDWR);
+ if (ppp_chan_fd < 0)
+ return -errno;
+
+ ret = ioctl(ppp_chan_fd, PPPIOCATTCHAN, &chindx);
+ if (ret < 0) {
+ close(ppp_chan_fd);
return -errno;
}
+
+ return ppp_chan_fd;
+
+LCP PPP frames will be available for read on `ppp_chan_fd`.
+
+ - Create PPP interface::
+
+ /* Input: the PPP channel `ppp_chan_fd` which was created as described
+ * above.
+ */
+
+ int ifunit = -1;
+ int ppp_if_fd;
+ int ret;
+
+ ppp_if_fd = open("/dev/ppp", O_RDWR);
+ if (ppp_if_fd < 0)
+ return -errno;
+
+ ret = ioctl(ppp_if_fd, PPPIOCNEWUNIT, &ifunit);
+ if (ret < 0) {
+ close(ppp_if_fd);
+ return -errno;
+ }
+
+ ret = ioctl(ppp_chan_fd, PPPIOCCONNECT, &ifunit);
+ if (ret < 0) {
+ close(ppp_if_fd);
+ return -errno;
+ }
+
+ return ppp_if_fd;
+
+IPCP/IPv6CP PPP frames will be available for read on `ppp_if_fd`.
+
+The ppp<ifunit> interface can then be configured as usual with netlink's
+RTM_NEWLINK, RTM_NEWADDR, RTM_NEWROUTE, or ioctl's SIOCSIFMTU, SIOCSIFADDR,
+SIOCSIFDSTADDR, SIOCSIFNETMASK, SIOCSIFFLAGS, or with the `ip` command.
+
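+As a minimal hedged sketch (the MTU value is arbitrary, and the error
+handling follows the samples above), the interface MTU could be set
+like this::
+
+        struct ifreq ifr;
+        int fd;
+        int ret;
+
+        fd = socket(AF_INET, SOCK_DGRAM, 0);
+        if (fd < 0)
+                return -errno;
+
+        memset(&ifr, 0, sizeof(ifr));
+        snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "ppp%d", ifunit);
+        ifr.ifr_mtu = 1400;
+
+        ret = ioctl(fd, SIOCSIFMTU, &ifr);
+        close(fd);
+        if (ret < 0)
+                return -errno;
+
+        return 0;
+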
+ - Bridging L2TP sessions which have PPP pseudowire types (this is also called
+ L2TP tunnel switching or L2TP multihop) is supported by bridging the PPP
+ channels of the two L2TP sessions to be bridged::
+
+ /* Input: the session PPPoX data sockets `session_fd1` and `session_fd2`
+ * which were created as described further above.
+ */
+
+ int ppp_chan_fd;
+ int chindx1;
+ int chindx2;
+ int ret;
+
+ ret = ioctl(session_fd1, PPPIOCGCHAN, &chindx1);
+ if (ret < 0)
+ return -errno;
+
+ ret = ioctl(session_fd2, PPPIOCGCHAN, &chindx2);
+ if (ret < 0)
+ return -errno;
+
+ ppp_chan_fd = open("/dev/ppp", O_RDWR);
+ if (ppp_chan_fd < 0)
+ return -errno;
+
+ ret = ioctl(ppp_chan_fd, PPPIOCATTCHAN, &chindx1);
+ if (ret < 0) {
+ close(ppp_chan_fd);
+ return -errno;
+ }
+
+ ret = ioctl(ppp_chan_fd, PPPIOCBRIDGECHAN, &chindx2);
+ close(ppp_chan_fd);
+ if (ret < 0)
+ return -errno;
+
return 0;
+Note that when bridging PPP channels, the PPP session is not locally
+terminated, and no local PPP interface is created. PPP frames arriving on one
+channel are directly passed to the other channel, and vice versa.
+
+The PPP channel does not need to be kept open. Only the session PPPoX data
+sockets need to be kept open.
+
+More generally, it is also possible in the same way to bridge a PPPoL2TP
+PPP channel with other types of PPP channels, such as PPPoE.
+
+See ppp_generic.rst for more details on the PPP side.
+
Old L2TPv2-only API
-------------------
diff --git a/Documentation/networking/net_cachelines/inet_sock.rst b/Documentation/networking/net_cachelines/inet_sock.rst
index a2babd0d7954..595d7ef5fc8b 100644
--- a/Documentation/networking/net_cachelines/inet_sock.rst
+++ b/Documentation/networking/net_cachelines/inet_sock.rst
@@ -1,9 +1,9 @@
.. SPDX-License-Identifier: GPL-2.0
.. Copyright (C) 2023 Google LLC
-=====================================================
-inet_connection_sock struct fast path usage breakdown
-=====================================================
+==========================================
+inet_sock struct fast path usage breakdown
+==========================================
Type Name fastpath_tx_access fastpath_rx_access comment
..struct ..inet_sock
diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst
index e75a53593bb9..dceb49d56a91 100644
--- a/Documentation/networking/net_cachelines/net_device.rst
+++ b/Documentation/networking/net_cachelines/net_device.rst
@@ -136,8 +136,8 @@ struct_netpoll_info* npinfo -
possible_net_t nd_net - read_mostly (dev_net)napi_busy_loop,tcp_v(4/6)_rcv,ip(v6)_rcv,ip(6)_input,ip(6)_input_finish
void* ml_priv
enum_netdev_ml_priv_type ml_priv_type
-struct_pcpu_lstats__percpu* lstats
-struct_pcpu_sw_netstats__percpu* tstats
+struct_pcpu_lstats__percpu* lstats read_mostly dev_lstats_add()
+struct_pcpu_sw_netstats__percpu* tstats read_mostly dev_sw_netstats_tx_add()
struct_pcpu_dstats__percpu* dstats
struct_garp_port* garp_port
struct_mrp_port* mrp_port
diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst
index 97d7a5c8e01c..1c154cbd1848 100644
--- a/Documentation/networking/net_cachelines/tcp_sock.rst
+++ b/Documentation/networking/net_cachelines/tcp_sock.rst
@@ -38,13 +38,13 @@ u32 max_window read_mostly -
u32 mss_cache read_mostly read_mostly tcp_rate_check_app_limited,tcp_current_mss,tcp_sync_mss,tcp_sndbuf_expand,tcp_tso_should_defer(tx);tcp_update_pacing_rate,tcp_clean_rtx_queue(rx)
u32 window_clamp read_mostly read_write tcp_rcv_space_adjust,__tcp_select_window
u32 rcv_ssthresh read_mostly - __tcp_select_window
-u82 scaling_ratio
+u8 scaling_ratio read_mostly read_mostly tcp_win_from_space
struct tcp_rack
u16 advmss - read_mostly tcp_rcv_space_adjust
u8 compressed_ack
u8:2 dup_ack_counter
u8:1 tlp_retrans
-u8:1 tcp_usec_ts
+u8:1 tcp_usec_ts read_mostly read_mostly
u32 chrono_start read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
u32[3] chrono_stat read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
u8:2 chrono_type read_write - tcp_chrono_start/stop(tcp_write_xmit,tcp_cwnd_validate,tcp_send_syn_data)
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index 390730a74332..b28c525e5d1e 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -15,6 +15,8 @@ Extended console support by Tejun Heo <tj@kernel.org>, May 1 2015
Release prepend support by Breno Leitao <leitao@debian.org>, Jul 7 2023
+Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024
+
Please send bug reports to Matt Mackall <mpm@selenic.com>
Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
@@ -171,6 +173,70 @@ You can modify these targets in runtime by creating the following targets::
cat cmdline1/remote_ip
10.0.0.3
+Append User Data
+----------------
+
+Custom user data can be appended to the end of messages with netconsole
+dynamic configuration enabled. User data entries can be modified without
+changing the "enabled" attribute of a target.
+
+Directories (keys) under `userdata` are limited to 54 characters in length,
+and data in `userdata/<key>/value` is limited to 200 bytes::
+
+ cd /sys/kernel/config/netconsole && mkdir cmdline0
+ cd cmdline0
+ mkdir userdata/foo
+ echo bar > userdata/foo/value
+ mkdir userdata/qux
+ echo baz > userdata/qux/value
+
+Messages will now include this additional user data::
+
+ echo "This is a message" > /dev/kmsg
+
+Sends::
+
+ 12,607,22085407756,-;This is a message
+ foo=bar
+ qux=baz
+
+Preview the userdata that will be appended with::
+
+ cd /sys/kernel/config/netconsole/cmdline0
+ for f in `ls userdata`; do echo $f=$(cat userdata/$f/value); done
+
+If a `userdata` entry is created but no data is written to the `value` file,
+the entry will be omitted from netconsole messages::
+
+ cd /sys/kernel/config/netconsole && mkdir cmdline0
+ cd cmdline0
+ mkdir userdata/foo
+ echo bar > userdata/foo/value
+ mkdir userdata/qux
+
+The `qux` key is omitted since it has no value::
+
+ echo "This is a message" > /dev/kmsg
+ 12,607,22085407756,-;This is a message
+ foo=bar
+
+Delete `userdata` entries with `rmdir`::
+
+ rmdir /sys/kernel/config/netconsole/cmdline0/userdata/qux
+
+.. warning::
+ When writing strings to user data values, input is broken up per line in
+ configfs store calls and this can cause confusing behavior::
+
+ mkdir userdata/testing
+ printf "val1\nval2" > userdata/testing/value
+ # userdata store value is called twice, first with "val1\n" then "val2"
+ # so "val2" is stored, being the last value stored
+ cat userdata/testing/value
+ val2
+
+ It is recommended not to write user data values containing newlines.
+
Extended console:
=================
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index 9e4cccb90b87..c2476917a6c3 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -252,8 +252,8 @@ ndo_eth_ioctl:
Context: process
ndo_get_stats:
- Synchronization: rtnl_lock() semaphore, dev_base_lock rwlock, or RCU.
- Context: atomic (can't sleep under rwlock or RCU)
+ Synchronization: rtnl_lock() semaphore, or RCU.
+ Context: atomic (can't sleep under RCU)
ndo_start_xmit:
Synchronization: __netif_tx_lock spinlock.
diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst
index 8054d33f449f..5bf285d73e8a 100644
--- a/Documentation/networking/sfp-phylink.rst
+++ b/Documentation/networking/sfp-phylink.rst
@@ -231,16 +231,136 @@ this documentation.
For further information on these methods, please see the inline
documentation in :c:type:`struct phylink_mac_ops <phylink_mac_ops>`.
-9. Remove calls to of_parse_phandle() for the PHY,
- of_phy_register_fixed_link() for fixed links etc. from the probe
- function, and replace with:
+9. Fill in the :c:type:`struct phylink_config <phylink_config>` fields with
+ a reference to the :c:type:`struct device <device>` associated to your
+ :c:type:`struct net_device <net_device>`:
.. code-block:: c
- struct phylink *phylink;
priv->phylink_config.dev = &dev.dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ Fill in the various speeds, pause and duplex modes your MAC can handle:
+
+ .. code-block:: c
+
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+10. Some Ethernet controllers work in conjunction with a PCS (Physical Coding
+ Sublayer) block that can handle, among other things, the encoding/decoding,
+ link establishment detection and autonegotiation. While some MACs have an
+ internal PCS whose operation is transparent, others require dedicated PCS
+ configuration for the link to become functional. In that case, phylink
+ provides a PCS abstraction through :c:type:`struct phylink_pcs <phylink_pcs>`.
+
+ Identify if your driver has one or more internal PCS blocks, and/or if
+ your controller can use an external PCS block that might be internally
+ connected to your controller.
+
+ If your controller doesn't have any internal PCS, you can go to step 11.
+
+ If your Ethernet controller contains one or several PCS blocks, create
+ one :c:type:`struct phylink_pcs <phylink_pcs>` instance per PCS block within
+ your driver's private data structure:
+
+ .. code-block:: c
+
+ struct phylink_pcs pcs;
+
+ Populate the relevant :c:type:`struct phylink_pcs_ops <phylink_pcs_ops>` to
+ configure your PCS. Create a :c:func:`pcs_get_state` function that reports
+ the inband link state, a :c:func:`pcs_config` function to configure your
+ PCS according to phylink-provided parameters, and a :c:func:`pcs_validate`
+ function that reports to phylink all accepted configuration parameters for
+ your PCS:
+
+ .. code-block:: c
+
+ struct phylink_pcs_ops foo_pcs_ops = {
+ .pcs_validate = foo_pcs_validate,
+ .pcs_get_state = foo_pcs_get_state,
+ .pcs_config = foo_pcs_config,
+ };
+
+ Arrange for PCS link state interrupts to be forwarded into
+ phylink, via:
+
+ .. code-block:: c
+
+ phylink_pcs_change(pcs, link_is_up);
+
+ where ``link_is_up`` is true if the link is currently up or false
+ otherwise. If a PCS is unable to provide these interrupts, then
+ it should set ``pcs->pcs_poll = true;`` when creating the PCS.
+
+11. If your controller relies on, or accepts the presence of an external PCS
+ controlled through its own driver, add a pointer to a phylink_pcs instance
+ in your driver private data structure:
+
+ .. code-block:: c
+
+ struct phylink_pcs *pcs;
+
+ The way of getting an instance of the actual PCS depends on the platform;
+ some PCS sit on an MDIO bus and are grabbed by passing a pointer to the
+ corresponding :c:type:`struct mii_bus <mii_bus>` and the PCS's address on
+ that bus. In this example, we assume the controller attaches to a Lynx PCS
+ instance:
+
+ .. code-block:: c
+
+ priv->pcs = lynx_pcs_create_mdiodev(bus, 0);
+
+ Some PCS can be obtained from firmware information:
+
+ .. code-block:: c
+
+ priv->pcs = lynx_pcs_create_fwnode(of_fwnode_handle(node));
+
+12. Populate the :c:func:`mac_select_pcs` callback and add it to your
+ :c:type:`struct phylink_mac_ops <phylink_mac_ops>` set of ops. This function
+ must return a pointer to the relevant :c:type:`struct phylink_pcs <phylink_pcs>`
+ that will be used for the requested link configuration:
+
+ .. code-block:: c
+
+ static struct phylink_pcs *foo_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+ {
+ struct foo_priv *priv = container_of(config, struct foo_priv,
+ phylink_config);
+
+ if ( /* 'interface' needs a PCS to function */ )
+ return priv->pcs;
+
+ return NULL;
+ }
+
+ See :c:func:`mvpp2_select_pcs` for an example of a driver that has multiple
+ internal PCS.
+
+13. Fill in all the :c:type:`phy_interface_t <phy_interface_t>` (i.e. all MAC to
+ PHY link modes) that your MAC can output. The following example shows a
+ configuration for a MAC that can handle all RGMII modes, SGMII and 1000BaseX.
+ You must adjust these according to what your MAC and all PCS associated
+ with this MAC are capable of, and not just the interface you wish to use:
+
+ .. code-block:: c
+
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+14. Remove calls to of_parse_phandle() for the PHY,
+ of_phy_register_fixed_link() for fixed links etc. from the probe
+ function, and replace with:
+
+ .. code-block:: c
+
+ struct phylink *phylink;
+
phylink = phylink_create(&priv->phylink_config, node, phy_mode, &phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
@@ -249,14 +369,14 @@ this documentation.
priv->phylink = phylink;
- and arrange to destroy the phylink in the probe failure path as
- appropriate and the removal path too by calling:
+ and arrange to destroy the phylink in the probe failure path as
+ appropriate and the removal path too by calling:
- .. code-block:: c
+ .. code-block:: c
phylink_destroy(priv->phylink);
-10. Arrange for MAC link state interrupts to be forwarded into
+15. Arrange for MAC link state interrupts to be forwarded into
phylink, via:
.. code-block:: c
@@ -264,17 +384,16 @@ this documentation.
phylink_mac_change(priv->phylink, link_is_up);
where ``link_is_up`` is true if the link is currently up or false
- otherwise. If a MAC is unable to provide these interrupts, then
- it should set ``priv->phylink_config.pcs_poll = true;`` in step 9.
+ otherwise.
-11. Verify that the driver does not call::
+16. Verify that the driver does not call::
netif_carrier_on()
netif_carrier_off()
- as these will interfere with phylink's tracking of the link state,
- and cause phylink to omit calls via the :c:func:`mac_link_up` and
- :c:func:`mac_link_down` methods.
+ as these will interfere with phylink's tracking of the link state,
+ and cause phylink to omit calls via the :c:func:`mac_link_up` and
+ :c:func:`mac_link_down` methods.
Network drivers should call phylink_stop() and phylink_start() via their
suspend/resume paths, which ensures that the appropriate
diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
index 551b3cc29a41..75e017dfa825 100644
--- a/Documentation/networking/statistics.rst
+++ b/Documentation/networking/statistics.rst
@@ -41,6 +41,15 @@ If `-s` is specified once the detailed errors won't be shown.
`ip` supports JSON formatting via the `-j` option.
+Queue statistics
+~~~~~~~~~~~~~~~~
+
+Queue statistics are accessible via the netdev netlink family.
+
+Currently no widely distributed CLI exists to access those statistics.
+Kernel development tools (ynl) can be used to experiment with them,
+see `Documentation/userspace-api/netlink/intro-specs.rst`.
+
Protocol-specific statistics
----------------------------
@@ -147,6 +156,12 @@ Statistics are reported both in the responses to link information
requests (`RTM_GETLINK`) and statistic requests (`RTM_GETSTATS`,
when `IFLA_STATS_LINK_64` bit is set in the `.filter_mask` of the request).
+netdev (netlink)
+~~~~~~~~~~~~~~~~
+
+`netdev` generic netlink family allows accessing page pool and per queue
+statistics.
+
ethtool
-------
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst
index 535077cbeb07..bfea9d8579ed 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -71,9 +71,9 @@ Callbacks to implement
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
/* Solely packet offload callbacks */
- void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
@@ -191,6 +191,6 @@ xdo_dev_policy_free() on any remaining offloaded states.
Outcome of HW handling packets, the XFRM core can't count hard, soft limits.
The HW/driver are responsible to perform it and provide accurate data when
-xdo_dev_state_update_curlft() is called. In case of one of these limits
+xdo_dev_state_update_stats() is called. When one of these limits is
 reached, the driver needs to call xfrm_state_check_expire() to make sure
 that XFRM performs the rekeying sequence.
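+
+As a hedged driver-side sketch (the driver name and the way HW counters
+are read back are hypothetical; the ``xfrm_state`` fields are real)::
+
+	static void foo_xdo_dev_state_update_stats(struct xfrm_state *x)
+	{
+		/* foo_hw_read_sa() is a hypothetical helper returning the
+		 * per-SA byte/packet counters maintained by the HW.
+		 */
+		struct foo_sa_counters c = foo_hw_read_sa(x);
+
+		x->curlft.bytes = c.bytes;
+		x->curlft.packets = c.packets;
+
+		/* Let XFRM trigger the rekeying sequence once a hard
+		 * limit is crossed.
+		 */
+		if (x->curlft.bytes >= x->lft.hard_byte_limit ||
+		    x->curlft.packets >= x->lft.hard_packet_limit)
+			xfrm_state_check_expire(x);
+	}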
diff --git a/Documentation/process/4.Coding.rst b/Documentation/process/4.Coding.rst
index 1f0d81f44e14..c2046dec0c2f 100644
--- a/Documentation/process/4.Coding.rst
+++ b/Documentation/process/4.Coding.rst
@@ -66,6 +66,10 @@ for aligning variables/macros, for reflowing text and other similar tasks.
See the file :ref:`Documentation/process/clang-format.rst <clangformat>`
for more details.
+Some basic editor settings, such as indentation and line endings, will be
+set automatically if you are using an editor that is compatible with
+EditorConfig. See the official EditorConfig website for more information:
+https://editorconfig.org/
Abstraction layers
******************
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 6db37a46d305..c48382c6b477 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -735,6 +735,10 @@ for aligning variables/macros, for reflowing text and other similar tasks.
See the file :ref:`Documentation/process/clang-format.rst <clangformat>`
for more details.
+Some basic editor settings, such as indentation and line endings, will be
+set automatically if you are using an editor that is compatible with
+EditorConfig. See the official EditorConfig website for more information:
+https://editorconfig.org/
10) Kconfig configuration files
-------------------------------
diff --git a/Documentation/process/cve.rst b/Documentation/process/cve.rst
new file mode 100644
index 000000000000..5e2753eff729
--- /dev/null
+++ b/Documentation/process/cve.rst
@@ -0,0 +1,121 @@
+====
+CVEs
+====
+
+Common Vulnerabilities and Exposures (CVE®) numbers were developed as an
+unambiguous way to identify, define, and catalog publicly disclosed
+security vulnerabilities. Over time, their usefulness has declined with
+regards to the kernel project, and CVE numbers were very often assigned
+in inappropriate ways and for inappropriate reasons. Because of this,
+the kernel development community has tended to avoid them. However, the
+combination of continuing pressure to assign CVEs and other forms of
+security identifiers, and ongoing abuses by individuals and companies
+outside of the kernel community has made it clear that the kernel
+community should have control over those assignments.
+
+The Linux kernel developer team does have the ability to assign CVEs for
+potential Linux kernel security issues. This assignment is independent
+of the :doc:`normal Linux kernel security bug reporting
+process<../process/security-bugs>`.
+
+A list of all assigned CVEs for the Linux kernel can be found in the
+archives of the linux-cve mailing list, as seen on
+https://lore.kernel.org/linux-cve-announce/. To get notice of the
+assigned CVEs, please `subscribe
+<https://subspace.kernel.org/subscribing.html>`_ to that mailing list.
+
+Process
+=======
+
+As part of the normal stable release process, kernel changes that are
+potentially security issues are identified by the developers responsible
+for CVE number assignments and have CVE numbers automatically assigned
+to them. These assignments are published on the linux-cve-announce
+mailing list as announcements on a frequent basis.
+
+Note, due to the layer at which the Linux kernel is in a system, almost
+any bug might be exploitable to compromise the security of the kernel,
+but the possibility of exploitation is often not evident when the bug is
+fixed. Because of this, the CVE assignment team is overly cautious and
+assigns CVE numbers to any bugfix that they identify. This
+explains the seemingly large number of CVEs that are issued by the Linux
+kernel team.
+
+If the CVE assignment team misses a specific fix that any user feels
+should have a CVE assigned to it, please email them at <cve@kernel.org>
+and the team there will work with you on it. Note that no potential
+security issues should be sent to this alias; it is ONLY for assignment
+of CVEs for fixes that are already in released kernel trees. If you
+feel you have found an unfixed security issue, please follow the
+:doc:`normal Linux kernel security bug reporting
+process<../process/security-bugs>`.
+
+No CVEs will be automatically assigned for unfixed security issues in
+the Linux kernel; assignment will only automatically happen after a fix
+is available and applied to a stable kernel tree, and it will be tracked
+that way by the git commit id of the original fix. If anyone wishes to
+have a CVE assigned before an issue is resolved with a commit, please
+contact the kernel CVE assignment team at <cve@kernel.org> to get an
+identifier assigned from their batch of reserved identifiers.
+
+No CVEs will be assigned for any issue found in a version of the kernel
+that is not currently being actively supported by the Stable/LTS kernel
+team. A list of the currently supported kernel branches can be found at
+https://kernel.org/releases.html
+
+Disputes of assigned CVEs
+=========================
+
+The authority to dispute or modify an assigned CVE for a specific kernel
+change lies solely with the maintainers of the relevant subsystem
+affected. This principle ensures a high degree of accuracy and
+accountability in vulnerability reporting. Only those individuals with
+deep expertise and intimate knowledge of the subsystem can effectively
+assess the validity and scope of a reported vulnerability and determine
+its appropriate CVE designation. Any attempt to modify or dispute a CVE
+outside of this designated authority could lead to confusion, inaccurate
+reporting, and ultimately, compromised systems.
+
+Invalid CVEs
+============
+
+If a security issue is found in a Linux kernel that is only supported by
+a Linux distribution due to the changes that have been made by that
+distribution, or due to the distribution supporting a kernel version
+that is no longer one of the kernel.org supported releases, then a CVE
+cannot be assigned by the Linux kernel CVE team, and must be asked for
+from that Linux distribution itself.
+
+Any CVE that is assigned against the Linux kernel for an actively
+supported kernel version by any group other than the kernel CVE
+assignment team should not be treated as a valid CVE. Please notify the
+kernel CVE assignment team at <cve@kernel.org> so that they can work to
+invalidate such entries through the CNA remediation process.
+
+Applicability of specific CVEs
+==============================
+
+As the Linux kernel can be used in many different ways, with many
+different ways of accessing it by external users, or no access at all,
+the applicability of any specific CVE is up to the user of Linux to
+determine; it is not up to the CVE assignment team. Please do not
+contact us to attempt to determine the applicability of any specific
+CVE.
+
+Also, as the source tree is so large, and any one system only uses a
+small subset of the source tree, any users of Linux should be aware that
+large numbers of assigned CVEs are not relevant for their systems.
+
+In short, we do not know your use case, and we do not know what portions
+of the kernel that you use, so there is no way for us to determine if a
+specific CVE is relevant for your system.
+
+As always, it is best to take all released kernel changes, as they are
+tested together in a unified whole by many community members, and not as
+individual cherry-picked changes. Also note that for many bugs, the
+solution to the overall problem is not found in a single change, but by
+the sum of many fixes on top of each other. Ideally CVEs will be
+assigned to all fixes for all issues, but sometimes we will fail to
+notice fixes, therefore assume that some changes without a CVE assigned
+might be relevant to take.
+
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 6cb732dfcc72..de9cbb7bd7eb 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -81,6 +81,7 @@ of special classes of bugs: regressions and security problems.
handling-regressions
security-bugs
+ cve
embargoed-hardware-issues
Maintainer information
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index 84ee60fceef2..fd96e4a3cef9 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -431,7 +431,7 @@ patchwork checks
Checks in patchwork are mostly simple wrappers around existing kernel
scripts, the sources are available at:
-https://github.com/kuba-moo/nipa/tree/master/tests
+https://github.com/linux-netdev/nipa/tree/master/tests
**Do not** post your patches just to run them through the checks.
You must ensure that your patches are ready by testing them locally
diff --git a/Documentation/process/security-bugs.rst b/Documentation/process/security-bugs.rst
index 692a3ba56cca..56c560a00b37 100644
--- a/Documentation/process/security-bugs.rst
+++ b/Documentation/process/security-bugs.rst
@@ -99,9 +99,8 @@ CVE assignment
The security team does not assign CVEs, nor do we require them for
reports or fixes, as this can needlessly complicate the process and may
delay the bug handling. If a reporter wishes to have a CVE identifier
-assigned, they should find one by themselves, for example by contacting
-MITRE directly. However under no circumstances will a patch inclusion
-be delayed to wait for a CVE identifier to arrive.
+assigned for a confirmed issue, they can contact the :doc:`kernel CVE
+assignment team<../process/cve>` to obtain one.
Non-disclosure agreements
-------------------------
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index b91e9ef4d0c2..73203ba1e901 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -12,10 +12,11 @@ which uses ``libclang``.
Below is a general summary of architectures that currently work. Level of
support corresponds to ``S`` values in the ``MAINTAINERS`` file.
-============ ================ ==============================================
-Architecture Level of support Constraints
-============ ================ ==============================================
-``um`` Maintained ``x86_64`` only.
-``x86`` Maintained ``x86_64`` only.
-============ ================ ==============================================
+============= ================ ==============================================
+Architecture Level of support Constraints
+============= ================ ==============================================
+``loongarch`` Maintained -
+``um`` Maintained ``x86_64`` only.
+``x86`` Maintained ``x86_64`` only.
+============= ================ ==============================================
diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
index b9df61eb4501..03ace5f01b5c 100644
--- a/Documentation/sphinx/kernel_feat.py
+++ b/Documentation/sphinx/kernel_feat.py
@@ -109,7 +109,7 @@ class KernelFeat(Directive):
else:
out_lines += line + "\n"
- nodeList = self.nestedParse(out_lines, fname)
+ nodeList = self.nestedParse(out_lines, self.arguments[0])
return nodeList
def nestedParse(self, lines, fname):
diff --git a/Documentation/sphinx/templates/kernel-toc.html b/Documentation/sphinx/templates/kernel-toc.html
index b58efa99df52..41f1efbe64bb 100644
--- a/Documentation/sphinx/templates/kernel-toc.html
+++ b/Documentation/sphinx/templates/kernel-toc.html
@@ -12,5 +12,7 @@
<script type="text/javascript"> <!--
var sbar = document.getElementsByClassName("sphinxsidebar")[0];
let currents = document.getElementsByClassName("current")
- sbar.scrollTop = currents[currents.length - 1].offsetTop;
+ if (currents.length) {
+ sbar.scrollTop = currents[currents.length - 1].offsetTop;
+ }
--> </script>
diff --git a/Documentation/sphinx/translations.py b/Documentation/sphinx/translations.py
index 47161e6eba99..32c2b32b2b5e 100644
--- a/Documentation/sphinx/translations.py
+++ b/Documentation/sphinx/translations.py
@@ -29,10 +29,7 @@ all_languages = {
}
class LanguagesNode(nodes.Element):
- def __init__(self, current_language, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- self.current_language = current_language
+ pass
class TranslationsTransform(Transform):
default_priority = 900
@@ -49,7 +46,8 @@ class TranslationsTransform(Transform):
# normalize docname to be the untranslated one
docname = os.path.join(*components[2:])
- new_nodes = LanguagesNode(all_languages[this_lang_code])
+ new_nodes = LanguagesNode()
+ new_nodes['current_language'] = all_languages[this_lang_code]
for lang_code, lang_name in all_languages.items():
if lang_code == this_lang_code:
@@ -84,7 +82,7 @@ def process_languages(app, doctree, docname):
html_content = app.builder.templates.render('translations.html',
context={
- 'current_language': node.current_language,
+ 'current_language': node['current_language'],
'languages': languages,
})
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index 8cd62c466d20..077dfac7ed98 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -448,17 +448,17 @@ Function-specific configfs interface
The function name to use when creating the function directory is "ncm".
The NCM function provides these attributes in its function directory:
- =============== ==================================================
- ifname network device interface name associated with this
- function instance
- qmult queue length multiplier for high and super speed
- host_addr MAC address of host's end of this
- Ethernet over USB link
- dev_addr MAC address of device's end of this
- Ethernet over USB link
- max_segment_size Segment size required for P2P connections. This
- will set MTU to (max_segment_size - 14 bytes)
- =============== ==================================================
+ ======================= ==================================================
+ ifname network device interface name associated with this
+ function instance
+ qmult queue length multiplier for high and super speed
+ host_addr MAC address of host's end of this
+ Ethernet over USB link
+ dev_addr MAC address of device's end of this
+ Ethernet over USB link
+ max_segment_size Segment size required for P2P connections. This
+ will set the MTU to (max_segment_size - 14) bytes
+ ======================= ==================================================
and after creating the functions/ncm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 457e16f06e04..b33918232f78 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -309,6 +309,7 @@ Code Seq# Include File Comments
0x89 0B-DF linux/sockios.h
0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range
+0x8A 00-1F linux/eventpoll.h
0x8B all linux/wireless.h
0x8C 00-3F WiNRADiO driver
<http://www.winradio.com.au/>
diff --git a/Documentation/userspace-api/netlink/netlink-raw.rst b/Documentation/userspace-api/netlink/netlink-raw.rst
index 1e14f5f22b8e..1990eea772d0 100644
--- a/Documentation/userspace-api/netlink/netlink-raw.rst
+++ b/Documentation/userspace-api/netlink/netlink-raw.rst
@@ -150,3 +150,45 @@ attributes from an ``attribute-set``. For example the following
Note that a selector attribute must appear in a netlink message before any
sub-message attributes that depend on it.
+
+If an attribute such as ``kind`` is defined at more than one nest level, then a
+sub-message selector will be resolved using the value 'closest' to the selector.
+For example, if the same attribute name is defined in a nested ``attribute-set``
+alongside a sub-message selector and also in a top level ``attribute-set``, then
+the selector will be resolved using the value 'closest' to the selector. If the
+value is not present in the message at the same level as defined in the spec
+then this is an error.
+
+Nested struct definitions
+-------------------------
+
+Many raw netlink families such as :doc:`tc<../../networking/netlink_spec/tc>`
+make use of nested struct definitions. The ``netlink-raw`` schema makes it
+possible to embed a struct within a struct definition using the ``struct``
+property. For example, the following struct definition embeds the
+``tc-ratespec`` struct definition for both the ``rate`` and the ``peakrate``
+members of ``struct tc-tbf-qopt``.
+
+.. code-block:: yaml
+
+ -
+ name: tc-tbf-qopt
+ type: struct
+ members:
+ -
+ name: rate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: peakrate
+ type: binary
+ struct: tc-ratespec
+ -
+ name: limit
+ type: u32
+ -
+ name: buffer
+ type: u32
+ -
+ name: mtu
+ type: u32
diff --git a/Documentation/virt/hyperv/index.rst b/Documentation/virt/hyperv/index.rst
index 4a7a1b738bbe..de447e11b4a5 100644
--- a/Documentation/virt/hyperv/index.rst
+++ b/Documentation/virt/hyperv/index.rst
@@ -10,3 +10,4 @@ Hyper-V Enlightenments
overview
vmbus
clocks
+ vpci
diff --git a/Documentation/virt/hyperv/vpci.rst b/Documentation/virt/hyperv/vpci.rst
new file mode 100644
index 000000000000..b65b2126ede3
--- /dev/null
+++ b/Documentation/virt/hyperv/vpci.rst
@@ -0,0 +1,316 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+PCI pass-thru devices
+=========================
+In a Hyper-V guest VM, PCI pass-thru devices (also called
+virtual PCI devices, or vPCI devices) are physical PCI devices
+that are mapped directly into the VM's physical address space.
+Guest device drivers can interact directly with the hardware
+without intermediation by the host hypervisor. This approach
+provides higher bandwidth access to the device with lower
+latency, compared with devices that are virtualized by the
+hypervisor. The device should appear to the guest just as it
+would when running on bare metal, so no changes are required
+to the Linux device drivers for the device.
+
+Hyper-V terminology for vPCI devices is "Discrete Device
+Assignment" (DDA). Public documentation for Hyper-V DDA is
+available here: `DDA`_
+
+.. _DDA: https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/plan-for-deploying-devices-using-discrete-device-assignment
+
+DDA is typically used for storage controllers, such as NVMe,
+and for GPUs. A similar mechanism for NICs is called SR-IOV
+and produces the same benefits by allowing a guest device
+driver to interact directly with the hardware. See Hyper-V
+public documentation here: `SR-IOV`_
+
+.. _SR-IOV: https://learn.microsoft.com/en-us/windows-hardware/drivers/network/overview-of-single-root-i-o-virtualization--sr-iov-
+
+This discussion of vPCI devices includes DDA and SR-IOV
+devices.
+
+Device Presentation
+-------------------
+Hyper-V provides full PCI functionality for a vPCI device when
+it is operating, so the Linux device driver for the device can
+be used unchanged, provided it uses the correct Linux kernel
+APIs for accessing PCI config space and for other integration
+with Linux. But the initial detection of the PCI device and
+its integration with the Linux PCI subsystem must use Hyper-V
+specific mechanisms. Consequently, vPCI devices on Hyper-V
+have a dual identity. They are initially presented to Linux
+guests as VMBus devices via the standard VMBus "offer"
+mechanism, so they have a VMBus identity and appear under
+/sys/bus/vmbus/devices. The VMBus vPCI driver in Linux at
+drivers/pci/controller/pci-hyperv.c handles a newly introduced
+vPCI device by fabricating a PCI bus topology and creating all
+the normal PCI device data structures in Linux that would
+exist if the PCI device were discovered via ACPI on a
+bare-metal system. Once those data structures are set up, the
+device also has a normal PCI identity in Linux, and the normal
+Linux device driver for the vPCI device can function as if it
+were running in Linux on bare metal. Because vPCI devices are
+presented dynamically through the VMBus offer mechanism, they
+do not appear in the Linux guest's ACPI tables. vPCI devices
+may be added to a VM or removed from a VM at any time during
+the life of the VM, and not just during initial boot.
+
+With this approach, the vPCI device is a VMBus device and a
+PCI device at the same time. In response to the VMBus offer
+message, the hv_pci_probe() function runs and establishes a
+VMBus connection to the vPCI VSP on the Hyper-V host. That
+connection has a single VMBus channel. The channel is used to
+exchange messages with the vPCI VSP for the purpose of setting
+up and configuring the vPCI device in Linux. Once the device
+is fully configured in Linux as a PCI device, the VMBus
+channel is used only if Linux changes which vCPU will be
+interrupted in the guest, or if the vPCI device is removed from
+the VM while the VM is running. The ongoing operation of the
+device happens directly between the Linux device driver for
+the device and the hardware, with VMBus and the VMBus channel
+playing no role.
+
+PCI Device Setup
+----------------
+PCI device setup follows a sequence that Hyper-V originally
+created for Windows guests, and that can be ill-suited for
+Linux guests due to differences in the overall structure of
+the Linux PCI subsystem compared with Windows. Nonetheless,
+with a bit of hackery in the Hyper-V virtual PCI driver for
+Linux, the virtual PCI device is set up in Linux so that
+generic Linux PCI subsystem code and the Linux driver for the
+device "just work".
+
+Each vPCI device is set up in Linux to be in its own PCI
+domain with a host bridge. The PCI domainID is derived from
+bytes 4 and 5 of the instance GUID assigned to the VMBus vPCI
+device. The Hyper-V host does not guarantee that these bytes
+are unique, so hv_pci_probe() has an algorithm to resolve
+collisions. The collision resolution is intended to be stable
+across reboots of the same VM so that the PCI domainIDs don't
+change, as the domainID appears in the user space
+configuration of some devices.
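+
+As an illustrative sketch only: the derivation might combine the two
+GUID bytes into a 16-bit value as below. The byte order and function
+name here are assumptions, not the exact pci-hyperv.c logic, and the
+collision-resolution step is omitted.
+
+.. code-block:: c
+
+  #include <stdint.h>
+
+  /* Illustrative: bytes 4 and 5 of a 16-byte VMBus instance GUID
+   * form a candidate PCI domain ID; the real driver additionally
+   * resolves collisions between devices.
+   */
+  static uint16_t candidate_pci_domain(const uint8_t guid[16])
+  {
+          return (uint16_t)((guid[5] << 8) | guid[4]);
+  }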
+
+hv_pci_probe() allocates a guest MMIO range to be used as PCI
+config space for the device. This MMIO range is communicated
+to the Hyper-V host over the VMBus channel as part of telling
+the host that the device is ready to enter d0. See
+hv_pci_enter_d0(). When the guest subsequently accesses this
+MMIO range, the Hyper-V host intercepts the accesses and maps
+them to the physical device PCI config space.
+
+hv_pci_probe() also gets BAR information for the device from
+the Hyper-V host, and uses this information to allocate MMIO
+space for the BARs. That MMIO space is then set up to be
+associated with the host bridge so that it works when generic
+PCI subsystem code in Linux processes the BARs.
+
+Finally, hv_pci_probe() creates the root PCI bus. At this
+point the Hyper-V virtual PCI driver hackery is done, and the
+normal Linux PCI machinery for scanning the root bus works to
+detect the device, to perform driver matching, and to
+initialize the driver and device.
+
+PCI Device Removal
+------------------
+A Hyper-V host may initiate removal of a vPCI device from a
+guest VM at any time during the life of the VM. The removal
+is instigated by an admin action taken on the Hyper-V host and
+is not under the control of the guest OS.
+
+A guest VM is notified of the removal by an unsolicited
+"Eject" message sent from the host to the guest over the VMBus
+channel associated with the vPCI device. Upon receipt of such
+a message, the Hyper-V virtual PCI driver in Linux
+asynchronously invokes Linux kernel PCI subsystem calls to
+shut down and remove the device. When those calls are
+complete, an "Ejection Complete" message is sent back to
+Hyper-V over the VMBus channel indicating that the device has
+been removed. At this point, Hyper-V sends a VMBus rescind
+message to the Linux guest, which the VMBus driver in Linux
+processes by removing the VMBus identity for the device. Once
+that processing is complete, all vestiges of the device having
+been present are gone from the Linux kernel. The rescind
+message also indicates to the guest that Hyper-V has stopped
+providing support for the vPCI device in the guest. If the
+guest were to attempt to access that device's MMIO space, it
+would be an invalid reference. Hypercalls affecting the device
+return errors, and any further messages sent in the VMBus
+channel are ignored.
+
+After sending the Eject message, Hyper-V allows the guest VM
+60 seconds to cleanly shut down the device and respond with
+Ejection Complete before sending the VMBus rescind
+message. If for any reason the Eject steps don't complete
+within the allowed 60 seconds, the Hyper-V host forcibly
+performs the rescind steps, which will likely result in
+cascading errors in the guest because the device is now no
+longer present from the guest standpoint and accessing the
+device MMIO space will fail.
+
+Because ejection is asynchronous and can happen at any point
+during the guest VM lifecycle, proper synchronization in the
+Hyper-V virtual PCI driver is very tricky. Ejection has been
+observed even before a newly offered vPCI device has been
+fully set up. The Hyper-V virtual PCI driver has been updated
+several times over the years to fix race conditions when
+ejections happen at inopportune times. Care must be taken when
+modifying this code to prevent re-introducing such problems.
+See comments in the code.
+
+Interrupt Assignment
+--------------------
+The Hyper-V virtual PCI driver supports vPCI devices using
+MSI, multi-MSI, or MSI-X. Assigning the guest vCPU that will
+receive the interrupt for a particular MSI or MSI-X message is
+complex because of the way the Linux setup of IRQs maps onto
+the Hyper-V interfaces. For the single-MSI and MSI-X cases,
+Linux calls hv_compose_msi_msg() twice, with the first call
+containing a dummy vCPU and the second call containing the
+real vCPU. Finally, hv_irq_unmask() is called (on x86), or
+the GICD registers are set (on arm64), to specify the real
+vCPU again. Each of these three calls interacts
+with Hyper-V, which must decide which physical CPU should
+receive the interrupt before it is forwarded to the guest VM.
+Unfortunately, the Hyper-V decision-making process is a bit
+limited, and can result in concentrating the physical
+interrupts on a single CPU, causing a performance bottleneck.
+See details about how this is resolved in the extensive
+comment above the function hv_compose_msi_req_get_cpu().
+
+The Hyper-V virtual PCI driver implements the
+irq_chip.irq_compose_msi_msg function as hv_compose_msi_msg().
+Unfortunately, on Hyper-V the implementation requires sending
+a VMBus message to the Hyper-V host and awaiting an interrupt
+indicating receipt of a reply message. Since
+irq_chip.irq_compose_msi_msg can be called with IRQ locks
+held, the normal approach of sleeping until awakened by the
+interrupt doesn't work. Instead hv_compose_msi_msg() must send the
+VMBus message, and then poll for the completion message. As
+further complexity, the vPCI device could be ejected/rescinded
+while the polling is in progress, so this scenario must be
+detected as well. See comments in the code regarding this
+very tricky area.
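+
+A minimal sketch of this send-then-poll pattern, assuming hypothetical
+helpers (vpci_send_msg(), vpci_reply_arrived(), vpci_rescinded()) for
+the channel operations; the real logic lives in hv_compose_msi_msg():
+
+.. code-block:: c
+
+  #include <errno.h>
+  #include <stdbool.h>
+
+  struct vpci_chan;                          /* opaque, hypothetical */
+  int vpci_send_msg(struct vpci_chan *c);    /* hypothetical helpers */
+  bool vpci_reply_arrived(struct vpci_chan *c);
+  bool vpci_rescinded(struct vpci_chan *c);
+
+  /* Send the request, then spin for the reply; sleeping is not an
+   * option because IRQ locks may be held, and a rescind can arrive
+   * while polling and must terminate the wait.
+   */
+  static int compose_msg_polled(struct vpci_chan *c)
+  {
+          int ret = vpci_send_msg(c);
+
+          if (ret)
+                  return ret;
+          while (!vpci_reply_arrived(c)) {
+                  if (vpci_rescinded(c))
+                          return -ENODEV;  /* ejected mid-request */
+          }
+          return 0;
+  }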
+
+Most of the code in the Hyper-V virtual PCI driver
+(pci-hyperv.c) applies to Hyper-V and Linux guests running on x86
+and on arm64 architectures. But there are differences in how
+interrupt assignments are managed. On x86, the Hyper-V
+virtual PCI driver in the guest must make a hypercall to tell
+Hyper-V which guest vCPU should be interrupted by each
+MSI/MSI-X interrupt, and the x86 interrupt vector number that
+the x86_vector IRQ domain has picked for the interrupt. This
+hypercall is made by hv_arch_irq_unmask(). On arm64, the
+Hyper-V virtual PCI driver manages the allocation of an SPI
+for each MSI/MSI-X interrupt. The Hyper-V virtual PCI driver
+stores the allocated SPI in the architectural GICD registers,
+which Hyper-V emulates, so no hypercall is necessary as with
+x86. Hyper-V does not support using LPIs for vPCI devices in
+arm64 guest VMs because it does not emulate a GICv3 ITS.
+
+The Hyper-V virtual PCI driver in Linux supports vPCI devices
+whose drivers create managed or unmanaged Linux IRQs. If the
+smp_affinity for an unmanaged IRQ is updated via the /proc/irq
+interface, the Hyper-V virtual PCI driver is called to tell
+the Hyper-V host to change the interrupt targeting and
+everything works properly. However, on x86 if the x86_vector
+IRQ domain needs to reassign an interrupt vector due to
+running out of vectors on a CPU, there's no path to inform the
+Hyper-V host of the change, and things break. Fortunately,
+guest VMs operate in a constrained device environment where
+exhausting all the vectors on a CPU doesn't happen. Since this
+problem is theoretical rather than practical, it has been
+left unaddressed.
+
+DMA
+---
+By default, Hyper-V pins all guest VM memory in the host
+when the VM is created, and programs the physical IOMMU to
+allow the VM to have DMA access to all its memory. Hence
+it is safe to assign PCI devices to the VM, and allow the
+guest operating system to program the DMA transfers. The
+physical IOMMU prevents a malicious guest from initiating
+DMA to memory belonging to the host or to other VMs on the
+host. From the Linux guest standpoint, such DMA transfers
+are in "direct" mode since Hyper-V does not provide a virtual
+IOMMU in the guest.
+
+Hyper-V assumes that physical PCI devices always perform
+cache-coherent DMA. When running on x86, this behavior is
+required by the architecture. When running on arm64, the
+architecture allows for both cache-coherent and
+non-cache-coherent devices, with the behavior of each device
+specified in the ACPI DSDT. But when a PCI device is assigned
+to a guest VM, that device does not appear in the DSDT, so the
+Hyper-V VMBus driver propagates cache-coherency information
+from the VMBus node in the ACPI DSDT to all VMBus devices,
+including vPCI devices (since they have a dual identity as a VMBus
+device and as a PCI device). See vmbus_dma_configure().
+Current Hyper-V versions always indicate that the VMBus is
+cache coherent, so vPCI devices on arm64 always get marked as
+cache coherent and the CPU does not perform any sync
+operations as part of dma_map/unmap_*() calls.
+
+vPCI protocol versions
+----------------------
+As previously described, during vPCI device setup and teardown,
+messages are passed over a VMBus channel between the Hyper-V
+host and the Hyper-V vPCI driver in the Linux guest. Some
+messages have been revised in newer versions of Hyper-V, so
+the guest and host must agree on the vPCI protocol version to
+be used. The version is negotiated when communication over
+the VMBus channel is first established. See
+hv_pci_protocol_negotiation(). Newer versions of the protocol
+extend support to VMs with more than 64 vCPUs, and provide
+additional information about the vPCI device, such as the
+guest virtual NUMA node to which it is most closely affined in
+the underlying hardware.
+
+Guest NUMA node affinity
+------------------------
+When the vPCI protocol version provides it, the guest NUMA
+node affinity of the vPCI device is stored as part of the Linux
+device information for subsequent use by the Linux driver. See
+hv_pci_assign_numa_node(). If the negotiated protocol version
+does not support the host providing NUMA affinity information,
+the Linux guest defaults the device NUMA node to 0. But even
+when the negotiated protocol version includes NUMA affinity
+information, the ability of the host to provide such
+information depends on certain host configuration options. If
+the guest receives NUMA node value "0", it could mean NUMA
+node 0, or it could mean "no information is available".
+Unfortunately it is not possible to distinguish the two cases
+from the guest side.
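+
+A minimal sketch of the defaulting described above, assuming a
+hypothetical flag for whether the negotiated protocol carries NUMA
+information; see hv_pci_assign_numa_node() for the real logic:
+
+.. code-block:: c
+
+  #include <stdbool.h>
+
+  static int resolve_numa_node(bool proto_has_numa, int host_node)
+  {
+          if (!proto_has_numa)
+                  return 0;     /* old protocol: default to node 0 */
+          return host_node;     /* 0 stays ambiguous, as noted above */
+  }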
+
+PCI config space access in a CoCo VM
+------------------------------------
+Linux PCI device drivers access PCI config space using a
+standard set of functions provided by the Linux PCI subsystem.
+In Hyper-V guests these standard functions map to functions
+hv_pcifront_read_config() and hv_pcifront_write_config()
+in the Hyper-V virtual PCI driver. In normal VMs,
+these hv_pcifront_*() functions directly access the PCI config
+space, and the accesses trap to Hyper-V to be handled.
+But in CoCo VMs, memory encryption prevents Hyper-V
+from reading the guest instruction stream to emulate the
+access, so the hv_pcifront_*() functions must invoke
+hypercalls with explicit arguments describing the access to be
+made.
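+
+A hedged sketch of the explicit-argument style of access: both the
+structure and the hypercall wrapper below are hypothetical, standing
+in for the real interfaces used by the hv_pcifront_*() functions.
+
+.. code-block:: c
+
+  struct cfg_access {
+          unsigned int offset;  /* offset within config space */
+          unsigned int size;    /* 1, 2, or 4 bytes */
+          unsigned int value;   /* filled in for a read access */
+  };
+
+  int hv_cfg_hypercall(int write, struct cfg_access *acc); /* hypothetical */
+
+  static unsigned int coco_read_cfg(unsigned int offset, unsigned int size)
+  {
+          struct cfg_access acc = { .offset = offset, .size = size };
+
+          if (hv_cfg_hypercall(0 /* read */, &acc))
+                  return ~0u;   /* all-ones, like a failed PCI read */
+          return acc.value;
+  }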
+
+Config Block back-channel
+-------------------------
+The Hyper-V host and Hyper-V virtual PCI driver in Linux
+together implement a non-standard back-channel communication
+path between the host and guest. The back-channel path uses
+messages sent over the VMBus channel associated with the vPCI
+device. The functions hyperv_read_cfg_blk() and
+hyperv_write_cfg_blk() are the primary interfaces provided to
+other parts of the Linux kernel. As of this writing, these
+interfaces are used only by the Mellanox mlx5 driver to pass
+diagnostic data to a Hyper-V host running in the Azure public
+cloud. The functions hyperv_read_cfg_blk() and
+hyperv_write_cfg_blk() are implemented in a separate module
+(pci-hyperv-intf.c, under CONFIG_PCI_HYPERV_INTERFACE) that
+effectively stubs them out when running in non-Hyper-V
+environments.
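+
+A hedged usage sketch of the read interface follows; the signature is
+as declared in include/linux/hyperv.h at the time of writing, while
+the block_id value and buffer handling are illustrative only.
+
+.. code-block:: c
+
+  #include <linux/hyperv.h>
+  #include <linux/pci.h>
+
+  static int read_diag_block(struct pci_dev *pdev, void *buf,
+                             unsigned int len)
+  {
+          unsigned int bytes_returned = 0;
+          int ret;
+
+          ret = hyperv_read_cfg_blk(pdev, buf, len,
+                                    1 /* illustrative block_id */,
+                                    &bytes_returned);
+          if (ret)
+                  return ret;
+          /* bytes_returned: how much data the host supplied */
+          return 0;
+  }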
diff --git a/MAINTAINERS b/MAINTAINERS
index 8709c7cd3656..90a58c73dbde 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1395,6 +1395,7 @@ F: drivers/hwmon/max31760.c
ANALOGBITS PLL LIBRARIES
M: Paul Walmsley <paul.walmsley@sifive.com>
+M: Samuel Holland <samuel.holland@sifive.com>
S: Supported
F: drivers/clk/analogbits/*
F: include/linux/clk/analogbits*
@@ -2156,7 +2157,7 @@ M: Shawn Guo <shawnguo@kernel.org>
M: Sascha Hauer <s.hauer@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
R: Fabio Estevam <festevam@gmail.com>
-R: NXP Linux Team <linux-imx@nxp.com>
+L: imx@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -3168,10 +3169,10 @@ F: drivers/hwmon/asus-ec-sensors.c
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
-L: acpi4asus-user@lists.sourceforge.net
+M: Luke D. Jones <luke@ljones.dev>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-W: http://acpi4asus.sf.net
+W: https://asus-linux.org/
F: drivers/platform/x86/asus*.c
F: drivers/platform/x86/eeepc*.c
@@ -3692,6 +3693,13 @@ L: bpf@vger.kernel.org
S: Supported
F: arch/arm64/net/
+BPF JIT for LOONGARCH
+M: Tiezhu Yang <yangtiezhu@loongson.cn>
+R: Hengqi Chen <hengqi.chen@gmail.com>
+L: bpf@vger.kernel.org
+S: Maintained
+F: arch/loongarch/net/
+
BPF JIT for MIPS (32-BIT AND 64-BIT)
M: Johan Almbladh <johan.almbladh@anyfinetworks.com>
M: Paul Burton <paulburton@kernel.org>
@@ -3792,6 +3800,7 @@ M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
M: Andrii Nakryiko <andrii@kernel.org>
R: Martin KaFai Lau <martin.lau@linux.dev>
+R: Eduard Zingerman <eddyz87@gmail.com>
R: Song Liu <song@kernel.org>
R: Yonghong Song <yonghong.song@linux.dev>
R: John Fastabend <john.fastabend@gmail.com>
@@ -3852,6 +3861,7 @@ F: net/unix/unix_bpf.c
BPF [LIBRARY] (libbpf)
M: Andrii Nakryiko <andrii@kernel.org>
+M: Eduard Zingerman <eddyz87@gmail.com>
L: bpf@vger.kernel.org
S: Maintained
F: tools/lib/bpf/
@@ -3909,6 +3919,7 @@ F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
M: Andrii Nakryiko <andrii@kernel.org>
+M: Eduard Zingerman <eddyz87@gmail.com>
R: Mykola Lysenko <mykolal@fb.com>
L: bpf@vger.kernel.org
S: Maintained
@@ -4162,14 +4173,14 @@ F: drivers/firmware/broadcom/tee_bnxt_fw.c
F: drivers/net/ethernet/broadcom/bnxt/
F: include/linux/firmware/broadcom/tee_bnxt_fw.h
-BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
-M: Arend van Spriel <aspriel@gmail.com>
-M: Franky Lin <franky.lin@broadcom.com>
-M: Hante Meuleman <hante.meuleman@broadcom.com>
+BROADCOM BRCM80211 IEEE802.11 WIRELESS DRIVERS
+M: Arend van Spriel <arend.vanspriel@broadcom.com>
L: linux-wireless@vger.kernel.org
+L: brcm80211@lists.linux.dev
L: brcm80211-dev-list.pdl@broadcom.com
S: Supported
F: drivers/net/wireless/broadcom/brcm80211/
+F: include/linux/platform_data/brcmfmac.h
BROADCOM BRCMSTB GPIO DRIVER
M: Doug Berger <opendmb@gmail.com>
@@ -4540,7 +4551,7 @@ F: drivers/net/ieee802154/ca8210.c
CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
M: David Howells <dhowells@redhat.com>
-L: linux-cachefs@redhat.com (moderated for non-subscribers)
+L: netfs@lists.linux.dev
S: Supported
F: Documentation/filesystems/caching/cachefiles.rst
F: fs/cachefiles/
@@ -4622,8 +4633,8 @@ S: Maintained
F: net/sched/sch_cake.c
CAN NETWORK DRIVERS
-M: Wolfgang Grandegger <wg@grandegger.com>
M: Marc Kleine-Budde <mkl@pengutronix.de>
+M: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
L: linux-can@vger.kernel.org
S: Maintained
W: https://github.com/linux-can
@@ -5229,7 +5240,7 @@ X: drivers/clk/clkdev.c
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
M: Steve French <sfrench@samba.org>
R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
-R: Ronnie Sahlberg <lsahlber@redhat.com> (directory leases, sparse files)
+R: Ronnie Sahlberg <ronniesahlberg@gmail.com> (directory leases, sparse files)
R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
R: Tom Talpey <tom@talpey.com> (RDMA, smbdirect)
L: linux-cifs@vger.kernel.org
@@ -5371,7 +5382,7 @@ CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
M: Roman Gushchin <roman.gushchin@linux.dev>
-M: Shakeel Butt <shakeelb@google.com>
+M: Shakeel Butt <shakeel.butt@linux.dev>
R: Muchun Song <muchun.song@linux.dev>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
@@ -5603,6 +5614,11 @@ S: Maintained
F: Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml
F: drivers/net/can/ctucanfd/
+CVE ASSIGNMENT CONTACT
+M: CVE Assignment Team <cve@kernel.org>
+S: Maintained
+F: Documentation/process/cve.rst
+
CW1200 WLAN driver
S: Orphan
F: drivers/net/wireless/st/cw1200/
@@ -5951,7 +5967,6 @@ S: Maintained
F: drivers/platform/x86/dell/dell-wmi-descriptor.c
DELL WMI HARDWARE PRIVACY SUPPORT
-M: Perry Yuan <Perry.Yuan@dell.com>
L: Dell.Client.Kernel@dell.com
L: platform-driver-x86@vger.kernel.org
S: Maintained
@@ -7878,6 +7893,13 @@ S: Maintained
F: include/linux/errseq.h
F: lib/errseq.c
+ESD CAN NETWORK DRIVERS
+M: Stefan Mätje <stefan.maetje@esd.eu>
+R: socketcan@esd.eu
+L: linux-can@vger.kernel.org
+S: Maintained
+F: drivers/net/can/esd/
+
ESD CAN/USB DRIVERS
M: Frank Jungclaus <frank.jungclaus@esd.eu>
R: socketcan@esd.eu
@@ -7948,12 +7970,13 @@ L: rust-for-linux@vger.kernel.org
S: Maintained
F: rust/kernel/net/phy.rs
-EXEC & BINFMT API
+EXEC & BINFMT API, ELF
R: Eric Biederman <ebiederm@xmission.com>
R: Kees Cook <keescook@chromium.org>
L: linux-mm@kvack.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
+F: Documentation/userspace-api/ELF.rst
F: fs/*binfmt_*.c
F: fs/exec.c
F: include/linux/binfmts.h
@@ -8214,6 +8237,20 @@ S: Supported
F: fs/iomap/
F: include/linux/iomap.h
+FILESYSTEMS [NETFS LIBRARY]
+M: David Howells <dhowells@redhat.com>
+R: Jeff Layton <jlayton@kernel.org>
+L: netfs@lists.linux.dev
+L: linux-fsdevel@vger.kernel.org
+S: Supported
+F: Documentation/filesystems/caching/
+F: Documentation/filesystems/netfs_library.rst
+F: fs/netfs/
+F: include/linux/fscache*.h
+F: include/linux/netfs.h
+F: include/trace/events/fscache.h
+F: include/trace/events/netfs.h
+
FILESYSTEMS [STACKABLE]
M: Miklos Szeredi <miklos@szeredi.hu>
M: Amir Goldstein <amir73il@gmail.com>
@@ -8469,7 +8506,7 @@ FREESCALE IMX / MXC FEC DRIVER
M: Wei Fang <wei.fang@nxp.com>
R: Shenwei Wang <shenwei.wang@nxp.com>
R: Clark Wang <xiaoning.wang@nxp.com>
-R: NXP Linux Team <linux-imx@nxp.com>
+L: imx@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -8504,7 +8541,7 @@ F: drivers/i2c/busses/i2c-imx.c
FREESCALE IMX LPI2C DRIVER
M: Dong Aisheng <aisheng.dong@nxp.com>
L: linux-i2c@vger.kernel.org
-L: linux-imx@nxp.com
+L: imx@lists.linux.dev
S: Maintained
F: Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
F: drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -8659,14 +8696,6 @@ F: Documentation/power/freezing-of-tasks.rst
F: include/linux/freezer.h
F: kernel/freezer.c
-FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
-M: David Howells <dhowells@redhat.com>
-L: linux-cachefs@redhat.com (moderated for non-subscribers)
-S: Supported
-F: Documentation/filesystems/caching/
-F: fs/fscache/
-F: include/linux/fscache*.h
-
FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Eric Biggers <ebiggers@kernel.org>
M: Theodore Y. Ts'o <tytso@mit.edu>
@@ -10078,7 +10107,7 @@ L: linux-i2c@vger.kernel.org
S: Maintained
W: https://i2c.wiki.kernel.org/
Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/andi.shyti/linux.git
F: Documentation/devicetree/bindings/i2c/
F: drivers/i2c/algos/
F: drivers/i2c/busses/
@@ -10270,7 +10299,7 @@ F: drivers/scsi/ibmvscsi/ibmvscsi*
F: include/scsi/viosrp.h
IBM Power Virtual SCSI Device Target Driver
-M: Michael Cyr <mikecyr@linux.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
S: Supported
@@ -10788,11 +10817,11 @@ F: drivers/gpio/gpio-tangier.h
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
-M: Zhi Wang <zhi.a.wang@intel.com>
+M: Zhi Wang <zhi.wang.linux@gmail.com>
L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
S: Supported
-W: https://01.org/igvt-g
+W: https://github.com/intel/gvt-linux/wiki
T: git https://github.com/intel/gvt-linux.git
F: drivers/gpu/drm/i915/gvt/
@@ -11114,7 +11143,6 @@ S: Supported
F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
-M: Gregory Greenman <gregory.greenman@intel.com>
M: Miri Korenblit <miriam.rachel.korenblit@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
@@ -11712,6 +11740,7 @@ F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendanhiggins@google.com>
M: David Gow <davidgow@google.com>
+R: Rae Moar <rmoar@google.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
@@ -12627,6 +12656,13 @@ S: Maintained
F: Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml
F: drivers/gpio/gpio-loongson-64bit.c
+LOONGSON LS2X APB DMA DRIVER
+M: Binbin Zhou <zhoubinbin@loongson.cn>
+L: dmaengine@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/dma/loongson,ls2x-apbdma.yaml
+F: drivers/dma/ls2x-apb-dma.c
+
LOONGSON LS2X I2C DRIVER
M: Binbin Zhou <zhoubinbin@loongson.cn>
L: linux-i2c@vger.kernel.org
@@ -12883,6 +12919,8 @@ M: Alejandro Colomar <alx@kernel.org>
L: linux-man@vger.kernel.org
S: Maintained
W: http://www.kernel.org/doc/man-pages
+T: git git://git.kernel.org/pub/scm/docs/man-pages/man-pages.git
+T: git git://www.alejandro-colomar.es/src/alx/linux/man-pages/man-pages.git
MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
M: Jeremy Kerr <jk@codeconstruct.com.au>
@@ -13034,6 +13072,15 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/marvell/octeon_ep
+MARVELL OCTEON ENDPOINT VF DRIVER
+M: Veerasenareddy Burru <vburru@marvell.com>
+M: Sathesh Edara <sedara@marvell.com>
+M: Shinas Rasheed <srasheed@marvell.com>
+M: Satananda Burla <sburla@marvell.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/marvell/octeon_ep_vf
+
MARVELL OCTEONTX2 PHYSICAL FUNCTION DRIVER
M: Sunil Goutham <sgoutham@marvell.com>
M: Geetha sowjanya <gakula@marvell.com>
@@ -14084,6 +14131,17 @@ F: mm/
F: tools/mm/
F: tools/testing/selftests/mm/
+MEMORY MAPPING
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Liam R. Howlett <Liam.Howlett@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Lorenzo Stoakes <lstoakes@gmail.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: mm/mmap.c
+
MEMORY TECHNOLOGY DEVICES (MTD)
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Richard Weinberger <richard@nod.at>
@@ -14342,7 +14400,7 @@ MICROCHIP MCP16502 PMIC DRIVER
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
-F: Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
+F: Documentation/devicetree/bindings/regulator/microchip,mcp16502.yaml
F: drivers/regulator/mcp16502.c
MICROCHIP MCP3564 ADC DRIVER
@@ -15061,6 +15119,7 @@ NETDEVSIM
M: Jakub Kicinski <kuba@kernel.org>
S: Maintained
F: drivers/net/netdevsim/*
+F: tools/testing/selftests/drivers/net/netdevsim/*
NETEM NETWORK EMULATOR
M: Stephen Hemminger <stephen@networkplumber.org>
@@ -15158,6 +15217,7 @@ F: Documentation/networking/net_cachelines/net_device.rst
F: drivers/connector/
F: drivers/net/
F: include/dt-bindings/net/
+F: include/linux/cn_proc.h
F: include/linux/etherdevice.h
F: include/linux/fcdevice.h
F: include/linux/fddidevice.h
@@ -15165,6 +15225,7 @@ F: include/linux/hippidevice.h
F: include/linux/if_*
F: include/linux/inetdevice.h
F: include/linux/netdevice.h
+F: include/uapi/linux/cn_proc.h
F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
X: drivers/net/wireless/
@@ -15213,6 +15274,8 @@ F: Documentation/networking/
F: Documentation/networking/net_cachelines/
F: Documentation/process/maintainer-netdev.rst
F: Documentation/userspace-api/netlink/
+F: include/linux/framer/framer-provider.h
+F: include/linux/framer/framer.h
F: include/linux/in.h
F: include/linux/indirect_call_wrapper.h
F: include/linux/net.h
@@ -15300,7 +15363,7 @@ K: \bmdo_
NETWORKING [MPTCP]
M: Matthieu Baerts <matttbe@kernel.org>
M: Mat Martineau <martineau@kernel.org>
-R: Geliang Tang <geliang.tang@linux.dev>
+R: Geliang Tang <geliang@kernel.org>
L: netdev@vger.kernel.org
L: mptcp@lists.linux.dev
S: Maintained
@@ -15685,7 +15748,7 @@ F: drivers/iio/gyro/fxas21002c_spi.c
NXP i.MX 7D/6SX/6UL/93 AND VF610 ADC DRIVER
M: Haibo Chen <haibo.chen@nxp.com>
L: linux-iio@vger.kernel.org
-L: linux-imx@nxp.com
+L: imx@lists.linux.dev
S: Maintained
F: Documentation/devicetree/bindings/iio/adc/fsl,imx7d-adc.yaml
F: Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml
@@ -15722,7 +15785,7 @@ F: drivers/gpu/drm/imx/dcss/
NXP i.MX 8QXP ADC DRIVER
M: Cai Huoqing <cai.huoqing@linux.dev>
M: Haibo Chen <haibo.chen@nxp.com>
-L: linux-imx@nxp.com
+L: imx@lists.linux.dev
L: linux-iio@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
@@ -15730,7 +15793,7 @@ F: drivers/iio/adc/imx8qxp-adc.c
NXP i.MX 8QXP/8QM JPEG V4L2 DRIVER
M: Mirela Rabulea <mirela.rabulea@nxp.com>
-R: NXP Linux Team <linux-imx@nxp.com>
+L: imx@lists.linux.dev
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/nxp,imx8-jpeg.yaml
@@ -15740,7 +15803,7 @@ NXP i.MX CLOCK DRIVERS
M: Abel Vesa <abelvesa@kernel.org>
R: Peng Fan <peng.fan@nxp.com>
L: linux-clk@vger.kernel.org
-L: linux-imx@nxp.com
+L: imx@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelvesa/linux.git clk/imx
F: Documentation/devicetree/bindings/clock/imx*
@@ -16701,6 +16764,7 @@ F: drivers/pci/controller/dwc/*layerscape*
PCI DRIVER FOR FU740
M: Paul Walmsley <paul.walmsley@sifive.com>
M: Greentime Hu <greentime.hu@sifive.com>
+M: Samuel Holland <samuel.holland@sifive.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
@@ -16813,6 +16877,7 @@ F: drivers/pci/controller/dwc/*designware*
PCI DRIVER FOR TI DRA7XX/J721E
M: Vignesh Raghavendra <vigneshr@ti.com>
+R: Siddharth Vadapalli <s-vadapalli@ti.com>
L: linux-omap@vger.kernel.org
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -16837,9 +16902,8 @@ F: Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
F: drivers/pci/controller/pcie-xilinx-cpm.c
PCI ENDPOINT SUBSYSTEM
-M: Lorenzo Pieralisi <lpieralisi@kernel.org>
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
M: Krzysztof Wilczyński <kw@linux.com>
-R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
R: Kishon Vijay Abraham I <kishon@kernel.org>
L: linux-pci@vger.kernel.org
S: Supported
@@ -17128,10 +17192,10 @@ PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
+M: Namhyung Kim <namhyung@kernel.org>
R: Mark Rutland <mark.rutland@arm.com>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
R: Jiri Olsa <jolsa@kernel.org>
-R: Namhyung Kim <namhyung@kernel.org>
R: Ian Rogers <irogers@google.com>
R: Adrian Hunter <adrian.hunter@intel.com>
L: linux-perf-users@vger.kernel.org
@@ -17159,7 +17223,7 @@ R: John Garry <john.g.garry@oracle.com>
R: Will Deacon <will@kernel.org>
R: James Clark <james.clark@arm.com>
R: Mike Leach <mike.leach@linaro.org>
-R: Leo Yan <leo.yan@linaro.org>
+R: Leo Yan <leo.yan@linux.dev>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: tools/build/feature/test-libopencsd.c
@@ -17953,33 +18017,34 @@ F: drivers/media/tuners/qt1010*
QUALCOMM ATH12K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
-M: Jeff Johnson <quic_jjohnson@quicinc.com>
+M: Jeff Johnson <jjohnson@kernel.org>
L: ath12k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath12k/
+N: ath12k
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
-M: Jeff Johnson <quic_jjohnson@quicinc.com>
+M: Jeff Johnson <jjohnson@kernel.org>
L: ath10k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
-F: Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
F: drivers/net/wireless/ath/ath10k/
+N: ath10k
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
-M: Jeff Johnson <quic_jjohnson@quicinc.com>
+M: Jeff Johnson <jjohnson@kernel.org>
L: ath11k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k
B: https://wireless.wiki.kernel.org/en/users/Drivers/ath11k/bugreport
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
-F: Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
F: drivers/net/wireless/ath/ath11k/
+N: ath11k
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
M: Toke Høiland-Jørgensen <toke@toke.dk>
@@ -17990,6 +18055,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
+QUALCOMM ATHEROS QCA7K ETHERNET DRIVER
+M: Stefan Wahren <wahrenst@gmx.net>
+L: netdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/qca,qca7000.txt
+F: drivers/net/ethernet/qualcomm/qca*
+
QUALCOMM BAM-DMUX WWAN NETWORK DRIVER
M: Stephan Gerhold <stephan@gerhold.net>
L: netdev@vger.kernel.org
@@ -18062,7 +18134,6 @@ F: drivers/net/ethernet/qualcomm/emac/
QUALCOMM ETHQOS ETHERNET DRIVER
M: Vinod Koul <vkoul@kernel.org>
-R: Bhupesh Sharma <bhupesh.sharma@linaro.org>
L: netdev@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -18409,7 +18480,7 @@ S: Supported
F: drivers/infiniband/sw/rdmavt
RDS - RELIABLE DATAGRAM SOCKETS
-M: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+M: Allison Henderson <allison.henderson@oracle.com>
L: netdev@vger.kernel.org
L: linux-rdma@vger.kernel.org
L: rds-devel@oss.oracle.com (moderated for non-subscribers)
@@ -19611,7 +19682,7 @@ F: drivers/mmc/host/sdhci-of-at91.c
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) NXP i.MX DRIVER
M: Haibo Chen <haibo.chen@nxp.com>
-L: linux-imx@nxp.com
+L: imx@lists.linux.dev
L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-esdhc-imx.c
@@ -19946,36 +20017,15 @@ S: Maintained
F: drivers/watchdog/simatic-ipc-wdt.c
SIFIVE DRIVERS
-M: Palmer Dabbelt <palmer@dabbelt.com>
M: Paul Walmsley <paul.walmsley@sifive.com>
+M: Samuel Holland <samuel.holland@sifive.com>
L: linux-riscv@lists.infradead.org
S: Supported
+F: drivers/dma/sf-pdma/
N: sifive
+K: fu[57]40
K: [^@]sifive
-SIFIVE CACHE DRIVER
-M: Conor Dooley <conor@kernel.org>
-L: linux-riscv@lists.infradead.org
-S: Maintained
-F: Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
-F: drivers/cache/sifive_ccache.c
-
-SIFIVE FU540 SYSTEM-ON-CHIP
-M: Paul Walmsley <paul.walmsley@sifive.com>
-M: Palmer Dabbelt <palmer@dabbelt.com>
-L: linux-riscv@lists.infradead.org
-S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
-N: fu540
-K: fu540
-
-SIFIVE PDMA DRIVER
-M: Green Wan <green.wan@sifive.com>
-S: Maintained
-F: Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
-F: drivers/dma/sf-pdma/
-
-
SILEAD TOUCHSCREEN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-input@vger.kernel.org
@@ -20184,8 +20234,8 @@ F: Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
F: drivers/net/ethernet/socionext/sni_ave.c
SOCIONEXT (SNI) NETSEC NETWORK DRIVER
-M: Jassi Brar <jaswinder.singh@linaro.org>
M: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+M: Masahisa Kojima <kojima.masahisa@socionext.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/socionext,synquacer-netsec.yaml
@@ -20530,6 +20580,7 @@ F: Documentation/translations/sp_SP/
SPARC + UltraSPARC (sparc/sparc64)
M: "David S. Miller" <davem@davemloft.net>
+M: Andreas Larsson <andreas@gaisler.com>
L: sparclinux@vger.kernel.org
S: Maintained
Q: http://patchwork.ozlabs.org/project/sparclinux/list/
@@ -21986,6 +22037,14 @@ F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
F: drivers/media/i2c/ds90*
F: include/media/i2c/ds90*
+TI HDC302X HUMIDITY DRIVER
+M: Javier Carrasco <javier.carrasco.cruz@gmail.com>
+M: Li peiyu <579lpy@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml
+F: drivers/iio/humidity/hdc3020.c
+
TI ICSSG ETHERNET DRIVER (ICSSG)
R: MD Danish Anwar <danishanwar@ti.com>
R: Roger Quadros <rogerq@kernel.org>
@@ -22841,9 +22900,8 @@ S: Maintained
F: drivers/usb/typec/mux/pi3usb30532.c
USB TYPEC PORT CONTROLLER DRIVERS
-M: Guenter Roeck <linux@roeck-us.net>
L: linux-usb@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/usb/typec/tcpm/
USB UHCI DRIVER
@@ -24099,7 +24157,6 @@ F: drivers/net/ethernet/xilinx/xilinx_axienet*
XILINX CAN DRIVER
M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
-R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
L: linux-can@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/can/xilinx,can.yaml
@@ -24320,13 +24377,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs.git
F: Documentation/filesystems/zonefs.rst
F: fs/zonefs/
-ZPOOL COMPRESSED PAGE STORAGE API
-M: Dan Streetman <ddstreet@ieee.org>
-L: linux-mm@kvack.org
-S: Maintained
-F: include/linux/zpool.h
-F: mm/zpool.c
-
ZR36067 VIDEO FOR LINUX DRIVER
M: Corentin Labbe <clabbe@baylibre.com>
L: mjpeg-users@lists.sourceforge.net
@@ -24378,7 +24428,9 @@ M: Nhat Pham <nphamcs@gmail.com>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/admin-guide/mm/zswap.rst
+F: include/linux/zpool.h
F: include/linux/zswap.h
+F: mm/zpool.c
F: mm/zswap.c
THE REST
diff --git a/Makefile b/Makefile
index f1b2fd977275..0e36eff14608 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 7
+PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc7
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -155,6 +155,15 @@ endif
export KBUILD_EXTMOD
+# backward compatibility
+KBUILD_EXTRA_WARN ?= $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)
+
+ifeq ("$(origin W)", "command line")
+ KBUILD_EXTRA_WARN := $(W)
+endif
+
+export KBUILD_EXTRA_WARN
+
# Kbuild will save output files in the current working directory.
# This does not need to match to the root of the kernel source tree.
#
@@ -181,14 +190,11 @@ ifeq ("$(origin O)", "command line")
endif
ifneq ($(KBUILD_OUTPUT),)
-# Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
-# expand a shell special character '~'. We use a somewhat tedious way here.
-abs_objtree := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
-$(if $(abs_objtree),, \
- $(error failed to create output directory "$(KBUILD_OUTPUT)"))
-
+# $(realpath ...) expands to empty if the path does not exist. Run 'mkdir -p' first.
+$(shell mkdir -p "$(KBUILD_OUTPUT)")
# $(realpath ...) resolves symlinks
-abs_objtree := $(realpath $(abs_objtree))
+abs_objtree := $(realpath $(KBUILD_OUTPUT))
+$(if $(abs_objtree),,$(error failed to create output directory "$(KBUILD_OUTPUT)"))
endif # ifneq ($(KBUILD_OUTPUT),)
ifneq ($(words $(subst :, ,$(abs_srctree))), 1)
@@ -288,15 +294,15 @@ may-sync-config := 1
single-build :=
ifneq ($(filter $(no-dot-config-targets), $(MAKECMDGOALS)),)
- ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
+ ifeq ($(filter-out $(no-dot-config-targets), $(MAKECMDGOALS)),)
need-config :=
- endif
+ endif
endif
ifneq ($(filter $(no-sync-config-targets), $(MAKECMDGOALS)),)
- ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
+ ifeq ($(filter-out $(no-sync-config-targets), $(MAKECMDGOALS)),)
may-sync-config :=
- endif
+ endif
endif
need-compiler := $(may-sync-config)
@@ -317,9 +323,9 @@ endif
# We cannot build single targets and the others at the same time
ifneq ($(filter $(single-targets), $(MAKECMDGOALS)),)
single-build := 1
- ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
+ ifneq ($(filter-out $(single-targets), $(MAKECMDGOALS)),)
mixed-build := 1
- endif
+ endif
endif
# For "make -j clean all", "make -j mrproper defconfig all", etc.
@@ -609,8 +615,6 @@ export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL KBUILD_RUSTFLAGS_KERNEL
export RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o \
-name CVS -o -name .pc -o -name .hg -o -name .git \) \
-prune -o
-export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
- --exclude CVS --exclude .pc --exclude .hg --exclude .git
# ===========================================================================
# Rules shared between *config targets and build targets
@@ -982,6 +986,10 @@ NOSTDINC_FLAGS += -nostdinc
# perform bounds checking.
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
+# Currently, disable -Wstringop-overflow for GCC 11, globally.
+KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
+KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
@@ -1658,10 +1666,11 @@ help:
@echo ' (sparse by default)'
@echo ' make C=2 [targets] Force check of all c source with $$CHECK'
@echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
- @echo ' make W=n [targets] Enable extra build checks, n=1,2,3 where'
+ @echo ' make W=n [targets] Enable extra build checks, n=1,2,3,c,e where'
@echo ' 1: warnings which may be relevant and do not occur too often'
@echo ' 2: warnings which occur quite often but may still be relevant'
@echo ' 3: more obscure warnings, can most likely be ignored'
+ @echo ' c: extra checks in the configuration stage (Kconfig)'
@echo ' e: warnings are being treated as errors'
@echo ' Multiple levels can be combined with W=12 or W=123'
@$(if $(dtstree), \
diff --git a/arch/Kconfig b/arch/Kconfig
index c91917b50873..a5af0edd3eb8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -673,6 +673,7 @@ config SHADOW_CALL_STACK
bool "Shadow Call Stack"
depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+ depends on MMU
help
This option enables the compiler's Shadow Call Stack, which
uses a shadow stack to protect function return addresses from
diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
index 9d9618079739..a339223d9e05 100644
--- a/arch/arc/include/asm/jump_label.h
+++ b/arch/arc/include/asm/jump_label.h
@@ -31,7 +31,7 @@
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
"1: \n"
"nop \n"
".pushsection __jump_table, \"aw\" \n"
@@ -47,7 +47,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
"1: \n"
"b %l[l_yes] \n"
".pushsection __jump_table, \"aw\" \n"
diff --git a/arch/arm/boot/dts/amazon/alpine.dtsi b/arch/arm/boot/dts/amazon/alpine.dtsi
index ff68dfb4eb78..90bd12feac01 100644
--- a/arch/arm/boot/dts/amazon/alpine.dtsi
+++ b/arch/arm/boot/dts/amazon/alpine.dtsi
@@ -167,7 +167,6 @@
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <96>;
al,msi-num-spis = <64>;
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
index e899de681f47..5be0e8fd2633 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
@@ -45,8 +45,8 @@
num-chipselects = <1>;
cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
- tpmdev@0 {
- compatible = "tcg,tpm_tis-spi";
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
spi-max-frequency = <33000000>;
reg = <0>;
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
index a677c827e758..5a8169bbda87 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
@@ -80,8 +80,8 @@
gpio-miso = <&gpio ASPEED_GPIO(R, 5) GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
- tpmdev@0 {
- compatible = "tcg,tpm_tis-spi";
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
spi-max-frequency = <33000000>;
reg = <0>;
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
index 3f6010ef2b86..213023bc5aec 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
@@ -456,7 +456,7 @@
status = "okay";
tpm: tpm@2e {
- compatible = "tcg,tpm-tis-i2c";
+ compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
reg = <0x2e>;
};
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
index 530491ae5eb2..857cb26ed6d7 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
@@ -466,7 +466,6 @@
i2c0: i2c-bus@40 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x40 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -482,7 +481,6 @@
i2c1: i2c-bus@80 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x80 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -498,7 +496,6 @@
i2c2: i2c-bus@c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0xc0 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -515,7 +512,6 @@
i2c3: i2c-bus@100 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x100 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -532,7 +528,6 @@
i2c4: i2c-bus@140 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x140 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -549,7 +544,6 @@
i2c5: i2c-bus@180 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x180 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -566,7 +560,6 @@
i2c6: i2c-bus@1c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x1c0 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -583,7 +576,6 @@
i2c7: i2c-bus@300 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x300 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -600,7 +592,6 @@
i2c8: i2c-bus@340 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x340 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -617,7 +608,6 @@
i2c9: i2c-bus@380 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x380 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -634,7 +624,6 @@
i2c10: i2c-bus@3c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x3c0 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -651,7 +640,6 @@
i2c11: i2c-bus@400 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x400 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -668,7 +656,6 @@
i2c12: i2c-bus@440 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x440 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -685,7 +672,6 @@
i2c13: i2c-bus@480 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x480 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
index 04f98d1dbb97..e6f3cf3c721e 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
@@ -363,6 +363,7 @@
interrupts = <40>;
reg = <0x1e780200 0x0100>;
clocks = <&syscon ASPEED_CLK_APB>;
+ #interrupt-cells = <2>;
interrupt-controller;
bus-frequency = <12000000>;
pinctrl-names = "default";
@@ -594,7 +595,6 @@
i2c0: i2c-bus@40 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x40 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -610,7 +610,6 @@
i2c1: i2c-bus@80 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x80 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -626,7 +625,6 @@
i2c2: i2c-bus@c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0xc0 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -643,7 +641,6 @@
i2c3: i2c-bus@100 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x100 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -660,7 +657,6 @@
i2c4: i2c-bus@140 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x140 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -677,7 +673,6 @@
i2c5: i2c-bus@180 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x180 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -694,7 +689,6 @@
i2c6: i2c-bus@1c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x1c0 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -711,7 +705,6 @@
i2c7: i2c-bus@300 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x300 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -728,7 +721,6 @@
i2c8: i2c-bus@340 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x340 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -745,7 +737,6 @@
i2c9: i2c-bus@380 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x380 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -762,7 +753,6 @@
i2c10: i2c-bus@3c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x3c0 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -779,7 +769,6 @@
i2c11: i2c-bus@400 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x400 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -796,7 +785,6 @@
i2c12: i2c-bus@440 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x440 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -813,7 +801,6 @@
i2c13: i2c-bus@480 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x480 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
index c4d1faade8be..29f94696d8b1 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
@@ -474,6 +474,7 @@
reg = <0x1e780500 0x100>;
interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_APB2>;
+ #interrupt-cells = <2>;
interrupt-controller;
bus-frequency = <12000000>;
pinctrl-names = "default";
@@ -488,6 +489,7 @@
reg = <0x1e780600 0x100>;
interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_APB2>;
+ #interrupt-cells = <2>;
interrupt-controller;
bus-frequency = <12000000>;
pinctrl-names = "default";
@@ -902,7 +904,6 @@
i2c0: i2c-bus@80 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x80 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -917,7 +918,6 @@
i2c1: i2c-bus@100 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x100 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -932,7 +932,6 @@
i2c2: i2c-bus@180 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x180 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -947,7 +946,6 @@
i2c3: i2c-bus@200 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x200 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -962,7 +960,6 @@
i2c4: i2c-bus@280 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x280 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -977,7 +974,6 @@
i2c5: i2c-bus@300 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x300 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -992,7 +988,6 @@
i2c6: i2c-bus@380 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x380 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1007,7 +1002,6 @@
i2c7: i2c-bus@400 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x400 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1022,7 +1016,6 @@
i2c8: i2c-bus@480 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x480 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1037,7 +1030,6 @@
i2c9: i2c-bus@500 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x500 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1052,7 +1044,6 @@
i2c10: i2c-bus@580 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x580 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1067,7 +1058,6 @@
i2c11: i2c-bus@600 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x600 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1082,7 +1072,6 @@
i2c12: i2c-bus@680 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x680 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1097,7 +1086,6 @@
i2c13: i2c-bus@700 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x700 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1112,7 +1100,6 @@
i2c14: i2c-bus@780 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x780 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1127,7 +1114,6 @@
i2c15: i2c-bus@800 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x800 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
diff --git a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
index 31590d3186a2..00e5887c926f 100644
--- a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
+++ b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
@@ -35,8 +35,8 @@
gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
- tpmdev@0 {
- compatible = "tcg,tpm_tis-spi";
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
spi-max-frequency = <33000000>;
reg = <0>;
};
diff --git a/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi b/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
index f9f79ed82518..07ca0d993c9f 100644
--- a/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
@@ -167,6 +167,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&mailbox>;
interrupts = <0>;
};
@@ -247,6 +248,7 @@
gpio-controller;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
+ #interrupt-cells = <2>;
};
i2c1: i2c@1800b000 {
@@ -518,6 +520,7 @@
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinctrl 0 42 1>,
<&pinctrl 1 44 3>,
diff --git a/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi b/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
index 788a6806191a..75545b10ef2f 100644
--- a/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
@@ -200,6 +200,7 @@
gpio-controller;
ngpios = <4>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi b/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
index 9d20ba3b1ffb..6a4482c93167 100644
--- a/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
@@ -180,6 +180,7 @@
gpio-controller;
ngpios = <32>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinctrl 0 0 32>;
};
@@ -352,6 +353,7 @@
gpio-controller;
ngpios = <4>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
index 4d70f6afd13a..6d5e69035f94 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
@@ -60,6 +60,8 @@
* We have slots (IDSEL) 1 and 2 with one assigned IRQ
* each handling all IRQs.
*/
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map =
/* IDSEL 1 */
<0x0800 0 0 1 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 1 is irq 11 */
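
Editorial note: the two properties added above are what give the interrupt-map entries their shape. #interrupt-cells = <1> says each child (PCI) interrupt specifier is one cell (INTA..INTD as 1..4), and interrupt-map-mask = <0xf800 0 0 7> keeps only the device-number bits of the child unit address (bits 15:11, hence 0x0800 for IDSEL 1) plus the pin cell before matching. A reduced sketch of how one entry reads, with an illustrative parent:

	pci {
		#interrupt-cells = <1>;
		interrupt-map-mask = <0xf800 0 0 7>;
		/* <child addr (3 cells)> <pin> <parent> <parent specifier>:
		 * device 1 (1 << 11 = 0x0800), INTA (1) -> gpio0 line 11 */
		interrupt-map = <0x0800 0 0 1 &gpio0 11 IRQ_TYPE_LEVEL_LOW>;
	};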
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
index 9ec0169bacf8..5f4c849915db 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
@@ -89,6 +89,8 @@
* The slots have Ethernet, Ethernet, NEC and MPCI.
* The IDSELs are 11, 12, 13, 14.
*/
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map =
/* IDSEL 11 - Ethernet A */
<0x5800 0 0 1 &gpio0 4 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 11 is irq 4 */
diff --git a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
index dffb9f84e67c..c841eb8e7fb1 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
@@ -65,6 +65,7 @@
gpio2: gpio-expander@20 {
#gpio-cells = <2>;
#interrupt-cells = <2>;
+ interrupt-controller;
compatible = "semtech,sx1505q";
reg = <0x20>;
@@ -79,6 +80,7 @@
gpio3: gpio-expander@21 {
#gpio-cells = <2>;
#interrupt-cells = <2>;
+ interrupt-controller;
compatible = "semtech,sx1505q";
reg = <0x21>;
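
Editorial note: this hunk and many that follow enforce the same schema rule: interrupt-controller and #interrupt-cells travel as a pair. A node that acts as an interrupt parent must declare both (as the sx1505q expanders now do), while a node that never serves as an interrupt parent should carry neither, which is why later hunks instead drop a stray interrupt-controller from the stmpe811 and da9063 nodes. Schematically, with a hypothetical expander:

	gpio_exp: gpio-expander@20 {
		gpio-controller;
		#gpio-cells = <2>;
		interrupt-controller;		/* only valid together with ... */
		#interrupt-cells = <2>;		/* ... a matching cell count */
	};

	some-device {
		/* consumers then pass <line flags>, i.e. two cells */
		interrupts-extended = <&gpio_exp 3 IRQ_TYPE_LEVEL_LOW>;
	};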
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
index fd671c7a1e5d..6e1f0f164cb4 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
@@ -120,6 +120,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
<3 IRQ_TYPE_LEVEL_HIGH>,
<4 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
interrupt-controller;
};
@@ -128,6 +129,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
interrupt-controller;
};
diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
index 1640763fd4af..ff0d684622f7 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
@@ -997,7 +997,6 @@
compatible = "st,stmpe811";
reg = <0x41>;
irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi b/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
index 3b6fad273cab..d38f1dd38a90 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
@@ -980,7 +980,6 @@
compatible = "st,stmpe811";
reg = <0x41>;
irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi b/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
index 4eb526fe9c55..81c8a5fd92cc 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
@@ -861,7 +861,6 @@
compatible = "st,stmpe811";
reg = <0x41>;
irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts b/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
index db8c332df6a1..cad112e05475 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
@@ -227,7 +227,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
bridge@2,1 {
compatible = "pci10b5,8605";
@@ -235,7 +234,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
/* Intel Corporation I210 Gigabit Network Connection */
ethernet@3,0 {
@@ -250,7 +248,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
/* Intel Corporation I210 Gigabit Network Connection */
switch_nic: ethernet@4,0 {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
index 99f4f6ac71d4..c1ae7c47b442 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
@@ -245,6 +245,7 @@
reg = <0x74>;
gpio-controller;
#gpio-cells = <2>;
+ #interrupt-cells = <2>;
interrupt-controller;
interrupt-parent = <&gpio2>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
@@ -390,7 +391,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
index 2ae93f57fe5a..ea40623d12e5 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
@@ -626,7 +626,6 @@
blocks = <0x5>;
id = <0>;
interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
interrupt-parent = <&gpio4>;
irq-trigger = <0x1>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
index 55c90f6393ad..d3a7a6eeb8e0 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
@@ -550,7 +550,6 @@
blocks = <0x5>;
interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpio6>;
- interrupt-controller;
id = <0>;
irq-trigger = <0x1>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
index a63e73adc1fc..42b2ba23aefc 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
@@ -225,7 +225,6 @@
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio2>;
interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
index 113974520d54..c0c47adc5866 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
@@ -124,6 +124,7 @@
reg = <0x58>;
interrupt-parent = <&gpio2>;
interrupts = <9 IRQ_TYPE_LEVEL_LOW>; /* active-low GPIO2_9 */
+ #interrupt-cells = <2>;
interrupt-controller;
regulators {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
index 86b4269e0e01..85e278eb2016 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
@@ -100,6 +100,7 @@
interrupt-parent = <&gpio1>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
index 44cc4ff1d0df..d12fb44aeb14 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
@@ -116,7 +116,7 @@
tpm_tis: tpm@1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_tpm>;
- compatible = "tcg,tpm_tis-spi";
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
reg = <1>;
spi-max-frequency = <20000000>;
interrupt-parent = <&gpio5>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
index 3a723843d562..9984b343cdf0 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
@@ -130,7 +130,7 @@
* TCG specification - Section 6.4.1 Clocking:
* TPM shall support a SPI clock frequency range of 10-24 MHz.
*/
- st33htph: tpm-tis@0 {
+ st33htph: tpm@0 {
compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
reg = <0>;
spi-max-frequency = <24000000>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
index 12361fcbe24a..1b965652291b 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
@@ -63,6 +63,7 @@
gpio-controller;
#gpio-cells = <2>;
#interrupt-cells = <2>;
+ interrupt-controller;
reg = <0x25>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
index ebf7befcc11e..9c81c6baa2d3 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
@@ -834,16 +834,6 @@
<&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>;
clock-names = "pix", "axi";
status = "disabled";
-
- port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- lcdif_out_mipi_dsi: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&mipi_dsi_in_lcdif>;
- };
- };
};
mipi_csi: mipi-csi@30750000 {
@@ -895,22 +885,6 @@
samsung,esc-clock-frequency = <20000000>;
samsung,pll-clock-frequency = <24000000>;
status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi_dsi_in_lcdif: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&lcdif_out_mipi_dsi>;
- };
- };
- };
};
};
diff --git a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
index b0ed68af0546..029f49be40e3 100644
--- a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
@@ -338,6 +338,7 @@
reg = <0x22>;
gpio-controller;
#gpio-cells = <2>;
+ #interrupt-cells = <2>;
interrupt-controller;
interrupt-parent = <&gpio3>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
index 2045fc779f88..27429d0fedfb 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
@@ -340,10 +340,10 @@
"msi8";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 0 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 0 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 0 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 0 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ interrupt-map = <0 0 0 1 &intc 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_PCIE_PIPE_CLK>,
<&gcc GCC_PCIE_AUX_CLK>,
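
Editorial note: the cell-count change here, and the opposite-direction fixes in the ipq6018/ipq8074 hunks further down, follow from how interrupt-map parent entries are sized: after the parent phandle come exactly #address-cells plus #interrupt-cells of that parent. On this SoC the GIC is referenced with no address cells, so three cells (GIC_SPI as 0, the SPI number, the trigger) are correct; the ipq parents do declare address cells, so their entries gain leading zeros instead. Roughly, assuming an illustrative parent with one address cell:

	intc: interrupt-controller {
		interrupt-controller;
		#address-cells = <1>;
		#interrupt-cells = <3>;
	};

	pcie {
		#interrupt-cells = <1>;
		interrupt-map-mask = <0 0 0 0x7>;
		/* 1 address cell (0) + 3 interrupt cells after &intc */
		interrupt-map = <0 0 0 1 &intc 0 0 142 IRQ_TYPE_LEVEL_HIGH>;
	};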
diff --git a/arch/arm/boot/dts/renesas/r8a7790-lager.dts b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
index 2fba4d084001..8590981245a6 100644
--- a/arch/arm/boot/dts/renesas/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
@@ -447,6 +447,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7790-stout.dts b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
index f9bc5b4f019d..683f7395fab0 100644
--- a/arch/arm/boot/dts/renesas/r8a7790-stout.dts
+++ b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
@@ -347,6 +347,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
index e9c13bb03772..0efd9f98c75a 100644
--- a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
@@ -819,6 +819,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7791-porter.dts b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
index 7e8bc06715f6..93c86e921645 100644
--- a/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
@@ -413,6 +413,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
watchdog {
compatible = "dlg,da9063-watchdog";
diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
index 4f9838cf97ee..540a9ad28f28 100644
--- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
@@ -381,6 +381,7 @@
interrupt-parent = <&irqc>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7793-gose.dts b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
index 1744fdbf9e0c..1ea6c757893b 100644
--- a/arch/arm/boot/dts/renesas/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
@@ -759,6 +759,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7794-alt.dts b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
index c0d067df22a0..b5ecafbb2e4d 100644
--- a/arch/arm/boot/dts/renesas/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
@@ -453,6 +453,7 @@
interrupt-parent = <&gpio3>;
interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7794-silk.dts b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
index 43d480a7f3ea..595e074085eb 100644
--- a/arch/arm/boot/dts/renesas/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
@@ -439,6 +439,7 @@
interrupt-parent = <&gpio3>;
interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/arm/boot/dts/rockchip/rv1108.dtsi b/arch/arm/boot/dts/rockchip/rv1108.dtsi
index abf3006f0a84..f3291f3bbc6f 100644
--- a/arch/arm/boot/dts/rockchip/rv1108.dtsi
+++ b/arch/arm/boot/dts/rockchip/rv1108.dtsi
@@ -196,7 +196,6 @@
pwm4: pwm@10280000 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280000 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -208,7 +207,6 @@
pwm5: pwm@10280010 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280010 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -220,7 +218,6 @@
pwm6: pwm@10280020 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280020 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -232,7 +229,6 @@
pwm7: pwm@10280030 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280030 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -386,7 +382,6 @@
pwm0: pwm@20040000 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040000 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -398,7 +393,6 @@
pwm1: pwm@20040010 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040010 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -410,7 +404,6 @@
pwm2: pwm@20040020 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040020 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -422,7 +415,6 @@
pwm3: pwm@20040030 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040030 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
index d7954ff466b4..e5254e32aa8f 100644
--- a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
@@ -434,6 +434,7 @@
};
&fimd {
+ samsung,invert-vclk;
status = "okay";
};
diff --git a/arch/arm/boot/dts/st/stm32429i-eval.dts b/arch/arm/boot/dts/st/stm32429i-eval.dts
index 576235ec3c51..afa417b34b25 100644
--- a/arch/arm/boot/dts/st/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/st/stm32429i-eval.dts
@@ -222,7 +222,6 @@
reg = <0x42>;
interrupts = <8 3>;
interrupt-parent = <&gpioi>;
- interrupt-controller;
wakeup-source;
stmpegpio: stmpe_gpio {
diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
index 510cca5acb79..7a701f7ef0c7 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
@@ -64,7 +64,6 @@
reg = <0x38>;
interrupts = <2 2>;
interrupt-parent = <&gpiof>;
- interrupt-controller;
touchscreen-size-x = <480>;
touchscreen-size-y = <800>;
status = "okay";
diff --git a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
index b8730aa52ce6..a59331aa58e5 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
@@ -217,7 +217,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi1_pins>;
- tpm_spi_tis@0 {
+ tpm@0 {
compatible = "tcg,tpm_tis-spi";
reg = <0>;
spi-max-frequency = <500000>;
diff --git a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
index c8e55642f9c6..3e834fc7e370 100644
--- a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
+++ b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
@@ -415,7 +415,6 @@
reg = <0x41>;
interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpio2>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 0a90583f9f01..8f9dbe8d9029 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -297,6 +297,7 @@ CONFIG_FB_MODE_HELPERS=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_GPIO=y
CONFIG_FRAMEBUFFER_CONSOLE=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index feb38a94c1a7..43bc1255a5db 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -138,7 +138,8 @@ CONFIG_PWM_MXS=y
CONFIG_NVMEM_MXS_OCOTP=y
CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_FSCACHE_STATS=y
CONFIG_CACHEFILES=m
CONFIG_VFAT_FS=y
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index e12d7d096fc0..e4eb54f6cd9f 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -11,7 +11,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
WASM(nop) "\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
@@ -25,7 +25,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
WASM(b) " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 71b113976420..8b1ec60a9a46 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -339,6 +339,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
GPIO_LOOKUP_IDX("G", 0, NULL, 1,
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ { }
},
};
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index e96fb40b9cc3..07565b593ed6 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -298,6 +298,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ea01a2c43efa..aa7c1d435139 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1039,8 +1039,12 @@ config ARM64_ERRATUM_2645198
If unsure, say Y.
+config ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ bool
+
config ARM64_ERRATUM_2966298
bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
+ select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
default y
help
This option adds the workaround for ARM Cortex-A520 erratum 2966298.
@@ -1052,6 +1056,20 @@ config ARM64_ERRATUM_2966298
If unsure, say Y.
+config ARM64_ERRATUM_3117295
+ bool "Cortex-A510: 3117295: workaround for speculatively executed unprivileged load"
+ select ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 3117295.
+
+ On an affected Cortex-A510 core, a speculatively executed unprivileged
+ load might leak data from a privileged level via a cache side channel.
+
+ Work around this problem by executing a TLBI before returning to EL0.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 47ecc4cff9d2..a88cdf910687 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -195,7 +195,7 @@ vdso_prepare: prepare0
include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
ifdef CONFIG_COMPAT_VDSO
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
- include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
+ arch/arm64/kernel/vdso32/vdso.so
endif
endif
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 91d505b385de..1f1f8d865d0e 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -42,5 +42,6 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-cb1-manta.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-bigtreetech-pi.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-orangepi-zero2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h616-x96-mate.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero2w.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-orangepi-zero3.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h618-transpeed-8k618-t.dtb
diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
index dccbba6e7f98..dbf2dce8d1d6 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
@@ -145,7 +145,6 @@
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <160>;
al,msi-num-spis = <160>;
diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
index 39481d7fd7d4..3ea178acdddf 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
@@ -355,7 +355,6 @@
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <336>;
al,msi-num-spis = <959>;
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 9dcd25ec2c04..896d1f33b5b6 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -586,6 +586,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index f049687d6b96..d8516ec0dae7 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -450,6 +450,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinmux 0 0 16>,
<&pinmux 16 71 2>,
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index 9747cb3fa03a..d838e3a7af6e 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
@@ -289,7 +289,7 @@
#clock-cells = <1>;
clocks = <&cmu_top CLK_DOUT_CMU_MISC_BUS>,
<&cmu_top CLK_DOUT_CMU_MISC_SSS>;
- clock-names = "dout_cmu_misc_bus", "dout_cmu_misc_sss";
+ clock-names = "bus", "sss";
};
watchdog_cl0: watchdog@10060000 {
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 2e027675d7bb..2cb0212b63c6 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -20,23 +20,41 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-frwy.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-tqmls1046a-mbls10xxa.dtb
+DTC_FLAGS_fsl-ls1088a-qds := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-qds.dtb
+DTC_FLAGS_fsl-ls1088a-rdb := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb
+DTC_FLAGS_fsl-ls1088a-ten64 := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-ten64.dtb
+DTC_FLAGS_fsl-ls1088a-tqmls1088a-mbls10xxa := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-tqmls1088a-mbls10xxa.dtb
+DTC_FLAGS_fsl-ls2080a-qds := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb
+DTC_FLAGS_fsl-ls2080a-rdb := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
+DTC_FLAGS_fsl-ls2081a-rdb := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2081a-rdb.dtb
+DTC_FLAGS_fsl-ls2080a-simu := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
+DTC_FLAGS_fsl-ls2088a-qds := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
+DTC_FLAGS_fsl-ls2088a-rdb := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
+DTC_FLAGS_fsl-lx2160a-bluebox3 := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-bluebox3.dtb
+DTC_FLAGS_fsl-lx2160a-bluebox3-rev-a := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-bluebox3-rev-a.dtb
+DTC_FLAGS_fsl-lx2160a-clearfog-cx := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-clearfog-cx.dtb
+DTC_FLAGS_fsl-lx2160a-honeycomb := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
+DTC_FLAGS_fsl-lx2160a-qds := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
+DTC_FLAGS_fsl-lx2160a-rdb := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
+DTC_FLAGS_fsl-lx2162a-clearfog := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2162a-clearfog.dtb
+DTC_FLAGS_fsl-lx2162a-qds := -Wno-interrupt_map
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2162a-qds.dtb
fsl-ls1028a-qds-13bb-dtbs := fsl-ls1028a-qds.dtb fsl-ls1028a-qds-13bb.dtbo
@@ -53,6 +71,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds-85bb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds-899b.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds-9999.dtb
+DTC_FLAGS_fsl-lx2160a-tqmlx2160a-mblx2160a := -Wno-interrupt_map
fsl-lx2160a-tqmlx2160a-mblx2160a-12-11-x-dtbs := fsl-lx2160a-tqmlx2160a-mblx2160a.dtb \
fsl-lx2160a-tqmlx2160a-mblx2160a_12_x_x.dtbo \
fsl-lx2160a-tqmlx2160a-mblx2160a_x_11_x.dtbo
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
index 968f475b9a96..27a902569e2a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
@@ -120,7 +120,7 @@
};
tpm: tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpio2>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
index 3f3f2a2c89cd..752caa38eb03 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
@@ -89,7 +89,7 @@
status = "okay";
tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x1>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
index 06fed9376996..2aa6c1090fc7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
@@ -109,7 +109,7 @@
status = "okay";
tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x1>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts b/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts
index f38ee2266b25..a6b94d1957c9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som-symphony.dts
@@ -128,14 +128,9 @@
pinctrl-0 = <&pinctrl_ptn5150>;
status = "okay";
- connector {
- compatible = "usb-c-connector";
- label = "USB-C";
-
- port {
- typec1_dr_sw: endpoint {
- remote-endpoint = <&usb1_drd_sw>;
- };
+ port {
+ typec1_dr_sw: endpoint {
+ remote-endpoint = <&usb1_drd_sw>;
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
index feae77e03835..a08057410bde 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
@@ -234,7 +234,7 @@
status = "okay";
tpm: tpm@0 {
- compatible = "infineon,slb9670";
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
reg = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_tpm>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
index d98a040860a4..5828c9d7821d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
@@ -486,7 +486,7 @@
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
- status = "okay";
+ status = "disabled";
};
&usb3_phy0 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
index fea67a9282f0..b749e28e5ede 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-pdk3.dts
@@ -175,14 +175,10 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ptn5150>;
- connector {
- compatible = "usb-c-connector";
- label = "USB-C";
-
- port {
- ptn5150_out_ep: endpoint {
- remote-endpoint = <&dwc3_0_ep>;
- };
+ port {
+ ptn5150_out_ep: endpoint {
+ remote-endpoint = <&dwc3_0_ep>;
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
index 4ae4fdab461e..43f1d45ccc96 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
@@ -255,7 +255,7 @@
<&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-rates = <13000000>, <13000000>, <156000000>;
- reset-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
status = "disabled";
ports {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
index a2d5d19b2de0..86d3da36e4f3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
@@ -184,6 +184,13 @@
enable-active-high;
};
+ reg_vcc_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
reg_vcc_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC_3V3";
@@ -480,7 +487,7 @@
clock-names = "mclk";
clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1>;
reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
- iov-supply = <&reg_vcc_3v3>;
+ iov-supply = <&reg_vcc_1v8>;
ldoin-supply = <&reg_vcc_3v3>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
index c24587c895e1..41c79d2ebdd6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
@@ -103,7 +103,7 @@
status = "okay";
tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x1>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
index 628ffba69862..d5c400b355af 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
@@ -115,7 +115,7 @@
status = "okay";
tpm@1 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x1>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index 9caf7ca25444..cae586cd45bd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -196,7 +196,7 @@
status = "okay";
tpm@0 {
- compatible = "tcg,tpm_tis-spi";
+ compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
reg = <0x0>;
spi-max-frequency = <36000000>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 76c73daf546b..39a550c1cd26 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -1820,7 +1820,7 @@
compatible = "fsl,imx8mp-ldb";
reg = <0x5c 0x4>, <0x128 0x4>;
reg-names = "ldb", "lvds";
- clocks = <&clk IMX8MP_CLK_MEDIA_LDB>;
+ clocks = <&clk IMX8MP_CLK_MEDIA_LDB_ROOT>;
clock-names = "ldb";
assigned-clocks = <&clk IMX8MP_CLK_MEDIA_LDB>;
assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
index 6376417e918c..d8cf1f27c3ec 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
@@ -65,7 +65,7 @@
status = "okay";
tpm@0 {
- compatible = "infineon,slb9670";
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
reg = <0>;
spi-max-frequency = <43000000>;
};
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index 48ec4ebec0a8..b864ffa74ea8 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -126,7 +126,6 @@
amba {
#address-cells = <2>;
#size-cells = <1>;
- #interrupt-cells = <3>;
compatible = "simple-bus";
interrupt-parent = <&gic>;
diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
index 3869460aa5dc..996fb39bb50c 100644
--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
@@ -126,7 +126,6 @@
amba {
#address-cells = <2>;
#size-cells = <1>;
- #interrupt-cells = <3>;
compatible = "simple-bus";
interrupt-parent = <&gic>;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
index 2c920e22cec2..7ec7c789d87e 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
@@ -138,7 +138,6 @@
odmi: odmi@300000 {
compatible = "marvell,odmi-controller";
- interrupt-controller;
msi-controller;
marvell,odmi-frames = <4>;
reg = <0x300000 0x4000>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 5506de83f61d..1b3396b1cee3 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -888,7 +888,7 @@
status = "okay";
cs-gpios = <&pio 86 GPIO_ACTIVE_LOW>;
- cr50@0 {
+ tpm@0 {
compatible = "google,cr50";
reg = <0>;
spi-max-frequency = <1000000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index f2281250ac35..d87aab8d7a79 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -1402,7 +1402,7 @@
pinctrl-names = "default";
pinctrl-0 = <&spi5_pins>;
- cr50@0 {
+ tpm@0 {
compatible = "google,cr50";
reg = <0>;
interrupts-extended = <&pio 171 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
index 69c7f3954ae5..4127cb84eba4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
@@ -128,6 +128,7 @@
compatible = "mediatek,mt6360";
reg = <0x34>;
interrupt-controller;
+ #interrupt-cells = <1>;
interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "IRQB";
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
index ea13c4a7027c..81a82933e350 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
@@ -175,7 +175,7 @@
status = "okay";
phy-handle = <&mgbe0_phy>;
- phy-mode = "usxgmii";
+ phy-mode = "10gbase-r";
mdio {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
index 3f16595d099c..d1bd328892af 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
@@ -1459,7 +1459,7 @@
<&mc TEGRA234_MEMORY_CLIENT_MGBEAWR &emc>;
interconnect-names = "dma-mem", "write";
iommus = <&smmu_niso0 TEGRA234_SID_MGBE>;
- power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEA>;
+ power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEB>;
status = "disabled";
};
@@ -1493,7 +1493,7 @@
<&mc TEGRA234_MEMORY_CLIENT_MGBEBWR &emc>;
interconnect-names = "dma-mem", "write";
iommus = <&smmu_niso0 TEGRA234_SID_MGBE_VF1>;
- power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEB>;
+ power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEC>;
status = "disabled";
};
@@ -1527,7 +1527,7 @@
<&mc TEGRA234_MEMORY_CLIENT_MGBECWR &emc>;
interconnect-names = "dma-mem", "write";
iommus = <&smmu_niso0 TEGRA234_SID_MGBE_VF2>;
- power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBEC>;
+ power-domains = <&bpmp TEGRA234_POWER_DOMAIN_MGBED>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index 5e1277fea725..61c8fd49c966 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -830,10 +830,10 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ interrupt-map = <0 0 0 1 &intc 0 0 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 0 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 0 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 0 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
<&gcc GCC_PCIE0_AXI_M_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index cf295bed3299..26441447c866 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -814,13 +814,13 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 142
+ interrupt-map = <0 0 0 1 &intc 0 0 142
IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 143
+ <0 0 0 2 &intc 0 0 143
IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 144
+ <0 0 0 3 &intc 0 0 144
IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 145
+ <0 0 0 4 &intc 0 0 145
IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_SYS_NOC_PCIE1_AXI_CLK>,
@@ -876,13 +876,13 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 75
+ interrupt-map = <0 0 0 1 &intc 0 0 75
IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 78
+ <0 0 0 2 &intc 0 0 78
IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 79
+ <0 0 0 3 &intc 0 0 79
IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 83
+ <0 0 0 4 &intc 0 0 83
IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 8d41ed261adf..ee6f87c828ae 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -457,25 +457,6 @@
};
};
- mpm: interrupt-controller {
- compatible = "qcom,mpm";
- qcom,rpm-msg-ram = <&apss_mpm>;
- interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
- mboxes = <&apcs_glb 1>;
- interrupt-controller;
- #interrupt-cells = <2>;
- #power-domain-cells = <0>;
- interrupt-parent = <&intc>;
- qcom,mpm-pin-count = <96>;
- qcom,mpm-pin-map = <2 184>, /* TSENS1 upper_lower_int */
- <52 243>, /* DWC3_PRI ss_phy_irq */
- <79 347>, /* DWC3_PRI hs_phy_irq */
- <80 352>, /* DWC3_SEC hs_phy_irq */
- <81 347>, /* QUSB2_PHY_PRI DP+DM */
- <82 352>, /* QUSB2_PHY_SEC DP+DM */
- <87 326>; /* SPMI */
- };
-
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -765,15 +746,8 @@
};
rpm_msg_ram: sram@68000 {
- compatible = "qcom,rpm-msg-ram", "mmio-sram";
+ compatible = "qcom,rpm-msg-ram";
reg = <0x00068000 0x6000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x00068000 0x7000>;
-
- apss_mpm: sram@1b8 {
- reg = <0x1b8 0x48>;
- };
};
qfprom@74000 {
@@ -856,8 +830,8 @@
reg = <0x004ad000 0x1000>, /* TM */
<0x004ac000 0x1000>; /* SROT */
#qcom,sensors = <8>;
- interrupts-extended = <&mpm 2 IRQ_TYPE_LEVEL_HIGH>,
- <&intc GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "uplow", "critical";
#thermal-sensor-cells = <1>;
};
@@ -1363,7 +1337,6 @@
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
gpio-ranges = <&tlmm 0 0 150>;
- wakeup-parent = <&mpm>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -1891,7 +1864,7 @@
<0x0400a000 0x002100>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupt-names = "periph_irq";
- interrupts-extended = <&mpm 87 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
qcom,ee = <0>;
qcom,channel = <0>;
#address-cells = <2>;
@@ -3052,8 +3025,8 @@
#size-cells = <1>;
ranges;
- interrupts-extended = <&mpm 79 IRQ_TYPE_LEVEL_HIGH>,
- <&mpm 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq";
clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
index ffc4406422ae..41215567b3ae 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
@@ -563,6 +563,8 @@
};
&pcie4 {
+ max-link-speed = <2>;
+
perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index def3976bd5bb..eb657e544961 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -722,6 +722,8 @@
};
&pcie4 {
+ max-link-speed = <2>;
+
perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index 160e098f1075..f9849b8befbf 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -1304,6 +1304,9 @@
&config_noc SLAVE_QUP_0 RPM_ALWAYS_TAG>,
<&system_noc MASTER_QUP_0 RPM_ALWAYS_TAG
&bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>;
+ interconnect-names = "qup-core",
+ "qup-config",
+ "qup-memory";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
index 9d916edb1c73..be133a3d5cbe 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-mtp.dts
@@ -622,7 +622,7 @@
&tlmm {
/* Reserved I/Os for NFC */
- gpio-reserved-ranges = <32 8>;
+ gpio-reserved-ranges = <32 8>, <74 1>;
disp0_reset_n_active: disp0-reset-n-active-state {
pins = "gpio133";
diff --git a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
index 592a67a47c78..b9151c2ddf2e 100644
--- a/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8650-qrd.dts
@@ -659,7 +659,7 @@
&tlmm {
/* Reserved I/Os for NFC */
- gpio-reserved-ranges = <32 8>;
+ gpio-reserved-ranges = <32 8>, <74 1>;
bt_default: bt-default-state {
bt-en-pins {
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
index 3885ef3454ff..50de17e4fb3f 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
@@ -234,6 +234,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio6>;
interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
@@ -294,6 +295,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio6>;
interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
};
@@ -314,6 +316,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio7>;
interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
};
@@ -324,6 +327,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio5>;
interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
};
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index d0905515399b..9137dd76e72c 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -631,6 +631,7 @@
clock-names = "spiclk", "apb_pclk";
dmas = <&dmac 12>, <&dmac 13>;
dma-names = "tx", "rx";
+ num-cs = <2>;
pinctrl-names = "default";
pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>;
#address-cells = <1>;
@@ -646,6 +647,7 @@
clock-names = "spiclk", "apb_pclk";
dmas = <&dmac 14>, <&dmac 15>;
dma-names = "tx", "rx";
+ num-cs = <2>;
pinctrl-names = "default";
pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index fb5dcf6e9327..7b4c15c4a9c3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -488,7 +488,6 @@
pwm3: pwm@ff1b0030 {
compatible = "rockchip,rk3328-pwm";
reg = <0x0 0xff1b0030 0x0 0x10>;
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 0f9cc042d9bf..1cba1d857c96 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -70,7 +70,7 @@
&spi0 {
status = "okay";
- cr50@0 {
+ tpm@0 {
compatible = "google,cr50";
reg = <0>;
interrupt-parent = <&gpio0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index c5e7de60c121..5846a11f0e84 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -706,7 +706,7 @@ camera: &i2c7 {
&spi2 {
status = "okay";
- cr50@0 {
+ tpm@0 {
compatible = "google,cr50";
reg = <0>;
interrupt-parent = <&gpio1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
index d4c70835e0fe..a4946cdc3bb3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
@@ -72,7 +72,7 @@
vin-supply = <&vcc3v3_sys>;
};
- vcc5v0_usb30_host: vcc5v0-usb30-host-regulator {
+ vcc5v0_usb_host1: vcc5v0_usb_host2: vcc5v0-usb-host-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_host";
regulator-boot-on;
@@ -114,6 +114,7 @@
status = "okay";
};
+/* Standard pcie */
&pcie3x2 {
reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_sys>;
@@ -122,6 +123,7 @@
/* M.2 M-Key ssd */
&pcie3x4 {
+ num-lanes = <2>;
reset-gpios = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_sys>;
status = "okay";
@@ -188,12 +190,12 @@
};
&u2phy2_host {
- phy-supply = <&vcc5v0_usb30_host>;
+ phy-supply = <&vcc5v0_usb_host1>;
status = "okay";
};
&u2phy3_host {
- phy-supply = <&vcc5v0_usb30_host>;
+ phy-supply = <&vcc5v0_usb_host2>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
index 0b02f4d6e003..cce1c8e83587 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
@@ -16,8 +16,8 @@
aliases {
mmc0 = &sdhci;
- mmc1 = &sdio;
- mmc2 = &sdmmc;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
serial2 = &uart2;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
index ac7c677b0fb9..de30c2632b8e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
@@ -448,6 +448,7 @@
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
pinctrl-names = "default";
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
index 4ce70fb75a30..39d65002add1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
@@ -62,7 +62,6 @@
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&led1_pin>;
- status = "okay";
/* LED1 on PCB */
led-1 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
index d7722772ecd8..997b516c2533 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dts
@@ -189,19 +189,19 @@
cpu-supply = <&vdd_cpu_lit_s0>;
};
-&cpu_b0{
+&cpu_b0 {
cpu-supply = <&vdd_cpu_big0_s0>;
};
-&cpu_b1{
+&cpu_b1 {
cpu-supply = <&vdd_cpu_big0_s0>;
};
-&cpu_b2{
+&cpu_b2 {
cpu-supply = <&vdd_cpu_big1_s0>;
};
-&cpu_b3{
+&cpu_b3 {
cpu-supply = <&vdd_cpu_big1_s0>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
index ef4f058c20ff..e037bf9db75a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
@@ -19,8 +19,8 @@
aliases {
mmc0 = &sdhci;
- mmc1 = &sdio;
- mmc2 = &sdmmc;
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
};
analog-sound {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
index dc677f29a9c7..3c2278886851 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
@@ -195,13 +195,13 @@
&gpio1 {
gpio-line-names = /* GPIO1 A0-A7 */
- "HEADER_27_3v3", "HEADER_28_3v3", "", "",
+ "HEADER_27_3v3", "", "", "",
"HEADER_29_1v8", "", "HEADER_7_1v8", "",
/* GPIO1 B0-B7 */
"", "HEADER_31_1v8", "HEADER_33_1v8", "",
"HEADER_11_1v8", "HEADER_13_1v8", "", "",
/* GPIO1 C0-C7 */
- "", "", "", "",
+ "", "HEADER_28_3v3", "", "",
"", "", "", "",
/* GPIO1 D0-D7 */
"", "", "", "",
@@ -225,11 +225,11 @@
&gpio4 {
gpio-line-names = /* GPIO4 A0-A7 */
- "", "", "HEADER_37_3v3", "HEADER_32_3v3",
- "HEADER_36_3v3", "", "HEADER_35_3v3", "HEADER_38_3v3",
+ "", "", "HEADER_37_3v3", "HEADER_8_3v3",
+ "HEADER_10_3v3", "", "HEADER_32_3v3", "HEADER_35_3v3",
/* GPIO4 B0-B7 */
"", "", "", "HEADER_40_3v3",
- "HEADER_8_3v3", "HEADER_10_3v3", "", "",
+ "HEADER_38_3v3", "HEADER_36_3v3", "", "",
/* GPIO4 C0-C7 */
"", "", "", "",
"", "", "", "",
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index bac4cabef607..467ac2f768ac 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -227,8 +227,19 @@ static int ctr_encrypt(struct skcipher_request *req)
src += blocks * AES_BLOCK_SIZE;
}
if (nbytes && walk.nbytes == walk.total) {
+ u8 buf[AES_BLOCK_SIZE];
+ u8 *d = dst;
+
+ if (unlikely(nbytes < AES_BLOCK_SIZE))
+ src = dst = memcpy(buf + sizeof(buf) - nbytes,
+ src, nbytes);
+
neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
nbytes, walk.iv);
+
+ if (unlikely(nbytes < AES_BLOCK_SIZE))
+ memcpy(d, dst, nbytes);
+
nbytes = 0;
}
kernel_neon_end();
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 210bb43cff2c..d328f549b1a6 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -229,7 +229,7 @@ alternative_has_cap_likely(const unsigned long cpucap)
if (!cpucap_is_possible(cpucap))
return false;
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE_CB("b %l[l_no]", %[cpucap], alt_cb_patch_nops)
:
: [cpucap] "i" (cpucap)
@@ -247,7 +247,7 @@ alternative_has_cap_unlikely(const unsigned long cpucap)
if (!cpucap_is_possible(cpucap))
return false;
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE("nop", "b %l[l_yes]", %[cpucap])
:
: [cpucap] "i" (cpucap)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 7b1975bf4b90..513787e43329 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -760,32 +760,25 @@ alternative_endif
.endm
/*
- * Check whether preempt/bh-disabled asm code should yield as soon as
- * it is able. This is the case if we are currently running in task
- * context, and either a softirq is pending, or the TIF_NEED_RESCHED
- * flag is set and re-enabling preemption a single time would result in
- * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
- * stored negated in the top word of the thread_info::preempt_count
+ * Check whether asm code should yield as soon as it is able. This is
+ * the case if we are currently running in task context, and the
+ * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
+ * is stored negated in the top word of the thread_info::preempt_count
* field)
*/
- .macro cond_yield, lbl:req, tmp:req, tmp2:req
+ .macro cond_yield, lbl:req, tmp:req, tmp2
+#ifdef CONFIG_PREEMPT_VOLUNTARY
get_current_task \tmp
ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
/*
* If we are serving a softirq, there is no point in yielding: the
* softirq will not be preempted no matter what we do, so we should
- * run to completion as quickly as we can.
+ * run to completion as quickly as we can. The preempt_count field will
+ * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
+ * catch this case too.
*/
- tbnz \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
-#ifdef CONFIG_PREEMPTION
- sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
cbz \tmp, \lbl
#endif
- adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
- get_this_cpu_offset \tmp2
- ldr w\tmp, [\tmp, \tmp2]
- cbnz w\tmp, \lbl // yield on pending softirq in task context
-.Lnoyield_\@:
.endm
/*
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 21c824edf8ce..bd8d4ca81a48 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -83,7 +83,7 @@ struct arm64_ftr_bits {
* to full-0 denotes that this field has no override
*
* A @mask field set to full-0 with the corresponding @val field set
- * to full-1 denotes thath this field has an invalid override.
+ * to full-1 denotes that this field has an invalid override.
*/
struct arm64_ftr_override {
u64 val;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7c7493cb571f..52f076afeb96 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -61,6 +61,7 @@
#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_IMP_APPLE 0x61
#define ARM_CPU_IMP_AMPERE 0xC0
+#define ARM_CPU_IMP_MICROSOFT 0x6D
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -135,6 +136,8 @@
#define AMPERE_CPU_PART_AMPERE1 0xAC3
+#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -193,6 +196,7 @@
#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
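
For reference, the MIDR model constants above pack the implementer code and part number into fixed MIDR_EL1 bit positions (implementer [31:24], part number [15:4], plus the 0xf architecture nibble at [19:16] that the kernel macro also sets). A small sketch of the composition; the Azure Cobalt 100 reuses the Neoverse N2 part number 0xD49 under Microsoft's implementer code 0x6D, which is why it joins the same erratum lists later in the series:

    #include <stdint.h>
    #include <stdio.h>

    /* MIDR_EL1: implementer [31:24], architecture [19:16], part [15:4] */
    static uint32_t midr_cpu_model(uint32_t implementer, uint32_t partnum)
    {
        return (implementer << 24) | (0xfu << 16) | (partnum << 4);
    }

    int main(void)
    {
        printf("MIDR_NEOVERSE_N2:                0x%08x\n",
               midr_cpu_model(0x41, 0xD49));
        printf("MIDR_MICROSOFT_AZURE_COBALT_100: 0x%08x\n",
               midr_cpu_model(0x6D, 0xD49));
        return 0;
    }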
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 50e5f25d3024..b67b89c54e1c 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -62,13 +62,13 @@ static inline void cpacr_restore(unsigned long cpacr)
* When we defined the maximum SVE vector length we defined the ABI so
* that the maximum vector length included all the reserved for future
* expansion bits in ZCR rather than those just currently defined by
- * the architecture. While SME follows a similar pattern the fact that
- * it includes a square matrix means that any allocations that attempt
- * to cover the maximum potential vector length (such as happen with
- * the regset used for ptrace) end up being extremely large. Define
- * the much lower actual limit for use in such situations.
+ * the architecture. Using this length to allocate worst-case buffers
+ * results in excessively large allocations, and this effect is even
+ * more pronounced for SME due to ZA. Define more suitable VLs for
+ * these situations.
*/
-#define SME_VQ_MAX 16
+#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
+#define SME_VQ_MAX ((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
struct task_struct;
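
Assuming 4-bit ZCR_ELx.LEN / SMCR_ELx.LEN fields, both new constants evaluate to 16, far below the ABI's SVE_VQ_MAX of 512. A sketch of why that matters for worst-case allocations, using the fact that SME's ZA storage is VL x VL bytes:

    #include <stdio.h>

    #define SVE_VQ_BYTES    16
    #define SVE_VQ_MAX      512    /* ABI worst case, reserved LEN bits included */
    #define ARCH_VQ_MAX     16     /* ((0xf >> 0) + 1): 4-bit LEN field */

    int main(void)
    {
        unsigned long abi_vl  = SVE_VQ_MAX * SVE_VQ_BYTES;     /* 8192 bytes */
        unsigned long arch_vl = ARCH_VQ_MAX * SVE_VQ_BYTES;    /* 256 bytes */

        /* SME's ZA array is VL x VL bytes */
        printf("ZA sized for SVE_VQ_MAX: %lu MiB\n", abi_vl * abi_vl >> 20);
        printf("ZA sized for SME_VQ_MAX: %lu KiB\n", arch_vl * arch_vl >> 10);
        return 0;
    }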
@@ -386,6 +386,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
extern unsigned int sme_get_vl(void);
extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
+extern void sme_suspend_exit(void);
/*
* Return how many bytes of memory are required to store the full SME
@@ -421,6 +422,7 @@ static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
+static inline void sme_suspend_exit(void) { }
static inline size_t sme_state_size(struct task_struct const *task)
{
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 50ce8b697ff3..e93548914c36 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,8 @@
#ifndef __ASSEMBLER__
+#include <linux/cpumask.h>
+
#include <asm-generic/irq.h>
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu);
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 48ddc0f45d22..6aafbb789991 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -18,7 +18,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: nop \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"
@@ -35,7 +35,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: b %l[l_yes] \n\t"
" .pushsection __jump_table, \"aw\" \n\t"
" .align 3 \n\t"
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
index 68908b82b168..587bdb91ab7a 100644
--- a/arch/arm64/include/asm/patching.h
+++ b/arch/arm64/include/asm/patching.h
@@ -8,6 +8,8 @@ int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
int aarch64_insn_write_literal_u64(void *addr, u64 val);
+void *aarch64_insn_set(void *dst, u32 insn, size_t len);
+void *aarch64_insn_copy(void *dst, void *src, size_t len);
int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index b4ae32109932..4305995c8f82 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -17,9 +17,6 @@
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
-#ifdef CONFIG_COMPAT_VDSO
-#include <generated/vdso32-offsets.h>
-#endif
#define VDSO_SYMBOL(base, name) \
({ \
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index d95b3d6b471a..467cb7117273 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -73,7 +73,13 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso-wrap.o
obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
+
+# We need to prevent the SCS patching code from patching itself. Using
+# -mbranch-protection=none here to keep the patchable PAC opcodes from being
+# generated triggers an issue with full LTO on Clang, which stops emitting PAC
+# instructions altogether. So disable LTO as well for the compilation unit.
CFLAGS_patch-scs.o += -mbranch-protection=none
+CFLAGS_REMOVE_patch-scs.o += $(CC_FLAGS_LTO)
# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5ff1942b04fc..5a7dbbe0ce63 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -117,8 +117,6 @@ int main(void)
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
- DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
- DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
BLANK();
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index e29e0fea63fb..76b8dd37092a 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -374,6 +374,7 @@ static const struct midr_range erratum_1463225[] = {
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -387,6 +388,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -399,6 +401,7 @@ static const struct midr_range tsb_flush_fail_cpus[] = {
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
@@ -416,6 +419,19 @@ static struct midr_range broken_aarch32_aes[] = {
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+static const struct midr_range erratum_spec_unpriv_load_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_3117295
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2966298
+ /* Cortex-A520 r0p0 to r0p1 */
+ MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
+#endif
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -713,12 +729,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
},
#endif
-#ifdef CONFIG_ARM64_ERRATUM_2966298
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
{
- .desc = "ARM erratum 2966298",
- .capability = ARM64_WORKAROUND_2966298,
+ .desc = "ARM errata 2966298, 3117295",
+ .capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
/* Cortex-A520 r0p0 - r0p1 */
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
+ ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
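
The erratum lists above are matched entry by entry: MIDR_ALL_VERSIONS() covers every variant/revision of a model, while MIDR_REV_RANGE() restricts the match to a revision window. A sketch of that matching logic with invented helper names (part number 0xD80 for Cortex-A520 is taken from cputype.h; the variant/revision packing here is simplified):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct midr_range {
        uint32_t model;             /* implementer + arch + part number */
        uint32_t rv_min, rv_max;    /* packed (variant << 4) | revision */
    };

    static uint32_t midr_model(uint32_t midr)
    {
        return midr & ~((0xfu << 20) | 0xfu);    /* drop variant/revision */
    }

    static uint32_t midr_var_rev(uint32_t midr)
    {
        return (((midr >> 20) & 0xf) << 4) | (midr & 0xf);
    }

    static bool is_midr_in_range(uint32_t midr, const struct midr_range *r)
    {
        uint32_t vr = midr_var_rev(midr);

        return midr_model(midr) == r->model &&
               vr >= r->rv_min && vr <= r->rv_max;
    }

    int main(void)
    {
        /* Cortex-A520 (part 0xD80), r0p0..r0p1 only */
        const struct midr_range a520 = { 0x410fd800, 0x00, 0x01 };

        printf("r0p1: %d\n", is_midr_in_range(0x410fd801, &a520));    /* 1 */
        printf("r1p0: %d\n", is_midr_in_range(0x411fd800, &a520));    /* 0 */
        return 0;
    }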
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a6030913cd58..7ef0e127b149 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -428,16 +428,9 @@ alternative_else_nop_endif
ldp x28, x29, [sp, #16 * 14]
.if \el == 0
-alternative_if ARM64_WORKAROUND_2966298
- tlbi vale1, xzr
- dsb nsh
-alternative_else_nop_endif
-alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
- ldr lr, [sp, #S_LR]
- add sp, sp, #PT_REGS_SIZE // restore sp
- eret
-alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0
+
msr far_el1, x29
ldr_this_cpu x30, this_cpu_vector, x29
@@ -446,16 +439,26 @@ alternative_else_nop_endif
ldr lr, [sp, #S_LR] // restore x30
add sp, sp, #PT_REGS_SIZE // restore sp
br x29
+
+.L_skip_tramp_exit_\@:
#endif
- .else
+ .endif
+
ldr lr, [sp, #S_LR]
add sp, sp, #PT_REGS_SIZE // restore sp
+ .if \el == 0
+ /* This must be after the last explicit memory access */
+alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+ tlbi vale1, xzr
+ dsb nsh
+alternative_else_nop_endif
+ .else
/* Ensure any device/NC reads complete */
alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+ .endif
eret
- .endif
sb
.endm
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 505f389be3e0..f27acca550d5 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -898,10 +898,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
* allocate SVE now in case it is needed for use in streaming
* mode.
*/
- if (system_supports_sve()) {
- sve_free(task);
- sve_alloc(task, true);
- }
+ sve_free(task);
+ sve_alloc(task, true);
if (free_sme)
sme_free(task);
@@ -1219,8 +1217,10 @@ void fpsimd_release_task(struct task_struct *dead_task)
*/
void sme_alloc(struct task_struct *task, bool flush)
{
- if (task->thread.sme_state && flush) {
- memset(task->thread.sme_state, 0, sme_state_size(task));
+ if (task->thread.sme_state) {
+ if (flush)
+ memset(task->thread.sme_state, 0,
+ sme_state_size(task));
return;
}
@@ -1311,6 +1311,22 @@ void __init sme_setup(void)
get_sme_default_vl());
}
+void sme_suspend_exit(void)
+{
+ u64 smcr = 0;
+
+ if (!system_supports_sme())
+ return;
+
+ if (system_supports_fa64())
+ smcr |= SMCR_ELx_FA64;
+ if (system_supports_sme2())
+ smcr |= SMCR_ELx_EZT0;
+
+ write_sysreg_s(smcr, SYS_SMCR_EL1);
+ write_sysreg_s(0, SYS_SMPRI_EL1);
+}
+
#endif /* CONFIG_ARM64_SME */
static void sve_init_regs(void)
@@ -1635,7 +1651,7 @@ void fpsimd_preserve_current_state(void)
void fpsimd_signal_preserve_current_state(void)
{
fpsimd_preserve_current_state();
- if (test_thread_flag(TIF_SVE))
+ if (current->thread.fp_type == FP_STATE_SVE)
sve_to_fpsimd(current);
}
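
The reworked sme_alloc() above now distinguishes "buffer exists, maybe flush" from "no buffer, allocate": ZA data survives when the caller passes flush=false, which the za_set() change later in this series depends on. A minimal userspace sketch of those semantics (names simplified, error handling mostly elided):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct task {
        char *sme_state;
        size_t sme_size;
    };

    static void sme_alloc(struct task *t, bool flush)
    {
        if (t->sme_state) {
            if (flush)
                memset(t->sme_state, 0, t->sme_size);
            return;                 /* otherwise keep live ZA data */
        }
        t->sme_state = calloc(1, t->sme_size);  /* fresh state is zeroed */
    }

    int main(void)
    {
        struct task t = { NULL, 64 };

        sme_alloc(&t, true);        /* allocates */
        if (!t.sme_state)
            return 1;
        t.sme_state[0] = 1;         /* pretend ZA holds data */
        sme_alloc(&t, false);       /* preserves it */
        printf("preserved: %d\n", t.sme_state[0]);    /* 1 */
        sme_alloc(&t, true);        /* explicit flush zeroes it */
        printf("flushed:   %d\n", t.sme_state[0]);    /* 0 */
        free(t.sme_state);
        return 0;
    }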
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
index b4835f6d594b..255534930368 100644
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -105,6 +105,81 @@ noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val)
return ret;
}
+typedef void text_poke_f(void *dst, void *src, size_t patched, size_t len);
+
+static void *__text_poke(text_poke_f func, void *addr, void *src, size_t len)
+{
+ unsigned long flags;
+ size_t patched = 0;
+ size_t size;
+ void *waddr;
+ void *ptr;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+
+ while (patched < len) {
+ ptr = addr + patched;
+ size = min_t(size_t, PAGE_SIZE - offset_in_page(ptr),
+ len - patched);
+
+ waddr = patch_map(ptr, FIX_TEXT_POKE0);
+ func(waddr, src, patched, size);
+ patch_unmap(FIX_TEXT_POKE0);
+
+ patched += size;
+ }
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);
+
+ return addr;
+}
+
+static void text_poke_memcpy(void *dst, void *src, size_t patched, size_t len)
+{
+ copy_to_kernel_nofault(dst, src + patched, len);
+}
+
+static void text_poke_memset(void *dst, void *src, size_t patched, size_t len)
+{
+ u32 c = *(u32 *)src;
+
+ memset32(dst, c, len / 4);
+}
+
+/**
+ * aarch64_insn_copy - Copy instructions into (an unused part of) RX memory
+ * @dst: address to modify
+ * @src: source of the copy
+ * @len: length to copy
+ *
+ * Useful for JITs to dump new code blocks into unused regions of RX memory.
+ */
+noinstr void *aarch64_insn_copy(void *dst, void *src, size_t len)
+{
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)dst & 0x3)
+ return NULL;
+
+ return __text_poke(text_poke_memcpy, dst, src, len);
+}
+
+/**
+ * aarch64_insn_set - memset for RX memory regions.
+ * @dst: address to modify
+ * @insn: value to set
+ * @len: length of memory region.
+ *
+ * Useful for JITs to fill regions of RX memory with illegal instructions.
+ */
+noinstr void *aarch64_insn_set(void *dst, u32 insn, size_t len)
+{
+ if ((uintptr_t)dst & 0x3)
+ return NULL;
+
+ return __text_poke(text_poke_memset, dst, &insn, len);
+}
+
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
u32 *tp = addr;
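
__text_poke() above walks the target range one page at a time because each iteration maps a single page of read-only text at a temporary writable fixmap slot. A standalone sketch of just the chunking arithmetic (PAGE_SIZE fixed at 4 KiB for the demo):

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static size_t offset_in_page(size_t addr)
    {
        return addr & (PAGE_SIZE - 1);
    }

    int main(void)
    {
        size_t addr = 0x1000ff0;    /* starts near a page boundary */
        size_t len = 0x20, patched = 0;

        while (patched < len) {
            size_t ptr = addr + patched;
            size_t size = PAGE_SIZE - offset_in_page(ptr);

            if (size > len - patched)
                size = len - patched;    /* min_t() in the kernel */

            printf("poke %#zx..%#zx\n", ptr, ptr + size);
            patched += size;
        }
        return 0;
    }

For the example range this yields two chunks, one ending exactly at the page boundary and one covering the remainder, matching what the kernel loop would map and write.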
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 09bb7fc7d3c2..e3bef38fc2e2 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1108,12 +1108,13 @@ static int za_set(struct task_struct *target,
}
}
- /* Allocate/reinit ZA storage */
- sme_alloc(target, true);
- if (!target->thread.sme_state) {
- ret = -ENOMEM;
- goto out;
- }
+ /*
+ * Only flush the storage if PSTATE.ZA was not already set,
+ * otherwise preserve any existing data.
+ */
+ sme_alloc(target, !thread_za_enabled(&target->thread));
+ if (!target->thread.sme_state)
+ return -ENOMEM;
/* If there is no data then disable ZA */
if (!count) {
@@ -1499,7 +1500,8 @@ static const struct user_regset aarch64_regsets[] = {
#ifdef CONFIG_ARM64_SVE
[REGSET_SVE] = { /* Scalable Vector Extension */
.core_note_type = NT_ARM_SVE,
- .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
+ SVE_PT_REGS_SVE),
SVE_VQ_BYTES),
.size = SVE_VQ_BYTES,
.align = SVE_VQ_BYTES,
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 0e8beb3349ea..425b1bc17a3f 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -242,7 +242,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
vl = task_get_sme_vl(current);
vq = sve_vq_from_vl(vl);
flags |= SVE_SIG_FLAG_SM;
- } else if (test_thread_flag(TIF_SVE)) {
+ } else if (current->thread.fp_type == FP_STATE_SVE) {
vq = sve_vq_from_vl(vl);
}
@@ -878,7 +878,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
if (system_supports_sve() || system_supports_sme()) {
unsigned int vq = 0;
- if (add_all || test_thread_flag(TIF_SVE) ||
+ if (add_all || current->thread.fp_type == FP_STATE_SVE ||
thread_sm_enabled(&current->thread)) {
int vl = max(sve_max_vl(), sme_max_vl());
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 7f88028a00c0..66cffc5fc0be 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
+#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
@@ -266,6 +267,31 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
+struct bpf_unwind_consume_entry_data {
+ bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
+ void *cookie;
+};
+
+static bool
+arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+ struct bpf_unwind_consume_entry_data *data = cookie;
+
+ return data->consume_entry(data->cookie, state->common.pc, 0,
+ state->common.fp);
+}
+
+noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
+ u64 fp), void *cookie)
+{
+ struct bpf_unwind_consume_entry_data data = {
+ .consume_entry = consume_entry,
+ .cookie = cookie,
+ };
+
+ kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
+}
+
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
char *loglvl = arg;
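
arch_bpf_stack_walk() above is a callback adapter: the BPF-facing (cookie, ip, sp, fp) consumer is bundled with its cookie into a struct, and a small trampoline unpacks it for the generic unwinder, which only knows its own callback shape. A self-contained sketch of the pattern with simplified types and a fake two-frame walk:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct unwind_state { uint64_t pc, fp; };

    typedef bool (*unwind_fn)(const struct unwind_state *, void *cookie);

    struct adapter {
        bool (*consume)(void *cookie, uint64_t ip, uint64_t sp, uint64_t fp);
        void *cookie;
    };

    static bool trampoline(const struct unwind_state *state, void *cookie)
    {
        struct adapter *a = cookie;

        /* sp is not tracked by this unwinder, so pass 0 (as above). */
        return a->consume(a->cookie, state->pc, 0, state->fp);
    }

    static void walk(unwind_fn fn, void *cookie)
    {
        struct unwind_state frames[] = { { 0x1000, 0x2000 }, { 0x1100, 0x2040 } };

        for (size_t i = 0; i < 2 && fn(&frames[i], cookie); i++)
            ;
    }

    static bool print_frame(void *cookie, uint64_t ip, uint64_t sp, uint64_t fp)
    {
        (void)cookie; (void)sp;
        printf("ip=%#lx fp=%#lx\n", (unsigned long)ip, (unsigned long)fp);
        return true;    /* keep walking */
    }

    int main(void)
    {
        struct adapter a = { .consume = print_frame, .cookie = NULL };

        walk(trampoline, &a);
        return 0;
    }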
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index eca4d0435211..eaaff94329cd 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -12,6 +12,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
+#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
@@ -80,6 +81,8 @@ void notrace __cpu_suspend_exit(void)
*/
spectre_v4_enable_mitigation(NULL);
+ sme_suspend_exit();
+
/* Restore additional feature-specific configuration */
ptrauth_suspend_exit();
}
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 2266fcdff78a..f5f80fdce0fe 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -127,9 +127,6 @@ obj-vdso := $(c-obj-vdso) $(c-obj-vdso-gettimeofday) $(asm-obj-vdso)
targets += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
-include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
- $(call if_changed,vdsosym)
-
# Strip rule for vdso.so
$(obj)/vdso.so: OBJCOPYFLAGS := -S
$(obj)/vdso.so: $(obj)/vdso32.so.dbg FORCE
@@ -166,9 +163,3 @@ quiet_cmd_vdsoas = AS32 $@
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(obj)/$(munge) $< $@
-
-# Generate vDSO offsets using helper script (borrowed from the 64-bit vDSO)
-gen-vdsosym := $(srctree)/$(src)/../vdso/gen_vdso_offsets.sh
-quiet_cmd_vdsosym = VDSOSYM $@
-# The AArch64 nm should be able to read an AArch32 binary
- cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 6c3c8ca73e7f..27ca89b628a0 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -3,7 +3,6 @@
# KVM configuration
#
-source "virt/lib/Kconfig"
source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index c651df904fe3..ab9d05fcf98b 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -1419,7 +1419,6 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
level + 1);
if (ret) {
kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
- mm_ops->put_page(pgtable);
return ERR_PTR(ret);
}
@@ -1502,7 +1501,6 @@ static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
if (!stage2_try_break_pte(ctx, mmu)) {
kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
- mm_ops->put_page(childp);
return -EAGAIN;
}
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 8350fb8fee0b..b7be96a53597 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -101,6 +101,17 @@ void __init kvm_hyp_reserve(void)
hyp_mem_base);
}
+static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+{
+ if (host_kvm->arch.pkvm.handle) {
+ WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
+ host_kvm->arch.pkvm.handle));
+ }
+
+ host_kvm->arch.pkvm.handle = 0;
+ free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+}
+
/*
* Allocates and donates memory for hypervisor VM structs at EL2.
*
@@ -181,7 +192,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
return 0;
destroy_vm:
- pkvm_destroy_hyp_vm(host_kvm);
+ __pkvm_destroy_hyp_vm(host_kvm);
return ret;
free_vm:
free_pages_exact(hyp_vm, hyp_vm_sz);
@@ -194,23 +205,19 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
{
int ret = 0;
- mutex_lock(&host_kvm->lock);
+ mutex_lock(&host_kvm->arch.config_lock);
if (!host_kvm->arch.pkvm.handle)
ret = __pkvm_create_hyp_vm(host_kvm);
- mutex_unlock(&host_kvm->lock);
+ mutex_unlock(&host_kvm->arch.config_lock);
return ret;
}
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
{
- if (host_kvm->arch.pkvm.handle) {
- WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
- host_kvm->arch.pkvm.handle));
- }
-
- host_kvm->arch.pkvm.handle = 0;
- free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+ mutex_lock(&host_kvm->arch.config_lock);
+ __pkvm_destroy_hyp_vm(host_kvm);
+ mutex_unlock(&host_kvm->arch.config_lock);
}
int pkvm_init_host_vm(struct kvm *host_kvm)
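
The pkvm change above is the classic locked/__unlocked split: public entry points take config_lock and delegate to a bare __pkvm_destroy_hyp_vm() helper, which error paths that already hold the lock can call directly without self-deadlocking. A sketch of the idiom with pthreads standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
    static int handle;

    static void __destroy(void)     /* caller must hold config_lock */
    {
        if (handle)
            printf("tearing down vm %d\n", handle);
        handle = 0;
    }

    static int __create(void)       /* caller must hold config_lock */
    {
        handle = 42;
        /* a partial failure here may call __destroy() directly */
        return 0;
    }

    static int create(void)
    {
        int ret = 0;

        pthread_mutex_lock(&config_lock);
        if (!handle)
            ret = __create();
        pthread_mutex_unlock(&config_lock);
        return ret;
    }

    static void destroy(void)
    {
        pthread_mutex_lock(&config_lock);
        __destroy();
        pthread_mutex_unlock(&config_lock);
    }

    int main(void)
    {
        create();
        destroy();
        return 0;
    }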
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index e2764d0ffa9f..28a93074eca1 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -468,6 +468,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
}
irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+ if (!irq)
+ continue;
+
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = pendmask & (1U << bit_nr);
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -1432,6 +1435,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
for (i = 0; i < irq_count; i++) {
irq = vgic_get_irq(kvm, NULL, intids[i]);
+ if (!irq)
+ continue;
update_affinity(irq, vcpu2);
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 8955da5c47cf..5afc7a525eca 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -76,6 +76,7 @@ struct jit_ctx {
int *offset;
int exentry_idx;
__le32 *image;
+ __le32 *ro_image;
u32 stack_size;
int fpb_offset;
};
@@ -205,6 +206,14 @@ static void jit_fill_hole(void *area, unsigned int size)
*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+ if (!aarch64_insn_set(dst, AARCH64_BREAK_FAULT, len))
+ return -EINVAL;
+
+ return 0;
+}
+
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
int to = ctx->epilogue_offset;
@@ -285,7 +294,8 @@ static bool is_lsi_offset(int offset, int scale)
/* Tail call offset to jump into */
#define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
-static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
+static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
+ bool is_exception_cb)
{
const struct bpf_prog *prog = ctx->prog;
const bool is_main_prog = !bpf_is_subprog(prog);
@@ -333,19 +343,34 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
emit(A64_NOP, ctx);
- /* Sign lr */
- if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
- emit(A64_PACIASP, ctx);
-
- /* Save FP and LR registers to stay align with ARM64 AAPCS */
- emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
- emit(A64_MOV(1, A64_FP, A64_SP), ctx);
-
- /* Save callee-saved registers */
- emit(A64_PUSH(r6, r7, A64_SP), ctx);
- emit(A64_PUSH(r8, r9, A64_SP), ctx);
- emit(A64_PUSH(fp, tcc, A64_SP), ctx);
- emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ if (!is_exception_cb) {
+ /* Sign lr */
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ emit(A64_PACIASP, ctx);
+ /* Save FP and LR registers to stay aligned with ARM64 AAPCS */
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+
+ /* Save callee-saved registers */
+ emit(A64_PUSH(r6, r7, A64_SP), ctx);
+ emit(A64_PUSH(r8, r9, A64_SP), ctx);
+ emit(A64_PUSH(fp, tcc, A64_SP), ctx);
+ emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
+ } else {
+ /*
+ * The exception callback receives the main program's FP as its
+ * third parameter
+ */
+ emit(A64_MOV(1, A64_FP, A64_R(2)), ctx);
+ /*
+ * The main program has already pushed the frame record and the
+ * callee-saved registers. The exception callback pushes nothing and
+ * re-uses the main program's stack.
+ *
+ * 10 registers are on the stack
+ */
+ emit(A64_SUB_I(1, A64_SP, A64_FP, 80), ctx);
+ }
/* Set up BPF prog stack base register */
emit(A64_MOV(1, fp, A64_SP), ctx);
@@ -365,6 +390,20 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit_bti(A64_BTI_J, ctx);
}
+ /*
+ * A program acting as an exception boundary should save all ARM64
+ * callee-saved registers, as the exception callback needs to recover
+ * them in its epilogue.
+ */
+ if (prog->aux->exception_boundary) {
+ /*
+ * As we are pushing two more registers, BPF_FP should be moved
+ * 16 bytes
+ */
+ emit(A64_SUB_I(1, fp, fp, 16), ctx);
+ emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
+ }
+
emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
/* Stack must be multiples of 16B */
@@ -653,7 +692,7 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp;
}
-static void build_epilogue(struct jit_ctx *ctx)
+static void build_epilogue(struct jit_ctx *ctx, bool is_exception_cb)
{
const u8 r0 = bpf2a64[BPF_REG_0];
const u8 r6 = bpf2a64[BPF_REG_6];
@@ -666,6 +705,15 @@ static void build_epilogue(struct jit_ctx *ctx)
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
+ /*
+ * A program acting as an exception boundary pushes R23 and R24 in
+ * addition to the BPF callee-saved registers. The exception callback
+ * uses the boundary program's stack frame, so recover these extra
+ * registers in both cases.
+ */
+ if (ctx->prog->aux->exception_boundary || is_exception_cb)
+ emit(A64_POP(A64_R(23), A64_R(24), A64_SP), ctx);
+
/* Restore x27 and x28 */
emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
/* Restore fs (x25) and x26 */
@@ -707,7 +755,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
struct jit_ctx *ctx,
int dst_reg)
{
- off_t offset;
+ off_t ins_offset;
+ off_t fixup_offset;
unsigned long pc;
struct exception_table_entry *ex;
@@ -724,12 +773,17 @@ static int add_exception_handler(const struct bpf_insn *insn,
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->exentry_idx];
- pc = (unsigned long)&ctx->image[ctx->idx - 1];
+ pc = (unsigned long)&ctx->ro_image[ctx->idx - 1];
- offset = pc - (long)&ex->insn;
- if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ /*
+ * This is the offset of the instruction that may fault, relative to
+ * the exception table entry itself. It is written to the exception
+ * table; if the instruction faults, the destination register is set
+ * to '0' and execution jumps to the next instruction.
+ */
+ ins_offset = pc - (long)&ex->insn;
+ if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
return -ERANGE;
- ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
@@ -738,12 +792,25 @@ static int add_exception_handler(const struct bpf_insn *insn,
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
+ *
+ * The fixup_offset points at the instruction after the one that may
+ * fault. Execution jumps there once the fault has been handled.
*/
- offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
- if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+ fixup_offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
+ if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
return -ERANGE;
- ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+ /*
+ * The offsets above have been calculated using the RO buffer but we
+ * need to use the R/W buffer for writes.
+ * switch ex to rw buffer for writing.
+ */
+ ex = (void *)ctx->image + ((void *)ex - (void *)ctx->ro_image);
+
+ ex->insn = ins_offset;
+
+ ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
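
The extable fields above are self-relative offsets: each stores a distance from the field's own address, computed against the final RO addresses but written through the RW alias. A sketch of the two computations with made-up addresses (4-byte instructions, as on arm64):

    #include <stdint.h>
    #include <stdio.h>

    struct extable_entry {
        int32_t insn;     /* offset of faulting insn, relative to &insn */
        int32_t fixup;    /* fixup offset, relative to &fixup */
    };

    int main(void)
    {
        struct extable_entry ex;
        int64_t ro_ex = 0x200000;    /* &ex->insn in the final RO image */
        int64_t pc = 0x100000;       /* address of the faulting instruction */

        int64_t ins_offset = pc - ro_ex;
        int64_t fixup_offset = (ro_ex + 4) - (pc + 4);    /* &ex->fixup - next insn */

        ex.insn = (int32_t)ins_offset;      /* negative: code precedes table */
        ex.fixup = (int32_t)fixup_offset;   /* positive: table follows code */
        printf("insn=%d fixup=%d\n", ex.insn, ex.fixup);
        return 0;
    }

This is also why the code above recomputes ex against ctx->image before storing: the offsets must describe the RX addresses, but only the RW shadow is writable at this point.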
@@ -1511,7 +1578,8 @@ static inline void bpf_flush_icache(void *start, void *end)
struct arm64_jit_data {
struct bpf_binary_header *header;
- u8 *image;
+ u8 *ro_image;
+ struct bpf_binary_header *ro_header;
struct jit_ctx ctx;
};
@@ -1520,12 +1588,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
int image_size, prog_size, extable_size, extable_align, extable_offset;
struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
+ struct bpf_binary_header *ro_header;
struct arm64_jit_data *jit_data;
bool was_classic = bpf_prog_was_classic(prog);
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
u8 *image_ptr;
+ u8 *ro_image_ptr;
if (!prog->jit_requested)
return orig_prog;
@@ -1552,8 +1622,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
if (jit_data->ctx.offset) {
ctx = jit_data->ctx;
- image_ptr = jit_data->image;
+ ro_image_ptr = jit_data->ro_image;
+ ro_header = jit_data->ro_header;
header = jit_data->header;
+ image_ptr = (void *)header + ((void *)ro_image_ptr
+ - (void *)ro_header);
extra_pass = true;
prog_size = sizeof(u32) * ctx.idx;
goto skip_init_ctx;
@@ -1575,7 +1648,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
- if (build_prologue(&ctx, was_classic)) {
+ if (build_prologue(&ctx, was_classic, prog->aux->exception_cb)) {
prog = orig_prog;
goto out_off;
}
@@ -1586,7 +1659,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.epilogue_offset = ctx.idx;
- build_epilogue(&ctx);
+ build_epilogue(&ctx, prog->aux->exception_cb);
build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry);
@@ -1598,63 +1671,81 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* also allocate space for plt target */
extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
image_size = extable_offset + extable_size;
- header = bpf_jit_binary_alloc(image_size, &image_ptr,
- sizeof(u32), jit_fill_hole);
- if (header == NULL) {
+ ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
+ sizeof(u32), &header, &image_ptr,
+ jit_fill_hole);
+ if (!ro_header) {
prog = orig_prog;
goto out_off;
}
/* 2. Now, the actual pass. */
+ /*
+ * Use the image (RW) for writing the JITed instructions, but also
+ * keep the ro_image (RX) for calculating offsets in the image. The RW
+ * image will later be copied to the RX image, from where the program
+ * will run. bpf_jit_binary_pack_finalize() does this copy in the
+ * final step.
+ */
ctx.image = (__le32 *)image_ptr;
+ ctx.ro_image = (__le32 *)ro_image_ptr;
if (extable_size)
- prog->aux->extable = (void *)image_ptr + extable_offset;
+ prog->aux->extable = (void *)ro_image_ptr + extable_offset;
skip_init_ctx:
ctx.idx = 0;
ctx.exentry_idx = 0;
- build_prologue(&ctx, was_classic);
+ build_prologue(&ctx, was_classic, prog->aux->exception_cb);
if (build_body(&ctx, extra_pass)) {
- bpf_jit_binary_free(header);
prog = orig_prog;
- goto out_off;
+ goto out_free_hdr;
}
- build_epilogue(&ctx);
+ build_epilogue(&ctx, prog->aux->exception_cb);
build_plt(&ctx);
/* 3. Extra pass to validate JITed code. */
if (validate_ctx(&ctx)) {
- bpf_jit_binary_free(header);
prog = orig_prog;
- goto out_off;
+ goto out_free_hdr;
}
/* And we're done. */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
- bpf_flush_icache(header, ctx.image + ctx.idx);
-
if (!prog->is_func || extra_pass) {
if (extra_pass && ctx.idx != jit_data->ctx.idx) {
pr_err_once("multi-func JIT bug %d != %d\n",
ctx.idx, jit_data->ctx.idx);
- bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
+ goto out_free_hdr;
+ }
+ if (WARN_ON(bpf_jit_binary_pack_finalize(prog, ro_header,
+ header))) {
+ /* ro_header has been freed */
+ ro_header = NULL;
+ prog = orig_prog;
goto out_off;
}
- bpf_jit_binary_lock_ro(header);
+ /*
+ * The instructions have now been copied to the ROX region from
+ * where they will execute. Now the data cache has to be cleaned to
+ * the PoU and the I-cache has to be invalidated for the VAs.
+ */
+ bpf_flush_icache(ro_header, ctx.ro_image + ctx.idx);
} else {
jit_data->ctx = ctx;
- jit_data->image = image_ptr;
+ jit_data->ro_image = ro_image_ptr;
jit_data->header = header;
+ jit_data->ro_header = ro_header;
}
- prog->bpf_func = (void *)ctx.image;
+
+ prog->bpf_func = (void *)ctx.ro_image;
prog->jited = 1;
prog->jited_len = prog_size;
@@ -1675,6 +1766,14 @@ out:
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
+
+out_free_hdr:
+ if (header) {
+ bpf_arch_text_copy(&ro_header->size, &header->size,
+ sizeof(header->size));
+ bpf_jit_binary_pack_free(ro_header, header);
+ }
+ goto out_off;
}
bool bpf_jit_supports_kfunc_call(void)
@@ -1682,6 +1781,13 @@ bool bpf_jit_supports_kfunc_call(void)
return true;
}
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ if (!aarch64_insn_copy(dst, src, len))
+ return ERR_PTR(-EINVAL);
+ return dst;
+}
+
u64 bpf_jit_alloc_exec_limit(void)
{
return VMALLOC_END - VMALLOC_START;
@@ -2305,3 +2411,42 @@ out:
return ret;
}
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_exceptions(void)
+{
+ /* We unwind through both kernel frames, starting from within the
+ * bpf_throw call, and BPF frames. Therefore we require the FP unwinder
+ * to be enabled to walk kernel frames and reach BPF frames in the
+ * stack trace. The ARM64 kernel is always compiled with
+ * CONFIG_FRAME_POINTER=y.
+ */
+ return true;
+}
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+ if (prog->jited) {
+ struct arm64_jit_data *jit_data = prog->aux->jit_data;
+ struct bpf_binary_header *hdr;
+
+ /*
+ * If we fail the final pass of JIT (from jit_subprogs),
+ * the program may not be finalized yet. Call finalize here
+ * before freeing it.
+ */
+ if (jit_data) {
+ bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
+ sizeof(jit_data->header->size));
+ kfree(jit_data);
+ }
+ hdr = bpf_jit_binary_pack_hdr(prog);
+ bpf_jit_binary_pack_free(hdr, NULL);
+ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
+ }
+
+ bpf_prog_unlock_free(prog);
+}
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 1e07d74d7a6c..b912b1409fc0 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -84,7 +84,6 @@ WORKAROUND_2077057
WORKAROUND_2457168
WORKAROUND_2645198
WORKAROUND_2658417
-WORKAROUND_2966298
WORKAROUND_AMPERE_AC03_CPU_38
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
@@ -100,3 +99,4 @@ WORKAROUND_NVIDIA_CARMEL_CNP
WORKAROUND_QCOM_FALKOR_E1003
WORKAROUND_REPEAT_TLBI
WORKAROUND_SPECULATIVE_AT
+WORKAROUND_SPECULATIVE_UNPRIV_LOAD
diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig
index af722e4dfb47..ff559e5162aa 100644
--- a/arch/csky/configs/defconfig
+++ b/arch/csky/configs/defconfig
@@ -34,7 +34,8 @@ CONFIG_GENERIC_PHY=y
CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_QUOTA=y
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_FSCACHE_STATS=y
CONFIG_CACHEFILES=m
CONFIG_MSDOS_FS=y
diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h
index 98a3f4b168bd..ef2e37a10a0f 100644
--- a/arch/csky/include/asm/jump_label.h
+++ b/arch/csky/include/asm/jump_label.h
@@ -12,7 +12,7 @@
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: nop32 \n"
" .pushsection __jump_table, \"aw\" \n"
" .align 2 \n"
@@ -29,7 +29,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key *key,
bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: bsr32 %l[label] \n"
" .pushsection __jump_table, \"aw\" \n"
" .align 2 \n"
diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild
index beb8499dd8ed..bfa21465d83a 100644
--- a/arch/loongarch/Kbuild
+++ b/arch/loongarch/Kbuild
@@ -4,6 +4,7 @@ obj-y += net/
obj-y += vdso/
obj-$(CONFIG_KVM) += kvm/
+obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
# for cleaning
subdir- += boot
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 15d05dd2b7f3..929f68926b34 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -12,6 +12,7 @@ config LOONGARCH
select ARCH_DISABLE_KASAN_INLINE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_FORTIFY_SOURCE
@@ -99,6 +100,7 @@ config LOONGARCH
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
@@ -142,6 +144,7 @@ config LOONGARCH
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK
select HAVE_RSEQ
+ select HAVE_RUST
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_SETUP_PER_CPU_AREA if NUMA
@@ -376,6 +379,24 @@ config CMDLINE_FORCE
endchoice
+config BUILTIN_DTB
+ bool "Enable built-in dtb in kernel"
+ depends on OF
+ help
+	  Some existing systems do not provide a canonical device tree to
+	  the kernel at boot time, so provide a table of device trees in
+	  the kernel, keyed by the dts filename, containing the relevant
+	  DTBs.
+
+	  The built-in DTBs are generic and can serve as references.
+
+config BUILTIN_DTB_NAME
+ string "Source file for built-in dtb"
+ depends on BUILTIN_DTB
+ help
+ Base name (without suffix, relative to arch/loongarch/boot/dts/)
+ for the DTS file that will be used to produce the DTB linked into
+ the kernel.
+
config DMI
bool "Enable DMI scanning"
select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
@@ -577,6 +598,9 @@ config ARCH_SELECTS_CRASH_DUMP
depends on CRASH_DUMP
select RELOCATABLE
+config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+ def_bool CRASH_CORE
+
config RELOCATABLE
bool "Relocatable kernel"
help
@@ -610,23 +634,6 @@ config RANDOMIZE_BASE_MAX_OFFSET
This is limited by the size of the lower address memory, 256MB.
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- default y
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
endmenu
config ARCH_SELECT_MEMORY_MODEL
@@ -645,10 +652,6 @@ config ARCH_SPARSEMEM_ENABLE
or have huge holes in the physical address space for other reasons.
See <file:Documentation/mm/numa.rst> for more.
-config ARCH_ENABLE_THP_MIGRATION
- def_bool y
- depends on TRANSPARENT_HUGEPAGE
-
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 4ba8d67ddb09..983aa2b1629a 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -6,6 +6,7 @@
boot := arch/loongarch/boot
KBUILD_DEFCONFIG := loongson3_defconfig
+KBUILD_DTBS := dtbs
image-name-y := vmlinux
image-name-$(CONFIG_EFI_ZBOOT) := vmlinuz
@@ -81,8 +82,11 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
endif
+KBUILD_RUSTFLAGS_MODULE += -Crelocation-model=pic
+
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS_KERNEL += -fPIE
+KBUILD_RUSTFLAGS_KERNEL += -Crelocation-model=pie
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
endif
@@ -141,7 +145,7 @@ endif
vdso-install-y += arch/loongarch/vdso/vdso.so.dbg
-all: $(notdir $(KBUILD_IMAGE))
+all: $(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS)
vmlinuz.efi: vmlinux.efi
diff --git a/arch/loongarch/boot/dts/Makefile b/arch/loongarch/boot/dts/Makefile
index 5f1f55e911ad..747d0c3f6389 100644
--- a/arch/loongarch/boot/dts/Makefile
+++ b/arch/loongarch/boot/dts/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-dtstree := $(srctree)/$(src)
-dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+dtb-y = loongson-2k0500-ref.dtb loongson-2k1000-ref.dtb loongson-2k2000-ref.dtb
+
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_NAME))
diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
new file mode 100644
index 000000000000..8aefb0c12672
--- /dev/null
+++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include "loongson-2k0500.dtsi"
+
+/ {
+ compatible = "loongson,ls2k0500-ref", "loongson,ls2k0500";
+ model = "Loongson-2K0500 Reference Board";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@200000 {
+ device_type = "memory";
+ reg = <0x0 0x00200000 0x0 0x0ee00000>,
+ <0x0 0x90000000 0x0 0x60000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x0 0x2000000>;
+ linux,cma-default;
+ };
+ };
+};
+
+&gmac0 {
+ status = "okay";
+
+ phy-mode = "rgmii";
+ bus_id = <0x0>;
+};
+
+&gmac1 {
+ status = "okay";
+
+ phy-mode = "rgmii";
+ bus_id = <0x1>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ eeprom@57 {
+ compatible = "atmel,24c16";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&rtc0 {
+ status = "okay";
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k0500.dtsi b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
new file mode 100644
index 000000000000..444779c21034
--- /dev/null
+++ b/arch/loongarch/boot/dts/loongson-2k0500.dtsi
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "loongson,la264";
+ device_type = "cpu";
+ reg = <0x0>;
+ clocks = <&cpu_clk>;
+ };
+ };
+
+ cpu_clk: cpu-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <500000000>;
+ };
+
+ cpuintc: interrupt-controller {
+ compatible = "loongson,cpu-interrupt-controller";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ bus@10000000 {
+ compatible = "simple-bus";
+ ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
+ <0x0 0x02000000 0x0 0x02000000 0x0 0x02000000>,
+ <0x0 0x20000000 0x0 0x20000000 0x0 0x10000000>,
+ <0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>,
+ <0xfe 0x0 0xfe 0x0 0x0 0x40000000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ isa@16400000 {
+ compatible = "isa";
+ #size-cells = <1>;
+ #address-cells = <2>;
+ ranges = <1 0x0 0x0 0x16400000 0x4000>;
+ };
+
+ liointc0: interrupt-controller@1fe11400 {
+ compatible = "loongson,liointc-2.0";
+ reg = <0x0 0x1fe11400 0x0 0x40>,
+ <0x0 0x1fe11040 0x0 0x8>;
+ reg-names = "main", "isr0";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ interrupt-names = "int0";
+
+ loongson,parent_int_map = <0xffffffff>, /* int0 */
+ <0x00000000>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+ };
+
+ liointc1: interrupt-controller@1fe11440 {
+ compatible = "loongson,liointc-2.0";
+ reg = <0x0 0x1fe11440 0x0 0x40>,
+ <0x0 0x1fe11048 0x0 0x8>;
+ reg-names = "main", "isr0";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <4>;
+ interrupt-names = "int2";
+
+ loongson,parent_int_map = <0x00000000>, /* int0 */
+ <0x00000000>, /* int1 */
+ <0xffffffff>, /* int2 */
+ <0x00000000>; /* int3 */
+ };
+
+ eiointc: interrupt-controller@1fe11600 {
+ compatible = "loongson,ls2k0500-eiointc";
+ reg = <0x0 0x1fe11600 0x0 0xea00>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <3>;
+ };
+
+ gmac0: ethernet@1f020000 {
+ compatible = "snps,dwmac-3.70a";
+ reg = <0x0 0x1f020000 0x0 0x10000>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ gmac1: ethernet@1f030000 {
+ compatible = "snps,dwmac-3.70a";
+ reg = <0x0 0x1f030000 0x0 0x10000>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ sata: sata@1f040000 {
+ compatible = "snps,spear-ahci";
+ reg = <0x0 0x1f040000 0x0 0x10000>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <75>;
+ status = "disabled";
+ };
+
+ ehci0: usb@1f050000 {
+ compatible = "generic-ehci";
+ reg = <0x0 0x1f050000 0x0 0x8000>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <71>;
+ status = "disabled";
+ };
+
+ ohci0: usb@1f058000 {
+ compatible = "generic-ohci";
+ reg = <0x0 0x1f058000 0x0 0x8000>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <72>;
+ status = "disabled";
+ };
+
+ uart0: serial@1ff40800 {
+ compatible = "ns16550a";
+ reg = <0x0 0x1ff40800 0x0 0x10>;
+ clock-frequency = <100000000>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <2>;
+ no-loopback-test;
+ status = "disabled";
+ };
+
+ i2c0: i2c@1ff48000 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1ff48000 0x0 0x0800>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <14>;
+ status = "disabled";
+ };
+
+ i2c@1ff48800 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1ff48800 0x0 0x0800>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <15>;
+ status = "disabled";
+ };
+
+ i2c@1ff49000 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1ff49000 0x0 0x0800>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <16>;
+ status = "disabled";
+ };
+
+ i2c@1ff49800 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1ff49800 0x0 0x0800>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <17>;
+ status = "disabled";
+ };
+
+ i2c@1ff4a000 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1ff4a000 0x0 0x0800>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <18>;
+ status = "disabled";
+ };
+
+ i2c@1ff4a800 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1ff4a800 0x0 0x0800>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <19>;
+ status = "disabled";
+ };
+
+ pmc: power-management@1ff6c000 {
+ compatible = "loongson,ls2k0500-pmc", "syscon";
+ reg = <0x0 0x1ff6c000 0x0 0x58>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <56>;
+ loongson,suspend-address = <0x0 0x1c000500>;
+
+ syscon-reboot {
+ compatible = "syscon-reboot";
+ offset = <0x30>;
+ mask = <0x1>;
+ };
+
+ syscon-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&pmc>;
+ offset = <0x14>;
+ mask = <0x3c00>;
+ value = <0x3c00>;
+ };
+ };
+
+ rtc0: rtc@1ff6c100 {
+ compatible = "loongson,ls2k0500-rtc", "loongson,ls7a-rtc";
+ reg = <0x0 0x1ff6c100 0x0 0x100>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <35>;
+ status = "disabled";
+ };
+
+ pcie@1a000000 {
+ compatible = "loongson,ls2k-pci";
+ reg = <0x0 0x1a000000 0x0 0x02000000>,
+ <0xfe 0x0 0x0 0x20000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x0 0x5>;
+ ranges = <0x01000000 0x0 0x00004000 0x0 0x16404000 0x0 0x00004000>,
+ <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>;
+
+ pcie@0,0 {
+ reg = <0x0000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&eiointc>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &eiointc 81>;
+ ranges;
+ };
+
+ pcie@1,0 {
+ reg = <0x0800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&eiointc>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &eiointc 82>;
+ ranges;
+ };
+ };
+ };
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
new file mode 100644
index 000000000000..ed4d32434041
--- /dev/null
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include "loongson-2k1000.dtsi"
+
+/ {
+ compatible = "loongson,ls2k1000-ref", "loongson,ls2k1000";
+ model = "Loongson-2K1000 Reference Board";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@200000 {
+ device_type = "memory";
+ reg = <0x0 0x00200000 0x0 0x06e00000>,
+ <0x0 0x08000000 0x0 0x07000000>,
+ <0x0 0x90000000 0x1 0xe0000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x0 0x2000000>;
+ linux,cma-default;
+ };
+ };
+};
+
+&gmac0 {
+ status = "okay";
+
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
+
+&gmac1 {
+ status = "okay";
+
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <16>;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ pinctrl-0 = <&i2c0_pins_default>;
+ pinctrl-names = "default";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ eeprom@57 {
+ compatible = "atmel,24c16";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+&spi0 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ spidev@0 {
+ compatible = "rohm,dh2228fv";
+ spi-max-frequency = <100000000>;
+ reg = <0>;
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&clk {
+ status = "okay";
+};
+
+&rtc0 {
+ status = "okay";
+};
+
+&pctrl {
+ status = "okay";
+
+ sdio_pins_default: sdio-pins {
+ sdio-pinmux {
+ groups = "sdio";
+ function = "sdio";
+ };
+ sdio-det-pinmux {
+ groups = "pwm2";
+ function = "gpio";
+ };
+ };
+
+ pwm1_pins_default: pwm1-pins {
+ pinmux {
+ groups = "pwm1";
+ function = "pwm1";
+ };
+ };
+
+ pwm0_pins_default: pwm0-pins {
+ pinmux {
+ groups = "pwm0";
+ function = "pwm0";
+ };
+ };
+
+ i2c1_pins_default: i2c1-pins {
+ pinmux {
+ groups = "i2c1";
+ function = "i2c1";
+ };
+ };
+
+ i2c0_pins_default: i2c0-pins {
+ pinmux {
+ groups = "i2c0";
+ function = "i2c0";
+ };
+ };
+
+ nand_pins_default: nand-pins {
+ pinmux {
+ groups = "nand";
+ function = "nand";
+ };
+ };
+
+ hda_pins_default: hda-pins {
+ grp0-pinmux {
+ groups = "hda";
+ function = "hda";
+ };
+ grp1-pinmux {
+ groups = "i2s";
+ function = "gpio";
+ };
+ };
+};
diff --git a/arch/loongarch/boot/dts/loongson-2k1000.dtsi b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
new file mode 100644
index 000000000000..49a70f8c3cab
--- /dev/null
+++ b/arch/loongarch/boot/dts/loongson-2k1000.dtsi
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/loongson,ls2k-clk.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "loongson,la264";
+ device_type = "cpu";
+ reg= <0x0>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
+ };
+
+ cpu1: cpu@1 {
+ compatible = "loongson,la264";
+ device_type = "cpu";
+ reg = <0x1>;
+ clocks = <&clk LOONGSON2_NODE_CLK>;
+ };
+ };
+
+ ref_100m: clock-ref-100m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "ref_100m";
+ };
+
+ cpuintc: interrupt-controller {
+ compatible = "loongson,cpu-interrupt-controller";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ /* i2c of the dvi eeprom edid */
+ i2c-gpio-0 {
+ compatible = "i2c-gpio";
+ scl-gpios = <&gpio0 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <5>; /* ~100 kHz */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ /* i2c of the eeprom edid */
+ i2c-gpio-1 {
+ compatible = "i2c-gpio";
+ scl-gpios = <&gpio0 33 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio0 32 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <5>; /* ~100 kHz */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tsensor 0>;
+
+ trips {
+ cpu_alert: cpu-alert {
+ temperature = <33000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_crit: cpu-crit {
+ temperature = <85000>;
+ hysteresis = <5000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
+ bus@10000000 {
+ compatible = "simple-bus";
+ ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
+ <0x0 0x02000000 0x0 0x02000000 0x0 0x02000000>,
+ <0x0 0x20000000 0x0 0x20000000 0x0 0x10000000>,
+ <0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>,
+ <0xfe 0x0 0xfe 0x0 0x0 0x40000000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-coherent;
+
+ liointc0: interrupt-controller@1fe01400 {
+ compatible = "loongson,liointc-2.0";
+ reg = <0x0 0x1fe01400 0x0 0x40>,
+ <0x0 0x1fe01040 0x0 0x8>,
+ <0x0 0x1fe01140 0x0 0x8>;
+ reg-names = "main", "isr0", "isr1";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ interrupt-names = "int0";
+ loongson,parent_int_map = <0xffffffff>, /* int0 */
+ <0x00000000>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+ };
+
+ liointc1: interrupt-controller@1fe01440 {
+ compatible = "loongson,liointc-2.0";
+ reg = <0x0 0x1fe01440 0x0 0x40>,
+ <0x0 0x1fe01048 0x0 0x8>,
+ <0x0 0x1fe01148 0x0 0x8>;
+ reg-names = "main", "isr0", "isr1";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <3>;
+ interrupt-names = "int1";
+ loongson,parent_int_map = <0x00000000>, /* int0 */
+ <0xffffffff>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+ };
+
+ chipid@1fe00000 {
+ compatible = "loongson,ls2k-chipid";
+ reg = <0x0 0x1fe00000 0x0 0x30>;
+ little-endian;
+ };
+
+ pctrl: pinctrl@1fe00420 {
+ compatible = "loongson,ls2k-pinctrl";
+ reg = <0x0 0x1fe00420 0x0 0x18>;
+ status = "disabled";
+ };
+
+ clk: clock-controller@1fe00480 {
+ compatible = "loongson,ls2k-clk";
+ reg = <0x0 0x1fe00480 0x0 0x58>;
+ #clock-cells = <1>;
+ clocks = <&ref_100m>;
+ clock-names = "ref_100m";
+ status = "disabled";
+ };
+
+ gpio0: gpio@1fe00500 {
+ compatible = "loongson,ls2k-gpio";
+ reg = <0x0 0x1fe00500 0x0 0x38>;
+ ngpios = <64>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pctrl 0x0 0x0 15>,
+ <&pctrl 16 16 15>,
+ <&pctrl 32 32 10>,
+ <&pctrl 44 44 20>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>,
+ <29 IRQ_TYPE_LEVEL_HIGH>,
+ <30 IRQ_TYPE_LEVEL_HIGH>,
+ <30 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <26 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <>,
+ <>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>,
+ <27 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ tsensor: thermal-sensor@1fe01500 {
+ compatible = "loongson,ls2k1000-thermal";
+ reg = <0x0 0x1fe01500 0x0 0x30>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ dma-controller@1fe00c00 {
+ compatible = "loongson,ls2k1000-apbdma";
+ reg = <0x0 0x1fe00c00 0x0 0x8>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe00c10 {
+ compatible = "loongson,ls2k1000-apbdma";
+ reg = <0x0 0x1fe00c10 0x0 0x8>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe00c20 {
+ compatible = "loongson,ls2k1000-apbdma";
+ reg = <0x0 0x1fe00c20 0x0 0x8>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe00c30 {
+ compatible = "loongson,ls2k1000-apbdma";
+ reg = <0x0 0x1fe00c30 0x0 0x8>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ dma-controller@1fe00c40 {
+ compatible = "loongson,ls2k1000-apbdma";
+ reg = <0x0 0x1fe00c40 0x0 0x8>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LOONGSON2_APB_CLK>;
+ #dma-cells = <1>;
+ status = "disabled";
+ };
+
+ uart0: serial@1fe20000 {
+ compatible = "ns16550a";
+ reg = <0x0 0x1fe20000 0x0 0x10>;
+ clock-frequency = <125000000>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <0x0 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ status = "disabled";
+ };
+
+ i2c2: i2c@1fe21000 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1fe21000 0x0 0x8>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@1fe21800 {
+ compatible = "loongson,ls2k-i2c";
+ reg = <0x0 0x1fe21800 0x0 0x8>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ pmc: power-management@1fe27000 {
+ compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon";
+ reg = <0x0 0x1fe27000 0x0 0x58>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+ loongson,suspend-address = <0x0 0x1c000500>;
+
+ syscon-reboot {
+ compatible = "syscon-reboot";
+ offset = <0x30>;
+ mask = <0x1>;
+ };
+
+ syscon-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&pmc>;
+ offset = <0x14>;
+ mask = <0x3c00>;
+ value = <0x3c00>;
+ };
+ };
+
+ rtc0: rtc@1fe27800 {
+ compatible = "loongson,ls2k1000-rtc";
+ reg = <0x0 0x1fe27800 0x0 0x100>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ spi0: spi@1fff0220 {
+ compatible = "loongson,ls2k1000-spi";
+ reg = <0x0 0x1fff0220 0x0 0x10>;
+ clocks = <&clk LOONGSON2_BOOT_CLK>;
+ status = "disabled";
+ };
+
+ pcie@1a000000 {
+ compatible = "loongson,ls2k-pci";
+ reg = <0x0 0x1a000000 0x0 0x02000000>,
+ <0xfe 0x0 0x0 0x20000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x0 0xff>;
+ ranges = <0x01000000 0x0 0x00008000 0x0 0x18008000 0x0 0x00008000>,
+ <0x02000000 0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>;
+
+ gmac0: ethernet@3,0 {
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+ <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ status = "disabled";
+ };
+
+ gmac1: ethernet@3,1 {
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ status = "disabled";
+ };
+
+ ehci0: usb@4,1 {
+ reg = <0x2100 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ ohci0: usb@4,2 {
+ reg = <0x2200 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ display@6,0 {
+ reg = <0x3000 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ hda@7,0 {
+ reg = <0x3800 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ sata: sata@8,0 {
+ reg = <0x4000 0x0 0x0 0x0 0x0>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ pcie@9,0 {
+ reg = <0x4800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 0x0 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@a,0 {
+ reg = <0x5000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&liointc1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@b,0 {
+ reg = <0x5800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&liointc1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@c,0 {
+ reg = <0x6000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&liointc1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@d,0 {
+ reg = <0x6800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&liointc1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@e,0 {
+ reg = <0x7000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&liointc1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+ };
+ };
+};
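
A note on the liointc nodes above: loongson,parent_int_map carries one 32-bit mask per parent CPU INT line, with bit i of map[n] routing interrupt source i to parent INTn; liointc0 sends all 32 sources to int0 (<0xffffffff>) and liointc1 sends them all to int1. A minimal sketch of reading those masks, assuming only the standard of_property_read_u32_array() helper (the function name is illustrative):

#include <linux/of.h>
#include <linux/types.h>

#define LIOINTC_NUM_PARENTS 4

/* Illustrative helper: fetch the four per-parent routing bitmaps. */
static int liointc_read_parent_map(struct device_node *np,
				   u32 map[LIOINTC_NUM_PARENTS])
{
	/* map[n] bit i set => interrupt source i is routed to INTn. */
	return of_property_read_u32_array(np, "loongson,parent_int_map",
					  map, LIOINTC_NUM_PARENTS);
}
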
diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
new file mode 100644
index 000000000000..dca91caf895e
--- /dev/null
+++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include "loongson-2k2000.dtsi"
+
+/ {
+ compatible = "loongson,ls2k2000-ref", "loongson,ls2k2000";
+ model = "Loongson-2K2000 Reference Board";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@200000 {
+ device_type = "memory";
+ reg = <0x0 0x00200000 0x0 0x0ee00000>,
+ <0x0 0x90000000 0x0 0x70000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x0 0x2000000>;
+ linux,cma-default;
+ };
+ };
+};
+
+&sata {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&rtc0 {
+ status = "okay";
+};
+
+&xhci0 {
+ status = "okay";
+};
+
+&xhci1 {
+ status = "okay";
+};
+
+&gmac0 {
+ status = "okay";
+};
+
+&gmac1 {
+ status = "okay";
+};
+
+&gmac2 {
+ status = "okay";
+};
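
The override blocks above follow the usual SoC-dtsi/board-dts split: loongson-2k2000.dtsi (added below) describes every peripheral with status = "disabled", and the board file enables only the devices it actually wires up. Drivers see the result through the standard availability check; a minimal sketch (wrapper name illustrative):

#include <linux/of.h>

/* A node is available when its status is "okay"/"ok" or absent. */
static bool board_enables_node(struct device_node *np)
{
	return of_device_is_available(np);
}
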
diff --git a/arch/loongarch/boot/dts/loongson-2k2000.dtsi b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
new file mode 100644
index 000000000000..a231949b5f55
--- /dev/null
+++ b/arch/loongarch/boot/dts/loongson-2k2000.dtsi
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@1 {
+ compatible = "loongson,la364";
+ device_type = "cpu";
+ reg = <0x0>;
+ clocks = <&cpu_clk>;
+ };
+
+ cpu1: cpu@2 {
+ compatible = "loongson,la364";
+ device_type = "cpu";
+ reg = <0x1>;
+ clocks = <&cpu_clk>;
+ };
+ };
+
+ cpu_clk: cpu-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1400000000>;
+ };
+
+ cpuintc: interrupt-controller {
+ compatible = "loongson,cpu-interrupt-controller";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ bus@10000000 {
+ compatible = "simple-bus";
+ ranges = <0x0 0x10000000 0x0 0x10000000 0x0 0x10000000>,
+ <0x0 0x02000000 0x0 0x02000000 0x0 0x02000000>,
+ <0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>,
+ <0xfe 0x0 0xfe 0x0 0x0 0x40000000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pmc: power-management@100d0000 {
+ compatible = "loongson,ls2k2000-pmc", "loongson,ls2k0500-pmc", "syscon";
+ reg = <0x0 0x100d0000 0x0 0x58>;
+ interrupt-parent = <&eiointc>;
+ interrupts = <47>;
+ loongson,suspend-address = <0x0 0x1c000500>;
+
+ syscon-reboot {
+ compatible = "syscon-reboot";
+ offset = <0x30>;
+ mask = <0x1>;
+ };
+
+ syscon-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&pmc>;
+ offset = <0x14>;
+ mask = <0x3c00>;
+ value = <0x3c00>;
+ };
+ };
+
+ liointc: interrupt-controller@1fe01400 {
+ compatible = "loongson,liointc-1.0";
+ reg = <0x0 0x1fe01400 0x0 0x64>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ interrupt-names = "int0";
+ loongson,parent_int_map = <0xffffffff>, /* int0 */
+ <0x00000000>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+ };
+
+ eiointc: interrupt-controller@1fe01600 {
+ compatible = "loongson,ls2k2000-eiointc";
+ reg = <0x0 0x1fe01600 0x0 0xea00>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <3>;
+ };
+
+ pic: interrupt-controller@10000000 {
+ compatible = "loongson,pch-pic-1.0";
+ reg = <0x0 0x10000000 0x0 0x400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ loongson,pic-base-vec = <0>;
+ interrupt-parent = <&eiointc>;
+ };
+
+ msi: msi-controller@1fe01140 {
+ compatible = "loongson,pch-msi-1.0";
+ reg = <0x0 0x1fe01140 0x0 0x8>;
+ msi-controller;
+ loongson,msi-base-vec = <64>;
+ loongson,msi-num-vecs = <192>;
+ interrupt-parent = <&eiointc>;
+ };
+
+ rtc0: rtc@100d0100 {
+ compatible = "loongson,ls2k2000-rtc", "loongson,ls7a-rtc";
+ reg = <0x0 0x100d0100 0x0 0x100>;
+ interrupt-parent = <&pic>;
+ interrupts = <52 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ uart0: serial@1fe001e0 {
+ compatible = "ns16550a";
+ reg = <0x0 0x1fe001e0 0x0 0x10>;
+ clock-frequency = <100000000>;
+ interrupt-parent = <&liointc>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ status = "disabled";
+ };
+
+ pcie@1a000000 {
+ compatible = "loongson,ls2k-pci";
+ reg = <0x0 0x1a000000 0x0 0x02000000>,
+ <0xfe 0x0 0x0 0x20000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x0 0xff>;
+ ranges = <0x01000000 0x0 0x00008000 0x0 0x18400000 0x0 0x00008000>,
+ <0x02000000 0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>;
+
+ gmac0: ethernet@3,0 {
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ gmac1: ethernet@3,1 {
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ gmac2: ethernet@3,2 {
+ reg = <0x1a00 0x0 0x0 0x0 0x0>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ xhci0: usb@4,0 {
+ reg = <0x2000 0x0 0x0 0x0 0x0>;
+ interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ xhci1: usb@19,0 {
+ reg = <0xc800 0x0 0x0 0x0 0x0>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ display@6,1 {
+ reg = <0x3100 0x0 0x0 0x0 0x0>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ hda@7,0 {
+ reg = <0x3800 0x0 0x0 0x0 0x0>;
+ interrupts = <58 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ sata: sata@8,0 {
+ reg = <0x4000 0x0 0x0 0x0 0x0>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pic>;
+ status = "disabled";
+ };
+
+ pcie@9,0 {
+ reg = <0x4800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 32 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@a,0 {
+ reg = <0x5000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 33 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@b,0 {
+ reg = <0x5800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 34 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@c,0 {
+ reg = <0x6000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 35 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@d,0 {
+ reg = <0x6800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 36 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@e,0 {
+ reg = <0x7000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 37 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@f,0 {
+ reg = <0x7800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 40 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+
+ pcie@10,0 {
+ reg = <0x8000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupt-parent = <&pic>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &pic 30 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ };
+ };
+ };
+};
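
The interrupt hierarchy above splits the EIOINTC per-core vector space between two children: pch-pic claims vectors from 0 (loongson,pic-base-vec = <0>) and pch-msi claims 192 vectors from 64. A sketch of the implied partition; the pch-pic vector count (64) and the EIOINTC total (256) are assumptions not spelled out in the nodes:

#define PIC_BASE_VEC	0	/* loongson,pic-base-vec */
#define PIC_NUM_VECS	64	/* assumption: fixed pch-pic vector count */
#define MSI_BASE_VEC	64	/* loongson,msi-base-vec */
#define MSI_NUM_VECS	192	/* loongson,msi-num-vecs */
#define EIOINTC_VECS	256	/* assumption: total per-core vectors */

_Static_assert(PIC_BASE_VEC + PIC_NUM_VECS == MSI_BASE_VEC,
	       "MSI vectors must start where the PIC vectors end");
_Static_assert(MSI_BASE_VEC + MSI_NUM_VECS <= EIOINTC_VECS,
	       "MSI range must fit in the EIOINTC vector space");
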
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 60e331af9839..f18c2ba871ef 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -6,6 +6,8 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT=y
CONFIG_PREEMPT=y
+CONFIG_PREEMPT_DYNAMIC=y
+CONFIG_SCHED_CORE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -19,6 +21,7 @@ CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
@@ -26,6 +29,7 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_MISC=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
@@ -35,6 +39,8 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PERF_EVENTS=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
CONFIG_LOONGARCH=y
CONFIG_64BIT=y
CONFIG_MACH_LOONGSON64=y
@@ -44,13 +50,11 @@ CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
-CONFIG_NR_CPUS=64
+CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_CPU_HAS_FPU=y
CONFIG_CPU_HAS_LSX=y
CONFIG_CPU_HAS_LASX=y
-CONFIG_KEXEC=y
-CONFIG_CRASH_DUMP=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_SUSPEND=y
CONFIG_HIBERNATION=y
@@ -62,10 +66,6 @@ CONFIG_ACPI_IPMI=m
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_PCI_SLOT=y
CONFIG_ACPI_HOTPLUG_MEMORY=y
-CONFIG_EFI_ZBOOT=y
-CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
-CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_EFI_TEST=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
@@ -74,10 +74,18 @@ CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_DEV_THROTTLING_LOW=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_FC_APPID=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CMDLINE_PARTITION=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
@@ -93,6 +101,8 @@ CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_CMA_SYSFS=y
CONFIG_USERFAULTFD=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -128,6 +138,7 @@ CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_ESP=m
CONFIG_IPV6_MROUTE=y
+CONFIG_MPTCP=y
CONFIG_NETWORK_PHY_TIMESTAMPING=y
CONFIG_NETFILTER=y
CONFIG_BRIDGE_NETFILTER=m
@@ -352,6 +363,7 @@ CONFIG_PCIEAER=y
CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_SHPC=y
+CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCCARD=m
CONFIG_YENTA=m
CONFIG_RAPIDIO=y
@@ -365,6 +377,10 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_COMPRESS_ZSTD=y
+CONFIG_EFI_ZBOOT=y
+CONFIG_EFI_BOOTLOADER_CONTROL=m
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_EFI_TEST=m
CONFIG_MTD=m
CONFIG_MTD_BLOCK=m
CONFIG_MTD_CFI=m
@@ -586,6 +602,7 @@ CONFIG_RTW89_8852AE=m
CONFIG_RTW89_8852CE=m
CONFIG_ZD1211RW=m
CONFIG_USB_NET_RNDIS_WLAN=m
+CONFIG_USB4_NET=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_EVDEV=y
@@ -691,6 +708,9 @@ CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_HDMI=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_SOC=m
+CONFIG_SND_SOC_LOONGSON_CARD=m
+CONFIG_SND_VIRTIO=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
CONFIG_HID_A4TECH=m
@@ -738,6 +758,11 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_LOONGSON=y
CONFIG_DMADEVICES=y
+CONFIG_LS2X_APB_DMA=y
+CONFIG_UDMABUF=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_DMABUF_HEAPS_CMA=y
CONFIG_UIO=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_DMEM_GENIRQ=m
@@ -778,7 +803,15 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_NTB=m
+CONFIG_NTB_MSI=y
+CONFIG_NTB_IDT=m
+CONFIG_NTB_EPF=m
+CONFIG_NTB_SWITCHTEC=m
+CONFIG_NTB_PERF=m
+CONFIG_NTB_TRANSPORT=m
CONFIG_PWM=y
+CONFIG_USB4=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -797,6 +830,10 @@ CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_F2FS_FS=m
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_CHECK_FS=y
+CONFIG_F2FS_FS_COMPRESSION=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
@@ -883,7 +920,6 @@ CONFIG_KEY_DH_OPERATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_APPARMOR=y
CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_DAC=y
@@ -914,6 +950,9 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_CRC32_LOONGARCH=m
CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_DMA_CMA=y
+CONFIG_DMA_NUMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index 8de6c4b83a61..49e29b29996f 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -32,8 +32,10 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
+#define MAX_CORE_PIC 256
+
extern struct list_head acpi_wakeup_device_list;
-extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
+extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
extern int __init parse_acpi_topology(void);
diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
index c60796869b2b..6d5846dd075c 100644
--- a/arch/loongarch/include/asm/bootinfo.h
+++ b/arch/loongarch/include/asm/bootinfo.h
@@ -24,13 +24,15 @@ struct loongson_board_info {
const char *board_vendor;
};
+#define NR_WORDS DIV_ROUND_UP(NR_CPUS, BITS_PER_LONG)
+
struct loongson_system_configuration {
int nr_cpus;
int nr_nodes;
int boot_cpu_id;
int cores_per_node;
int cores_per_package;
- unsigned long cores_io_master;
+ unsigned long cores_io_master[NR_WORDS];
unsigned long suspend_addr;
const char *cpuname;
};
@@ -42,7 +44,7 @@ extern struct loongson_system_configuration loongson_sysconf;
static inline bool io_master(int cpu)
{
- return test_bit(cpu, &loongson_sysconf.cores_io_master);
+ return test_bit(cpu, loongson_sysconf.cores_io_master);
}
#endif /* _ASM_BOOTINFO_H */
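
This bootinfo.h change pairs with the NR_CPUS=64 to 256 bump in loongson3_defconfig above: one unsigned long holds at most 64 bits, so cores_io_master must grow into a real bitmap. A minimal sketch of the resulting structure, with an illustrative stand-in for NR_CPUS:

#include <linux/bitops.h>
#include <linux/kernel.h>

#define MY_NR_CPUS	256	/* illustrative stand-in for NR_CPUS */
#define MY_NR_WORDS	DIV_ROUND_UP(MY_NR_CPUS, BITS_PER_LONG)

static unsigned long io_master_map[MY_NR_WORDS];	/* 4 words on 64-bit */

static void mark_io_master(int cpu)
{
	set_bit(cpu, io_master_map);	/* valid for any cpu < MY_NR_CPUS */
}

static bool is_io_master(int cpu)
{
	return test_bit(cpu, io_master_map);
}
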
diff --git a/arch/loongarch/include/asm/crash_core.h b/arch/loongarch/include/asm/crash_core.h
new file mode 100644
index 000000000000..218bdbfa527b
--- /dev/null
+++ b/arch/loongarch/include/asm/crash_core.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LOONGARCH_CRASH_CORE_H
+#define _LOONGARCH_CRASH_CORE_H
+
+#define CRASH_ALIGN SZ_2M
+
+#define CRASH_ADDR_LOW_MAX SZ_4G
+#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
+
+extern phys_addr_t memblock_end_of_DRAM(void);
+
+#endif
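
These macros parameterize the generic crashkernel reservation adopted in arch/loongarch/kernel/setup.c below: regions are 2 MiB aligned, the "low" region must sit below 4 GiB, and the "high" region may reach the end of DRAM. A simplified sketch of what the allocator does with the low bounds (the real logic lives in reserve_crashkernel_generic()):

#include <linux/memblock.h>
#include <linux/sizes.h>

static phys_addr_t alloc_crash_low_region(phys_addr_t size)
{
	/* 2 MiB-aligned allocation anywhere in [2M, 4G). */
	return memblock_phys_alloc_range(size, SZ_2M /* CRASH_ALIGN */,
					 SZ_2M, SZ_4G /* CRASH_ADDR_LOW_MAX */);
}
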
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index 9b16a3b8e706..f16bd42456e4 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -241,8 +241,6 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
do { \
current->thread.vdso = &vdso_info; \
\
- loongarch_set_personality_fcsr(state); \
- \
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
} while (0)
@@ -259,7 +257,6 @@ do { \
clear_thread_flag(TIF_32BIT_ADDR); \
\
current->thread.vdso = &vdso_info; \
- loongarch_set_personality_fcsr(state); \
\
p = personality(current->personality); \
if (p != PER_LINUX32 && p != PER_LINUX) \
@@ -340,6 +337,4 @@ extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
struct arch_elf_state *state);
-extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);
-
#endif /* _ASM_ELF_H */
diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h
index a11996eb5892..de891c2c83d4 100644
--- a/arch/loongarch/include/asm/ftrace.h
+++ b/arch/loongarch/include/asm/ftrace.h
@@ -63,7 +63,7 @@ ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs)
static __always_inline void
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip)
{
- regs_set_return_value(&fregs->regs, ip);
+ instruction_pointer_set(&fregs->regs, ip);
}
#define ftrace_regs_get_argument(fregs, n) \
diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h
index 3cea299a5ef5..29acfe3de3fa 100644
--- a/arch/loongarch/include/asm/jump_label.h
+++ b/arch/loongarch/include/asm/jump_label.h
@@ -22,7 +22,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: nop \n\t"
JUMP_TABLE_ENTRY
: : "i"(&((char *)key)[branch]) : : l_yes);
@@ -35,7 +35,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
- asm_volatile_goto(
+ asm goto(
"1: b %l[l_yes] \n\t"
JUMP_TABLE_ENTRY
: : "i"(&((char *)key)[branch]) : : l_yes);
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index e71ceb88f29e..0cb4fdb8a9b5 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -60,7 +60,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu);
void kvm_save_lsx(struct loongarch_fpu *fpu);
void kvm_restore_lsx(struct loongarch_fpu *fpu);
#else
-static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
#endif
@@ -70,7 +70,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu);
void kvm_save_lasx(struct loongarch_fpu *fpu);
void kvm_restore_lasx(struct loongarch_fpu *fpu);
#else
-static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
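
The kvm_vcpu.h hunks fix undefined behavior rather than a cosmetic warning: a non-void function that falls off its end returns an indeterminate value, so a caller such as "if (kvm_own_lsx(vcpu))" branched on garbage whenever LSX/LASX support was compiled out. The corrected stub pattern, sketched standalone:

#include <linux/errno.h>

static inline int own_simd_stub(void)	/* illustrative name */
{
	return -EINVAL;	/* was: empty body, i.e. undefined return value */
}
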
diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h
deleted file mode 100644
index c9554f48d2df..000000000000
--- a/arch/loongarch/include/asm/shmparam.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_SHMPARAM_H
-#define _ASM_SHMPARAM_H
-
-#define __ARCH_FORCE_SHMLBA 1
-
-#define SHMLBA SZ_64K /* attach addr a multiple of this */
-
-#endif /* _ASM_SHMPARAM_H */
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 8e00a754e548..5cf59c617126 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -29,11 +29,9 @@ int disabled_cpus;
u64 acpi_saved_sp;
-#define MAX_CORE_PIC 256
-
#define PREFIX "ACPI: "
-struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
+struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{
@@ -119,7 +117,7 @@ acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long e
return -EINVAL;
core = eiointc->node * CORES_PER_EIO_NODE;
- set_bit(core, &(loongson_sysconf.cores_io_master));
+ set_bit(core, loongson_sysconf.cores_io_master);
return 0;
}
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
index acb5d3385675..000825406c1f 100644
--- a/arch/loongarch/kernel/efi.c
+++ b/arch/loongarch/kernel/efi.c
@@ -140,4 +140,6 @@ void __init efi_init(void)
early_memunmap(tbl, sizeof(*tbl));
}
+
+ efi_esrt_init();
}
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
index 183e94fc9c69..0fa81ced28dc 100644
--- a/arch/loongarch/kernel/elf.c
+++ b/arch/loongarch/kernel/elf.c
@@ -23,8 +23,3 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
{
return 0;
}
-
-void loongarch_set_personality_fcsr(struct arch_elf_state *state)
-{
- current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
-}
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 6b3bfb0092e6..2f1f5b08638f 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -5,13 +5,16 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/of_clk.h>
#include <asm/early_ioremap.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
#include <asm/setup.h>
+#include <asm/time.h>
u64 efi_system_table;
struct loongson_system_configuration loongson_sysconf;
@@ -36,7 +39,16 @@ void __init init_environ(void)
static int __init init_cpu_fullname(void)
{
- int cpu;
+ struct device_node *root;
+ int cpu, ret;
+ char *model;
+
+ /* Parse cpuname from the DTS model property */
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", (const char **)&model);
+ of_node_put(root);
+ if (ret == 0)
+ loongson_sysconf.cpuname = strsep(&model, " ");
if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
for (cpu = 0; cpu < NR_CPUS; cpu++)
@@ -46,6 +58,26 @@ static int __init init_cpu_fullname(void)
}
arch_initcall(init_cpu_fullname);
+static int __init fdt_cpu_clk_init(void)
+{
+ struct clk *clk;
+ struct device_node *np;
+
+ np = of_get_cpu_node(0, NULL);
+ if (!np)
+ return -ENODEV;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk))
+ return -ENODEV;
+
+ cpu_clock_freq = clk_get_rate(clk);
+ clk_put(clk);
+
+ return 0;
+}
+late_initcall(fdt_cpu_clk_init);
+
static ssize_t boardinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
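
In init_cpu_fullname() above, strsep() cuts the model string at the first space in place and returns the leading token, so a model of "Loongson-2K2000 Reference Board" yields a cpuname of "Loongson-2K2000". The primitive, sketched (helper name illustrative):

#include <linux/string.h>

static char *first_token(char *model)
{
	return strsep(&model, " ");	/* writes '\0' over the first space */
}
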
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 0ecab4216392..c4f7de2e2805 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -74,6 +74,11 @@ SYM_CODE_START(kernel_entry) # kernel entry point
la.pcrel t0, fw_arg2
st.d a2, t0, 0
+#ifdef CONFIG_PAGE_SIZE_4KB
+ li.d t0, 0
+ li.d t1, CSR_STFILL
+ csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
+#endif
/* KSave3 used for percpu base, initialized as 0 */
csrwr zero, PERCPU_BASE_KS
/* GPR21 used for percpu base (runtime), initialized as 0 */
@@ -126,6 +131,11 @@ SYM_CODE_START(smpboot_entry)
JUMP_VIRT_ADDR t0, t1
+#ifdef CONFIG_PAGE_SIZE_4KB
+ li.d t0, 0
+ li.d t1, CSR_STFILL
+ csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
+#endif
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 767d94cce0de..f2ff8b5d591e 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -85,6 +85,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
regs->csr_euen = euen;
lose_fpu(0);
lose_lbt(0);
+ current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
clear_thread_flag(TIF_LSX_CTX_LIVE);
clear_thread_flag(TIF_LASX_CTX_LIVE);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index d183a745fb85..634ef17fd38b 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -252,38 +252,23 @@ static void __init arch_reserve_vmcore(void)
#endif
}
-/* 2MB alignment for crash kernel regions */
-#define CRASH_ALIGN SZ_2M
-#define CRASH_ADDR_MAX SZ_4G
-
-static void __init arch_parse_crashkernel(void)
+static void __init arch_reserve_crashkernel(void)
{
-#ifdef CONFIG_KEXEC
int ret;
- unsigned long long total_mem;
+ unsigned long long low_size = 0;
unsigned long long crash_base, crash_size;
+ char *cmdline = boot_command_line;
+ bool high = false;
- total_mem = memblock_phys_mem_size();
- ret = parse_crashkernel(boot_command_line, total_mem,
- &crash_size, &crash_base,
- NULL, NULL);
- if (ret < 0 || crash_size <= 0)
+ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- if (crash_base <= 0) {
- crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX);
- if (!crash_base) {
- pr_warn("crashkernel reservation failed - No suitable area found.\n");
- return;
- }
- } else if (!memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_base + crash_size)) {
- pr_warn("Invalid memory region reserved for crash kernel\n");
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ &crash_size, &crash_base, &low_size, &high);
+ if (ret)
return;
- }
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
-#endif
+ reserve_crashkernel_generic(cmdline, crash_size, crash_base, low_size, high);
}
static void __init fdt_setup(void)
@@ -295,8 +280,12 @@ static void __init fdt_setup(void)
if (acpi_os_get_root_pointer())
return;
- /* Look for a device tree configuration table entry */
- fdt_pointer = efi_fdt_pointer();
+ /* Prefer the built-in dtb, checking its validity first. */
+ if (!fdt_check_header(__dtb_start))
+ fdt_pointer = __dtb_start;
+ else
+ fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
+
if (!fdt_pointer || fdt_check_header(fdt_pointer))
return;
@@ -330,7 +319,9 @@ static void __init bootcmdline_init(char **cmdline_p)
if (boot_command_line[0])
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
+ if (!strstr(boot_command_line, init_command_line))
+ strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
+
goto out;
}
#endif
@@ -357,7 +348,7 @@ out:
void __init platform_init(void)
{
arch_reserve_vmcore();
- arch_parse_crashkernel();
+ arch_reserve_crashkernel();
#ifdef CONFIG_ACPI_TABLE_UPGRADE
acpi_table_upgrade();
@@ -366,6 +357,8 @@ void __init platform_init(void)
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
+
+ early_init_fdt_scan_reserved_mem();
unflatten_and_copy_device_tree();
#ifdef CONFIG_NUMA
@@ -399,8 +392,6 @@ static void __init arch_mem_init(char **cmdline_p)
check_kernel_sections_mem();
- early_init_fdt_scan_reserved_mem();
-
/*
* In order to reduce the possibility of kernel panic when failed to
* get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
@@ -467,15 +458,6 @@ static void __init resource_init(void)
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
}
-
-#ifdef CONFIG_KEXEC
- if (crashk_res.start < crashk_res.end) {
- insert_resource(&iomem_resource, &crashk_res);
- pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
- (unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
- (unsigned long)(crashk_res.start >> 20));
- }
-#endif
}
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 5bca12d16e06..aabee0b280fe 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -88,6 +88,73 @@ void show_ipi_list(struct seq_file *p, int prec)
}
}
+static inline void set_cpu_core_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+ for_each_cpu(i, &cpu_core_setup_map) {
+ if (cpu_data[cpu].package == cpu_data[i].package) {
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
+ }
+ }
+}
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+ for_each_cpu(i, &cpu_sibling_setup_map) {
+ if (cpus_are_siblings(cpu, i)) {
+ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+ }
+ }
+}
+
+static inline void clear_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ for_each_cpu(i, &cpu_sibling_setup_map) {
+ if (cpus_are_siblings(cpu, i)) {
+ cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
+ }
+ }
+
+ cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpus_are_siblings(i, k))
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
@@ -208,7 +275,7 @@ static void __init fdt_smp_setup(void)
}
loongson_sysconf.nr_cpus = num_processors;
- set_bit(0, &(loongson_sysconf.cores_io_master));
+ set_bit(0, loongson_sysconf.cores_io_master);
#endif
}
@@ -216,6 +283,9 @@ void __init loongson_smp_setup(void)
{
fdt_smp_setup();
+ if (loongson_sysconf.cores_per_package == 0)
+ loongson_sysconf.cores_per_package = num_processors;
+
cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
@@ -300,6 +370,7 @@ int loongson_cpu_disable(void)
numa_remove_cpu(cpu);
#endif
set_cpu_online(cpu, false);
+ clear_cpu_sibling_map(cpu);
calculate_cpu_foreign_map();
local_irq_save(flags);
irq_migrate_all_off_this_cpu();
@@ -334,6 +405,7 @@ void __noreturn arch_cpu_idle_dead(void)
addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
} while (addr == 0);
+ local_irq_disable();
init_fn = (void *)TO_CACHE(addr);
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
@@ -376,59 +448,6 @@ static int __init ipi_pm_init(void)
core_initcall(ipi_pm_init);
#endif
-static inline void set_cpu_sibling_map(int cpu)
-{
- int i;
-
- cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
-
- for_each_cpu(i, &cpu_sibling_setup_map) {
- if (cpus_are_siblings(cpu, i)) {
- cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
- cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
- }
- }
-}
-
-static inline void set_cpu_core_map(int cpu)
-{
- int i;
-
- cpumask_set_cpu(cpu, &cpu_core_setup_map);
-
- for_each_cpu(i, &cpu_core_setup_map) {
- if (cpu_data[cpu].package == cpu_data[i].package) {
- cpumask_set_cpu(i, &cpu_core_map[cpu]);
- cpumask_set_cpu(cpu, &cpu_core_map[i]);
- }
- }
-}
-
-/*
- * Calculate a new cpu_foreign_map mask whenever a
- * new cpu appears or disappears.
- */
-void calculate_cpu_foreign_map(void)
-{
- int i, k, core_present;
- cpumask_t temp_foreign_map;
-
- /* Re-calculate the mask */
- cpumask_clear(&temp_foreign_map);
- for_each_online_cpu(i) {
- core_present = 0;
- for_each_cpu(k, &temp_foreign_map)
- if (cpus_are_siblings(i, k))
- core_present = 1;
- if (!core_present)
- cpumask_set_cpu(i, &temp_foreign_map);
- }
-
- for_each_online_cpu(i)
- cpumask_andnot(&cpu_foreign_map[i],
- &temp_foreign_map, &cpu_sibling_map[i]);
-}
-
/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
@@ -506,7 +525,6 @@ asmlinkage void start_secondary(void)
sync_counter();
cpu = raw_smp_processor_id();
set_my_cpu_offset(per_cpu_offset(cpu));
- rcutree_report_cpu_starting(cpu);
cpu_probe();
constant_clockevent_init();
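
The hotplug half of the smp.c change is the new clear_cpu_sibling_map(): the sibling masks are maintained symmetrically, so offlining a CPU must drop both directions of each pair or a later re-plug inherits stale topology. The invariant, sketched:

#include <linux/cpumask.h>

/* Drop the symmetric sibling relation between a dying CPU and a peer. */
static void unlink_siblings(struct cpumask *sibling_map, int dying, int peer)
{
	cpumask_clear_cpu(peer, &sibling_map[dying]);
	cpumask_clear_cpu(dying, &sibling_map[peer]);
}
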
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 915f17527893..50a6acd7ffe4 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -675,7 +675,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
*
* There are several ways to safely use this helper:
*
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
* consuming it. In this case, mmu_lock doesn't need to be held during the
* lookup, but it does need to be held while checking the MMU notifier.
*
@@ -855,7 +855,7 @@ retry:
/* Check if an invalidation has taken place since we got pfn */
spin_lock(&kvm->mmu_lock);
- if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+ if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 27701991886d..36106922b5d7 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -298,74 +298,73 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
return ret;
}
-static int _kvm_get_cpucfg(int id, u64 *v)
+static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
- int ret = 0;
-
- if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
+ if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
return -EINVAL;
switch (id) {
case 2:
- /* Return CPUCFG2 features which have been supported by KVM */
+ /* CPUCFG2 features unconditionally supported by KVM */
*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
CPUCFG2_LAM;
/*
- * If LSX is supported by CPU, it is also supported by KVM,
- * as we implement it.
+ * For the ISA extensions listed below, if one is supported
+ * by the host, then it is also supported by KVM.
*/
if (cpu_has_lsx)
*v |= CPUCFG2_LSX;
- /*
- * if LASX is supported by CPU, it is also supported by KVM,
- * as we implement it.
- */
if (cpu_has_lasx)
*v |= CPUCFG2_LASX;
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ /*
+ * No restrictions on other valid CPUCFG IDs' values, but
+ * CPUCFG data is limited to 32 bits as the LoongArch ISA
+ * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
+ */
+ *v = U32_MAX;
+ return 0;
}
- return ret;
}
static int kvm_check_cpucfg(int id, u64 val)
{
- u64 mask;
- int ret = 0;
-
- if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
- return -EINVAL;
+ int ret;
+ u64 mask = 0;
- if (_kvm_get_cpucfg(id, &mask))
+ ret = _kvm_get_cpucfg_mask(id, &mask);
+ if (ret)
return ret;
+ if (val & ~mask)
+ /* Unsupported features and/or the higher 32 bits should not be set */
+ return -EINVAL;
+
switch (id) {
case 2:
- /* CPUCFG2 features checking */
- if (val & ~mask)
- /* The unsupported features should not be set */
- ret = -EINVAL;
- else if (!(val & CPUCFG2_LLFTP))
- /* The LLFTP must be set, as guest must has a constant timer */
- ret = -EINVAL;
- else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
- /* Single and double float point must both be set when enable FP */
- ret = -EINVAL;
- else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
- /* FP should be set when enable LSX */
- ret = -EINVAL;
- else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
- /* LSX, FP should be set when enable LASX, and FP has been checked before. */
- ret = -EINVAL;
- break;
+ if (!(val & CPUCFG2_LLFTP))
+ /* Guests must have a constant timer */
+ return -EINVAL;
+ if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
+ /* Single and double floating point must both be set when FP is enabled */
+ return -EINVAL;
+ if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
+ /* LSX architecturally implies FP but val does not satisfy that */
+ return -EINVAL;
+ if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
+ /* LASX architecturally implies LSX and FP but val does not satisfy that */
+ return -EINVAL;
+ return 0;
default:
- break;
+ /*
+ * Values for the other CPUCFG IDs are not being further validated
+ * besides the mask check above.
+ */
+ return 0;
}
- return ret;
}
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
@@ -566,7 +565,7 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
uint64_t val;
uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
- ret = _kvm_get_cpucfg(attr->attr, &val);
+ ret = _kvm_get_cpucfg_mask(attr->attr, &val);
if (ret)
return ret;
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index cc3e81fe0186..c608adc99845 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -44,6 +44,9 @@ void *kasan_mem_to_shadow(const void *addr)
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
unsigned long offset = 0;
+ if (maddr >= FIXADDR_START)
+ return (void *)(kasan_early_shadow_page);
+
maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 2c0a411f23aa..0b95d32b30c9 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
- }
+ } else {
+ int vec_sz __maybe_unused;
+ void *addr __maybe_unused;
+ struct page *page __maybe_unused;
+
+ /* Avoid lockdep warning */
+ rcutree_report_cpu_starting(cpu);
+
#ifdef CONFIG_NUMA
- else {
- void *addr;
- struct page *page;
- const int vec_sz = sizeof(exception_handlers);
+ vec_sz = sizeof(exception_handlers);
if (pcpu_handlers[cpu])
return;
@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
- }
#endif
+ }
}
void tlb_init(int cpu)
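
The tlb.c restructuring is about ordering, as the "Avoid lockdep warning" comment hints: rcutree_report_cpu_starting() now runs in setup_tlb_handler() before the secondary CPU allocates its exception-handler pages, because RCU must treat the CPU as watched before it touches allocators or tracked locks (the corresponding call is dropped from start_secondary() in smp.c above). A sketch under that assumption, with an illustrative callback name:

#include <linux/rcutree.h>

static void secondary_bringup(unsigned int cpu)
{
	rcutree_report_cpu_starting(cpu); /* make RCU watch this CPU first */
	/* ...only now allocate per-CPU exception handlers... */
}
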
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 4fcd6cd6da23..e73323d759d0 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -201,6 +201,11 @@ bool bpf_jit_supports_kfunc_call(void)
return true;
}
+bool bpf_jit_supports_far_kfunc_call(void)
+{
+ return true;
+}
+
/* initialized on the first pass of build_body() */
static int out_offset = -1;
static int emit_bpf_tail_call(struct jit_ctx *ctx)
@@ -465,7 +470,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
const u8 dst = regmap[insn->dst_reg];
const s16 off = insn->off;
const s32 imm = insn->imm;
- const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
switch (code) {
@@ -923,8 +927,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+
move_imm(ctx, dst, imm64, is32);
return 1;
+ }
/* dst = *(size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
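
The bpf_jit.c hunk fixes an out-of-bounds read: BPF_LD | BPF_IMM | BPF_DW is the only two-slot eBPF instruction, with its 64-bit immediate split across insn->imm (low half) and (insn + 1)->imm (high half), so computing imm64 for every opcode dereferences one slot past the program's last instruction. The reassembly itself:

#include <linux/types.h>

/* Combine the two 32-bit halves of a BPF_LD|BPF_IMM|BPF_DW immediate. */
static u64 bpf_ld_imm64(s32 lo_imm, s32 hi_imm)
{
	return ((u64)hi_imm << 32) | (u32)lo_imm;
}
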
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index c74c9921304f..f597cd08a96b 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -2,6 +2,7 @@
# Objects to go into the VDSO.
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# Include the generic Makefile to check the built vdso.
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 43e39040d3ac..0abcf994ce55 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -15,10 +15,10 @@
KBUILD_DEFCONFIG := multi_defconfig
ifdef cross_compiling
- ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, \
m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
- endif
+ endif
endif
#
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
index b13d8adf3be4..20d30f6265cd 100644
--- a/arch/mips/alchemy/common/prom.c
+++ b/arch/mips/alchemy/common/prom.c
@@ -40,6 +40,7 @@
#include <linux/string.h>
#include <asm/bootinfo.h>
+#include <prom.h>
int prom_argc;
char **prom_argv;
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 2388d68786f4..a7a6d31a7a41 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -30,13 +30,11 @@
#include <linux/mm.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
+#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <au1000.h>
-extern void __init board_setup(void);
-extern void __init alchemy_set_lpj(void);
-
static bool alchemy_dma_coherent(void)
{
switch (alchemy_get_cputype()) {
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 01aff80a5967..99f321b6e417 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -702,7 +702,7 @@ static struct ssb_sprom bcm63xx_sprom = {
.boardflags_hi = 0x0000,
};
-int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
+static int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
if (bus->bustype == SSB_BUSTYPE_PCI) {
memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom));
diff --git a/arch/mips/bcm63xx/dev-rng.c b/arch/mips/bcm63xx/dev-rng.c
index d277b4dc6c68..f94151f7c96f 100644
--- a/arch/mips/bcm63xx/dev-rng.c
+++ b/arch/mips/bcm63xx/dev-rng.c
@@ -26,7 +26,7 @@ static struct platform_device bcm63xx_rng_device = {
.resource = rng_resources,
};
-int __init bcm63xx_rng_register(void)
+static int __init bcm63xx_rng_register(void)
{
if (!BCMCPU_IS_6368())
return -ENODEV;
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c
index 3bc7f3bfc9ad..5d6bf0445b29 100644
--- a/arch/mips/bcm63xx/dev-uart.c
+++ b/arch/mips/bcm63xx/dev-uart.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_uart.h>
static struct resource uart0_resources[] = {
{
diff --git a/arch/mips/bcm63xx/dev-wdt.c b/arch/mips/bcm63xx/dev-wdt.c
index 42130914a3c2..302bf7ed5ad5 100644
--- a/arch/mips/bcm63xx/dev-wdt.c
+++ b/arch/mips/bcm63xx/dev-wdt.c
@@ -34,7 +34,7 @@ static struct platform_device bcm63xx_wdt_device = {
},
};
-int __init bcm63xx_wdt_register(void)
+static int __init bcm63xx_wdt_register(void)
{
wdt_resources[0].start = bcm63xx_regset_address(RSET_WDT);
wdt_resources[0].end = wdt_resources[0].start;
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index 2548013442f6..6240a8f88ea3 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -72,7 +72,7 @@ static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
*/
#define BUILD_IPIC_INTERNAL(width) \
-void __dispatch_internal_##width(int cpu) \
+static void __dispatch_internal_##width(int cpu) \
{ \
u32 pending[width / 32]; \
unsigned int src, tgt; \
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index d811e3e03f81..c13ddb544a23 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -159,7 +159,7 @@ void __init plat_mem_setup(void)
board_setup();
}
-int __init bcm63xx_register_devices(void)
+static int __init bcm63xx_register_devices(void)
{
/* register gpiochip */
bcm63xx_gpio_init();
diff --git a/arch/mips/bcm63xx/timer.c b/arch/mips/bcm63xx/timer.c
index a86065854c0c..74b83807df30 100644
--- a/arch/mips/bcm63xx/timer.c
+++ b/arch/mips/bcm63xx/timer.c
@@ -178,7 +178,7 @@ int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us)
EXPORT_SYMBOL(bcm63xx_timer_set);
-int bcm63xx_timer_init(void)
+static int bcm63xx_timer_init(void)
{
int ret, irq;
u32 reg;
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index 2e099d55a564..9a266bf78339 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -23,9 +23,6 @@
#include <cobalt.h>
-extern void cobalt_machine_restart(char *command);
-extern void cobalt_machine_halt(void);
-
const char *get_system_type(void)
{
switch (cobalt_board_id) {
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index b51f738a39a0..4714074c8bd7 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -287,7 +287,8 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_FUSE_FS=m
CONFIG_CUSE=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_FSCACHE_STATS=y
CONFIG_CACHEFILES=m
CONFIG_PROC_KCORE=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 38f17b658421..3389e6e885d9 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -238,7 +238,8 @@ CONFIG_BTRFS_FS=m
CONFIG_QUOTA=y
CONFIG_QFMT_V2=m
CONFIG_AUTOFS_FS=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 07839a4b397e..78f498752066 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -356,7 +356,8 @@ CONFIG_QFMT_V2=m
CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
CONFIG_VIRTIO_FS=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=m
diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
index 166d2ad372d1..54774f90c23e 100644
--- a/arch/mips/configs/pic32mzda_defconfig
+++ b/arch/mips/configs/pic32mzda_defconfig
@@ -68,7 +68,8 @@ CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 66188739f54d..fb78e6fd5de4 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -37,7 +37,7 @@ static unsigned int nr_prom_mem __initdata;
*/
#define ARC_PAGE_SHIFT 12
-struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
+static struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
{
return (struct linux_mdesc *) ARC_CALL1(get_mdesc, Current);
}
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 4044eaf989ac..0921ddda11a4 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" .set pop"
: "=&r" (sum), "=&r" (tmp)
: "r" (saddr), "r" (daddr),
- "0" (htonl(len)), "r" (htonl(proto)), "r" (sum));
+ "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)
+ : "memory");
return csum_fold(sum);
}
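
The added "memory" clobber matters because the csum_ipv6_magic() asm dereferences saddr and daddr itself: the compiler only sees pointer inputs, so without the clobber it may hold earlier stores to those buffers in registers or sink them past the asm. The general pattern, sketched with a trivial MIPS load:

#include <linux/types.h>

static inline u32 load_first_word(const u32 *buf)
{
	u32 v;

	/*
	 * The asm reads *buf behind the compiler's back; "memory"
	 * forces prior stores to buf to be completed and visible.
	 */
	asm volatile("lw %0, 0(%1)" : "=r"(v) : "r"(buf) : "memory");
	return v;
}
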
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 081be98c71ef..ff5d388502d4 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -39,7 +39,7 @@ extern void jump_label_apply_nops(struct module *mod);
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+ asm goto("1:\t" B_INSN " 2f\n\t"
"2:\t.insn\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
@@ -53,7 +53,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
+ asm goto("1:\t" J_INSN " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index a7eec3364a64..41546777902b 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -597,6 +597,9 @@
#include <asm/cpu.h>
+void alchemy_set_lpj(void);
+void board_setup(void);
+
/* helpers to access the SYS_* registers */
static inline unsigned long alchemy_rdsys(int regofs)
{
diff --git a/arch/mips/include/asm/mach-cobalt/cobalt.h b/arch/mips/include/asm/mach-cobalt/cobalt.h
index 5b9fce73f11d..97f9d5e9446d 100644
--- a/arch/mips/include/asm/mach-cobalt/cobalt.h
+++ b/arch/mips/include/asm/mach-cobalt/cobalt.h
@@ -19,4 +19,7 @@ extern int cobalt_board_id;
#define COBALT_BRD_ID_QUBE2 0x5
#define COBALT_BRD_ID_RAQ2 0x6
+void cobalt_machine_halt(void);
+void cobalt_machine_restart(char *command);
+
#endif /* __ASM_COBALT_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index daf3cf244ea9..d14d0e37ad02 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->cp0_epc = val;
+ regs->cp0_cause &= ~CAUSEF_BD;
}
/* Query offset/name of register from its name/offset */
@@ -154,6 +155,8 @@ static inline long regs_return_value(struct pt_regs *regs)
}
#define instruction_pointer(regs) ((regs)->cp0_epc)
+extern unsigned long exception_ip(struct pt_regs *regs);
+#define exception_ip(regs) exception_ip(regs)
#define profile_pc(regs) instruction_pointer(regs)
extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
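
exception_ip() exists because of MIPS branch delay slots: when an exception hits the instruction in a delay slot, the hardware sets the BD bit in Cause and leaves EPC pointing at the branch, so the raw EPC can be one instruction off (which is also why instruction_pointer_set() now clears CAUSEF_BD). A simplified sketch that ignores microMIPS/MIPS16, which the real exception_epc() also handles:

#include <asm/mipsregs.h>
#include <asm/ptrace.h>

static unsigned long naive_exception_ip(const struct pt_regs *regs)
{
	if (regs->cp0_cause & CAUSEF_BD)
		return regs->cp0_epc + 4;	/* the delay-slot instruction */
	return regs->cp0_epc;
}
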
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 5582a4ca1e9e..7aa2c2360ff6 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -11,6 +11,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
+#include <asm/fpu.h>
#ifdef CONFIG_MIPS_FP_SUPPORT
@@ -309,6 +310,11 @@ void mips_set_personality_nan(struct arch_elf_state *state)
struct cpuinfo_mips *c = &boot_cpu_data;
struct task_struct *t = current;
+ /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case
+ * we are preempted before the lose_fpu(0) in start_thread.
+ */
+ lose_fpu(0);
+
t->thread.fpu.fcr31 = c->fpu_csr31;
switch (state->nan_2008) {
case 0:
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index d9df543f7e2c..59288c13b581 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -31,6 +31,7 @@
#include <linux/seccomp.h>
#include <linux/ftrace.h>
+#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
@@ -48,6 +49,12 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
+unsigned long exception_ip(struct pt_regs *regs)
+{
+ return exception_epc(regs);
+}
+EXPORT_SYMBOL(exception_ip);
+
/*
* Called by kernel/ptrace.c when detaching..
*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index dec6878b35f6..a1c1cb5de913 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2007,7 +2007,13 @@ unsigned long vi_handlers[64];
void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
- memblock_reserve(addr, size);
+ /*
+	 * Reserving exception space on CPUs other than CPU0 is too
+	 * late, since memblock is no longer available by the time
+	 * the APs are brought up.
+ */
+ if (smp_processor_id() == 0)
+ memblock_reserve(addr, size);
}
void __init *set_except_vector(int n, void *addr)
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index a3cf29365858..0c45767eacf6 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -108,10 +108,9 @@ void __init prom_init(void)
prom_init_cmdline();
#if defined(CONFIG_MIPS_MT_SMP)
- if (cpu_has_mipsmt) {
- lantiq_smp_ops = vsmp_smp_ops;
+ lantiq_smp_ops = vsmp_smp_ops;
+ if (cpu_has_mipsmt)
lantiq_smp_ops.init_secondary = lantiq_init_secondary;
- register_smp_ops(&lantiq_smp_ops);
- }
+ register_smp_ops(&lantiq_smp_ops);
#endif
}
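
The lantiq change registers SMP ops unconditionally and only overrides the init_secondary hook when the CPU has MT support, so non-MT CPUs no longer skip registration entirely. A sketch of the pattern with hypothetical types (register_smp_ops() stands in for the real call):

    #include <stdio.h>

    struct smp_ops_sketch {
        void (*init_secondary)(void);
    };

    static void default_init_secondary(void) { }
    static void lantiq_init_secondary(void)  { }

    static struct smp_ops_sketch lantiq_ops;

    static void setup_ops(int cpu_has_mipsmt)
    {
        lantiq_ops.init_secondary = default_init_secondary;
        if (cpu_has_mipsmt)
            lantiq_ops.init_secondary = lantiq_init_secondary;
        /* register_smp_ops(&lantiq_ops);  -- now unconditional */
    }

    int main(void)
    {
        setup_ops(1);
        printf("MT hook: %s\n",
               lantiq_ops.init_secondary == lantiq_init_secondary ? "yes" : "no");
        return 0;
    }
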
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index f25caa6aa9d3..553142c1f14f 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -103,6 +103,9 @@ void __init szmem(unsigned int node)
if (loongson_sysconf.vgabios_addr)
memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
SZ_256K);
+ /* set nid for reserved memory */
+ memblock_set_node((u64)node << 44, (u64)(node + 1) << 44,
+ &memblock.reserved, node);
}
#ifndef CONFIG_NUMA
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 8f61e93c0c5b..68dafd6d3e25 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -132,6 +132,8 @@ static void __init node_mem_init(unsigned int node)
/* Reserve pfn range 0~node[0]->node_start_pfn */
memblock_reserve(0, PAGE_SIZE * start_pfn);
+ /* set nid for reserved memory on node 0 */
+ memblock_set_node(0, 1ULL << 44, &memblock.reserved, 0);
}
}
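
Both Loongson64 hunks tag reserved memblock regions with their NUMA node. On these systems the node id is encoded in the upper physical address bits, so node n owns the range starting at n << 44; without the nid, reserved pages could be accounted to the wrong node. Illustrative only (the shift reflects the platform's convention; the program is a stand-in):

    #include <stdio.h>

    int main(void)
    {
        for (int node = 0; node < 4; node++)
            printf("node %d base: 0x%llx\n", node,
                   (unsigned long long)node << 44);
        return 0;
    }
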
diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile
index 27c14ede191e..9877fcc512b1 100644
--- a/arch/mips/sgi-ip27/Makefile
+++ b/arch/mips/sgi-ip27/Makefile
@@ -5,7 +5,7 @@
obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \
ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \
- ip27-hubio.o ip27-xtalk.o
+ ip27-xtalk.o
obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o
obj-$(CONFIG_SMP) += ip27-smp.o
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index 923a63a51cda..9eb497cb5d52 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -22,6 +22,8 @@
#include <asm/traps.h>
#include <linux/uaccess.h>
+#include "ip27-common.h"
+
static void dump_hub_information(unsigned long errst0, unsigned long errst1)
{
static char *err_type[2][8] = {
@@ -57,7 +59,7 @@ static void dump_hub_information(unsigned long errst0, unsigned long errst1)
[st0.pi_stat0_fmt.s0_err_type] ? : "invalid");
}
-int ip27_be_handler(struct pt_regs *regs, int is_fixup)
+static int ip27_be_handler(struct pt_regs *regs, int is_fixup)
{
unsigned long errst0, errst1;
int data = regs->cp0_cause & 4;
diff --git a/arch/mips/sgi-ip27/ip27-common.h b/arch/mips/sgi-ip27/ip27-common.h
index ed008a08464c..a0059fa13934 100644
--- a/arch/mips/sgi-ip27/ip27-common.h
+++ b/arch/mips/sgi-ip27/ip27-common.h
@@ -10,6 +10,7 @@ extern void hub_rt_clock_event_init(void);
extern void hub_rtc_init(nasid_t nasid);
extern void install_cpu_nmi_handler(int slice);
extern void install_ipi(void);
+extern void ip27_be_init(void);
extern void ip27_reboot_setup(void);
extern const struct plat_smp_ops ip27_smp_ops;
extern unsigned long node_getfirstfree(nasid_t nasid);
@@ -17,4 +18,5 @@ extern void per_cpu_init(void);
extern void replicate_kernel_text(void);
extern void setup_replication_mask(void);
+
#endif /* __IP27_COMMON_H */
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
deleted file mode 100644
index c57f0d8f3218..000000000000
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
- * Copyright (C) 2004 Christoph Hellwig.
- *
- * Support functions for the HUB ASIC - mostly PIO mapping related.
- */
-
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/mmzone.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/agent.h>
-#include <asm/sn/io.h>
-#include <asm/xtalk/xtalk.h>
-
-
-static int force_fire_and_forget = 1;
-
-/**
- * hub_pio_map - establish a HUB PIO mapping
- *
- * @nasid: nasid to perform PIO mapping on
- * @widget: widget ID to perform PIO mapping for
- * @xtalk_addr: xtalk_address that needs to be mapped
- * @size: size of the PIO mapping
- *
- **/
-unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
- unsigned long xtalk_addr, size_t size)
-{
- unsigned i;
-
- /* use small-window mapping if possible */
- if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
- return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
-
- if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
- printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
- " too big (%ld)\n",
- nasid, widget, xtalk_addr, size);
- return 0;
- }
-
- xtalk_addr &= ~(BWIN_SIZE-1);
- for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
- if (test_and_set_bit(i, hub_data(nasid)->h_bigwin_used))
- continue;
-
- /*
- * The code below does a PIO write to setup an ITTE entry.
- *
- * We need to prevent other CPUs from seeing our updated
- * memory shadow of the ITTE (in the piomap) until the ITTE
- * entry is actually set up; otherwise, another CPU might
- * attempt a PIO prematurely.
- *
- * Also, the only way we can know that an entry has been
- * received by the hub and can be used by future PIO reads/
- * writes is by reading back the ITTE entry after writing it.
- *
- * For these two reasons, we PIO read back the ITTE entry
- * after we write it.
- */
- IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
- __raw_readq(IIO_ITTE_GET(nasid, i));
-
- return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
- }
-
- printk(KERN_WARNING "unable to establish PIO mapping for at"
- " hub %d widget %d addr 0x%lx\n",
- nasid, widget, xtalk_addr);
- return 0;
-}
-
-
-/*
- * hub_setup_prb(nasid, prbnum, credits, conveyor)
- *
- * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
- * put it into conveyor belt mode with the specified number of credits.
- */
-static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
-{
- union iprb_u prb;
- int prb_offset;
-
- /*
- * Get the current register value.
- */
- prb_offset = IIO_IOPRB(prbnum);
- prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
-
- /*
- * Clear out some fields.
- */
- prb.iprb_ovflow = 1;
- prb.iprb_bnakctr = 0;
- prb.iprb_anakctr = 0;
-
- /*
- * Enable or disable fire-and-forget mode.
- */
- prb.iprb_ff = force_fire_and_forget ? 1 : 0;
-
- /*
- * Set the appropriate number of PIO credits for the widget.
- */
- prb.iprb_xtalkctr = credits;
-
- /*
- * Store the new value to the register.
- */
- REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
-}
-
-/**
- * hub_set_piomode - set pio mode for a given hub
- *
- * @nasid: physical node ID for the hub in question
- *
- * Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
- * To do this, we have to make absolutely sure that no PIOs are in progress
- * so we turn off access to all widgets for the duration of the function.
- *
- * XXX - This code should really check what kind of widget we're talking
- * to. Bridges can only handle three requests, but XG will do more.
- * How many can crossbow handle to widget 0? We're assuming 1.
- *
- * XXX - There is a bug in the crossbow that link reset PIOs do not
- * return write responses. The easiest solution to this problem is to
- * leave widget 0 (xbow) in fire-and-forget mode at all times. This
- * only affects pio's to xbow registers, which should be rare.
- **/
-static void hub_set_piomode(nasid_t nasid)
-{
- u64 ii_iowa;
- union hubii_wcr_u ii_wcr;
- unsigned i;
-
- ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
- REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
-
- ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
-
- if (ii_wcr.iwcr_dir_con) {
- /*
- * Assume a bridge here.
- */
- hub_setup_prb(nasid, 0, 3);
- } else {
- /*
- * Assume a crossbow here.
- */
- hub_setup_prb(nasid, 0, 1);
- }
-
- /*
- * XXX - Here's where we should take the widget type into
- * when account assigning credits.
- */
- for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
- hub_setup_prb(nasid, i, 3);
-
- REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
-}
-
-/*
- * hub_pio_init - PIO-related hub initialization
- *
- * @hub: hubinfo structure for our hub
- */
-void hub_pio_init(nasid_t nasid)
-{
- unsigned i;
-
- /* initialize big window piomaps for this hub */
- bitmap_zero(hub_data(nasid)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
- for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
- IIO_ITTE_DISABLE(nasid, i);
-
- hub_set_piomode(nasid);
-}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index a0dd3bd2b81b..8f5299b269e7 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -23,6 +23,8 @@
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>
+#include "ip27-common.h"
+
struct hub_irq_data {
u64 *irq_mask[2];
cpuid_t cpu;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index f79c48393716..b8ca94cfb4fe 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -23,6 +23,7 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
+#include <asm/sgialib.h>
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index 84889b57d5ff..fc2816398d0c 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -11,6 +11,8 @@
#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
+#include "ip27-common.h"
+
#if 0
#define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n)
#else
@@ -23,16 +25,7 @@
typedef unsigned long machreg_t;
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
-/*
- * Let's see what else we need to do here. Set up sp, gp?
- */
-void nmi_dump(void)
-{
- void cont_nmi_dump(void);
-
- cont_nmi_dump();
-}
+static void nmi_dump(void);
void install_cpu_nmi_handler(int slice)
{
@@ -53,7 +46,7 @@ void install_cpu_nmi_handler(int slice)
* into the eframe format for the node under consideration.
*/
-void nmi_cpu_eframe_save(nasid_t nasid, int slice)
+static void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
struct reg_struct *nr;
int i;
@@ -129,7 +122,7 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice)
pr_emerg("\n");
}
-void nmi_dump_hub_irq(nasid_t nasid, int slice)
+static void nmi_dump_hub_irq(nasid_t nasid, int slice)
{
u64 mask0, mask1, pend0, pend1;
@@ -153,7 +146,7 @@ void nmi_dump_hub_irq(nasid_t nasid, int slice)
* Copy the cpu registers which have been saved in the IP27prom format
* into the eframe format for the node under consideration.
*/
-void nmi_node_eframe_save(nasid_t nasid)
+static void nmi_node_eframe_save(nasid_t nasid)
{
int slice;
@@ -170,8 +163,7 @@ void nmi_node_eframe_save(nasid_t nasid)
/*
* Save the nmi cpu registers for all cpus in the system.
*/
-void
-nmi_eframes_save(void)
+static void nmi_eframes_save(void)
{
nasid_t nasid;
@@ -179,8 +171,7 @@ nmi_eframes_save(void)
nmi_node_eframe_save(nasid);
}
-void
-cont_nmi_dump(void)
+static void nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
static atomic_t nmied_cpus = ATOMIC_INIT(0);
diff --git a/arch/mips/sgi-ip30/ip30-console.c b/arch/mips/sgi-ip30/ip30-console.c
index b91f8c4fdc78..7c6dcf6e73f7 100644
--- a/arch/mips/sgi-ip30/ip30-console.c
+++ b/arch/mips/sgi-ip30/ip30-console.c
@@ -3,6 +3,7 @@
#include <linux/io.h>
#include <asm/sn/ioc3.h>
+#include <asm/setup.h>
static inline struct ioc3_uartregs *console_uart(void)
{
diff --git a/arch/mips/sgi-ip30/ip30-setup.c b/arch/mips/sgi-ip30/ip30-setup.c
index 75a34684e704..e8547636a748 100644
--- a/arch/mips/sgi-ip30/ip30-setup.c
+++ b/arch/mips/sgi-ip30/ip30-setup.c
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/memblock.h>
+#include <asm/bootinfo.h>
#include <asm/smp-ops.h>
#include <asm/sgialib.h>
#include <asm/time.h>
diff --git a/arch/mips/sgi-ip32/crime.c b/arch/mips/sgi-ip32/crime.c
index a8e0c776ca6c..b8a0e4cfa9ce 100644
--- a/arch/mips/sgi-ip32/crime.c
+++ b/arch/mips/sgi-ip32/crime.c
@@ -18,6 +18,8 @@
#include <asm/ip32/crime.h>
#include <asm/ip32/mace.h>
+#include "ip32-common.h"
+
struct sgi_crime __iomem *crime;
struct sgi_mace __iomem *mace;
@@ -39,7 +41,7 @@ void __init crime_init(void)
id, rev, field, (unsigned long) CRIME_BASE);
}
-irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
+irqreturn_t crime_memerr_intr(int irq, void *dev_id)
{
unsigned long stat, addr;
int fatal = 0;
@@ -90,7 +92,7 @@ irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
return IRQ_HANDLED;
}
-irqreturn_t crime_cpuerr_intr(unsigned int irq, void *dev_id)
+irqreturn_t crime_cpuerr_intr(int irq, void *dev_id)
{
unsigned long stat = crime->cpu_error_stat & CRIME_CPU_ERROR_MASK;
unsigned long addr = crime->cpu_error_addr & CRIME_CPU_ERROR_ADDR_MASK;
diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c
index 478b63b4c808..7cbc27941f92 100644
--- a/arch/mips/sgi-ip32/ip32-berr.c
+++ b/arch/mips/sgi-ip32/ip32-berr.c
@@ -18,6 +18,8 @@
#include <asm/ptrace.h>
#include <asm/tlbdebug.h>
+#include "ip32-common.h"
+
static int ip32_be_handler(struct pt_regs *regs, int is_fixup)
{
int data = regs->cp0_cause & 4;
diff --git a/arch/mips/sgi-ip32/ip32-common.h b/arch/mips/sgi-ip32/ip32-common.h
new file mode 100644
index 000000000000..cfc0225b1419
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-common.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP32_COMMON_H
+#define __IP32_COMMON_H
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+void __init crime_init(void);
+irqreturn_t crime_memerr_intr(int irq, void *dev_id);
+irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
+void __init ip32_be_init(void);
+void ip32_prepare_poweroff(void);
+
+#endif /* __IP32_COMMON_H */
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index e21ea1de05e3..29d04468a06b 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -28,6 +28,8 @@
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
+#include "ip32-common.h"
+
/* issue a PIO read to make sure no PIO writes are pending */
static inline void flush_crime_bus(void)
{
@@ -107,10 +109,6 @@ static inline void flush_mace_bus(void)
* is quite different anyway.
*/
-/* Some initial interrupts to set up */
-extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
-extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
-
/*
* This is for pure CRIME interrupts - ie not MACE. The advantage?
* We get to split the register in half and do faster lookups.
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index 3fc8d0a0bdfa..5fee33744f67 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -15,6 +15,7 @@
#include <asm/ip32/crime.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
+#include <asm/sgialib.h>
extern void crime_init(void);
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 18d1c115cd53..6bdc1421cda4 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -29,6 +29,8 @@
#include <asm/ip32/crime.h>
#include <asm/ip32/ip32_ints.h>
+#include "ip32-common.h"
+
#define POWERDOWN_TIMEOUT 120
/*
* Blink frequency during reboot grace period and when panicked.
diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c
index 8019dae1721a..aeb0805aae57 100644
--- a/arch/mips/sgi-ip32/ip32-setup.c
+++ b/arch/mips/sgi-ip32/ip32-setup.c
@@ -26,8 +26,7 @@
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
-extern void ip32_be_init(void);
-extern void crime_init(void);
+#include "ip32-common.h"
#ifdef CONFIG_SGI_O2MACE_ETH
/*
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index d14ccc948a29..5c845e8d59d9 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -25,7 +25,6 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
- select BUILDTIME_TABLE_SORT
select HAVE_KERNEL_UNCOMPRESSED
select HAVE_PCI
select HAVE_PERF_EVENTS
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 920db57b6b4c..316f84f1d15c 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -50,12 +50,12 @@ export CROSS32CC
# Set default cross compiler for kernel build
ifdef cross_compiling
- ifeq ($(CROSS_COMPILE),)
+ ifeq ($(CROSS_COMPILE),)
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS_COMPILE := $(call cc-cross-prefix, \
$(foreach a,$(CC_ARCHES), \
$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
- endif
+ endif
endif
ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 74d17d7e759d..5937d5edaba1 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -576,6 +576,7 @@
.section __ex_table,"aw" ! \
.align 4 ! \
.word (fault_addr - .), (except_addr - .) ! \
+ or %r0,%r0,%r0 ! \
.previous
diff --git a/arch/parisc/include/asm/extable.h b/arch/parisc/include/asm/extable.h
new file mode 100644
index 000000000000..4ea23e3d79dc
--- /dev/null
+++ b/arch/parisc/include/asm/extable.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_EXTABLE_H
+#define __PARISC_EXTABLE_H
+
+#include <asm/ptrace.h>
+#include <linux/compiler.h>
+
+/*
+ * The exception table consists of three addresses:
+ *
+ * - A relative address to the instruction that is allowed to fault.
+ * - A relative address at which the program should continue (fixup routine)
+ * - An asm statement which specifies which CPU register will
+ * receive -EFAULT when an exception happens if the lowest bit in
+ * the fixup address is set.
+ *
+ * Note: The register specified in the err_opcode instruction will be
+ * modified at runtime if a fault happens. Register %r0 will be ignored.
+ *
+ * Since relative addresses are used, 32-bit values are sufficient even on
+ * a 64-bit kernel.
+ */
+
+struct pt_regs;
+int fixup_exception(struct pt_regs *regs);
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+struct exception_table_entry {
+ int insn; /* relative address of insn that is allowed to fault. */
+ int fixup; /* relative address of fixup routine */
+ int err_opcode; /* sample opcode with register which holds error code */
+};
+
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\
+ ".section __ex_table,\"aw\"\n" \
+ ".align 4\n" \
+ ".word (" #fault_addr " - .), (" #except_addr " - .)\n" \
+ opcode "\n" \
+ ".previous\n"
+
+/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT on fault into the register specified by the err_opcode instruction,
+ * and zeroes the target register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_VAR(__err_var) \
+ int __err_var = 0
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\
+ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register)
+
+static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
+ struct exception_table_entry *b,
+ struct exception_table_entry tmp,
+ int delta)
+{
+ a->fixup = b->fixup + delta;
+ b->fixup = tmp.fixup - delta;
+ a->err_opcode = b->err_opcode;
+ b->err_opcode = tmp.err_opcode;
+}
+#define swap_ex_entry_fixup swap_ex_entry_fixup
+
+#endif
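
The new entry format appends a sample "or %r0,%r0,%rN" opcode to each fixup so the fault handler can learn which register should receive -EFAULT, instead of hard-coding %r29 as before. On PA-RISC the target register of that instruction sits in the low 5 bits of the opcode word, which is what fixup_exception() masks out. A minimal sketch of the decode, assuming that encoding (user-space, hypothetical helper):

    #include <stdio.h>

    static int err_register(unsigned int err_opcode)
    {
        return err_opcode & 0x1f;   /* 0 (%r0) means nothing to update */
    }

    int main(void)
    {
        printf("%d\n", err_register(0x29));   /* hypothetical opcode word */
        return 0;
    }
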
diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
index 94428798b6aa..317ebc5edc9f 100644
--- a/arch/parisc/include/asm/jump_label.h
+++ b/arch/parisc/include/asm/jump_label.h
@@ -12,7 +12,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"
@@ -29,7 +29,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"b,n %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"
diff --git a/arch/parisc/include/asm/kprobes.h b/arch/parisc/include/asm/kprobes.h
index 0a175ac87698..0f42f5c8e3b6 100644
--- a/arch/parisc/include/asm/kprobes.h
+++ b/arch/parisc/include/asm/kprobes.h
@@ -10,9 +10,10 @@
#ifndef _PARISC_KPROBES_H
#define _PARISC_KPROBES_H
+#include <asm-generic/kprobes.h>
+
#ifdef CONFIG_KPROBES
-#include <asm-generic/kprobes.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index c822bd0c0e3c..51f40eaf7780 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -8,7 +8,8 @@
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%1),%0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
+ "or %%r0,%%r0,%%r0") \
: "=&r" (pa) \
: "r" (va) \
: "memory" \
@@ -22,7 +23,8 @@
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%%sr3,%1),%0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \
+ "or %%r0,%%r0,%%r0") \
: "=&r" (pa) \
: "r" (va) \
: "memory" \
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4165079898d9..88d0ae5769dd 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -7,6 +7,7 @@
*/
#include <asm/page.h>
#include <asm/cache.h>
+#include <asm/extable.h>
#include <linux/bug.h>
#include <linux/string.h>
@@ -26,37 +27,6 @@
#define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr)
#endif
-/*
- * The exception table contains two values: the first is the relative offset to
- * the address of the instruction that is allowed to fault, and the second is
- * the relative offset to the address of the fixup routine. Since relative
- * addresses are used, 32bit values are sufficient even on 64bit kernel.
- */
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-struct exception_table_entry {
- int insn; /* relative address of insn that is allowed to fault. */
- int fixup; /* relative address of fixup routine */
-};
-
-#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
- ".section __ex_table,\"aw\"\n" \
- ".align 4\n" \
- ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
- ".previous\n"
-
-/*
- * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
- * (with lowest bit set) for which the fault handler in fixup_exception() will
- * load -EFAULT into %r29 for a read or write fault, and zeroes the target
- * register in case of a read fault in get_user().
- */
-#define ASM_EXCEPTIONTABLE_REG 29
-#define ASM_EXCEPTIONTABLE_VAR(__variable) \
- register long __variable __asm__ ("r29") = 0
-#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
- ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
-
#define __get_user_internal(sr, val, ptr) \
({ \
ASM_EXCEPTIONTABLE_VAR(__gu_err); \
@@ -83,7 +53,7 @@ struct exception_table_entry {
\
__asm__("1: " ldx " 0(%%sr%2,%3),%0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
: "=r"(__gu_val), "+r"(__gu_err) \
: "i"(sr), "r"(ptr)); \
\
@@ -115,8 +85,8 @@ struct exception_table_entry {
"1: ldw 0(%%sr%2,%3),%0\n" \
"2: ldw 4(%%sr%2,%3),%R0\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1") \
: "=&r"(__gu_tmp.l), "+r"(__gu_err) \
: "i"(sr), "r"(ptr)); \
\
@@ -174,7 +144,7 @@ struct exception_table_entry {
__asm__ __volatile__ ( \
"1: " stx " %1,0(%%sr%2,%3)\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
: "+r"(__pu_err) \
: "r"(x), "i"(sr), "r"(ptr))
@@ -186,15 +156,14 @@ struct exception_table_entry {
"1: stw %1,0(%%sr%2,%3)\n" \
"2: stw %R1,4(%%sr%2,%3)\n" \
"9:\n" \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0") \
: "+r"(__pu_err) \
: "r"(__val), "i"(sr), "r"(ptr)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
-
/*
* Complex access routines -- external declarations
*/
@@ -216,7 +185,4 @@ unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
-struct pt_regs;
-int fixup_exception(struct pt_regs *regs);
-
#endif /* __PARISC_UACCESS_H */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 268d90a9325b..422f3e1e6d9c 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -58,7 +58,7 @@ int pa_serialize_tlb_flushes __ro_after_init;
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
-struct pdc_btlb_info btlb_info __ro_after_init;
+struct pdc_btlb_info btlb_info;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
@@ -264,6 +264,10 @@ parisc_cache_init(void)
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
+ /* stride needs to be non-zero, otherwise cache flushes will not work */
+ WARN_ON(cache_info.dc_size && dcache_stride == 0);
+ WARN_ON(cache_info.ic_size && icache_stride == 0);
+
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
@@ -850,7 +854,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
#endif
" fic,m %3(%4,%0)\n"
"2: sync\n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
: "+r" (start), "+r" (error)
: "r" (end), "r" (dcache_stride), "i" (SR_USER));
}
@@ -865,7 +869,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
#endif
" fdc,m %3(%4,%0)\n"
"2: sync\n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
: "+r" (start), "+r" (error)
: "r" (end), "r" (icache_stride), "i" (SR_USER));
}
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 25f9b9e9d6df..c7ff339732ba 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -742,7 +742,7 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
};
if (device_for_each_child(parent, &recurse_data, descend_children))
- { /* nothing */ };
+ { /* nothing */ }
return d.dev;
}
@@ -1004,6 +1004,9 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
pr_info("\n");
+ /* Prevent hung task messages when printing on serial console */
+ cond_resched();
+
pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
hpa, parisc_hardware_description(&dev->id));
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index d1defb9ede70..621a4b386ae4 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -78,7 +78,7 @@ asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
#endif
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
int ftrace_enable_ftrace_graph_caller(void)
{
static_key_enable(&ftrace_graph_enable.key);
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index e95a977ba5f3..bf73562706b2 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -172,7 +172,6 @@ static int __init processor_probe(struct parisc_device *dev)
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;
- set_cpu_possible(cpuid, true);
store_cpu_topology(cpuid);
#ifdef CONFIG_SMP
@@ -474,13 +473,6 @@ static struct parisc_driver cpu_driver __refdata = {
*/
void __init processor_init(void)
{
- unsigned int cpu;
-
reset_cpu_topology();
-
- /* reset possible mask. We will mark those which are possible. */
- for_each_possible_cpu(cpu)
- set_cpu_possible(cpu, false);
-
register_parisc_driver(&cpu_driver);
}
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index ce25acfe4889..c520e551a165 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -120,8 +120,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg)
"2: ldbs 1(%%sr1,%3), %0\n"
" depw %2, 23, 24, %0\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "+r" (val), "+r" (ret), "=&r" (temp1)
: "r" (saddr), "r" (regs->isr) );
@@ -152,8 +152,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
" mtctl %2,11\n"
" vshd %0,%3,%0\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
: "r" (saddr), "r" (regs->isr) );
@@ -189,8 +189,8 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
" mtsar %%r19\n"
" shrpd %0,%%r20,%%sar,%0\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
: "=r" (val), "+r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20" );
@@ -209,9 +209,9 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
" vshd %0,%R0,%0\n"
" vshd %R0,%4,%R0\n"
"4: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
: "r" (regs->isr) );
}
@@ -244,8 +244,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg)
"1: stb %1, 0(%%sr1, %3)\n"
"2: stb %2, 1(%%sr1, %3)\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
: "+r" (ret), "=&r" (temp1)
: "r" (val), "r" (regs->ior), "r" (regs->isr) );
@@ -285,8 +285,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
" stw %%r20,0(%%sr1,%2)\n"
" stw %%r21,4(%%sr1,%2)\n"
"3: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
@@ -329,10 +329,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
"3: std %%r20,0(%%sr1,%2)\n"
"4: std %%r21,8(%%sr1,%2)\n"
"5: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
@@ -357,11 +357,11 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
"4: stw %%r1,4(%%sr1,%2)\n"
"5: stw %R1,8(%%sr1,%2)\n"
"6: \n"
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0")
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0")
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r1" );
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 27ae40a443b8..f7e0fee5ee55 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -228,10 +228,8 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
#ifdef CONFIG_IRQSTACKS
extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */
- void *ptr;
- ptr = dereference_kernel_function_descriptor(&handle_interruption);
- if (pc_is_kernel_fn(pc, ptr)) {
+ if (pc_is_kernel_fn(pc, handle_interruption)) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
@@ -239,13 +237,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
return 1;
}
- if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
- pc_is_kernel_fn(pc, syscall_exit)) {
+ if (pc == (unsigned long)&ret_from_kernel_thread ||
+ pc == (unsigned long)&syscall_exit) {
info->prev_sp = info->prev_ip = 0;
return 1;
}
- if (pc_is_kernel_fn(pc, intr_return)) {
+ if (pc == (unsigned long)&intr_return) {
struct pt_regs *regs;
dbg("Found intr_return()\n");
@@ -257,14 +255,14 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
}
if (pc_is_kernel_fn(pc, _switch_to) ||
- pc_is_kernel_fn(pc, _switch_to_ret)) {
+ pc == (unsigned long)&_switch_to_ret) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
return 1;
}
#ifdef CONFIG_IRQSTACKS
- if (pc_is_kernel_fn(pc, _call_on_stack)) {
+ if (pc == (unsigned long)&_call_on_stack) {
info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 548051b0b4af..b445e47903cf 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -127,7 +127,7 @@ SECTIONS
}
#endif
- RO_DATA(8)
+ RO_DATA(PAGE_SIZE)
/* unwind info */
. = ALIGN(4);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 2fe5b44986e0..c39de84e98b0 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
* Fix up get_user() and put_user().
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
* bit in the relative address of the fixup routine to indicate
- * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
- * -EFAULT to report a userspace access error.
+ * that the register encoded in the "or %r0,%r0,register"
+ * opcode should be loaded with -EFAULT to report a userspace
+ * access error.
*/
if (fix->fixup & 1) {
- regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
+ int fault_error_reg = fix->err_opcode & 0x1f;
+ if (!WARN_ON(!fault_error_reg))
+ regs->gr[fault_error_reg] = -EFAULT;
+ pr_debug("Unalignment fixup of register %d at %pS\n",
+ fault_error_reg, (void*)regs->iaoq[0]);
/* zero target register for get_user() */
if (parisc_acctyp(0, regs->iir) == VM_READ) {
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 414b978b8010..b9fc064d38d2 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -859,6 +859,7 @@ config THREAD_SHIFT
int "Thread shift" if EXPERT
range 13 15
default "15" if PPC_256K_PAGES
+ default "15" if PPC_PSERIES || PPC_POWERNV
default "14" if PPC64
default "13"
help
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 1ebd2ca97f12..107fc5a48456 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -20,14 +20,6 @@
#ifndef __ASSEMBLY__
extern void _mcount(void);
-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
- if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
- addr += MCOUNT_INSN_SIZE;
-
- return addr;
-}
-
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
unsigned long sp);
@@ -142,8 +134,10 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
#ifdef CONFIG_FUNCTION_TRACER
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
void ftrace_free_init_tramp(void);
+unsigned long ftrace_call_adjust(unsigned long addr);
#else
static inline void ftrace_free_init_tramp(void) { }
+static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 93ce3ec25387..2f2a86ed2280 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -17,7 +17,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".long 1b - ., %l[l_yes] - .\n\t"
@@ -32,7 +32,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".long 1b - ., %l[l_yes] - .\n\t"
diff --git a/arch/powerpc/include/asm/papr-sysparm.h b/arch/powerpc/include/asm/papr-sysparm.h
index 0dbbff59101d..c3cd5b131033 100644
--- a/arch/powerpc/include/asm/papr-sysparm.h
+++ b/arch/powerpc/include/asm/papr-sysparm.h
@@ -32,7 +32,7 @@ typedef struct {
*/
struct papr_sysparm_buf {
__be16 len;
- char val[PAPR_SYSPARM_MAX_OUTPUT];
+ u8 val[PAPR_SYSPARM_MAX_OUTPUT];
};
struct papr_sysparm_buf *papr_sysparm_buf_alloc(void);
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index ce2b1b5eebdd..a8b7e8682f5b 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -30,6 +30,16 @@ void *pci_traverse_device_nodes(struct device_node *start,
void *data);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+#if defined(CONFIG_IOMMU_API) && (defined(CONFIG_PPC_PSERIES) || \
+ defined(CONFIG_PPC_POWERNV))
+extern void ppc_iommu_register_device(struct pci_controller *phb);
+extern void ppc_iommu_unregister_device(struct pci_controller *phb);
+#else
+static inline void ppc_iommu_register_device(struct pci_controller *phb) { }
+static inline void ppc_iommu_unregister_device(struct pci_controller *phb) { }
+#endif
+
+
/* From rtas_pci.h */
extern void init_pci_config_tokens (void);
extern unsigned long get_phb_buid (struct device_node *);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7fd09f25452d..bb47af9054a9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -617,6 +617,8 @@
#endif
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
+#define SPRN_HID2_G2_LE 0x3F3 /* G2_LE HID2 Register */
+#define HID2_G2_LE_HBE (1<<18) /* High BAT Enable (G2_LE) */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 9bb2210c8d44..065ffd1b2f8a 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -69,7 +69,7 @@ enum rtas_function_index {
RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE,
RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2,
RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW,
- RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS,
+ RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW,
RTAS_FNIDX__IBM_SCAN_LOG_DUMP,
RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR,
RTAS_FNIDX__IBM_SET_EEH_OPTION,
@@ -164,7 +164,7 @@ typedef struct {
#define RTAS_FN_IBM_READ_SLOT_RESET_STATE rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE)
#define RTAS_FN_IBM_READ_SLOT_RESET_STATE2 rtas_fn_handle(RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2)
#define RTAS_FN_IBM_REMOVE_PE_DMA_WINDOW rtas_fn_handle(RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW)
-#define RTAS_FN_IBM_RESET_PE_DMA_WINDOWS rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS)
+#define RTAS_FN_IBM_RESET_PE_DMA_WINDOW rtas_fn_handle(RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW)
#define RTAS_FN_IBM_SCAN_LOG_DUMP rtas_fn_handle(RTAS_FNIDX__IBM_SCAN_LOG_DUMP)
#define RTAS_FN_IBM_SET_DYNAMIC_INDICATOR rtas_fn_handle(RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR)
#define RTAS_FN_IBM_SET_EEH_OPTION rtas_fn_handle(RTAS_FNIDX__IBM_SET_EEH_OPTION)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index ea26665f82cf..f43f3a6b0051 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -14,6 +14,7 @@ typedef struct func_desc func_desc_t;
extern char __head_end[];
extern char __srwx_boundary[];
+extern char __exittext_begin[], __exittext_end[];
/* Patch sites */
extern s32 patch__call_flush_branch_caches1;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index bf5dde1a4114..15c5691dd218 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -14,7 +14,7 @@
#ifdef __KERNEL__
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_THREAD_SHIFT < 15
#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
#else
#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index f1f9890f50d3..de10437fd206 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -74,7 +74,7 @@ __pu_failed: \
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm goto( \
"1: " op " %0,0(%1) # put_user\n" \
EX_TABLE(1b, %l2) \
: \
@@ -83,7 +83,7 @@ __pu_failed: \
: label)
#else
#define __put_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm goto( \
"1: " op "%U1%X1 %0,%1 # put_user\n" \
EX_TABLE(1b, %l2) \
: \
@@ -97,7 +97,7 @@ __pu_failed: \
__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
- asm_volatile_goto( \
+ asm goto( \
"1: stw%X1 %0, %1\n" \
"2: stw%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
@@ -146,7 +146,7 @@ do { \
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __get_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm_goto_output( \
"1: "op" %0,0(%1) # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
@@ -155,7 +155,7 @@ do { \
: label)
#else
#define __get_user_asm_goto(x, addr, label, op) \
- asm_volatile_goto( \
+ asm_goto_output( \
"1: "op"%U1%X1 %0, %1 # get_user\n" \
EX_TABLE(1b, %l2) \
: "=r" (x) \
@@ -169,7 +169,7 @@ do { \
__get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label) \
- asm_volatile_goto( \
+ asm_goto_output( \
"1: lwz%X1 %0, %1\n" \
"2: lwz%X1 %L0, %L1\n" \
EX_TABLE(1b, %l2) \
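
The get_user() side switches to asm_goto_output() because these statements have an output operand (the loaded value), and asm goto with outputs is only supported by newer compilers; the wrapper lets older ones take a fallback path. A hedged x86-64 user-space sketch, assuming GCC 11+ or Clang 11+ (outputs are only valid on the fallthrough path, not after the jump):

    #include <stdio.h>

    int main(void)
    {
        int x;
        asm goto("movl $42, %0" : "=r"(x) : : : fail);
        printf("%d\n", x);   /* fallthrough: output is valid here */
        return 0;
    fail:
        return 1;
    }
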
diff --git a/arch/powerpc/include/uapi/asm/papr-sysparm.h b/arch/powerpc/include/uapi/asm/papr-sysparm.h
index 9f9a0f267ea5..f733467b1534 100644
--- a/arch/powerpc/include/uapi/asm/papr-sysparm.h
+++ b/arch/powerpc/include/uapi/asm/papr-sysparm.h
@@ -14,7 +14,7 @@ enum {
struct papr_sysparm_io_block {
__u32 parameter;
__u16 length;
- char data[PAPR_SYSPARM_MAX_OUTPUT];
+ __u8 data[PAPR_SYSPARM_MAX_OUTPUT];
};
/**
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f29ce3dd6140..bfd3f442e5eb 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -26,6 +26,15 @@ BEGIN_FTR_SECTION
bl __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
bl setup_common_caches
+
+ /*
+ * This assumes that all cores using __setup_cpu_603 with
+ * MMU_FTR_USE_HIGH_BATS are G2_LE compatible
+ */
+BEGIN_MMU_FTR_SECTION
+ bl setup_g2_le_hid2
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
mtlr r5
blr
_GLOBAL(__setup_cpu_604)
@@ -115,6 +124,16 @@ SYM_FUNC_START_LOCAL(setup_604_hid0)
blr
SYM_FUNC_END(setup_604_hid0)
+/* Enable high BATs for G2_LE and derivatives like e300cX */
+SYM_FUNC_START_LOCAL(setup_g2_le_hid2)
+ mfspr r11,SPRN_HID2_G2_LE
+ oris r11,r11,HID2_G2_LE_HBE@h
+ mtspr SPRN_HID2_G2_LE,r11
+ sync
+ isync
+ blr
+SYM_FUNC_END(setup_g2_le_hid2)
+
/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
* erratas we work around here.
* Moto MPC710CE.pdf describes them, those are errata
@@ -495,4 +514,3 @@ _GLOBAL(__restore_cpu_setup)
mtcr r7
blr
_ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)
-
diff --git a/arch/powerpc/kernel/cpu_specs_e500mc.h b/arch/powerpc/kernel/cpu_specs_e500mc.h
index ceb06b109f83..2ae8e9a7b461 100644
--- a/arch/powerpc/kernel/cpu_specs_e500mc.h
+++ b/arch/powerpc/kernel/cpu_specs_e500mc.h
@@ -8,7 +8,8 @@
#ifdef CONFIG_PPC64
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
- PPC_FEATURE_HAS_FPU | PPC_FEATURE_64)
+ PPC_FEATURE_HAS_FPU | PPC_FEATURE_64 | \
+ PPC_FEATURE_BOOKE)
#else
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE)
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index bd863702d812..1ad059a9e2fe 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -52,7 +52,8 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
- std r11,_NIP(r1)
+ std r11,_LINK(r1)
+ std r11,_NIP(r1) /* Saved LR is also the next instruction */
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
@@ -70,7 +71,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
std r9,GPR13(r1)
SAVE_NVGPRS(r1)
std r11,_XER(r1)
- std r11,_LINK(r1)
std r11,_CTR(r1)
li r11,\trapnr
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ebe259bdd462..1185efebf032 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1287,20 +1287,22 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_group *grp = iommu_group_get(dev);
struct iommu_table_group *table_group;
- int ret = -EINVAL;
/* At first attach the ownership is already set */
- if (!domain)
+ if (!domain) {
+ iommu_group_put(grp);
return 0;
-
- if (!grp)
- return -ENODEV;
+ }
table_group = iommu_group_get_iommudata(grp);
- ret = table_group->ops->take_ownership(table_group);
+ /*
+	 * The domain is being moved to PLATFORM from an earlier
+	 * BLOCKED state, so the table_group ownership has to be
+	 * released.
+ */
+ table_group->ops->release_ownership(table_group);
iommu_group_put(grp);
- return ret;
+ return 0;
}
static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
@@ -1312,13 +1314,32 @@ static struct iommu_domain spapr_tce_platform_domain = {
.ops = &spapr_tce_platform_domain_ops,
};
-static struct iommu_domain spapr_tce_blocked_domain = {
- .type = IOMMU_DOMAIN_BLOCKED,
+static int
+spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
+ struct device *dev)
+{
+ struct iommu_group *grp = iommu_group_get(dev);
+ struct iommu_table_group *table_group;
+ int ret = -EINVAL;
+
/*
* FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
* also sets the dma_api ops
*/
- .ops = &spapr_tce_platform_domain_ops,
+ table_group = iommu_group_get_iommudata(grp);
+ ret = table_group->ops->take_ownership(table_group);
+ iommu_group_put(grp);
+
+ return ret;
+}
+
+static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
+ .attach_dev = spapr_tce_blocked_iommu_attach_dev,
+};
+
+static struct iommu_domain spapr_tce_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &spapr_tce_blocked_domain_ops,
};
static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
@@ -1339,7 +1360,7 @@ static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
struct pci_controller *hose;
if (!dev_is_pci(dev))
- return ERR_PTR(-EPERM);
+ return ERR_PTR(-ENODEV);
pdev = to_pci_dev(dev);
hose = pdev->bus->sysdata;
@@ -1388,6 +1409,21 @@ static const struct attribute_group *spapr_tce_iommu_groups[] = {
NULL,
};
+void ppc_iommu_register_device(struct pci_controller *phb)
+{
+ iommu_device_sysfs_add(&phb->iommu, phb->parent,
+ spapr_tce_iommu_groups, "iommu-phb%04x",
+ phb->global_number);
+ iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
+ phb->parent);
+}
+
+void ppc_iommu_unregister_device(struct pci_controller *phb)
+{
+ iommu_device_unregister(&phb->iommu);
+ iommu_device_sysfs_remove(&phb->iommu);
+}
+
/*
* This registers IOMMU devices of PHBs. This needs to happen
* after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
@@ -1398,11 +1434,7 @@ static int __init spapr_tce_setup_phb_iommus_initcall(void)
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node) {
- iommu_device_sysfs_add(&hose->iommu, hose->parent,
- spapr_tce_iommu_groups, "iommu-phb%04x",
- hose->global_number);
- iommu_device_register(&hose->iommu, &spapr_tce_iommu_ops,
- hose->parent);
+ ppc_iommu_register_device(hose);
}
return 0;
}
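
The IOMMU rework gives the BLOCKED domain its own attach_dev instead of reusing the platform ops: attaching to BLOCKED takes table_group ownership away from the DMA API, and attaching back to PLATFORM releases it. A toy sketch of the symmetric ownership toggle (hypothetical state, no real IOMMU plumbing):

    #include <stdbool.h>
    #include <stdio.h>

    static bool kernel_owns_table = true;

    static int blocked_attach(void)   /* userspace driver takes over */
    {
        kernel_owns_table = false;    /* take_ownership() */
        return 0;
    }

    static int platform_attach(void)  /* back to the kernel DMA API */
    {
        kernel_owns_table = true;     /* release_ownership() */
        return 0;
    }

    int main(void)
    {
        blocked_attach();
        platform_attach();
        printf("kernel owns table: %d\n", kernel_owns_table);
        return 0;
    }
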
diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
index 938e66829eae..d5c48d1b0a31 100644
--- a/arch/powerpc/kernel/irq_64.c
+++ b/arch/powerpc/kernel/irq_64.c
@@ -230,7 +230,7 @@ again:
* This allows interrupts to be unmasked without hard disabling, and
* also without new hard interrupts coming in ahead of pending ones.
*/
- asm_volatile_goto(
+ asm goto(
"1: \n"
" lbz 9,%0(13) \n"
" cmpwi 9,0 \n"
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7e793b503e29..8064d9c3de86 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -375,8 +375,13 @@ static struct rtas_function rtas_function_table[] __ro_after_init = {
[RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
.name = "ibm,remove-pe-dma-window",
},
- [RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOWS] = {
- .name = "ibm,reset-pe-dma-windows",
+ [RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW] = {
+ /*
+ * Note: PAPR+ v2.13 7.3.31.4.1 spells this as
+ * "ibm,reset-pe-dma-windows" (plural), but RTAS
+ * implementations use the singular form in practice.
+ */
+ .name = "ibm,reset-pe-dma-window",
},
[RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
.name = "ibm,scan-log-dump",
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 82010629cf88..d8d6b4fd9a14 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -27,10 +27,22 @@
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>
+#include <asm/sections.h>
#define NUM_FTRACE_TRAMPS 2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
+ addr += MCOUNT_INSN_SIZE;
+
+ return addr;
+}
+
static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
{
ppc_inst_t op;
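
ftrace_call_adjust() now returns 0 for addresses inside .exit.text, bounded by the new __exittext_begin/__exittext_end linker symbols, so ftrace never records patch sites in code that may be discarded. The test is a plain half-open range check (sketch; the bounds here are stand-ins for the linker symbols):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool in_exit_text(uintptr_t addr, uintptr_t begin, uintptr_t end)
    {
        return addr >= begin && addr < end;   /* half-open: [begin, end) */
    }

    int main(void)
    {
        uintptr_t exit_begin = 0x1000, exit_end = 0x2000;   /* stand-ins */
        printf("%d\n", in_exit_text(0x1800, exit_begin, exit_end));
        return 0;
    }
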
diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c
index 7b85c3b460a3..12fab1803bcf 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_pg.c
+++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c
@@ -37,6 +37,11 @@
#define NUM_FTRACE_TRAMPS 8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
+unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 1c5970df3233..f420df7888a7 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -281,7 +281,9 @@ SECTIONS
* to deal with references from __bug_table
*/
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
+ __exittext_begin = .;
EXIT_TEXT
+ __exittext_end = .;
}
. = ALIGN(PAGE_SIZE);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 52427fc2a33f..0b921704da45 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -391,6 +391,24 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
/* Dummy value used in computing PCR value below */
#define PCR_ARCH_31 (PCR_ARCH_300 << 1)
+static inline unsigned long map_pcr_to_cap(unsigned long pcr)
+{
+ unsigned long cap = 0;
+
+ switch (pcr) {
+ case PCR_ARCH_300:
+ cap = H_GUEST_CAP_POWER9;
+ break;
+ case PCR_ARCH_31:
+ cap = H_GUEST_CAP_POWER10;
+ break;
+ default:
+ break;
+ }
+
+ return cap;
+}
+
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0;
@@ -424,11 +442,9 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
break;
case PVR_ARCH_300:
guest_pcr_bit = PCR_ARCH_300;
- cap = H_GUEST_CAP_POWER9;
break;
case PVR_ARCH_31:
guest_pcr_bit = PCR_ARCH_31;
- cap = H_GUEST_CAP_POWER10;
break;
default:
return -EINVAL;
@@ -440,6 +456,12 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
return -EINVAL;
if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) {
+ /*
+ * 'arch_compat == 0' would mean the guest should default to
+ * L1's compatibility. In this case, the guest would pick
+	 * the host's PCR and evaluate the corresponding capabilities.
+ */
+ cap = map_pcr_to_cap(guest_pcr_bit);
if (!(cap & nested_capabilities))
return -EINVAL;
}
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
index 5378eb40b162..8e6f5355f08b 100644
--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -138,6 +138,7 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
vector128 v;
int rc, i;
u16 iden;
+ u32 arch_compat = 0;
vcpu = gsm->data;
@@ -347,8 +348,23 @@ static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
break;
}
case KVMPPC_GSID_LOGICAL_PVR:
- rc = kvmppc_gse_put_u32(gsb, iden,
- vcpu->arch.vcore->arch_compat);
+ /*
+ * Though 'arch_compat == 0' would mean the default
+ * compatibility, arch_compat, being a Guest Wide
+ * Element, cannot be filled with a value of 0 in GSB
+	 * as this would result in a kernel trap.
+ * Hence, when `arch_compat == 0`, arch_compat should
+ * default to L1's PVR.
+ */
+ if (!vcpu->arch.vcore->arch_compat) {
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ arch_compat = PVR_ARCH_31;
+ else if (cpu_has_feature(CPU_FTR_ARCH_300))
+ arch_compat = PVR_ARCH_300;
+ } else {
+ arch_compat = vcpu->arch.vcore->arch_compat;
+ }
+ rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
break;
}
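
A sketch of the defaulting rule introduced here: a zero arch_compat cannot be placed in the guest state buffer, so the code falls back to the host's own architecture level. The PVR_ARCH_* values below match the kernel's definitions; the feature probe is a stand-in:

    #include <stdio.h>

    #define PVR_ARCH_300 0x0f000005u   /* ISA v3.0  (POWER9)  */
    #define PVR_ARCH_31  0x0f000006u   /* ISA v3.1  (POWER10) */

    static unsigned int effective_arch_compat(unsigned int arch_compat,
                                              int host_is_power10)
    {
        if (arch_compat)
            return arch_compat;
        /* default to L1's PVR when the guest did not pick one */
        return host_is_power10 ? PVR_ARCH_31 : PVR_ARCH_300;
    }

    int main(void)
    {
        printf("0x%x\n", effective_arch_compat(0, 1));
        return 0;
    }
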
diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c
index a70828a6d935..aa9aa11927b2 100644
--- a/arch/powerpc/mm/kasan/init_32.c
+++ b/arch/powerpc/mm/kasan/init_32.c
@@ -64,6 +64,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
if (ret)
return ret;
+ k_start = k_start & PAGE_MASK;
block = memblock_alloc(k_end - k_start, PAGE_SIZE);
if (!block)
return -ENOMEM;
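
The KASAN fix rounds k_start down to a page boundary before computing the allocation size, so the shadow block covers the whole first page. With a power-of-two page size, aligning down is a single mask (sketch, assuming 4 KiB pages):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long k_start = 0x12345;
        printf("0x%lx\n", k_start & PAGE_MASK);   /* -> 0x12000 */
        return 0;
    }
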
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index e966b2ad8ecd..b3327a358eb4 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -27,7 +27,7 @@
#include "mpc85xx.h"
-void __init mpc8536_ds_pic_init(void)
+static void __init mpc8536_ds_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c
index 1b59e45a0c64..19122daadb55 100644
--- a/arch/powerpc/platforms/85xx/mvme2500.c
+++ b/arch/powerpc/platforms/85xx/mvme2500.c
@@ -21,7 +21,7 @@
#include "mpc85xx.h"
-void __init mvme2500_pic_init(void)
+static void __init mvme2500_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0,
MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU,
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 10d6f1fa3327..491895ac8bcf 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -24,7 +24,7 @@
#include "mpc85xx.h"
-void __init p1010_rdb_pic_init(void)
+static void __init p1010_rdb_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 0dd786a061a6..adc3a2ee1415 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -370,7 +370,7 @@ exit:
*
* @pixclock: the wavelength, in picoseconds, of the clock
*/
-void p1022ds_set_pixel_clock(unsigned int pixclock)
+static void p1022ds_set_pixel_clock(unsigned int pixclock)
{
struct device_node *guts_np = NULL;
struct ccsr_guts __iomem *guts;
@@ -418,7 +418,7 @@ void p1022ds_set_pixel_clock(unsigned int pixclock)
/**
* p1022ds_valid_monitor_port: set the monitor port for sysfs
*/
-enum fsl_diu_monitor_port
+static enum fsl_diu_monitor_port
p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
{
switch (port) {
@@ -432,7 +432,7 @@ p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
#endif
-void __init p1022_ds_pic_init(void)
+static void __init p1022_ds_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 25ab6e9c1470..6198299d95b1 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -40,7 +40,7 @@
*
* @pixclock: the wavelength, in picoseconds, of the clock
*/
-void p1022rdk_set_pixel_clock(unsigned int pixclock)
+static void p1022rdk_set_pixel_clock(unsigned int pixclock)
{
struct device_node *guts_np = NULL;
struct ccsr_guts __iomem *guts;
@@ -88,7 +88,7 @@ void p1022rdk_set_pixel_clock(unsigned int pixclock)
/**
* p1022rdk_valid_monitor_port: set the monitor port for sysfs
*/
-enum fsl_diu_monitor_port
+static enum fsl_diu_monitor_port
p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port)
{
return FSL_DIU_PORT_DVI;
@@ -96,7 +96,7 @@ p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port)
#endif
-void __init p1022_rdk_pic_init(void)
+static void __init p1022_rdk_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index baa12eff6d5d..60e0b8947ce6 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -8,6 +8,8 @@
#include <linux/of_irq.h>
#include <linux/io.h>
+#include "socrates_fpga_pic.h"
+
/*
* The FPGA supports 9 interrupt sources, which can be routed to 3
* interrupt request lines of the MPIC. The line to be used can be
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 45f257fc1ade..2582427d8d01 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -37,7 +37,7 @@
#define MPC85xx_L2CTL_L2I 0x40000000 /* L2 flash invalidate */
#define MPC85xx_L2CTL_L2SIZ_MASK 0x30000000 /* L2 SRAM size (R/O) */
-void __init xes_mpc85xx_pic_init(void)
+static void __init xes_mpc85xx_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
0, 256, " OpenPIC ");
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 496e16c588aa..e8c4129697b1 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -574,29 +574,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
struct iommu_table_ops iommu_table_lpar_multi_ops;
-/*
- * iommu_table_setparms_lpar
- *
- * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
- */
-static void iommu_table_setparms_lpar(struct pci_controller *phb,
- struct device_node *dn,
- struct iommu_table *tbl,
- struct iommu_table_group *table_group,
- const __be32 *dma_window)
-{
- unsigned long offset, size, liobn;
-
- of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
-
- iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
- &iommu_table_lpar_multi_ops);
-
-
- table_group->tce32_start = offset;
- table_group->tce32_size = size;
-}
-
struct iommu_table_ops iommu_table_pseries_ops = {
.set = tce_build_pSeries,
.clear = tce_free_pSeries,
@@ -724,26 +701,71 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
* dynamic 64bit DMA window, walking up the device tree.
*/
static struct device_node *pci_dma_find(struct device_node *dn,
- const __be32 **dma_window)
+ struct dynamic_dma_window_prop *prop)
{
- const __be32 *dw = NULL;
+ const __be32 *default_prop = NULL;
+ const __be32 *ddw_prop = NULL;
+ struct device_node *rdn = NULL;
+ bool default_win = false, ddw_win = false;
for ( ; dn && PCI_DN(dn); dn = dn->parent) {
- dw = of_get_property(dn, "ibm,dma-window", NULL);
- if (dw) {
- if (dma_window)
- *dma_window = dw;
- return dn;
+ default_prop = of_get_property(dn, "ibm,dma-window", NULL);
+ if (default_prop) {
+ rdn = dn;
+ default_win = true;
+ }
+ ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL);
+ if (ddw_prop) {
+ rdn = dn;
+ ddw_win = true;
+ break;
+ }
+ ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL);
+ if (ddw_prop) {
+ rdn = dn;
+ ddw_win = true;
+ break;
}
- dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
- if (dw)
- return dn;
- dw = of_get_property(dn, DMA64_PROPNAME, NULL);
- if (dw)
- return dn;
+
+ /* At least found default window, which is the case for normal boot */
+ if (default_win)
+ break;
}
- return NULL;
+ /* For PCI devices there will always be a DMA window, either on the device
+ * or parent bus
+ */
+ WARN_ON(!(default_win | ddw_win));
+
+ /* caller doesn't want to get DMA window property */
+ if (!prop)
+ return rdn;
+
+ /* Parse the DMA window property. During normal system boot, only the
+ * default DMA window is passed in OF. But for kdump, a dedicated adapter
+ * might have both the default window and a DDW in the FDT. In this
+ * scenario, the DDW takes precedence over the default window.
+ */
+ if (ddw_win) {
+ struct dynamic_dma_window_prop *p;
+
+ p = (struct dynamic_dma_window_prop *)ddw_prop;
+ prop->liobn = p->liobn;
+ prop->dma_base = p->dma_base;
+ prop->tce_shift = p->tce_shift;
+ prop->window_shift = p->window_shift;
+ } else if (default_win) {
+ unsigned long offset, size, liobn;
+
+ of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size);
+
+ prop->liobn = cpu_to_be32((u32)liobn);
+ prop->dma_base = cpu_to_be64(offset);
+ prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K);
+ prop->window_shift = cpu_to_be32(order_base_2(size));
+ }
+
+ return rdn;
}
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
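
To make the precedence rule in pci_dma_find() explicit, here is a self-contained model (the enum and helper are illustrative only, not kernel code):

#include <assert.h>
#include <stdbool.h>

enum win_kind { WIN_NONE, WIN_DEFAULT, WIN_DDW };

/* A DDW (direct or dynamic 64-bit window) always wins, as in the kdump
 * case; the default "ibm,dma-window" is used otherwise; finding neither
 * corresponds to the WARN_ON() in the patch above. */
static enum win_kind pick_window(bool default_win, bool ddw_win)
{
	if (ddw_win)
		return WIN_DDW;
	if (default_win)
		return WIN_DEFAULT;
	return WIN_NONE;
}

int main(void)
{
	assert(pick_window(true, true)   == WIN_DDW);     /* kdump: DDW wins */
	assert(pick_window(true, false)  == WIN_DEFAULT); /* normal boot */
	assert(pick_window(false, false) == WIN_NONE);    /* would WARN_ON() */
	return 0;
}
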
@@ -751,17 +773,20 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
- const __be32 *dma_window = NULL;
+ struct dynamic_dma_window_prop prop;
dn = pci_bus_to_OF_node(bus);
pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
dn);
- pdn = pci_dma_find(dn, &dma_window);
+ pdn = pci_dma_find(dn, &prop);
- if (dma_window == NULL)
- pr_debug(" no ibm,dma-window property !\n");
+ /* In the PPC architecture, there will always be a DMA window on the bus
+ * or one of the parent buses. During a regular boot, the ibm,dma-window
+ * property defines the DMA window. For kdump, there will at least be a
+ * default window, a DDW, or both.
+ */
ppci = PCI_DN(pdn);
@@ -771,13 +796,24 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
tbl = ppci->table_group->tables[0];
- if (dma_window) {
- iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
- ppci->table_group, dma_window);
- if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
- panic("Failed to initialize iommu table");
- }
+ iommu_table_setparms_common(tbl, ppci->phb->bus->number,
+ be32_to_cpu(prop.liobn),
+ be64_to_cpu(prop.dma_base),
+ 1ULL << be32_to_cpu(prop.window_shift),
+ be32_to_cpu(prop.tce_shift), NULL,
+ &iommu_table_lpar_multi_ops);
+
+ /* Only meaningful for a normal boot with the default window. It is
+ * harmless to set these from a 64-bit DDW during kdump, since they
+ * are not used in that path.
+ */
+ ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
+ ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
+
+ if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
+
iommu_register_group(ppci->table_group,
pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->table_group);
@@ -968,6 +1004,12 @@ static void find_existing_ddw_windows_named(const char *name)
continue;
}
+ /* If there are DDWs in OF at system initialization time, this is a
+ * kexec/kdump boot. A DDW could be direct or dynamic. We simply mark
+ * every DDW as "dynamic" since this is the kdump path and performance
+ * is not a concern. ddw_list_new_entry() will set
+ * window->direct = false.
+ */
window = ddw_list_new_entry(pdn, dma64);
if (!window) {
of_node_put(pdn);
@@ -1524,8 +1566,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
- const __be32 *dma_window = NULL;
struct pci_dn *pci;
+ struct dynamic_dma_window_prop prop;
pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@@ -1538,7 +1580,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
pr_debug(" node is %pOF\n", dn);
- pdn = pci_dma_find(dn, &dma_window);
+ pdn = pci_dma_find(dn, &prop);
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%pOF\n",
@@ -1551,8 +1593,20 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
if (!pci->table_group) {
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
tbl = pci->table_group->tables[0];
- iommu_table_setparms_lpar(pci->phb, pdn, tbl,
- pci->table_group, dma_window);
+
+ iommu_table_setparms_common(tbl, pci->phb->bus->number,
+ be32_to_cpu(prop.liobn),
+ be64_to_cpu(prop.dma_base),
+ 1ULL << be32_to_cpu(prop.window_shift),
+ be32_to_cpu(prop.tce_shift), NULL,
+ &iommu_table_lpar_multi_ops);
+
+ /* Only meaningful for a normal boot with the default window. It is
+ * harmless to set these from a 64-bit DDW during kdump, since they
+ * are not used in that path.
+ */
+ pci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
+ pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
iommu_init_table(tbl, pci->phb->node, 0, 0);
iommu_register_group(pci->table_group,
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4561667832ed..4e9916bb03d7 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -662,8 +662,12 @@ u64 pseries_paravirt_steal_clock(int cpu)
{
struct lppaca *lppaca = &lppaca_of(cpu);
- return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
- be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb));
+ /*
+ * VPA steal time counters are reported at TB frequency. Hence do a
+ * conversion to ns before returning
+ */
+ return tb_to_ns(be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
+ be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)));
}
#endif
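
The conversion the comment describes is, in plain arithmetic (the kernel's tb_to_ns() uses a pre-scaled multiply; 512 MHz is a typical but assumed timebase frequency):

#include <stdio.h>

#define TB_TICKS_PER_SEC 512000000ULL /* assumed POWER timebase rate */
#define NSEC_PER_SEC     1000000000ULL

static unsigned long long tb_to_ns_model(unsigned long long tb)
{
	/* Plain math shown for clarity; the kernel scales with a
	 * precomputed multiplier instead of dividing here. */
	return tb * NSEC_PER_SEC / TB_TICKS_PER_SEC;
}

int main(void)
{
	/* One second worth of timebase ticks converts to 1e9 ns. */
	printf("%llu\n", tb_to_ns_model(TB_TICKS_PER_SEC)); /* 1000000000 */
	return 0;
}
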
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 4ba824568119..4448386268d9 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -35,6 +35,8 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
pseries_msi_allocate_domains(phb);
+ ppc_iommu_register_device(phb);
+
/* Create EEH devices for the PHB */
eeh_phb_pe_create(phb);
@@ -76,6 +78,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
}
}
+ ppc_iommu_unregister_device(phb);
+
pseries_msi_free_domains(phb);
/* Keep a reference so phb isn't freed yet */
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
index 5020044400dc..4de57ba52236 100644
--- a/arch/powerpc/sysdev/udbg_memcons.c
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -41,7 +41,7 @@ struct memcons memcons = {
.input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
};
-void memcons_putc(char c)
+static void memcons_putc(char c)
{
char *new_output_pos;
@@ -54,7 +54,7 @@ void memcons_putc(char c)
memcons.output_pos = new_output_pos;
}
-int memcons_getc_poll(void)
+static int memcons_getc_poll(void)
{
char c;
char *new_input_pos;
@@ -77,7 +77,7 @@ int memcons_getc_poll(void)
return -1;
}
-int memcons_getc(void)
+static int memcons_getc(void)
{
int c;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index b549499eb363..e3142ce531a0 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -53,6 +53,7 @@ config RISCV
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
@@ -66,9 +67,10 @@ config RISCV
select CLINT_TIMER if !MMU
select CLONE_BACKWARDS
select COMMON_CLK
- select CPU_PM if CPU_IDLE || HIBERNATION
+ select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
select EDAC_SUPPORT
select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
+ select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
select GENERIC_ARCH_TOPOLOGY
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
@@ -115,6 +117,7 @@ config RISCV
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
@@ -142,6 +145,8 @@ config RISCV
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
+ select HAVE_SAMPLE_FTRACE_DIRECT
+ select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
@@ -183,6 +188,20 @@ config HAVE_SHADOW_CALL_STACK
# https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/a484e843e6eeb51f0cb7b8819e50da6d2444d769
depends on $(ld-option,--no-relax-gp)
+config RISCV_USE_LINKER_RELAXATION
+ def_bool y
+ # https://github.com/llvm/llvm-project/commit/6611d58f5bbcbec77262d392e2923e1d680f6985
+ depends on !LD_IS_LLD || LLD_VERSION >= 150000
+
+# https://github.com/llvm/llvm-project/commit/bbc0f99f3bc96f1db16f649fc21dd18e5b0918f6
+config ARCH_HAS_BROKEN_DWARF5
+ def_bool y
+ depends on RISCV_USE_LINKER_RELAXATION
+ # https://github.com/llvm/llvm-project/commit/1df5ea29b43690b6622db2cad7b745607ca4de6a
+ depends on AS_IS_LLVM && AS_VERSION < 180000
+ # https://github.com/llvm/llvm-project/commit/7ffabb61a5569444b5ac9322e22e5471cc5e4a77
+ depends on LD_IS_LLD && LLD_VERSION < 180000
+
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8
@@ -296,7 +315,6 @@ config AS_HAS_OPTION_ARCH
# https://reviews.llvm.org/D123515
def_bool y
depends on $(as-instr, .option arch$(comma) +m)
- depends on !$(as-instr, .option arch$(comma) -i)
source "arch/riscv/Kconfig.socs"
source "arch/riscv/Kconfig.errata"
@@ -529,6 +547,28 @@ config RISCV_ISA_V_DEFAULT_ENABLE
If you don't know what to do here, say Y.
+config RISCV_ISA_V_UCOPY_THRESHOLD
+ int "Threshold size for vectorized user copies"
+ depends on RISCV_ISA_V
+ default 768
+ help
+ Prefer using vectorized copy_to_user()/copy_from_user() when the
+ workload size exceeds this value.
+
+config RISCV_ISA_V_PREEMPTIVE
+ bool "Run kernel-mode Vector with kernel preemption"
+ depends on PREEMPTION
+ depends on RISCV_ISA_V
+ default y
+ help
+ Usually, in-kernel SIMD routines are run with preemption disabled.
+ Functions which invoke long-running SIMD thus must yield the core's
+ vector unit to avoid blocking other tasks for too long.
+
+ This config allows the kernel to run SIMD without explicitly disabling
+ preemption. Enabling it will result in higher memory consumption due
+ to the allocation of a per-task kernel Vector context.
+
config TOOLCHAIN_HAS_ZBB
bool
default y
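
The RISCV_ISA_V_UCOPY_THRESHOLD option above tunes a size-based dispatch of roughly this shape (a sketch with stand-in helpers; the real selection happens in the uaccess copy routines):

#include <stddef.h>
#include <string.h>

#define UCOPY_THRESHOLD 768 /* mirrors the Kconfig default */

/* Stand-ins: the real paths are the scalar and vector uaccess copies. */
static void scalar_copy(void *dst, const void *src, size_t n) { memcpy(dst, src, n); }
static void vector_copy(void *dst, const void *src, size_t n) { memcpy(dst, src, n); }

static void copy_dispatch(void *dst, const void *src, size_t n)
{
	/* Below the threshold, vector setup cost outweighs the benefit. */
	if (n < UCOPY_THRESHOLD)
		scalar_copy(dst, src, n);
	else
		vector_copy(dst, src, n);
}

int main(void)
{
	char a[1024], b[1024] = { 0 };

	copy_dispatch(a, b, 64);   /* takes the scalar path */
	copy_dispatch(a, b, 1024); /* takes the vector path */
	return 0;
}
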
@@ -655,6 +695,20 @@ config RISCV_MISALIGNED
load/store for both kernel and userspace. When disabled, misaligned
accesses will generate SIGBUS in userspace and panic in the kernel.
+config RISCV_EFFICIENT_UNALIGNED_ACCESS
+ bool "Assume the CPU supports fast unaligned memory accesses"
+ depends on NONPORTABLE
+ select DCACHE_WORD_ACCESS if MMU
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ help
+ Say Y here if you want the kernel to assume that the CPU supports
+ efficient unaligned memory accesses. When enabled, this option
+ improves the performance of the kernel on such CPUs. However, the
+ kernel will run much more slowly, or will not be able to run at all,
+ on CPUs that do not support efficient unaligned memory accesses.
+
+ If unsure what to do here, say N.
+
endmenu # "Platform type"
menu "Kernel features"
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index f5c432b005e7..910ba8837add 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -98,6 +98,7 @@ config ERRATA_THEAD_CMO
depends on ERRATA_THEAD && MMU
select DMA_DIRECT_REMAP
select RISCV_DMA_NONCOHERENT
+ select RISCV_NONSTANDARD_CACHE_OPS
default y
help
This will apply the cache management errata to handle the
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index a74be78678eb..0b7d109258e7 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -43,8 +43,7 @@ else
KBUILD_LDFLAGS += -melf32lriscv
endif
-ifeq ($(CONFIG_LD_IS_LLD),y)
-ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 150000),y)
+ifndef CONFIG_RISCV_USE_LINKER_RELAXATION
KBUILD_CFLAGS += -mno-relax
KBUILD_AFLAGS += -mno-relax
ifndef CONFIG_AS_IS_LLVM
@@ -52,7 +51,6 @@ ifndef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS += -Wa,-mno-relax
endif
endif
-endif
ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
KBUILD_LDFLAGS += --no-relax-gp
@@ -108,7 +106,9 @@ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
# unaligned accesses. While unaligned accesses are explicitly allowed in the
# RISC-V ISA, they're emulated by machine mode traps on all extant
# architectures. It's faster to have GCC emit only aligned accesses.
+ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),y)
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+endif
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
@@ -163,6 +163,8 @@ BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi
all: $(notdir $(KBUILD_IMAGE))
+loader.bin: loader
+Image.gz loader vmlinuz.efi: Image
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@$(kecho) ' Kernel: $(boot)/$@ is ready'
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index 07387f9c135c..72b87b08ab44 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -123,6 +123,7 @@
interrupt-parent = <&gpio>;
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
index 93256540d078..ead1cc35d88b 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi
+++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
@@ -93,144 +93,160 @@
<&cpu63_intc 3>;
};
- clint_mtimer0: timer@70ac000000 {
+ clint_mtimer0: timer@70ac004000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac000000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac004000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu0_intc 7>,
<&cpu1_intc 7>,
<&cpu2_intc 7>,
<&cpu3_intc 7>;
};
- clint_mtimer1: timer@70ac010000 {
+ clint_mtimer1: timer@70ac014000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac010000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac014000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu4_intc 7>,
<&cpu5_intc 7>,
<&cpu6_intc 7>,
<&cpu7_intc 7>;
};
- clint_mtimer2: timer@70ac020000 {
+ clint_mtimer2: timer@70ac024000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac020000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac024000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu8_intc 7>,
<&cpu9_intc 7>,
<&cpu10_intc 7>,
<&cpu11_intc 7>;
};
- clint_mtimer3: timer@70ac030000 {
+ clint_mtimer3: timer@70ac034000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac030000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac034000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu12_intc 7>,
<&cpu13_intc 7>,
<&cpu14_intc 7>,
<&cpu15_intc 7>;
};
- clint_mtimer4: timer@70ac040000 {
+ clint_mtimer4: timer@70ac044000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac040000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac044000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu16_intc 7>,
<&cpu17_intc 7>,
<&cpu18_intc 7>,
<&cpu19_intc 7>;
};
- clint_mtimer5: timer@70ac050000 {
+ clint_mtimer5: timer@70ac054000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac050000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac054000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu20_intc 7>,
<&cpu21_intc 7>,
<&cpu22_intc 7>,
<&cpu23_intc 7>;
};
- clint_mtimer6: timer@70ac060000 {
+ clint_mtimer6: timer@70ac064000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac060000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac064000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu24_intc 7>,
<&cpu25_intc 7>,
<&cpu26_intc 7>,
<&cpu27_intc 7>;
};
- clint_mtimer7: timer@70ac070000 {
+ clint_mtimer7: timer@70ac074000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac070000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac074000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu28_intc 7>,
<&cpu29_intc 7>,
<&cpu30_intc 7>,
<&cpu31_intc 7>;
};
- clint_mtimer8: timer@70ac080000 {
+ clint_mtimer8: timer@70ac084000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac080000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac084000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu32_intc 7>,
<&cpu33_intc 7>,
<&cpu34_intc 7>,
<&cpu35_intc 7>;
};
- clint_mtimer9: timer@70ac090000 {
+ clint_mtimer9: timer@70ac094000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac090000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac094000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu36_intc 7>,
<&cpu37_intc 7>,
<&cpu38_intc 7>,
<&cpu39_intc 7>;
};
- clint_mtimer10: timer@70ac0a0000 {
+ clint_mtimer10: timer@70ac0a4000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac0a0000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac0a4000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu40_intc 7>,
<&cpu41_intc 7>,
<&cpu42_intc 7>,
<&cpu43_intc 7>;
};
- clint_mtimer11: timer@70ac0b0000 {
+ clint_mtimer11: timer@70ac0b4000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac0b0000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac0b4000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu44_intc 7>,
<&cpu45_intc 7>,
<&cpu46_intc 7>,
<&cpu47_intc 7>;
};
- clint_mtimer12: timer@70ac0c0000 {
+ clint_mtimer12: timer@70ac0c4000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac0c0000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac0c4000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu48_intc 7>,
<&cpu49_intc 7>,
<&cpu50_intc 7>,
<&cpu51_intc 7>;
};
- clint_mtimer13: timer@70ac0d0000 {
+ clint_mtimer13: timer@70ac0d4000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac0d0000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac0d4000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu52_intc 7>,
<&cpu53_intc 7>,
<&cpu54_intc 7>,
<&cpu55_intc 7>;
};
- clint_mtimer14: timer@70ac0e0000 {
+ clint_mtimer14: timer@70ac0e4000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac0e0000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac0e4000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu56_intc 7>,
<&cpu57_intc 7>,
<&cpu58_intc 7>,
<&cpu59_intc 7>;
};
- clint_mtimer15: timer@70ac0f0000 {
+ clint_mtimer15: timer@70ac0f4000 {
compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
- reg = <0x00000070 0xac0f0000 0x00000000 0x00007ff8>;
+ reg = <0x00000070 0xac0f4000 0x00000000 0x0000c000>;
+ reg-names = "mtimecmp";
interrupts-extended = <&cpu60_intc 7>,
<&cpu61_intc 7>,
<&cpu62_intc 7>,
diff --git a/arch/riscv/boot/dts/starfive/jh7100.dtsi b/arch/riscv/boot/dts/starfive/jh7100.dtsi
index c216aaecac53..8bcf36d07f3f 100644
--- a/arch/riscv/boot/dts/starfive/jh7100.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7100.dtsi
@@ -96,14 +96,14 @@
thermal-sensors = <&sfctemp>;
trips {
- cpu_alert0 {
+ cpu-alert0 {
/* milliCelsius */
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit {
+ cpu-crit {
/* milliCelsius */
temperature = <90000>;
hysteresis = <2000>;
@@ -113,28 +113,28 @@
};
};
- osc_sys: osc_sys {
+ osc_sys: osc-sys {
compatible = "fixed-clock";
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
};
- osc_aud: osc_aud {
+ osc_aud: osc-aud {
compatible = "fixed-clock";
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
};
- gmac_rmii_ref: gmac_rmii_ref {
+ gmac_rmii_ref: gmac-rmii-ref {
compatible = "fixed-clock";
#clock-cells = <0>;
/* Should be overridden by the board when needed */
clock-frequency = <0>;
};
- gmac_gr_mii_rxclk: gmac_gr_mii_rxclk {
+ gmac_gr_mii_rxclk: gmac-gr-mii-rxclk {
compatible = "fixed-clock";
#clock-cells = <0>;
/* Should be overridden by the board when needed */
diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi
index 45213cdf50dc..74ed3b9264d8 100644
--- a/arch/riscv/boot/dts/starfive/jh7110.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi
@@ -237,14 +237,14 @@
};
trips {
- cpu_alert0: cpu_alert0 {
+ cpu_alert0: cpu-alert0 {
/* milliCelsius */
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit {
+ cpu-crit {
/* milliCelsius */
temperature = <100000>;
hysteresis = <2000>;
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 905881282a7c..eaf34e871e30 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -149,6 +149,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index 0554ed4bf087..b1c410bbc1ae 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -12,8 +12,10 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
+#include <asm/dma-noncoherent.h>
#include <asm/errata_list.h>
#include <asm/hwprobe.h>
+#include <asm/io.h>
#include <asm/patch.h>
#include <asm/vendorid_list.h>
@@ -33,6 +35,69 @@ static bool errata_probe_pbmt(unsigned int stage,
return false;
}
+/*
+ * th.dcache.ipa rs1 (invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01010 rs1 000 00000 0001011
+ * th.dcache.iva rs1 (invalidate, virtual address)
+ * 0000001 00110 rs1 000 00000 0001011
+ *
+ * th.dcache.cpa rs1 (clean, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01001 rs1 000 00000 0001011
+ * th.dcache.cva rs1 (clean, virtual address)
+ * 0000001 00101 rs1 000 00000 0001011
+ *
+ * th.dcache.cipa rs1 (clean then invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01011 rs1 000 00000 0001011
+ * th.dcache.civa rs1 (clean then invalidate, virtual address)
+ * 0000001 00111 rs1 000 00000 0001011
+ *
+ * th.sync.s (make sure all cache operations finished)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000000 11001 00000 000 00000 0001011
+ */
+#define THEAD_INVAL_A0 ".long 0x02a5000b"
+#define THEAD_CLEAN_A0 ".long 0x0295000b"
+#define THEAD_FLUSH_A0 ".long 0x02b5000b"
+#define THEAD_SYNC_S ".long 0x0190000b"
+
+#define THEAD_CMO_OP(_op, _start, _size, _cachesize) \
+asm volatile("mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ THEAD_##_op##_A0 "\n\t" \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ THEAD_SYNC_S \
+ : : "r"(_cachesize), \
+ "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \
+ "r"((unsigned long)(_start) + (_size)) \
+ : "a0")
+
+static void thead_errata_cache_inv(phys_addr_t paddr, size_t size)
+{
+ THEAD_CMO_OP(INVAL, paddr, size, riscv_cbom_block_size);
+}
+
+static void thead_errata_cache_wback(phys_addr_t paddr, size_t size)
+{
+ THEAD_CMO_OP(CLEAN, paddr, size, riscv_cbom_block_size);
+}
+
+static void thead_errata_cache_wback_inv(phys_addr_t paddr, size_t size)
+{
+ THEAD_CMO_OP(FLUSH, paddr, size, riscv_cbom_block_size);
+}
+
+static const struct riscv_nonstd_cache_ops thead_errata_cmo_ops = {
+ .wback = &thead_errata_cache_wback,
+ .inv = &thead_errata_cache_inv,
+ .wback_inv = &thead_errata_cache_wback_inv,
+};
+
static bool errata_probe_cmo(unsigned int stage,
unsigned long arch_id, unsigned long impid)
{
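
The opaque .long values above can be recomputed from the encoding table in the comment; for example, for THEAD_INVAL_A0 (th.dcache.ipa with rs1 = a0), as this small check shows:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t funct7 = 0x01; /* 0000001 */
	uint32_t rs2    = 0x0a; /* 01010: the dcache.ipa sub-opcode */
	uint32_t rs1    = 10;   /* a0 is x10 */
	uint32_t opcode = 0x0b; /* 0001011: the custom-0 opcode space */

	uint32_t insn = (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | opcode;
	printf("0x%08x\n", insn); /* prints 0x02a5000b, i.e. THEAD_INVAL_A0 */
	return 0;
}
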
@@ -48,6 +113,7 @@ static bool errata_probe_cmo(unsigned int stage,
if (stage == RISCV_ALTERNATIVES_BOOT) {
riscv_cbom_block_size = L1_CACHE_BYTES;
riscv_noncoherent_supported();
+ riscv_noncoherent_register_cache_ops(&thead_errata_cmo_ops);
}
return true;
@@ -77,8 +143,7 @@ static u32 thead_errata_probe(unsigned int stage,
if (errata_probe_pbmt(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
- if (errata_probe_cmo(stage, archid, impid))
- cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
+ errata_probe_cmo(stage, archid, impid);
if (errata_probe_pmu(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h
new file mode 100644
index 000000000000..85b2c443823e
--- /dev/null
+++ b/arch/riscv/include/asm/arch_hweight.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Based on arch/x86/include/asm/arch_hweight.h
+ */
+
+#ifndef _ASM_RISCV_HWEIGHT_H
+#define _ASM_RISCV_HWEIGHT_H
+
+#include <asm/alternative-macros.h>
+#include <asm/hwcap.h>
+
+#if (BITS_PER_LONG == 64)
+#define CPOPW "cpopw "
+#elif (BITS_PER_LONG == 32)
+#define CPOPW "cpop "
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+static __always_inline unsigned int __arch_hweight32(unsigned int w)
+{
+#ifdef CONFIG_RISCV_ISA_ZBB
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm (".option push\n"
+ ".option arch,+zbb\n"
+ CPOPW "%0, %0\n"
+ ".option pop\n"
+ : "+r" (w) : :);
+
+ return w;
+
+legacy:
+#endif
+ return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __arch_hweight32(w & 0xff);
+}
+
+#if BITS_PER_LONG == 64
+static __always_inline unsigned long __arch_hweight64(__u64 w)
+{
+# ifdef CONFIG_RISCV_ISA_ZBB
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ : : : : legacy);
+
+ asm (".option push\n"
+ ".option arch,+zbb\n"
+ "cpop %0, %0\n"
+ ".option pop\n"
+ : "+r" (w) : :);
+
+ return w;
+
+legacy:
+# endif
+ return __sw_hweight64(w);
+}
+#else /* BITS_PER_LONG == 64 */
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ return __arch_hweight32((u32)w) +
+ __arch_hweight32((u32)(w >> 32));
+}
+#endif /* !(BITS_PER_LONG == 64) */
+
+#endif /* _ASM_RISCV_HWEIGHT_H */
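
What cpop computes, modelled portably (this mirrors the generic __sw_hweight32 fallback taken when Zbb is absent):

#include <stdio.h>

static unsigned int sw_hweight32(unsigned int w)
{
	unsigned int count = 0;

	while (w) {
		w &= w - 1; /* clear the lowest set bit */
		count++;
	}
	return count;
}

int main(void)
{
	printf("%u\n", sw_hweight32(0xf0f0f0f0u)); /* 16 */
	return 0;
}
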
diff --git a/arch/riscv/include/asm/archrandom.h b/arch/riscv/include/asm/archrandom.h
new file mode 100644
index 000000000000..5345360adfb9
--- /dev/null
+++ b/arch/riscv/include/asm/archrandom.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel interface for the RISCV arch_random_* functions
+ *
+ * Copyright (c) 2023 Rivos Inc.
+ *
+ */
+
+#ifndef ASM_RISCV_ARCHRANDOM_H
+#define ASM_RISCV_ARCHRANDOM_H
+
+#include <asm/csr.h>
+#include <asm/processor.h>
+
+#define SEED_RETRY_LOOPS 100
+
+static inline bool __must_check csr_seed_long(unsigned long *v)
+{
+ unsigned int retry = SEED_RETRY_LOOPS, valid_seeds = 0;
+ const int needed_seeds = sizeof(long) / sizeof(u16);
+ u16 *entropy = (u16 *)v;
+
+ do {
+ /*
+ * The SEED CSR must be accessed with a read-write instruction.
+ */
+ unsigned long csr_seed = csr_swap(CSR_SEED, 0);
+ unsigned long opst = csr_seed & SEED_OPST_MASK;
+
+ switch (opst) {
+ case SEED_OPST_ES16:
+ entropy[valid_seeds++] = csr_seed & SEED_ENTROPY_MASK;
+ if (valid_seeds == needed_seeds)
+ return true;
+ break;
+
+ case SEED_OPST_DEAD:
+ pr_err_once("archrandom: Unrecoverable error\n");
+ return false;
+
+ case SEED_OPST_BIST:
+ case SEED_OPST_WAIT:
+ default:
+ cpu_relax();
+ continue;
+ }
+ } while (--retry);
+
+ return false;
+}
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ if (!max_longs)
+ return 0;
+
+ /*
+ * If Zkr is supported and csr_seed_long succeeds, we return one long
+ * worth of entropy.
+ */
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZKR) && csr_seed_long(v))
+ return 1;
+
+ return 0;
+}
+
+#endif /* ASM_RISCV_ARCHRANDOM_H */
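
A standalone model of the accumulation loop above (fixed sample values stand in for the CSR_SEED reads; on 64-bit, four ES16 samples fill one long):

#include <stdio.h>

int main(void)
{
	unsigned long v = 0;
	unsigned short *entropy = (unsigned short *)&v;
	const int needed = sizeof(long) / sizeof(unsigned short); /* 4 on 64-bit */
	unsigned short samples[] = { 0x1111, 0x2222, 0x3333, 0x4444 };

	for (int i = 0; i < needed; i++)
		entropy[i] = samples[i]; /* each ES16 read yields 16 bits */

	/* On a little-endian host: "4 seeds -> 0x4444333322221111" */
	printf("%d seeds -> 0x%016lx\n", needed, v);
	return 0;
}
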
diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h
index 00a96e7a9664..0c8bfd54fc4e 100644
--- a/arch/riscv/include/asm/asm-extable.h
+++ b/arch/riscv/include/asm/asm-extable.h
@@ -6,6 +6,7 @@
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
#define EX_TYPE_UACCESS_ERR_ZERO 3
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
#ifdef CONFIG_MMU
@@ -47,6 +48,11 @@
#define EX_DATA_REG_ZERO_SHIFT 5
#define EX_DATA_REG_ZERO GENMASK(9, 5)
+#define EX_DATA_REG_DATA_SHIFT 0
+#define EX_DATA_REG_DATA GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT 5
+#define EX_DATA_REG_ADDR GENMASK(9, 5)
+
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
@@ -62,6 +68,15 @@
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
+#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD), \
+ "(" \
+ EX_DATA_REG(DATA, data) " | " \
+ EX_DATA_REG(ADDR, addr) \
+ ")")
+
#endif /* __ASSEMBLY__ */
#else /* CONFIG_MMU */
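
The two 5-bit register fields above pack into a single extable data word; a quick check with assumed registers a0 (x10) as the data register and a1 (x11) as the address register:

#include <stdio.h>

int main(void)
{
	unsigned int data_gpr = 10; /* a0, bits [4:0] via EX_DATA_REG_DATA */
	unsigned int addr_gpr = 11; /* a1, bits [9:5] via EX_DATA_REG_ADDR */

	unsigned int ex_data = (data_gpr << 0) | (addr_gpr << 5);
	printf("0x%x\n", ex_data); /* 0x16a */
	return 0;
}
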
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index 36b955c762ba..cd627ec289f1 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -9,6 +9,33 @@ long long __lshrti3(long long a, int b);
long long __ashrti3(long long a, int b);
long long __ashlti3(long long a, int b);
+#ifdef CONFIG_RISCV_ISA_V
+
+#ifdef CONFIG_MMU
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n);
+#endif /* CONFIG_MMU */
+
+void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2);
+void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3);
+void xor_regs_4_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4);
+void xor_regs_5_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4,
+ const unsigned long *__restrict p5);
+
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs);
+asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs);
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
+
+#endif /* CONFIG_RISCV_ISA_V */
#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs)
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 224b4dc02b50..329d8244a9b3 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -39,7 +39,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
{
int num;
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -95,7 +95,7 @@ static __always_inline unsigned long variable__fls(unsigned long word)
{
int num;
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -154,7 +154,7 @@ static __always_inline int variable_ffs(int x)
if (!x)
return 0;
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -209,7 +209,7 @@ static __always_inline int variable_fls(unsigned int x)
if (!x)
return 0;
- asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -271,7 +271,9 @@ legacy:
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
#if (BITS_PER_LONG == 64)
#define __AMO(op) "amo" #op ".d"
diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h
new file mode 100644
index 000000000000..88e6f1499e88
--- /dev/null
+++ b/arch/riscv/include/asm/checksum.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Checksum routines
+ *
+ * Copyright (C) 2023 Rivos Inc.
+ */
+#ifndef __ASM_RISCV_CHECKSUM_H
+#define __ASM_RISCV_CHECKSUM_H
+
+#include <linux/in6.h>
+#include <linux/uaccess.h>
+
+#define ip_fast_csum ip_fast_csum
+
+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
+/* Default version is sufficient for 32 bit */
+#ifndef CONFIG_32BIT
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum sum);
+#endif
+
+/* Define riscv versions of functions before importing asm-generic/checksum.h */
+#include <asm-generic/checksum.h>
+
+/**
+ * Quickly compute an IP checksum with the assumption that IPv4 headers will
+ * always be in multiples of 32-bits, and have an ihl of at least 5.
+ *
+ * @ihl: the number of 32 bit segments and must be greater than or equal to 5.
+ * @iph: assumed to be word aligned given that NET_IP_ALIGN is set to 2 on
+ * riscv, defining IP headers to be aligned.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned long csum = 0;
+ int pos = 0;
+
+ do {
+ csum += ((const unsigned int *)iph)[pos];
+ if (IS_ENABLED(CONFIG_32BIT))
+ csum += csum < ((const unsigned int *)iph)[pos];
+ } while (++pos < ihl);
+
+ /*
+ * Zbb saves only three instructions on 32-bit and five on 64-bit, so it
+ * is not worth checking for support without Alternatives.
+ */
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ unsigned long fold_temp;
+
+ asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ :
+ :
+ :
+ : no_zbb);
+
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ asm(".option push \n\
+ .option arch,+zbb \n\
+ not %[fold_temp], %[csum] \n\
+ rori %[csum], %[csum], 16 \n\
+ sub %[csum], %[fold_temp], %[csum] \n\
+ .option pop"
+ : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
+ } else {
+ asm(".option push \n\
+ .option arch,+zbb \n\
+ rori %[fold_temp], %[csum], 32 \n\
+ add %[csum], %[fold_temp], %[csum] \n\
+ srli %[csum], %[csum], 32 \n\
+ not %[fold_temp], %[csum] \n\
+ roriw %[csum], %[csum], 16 \n\
+ subw %[csum], %[fold_temp], %[csum] \n\
+ .option pop"
+ : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
+ }
+ return (__force __sum16)(csum >> 16);
+ }
+no_zbb:
+#ifndef CONFIG_32BIT
+ csum += ror64(csum, 32);
+ csum >>= 32;
+#endif
+ return csum_fold((__force __wsum)csum);
+}
+
+#endif /* __ASM_RISCV_CHECKSUM_H */
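
A portable model of the no-Zbb fold path at the end of ip_fast_csum() (hypothetical accumulator value; csum_fold() is expanded inline):

#include <stdio.h>
#include <stdint.h>

static uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

int main(void)
{
	uint64_t csum = 0x00000001ffff0003ULL; /* assumed accumulator */

	csum += ror64(csum, 32); /* add high and low 32-bit halves */
	csum >>= 32;             /* keep the folded 32-bit value */

	uint32_t c = (uint32_t)csum;
	c = (c & 0xffff) + (c >> 16); /* csum_fold: fold to 16 bits ... */
	c = (c & 0xffff) + (c >> 16); /* ... twice, to absorb the carry */
	printf("0x%04x\n", (uint16_t)~c); /* prints 0xfffb */
	return 0;
}
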
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index fbdde8b8a47e..0bd11862b760 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -80,7 +80,7 @@ riscv_has_extension_likely(const unsigned long ext)
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
:
: [ext] "i" (ext)
@@ -103,7 +103,7 @@ riscv_has_extension_unlikely(const unsigned long ext)
"ext must be < RISCV_ISA_EXT_MAX");
if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
:
: [ext] "i" (ext)
@@ -135,4 +135,6 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
+DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+
#endif
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 306a19a5509c..2468c55933cd 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -411,10 +411,20 @@
#define CSR_VTYPE 0xc21
#define CSR_VLENB 0xc22
+/* Scalar Crypto Extension - Entropy */
+#define CSR_SEED 0x015
+#define SEED_OPST_MASK _AC(0xC0000000, UL)
+#define SEED_OPST_BIST _AC(0x00000000, UL)
+#define SEED_OPST_WAIT _AC(0x40000000, UL)
+#define SEED_OPST_ES16 _AC(0x80000000, UL)
+#define SEED_OPST_DEAD _AC(0xC0000000, UL)
+#define SEED_ENTROPY_MASK _AC(0xFFFF, UL)
+
#ifdef CONFIG_RISCV_M_MODE
# define CSR_STATUS CSR_MSTATUS
# define CSR_IE CSR_MIE
# define CSR_TVEC CSR_MTVEC
+# define CSR_ENVCFG CSR_MENVCFG
# define CSR_SCRATCH CSR_MSCRATCH
# define CSR_EPC CSR_MEPC
# define CSR_CAUSE CSR_MCAUSE
@@ -439,6 +449,7 @@
# define CSR_STATUS CSR_SSTATUS
# define CSR_IE CSR_SIE
# define CSR_TVEC CSR_STVEC
+# define CSR_ENVCFG CSR_SENVCFG
# define CSR_SCRATCH CSR_SSCRATCH
# define CSR_EPC CSR_SEPC
# define CSR_CAUSE CSR_SCAUSE
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
index 7ab5e34318c8..2293e535f865 100644
--- a/arch/riscv/include/asm/entry-common.h
+++ b/arch/riscv/include/asm/entry-common.h
@@ -4,6 +4,23 @@
#define _ASM_RISCV_ENTRY_COMMON_H
#include <asm/stacktrace.h>
+#include <asm/thread_info.h>
+#include <asm/vector.h>
+
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ if (ti_work & _TIF_RISCV_V_DEFER_RESTORE) {
+ clear_thread_flag(TIF_RISCV_V_DEFER_RESTORE);
+ /*
+ * We are already called with irq disabled, so go without
+ * keeping track of riscv_v_flags.
+ */
+ riscv_v_vstate_restore(&current->thread.vstate, regs);
+ }
+}
+
+#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
void handle_page_fault(struct pt_regs *regs);
void handle_break(struct pt_regs *regs);
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 83ed25e43553..ea33288f8a25 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -24,9 +24,8 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_PBMT 0
-#define ERRATA_THEAD_CMO 1
-#define ERRATA_THEAD_PMU 2
-#define ERRATA_THEAD_NUMBER 3
+#define ERRATA_THEAD_PMU 1
+#define ERRATA_THEAD_NUMBER 2
#endif
#ifdef __ASSEMBLY__
@@ -94,54 +93,17 @@ asm volatile(ALTERNATIVE( \
#define ALT_THEAD_PMA(_val)
#endif
-/*
- * th.dcache.ipa rs1 (invalidate, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000001 01010 rs1 000 00000 0001011
- * th.dache.iva rs1 (invalida, virtual address)
- * 0000001 00110 rs1 000 00000 0001011
- *
- * th.dcache.cpa rs1 (clean, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000001 01001 rs1 000 00000 0001011
- * th.dcache.cva rs1 (clean, virtual address)
- * 0000001 00101 rs1 000 00000 0001011
- *
- * th.dcache.cipa rs1 (clean then invalidate, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000001 01011 rs1 000 00000 0001011
- * th.dcache.civa rs1 (... virtual address)
- * 0000001 00111 rs1 000 00000 0001011
- *
- * th.sync.s (make sure all cache operations finished)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000000 11001 00000 000 00000 0001011
- */
-#define THEAD_INVAL_A0 ".long 0x0265000b"
-#define THEAD_CLEAN_A0 ".long 0x0255000b"
-#define THEAD_FLUSH_A0 ".long 0x0275000b"
-#define THEAD_SYNC_S ".long 0x0190000b"
-
#define ALT_CMO_OP(_op, _start, _size, _cachesize) \
-asm volatile(ALTERNATIVE_2( \
- __nops(6), \
+asm volatile(ALTERNATIVE( \
+ __nops(5), \
"mv a0, %1\n\t" \
"j 2f\n\t" \
"3:\n\t" \
CBO_##_op(a0) \
"add a0, a0, %0\n\t" \
"2:\n\t" \
- "bltu a0, %2, 3b\n\t" \
- "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \
- "mv a0, %1\n\t" \
- "j 2f\n\t" \
- "3:\n\t" \
- THEAD_##_op##_A0 "\n\t" \
- "add a0, a0, %0\n\t" \
- "2:\n\t" \
- "bltu a0, %2, 3b\n\t" \
- THEAD_SYNC_S, THEAD_VENDOR_ID, \
- ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \
+ "bltu a0, %2, 3b\n\t", \
+ 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM) \
: : "r"(_cachesize), \
"r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \
"r"((unsigned long)(_start) + (_size)) \
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 2b2f5df7ef2c..15055f9df4da 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -25,6 +25,11 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
+
+extern void *return_address(unsigned int level);
+
+#define ftrace_return_address(n) return_address(n)
+
void MCOUNT_NAME(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
@@ -128,7 +133,23 @@ do { \
struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
-#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+struct ftrace_ops;
+struct ftrace_regs;
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
+
+static inline void __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+ regs->t1 = addr;
+}
+#define arch_ftrace_set_direct_caller(fregs, addr) \
+ __arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+#endif /* __ASSEMBLY__ */
#endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 4c5b0e929890..22deb7a2a6ec 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -11,6 +11,11 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+bool arch_hugetlb_migration_supported(struct hstate *h);
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+#endif
+
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define __HAVE_ARCH_HUGE_PTE_CLEAR
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 5340f818746b..1f2d2599c655 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -81,6 +81,8 @@
#define RISCV_ISA_EXT_ZTSO 72
#define RISCV_ISA_EXT_ZACAS 73
+#define RISCV_ISA_EXT_XLINUXENVCFG 127
+
#define RISCV_ISA_EXT_MAX 128
#define RISCV_ISA_EXT_INVALID U32_MAX
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
index 14a5ea8d8ef0..4a35d787c019 100644
--- a/arch/riscv/include/asm/jump_label.h
+++ b/arch/riscv/include/asm/jump_label.h
@@ -17,7 +17,7 @@
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
" .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
@@ -39,7 +39,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
- asm_volatile_goto(
+ asm goto(
" .align 2 \n\t"
" .option push \n\t"
" .option norelax \n\t"
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index d169a4f41a2e..c80bb9990d32 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -95,7 +95,13 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
__pud_free(mm, pud);
}
-#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, addr) \
+do { \
+ if (pgtable_l4_enabled) { \
+ pagetable_pud_dtor(virt_to_ptdesc(pud)); \
+ tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
+ } \
+} while (0)
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -124,7 +130,11 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
__p4d_free(mm, p4d);
}
-#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
+#define __p4d_free_tlb(tlb, p4d, addr) \
+do { \
+ if (pgtable_l5_enabled) \
+ tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(p4d)); \
+} while (0)
#endif /* __PAGETABLE_PMD_FOLDED */
static inline void sync_kernel_mappings(pgd_t *pgd)
@@ -149,7 +159,11 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#ifndef __PAGETABLE_PMD_FOLDED
-#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, addr) \
+do { \
+ pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \
+ tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
+} while (0)
#endif /* __PAGETABLE_PMD_FOLDED */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index b42017d76924..b99bd66107a6 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -136,7 +136,7 @@ enum napot_cont_order {
* 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
*/
#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
-#define _PAGE_NOCACHE_THEAD ((1UL < 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60))
#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60))
#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index e3ffef1c6119..6066822e7396 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -84,7 +84,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
-#define vmemmap ((struct page *)VMEMMAP_START)
+#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
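
Why the base-PFN subtraction works, as a standalone model (the addresses are assumed values; the point is that the first RAM page's struct page lands exactly at VMEMMAP_START):

#include <stdio.h>

#define PAGE_SHIFT    12
#define VMEMMAP_START 0xffffff8000000000UL /* assumed */
#define PHYS_RAM_BASE 0x80000000UL         /* common RISC-V DRAM base */

struct page { unsigned long flags; };

int main(void)
{
	struct page *vmemmap =
		(struct page *)VMEMMAP_START - (PHYS_RAM_BASE >> PAGE_SHIFT);
	unsigned long first_ram_pfn = PHYS_RAM_BASE >> PAGE_SHIFT;

	/* pfn_to_page(first_ram_pfn) == VMEMMAP_START after the shift. */
	printf("%p\n", (void *)(vmemmap + first_ram_pfn));
	return 0;
}
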
@@ -439,6 +439,10 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+#define pte_leaf_size(pte) (pte_napot(pte) ? \
+ napot_cont_size(napot_cont_order(pte)) :\
+ PAGE_SIZE)
+
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/asm-generic/pgtable.h
@@ -865,7 +869,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE_32 (_AC(0x80000000, UL))
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#else
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index f19f861cda54..a8509cc31ab2 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -16,7 +16,7 @@
#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
-#define STACK_TOP_MAX TASK_SIZE_64
+#define STACK_TOP_MAX TASK_SIZE
#define arch_get_mmap_end(addr, len, flags) \
({ \
@@ -73,6 +73,43 @@
struct task_struct;
struct pt_regs;
+/*
+ * We use a flag to track in-kernel Vector context. Currently the flag has the
+ * following meaning:
+ *
+ * - bit 0: indicates whether the in-kernel Vector context is active. The
+ * activation of this state disables preemption. On a non-RT kernel, it
+ * also disables bh.
+ * - bit 8: used for tracking preemptible kernel-mode Vector when
+ * RISCV_ISA_V_PREEMPTIVE is enabled. Calling kernel_vector_begin() does not
+ * disable preemption if the thread's kernel_vstate.datap is allocated.
+ * Instead, the kernel sets this bit field. Then the trap entry/exit code
+ * knows if we are entering/exiting the context that owns preempt_v.
+ * - 0: the task is not using preempt_v
+ * - 1: the task is actively using preempt_v. Whether the task owns
+ * the preempt_v context is decided by the bits in RISCV_V_CTX_DEPTH_MASK.
+ * - bits 16-23 are RISCV_V_CTX_DEPTH_MASK, used by the context tracking
+ * routine when preempt_v starts:
+ * - 0: the task is actively using, and owns, the preempt_v context.
+ * - non-zero: the task was using preempt_v, but then took a trap within.
+ * Thus, the task does not own preempt_v. Any use of Vector will have to
+ * save preempt_v, if dirty, and fall back to non-preemptible kernel-mode
+ * Vector.
+ * - bit 30: the in-kernel preempt_v context is saved and requires
+ * restoring when returning to the context that owns the preempt_v.
+ * - bit 31: the in-kernel preempt_v context is dirty, as signaled by the
+ * trap entry code. Any context switch away from the current task needs to
+ * save it to the task's in-kernel V context. Also, any trap nesting on top
+ * of preempt_v that requests to use V needs a save.
+ */
+#define RISCV_V_CTX_DEPTH_MASK 0x00ff0000
+
+#define RISCV_V_CTX_UNIT_DEPTH 0x00010000
+#define RISCV_KERNEL_MODE_V 0x00000001
+#define RISCV_PREEMPT_V 0x00000100
+#define RISCV_PREEMPT_V_DIRTY 0x80000000
+#define RISCV_PREEMPT_V_NEED_RESTORE 0x40000000
+
/* CPU-specific state of a task */
struct thread_struct {
/* Callee-saved registers */
@@ -81,9 +118,11 @@ struct thread_struct {
unsigned long s[12]; /* s[0]: frame pointer */
struct __riscv_d_ext_state fstate;
unsigned long bad_cause;
- unsigned long vstate_ctrl;
+ u32 riscv_v_flags;
+ u32 vstate_ctrl;
struct __riscv_v_ext_state vstate;
unsigned long align_ctl;
+ struct __riscv_v_ext_state kernel_vstate;
};
/* Whitelist the fstate from the task_struct for hardened usercopy */
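The flag layout documented above packs four independent pieces of state into the single riscv_v_flags word. A minimal sketch of how the fields combine, assuming only the masks defined in this patch (the helper names are illustrative, not part of the patch):

static inline bool owns_preempt_v(u32 flags)
{
	/* Active preempt_v with a trap-nesting depth of zero owns the unit. */
	return (flags & RISCV_PREEMPT_V) &&
	       !(flags & RISCV_V_CTX_DEPTH_MASK);
}

static inline bool needs_save_on_switch(u32 flags)
{
	/* Bit 31: the live V registers hold unsaved kernel-mode state. */
	return flags & RISCV_PREEMPT_V_DIRTY;
}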
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index b6f898c56940..6e68f8dff76b 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -29,6 +29,7 @@ enum sbi_ext_id {
SBI_EXT_RFENCE = 0x52464E43,
SBI_EXT_HSM = 0x48534D,
SBI_EXT_SRST = 0x53525354,
+ SBI_EXT_SUSP = 0x53555350,
SBI_EXT_PMU = 0x504D55,
SBI_EXT_DBCN = 0x4442434E,
SBI_EXT_STA = 0x535441,
@@ -115,6 +116,14 @@ enum sbi_srst_reset_reason {
SBI_SRST_RESET_REASON_SYS_FAILURE,
};
+enum sbi_ext_susp_fid {
+ SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
+};
+
+enum sbi_ext_susp_sleep_type {
+ SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
+};
+
enum sbi_ext_pmu_fid {
SBI_EXT_PMU_NUM_COUNTERS = 0,
SBI_EXT_PMU_COUNTER_GET_INFO,
@@ -288,8 +297,13 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
unsigned long arg3, unsigned long arg4,
unsigned long arg5);
+#ifdef CONFIG_RISCV_SBI_V01
void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
+#else
+static inline void sbi_console_putchar(int ch) { }
+static inline int sbi_console_getchar(void) { return -ENOENT; }
+#endif
long sbi_get_mvendorid(void);
long sbi_get_marchid(void);
long sbi_get_mimpid(void);
@@ -346,6 +360,11 @@ static inline unsigned long sbi_mk_version(unsigned long major,
}
int sbi_err_map_linux_errno(int err);
+
+extern bool sbi_debug_console_available;
+int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
+int sbi_debug_console_read(char *bytes, unsigned int num_bytes);
+
#else /* CONFIG_RISCV_SBI */
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
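sbi_debug_console_write()/read() appear to return the number of bytes actually transferred (ret.value from the DBCN ecall) or a negative errno, and the implementation later in this series clamps each request to one page. Callers that must emit a whole buffer should therefore loop; a hedged sketch built only on the interface declared here:

static int dbcn_write_all(const char *buf, unsigned int len)
{
	while (len) {
		int ret = sbi_debug_console_write(buf, len);

		if (ret < 0)
			return ret;	/* -EOPNOTSUPP, -EIO, ... */
		if (ret == 0)
			return -EAGAIN;	/* no forward progress */
		buf += ret;
		len -= ret;
	}
	return 0;
}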
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
new file mode 100644
index 000000000000..54efbf523d49
--- /dev/null
+++ b/arch/riscv/include/asm/simd.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2023 SiFive
+ */
+
+#ifndef __ASM_SIMD_H
+#define __ASM_SIMD_H
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+#include <linux/thread_info.h>
+
+#include <asm/vector.h>
+
+#ifdef CONFIG_RISCV_ISA_V
+/*
+ * may_use_simd - whether it is allowable at this time to issue vector
+ * instructions or access the vector register file
+ *
+ * Callers must not assume that the result remains true beyond the next
+ * preempt_enable() or return from softirq context.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ /*
+ * RISCV_KERNEL_MODE_V is only set while preemption is disabled,
+ * and is clear whenever preemption is enabled.
+ */
+ if (in_hardirq() || in_nmi())
+ return false;
+
+ /*
+ * Nesting is achieved in preempt_v by spreading the control for
+ * preemptible and non-preemptible kernel-mode Vector into two fields.
+ * Always try to match with preempt_v if a kernel V-context exists. Then,
+ * fall back to checking non-preempt_v if nesting happens, or if the
+ * config is not set.
+ */
+ if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) && current->thread.kernel_vstate.datap) {
+ if (!riscv_preempt_v_started(current))
+ return true;
+ }
+ /*
+ * Non-preemptible kernel-mode Vector temporarily disables bh. So we
+ * must not return true when irqs_disabled(). Otherwise we would fail the
+ * lockdep check when calling local_bh_enable().
+ */
+ return !irqs_disabled() && !(riscv_v_flags() & RISCV_KERNEL_MODE_V);
+}
+
+#else /* ! CONFIG_RISCV_ISA_V */
+
+static __must_check inline bool may_use_simd(void)
+{
+ return false;
+}
+
+#endif /* ! CONFIG_RISCV_ISA_V */
+
+#endif
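The intended calling pattern pairs may_use_simd() with kernel_vector_begin()/kernel_vector_end() and keeps a scalar fallback, since the check can legitimately fail (hardirq, NMI, or an already-claimed V unit). A sketch under those assumptions; do_scalar() and do_vector() are placeholders:

static void xor_block(void *dst, const void *src, size_t len)
{
	if (!may_use_simd()) {
		do_scalar(dst, src, len);	/* placeholder fallback */
		return;
	}

	kernel_vector_begin();			/* claim the V unit */
	do_vector(dst, src, len);		/* placeholder V routine */
	kernel_vector_end();			/* release the V unit */
}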
diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
index f7e8ef2418b9..b1495a7e06ce 100644
--- a/arch/riscv/include/asm/stacktrace.h
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
+
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+#endif /* CONFIG_VMAP_STACK */
+
#endif /* _ASM_RISCV_STACKTRACE_H */
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 02f87867389a..491296a335d0 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -14,6 +14,7 @@ struct suspend_context {
struct pt_regs regs;
/* Saved and restored by high-level functions */
unsigned long scratch;
+ unsigned long envcfg;
unsigned long tvec;
unsigned long ie;
#ifdef CONFIG_MMU
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index f90d8e42f3c7..7efdb0584d47 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -53,8 +53,7 @@ static inline void __switch_to_fpu(struct task_struct *prev,
struct pt_regs *regs;
regs = task_pt_regs(prev);
- if (unlikely(regs->status & SR_SD))
- fstate_save(prev, regs);
+ fstate_save(prev, regs);
fstate_restore(next, task_pt_regs(next));
}
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 4856697c5f25..5d473343634b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -102,12 +102,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
#define TIF_32BIT 11 /* compat-mode 32bit process */
+#define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returning to user */
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 1eb5682b2af6..50b63b5c15bd 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -16,7 +16,7 @@ static void tlb_flush(struct mmu_gather *tlb);
static inline void tlb_flush(struct mmu_gather *tlb)
{
#ifdef CONFIG_MMU
- if (tlb->fullmm || tlb->need_flush_all)
+ if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
flush_tlb_mm(tlb->mm);
else
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
diff --git a/arch/riscv/include/asm/tlbbatch.h b/arch/riscv/include/asm/tlbbatch.h
new file mode 100644
index 000000000000..46014f70b9da
--- /dev/null
+++ b/arch/riscv/include/asm/tlbbatch.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+#ifndef _ASM_RISCV_TLBBATCH_H
+#define _ASM_RISCV_TLBBATCH_H
+
+#include <linux/cpumask.h>
+
+struct arch_tlbflush_unmap_batch {
+ struct cpumask cpumask;
+};
+
+#endif /* _ASM_RISCV_TLBBATCH_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a60416bbe190..4112cc8d1d69 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -47,6 +47,14 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
#endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm);
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm,
+ unsigned long uaddr);
+void arch_flush_tlb_batched_pending(struct mm_struct *mm);
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+
#else /* CONFIG_SMP && CONFIG_MMU */
#define flush_tlb_all() local_flush_tlb_all()
@@ -67,6 +75,7 @@ static inline void flush_tlb_kernel_range(unsigned long start,
#define flush_tlb_mm(mm) flush_tlb_all()
#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
#endif /* !CONFIG_SMP || !CONFIG_MMU */
#endif /* _ASM_RISCV_TLBFLUSH_H */
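These new hooks plug RISC-V into the generic batched TLB-flush path used by reclaim: unmap accumulates the CPUs that need flushing, then a single flush covers all deferred entries. Roughly how the core mm is expected to drive them (a simplified sketch, not the actual mm code):

static void unmap_pages_sketch(struct mm_struct *mm,
			       unsigned long *uaddrs, int n)
{
	struct arch_tlbflush_unmap_batch batch = { };
	int i;

	for (i = 0; i < n; i++) {
		if (arch_tlbbatch_should_defer(mm))
			arch_tlbbatch_add_pending(&batch, mm, uaddrs[i]);
		else
			flush_tlb_mm_range(mm, uaddrs[i],
					   uaddrs[i] + PAGE_SIZE, PAGE_SIZE);
	}

	arch_tlbbatch_flush(&batch);	/* one flush for all deferred CPUs */
}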
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 87aaef656257..0cd6f0a027d1 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -22,6 +22,18 @@
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
bool riscv_v_first_use_handler(struct pt_regs *regs);
+void kernel_vector_begin(void);
+void kernel_vector_end(void);
+void get_cpu_vector_context(void);
+void put_cpu_vector_context(void);
+void riscv_v_thread_free(struct task_struct *tsk);
+void __init riscv_v_setup_ctx_cache(void);
+void riscv_v_thread_alloc(struct task_struct *tsk);
+
+static inline u32 riscv_v_flags(void)
+{
+ return READ_ONCE(current->thread.riscv_v_flags);
+}
static __always_inline bool has_vector(void)
{
@@ -162,36 +174,89 @@ static inline void riscv_v_vstate_discard(struct pt_regs *regs)
__riscv_v_vstate_dirty(regs);
}
-static inline void riscv_v_vstate_save(struct task_struct *task,
+static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
if ((regs->status & SR_VS) == SR_VS_DIRTY) {
- struct __riscv_v_ext_state *vstate = &task->thread.vstate;
-
__riscv_v_vstate_save(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
}
-static inline void riscv_v_vstate_restore(struct task_struct *task,
+static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
if ((regs->status & SR_VS) != SR_VS_OFF) {
- struct __riscv_v_ext_state *vstate = &task->thread.vstate;
-
__riscv_v_vstate_restore(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
}
+static inline void riscv_v_vstate_set_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) != SR_VS_OFF) {
+ set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
+ riscv_v_vstate_on(regs);
+ }
+}
+
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+static inline bool riscv_preempt_v_dirty(struct task_struct *task)
+{
+ return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY);
+}
+
+static inline bool riscv_preempt_v_restore(struct task_struct *task)
+{
+ return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE);
+}
+
+static inline void riscv_preempt_v_clear_dirty(struct task_struct *task)
+{
+ barrier();
+ task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY;
+}
+
+static inline void riscv_preempt_v_set_restore(struct task_struct *task)
+{
+ barrier();
+ task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE;
+}
+
+static inline bool riscv_preempt_v_started(struct task_struct *task)
+{
+ return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V);
+}
+
+#else /* !CONFIG_RISCV_ISA_V_PREEMPTIVE */
+static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; }
+static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; }
+static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; }
+#define riscv_preempt_v_clear_dirty(tsk) do {} while (0)
+#define riscv_preempt_v_set_restore(tsk) do {} while (0)
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
+
static inline void __switch_to_vector(struct task_struct *prev,
struct task_struct *next)
{
struct pt_regs *regs;
- regs = task_pt_regs(prev);
- riscv_v_vstate_save(prev, regs);
- riscv_v_vstate_restore(next, task_pt_regs(next));
+ if (riscv_preempt_v_started(prev)) {
+ if (riscv_preempt_v_dirty(prev)) {
+ __riscv_v_vstate_save(&prev->thread.kernel_vstate,
+ prev->thread.kernel_vstate.datap);
+ riscv_preempt_v_clear_dirty(prev);
+ }
+ } else {
+ regs = task_pt_regs(prev);
+ riscv_v_vstate_save(&prev->thread.vstate, regs);
+ }
+
+ if (riscv_preempt_v_started(next))
+ riscv_preempt_v_set_restore(next);
+ else
+ riscv_v_vstate_set_restore(next, task_pt_regs(next));
}
void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
@@ -208,11 +273,14 @@ static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#define riscv_v_vsize (0)
#define riscv_v_vstate_discard(regs) do {} while (0)
-#define riscv_v_vstate_save(task, regs) do {} while (0)
-#define riscv_v_vstate_restore(task, regs) do {} while (0)
+#define riscv_v_vstate_save(vstate, regs) do {} while (0)
+#define riscv_v_vstate_restore(vstate, regs) do {} while (0)
#define __switch_to_vector(__prev, __next) do {} while (0)
#define riscv_v_vstate_off(regs) do {} while (0)
#define riscv_v_vstate_on(regs) do {} while (0)
+#define riscv_v_thread_free(tsk) do {} while (0)
+#define riscv_v_setup_ctx_cache() do {} while (0)
+#define riscv_v_thread_alloc(tsk) do {} while (0)
#endif /* CONFIG_RISCV_ISA_V */
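With TIF_RISCV_V_DEFER_RESTORE, __switch_to_vector() no longer restores the next task's user V state eagerly; it only marks the task for restore. The actual restore is expected to happen once on the exit-to-user path, roughly like the following sketch (the hook placement is assumed, not shown in this hunk):

static void restore_v_if_deferred(struct task_struct *tsk,
				  struct pt_regs *regs)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_RISCV_V_DEFER_RESTORE))
		riscv_v_vstate_restore(&tsk->thread.vstate, regs);
}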
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 924d01b56c9a..51f6dfe19745 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
return true;
}
-#ifdef CONFIG_RISCV_ISA_SVNAPOT
-#include <linux/pgtable.h>
+#endif
-#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
-static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
- u64 pfn, unsigned int max_page_shift)
-{
- unsigned long map_size = PAGE_SIZE;
- unsigned long size, order;
-
- if (!has_svnapot())
- return map_size;
-
- for_each_napot_order_rev(order) {
- if (napot_cont_shift(order) > max_page_shift)
- continue;
-
- size = napot_cont_size(order);
- if (end - addr < size)
- continue;
-
- if (!IS_ALIGNED(addr, size))
- continue;
-
- if (!IS_ALIGNED(PFN_PHYS(pfn), size))
- continue;
-
- map_size = size;
- break;
- }
-
- return map_size;
-}
-
-#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
-static inline int arch_vmap_pte_supported_shift(unsigned long size)
-{
- int shift = PAGE_SHIFT;
- unsigned long order;
-
- if (!has_svnapot())
- return shift;
-
- WARN_ON_ONCE(size >= PMD_SIZE);
-
- for_each_napot_order_rev(order) {
- if (napot_cont_size(order) > size)
- continue;
-
- if (!IS_ALIGNED(size, napot_cont_size(order)))
- continue;
-
- shift = napot_cont_shift(order);
- break;
- }
-
- return shift;
-}
-
-#endif /* CONFIG_RISCV_ISA_SVNAPOT */
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* _ASM_RISCV_VMALLOC_H */
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
index 7c086ac6ecd4..f3f031e34191 100644
--- a/arch/riscv/include/asm/word-at-a-time.h
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -9,6 +9,7 @@
#define _ASM_RISCV_WORD_AT_A_TIME_H
+#include <asm/asm-extable.h>
#include <linux/kernel.h>
struct word_at_a_time {
@@ -45,4 +46,30 @@ static inline unsigned long find_zero(unsigned long mask)
/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: " REG_L " %0, %2\n"
+ "2:\n"
+ _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1)
+ : "=&r" (ret)
+ : "r" (addr), "m" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#endif /* CONFIG_DCACHE_WORD_ACCESS */
+
#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
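load_unaligned_zeropad() is what makes word-at-a-time string handling safe at page boundaries: a cross-page load that faults returns the mapped bytes with the remainder zeroed instead of oopsing. The effect, with illustrative contents:

/*
 * bytes in memory:  'f' 'o' 'o' | <unmapped page>
 *                    ^addr, 3 bytes before the page boundary
 *
 * A plain unaligned load would fault. load_unaligned_zeropad() instead
 * returns (on little-endian) 0x00000000006f6f66: the three mapped bytes,
 * zero-padded, and the caller never sees the fault.
 */
unsigned long w = load_unaligned_zeropad(addr);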
diff --git a/arch/riscv/include/asm/xor.h b/arch/riscv/include/asm/xor.h
new file mode 100644
index 000000000000..96011861e46b
--- /dev/null
+++ b/arch/riscv/include/asm/xor.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+
+#include <linux/hardirq.h>
+#include <asm-generic/xor.h>
+#ifdef CONFIG_RISCV_ISA_V
+#include <asm/vector.h>
+#include <asm/switch_to.h>
+#include <asm/asm-prototypes.h>
+
+static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2)
+{
+ kernel_vector_begin();
+ xor_regs_2_(bytes, p1, p2);
+ kernel_vector_end();
+}
+
+static void xor_vector_3(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3)
+{
+ kernel_vector_begin();
+ xor_regs_3_(bytes, p1, p2, p3);
+ kernel_vector_end();
+}
+
+static void xor_vector_4(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4)
+{
+ kernel_vector_begin();
+ xor_regs_4_(bytes, p1, p2, p3, p4);
+ kernel_vector_end();
+}
+
+static void xor_vector_5(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4,
+ const unsigned long *__restrict p5)
+{
+ kernel_vector_begin();
+ xor_regs_5_(bytes, p1, p2, p3, p4, p5);
+ kernel_vector_end();
+}
+
+static struct xor_block_template xor_block_rvv = {
+ .name = "rvv",
+ .do_2 = xor_vector_2,
+ .do_3 = xor_vector_3,
+ .do_4 = xor_vector_4,
+ .do_5 = xor_vector_5
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ if (has_vector()) { \
+ xor_speed(&xor_block_rvv);\
+ } \
+ } while (0)
+#endif
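XOR_TRY_TEMPLATES is consumed by the generic XOR calibration code, which benchmarks every listed template at boot via xor_speed() and selects the fastest; the rvv template is therefore only used when it actually beats the scalar loops on the running hardware. Once selected, a template is invoked through its function pointers, e.g. (illustrative):

static void xor_one_stripe(struct xor_block_template *t, unsigned long bytes,
			   unsigned long *p1, const unsigned long *p2)
{
	t->do_2(bytes, p1, p2);		/* xor_vector_2() when "rvv" won */
}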
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index d6b7a5b95874..7499e88a947c 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -139,6 +139,33 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZIHPM,
KVM_RISCV_ISA_EXT_SMSTATEEN,
KVM_RISCV_ISA_EXT_ZICOND,
+ KVM_RISCV_ISA_EXT_ZBC,
+ KVM_RISCV_ISA_EXT_ZBKB,
+ KVM_RISCV_ISA_EXT_ZBKC,
+ KVM_RISCV_ISA_EXT_ZBKX,
+ KVM_RISCV_ISA_EXT_ZKND,
+ KVM_RISCV_ISA_EXT_ZKNE,
+ KVM_RISCV_ISA_EXT_ZKNH,
+ KVM_RISCV_ISA_EXT_ZKR,
+ KVM_RISCV_ISA_EXT_ZKSED,
+ KVM_RISCV_ISA_EXT_ZKSH,
+ KVM_RISCV_ISA_EXT_ZKT,
+ KVM_RISCV_ISA_EXT_ZVBB,
+ KVM_RISCV_ISA_EXT_ZVBC,
+ KVM_RISCV_ISA_EXT_ZVKB,
+ KVM_RISCV_ISA_EXT_ZVKG,
+ KVM_RISCV_ISA_EXT_ZVKNED,
+ KVM_RISCV_ISA_EXT_ZVKNHA,
+ KVM_RISCV_ISA_EXT_ZVKNHB,
+ KVM_RISCV_ISA_EXT_ZVKSED,
+ KVM_RISCV_ISA_EXT_ZVKSH,
+ KVM_RISCV_ISA_EXT_ZVKT,
+ KVM_RISCV_ISA_EXT_ZFH,
+ KVM_RISCV_ISA_EXT_ZFHMIN,
+ KVM_RISCV_ISA_EXT_ZIHINTNTL,
+ KVM_RISCV_ISA_EXT_ZVFH,
+ KVM_RISCV_ISA_EXT_ZVFHMIN,
+ KVM_RISCV_ISA_EXT_ZFA,
KVM_RISCV_ISA_EXT_MAX,
};
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index c92c623b311e..604d6bf7e476 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
@@ -46,6 +47,7 @@ obj-y += irq.o
obj-y += process.o
obj-y += ptrace.o
obj-y += reset.o
+obj-y += return_address.o
obj-y += setup.o
obj-y += signal.o
obj-y += syscall_table.o
@@ -64,6 +66,7 @@ obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_RISCV_ISA_V) += vector.o
+obj-$(CONFIG_RISCV_ISA_V) += kernel_mode_vector.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += cpu_ops.o
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index e32591e9da90..79a5a35fab96 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -8,8 +8,10 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
+#include <linux/jump_label.h>
#include <linux/log2.h>
#include <linux/memory.h>
#include <linux/module.h>
@@ -22,6 +24,7 @@
#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
+#include <asm/sbi.h>
#include <asm/vector.h>
#include "copy-unaligned.h"
@@ -44,6 +47,8 @@ struct riscv_isainfo hart_isa[NR_CPUS];
/* Performance information */
DEFINE_PER_CPU(long, misaligned_access_speed);
+static cpumask_t fast_misaligned_access;
+
/**
* riscv_isa_extension_base() - Get base extension word
*
@@ -198,6 +203,16 @@ static const unsigned int riscv_zvbb_exts[] = {
};
/*
+ * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V
+ * privileged ISA, the existence of the CSRs is implied by any extension which
+ * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the
+ * existence of the CSR, and treat it as a subset of those other extensions.
+ */
+static const unsigned int riscv_xlinuxenvcfg_exts[] = {
+ RISCV_ISA_EXT_XLINUXENVCFG
+};
+
+/*
* The canonical order of ISA extension names in the ISA string is defined in
* chapter 27 of the unprivileged specification.
*
@@ -246,8 +261,8 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
- __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
- __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
+ __RISCV_ISA_EXT_SUPERSET(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts),
+ __RISCV_ISA_EXT_SUPERSET(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts),
__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
@@ -535,6 +550,20 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
}
/*
+ * "V" in ISA strings is ambiguous in practice: it should mean
+ * just the standard V-1.0 but vendors aren't well behaved.
+ * Many vendors with T-Head CPU cores which implement the 0.7.1
+ * version of the vector specification put "v" into their DTs.
* CPU cores implementing the ratified spec will report a
* non-zero marchid.
+ */
+ if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
+ riscv_cached_marchid(cpu) == 0x0) {
+ this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
+ clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
+ }
+
+ /*
* All "okay" hart should have same isa. Set HWCAP based on
* common capabilities of every "okay" hart, in case they don't
* have.
@@ -784,6 +813,16 @@ static int check_unaligned_access(void *param)
(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
per_cpu(misaligned_access_speed, cpu) = speed;
+
+ /*
+ * Record whether this CPU has fast misaligned accesses in the
+ * fast_misaligned_access cpumask. These operations are atomic to
+ * avoid race conditions.
+ */
+ if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
+ cpumask_set_cpu(cpu, &fast_misaligned_access);
+ else
+ cpumask_clear_cpu(cpu, &fast_misaligned_access);
+
return 0;
}
@@ -796,13 +835,69 @@ static void check_unaligned_access_nonboot_cpu(void *param)
check_unaligned_access(pages[cpu]);
}
+DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+
+static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
+{
+ if (cpumask_weight(mask) == weight)
+ static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
+ else
+ static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
+}
+
+static void set_unaligned_access_static_branches_except_cpu(int cpu)
+{
+ /*
+ * Same as set_unaligned_access_static_branches, except it excludes the
+ * given CPU from the result. When a CPU is hotplugged into an offline
+ * state, this function is called before the CPU is set to offline in
+ * the cpumask, and thus the CPU needs to be explicitly excluded.
+ */
+
+ cpumask_t fast_except_me;
+
+ cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);
+ cpumask_clear_cpu(cpu, &fast_except_me);
+
+ modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1);
+}
+
+static void set_unaligned_access_static_branches(void)
+{
+ /*
+ * This will be called after check_unaligned_access_all_cpus so the
+ * result of unaligned access speed for all CPUs will be available.
+ *
+ * To avoid the number of online cpus changing between reading
+ * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
+ * held before calling this function.
+ */
+
+ cpumask_t fast_and_online;
+
+ cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);
+
+ modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
+}
+
+static int lock_and_set_unaligned_access_static_branch(void)
+{
+ cpus_read_lock();
+ set_unaligned_access_static_branches();
+ cpus_read_unlock();
+
+ return 0;
+}
+
+arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
+
static int riscv_online_cpu(unsigned int cpu)
{
static struct page *buf;
/* We are already set since the last check */
if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
- return 0;
+ goto exit;
buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!buf) {
@@ -812,6 +907,17 @@ static int riscv_online_cpu(unsigned int cpu)
check_unaligned_access(buf);
__free_pages(buf, MISALIGNED_BUFFER_ORDER);
+
+exit:
+ set_unaligned_access_static_branches();
+
+ return 0;
+}
+
+static int riscv_offline_cpu(unsigned int cpu)
+{
+ set_unaligned_access_static_branches_except_cpu(cpu);
+
return 0;
}
@@ -846,9 +952,12 @@ static int check_unaligned_access_all_cpus(void)
/* Check core 0. */
smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
- /* Setup hotplug callback for any new CPUs that come online. */
+ /*
+ * Setup hotplug callbacks for any new CPUs that come online or go
+ * offline.
+ */
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
- riscv_online_cpu, NULL);
+ riscv_online_cpu, riscv_offline_cpu);
out:
unaligned_emulation_finish();
@@ -866,7 +975,7 @@ arch_initcall(check_unaligned_access_all_cpus);
void riscv_user_isa_enable(void)
{
if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
- csr_set(CSR_SENVCFG, ENVCFG_CBZE);
+ csr_set(CSR_ENVCFG, ENVCFG_CBZE);
}
#ifdef CONFIG_RISCV_ALTERNATIVE
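The payoff for maintaining fast_misaligned_access_speed_key is that hot paths can ask "do all online CPUs handle misaligned access quickly?" with a patched branch rather than reading per-CPU state. A hedged sketch of a consumer (the copy helpers are placeholders):

static void copy_maybe_unaligned(void *dst, const void *src, size_t len)
{
	if (static_branch_likely(&fast_misaligned_access_speed_key))
		memcpy(dst, src, len);	/* hardware copes with misalignment */
	else
		copy_aligned_only(dst, src, len); /* placeholder safe path */
}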
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 54ca4564a926..9d1a305d5508 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -83,6 +83,10 @@ SYM_CODE_START(handle_exception)
/* Load the kernel shadow call stack pointer if coming from userspace */
scs_load_current_if_task_changed s5
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+ move a0, sp
+ call riscv_v_context_nesting_start
+#endif
move a0, sp /* pt_regs */
la ra, ret_from_exception
@@ -138,6 +142,10 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
*/
csrw CSR_SCRATCH, tp
1:
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+ move a0, sp
+ call riscv_v_context_nesting_end
+#endif
REG_L a0, PT_STATUS(sp)
/*
* The current load reservation is effectively part of the processor's
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 03a6434a8cdd..f5aa24d9e1c1 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -178,32 +178,28 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
}
#ifdef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+ struct pt_regs *regs = arch_ftrace_get_regs(fregs);
+ unsigned long *parent = (unsigned long *)&regs->ra;
+
+ prepare_ftrace_return(parent, ip, frame_pointer(regs));
+}
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
extern void ftrace_graph_call(void);
-extern void ftrace_graph_regs_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
- int ret;
-
- ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
- (unsigned long)&prepare_ftrace_return, true, true);
- if (ret)
- return ret;
-
- return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+ return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, true, true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
- int ret;
-
- ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
- (unsigned long)&prepare_ftrace_return, false, true);
- if (ret)
- return ret;
-
- return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+ return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, false, true);
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
new file mode 100644
index 000000000000..6afe80c7f03a
--- /dev/null
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2021 SiFive
+ */
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#include <asm/vector.h>
+#include <asm/switch_to.h>
+#include <asm/simd.h>
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+#include <asm/asm-prototypes.h>
+#endif
+
+static inline void riscv_v_flags_set(u32 flags)
+{
+ WRITE_ONCE(current->thread.riscv_v_flags, flags);
+}
+
+static inline void riscv_v_start(u32 flags)
+{
+ int orig;
+
+ orig = riscv_v_flags();
+ BUG_ON((orig & flags) != 0);
+ riscv_v_flags_set(orig | flags);
+ barrier();
+}
+
+static inline void riscv_v_stop(u32 flags)
+{
+ int orig;
+
+ barrier();
+ orig = riscv_v_flags();
+ BUG_ON((orig & flags) == 0);
+ riscv_v_flags_set(orig & ~flags);
+}
+
+/*
+ * Claim ownership of the CPU vector context for use by the calling context.
+ *
+ * The caller may freely manipulate the vector context metadata until
+ * put_cpu_vector_context() is called.
+ */
+void get_cpu_vector_context(void)
+{
+ /*
+ * Disable softirqs so it is impossible for softirqs to nest
+ * get_cpu_vector_context() while the kernel is actively using Vector.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_disable();
+ else
+ preempt_disable();
+
+ riscv_v_start(RISCV_KERNEL_MODE_V);
+}
+
+/*
+ * Release the CPU vector context.
+ *
+ * Must be called from a context in which get_cpu_vector_context() was
+ * previously called, with no call to put_cpu_vector_context() in the
+ * meantime.
+ */
+void put_cpu_vector_context(void)
+{
+ riscv_v_stop(RISCV_KERNEL_MODE_V);
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_enable();
+ else
+ preempt_enable();
+}
+
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+static __always_inline u32 *riscv_v_flags_ptr(void)
+{
+ return &current->thread.riscv_v_flags;
+}
+
+static inline void riscv_preempt_v_set_dirty(void)
+{
+ *riscv_v_flags_ptr() |= RISCV_PREEMPT_V_DIRTY;
+}
+
+static inline void riscv_preempt_v_reset_flags(void)
+{
+ *riscv_v_flags_ptr() &= ~(RISCV_PREEMPT_V_DIRTY | RISCV_PREEMPT_V_NEED_RESTORE);
+}
+
+static inline void riscv_v_ctx_depth_inc(void)
+{
+ *riscv_v_flags_ptr() += RISCV_V_CTX_UNIT_DEPTH;
+}
+
+static inline void riscv_v_ctx_depth_dec(void)
+{
+ *riscv_v_flags_ptr() -= RISCV_V_CTX_UNIT_DEPTH;
+}
+
+static inline u32 riscv_v_ctx_get_depth(void)
+{
+ return *riscv_v_flags_ptr() & RISCV_V_CTX_DEPTH_MASK;
+}
+
+static int riscv_v_stop_kernel_context(void)
+{
+ if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current))
+ return 1;
+
+ riscv_preempt_v_clear_dirty(current);
+ riscv_v_stop(RISCV_PREEMPT_V);
+ return 0;
+}
+
+static int riscv_v_start_kernel_context(bool *is_nested)
+{
+ struct __riscv_v_ext_state *kvstate, *uvstate;
+
+ kvstate = &current->thread.kernel_vstate;
+ if (!kvstate->datap)
+ return -ENOENT;
+
+ if (riscv_preempt_v_started(current)) {
+ WARN_ON(riscv_v_ctx_get_depth() == 0);
+ *is_nested = true;
+ get_cpu_vector_context();
+ if (riscv_preempt_v_dirty(current)) {
+ __riscv_v_vstate_save(kvstate, kvstate->datap);
+ riscv_preempt_v_clear_dirty(current);
+ }
+ riscv_preempt_v_set_restore(current);
+ return 0;
+ }
+
+ /* Transfer the ownership of V from user to kernel, then save */
+ riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY);
+ if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) {
+ uvstate = &current->thread.vstate;
+ __riscv_v_vstate_save(uvstate, uvstate->datap);
+ }
+ riscv_preempt_v_clear_dirty(current);
+ return 0;
+}
+
+/* low-level V context handling code, called with irq disabled */
+asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs)
+{
+ int depth;
+
+ if (!riscv_preempt_v_started(current))
+ return;
+
+ depth = riscv_v_ctx_get_depth();
+ if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY)
+ riscv_preempt_v_set_dirty();
+
+ riscv_v_ctx_depth_inc();
+}
+
+asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs)
+{
+ struct __riscv_v_ext_state *vstate = &current->thread.kernel_vstate;
+ u32 depth;
+
+ WARN_ON(!irqs_disabled());
+
+ if (!riscv_preempt_v_started(current))
+ return;
+
+ riscv_v_ctx_depth_dec();
+ depth = riscv_v_ctx_get_depth();
+ if (depth == 0) {
+ if (riscv_preempt_v_restore(current)) {
+ __riscv_v_vstate_restore(vstate, vstate->datap);
+ __riscv_v_vstate_clean(regs);
+ riscv_preempt_v_reset_flags();
+ }
+ }
+}
+#else
+#define riscv_v_start_kernel_context(nested) (-ENOENT)
+#define riscv_v_stop_kernel_context() (-ENOENT)
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
+
+/*
+ * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
+ * context
+ *
+ * Must not be called unless may_use_simd() returns true.
+ * Task context in the vector registers is saved back to memory as necessary.
+ *
+ * A matching call to kernel_vector_end() must be made before returning from the
+ * calling context.
+ *
+ * The caller may freely use the vector registers until kernel_vector_end() is
+ * called.
+ */
+void kernel_vector_begin(void)
+{
+ bool nested = false;
+
+ if (WARN_ON(!has_vector()))
+ return;
+
+ BUG_ON(!may_use_simd());
+
+ if (riscv_v_start_kernel_context(&nested)) {
+ get_cpu_vector_context();
+ riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));
+ }
+
+ if (!nested)
+ riscv_v_vstate_set_restore(current, task_pt_regs(current));
+
+ riscv_v_enable();
+}
+EXPORT_SYMBOL_GPL(kernel_vector_begin);
+
+/*
+ * kernel_vector_end(): give the CPU vector registers back to the current task
+ *
+ * Must be called from a context in which kernel_vector_begin() was previously
+ * called, with no call to kernel_vector_end() in the meantime.
+ *
+ * The caller must not use the vector registers after this function is called,
+ * unless kernel_vector_begin() is called again in the meantime.
+ */
+void kernel_vector_end(void)
+{
+ if (WARN_ON(!has_vector()))
+ return;
+
+ riscv_v_disable();
+
+ if (riscv_v_stop_kernel_context())
+ put_cpu_vector_context();
+}
+EXPORT_SYMBOL_GPL(kernel_vector_end);
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 79dc81223238..b7561288e8da 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -57,31 +57,150 @@
.endm
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- .macro SAVE_ALL
+
+/**
+* SAVE_ABI_REGS - save regs against the pt_regs struct
+*
+* @all: tells whether to save all the regs
+*
+* If all is set, all the regs will be saved; otherwise only the
+* ABI-related regs (a0-a7, epc, ra and optionally s0) will be saved.
+*
+* After the stack is established,
+*
+* 0(sp) stores the PC of the traced function which can be accessed
+* by &(fregs)->regs->epc in the tracing function. Note that the real
+* function entry address should be computed with -FENTRY_RA_OFFSET.
+*
+* 8(sp) stores the function return address (i.e. parent IP) that
+* can be accessed by &(fregs)->regs->ra in the tracing function.
+*
+* The other regs are saved at their respective locations and accessed
+* via the respective pt_regs members.
+*
+* Here is the layout of the stack for your reference.
+*
+* PT_SIZE_ON_STACK -> +++++++++
+* + ..... +
+* + t3-t6 +
+* + s2-s11+
+* + a0-a7 + --++++-> ftrace_caller saved
+* + s1 + +
+* + s0 + --+
+* + t0-t2 + +
+* + tp + +
+* + gp + +
+* + sp + +
+* + ra + --+ // parent IP
+* sp -> + epc + --+ // PC
+* +++++++++
+**/
+ .macro SAVE_ABI_REGS, all=0
addi sp, sp, -PT_SIZE_ON_STACK
- REG_S t0, PT_EPC(sp)
- REG_S x1, PT_RA(sp)
- REG_S x2, PT_SP(sp)
- REG_S x3, PT_GP(sp)
- REG_S x4, PT_TP(sp)
- REG_S x5, PT_T0(sp)
- save_from_x6_to_x31
+ REG_S t0, PT_EPC(sp)
+ REG_S x1, PT_RA(sp)
+
+ // save the ABI regs
+
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+
+ // save the leftover regs
+
+ .if \all == 1
+ REG_S x2, PT_SP(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x4, PT_TP(sp)
+ REG_S x5, PT_T0(sp)
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+
+ // save s0 if FP_TEST defined
+
+ .else
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ REG_S x8, PT_S0(sp)
+#endif
+ .endif
.endm
- .macro RESTORE_ALL
- REG_L x1, PT_RA(sp)
- REG_L x2, PT_SP(sp)
- REG_L x3, PT_GP(sp)
- REG_L x4, PT_TP(sp)
- /* Restore t0 with PT_EPC */
- REG_L x5, PT_EPC(sp)
- restore_from_x6_to_x31
+ .macro RESTORE_ABI_REGS, all=0
+ REG_L t0, PT_EPC(sp)
+ REG_L x1, PT_RA(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ .if \all == 1
+ REG_L x2, PT_SP(sp)
+ REG_L x3, PT_GP(sp)
+ REG_L x4, PT_TP(sp)
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+
+ .else
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ REG_L x8, PT_S0(sp)
+#endif
+ .endif
addi sp, sp, PT_SIZE_ON_STACK
.endm
+
+ .macro PREPARE_ARGS
+ addi a0, t0, -FENTRY_RA_OFFSET
+ la a1, function_trace_op
+ REG_L a2, 0(a1)
+ mv a1, ra
+ mv a3, sp
+ .endm
+
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
SYM_FUNC_START(ftrace_caller)
SAVE_ABI
@@ -105,34 +224,39 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
call ftrace_stub
#endif
RESTORE_ABI
- jr t0
+ jr t0
SYM_FUNC_END(ftrace_caller)
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
SYM_FUNC_START(ftrace_regs_caller)
- SAVE_ALL
-
- addi a0, t0, -FENTRY_RA_OFFSET
- la a1, function_trace_op
- REG_L a2, 0(a1)
- mv a1, ra
- mv a3, sp
+ mv t1, zero
+ SAVE_ABI_REGS 1
+ PREPARE_ARGS
SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
call ftrace_stub
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- addi a0, sp, PT_RA
- REG_L a1, PT_EPC(sp)
- addi a1, a1, -FENTRY_RA_OFFSET
-#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- mv a2, s0
-#endif
-SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
+ RESTORE_ABI_REGS 1
+ bnez t1, .Ldirect
+ jr t0
+.Ldirect:
+ jr t1
+SYM_FUNC_END(ftrace_regs_caller)
+
+SYM_FUNC_START(ftrace_caller)
+ SAVE_ABI_REGS 0
+ PREPARE_ARGS
+
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub
-#endif
- RESTORE_ALL
- jr t0
-SYM_FUNC_END(ftrace_regs_caller)
+ RESTORE_ABI_REGS 0
+ jr t0
+SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_CODE_START(ftrace_stub_direct_tramp)
+ jr t0
+SYM_CODE_END(ftrace_stub_direct_tramp)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 862834bb1d64..5e5a82644451 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -723,8 +723,8 @@ static int add_relocation_to_accumulate(struct module *me, int type,
if (!bucket) {
kfree(entry);
- kfree(rel_head);
kfree(rel_head->rel_entry);
+ kfree(rel_head);
return -ENOMEM;
}
@@ -747,6 +747,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
{
/* Can safely assume that bits is not greater than sizeof(long) */
unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
+ /*
+ * When hashtable_size == 1, hashtable_bits == 0.
+ * This is valid because the hashing algorithm returns 0 in this case.
+ */
unsigned int hashtable_bits = ilog2(hashtable_size);
/*
@@ -760,10 +764,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
hashtable_size <<= should_double_size;
*relocation_hashtable = kmalloc_array(hashtable_size,
- sizeof(*relocation_hashtable),
+ sizeof(**relocation_hashtable),
GFP_KERNEL);
if (!*relocation_hashtable)
- return -ENOMEM;
+ return 0;
__hash_init(*relocation_hashtable, hashtable_size);
@@ -779,6 +783,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf_Sym *sym;
void *location;
unsigned int i, type;
+ unsigned int j_idx = 0;
Elf_Addr v;
int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
@@ -789,8 +794,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
hashtable_bits = initialize_relocation_hashtable(num_relocations,
&relocation_hashtable);
- if (hashtable_bits < 0)
- return hashtable_bits;
+ if (!relocation_hashtable)
+ return -ENOMEM;
INIT_LIST_HEAD(&used_buckets_list);
@@ -829,9 +834,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
v = sym->st_value + rel[i].r_addend;
if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
- unsigned int j;
+ unsigned int j = j_idx;
+ bool found = false;
- for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
+ do {
unsigned long hi20_loc =
sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[j].r_offset;
@@ -860,16 +866,26 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = offset - hi20;
v = lo12;
+ found = true;
break;
}
- }
- if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
+
+ j++;
+ if (j > sechdrs[relsec].sh_size / sizeof(*rel))
+ j = 0;
+
+ } while (j_idx != j);
+
+ if (!found) {
pr_err(
"%s: Can not find HI20 relocation information\n",
me->name);
return -EINVAL;
}
+
+ /* Record the previous j-loop end index */
+ j_idx = j;
}
if (reloc_handlers[type].accumulate_handler)
diff --git a/arch/riscv/kernel/paravirt.c b/arch/riscv/kernel/paravirt.c
index 8e114f5930ce..0d6225fd3194 100644
--- a/arch/riscv/kernel/paravirt.c
+++ b/arch/riscv/kernel/paravirt.c
@@ -41,7 +41,7 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
-DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
+static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
static bool __init has_pv_steal_clock(void)
{
@@ -91,8 +91,8 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
static u64 pv_time_steal_clock(int cpu)
{
struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
- u32 sequence;
- u64 steal;
+ __le32 sequence;
+ __le64 steal;
/*
* Check the sequence field before and after reading the steal
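The shared steal-time record follows an even/odd sequence protocol: the SBI implementation makes 'sequence' odd while updating and even when done, so the reader retries until it observes the same even value before and after the read. A sketch of that read loop, using the field names from struct sbi_sta_struct (barrier choice here is illustrative):

static u64 read_steal_sketch(struct sbi_sta_struct *st)
{
	u32 seq;
	u64 steal;

	do {
		seq = le32_to_cpu(READ_ONCE(st->sequence));
		virt_rmb();
		steal = le64_to_cpu(READ_ONCE(st->steal));
		virt_rmb();
	} while ((seq & 1) ||
		 seq != le32_to_cpu(READ_ONCE(st->sequence)));

	return steal;
}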
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index 68e786c84c94..f6d4dedffb84 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -38,8 +38,7 @@ static char *get_early_cmdline(uintptr_t dtb_pa)
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
fdt_cmdline_size == 0 /* CONFIG_CMDLINE_FALLBACK */) {
- strncat(early_cmdline, CONFIG_CMDLINE,
- COMMAND_LINE_SIZE - fdt_cmdline_size);
+ strlcat(early_cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
}
return early_cmdline;
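The strncat to strlcat switch matters because the two functions bound different things: strncat's n limits how many source bytes are appended (the destination can still overflow), while strlcat's n is the total size of the destination buffer. Illustrative contrast:

char buf[16] = "0123456789";

/* strncat: appends up to n bytes plus a NUL - 10 + 8 + 1 > 16, overflow! */
strncat(buf, "abcdefgh", 8);

/* strlcat: n bounds the whole buffer - result is "0123456789abcde",
 * always NUL-terminated within sizeof(buf). */
strlcat(buf, "abcdefgh", sizeof(buf));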
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 4f21d970a129..92922dbd5b5c 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -171,6 +171,7 @@ void flush_thread(void)
riscv_v_vstate_off(task_pt_regs(current));
kfree(current->thread.vstate.datap);
memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+ clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
}
@@ -178,7 +179,7 @@ void arch_release_task_struct(struct task_struct *tsk)
{
/* Free the vector context of datap. */
if (has_vector())
- kfree(tsk->thread.vstate.datap);
+ riscv_v_thread_free(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -187,6 +188,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
*dst = *src;
/* clear entire V context, including datap for a new task */
memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+ memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
+ clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
return 0;
}
@@ -221,7 +224,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->a0 = 0; /* Return value of fork() */
p->thread.s[0] = 0;
}
+ p->thread.riscv_v_flags = 0;
+ if (has_vector())
+ riscv_v_thread_alloc(p);
p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
return 0;
}
+
+void __init arch_task_cache_init(void)
+{
+ riscv_v_setup_ctx_cache();
+}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 2afe460de16a..e8515aa9d80b 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -99,8 +99,11 @@ static int riscv_vr_get(struct task_struct *target,
* Ensure the vector registers have been saved to the memory before
* copying them to membuf.
*/
- if (target == current)
- riscv_v_vstate_save(current, task_pt_regs(current));
+ if (target == current) {
+ get_cpu_vector_context();
+ riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));
+ put_cpu_vector_context();
+ }
ptrace_vstate.vstart = vstate->vstart;
ptrace_vstate.vl = vstate->vl;
diff --git a/arch/riscv/kernel/return_address.c b/arch/riscv/kernel/return_address.c
new file mode 100644
index 000000000000..c8115ec8fb30
--- /dev/null
+++ b/arch/riscv/kernel/return_address.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This code comes from arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2023 SiFive.
+ */
+
+#include <linux/export.h>
+#include <linux/kprobes.h>
+#include <linux/stacktrace.h>
+
+struct return_address_data {
+ unsigned int level;
+ void *addr;
+};
+
+static bool save_return_addr(void *d, unsigned long pc)
+{
+ struct return_address_data *data = d;
+
+ if (!data->level) {
+ data->addr = (void *)pc;
+ return false;
+ }
+
+ --data->level;
+
+ return true;
+}
+NOKPROBE_SYMBOL(save_return_addr);
+
+noinline void *return_address(unsigned int level)
+{
+ struct return_address_data data;
+
+ data.level = level + 3;
+ data.addr = NULL;
+
+ arch_stack_walk(save_return_addr, &data, current, NULL);
+
+ if (!data.level)
+ return data.addr;
+ else
+ return NULL;
+
+}
+EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
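return_address(level) walks the stack and hands back an ancestor return address; the level + 3 presumably skips the frames the walk itself introduces (return_address(), arch_stack_walk() and its internals), mirroring the arm64 original. It backs ftrace's CALLER_ADDRn machinery, which is why the symbol is exported. Illustrative use (exact level semantics hedged):

static void log_caller(void)
{
	/* return_address(0) is intended to yield this function's caller;
	 * larger levels walk further up the stack. */
	pr_debug("called from %pS\n", return_address(0));
}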
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 5a62ed1da453..e66e0999a800 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/init.h>
+#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/reboot.h>
#include <asm/sbi.h>
@@ -571,6 +572,66 @@ long sbi_get_mimpid(void)
}
EXPORT_SYMBOL_GPL(sbi_get_mimpid);
+bool sbi_debug_console_available;
+
+int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
+{
+ phys_addr_t base_addr;
+ struct sbiret ret;
+
+ if (!sbi_debug_console_available)
+ return -EOPNOTSUPP;
+
+ if (is_vmalloc_addr(bytes))
+ base_addr = page_to_phys(vmalloc_to_page(bytes)) +
+ offset_in_page(bytes);
+ else
+ base_addr = __pa(bytes);
+ if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
+ num_bytes = PAGE_SIZE - offset_in_page(bytes);
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
+ num_bytes, lower_32_bits(base_addr),
+ upper_32_bits(base_addr), 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
+ num_bytes, base_addr, 0, 0, 0, 0);
+
+ if (ret.error == SBI_ERR_FAILURE)
+ return -EIO;
+ return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
+}
+
+int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
+{
+ phys_addr_t base_addr;
+ struct sbiret ret;
+
+ if (!sbi_debug_console_available)
+ return -EOPNOTSUPP;
+
+ if (is_vmalloc_addr(bytes))
+ base_addr = page_to_phys(vmalloc_to_page(bytes)) +
+ offset_in_page(bytes);
+ else
+ base_addr = __pa(bytes);
+ if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
+ num_bytes = PAGE_SIZE - offset_in_page(bytes);
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
+ num_bytes, lower_32_bits(base_addr),
+ upper_32_bits(base_addr), 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
+ num_bytes, base_addr, 0, 0, 0, 0);
+
+ if (ret.error == SBI_ERR_FAILURE)
+ return -EIO;
+ return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
+}
+
void __init sbi_init(void)
{
int ret;
@@ -612,6 +673,11 @@ void __init sbi_init(void)
sbi_srst_reboot_nb.priority = 192;
register_restart_handler(&sbi_srst_reboot_nb);
}
+ if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
+ (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
+ pr_info("SBI DBCN extension detected\n");
+ sbi_debug_console_available = true;
+ }
} else {
__sbi_set_timer = __sbi_set_timer_v01;
__sbi_send_ipi = __sbi_send_ipi_v01;
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 33dfb5078301..501e66debf69 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -86,7 +86,10 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
/* datap is designed to be 16 byte aligned for better performance */
WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16)));
- riscv_v_vstate_save(current, regs);
+ get_cpu_vector_context();
+ riscv_v_vstate_save(&current->thread.vstate, regs);
+ put_cpu_vector_context();
+
/* Copy everything of vstate but datap. */
err = __copy_to_user(&state->v_state, &current->thread.vstate,
offsetof(struct __riscv_v_ext_state, datap));
@@ -134,7 +137,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
if (unlikely(err))
return err;
- riscv_v_vstate_restore(current, regs);
+ riscv_v_vstate_set_restore(current, regs);
return err;
}
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 3c89b8ec69c4..299795341e8a 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -4,13 +4,19 @@
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
+#define pr_fmt(fmt) "suspend: " fmt
+
#include <linux/ftrace.h>
+#include <linux/suspend.h>
#include <asm/csr.h>
+#include <asm/sbi.h>
#include <asm/suspend.h>
void suspend_save_csrs(struct suspend_context *context)
{
context->scratch = csr_read(CSR_SCRATCH);
+ if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
+ context->envcfg = csr_read(CSR_ENVCFG);
context->tvec = csr_read(CSR_TVEC);
context->ie = csr_read(CSR_IE);
@@ -32,6 +38,8 @@ void suspend_save_csrs(struct suspend_context *context)
void suspend_restore_csrs(struct suspend_context *context)
{
csr_write(CSR_SCRATCH, context->scratch);
+ if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
+ csr_write(CSR_ENVCFG, context->envcfg);
csr_write(CSR_TVEC, context->tvec);
csr_write(CSR_IE, context->ie);
@@ -85,3 +93,43 @@ int cpu_suspend(unsigned long arg,
return rc;
}
+
+#ifdef CONFIG_RISCV_SBI
+static int sbi_system_suspend(unsigned long sleep_type,
+ unsigned long resume_addr,
+ unsigned long opaque)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
+ sleep_type, resume_addr, opaque, 0, 0, 0);
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+
+ return ret.value;
+}
+
+static int sbi_system_suspend_enter(suspend_state_t state)
+{
+ return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
+}
+
+static const struct platform_suspend_ops sbi_system_suspend_ops = {
+ .valid = suspend_valid_only_mem,
+ .enter = sbi_system_suspend_enter,
+};
+
+static int __init sbi_system_suspend_init(void)
+{
+ if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+ sbi_probe_extension(SBI_EXT_SUSP) > 0) {
+ pr_info("SBI SUSP extension detected\n");
+ if (IS_ENABLED(CONFIG_SUSPEND))
+ suspend_set_ops(&sbi_system_suspend_ops);
+ }
+
+ return 0;
+}
+
+arch_initcall(sbi_system_suspend_init);
+#endif /* CONFIG_RISCV_SBI */
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 578b6292487e..6727d1d3b8f2 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -21,6 +21,10 @@
#include <asm/bug.h>
static bool riscv_v_implicit_uacc = IS_ENABLED(CONFIG_RISCV_ISA_V_DEFAULT_ENABLE);
+static struct kmem_cache *riscv_v_user_cachep;
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+static struct kmem_cache *riscv_v_kernel_cachep;
+#endif
unsigned long riscv_v_vsize __read_mostly;
EXPORT_SYMBOL_GPL(riscv_v_vsize);
@@ -47,6 +51,21 @@ int riscv_v_setup_vsize(void)
return 0;
}
+void __init riscv_v_setup_ctx_cache(void)
+{
+ if (!has_vector())
+ return;
+
+ riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx",
+ riscv_v_vsize, 16, SLAB_PANIC,
+ 0, riscv_v_vsize, NULL);
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+ riscv_v_kernel_cachep = kmem_cache_create("riscv_vector_kctx",
+ riscv_v_vsize, 16,
+ SLAB_PANIC, NULL);
+#endif
+}
+
static bool insn_is_vector(u32 insn_buf)
{
u32 opcode = insn_buf & __INSN_OPCODE_MASK;
@@ -80,20 +99,37 @@ static bool insn_is_vector(u32 insn_buf)
return false;
}
-static int riscv_v_thread_zalloc(void)
+static int riscv_v_thread_zalloc(struct kmem_cache *cache,
+ struct __riscv_v_ext_state *ctx)
{
void *datap;
- datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+ datap = kmem_cache_zalloc(cache, GFP_KERNEL);
if (!datap)
return -ENOMEM;
- current->thread.vstate.datap = datap;
- memset(&current->thread.vstate, 0, offsetof(struct __riscv_v_ext_state,
- datap));
+ ctx->datap = datap;
+ memset(ctx, 0, offsetof(struct __riscv_v_ext_state, datap));
return 0;
}
+void riscv_v_thread_alloc(struct task_struct *tsk)
+{
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+ riscv_v_thread_zalloc(riscv_v_kernel_cachep, &tsk->thread.kernel_vstate);
+#endif
+}
+
+void riscv_v_thread_free(struct task_struct *tsk)
+{
+ if (tsk->thread.vstate.datap)
+ kmem_cache_free(riscv_v_user_cachep, tsk->thread.vstate.datap);
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+ if (tsk->thread.kernel_vstate.datap)
+ kmem_cache_free(riscv_v_kernel_cachep, tsk->thread.kernel_vstate.datap);
+#endif
+}
+
#define VSTATE_CTRL_GET_CUR(x) ((x) & PR_RISCV_V_VSTATE_CTRL_CUR_MASK)
#define VSTATE_CTRL_GET_NEXT(x) (((x) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) >> 2)
#define VSTATE_CTRL_MAKE_NEXT(x) (((x) << 2) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK)
@@ -122,7 +158,8 @@ static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt,
ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt);
if (inherit)
ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
- tsk->thread.vstate_ctrl = ctrl;
+ tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK;
+ tsk->thread.vstate_ctrl |= ctrl;
}
bool riscv_v_vstate_ctrl_user_allowed(void)
@@ -162,12 +199,12 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
* context where VS has been off. So, try to allocate the user's V
* context and resume execution.
*/
- if (riscv_v_thread_zalloc()) {
+ if (riscv_v_thread_zalloc(riscv_v_user_cachep, &current->thread.vstate)) {
force_sig(SIGBUS);
return true;
}
riscv_v_vstate_on(regs);
- riscv_v_vstate_restore(current, regs);
+ riscv_v_vstate_set_restore(current, regs);
return true;
}
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index fc34557f5356..5f7355e96008 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -42,15 +42,42 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
+ KVM_ISA_EXT_ARR(ZBC),
+ KVM_ISA_EXT_ARR(ZBKB),
+ KVM_ISA_EXT_ARR(ZBKC),
+ KVM_ISA_EXT_ARR(ZBKX),
KVM_ISA_EXT_ARR(ZBS),
+ KVM_ISA_EXT_ARR(ZFA),
+ KVM_ISA_EXT_ARR(ZFH),
+ KVM_ISA_EXT_ARR(ZFHMIN),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
KVM_ISA_EXT_ARR(ZICNTR),
KVM_ISA_EXT_ARR(ZICOND),
KVM_ISA_EXT_ARR(ZICSR),
KVM_ISA_EXT_ARR(ZIFENCEI),
+ KVM_ISA_EXT_ARR(ZIHINTNTL),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZIHPM),
+ KVM_ISA_EXT_ARR(ZKND),
+ KVM_ISA_EXT_ARR(ZKNE),
+ KVM_ISA_EXT_ARR(ZKNH),
+ KVM_ISA_EXT_ARR(ZKR),
+ KVM_ISA_EXT_ARR(ZKSED),
+ KVM_ISA_EXT_ARR(ZKSH),
+ KVM_ISA_EXT_ARR(ZKT),
+ KVM_ISA_EXT_ARR(ZVBB),
+ KVM_ISA_EXT_ARR(ZVBC),
+ KVM_ISA_EXT_ARR(ZVFH),
+ KVM_ISA_EXT_ARR(ZVFHMIN),
+ KVM_ISA_EXT_ARR(ZVKB),
+ KVM_ISA_EXT_ARR(ZVKG),
+ KVM_ISA_EXT_ARR(ZVKNED),
+ KVM_ISA_EXT_ARR(ZVKNHA),
+ KVM_ISA_EXT_ARR(ZVKNHB),
+ KVM_ISA_EXT_ARR(ZVKSED),
+ KVM_ISA_EXT_ARR(ZVKSH),
+ KVM_ISA_EXT_ARR(ZVKT),
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -92,13 +119,40 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZBA:
case KVM_RISCV_ISA_EXT_ZBB:
+ case KVM_RISCV_ISA_EXT_ZBC:
+ case KVM_RISCV_ISA_EXT_ZBKB:
+ case KVM_RISCV_ISA_EXT_ZBKC:
+ case KVM_RISCV_ISA_EXT_ZBKX:
case KVM_RISCV_ISA_EXT_ZBS:
+ case KVM_RISCV_ISA_EXT_ZFA:
+ case KVM_RISCV_ISA_EXT_ZFH:
+ case KVM_RISCV_ISA_EXT_ZFHMIN:
case KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_RISCV_ISA_EXT_ZICOND:
case KVM_RISCV_ISA_EXT_ZICSR:
case KVM_RISCV_ISA_EXT_ZIFENCEI:
+ case KVM_RISCV_ISA_EXT_ZIHINTNTL:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZIHPM:
+ case KVM_RISCV_ISA_EXT_ZKND:
+ case KVM_RISCV_ISA_EXT_ZKNE:
+ case KVM_RISCV_ISA_EXT_ZKNH:
+ case KVM_RISCV_ISA_EXT_ZKR:
+ case KVM_RISCV_ISA_EXT_ZKSED:
+ case KVM_RISCV_ISA_EXT_ZKSH:
+ case KVM_RISCV_ISA_EXT_ZKT:
+ case KVM_RISCV_ISA_EXT_ZVBB:
+ case KVM_RISCV_ISA_EXT_ZVBC:
+ case KVM_RISCV_ISA_EXT_ZVFH:
+ case KVM_RISCV_ISA_EXT_ZVFHMIN:
+ case KVM_RISCV_ISA_EXT_ZVKB:
+ case KVM_RISCV_ISA_EXT_ZVKG:
+ case KVM_RISCV_ISA_EXT_ZVKNED:
+ case KVM_RISCV_ISA_EXT_ZVKNHA:
+ case KVM_RISCV_ISA_EXT_ZVKNHB:
+ case KVM_RISCV_ISA_EXT_ZVKSED:
+ case KVM_RISCV_ISA_EXT_ZVKSH:
+ case KVM_RISCV_ISA_EXT_ZVKT:
return false;
/* Extensions which can be disabled using Smstateen */
case KVM_RISCV_ISA_EXT_SSAIA:
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index 01f09fe8c3b0..d8cf9ca28c61 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
gpa_t shmem = vcpu->arch.sta.shmem;
u64 last_steal = vcpu->arch.sta.last_steal;
- u32 *sequence_ptr, sequence;
- u64 *steal_ptr, steal;
+ __le32 __user *sequence_ptr;
+ __le64 __user *steal_ptr;
+ __le32 sequence_le;
+ __le64 steal_le;
+ u32 sequence;
+ u64 steal;
unsigned long hva;
gfn_t gfn;
@@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
return;
}
- sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+ sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
offsetof(struct sbi_sta_struct, sequence));
- steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+ steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
offsetof(struct sbi_sta_struct, steal));
- if (WARN_ON(get_user(sequence, sequence_ptr)))
+ if (WARN_ON(get_user(sequence_le, sequence_ptr)))
return;
- sequence = le32_to_cpu(sequence);
+ sequence = le32_to_cpu(sequence_le);
sequence += 1;
if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
return;
- if (!WARN_ON(get_user(steal, steal_ptr))) {
- steal = le64_to_cpu(steal);
+ if (!WARN_ON(get_user(steal_le, steal_ptr))) {
+ steal = le64_to_cpu(steal_le);
vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
steal += vcpu->arch.sta.last_steal - last_steal;
WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
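The hunk above is largely about annotation correctness: the steal-time fields are little-endian data living in user-mapped guest memory, so the pointers are typed __le32 __user / __le64 __user and every access converts explicitly. The pattern in isolation, as a minimal sketch with a hypothetical helper:

	static int bump_le32_user(__le32 __user *uptr)
	{
		__le32 raw;
		u32 val;

		if (get_user(raw, uptr))		 /* fetch the wire-format value */
			return -EFAULT;
		val = le32_to_cpu(raw) + 1;		 /* work in CPU endianness */
		return put_user(cpu_to_le32(val), uptr); /* convert back on store */
	}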
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 26cb2502ecf8..bd6e6c1b0497 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -6,8 +6,14 @@ lib-y += memmove.o
lib-y += strcmp.o
lib-y += strlen.o
lib-y += strncmp.o
+lib-y += csum.o
+ifeq ($(CONFIG_MMU), y)
+lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o
+endif
lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+lib-$(CONFIG_RISCV_ISA_V) += xor.o
+lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o
diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c
new file mode 100644
index 000000000000..74af3ab520b6
--- /dev/null
+++ b/arch/riscv/lib/csum.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Checksum library
+ *
+ * Influenced by arch/arm64/lib/csum.c
+ * Copyright (C) 2023 Rivos Inc.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/jump_label.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <asm/cpufeature.h>
+
+#include <net/checksum.h>
+
+/* Default version is sufficient for 32 bit */
+#ifndef CONFIG_32BIT
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum csum)
+{
+ unsigned int ulen, uproto;
+ unsigned long sum = (__force unsigned long)csum;
+
+ sum += (__force unsigned long)saddr->s6_addr32[0];
+ sum += (__force unsigned long)saddr->s6_addr32[1];
+ sum += (__force unsigned long)saddr->s6_addr32[2];
+ sum += (__force unsigned long)saddr->s6_addr32[3];
+
+ sum += (__force unsigned long)daddr->s6_addr32[0];
+ sum += (__force unsigned long)daddr->s6_addr32[1];
+ sum += (__force unsigned long)daddr->s6_addr32[2];
+ sum += (__force unsigned long)daddr->s6_addr32[3];
+
+ ulen = (__force unsigned int)htonl((unsigned int)len);
+ sum += ulen;
+
+ uproto = (__force unsigned int)htonl(proto);
+ sum += uproto;
+
+ /*
+ * Zbb only saves 4 instructions here, so the check is not worth
+ * doing unless alternatives can patch it out at runtime
+ */
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ unsigned long fold_temp;
+
+ /*
+ * Zbb is likely available when the kernel is compiled with Zbb
+ * support, so nop when Zbb is available and jump when Zbb is
+ * not available.
+ */
+ asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ :
+ :
+ :
+ : no_zbb);
+ asm(".option push \n\
+ .option arch,+zbb \n\
+ rori %[fold_temp], %[sum], 32 \n\
+ add %[sum], %[fold_temp], %[sum] \n\
+ srli %[sum], %[sum], 32 \n\
+ not %[fold_temp], %[sum] \n\
+ roriw %[sum], %[sum], 16 \n\
+ subw %[sum], %[fold_temp], %[sum] \n\
+ .option pop"
+ : [sum] "+r" (sum), [fold_temp] "=&r" (fold_temp));
+ return (__force __sum16)(sum >> 16);
+ }
+no_zbb:
+ sum += ror64(sum, 32);
+ sum >>= 32;
+ return csum_fold((__force __wsum)sum);
+}
+EXPORT_SYMBOL(csum_ipv6_magic);
+#endif /* !CONFIG_32BIT */
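Both tails above, the Zbb rotate sequence and the generic ror64()-based one, compute a ones'-complement fold of the 64-bit accumulator down to 16 bits. An equivalent scalar sketch of that fold, for tracing the arithmetic (not part of the patch):

	static inline u16 ones_complement_fold64(u64 sum)
	{
		sum = (sum & 0xffffffffUL) + (sum >> 32);	/* 64 -> 33 bits */
		sum = (sum & 0xffffffffUL) + (sum >> 32);	/* 33 -> 32 bits */
		sum = (sum & 0xffffUL) + (sum >> 16);		/* 32 -> 17 bits */
		sum = (sum & 0xffffUL) + (sum >> 16);		/* 17 -> 16 bits */
		return (u16)~sum;				/* csum_fold() negates too */
	}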
+
+#ifdef CONFIG_32BIT
+#define OFFSET_MASK 3
+#elif defined(CONFIG_64BIT)
+#define OFFSET_MASK 7
+#endif
+
+static inline __no_sanitize_address unsigned long
+do_csum_common(const unsigned long *ptr, const unsigned long *end,
+ unsigned long data)
+{
+ unsigned int shift;
+ unsigned long csum = 0, carry = 0;
+
+ /*
+ * Do 32-bit reads on RV32 and 64-bit reads otherwise. This should be
+ * faster than doing 32-bit reads on architectures that support larger
+ * reads.
+ */
+ while (ptr < end) {
+ csum += data;
+ carry += csum < data;
+ data = *(ptr++);
+ }
+
+ /*
+ * If the final load over-read past the end of the buffer, mask off
+ * the bytes beyond it before adding the tail word.
+ */
+ shift = ((long)ptr - (long)end) * 8;
+#ifdef __LITTLE_ENDIAN
+ data = (data << shift) >> shift;
+#else
+ data = (data >> shift) << shift;
+#endif
+ csum += data;
+ carry += csum < data;
+ csum += carry;
+ csum += csum < carry;
+
+ return csum;
+}
+
+/*
+ * The algorithm accounts for buff being misaligned.
+ * If buff is not aligned, it over-reads bytes but masks out the bytes it
+ * shouldn't use. The same happens on the tail end of the read.
+ */
+static inline __no_sanitize_address unsigned int
+do_csum_with_alignment(const unsigned char *buff, int len)
+{
+ unsigned int offset, shift;
+ unsigned long csum, data;
+ const unsigned long *ptr, *end;
+
+ /*
+ * Align the address to the closest word (double word on rv64) boundary
+ * at or before buff. This should always be in the same page and cache line.
+ * Directly call KASAN with the alignment we will be using.
+ */
+ offset = (unsigned long)buff & OFFSET_MASK;
+ kasan_check_read(buff, len);
+ ptr = (const unsigned long *)(buff - offset);
+
+ /*
+ * Clear the most significant bytes that were over-read if buff was not
+ * aligned.
+ */
+ shift = offset * 8;
+ data = *(ptr++);
+#ifdef __LITTLE_ENDIAN
+ data = (data >> shift) << shift;
+#else
+ data = (data << shift) >> shift;
+#endif
+ end = (const unsigned long *)(buff + len);
+ csum = do_csum_common(ptr, end, data);
+
+#ifdef CC_HAS_ASM_GOTO_TIED_OUTPUT
+ /*
+ * Zbb only saves 6 instructions here, so the check is not worth
+ * doing unless alternatives can patch it out at runtime
+ */
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ unsigned long fold_temp;
+
+ /*
+ * Zbb is likely available when the kernel is compiled with Zbb
+ * support, so nop when Zbb is available and jump when Zbb is
+ * not available.
+ */
+ asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ :
+ :
+ :
+ : no_zbb);
+
+#ifdef CONFIG_32BIT
+ asm_goto_output(".option push \n\
+ .option arch,+zbb \n\
+ rori %[fold_temp], %[csum], 16 \n\
+ andi %[offset], %[offset], 1 \n\
+ add %[csum], %[fold_temp], %[csum] \n\
+ beq %[offset], zero, %l[end] \n\
+ rev8 %[csum], %[csum] \n\
+ .option pop"
+ : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+ : [offset] "r" (offset)
+ :
+ : end);
+
+ return (unsigned short)csum;
+#else /* !CONFIG_32BIT */
+ asm_goto_output(".option push \n\
+ .option arch,+zbb \n\
+ rori %[fold_temp], %[csum], 32 \n\
+ add %[csum], %[fold_temp], %[csum] \n\
+ srli %[csum], %[csum], 32 \n\
+ roriw %[fold_temp], %[csum], 16 \n\
+ addw %[csum], %[fold_temp], %[csum] \n\
+ andi %[offset], %[offset], 1 \n\
+ beq %[offset], zero, %l[end] \n\
+ rev8 %[csum], %[csum] \n\
+ .option pop"
+ : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+ : [offset] "r" (offset)
+ :
+ : end);
+
+ return (csum << 16) >> 48;
+#endif /* !CONFIG_32BIT */
+end:
+ return csum >> 16;
+ }
+no_zbb:
+#endif /* CC_HAS_ASM_GOTO_TIED_OUTPUT */
+#ifndef CONFIG_32BIT
+ csum += ror64(csum, 32);
+ csum >>= 32;
+#endif
+ csum = (u32)csum + ror32((u32)csum, 16);
+ if (offset & 1)
+ return (u16)swab32(csum);
+ return csum >> 16;
+}
+
+/*
+ * Does not perform alignment; should only be used if the machine has fast
+ * misaligned accesses, or when buff is known to be aligned.
+ */
+static inline __no_sanitize_address unsigned int
+do_csum_no_alignment(const unsigned char *buff, int len)
+{
+ unsigned long csum, data;
+ const unsigned long *ptr, *end;
+
+ ptr = (const unsigned long *)(buff);
+ data = *(ptr++);
+
+ kasan_check_read(buff, len);
+
+ end = (const unsigned long *)(buff + len);
+ csum = do_csum_common(ptr, end, data);
+
+ /*
+ * Zbb only saves 6 instructions here, so the check is not worth
+ * doing unless alternatives can patch it out at runtime
+ */
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
+ IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ unsigned long fold_temp;
+
+ /*
+ * Zbb is likely available when the kernel is compiled with Zbb
+ * support, so nop when Zbb is available and jump when Zbb is
+ * not available.
+ */
+ asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
+ RISCV_ISA_EXT_ZBB, 1)
+ :
+ :
+ :
+ : no_zbb);
+
+#ifdef CONFIG_32BIT
+ asm (".option push \n\
+ .option arch,+zbb \n\
+ rori %[fold_temp], %[csum], 16 \n\
+ add %[csum], %[fold_temp], %[csum] \n\
+ .option pop"
+ : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+ :
+ : );
+
+#else /* !CONFIG_32BIT */
+ asm (".option push \n\
+ .option arch,+zbb \n\
+ rori %[fold_temp], %[csum], 32 \n\
+ add %[csum], %[fold_temp], %[csum] \n\
+ srli %[csum], %[csum], 32 \n\
+ roriw %[fold_temp], %[csum], 16 \n\
+ addw %[csum], %[fold_temp], %[csum] \n\
+ .option pop"
+ : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
+ :
+ : );
+#endif /* !CONFIG_32BIT */
+ return csum >> 16;
+ }
+no_zbb:
+#ifndef CONFIG_32BIT
+ csum += ror64(csum, 32);
+ csum >>= 32;
+#endif
+ csum = (u32)csum + ror32((u32)csum, 16);
+ return csum >> 16;
+}
+
+/*
+ * Perform a checksum on an arbitrary memory address.
+ * Does a lightweight address alignment if buff is misaligned, unless
+ * the CPU supports fast misaligned accesses.
+ */
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+ if (unlikely(len <= 0))
+ return 0;
+
+ /*
+ * Significant performance gains can be seen by not doing alignment
+ * on machines with fast misaligned accesses.
+ *
+ * There is some duplicate code between the "with_alignment" and
+ * "no_alignment" implmentations, but the overlap is too awkward to be
+ * able to fit in one function without introducing multiple static
+ * branches. The largest chunk of overlap was delegated into the
+ * do_csum_common function.
+ */
+ if (static_branch_likely(&fast_misaligned_access_speed_key))
+ return do_csum_no_alignment(buff, len);
+
+ if (((unsigned long)buff & OFFSET_MASK) == 0)
+ return do_csum_no_alignment(buff, len);
+
+ return do_csum_with_alignment(buff, len);
+}
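A worked example of the head masking in do_csum_with_alignment() above (little-endian, RV64, values picked for illustration): a buffer that starts 3 bytes into an aligned doubleword gives offset = 3, and the shift pair zeroes exactly the three stray low bytes pulled in by the aligned load:

	unsigned long data = 0x8877665544332211UL;	/* word at the aligned base */
	unsigned int shift = 3 * 8;			/* offset = 3               */

	data = (data >> shift) << shift;		/* -> 0x8877665544000000    */
	/* bytes 0x11, 0x22, 0x33 (which precede buff) no longer contribute */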
diff --git a/arch/riscv/lib/riscv_v_helpers.c b/arch/riscv/lib/riscv_v_helpers.c
new file mode 100644
index 000000000000..be38a93cedae
--- /dev/null
+++ b/arch/riscv/lib/riscv_v_helpers.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 SiFive
+ * Author: Andy Chiu <andy.chiu@sifive.com>
+ */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+#include <asm/vector.h>
+#include <asm/simd.h>
+
+#ifdef CONFIG_MMU
+#include <asm/asm-prototypes.h>
+#endif
+
+#ifdef CONFIG_MMU
+size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD;
+int __asm_vector_usercopy(void *dst, void *src, size_t n);
+int fallback_scalar_usercopy(void *dst, void *src, size_t n);
+asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n)
+{
+ size_t remain, copied;
+
+ /* skip has_vector() check because it has been done by the asm */
+ if (!may_use_simd())
+ goto fallback;
+
+ kernel_vector_begin();
+ remain = __asm_vector_usercopy(dst, src, n);
+ kernel_vector_end();
+
+ if (remain) {
+ copied = n - remain;
+ dst += copied;
+ src += copied;
+ n = remain;
+ goto fallback;
+ }
+
+ return remain;
+
+fallback:
+ return fallback_scalar_usercopy(dst, src, n);
+}
+#endif
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index a9d356d6c03c..bc22c078aba8 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -3,6 +3,8 @@
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
.macro fixup op reg addr lbl
100:
@@ -11,6 +13,13 @@
.endm
SYM_FUNC_START(__asm_copy_to_user)
+#ifdef CONFIG_RISCV_ISA_V
+ ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V)
+ REG_L t0, riscv_v_usercopy_threshold
+ bltu a2, t0, fallback_scalar_usercopy
+ tail enter_vector_usercopy
+#endif
+SYM_FUNC_START(fallback_scalar_usercopy)
/* Enable access to user memory */
li t6, SR_SUM
@@ -181,6 +190,7 @@ SYM_FUNC_START(__asm_copy_to_user)
sub a0, t5, a0
ret
SYM_FUNC_END(__asm_copy_to_user)
+SYM_FUNC_END(fallback_scalar_usercopy)
EXPORT_SYMBOL(__asm_copy_to_user)
SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)
diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S
new file mode 100644
index 000000000000..51ab5588e9ff
--- /dev/null
+++ b/arch/riscv/lib/uaccess_vector.S
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+#include <asm/asm.h>
+#include <asm/asm-extable.h>
+#include <asm/csr.h>
+
+#define pDst a0
+#define pSrc a1
+#define iNum a2
+
+#define iVL a3
+
+#define ELEM_LMUL_SETTING m8
+#define vData v0
+
+ .macro fixup op reg addr lbl
+100:
+ \op \reg, \addr
+ _asm_extable 100b, \lbl
+ .endm
+
+SYM_FUNC_START(__asm_vector_usercopy)
+ /* Enable access to user memory */
+ li t6, SR_SUM
+ csrs CSR_STATUS, t6
+
+loop:
+ vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma
+ fixup vle8.v vData, (pSrc), 10f
+ sub iNum, iNum, iVL
+ add pSrc, pSrc, iVL
+ fixup vse8.v vData, (pDst), 11f
+ add pDst, pDst, iVL
+ bnez iNum, loop
+
+ /* Exception fixup for vector load is shared with normal exit */
+10:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+ mv a0, iNum
+ ret
+
+ /* Exception fixup code for vector store. */
+11:
+ /* Undo the subtraction after vle8.v */
+ add iNum, iNum, iVL
+ /* Make sure the scalar fallback skips already-processed bytes */
+ csrr t2, CSR_VSTART
+ sub iNum, iNum, t2
+ j 10b
+SYM_FUNC_END(__asm_vector_usercopy)
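The store-fault path above leans on CSR_VSTART: on a trap inside vse8.v it holds the element index at which the store stopped, and with SEW=8 the elements are bytes. The remainder computation, as a C-equivalent sketch (hypothetical helper, not part of the patch):

	static size_t store_fault_remainder(size_t num, size_t vl, size_t vstart)
	{
		num += vl;		/* undo the "sub iNum, iNum, iVL" after the load */
		return num - vstart;	/* the first vstart bytes did reach memory       */
	}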
diff --git a/arch/riscv/lib/xor.S b/arch/riscv/lib/xor.S
new file mode 100644
index 000000000000..b28f2430e52f
--- /dev/null
+++ b/arch/riscv/lib/xor.S
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+#include <linux/linkage.h>
+#include <linux/export.h>
+#include <asm/asm.h>
+
+SYM_FUNC_START(xor_regs_2_)
+ vsetvli a3, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a3
+ vxor.vv v16, v0, v8
+ add a2, a2, a3
+ vse8.v v16, (a1)
+ add a1, a1, a3
+ bnez a0, xor_regs_2_
+ ret
+SYM_FUNC_END(xor_regs_2_)
+EXPORT_SYMBOL(xor_regs_2_)
+
+SYM_FUNC_START(xor_regs_3_)
+ vsetvli a4, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a4
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a4
+ vxor.vv v16, v0, v16
+ add a3, a3, a4
+ vse8.v v16, (a1)
+ add a1, a1, a4
+ bnez a0, xor_regs_3_
+ ret
+SYM_FUNC_END(xor_regs_3_)
+EXPORT_SYMBOL(xor_regs_3_)
+
+SYM_FUNC_START(xor_regs_4_)
+ vsetvli a5, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a5
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a5
+ vxor.vv v0, v0, v16
+ vle8.v v24, (a4)
+ add a3, a3, a5
+ vxor.vv v16, v0, v24
+ add a4, a4, a5
+ vse8.v v16, (a1)
+ add a1, a1, a5
+ bnez a0, xor_regs_4_
+ ret
+SYM_FUNC_END(xor_regs_4_)
+EXPORT_SYMBOL(xor_regs_4_)
+
+SYM_FUNC_START(xor_regs_5_)
+ vsetvli a6, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a6
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a6
+ vxor.vv v0, v0, v16
+ vle8.v v24, (a4)
+ add a3, a3, a6
+ vxor.vv v0, v0, v24
+ vle8.v v8, (a5)
+ add a4, a4, a6
+ vxor.vv v16, v0, v8
+ add a5, a5, a6
+ vse8.v v16, (a1)
+ add a1, a1, a6
+ bnez a0, xor_regs_5_
+ ret
+SYM_FUNC_END(xor_regs_5_)
+EXPORT_SYMBOL(xor_regs_5_)
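All four routines above use the standard RVV strip-mining idiom: vsetvli requests the full remaining byte count and the hardware grants some vl each iteration. A scalar C sketch of xor_regs_2_ under that model (VL_GRANT is a hypothetical stand-in for whatever vsetvli returns):

	#define VL_GRANT 64	/* hypothetical per-iteration vector length */

	static void xor_2_sketch(unsigned long bytes, unsigned char *p1,
				 const unsigned char *p2)
	{
		while (bytes) {
			unsigned long vl = bytes < VL_GRANT ? bytes : VL_GRANT;
			unsigned long i;

			for (i = 0; i < vl; i++)	/* vle8 + vxor + vse8 */
				p1[i] ^= p2[i];
			p1 += vl;
			p2 += vl;
			bytes -= vl;
		}
	}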
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 35484d830fd6..dd1530af3ef1 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -27,6 +27,14 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex,
return true;
}
+static inline unsigned long regs_get_gpr(struct pt_regs *regs, unsigned int offset)
+{
+ if (unlikely(!offset || offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
unsigned long val)
{
@@ -50,6 +58,27 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
return true;
}
+static bool
+ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+ int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+ unsigned long data, addr, offset;
+
+ addr = regs_get_gpr(regs, reg_addr * sizeof(unsigned long));
+
+ offset = addr & 0x7UL;
+ addr &= ~0x7UL;
+
+ data = *(unsigned long *)addr >> (offset * 8);
+
+ regs_set_gpr(regs, reg_data * sizeof(unsigned long), data);
+
+ regs->epc = get_ex_fixup(ex);
+ return true;
+}
+
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
@@ -65,6 +94,8 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
+ case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
+ return ex_handler_load_unaligned_zeropad(ex, regs);
}
BUG();
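The new handler above is the fault-side half of load_unaligned_zeropad(): re-read from the aligned word containing the address, then shift so the in-page bytes land in the low end of the register and everything past the fault reads back as zero. The arithmetic as a standalone sketch (hypothetical helper):

	static unsigned long zeropad_value(unsigned long addr)
	{
		unsigned long offset = addr & 0x7UL;	/* byte offset in the word */
		unsigned long base = addr & ~0x7UL;	/* aligned word below addr */

		/* little-endian: the low bytes are the in-page ones; the shift
		 * zero-fills from above, which is the "zeropad" contract */
		return *(unsigned long *)base >> (offset * 8);
	}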
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 431596c0e20e..5ef2a6891158 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return pte;
}
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+ unsigned long hp_size = huge_page_size(h);
+
+ switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ return P4D_SIZE - PUD_SIZE;
+#endif
+ case PMD_SIZE:
+ return PUD_SIZE - PMD_SIZE;
+ case napot_cont_size(NAPOT_CONT64KB_ORDER):
+ return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
+ default:
+ break;
+ }
+
+ return 0UL;
+}
+
static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
return entry;
}
+static void clear_flush(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep,
+ unsigned long pgsize,
+ unsigned long ncontig)
+{
+ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+ unsigned long i, saddr = addr;
+
+ for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+ ptep_get_and_clear(mm, addr, ptep);
+
+ flush_tlb_range(&vma, saddr, addr);
+}
+
+/*
+ * When dealing with NAPOT mappings, the privileged specification indicates that
+ * "if an update needs to be made, the OS generally should first mark all of the
+ * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
+ * within the range, [...] then update the PTE(s), as described in Section
+ * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
+ * arm64.
+ */
void set_huge_pte_at(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
pte_t pte,
unsigned long sz)
{
- unsigned long hugepage_shift;
+ unsigned long hugepage_shift, pgsize;
int i, pte_num;
if (sz >= PGDIR_SIZE)
@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
hugepage_shift = PAGE_SHIFT;
pte_num = sz >> hugepage_shift;
- for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
+ pgsize = 1 << hugepage_shift;
+
+ if (!pte_present(pte)) {
+ for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+ set_ptes(mm, addr, ptep, pte, 1);
+ return;
+ }
+
+ if (!pte_napot(pte)) {
+ set_ptes(mm, addr, ptep, pte, 1);
+ return;
+ }
+
+ clear_flush(mm, addr, ptep, pgsize, pte_num);
+
+ for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
set_pte_at(mm, addr, ptep, pte);
}
@@ -306,7 +364,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_clear(mm, addr, ptep);
}
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
{
unsigned long order;
@@ -334,7 +392,7 @@ arch_initcall(napot_hugetlbpages_init);
#else
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
{
return false;
}
@@ -351,7 +409,7 @@ int pmd_huge(pmd_t pmd)
return pmd_leaf(pmd);
}
-bool __init arch_hugetlb_valid_size(unsigned long size)
+static bool __hugetlb_valid_size(unsigned long size)
{
if (size == HPAGE_SIZE)
return true;
@@ -363,6 +421,18 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
return false;
}
+bool __init arch_hugetlb_valid_size(unsigned long size)
+{
+ return __hugetlb_valid_size(size);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+ return __hugetlb_valid_size(huge_page_size(h));
+}
+#endif
+
#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index a65937336cdc..fa34cf55037b 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1060,7 +1060,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
#ifdef CONFIG_XIP_KERNEL
+#ifdef CONFIG_64BIT
kernel_map.page_offset = PAGE_OFFSET_L3;
+#else
+ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
+#endif
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
@@ -1381,16 +1385,39 @@ void __init misc_mem_init(void)
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
arch_numa_init();
sparse_init();
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* The entire VMEMMAP region has been populated. Flush TLB for this region */
+ local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
+#endif
zone_sizes_init();
arch_reserve_crashkernel();
memblock_dump_all();
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+ unsigned long addr, unsigned long next)
+{
+ pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
+}
+
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+ unsigned long addr, unsigned long next)
+{
+ vmemmap_verify((pte_t *)pmdp, node, addr, next);
+ return 1;
+}
+
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
- return vmemmap_populate_basepages(start, end, node, NULL);
+ /*
+ * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we
+ * can't use hugepage mappings for a 2-level page table because in case of
+ * memory hotplug, we are not able to update all the page tables with
+ * the new PMDs.
+ */
+ return vmemmap_populate_hugepages(start, end, node, NULL);
}
#endif
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 8aadc5f71c93..893566e004b7 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -66,9 +66,10 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}
+/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- local_flush_tlb_range_asid(start, end, PAGE_SIZE, FLUSH_TLB_NO_ASID);
+ local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}
static void __ipi_flush_tlb_all(void *info)
@@ -98,29 +99,23 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
- unsigned long size, unsigned long stride)
+static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
+ unsigned long start, unsigned long size,
+ unsigned long stride)
{
struct flush_tlb_range_data ftd;
- const struct cpumask *cmask;
- unsigned long asid = FLUSH_TLB_NO_ASID;
bool broadcast;
- if (mm) {
- unsigned int cpuid;
+ if (cpumask_empty(cmask))
+ return;
- cmask = mm_cpumask(mm);
- if (cpumask_empty(cmask))
- return;
+ if (cmask != cpu_online_mask) {
+ unsigned int cpuid;
cpuid = get_cpu();
/* check if the tlbflush needs to be sent to other CPUs */
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-
- if (static_branch_unlikely(&use_asid_allocator))
- asid = atomic_long_read(&mm->context.id) & asid_mask;
} else {
- cmask = cpu_online_mask;
broadcast = true;
}
@@ -140,25 +135,34 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
local_flush_tlb_range_asid(start, size, stride, asid);
}
- if (mm)
+ if (cmask != cpu_online_mask)
put_cpu();
}
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+ return static_branch_unlikely(&use_asid_allocator) ?
+ atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+}
+
void flush_tlb_mm(struct mm_struct *mm)
{
- __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+ 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned int page_size)
{
- __flush_tlb_range(mm, start, end - start, page_size);
+ __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+ start, end - start, page_size);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+ __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ addr, PAGE_SIZE, PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -190,18 +194,45 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
}
- __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
+ __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ start, end - start, stride_size);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+ __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
+ start, end - start, PAGE_SIZE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
+ __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ start, end - start, PMD_SIZE);
}
#endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+ return true;
+}
+
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm,
+ unsigned long uaddr)
+{
+ cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+}
+
+void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+ __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
+ FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ cpumask_clear(&batch->cpumask);
+}
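The four hooks above wire RISC-V into the generic reclaim-side TLB batching: defer individual flushes, accumulate the CPUs that need one, then issue a single flush for the whole batch. A heavily condensed sketch of how the core mm drives them (not the literal caller):

	static void reclaim_unmap_sketch(struct mm_struct *mm, unsigned long uaddr)
	{
		struct arch_tlbflush_unmap_batch batch = { };

		if (!arch_tlbbatch_should_defer(mm)) {
			flush_tlb_mm(mm);
			return;
		}
		arch_tlbbatch_add_pending(&batch, mm, uaddr);	/* record CPUs */
		/* ... unmap more pages, accumulating more CPUs ... */
		arch_tlbbatch_flush(&batch);	/* one flush, then the mask clears */
	}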
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index a5ce1ab76ece..8b35f12a4452 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -18,6 +18,11 @@ static inline bool rvc_enabled(void)
return IS_ENABLED(CONFIG_RISCV_ISA_C);
}
+static inline bool rvzbb_enabled(void)
+{
+ return IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBB);
+}
+
enum {
RV_REG_ZERO = 0, /* The constant value 0 */
RV_REG_RA = 1, /* Return address */
@@ -730,6 +735,33 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
return rv_css_insn(0x6, imm, rs2, 0x2);
}
+/* RVZBB instructions. */
+static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
+{
+ return rv_i_insn(0x604, rs1, 1, rd, 0x13);
+}
+
+static inline u32 rvzbb_sexth(u8 rd, u8 rs1)
+{
+ return rv_i_insn(0x605, rs1, 1, rd, 0x13);
+}
+
+static inline u32 rvzbb_zexth(u8 rd, u8 rs)
+{
+ if (IS_ENABLED(CONFIG_64BIT))
+ return rv_i_insn(0x80, rs, 4, rd, 0x3b);
+
+ return rv_i_insn(0x80, rs, 4, rd, 0x33);
+}
+
+static inline u32 rvzbb_rev8(u8 rd, u8 rs)
+{
+ if (IS_ENABLED(CONFIG_64BIT))
+ return rv_i_insn(0x6b8, rs, 5, rd, 0x13);
+
+ return rv_i_insn(0x698, rs, 5, rd, 0x13);
+}
+
/*
* RV64-only instructions.
*
@@ -1087,6 +1119,108 @@ static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
emit(rv_subw(rd, rs1, rs2), ctx);
}
+static inline void emit_sextb(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ emit(rvzbb_sextb(rd, rs), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs, 56, ctx);
+ emit_srai(rd, rd, 56, ctx);
+}
+
+static inline void emit_sexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ emit(rvzbb_sexth(rd, rs), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs, 48, ctx);
+ emit_srai(rd, rd, 48, ctx);
+}
+
+static inline void emit_sextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ emit_addiw(rd, rs, 0, ctx);
+}
+
+static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ emit(rvzbb_zexth(rd, rs), ctx);
+ return;
+ }
+
+ emit_slli(rd, rs, 48, ctx);
+ emit_srli(rd, rd, 48, ctx);
+}
+
+static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ emit_slli(rd, rs, 32, ctx);
+ emit_srli(rd, rd, 32, ctx);
+}
+
+static inline void emit_bswap(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvzbb_enabled()) {
+ int bits = 64 - imm;
+
+ emit(rvzbb_rev8(rd, rd), ctx);
+ if (bits)
+ emit_srli(rd, rd, bits, ctx);
+ return;
+ }
+
+ emit_li(RV_REG_T2, 0, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+ if (imm == 16)
+ goto out_be;
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+ if (imm == 32)
+ goto out_be;
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+out_be:
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+
+ emit_mv(rd, RV_REG_T2, ctx);
+}
+
#endif /* __riscv_xlen == 64 */
void bpf_jit_build_prologue(struct rv_jit_context *ctx);
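As a worked example of the rvzbb_* encoders above, assuming the usual I-type layout imm[31:20] | rs1[19:15] | funct3[14:12] | rd[11:7] | opcode[6:0] that rv_i_insn() packs, rvzbb_sextb(10, 11) yields the word for "sext.b a0, a1":

	u32 insn = (0x604 << 20) | (11 << 15) | (1 << 12) | (10 << 7) | 0x13;
	/* insn == 0x60459513, i.e. sext.b a0, a1 (a0 = x10, a1 = x11) */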
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 58dc64dd94a8..869e4282a2c4 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -141,6 +141,19 @@ static bool in_auipc_jalr_range(s64 val)
val < ((1L << 31) - (1L << 11));
}
+/* Redirect *rd to an alternate register to avoid clobbering the original */
+static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
+{
+ emit_sextw(ra, *rd, ctx);
+ *rd = ra;
+}
+
+static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
+{
+ emit_zextw(ra, *rd, ctx);
+ *rd = ra;
+}
+
/* Emit fixed-length instructions for address */
static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
{
@@ -326,12 +339,6 @@ static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}
-static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
-{
- emit_slli(reg, reg, 32, ctx);
- emit_srli(reg, reg, 32, ctx);
-}
-
static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
int tc_ninsn, off, start_insn = ctx->ninsns;
@@ -346,7 +353,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
*/
tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
ctx->offset[0];
- emit_zext_32(RV_REG_A2, ctx);
+ emit_zextw(RV_REG_A2, RV_REG_A2, ctx);
off = offsetof(struct bpf_array, map.max_entries);
if (is_12b_check(off, insn))
@@ -405,38 +412,6 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}
-static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
-{
- emit_mv(RV_REG_T2, *rd, ctx);
- emit_zext_32(RV_REG_T2, ctx);
- emit_mv(RV_REG_T1, *rs, ctx);
- emit_zext_32(RV_REG_T1, ctx);
- *rd = RV_REG_T2;
- *rs = RV_REG_T1;
-}
-
-static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
-{
- emit_addiw(RV_REG_T2, *rd, 0, ctx);
- emit_addiw(RV_REG_T1, *rs, 0, ctx);
- *rd = RV_REG_T2;
- *rs = RV_REG_T1;
-}
-
-static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
-{
- emit_mv(RV_REG_T2, *rd, ctx);
- emit_zext_32(RV_REG_T2, ctx);
- emit_zext_32(RV_REG_T1, ctx);
- *rd = RV_REG_T2;
-}
-
-static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
-{
- emit_addiw(RV_REG_T2, *rd, 0, ctx);
- *rd = RV_REG_T2;
-}
-
static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
struct rv_jit_context *ctx)
{
@@ -519,32 +494,32 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
case BPF_AND | BPF_FETCH:
emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
rv_amoand_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
case BPF_OR | BPF_FETCH:
emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
rv_amoor_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
case BPF_XOR | BPF_FETCH:
emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
case BPF_XCHG:
emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
- emit_zext_32(rs, ctx);
+ emit_zextw(rs, rs, ctx);
break;
/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
case BPF_CMPXCHG:
@@ -795,6 +770,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
void *orig_call = func_addr;
bool save_ret;
u32 insn;
@@ -878,7 +854,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
stack_size = round_up(stack_size, 16);
- if (func_addr) {
+ if (!is_struct_ops) {
/* For the trampoline called from function entry,
* the frame of traced function and the frame of
* trampoline need to be considered.
@@ -998,7 +974,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
- if (func_addr) {
+ if (!is_struct_ops) {
/* trampoline called from function entry */
emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
@@ -1090,7 +1066,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU64 | BPF_MOV | BPF_X:
if (imm == 1) {
/* Special mov32 for zext */
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
}
switch (insn->off) {
@@ -1098,16 +1074,17 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_mv(rd, rs, ctx);
break;
case 8:
+ emit_sextb(rd, rs, ctx);
+ break;
case 16:
- emit_slli(RV_REG_T1, rs, 64 - insn->off, ctx);
- emit_srai(rd, RV_REG_T1, 64 - insn->off, ctx);
+ emit_sexth(rd, rs, ctx);
break;
case 32:
- emit_addiw(rd, rs, 0, ctx);
+ emit_sextw(rd, rs, ctx);
break;
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = dst OP src */
@@ -1115,7 +1092,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU64 | BPF_ADD | BPF_X:
emit_add(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
@@ -1125,31 +1102,31 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_subw(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
emit_and(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
emit_or(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
emit_xor(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
@@ -1158,7 +1135,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
else
emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
@@ -1167,25 +1144,25 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
else
emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = -dst */
@@ -1193,73 +1170,27 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_ALU64 | BPF_NEG:
emit_sub(rd, RV_REG_ZERO, rd, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
- emit_slli(rd, rd, 48, ctx);
- emit_srli(rd, rd, 48, ctx);
+ emit_zexth(rd, rd, ctx);
break;
case 32:
if (!aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case 64:
/* Do nothing */
break;
}
break;
-
case BPF_ALU | BPF_END | BPF_FROM_BE:
case BPF_ALU64 | BPF_END | BPF_FROM_LE:
- emit_li(RV_REG_T2, 0, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
- if (imm == 16)
- goto out_be;
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
- if (imm == 32)
- goto out_be;
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
- emit_srli(rd, rd, 8, ctx);
-out_be:
- emit_andi(RV_REG_T1, rd, 0xff, ctx);
- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
-
- emit_mv(rd, RV_REG_T2, ctx);
+ emit_bswap(rd, imm, ctx);
break;
/* dst = imm */
@@ -1267,7 +1198,7 @@ out_be:
case BPF_ALU64 | BPF_MOV | BPF_K:
emit_imm(rd, imm, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* dst = dst OP imm */
@@ -1280,7 +1211,7 @@ out_be:
emit_add(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
@@ -1291,7 +1222,7 @@ out_be:
emit_sub(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
@@ -1302,7 +1233,7 @@ out_be:
emit_and(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
@@ -1313,7 +1244,7 @@ out_be:
emit_or(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
@@ -1324,7 +1255,7 @@ out_be:
emit_xor(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
@@ -1332,7 +1263,7 @@ out_be:
emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
rv_mulw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
@@ -1344,7 +1275,7 @@ out_be:
emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
rv_divuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
@@ -1356,14 +1287,14 @@ out_be:
emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
rv_remuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
emit_slli(rd, rd, imm, ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
@@ -1373,7 +1304,7 @@ out_be:
emit(rv_srliw(rd, rd, imm), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
@@ -1383,7 +1314,7 @@ out_be:
emit(rv_sraiw(rd, rd, imm), ctx);
if (!is64 && !aux->verifier_zext)
- emit_zext_32(rd, ctx);
+ emit_zextw(rd, rd, ctx);
break;
/* JUMP off */
@@ -1424,10 +1355,13 @@ out_be:
rvoff = rv_offset(i, off, ctx);
if (!is64) {
s = ctx->ninsns;
- if (is_signed_bpf_cond(BPF_OP(code)))
- emit_sext_32_rd_rs(&rd, &rs, ctx);
- else
- emit_zext_32_rd_rs(&rd, &rs, ctx);
+ if (is_signed_bpf_cond(BPF_OP(code))) {
+ emit_sextw_alt(&rs, RV_REG_T1, ctx);
+ emit_sextw_alt(&rd, RV_REG_T2, ctx);
+ } else {
+ emit_zextw_alt(&rs, RV_REG_T1, ctx);
+ emit_zextw_alt(&rd, RV_REG_T2, ctx);
+ }
e = ctx->ninsns;
/* Adjust for extra insns */
@@ -1438,8 +1372,7 @@ out_be:
/* Adjust for and */
rvoff -= 4;
emit_and(RV_REG_T1, rd, rs, ctx);
- emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
- ctx);
+ emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
} else {
emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
}
@@ -1468,18 +1401,18 @@ out_be:
case BPF_JMP32 | BPF_JSLE | BPF_K:
rvoff = rv_offset(i, off, ctx);
s = ctx->ninsns;
- if (imm) {
+ if (imm)
emit_imm(RV_REG_T1, imm, ctx);
- rs = RV_REG_T1;
- } else {
- /* If imm is 0, simply use zero register. */
- rs = RV_REG_ZERO;
- }
+ rs = imm ? RV_REG_T1 : RV_REG_ZERO;
if (!is64) {
- if (is_signed_bpf_cond(BPF_OP(code)))
- emit_sext_32_rd(&rd, ctx);
- else
- emit_zext_32_rd_t1(&rd, ctx);
+ if (is_signed_bpf_cond(BPF_OP(code))) {
+ emit_sextw_alt(&rd, RV_REG_T2, ctx);
+ /* rs has been sign extended */
+ } else {
+ emit_zextw_alt(&rd, RV_REG_T2, ctx);
+ if (imm)
+ emit_zextw(rs, rs, ctx);
+ }
}
e = ctx->ninsns;
@@ -1503,7 +1436,7 @@ out_be:
* as t1 is used only in comparison against zero.
*/
if (!is64 && imm < 0)
- emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
+ emit_sextw(RV_REG_T1, RV_REG_T1, ctx);
e = ctx->ninsns;
rvoff -= ninsns_rvoff(e - s);
emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
@@ -1873,3 +1806,8 @@ bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+ return true;
+}
diff --git a/arch/s390/configs/compat.config b/arch/s390/configs/compat.config
new file mode 100644
index 000000000000..6fd051453ae8
--- /dev/null
+++ b/arch/s390/configs/compat.config
@@ -0,0 +1,3 @@
+# Help: Enable compat support
+CONFIG_COMPAT=y
+CONFIG_COMPAT_32BIT_TIME=y
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 67ba0157fbdb..c924be0d7ed8 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -118,7 +118,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
-CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
@@ -374,6 +373,7 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@@ -436,9 +436,6 @@ CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
# CONFIG_MD_BITMAP_FILE is not set
-CONFIG_MD_LINEAR=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
@@ -638,7 +635,7 @@ CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=m
CONFIG_NETFS_STATS=y
-CONFIG_FSCACHE=m
+CONFIG_FSCACHE=y
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
@@ -708,7 +705,6 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
-CONFIG_INIT_STACK_NONE=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -738,7 +734,6 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 4c2650c1fbdd..c8f0c9fe40d7 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -109,7 +109,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
-CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
@@ -364,6 +363,7 @@ CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
@@ -426,9 +426,6 @@ CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
# CONFIG_MD_BITMAP_FILE is not set
-CONFIG_MD_LINEAR=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
CONFIG_MD_CLUSTER=m
CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=y
@@ -623,7 +620,7 @@ CONFIG_CUSE=m
CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=m
CONFIG_NETFS_STATS=y
-CONFIG_FSCACHE=m
+CONFIG_FSCACHE=y
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
@@ -692,7 +689,6 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
-CONFIG_INIT_STACK_NONE=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_USER=m
@@ -723,11 +719,9 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_AEGIS128=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 30d2a1687665..c51f3ec4eb28 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=2
@@ -64,7 +65,6 @@ CONFIG_ZFCP=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
-CONFIG_INIT_STACK_NONE=y
# CONFIG_ZLIB_DFLTCC is not set
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_PRINTK_TIME=y
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 895f774bbcc5..bf78cf381dfc 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -25,7 +25,7 @@
*/
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("0: brcl 0,%l[label]\n"
+ asm goto("0: brcl 0,%l[label]\n"
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
@@ -39,7 +39,7 @@ label:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("0: brcl 15,%l[label]\n"
+ asm goto("0: brcl 15,%l[label]\n"
".pushsection __jump_table,\"aw\"\n"
".balign 8\n"
".long 0b-.,%l[label]-.\n"
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 621a17fd1a1b..f875a404a0a0 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -676,8 +676,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.crypto.pqap_hook) {
pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
ret = pqap_hook(vcpu);
- if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
- kvm_s390_set_psw_cc(vcpu, 3);
+ if (!ret) {
+ if (vcpu->run->s.regs.gprs[1] & 0x00ff0000)
+ kvm_s390_set_psw_cc(vcpu, 3);
+ else
+ kvm_s390_set_psw_cc(vcpu, 0);
+ }
up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
return ret;
}
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index fef42e2a80a2..3af3bd20ac7b 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1235,7 +1235,6 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
if (IS_ERR(gmap))
return PTR_ERR(gmap);
- gmap->private = vcpu->kvm;
vcpu->kvm->stat.gmap_shadow_create++;
WRITE_ONCE(vsie_page->gmap, gmap);
return 0;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 6f96b5a71c63..8da39deb56ca 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1691,6 +1691,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
return ERR_PTR(-ENOMEM);
new->mm = parent->mm;
new->parent = gmap_get(parent);
+ new->private = parent->private;
new->orig_asce = asce;
new->edat_level = edat_level;
new->initialized = false;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 676ac74026a8..52a44e353796 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -252,7 +252,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
- zpci_memcpy_toio(to, from, count);
+ zpci_memcpy_toio(to, from, count * 8);
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
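The count * 8 fix above matters because __iowrite64_copy() takes its count in 64-bit units, not bytes. Compare the generic fallback semantics (a trimmed sketch along the lines of lib/iomap_copy.c):

	void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
	{
		u64 __iomem *dst = to;
		const u64 *src = from;
		const u64 *end = src + count;	/* count * 8 bytes in total */

		while (src < end)
			__raw_writeq(*src++, dst++);
	}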
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 0f279360838a..30d117f9ad7e 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -1220,7 +1220,7 @@ static int __init arch_setup(void)
lcdc_info.ch[0].num_modes = ARRAY_SIZE(ecovec_dvi_modes);
/* No backlight */
- gpio_backlight_data.fbdev = NULL;
+ gpio_backlight_data.dev = NULL;
gpio_set_value(GPIO_PTA2, 1);
gpio_set_value(GPIO_PTU1, 1);
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index cf59b98446e4..7b427c17fbfe 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -171,7 +171,8 @@ CONFIG_BTRFS_FS=y
CONFIG_AUTOFS_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
-CONFIG_FSCACHE=m
+CONFIG_NETFS_SUPPORT=m
+CONFIG_FSCACHE=y
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
index 6e8664448048..118744d349e2 100644
--- a/arch/sh/kernel/vsyscall/Makefile
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += vsyscall.o vsyscall-syscall.o vsyscall-syms.o
-$(obj)/vsyscall-syscall.o: \
- $(foreach F,trapa,$(obj)/vsyscall-$F.so)
+$(obj)/vsyscall-syscall.o: $(obj)/vsyscall-trapa.so
# Teach kbuild about targets
-targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
+targets += vsyscall-trapa.o vsyscall-trapa.so
targets += vsyscall-note.o vsyscall.lds vsyscall-dummy.o
# The DSO images are built using a special linker script
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 5f6035936131..2a03daa68f28 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -60,7 +60,7 @@ libs-y += arch/sparc/prom/
libs-y += arch/sparc/lib/
drivers-$(CONFIG_PM) += arch/sparc/power/
-drivers-$(CONFIG_FB) += arch/sparc/video/
+drivers-$(CONFIG_FB_CORE) += arch/sparc/video/
boot := arch/sparc/boot
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 94eb529dcb77..2718cbea826a 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -10,7 +10,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"nop\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
@@ -26,7 +26,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"b %l[l_yes]\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 3c38ca40a22b..a84598568300 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -13,7 +13,10 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <asm/apb.h>
#include <asm/iommu.h>
@@ -456,7 +459,6 @@ static void sabre_pbm_init(struct pci_pbm_info *pbm,
static const struct of_device_id sabre_match[];
static int sabre_probe(struct platform_device *op)
{
- const struct of_device_id *match;
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->dev.of_node;
struct pci_pbm_info *pbm;
@@ -466,8 +468,7 @@ static int sabre_probe(struct platform_device *op)
const u32 *vdma;
u64 clear_irq;
- match = of_match_device(sabre_match, &op->dev);
- hummingbird_p = match && (match->data != NULL);
+ hummingbird_p = (uintptr_t)device_get_match_data(&op->dev);
if (!hummingbird_p) {
struct device_node *cpu_dp;
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 23b47f7fdb1d..5d8dd4949586 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -11,7 +11,10 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/numa.h>
#include <asm/iommu.h>
@@ -1459,15 +1462,13 @@ out_err:
return err;
}
-static const struct of_device_id schizo_match[];
static int schizo_probe(struct platform_device *op)
{
- const struct of_device_id *match;
+ unsigned long chip_type = (unsigned long)device_get_match_data(&op->dev);
- match = of_match_device(schizo_match, &op->dev);
- if (!match)
+ if (!chip_type)
return -EINVAL;
- return __schizo_init(op, (unsigned long)match->data);
+ return __schizo_init(op, chip_type);
}
/* The ordering of this table is very important. Some Tomatillo
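Both sparc PCI conversions (pci_sabre.c above, pci_schizo.c here) follow the
same pattern: rather than fetching the whole of_device_id with
of_match_device() and dereferencing ->data by hand, device_get_match_data()
hands back the ->data pointer directly and also works for non-OF firmware
nodes. Illustrative sketch with a hypothetical match table and probe:

	static const struct of_device_id demo_match[] = {
		{ .compatible = "vendor,chip-a", .data = (void *)1UL, },
		{ /* sentinel */ }
	};

	static int demo_probe(struct platform_device *op)
	{
		unsigned long chip_type = (uintptr_t)device_get_match_data(&op->dev);

		return chip_type ? 0 : -EINVAL;
	}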
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index d08c3a0443f3..7f5eedf1f5e0 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -3,9 +3,6 @@
# Building vDSO images for sparc.
#
-VDSO64-$(CONFIG_SPARC64) := y
-VDSOCOMPAT-$(CONFIG_COMPAT) := y
-
# files to link into the vdso
vobjs-y := vdso-note.o vclock_gettime.o
@@ -13,22 +10,15 @@ vobjs-y := vdso-note.o vclock_gettime.o
obj-y += vma.o
# vDSO images to build
-vdso_img-$(VDSO64-y) += 64
-vdso_img-$(VDSOCOMPAT-y) += 32
+obj-$(CONFIG_SPARC64) += vdso-image-64.o
+obj-$(CONFIG_COMPAT) += vdso-image-32.o
-vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+vobjs := $(addprefix $(obj)/, $(vobjs-y))
$(obj)/vdso.o: $(obj)/vdso.so
targets += vdso.lds $(vobjs-y)
-
-# Build the vDSO image C files and link them in.
-vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
-vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
-vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
-obj-y += $(vdso_img_objs)
-targets += $(vdso_img_cfiles)
-targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
+targets += $(foreach x, 32 64, vdso-image-$(x).c vdso$(x).so vdso$(x).so.dbg)
CPPFLAGS_vdso.lds += -P -C
diff --git a/arch/sparc/video/Makefile b/arch/sparc/video/Makefile
index 6baddbd58e4d..d4d83f1702c6 100644
--- a/arch/sparc/video/Makefile
+++ b/arch/sparc/video/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_FB) += fbdev.o
+obj-$(CONFIG_FB_CORE) += fbdev.o
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 82f05f250634..34957dcb88b9 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -115,7 +115,9 @@ archprepare:
$(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
-LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie)
+ifdef CONFIG_LD_SCRIPT_DYN
+LINK-$(call gcc-min-version, 60100)$(CONFIG_CC_IS_CLANG) += -no-pie
+endif
LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib
CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h
index 4b6d1b526bc1..66fe06db872f 100644
--- a/arch/um/include/asm/cpufeature.h
+++ b/arch/um/include/asm/cpufeature.h
@@ -75,7 +75,7 @@ extern void setup_clear_cpu_cap(unsigned int bit);
*/
static __always_inline bool _static_cpu_has(u16 bit)
{
- asm_volatile_goto("1: jmp 6f\n"
+ asm goto("1: jmp 6f\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b9224cf2ee4d..2a7279d80460 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -379,7 +379,7 @@ config X86_CMOV
config X86_MINIMUM_CPU_FAMILY
int
default "64" if X86_64
- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
+ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
default "5" if X86_32 && X86_CMPXCHG64
default "4"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1a068de12a56..da8f3caf2781 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -112,13 +112,13 @@ ifeq ($(CONFIG_X86_32),y)
# temporary until string.h is fixed
KBUILD_CFLAGS += -ffreestanding
- ifeq ($(CONFIG_STACKPROTECTOR),y)
- ifeq ($(CONFIG_SMP),y)
+ ifeq ($(CONFIG_STACKPROTECTOR),y)
+ ifeq ($(CONFIG_SMP),y)
KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
- else
+ else
KBUILD_CFLAGS += -mstack-protector-guard=global
- endif
- endif
+ endif
+ endif
else
BITS := 64
UTS_MACHINE := x86_64
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index b2771710ed98..a1bbedd989e4 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -106,8 +106,7 @@ extra_header_fields:
.word 0 # MinorSubsystemVersion
.long 0 # Win32VersionValue
- .long setup_size + ZO__end + pecompat_vsize
- # SizeOfImage
+ .long setup_size + ZO__end # SizeOfImage
.long salign # SizeOfHeaders
.long 0 # CheckSum
@@ -143,7 +142,7 @@ section_table:
.ascii ".setup"
.byte 0
.byte 0
- .long setup_size - salign # VirtualSize
+ .long pecompat_fstart - salign # VirtualSize
.long salign # VirtualAddress
.long pecompat_fstart - salign # SizeOfRawData
.long salign # PointerToRawData
@@ -156,8 +155,8 @@ section_table:
#ifdef CONFIG_EFI_MIXED
.asciz ".compat"
- .long 8 # VirtualSize
- .long setup_size + ZO__end # VirtualAddress
+ .long pecompat_fsize # VirtualSize
+ .long pecompat_fstart # VirtualAddress
.long pecompat_fsize # SizeOfRawData
.long pecompat_fstart # PointerToRawData
@@ -172,17 +171,16 @@ section_table:
* modes this image supports.
*/
.pushsection ".pecompat", "a", @progbits
- .balign falign
- .set pecompat_vsize, salign
+ .balign salign
.globl pecompat_fstart
pecompat_fstart:
.byte 0x1 # Version
.byte 8 # Size
.word IMAGE_FILE_MACHINE_I386 # PE machine type
.long setup_size + ZO_efi32_pe_entry # Entrypoint
+ .byte 0x0 # Sentinel
.popsection
#else
- .set pecompat_vsize, 0
.set pecompat_fstart, setup_size
#endif
.ascii ".text"
diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
index 83bb7efad8ae..3a2d1360abb0 100644
--- a/arch/x86/boot/setup.ld
+++ b/arch/x86/boot/setup.ld
@@ -24,6 +24,9 @@ SECTIONS
.text : { *(.text .text.*) }
.text32 : { *(.text32) }
+ .pecompat : { *(.pecompat) }
+ PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
+
. = ALIGN(16);
.rodata : { *(.rodata*) }
@@ -36,9 +39,6 @@ SECTIONS
. = ALIGN(16);
.data : { *(.data*) }
- .pecompat : { *(.pecompat) }
- PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
-
.signature : {
setup_sig = .;
LONG(0x5a5aaa55)
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index 8c8d38f0cb1d..003379049924 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -6,6 +6,9 @@
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/msr-index.h>
+#include <asm/unwind_hints.h>
+#include <asm/segment.h>
+#include <asm/cache.h>
.pushsection .noinstr.text, "ax"
@@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
EXPORT_SYMBOL_GPL(entry_ibpb);
.popsection
+
+/*
+ * Define the VERW operand that is disguised as entry code so that
+ * it can be referenced with KPTI enabled. This ensures VERW can be
+ * used late in the exit-to-user path after page tables are switched.
+ */
+.pushsection .entry.text, "ax"
+
+.align L1_CACHE_BYTES, 0xcc
+SYM_CODE_START_NOALIGN(mds_verw_sel)
+ UNWIND_HINT_UNDEFINED
+ ANNOTATE_NOENDBR
+ .word __KERNEL_DS
+.align L1_CACHE_BYTES, 0xcc
+SYM_CODE_END(mds_verw_sel);
+/* For KVM */
+EXPORT_SYMBOL_GPL(mds_verw_sel);
+
+.popsection
+
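mds_verw_sel above is never executed: it is a 16-bit data word holding the
__KERNEL_DS selector, placed in .entry.text so it stays mapped after KPTI
switches page tables. The CLEAR_CPU_BUFFERS macro added in nospec-branch.h
below uses it as the memory operand of VERW, so on 64-bit builds the
alternative effectively emits (and emits nothing when the feature bit is
clear):

	verw	mds_verw_sel(%rip)	/* memory-operand form clears CPU buffers */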
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index c73047bf9f4b..fba427646805 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -885,6 +885,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
BUG_IF_WRONG_CR3 no_user_check=1
popfl
popl %eax
+ CLEAR_CPU_BUFFERS
/*
* Return back to the vDSO, which will pop ecx and edx.
@@ -954,6 +955,7 @@ restore_all_switch_stack:
/* Restore user state */
RESTORE_REGS pop=4 # skip orig_eax/error_code
+ CLEAR_CPU_BUFFERS
.Lirq_return:
/*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -1146,6 +1148,7 @@ SYM_CODE_START(asm_exc_nmi)
/* Not on SYSENTER stack. */
call exc_nmi
+ CLEAR_CPU_BUFFERS
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c40f89ab1b4c..9bb485977629 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -161,6 +161,7 @@ syscall_return_via_sysret:
SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
swapgs
+ CLEAR_CPU_BUFFERS
sysretq
SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
@@ -573,6 +574,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
.Lswapgs_and_iret:
swapgs
+ CLEAR_CPU_BUFFERS
/* Assert that the IRET frame indicates user mode. */
testb $3, 8(%rsp)
jnz .Lnative_iret
@@ -723,6 +725,8 @@ native_irq_return_ldt:
*/
popq %rax /* Restore user RAX */
+ CLEAR_CPU_BUFFERS
+
/*
* RSP now points to an ordinary IRET frame, except that the page
* is read-only and RSP[31:16] are preloaded with the userspace
@@ -1450,6 +1454,12 @@ nmi_restore:
movq $0, 5*8(%rsp) /* clear "NMI executing" */
/*
+ * Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like an
+ * NMI hitting the kernel after user state is restored. For an unprivileged
+ * user these conditions are hard to meet.
+ */
+
+ /*
* iretq reads the "iret" frame and exits the NMI stack in a
* single instruction. We are returning to kernel mode, so this
* cannot result in a fault. Similarly, we don't need to worry
@@ -1466,6 +1476,7 @@ SYM_CODE_START(entry_SYSCALL32_ignore)
UNWIND_HINT_END_OF_STACK
ENDBR
mov $-ENOSYS, %eax
+ CLEAR_CPU_BUFFERS
sysretl
SYM_CODE_END(entry_SYSCALL32_ignore)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index de94e2e84ecc..eabf48c4d4b4 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -270,6 +270,7 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)
xorl %r9d, %r9d
xorl %r10d, %r10d
swapgs
+ CLEAR_CPU_BUFFERS
sysretl
SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 96e6c51515f5..cf1b78cb2d04 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -16,6 +16,11 @@
extern struct boot_params boot_params;
static struct real_mode_header hv_vtl_real_mode_header;
+static bool __init hv_vtl_msi_ext_dest_id(void)
+{
+ return true;
+}
+
void __init hv_vtl_init_platform(void)
{
pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
@@ -38,6 +43,8 @@ void __init hv_vtl_init_platform(void)
x86_platform.legacy.warm_reset = 0;
x86_platform.legacy.reserve_bios_regions = 0;
x86_platform.legacy.devices.pnpbios = 0;
+
+ x86_init.hyper.msi_ext_dest_id = hv_vtl_msi_ext_dest_id;
}
static inline u64 hv_vtl_system_desc_base(struct ldttss_desc *desc)
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 7dcbf153ad72..768d73de0d09 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -15,6 +15,7 @@
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
+#include <asm/set_memory.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
@@ -503,6 +504,31 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
}
/*
+ * When transitioning memory between encrypted and decrypted, the caller
+ * of set_memory_encrypted() or set_memory_decrypted() is responsible for
+ * ensuring that the memory isn't in use and isn't referenced while the
+ * transition is in progress. The transition has multiple steps, and the
+ * memory is in an inconsistent state until all steps are complete. A
+ * reference while the state is inconsistent could result in an exception
+ * that can't be cleanly fixed up.
+ *
+ * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
+ * stray reference that can't be prevented by the caller, so Linux has
+ * specific code to handle this case. But when the #VC and #VE exceptions
+ * are routed to a paravisor, the specific code doesn't work. To avoid this
+ * problem, mark the pages as "not present" while the transition is in
+ * progress. If load_unaligned_zeropad() causes a stray reference, a normal
+ * page fault is generated instead of #VC or #VE, and the page-fault-based
+ * handlers for load_unaligned_zeropad() resolve the reference. When the
+ * transition is complete, hv_vtom_set_host_visibility() marks the pages
+ * as "present" again.
+ */
+static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
+{
+ return !set_memory_np(kbuffer, pagecount);
+}
+
+/*
* hv_vtom_set_host_visibility - Set specified memory visible to host.
*
* In Isolation VM, all guest memory is encrypted from host and guest
@@ -515,16 +541,28 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
enum hv_mem_host_visibility visibility = enc ?
VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
u64 *pfn_array;
+ phys_addr_t paddr;
+ void *vaddr;
int ret = 0;
bool result = true;
int i, pfn;
pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
- if (!pfn_array)
- return false;
+ if (!pfn_array) {
+ result = false;
+ goto err_set_memory_p;
+ }
for (i = 0, pfn = 0; i < pagecount; i++) {
- pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
+ /*
+ * Use slow_virt_to_phys() because the PRESENT bit has been
+ * temporarily cleared in the PTEs. slow_virt_to_phys() works
+ * without the PRESENT bit while virt_to_hvpfn() or similar
+ * does not.
+ */
+ vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
+ paddr = slow_virt_to_phys(vaddr);
+ pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
pfn++;
if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
@@ -538,14 +576,30 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
}
}
- err_free_pfn_array:
+err_free_pfn_array:
kfree(pfn_array);
+
+err_set_memory_p:
+ /*
+ * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
+ * did. Do this even if there is an error earlier in this function in
+ * order to avoid leaving the memory range in a "broken" state. Setting
+ * the PRESENT bits shouldn't fail, but return an error if it does.
+ */
+ if (set_memory_p(kbuffer, pagecount))
+ result = false;
+
return result;
}
static bool hv_vtom_tlb_flush_required(bool private)
{
- return true;
+ /*
+ * Since hv_vtom_clear_present() marks the PTEs as "not present"
+ * and flushes the TLB, they can't be in the TLB. That makes the
+ * flush controlled by this function redundant, so return "false".
+ */
+ return false;
}
static bool hv_vtom_cache_flush_required(void)
@@ -608,6 +662,7 @@ void __init hv_vtom_init(void)
x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
+ x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
/* Set WB as the default cache mode. */
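Putting the ivm.c pieces together, the intended ordering of an
encrypted<->decrypted transition is now (a sketch using the names from the
hunks above):

	/* 1. prepare: clear PRESENT so that a stray load_unaligned_zeropad()
	 *    reference takes a plain #PF instead of #VC/#VE */
	set_memory_np(kbuffer, pagecount);

	/* 2. flip host visibility; slow_virt_to_phys() is used because the
	 *    PTEs are temporarily not present */
	hv_mark_gpa_visibility(pfn, pfn_array, visibility);

	/* 3. finish: restore PRESENT even on failure, never leaving the
	 *    range unmapped */
	set_memory_p(kbuffer, pagecount);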
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index 6ae2d16a7613..76c310b19b11 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -10,13 +10,14 @@ enum cc_vendor {
CC_VENDOR_INTEL,
};
-extern enum cc_vendor cc_vendor;
-
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+extern enum cc_vendor cc_vendor;
void cc_set_mask(u64 mask);
u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
#else
+#define cc_vendor (CC_VENDOR_NONE)
+
static inline u64 cc_mkenc(u64 val)
{
return val;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index a26bebbdff87..a1273698fc43 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -168,7 +168,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline bool _static_cpu_has(u16 bit)
{
- asm_volatile_goto(
+ asm goto(
ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
".pushsection .altinstr_aux,\"ax\"\n"
"6:\n"
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 29cb275a219d..2b62cdd8dd12 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -81,10 +81,8 @@
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-
-/* CPU types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */
+#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
@@ -97,7 +95,7 @@
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index ce8f50192ae3..7e523bb3d2d3 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -91,7 +91,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
static __always_inline void arch_exit_to_user_mode(void)
{
- mds_user_clear_cpu_buffers();
amd_clear_divider();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 197316121f04..b65e9c46b922 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -162,6 +162,8 @@
#define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */
#define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */
+#define INTEL_FAM6_ATOM_DARKMONT_X 0xDD /* Clearwater Forest */
+
/* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 071572e23d3a..cbbef32517f0 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -24,7 +24,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:"
+ asm goto("1:"
"jmp %l[l_yes] # objtool NOPs this \n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (2 | branch) : : l_yes);
@@ -38,7 +38,7 @@ l_yes:
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
- asm_volatile_goto("1:"
+ asm goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
@@ -52,7 +52,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
- asm_volatile_goto("1:"
+ asm goto("1:"
"jmp %l[l_yes]\n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
diff --git a/arch/x86/include/asm/kmsan.h b/arch/x86/include/asm/kmsan.h
index 8fa6ac0e2d76..d91b37f5b4bb 100644
--- a/arch/x86/include/asm/kmsan.h
+++ b/arch/x86/include/asm/kmsan.h
@@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
{
unsigned long x = (unsigned long)addr;
unsigned long y = x - __START_KERNEL_map;
+ bool ret;
/* use the carry flag to determine if x was < __START_KERNEL_map */
if (unlikely(x > y)) {
@@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
return false;
}
- return pfn_valid(x >> PAGE_SHIFT);
+ /*
+ * pfn_valid() relies on RCU, and may call into the scheduler on exiting
+ * the critical section. However, this would result in recursion with
+ * KMSAN. Therefore, disable preemption here, and re-enable preemption
+ * below while suppressing reschedules to avoid recursion.
+ *
+ * Note, this occasionally sacrifices scheduling guarantees. That said,
+ * a kernel compiled with KMSAN has already given up on any performance
+ * guarantees due to being heavily instrumented.
+ */
+ preempt_disable();
+ ret = pfn_valid(x >> PAGE_SHIFT);
+ preempt_enable_no_resched();
+
+ return ret;
}
#endif /* !MODULE */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b5b2d0fde579..d271ba20a0b2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1145,6 +1145,8 @@ struct kvm_hv {
unsigned int synic_auto_eoi_used;
struct kvm_hv_syndbg hv_syndbg;
+
+ bool xsaves_xsavec_checked;
};
#endif
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 262e65539f83..2aa52cab1e46 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -315,6 +315,17 @@
#endif
.endm
+/*
+ * Macro to execute the VERW instruction, which mitigates transient data
+ * sampling attacks such as MDS. On affected systems a microcode update
+ * overloaded the VERW instruction to also clear the CPU buffers. VERW
+ * clobbers CFLAGS.ZF.
+ *
+ * Note: Only the memory operand variant of VERW clears the CPU buffers.
+ */
+.macro CLEAR_CPU_BUFFERS
+ ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
+.endm
+
#else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE \
@@ -529,13 +540,14 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
-DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+extern u16 mds_verw_sel;
+
#include <asm/segment.h>
/**
@@ -562,17 +574,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
}
/**
- * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
- *
- * Clear CPU buffers if the corresponding static key is enabled
- */
-static __always_inline void mds_user_clear_cpu_buffers(void)
-{
- if (static_branch_likely(&mds_user_clear))
- mds_clear_cpu_buffers();
-}
-
-/**
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
*
* Clear CPU buffers if the corresponding static key is enabled
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 4b081e0d3306..363266cbcada 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -13,7 +13,7 @@
#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
({ \
bool c = false; \
- asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \
+ asm goto (fullop "; j" #cc " %l[cc_label]" \
: : [var] "m" (_var), ## __VA_ARGS__ \
: clobbers : cc_label); \
if (0) { \
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index a5e89641bd2d..9aee31862b4a 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -47,6 +47,7 @@ int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
+int set_memory_p(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index d6cd9344f6c7..48f8dd47cf68 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -205,7 +205,7 @@ static inline void clwb(volatile void *__p)
#ifdef CONFIG_X86_USER_SHADOW_STACK
static inline int write_user_shstk_64(u64 __user *addr, u64 val)
{
- asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
+ asm goto("1: wrussq %[val], (%[addr])\n"
_ASM_EXTABLE(1b, %l[fail])
:: [addr] "r" (addr), [val] "r" (val)
:: fail);
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index 21f9407be5d3..7e88705e907f 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -58,12 +58,29 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
,,regs->di,,regs->si,,regs->dx \
,,regs->r10,,regs->r8,,regs->r9) \
+
+/* SYSCALL_PT_ARGS is adapted from s390x */
+#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \
+ SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
+#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \
+ SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
+#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4) \
+ SYSCALL_PT_ARG3(m, t1, t2, t3), m(t4, (regs->si))
+#define SYSCALL_PT_ARG3(m, t1, t2, t3) \
+ SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
+#define SYSCALL_PT_ARG2(m, t1, t2) \
+ SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
+#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
+#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
+
+#define __SC_COMPAT_CAST(t, a) \
+ (__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U))) \
+ (unsigned int)a
+
/* Mapping of registers to parameters for syscalls on i386 */
#define SC_IA32_REGS_TO_ARGS(x, ...) \
- __MAP(x,__SC_ARGS \
- ,,(unsigned int)regs->bx,,(unsigned int)regs->cx \
- ,,(unsigned int)regs->dx,,(unsigned int)regs->si \
- ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
+ SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST, \
+ __MAP(x, __SC_TYPE, __VA_ARGS__)) \
#define __SYS_STUB0(abi, name) \
long __##abi##_##name(const struct pt_regs *regs); \
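Worked expansion of the new helpers: for a three-argument compat syscall,
SYSCALL_PT_ARGS(3, __SC_COMPAT_CAST, t1, t2, t3) unrolls through
SYSCALL_PT_ARG3/2/1 into

	__SC_COMPAT_CAST(t1, (regs->bx)),
	__SC_COMPAT_CAST(t2, (regs->cx)),
	__SC_COMPAT_CAST(t3, (regs->dx))

i.e. the i386 ebx/ecx/edx argument order, where each register value is first
truncated to 32 bits and then converted to int or unsigned int depending on
whether the declared parameter type is long (__TYPE_IS_L), so sign or zero
extension happens as that type dictates.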
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5c367c1290c3..237dc8cdd12b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -133,7 +133,7 @@ extern int __get_user_bad(void);
#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label) \
- asm_volatile_goto("\n" \
+ asm goto("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
_ASM_EXTABLE_UA(1b, %l2) \
@@ -295,7 +295,7 @@ do { \
} while (0)
#define __get_user_asm(x, addr, itype, ltype, label) \
- asm_volatile_goto("\n" \
+ asm_goto_output("\n" \
"1: mov"itype" %[umem],%[output]\n" \
_ASM_EXTABLE_UA(1b, %l2) \
: [output] ltype(x) \
@@ -375,7 +375,7 @@ do { \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
- asm_volatile_goto("\n" \
+ asm_goto_output("\n" \
"1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
_ASM_EXTABLE_UA(1b, %l[label]) \
: CC_OUT(z) (success), \
@@ -394,7 +394,7 @@ do { \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
- asm_volatile_goto("\n" \
+ asm_goto_output("\n" \
"1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
_ASM_EXTABLE_UA(1b, %l[label]) \
: CC_OUT(z) (success), \
@@ -477,7 +477,7 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_goto(x, addr, itype, ltype, label) \
- asm_volatile_goto("\n" \
+ asm goto("\n" \
"1: mov"itype" %0,%1\n" \
_ASM_EXTABLE_UA(1b, %l2) \
: : ltype(x), "m" (__m(addr)) \
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index ab60a71a8dcb..472f0263dbc6 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -4,6 +4,7 @@
#include <linux/seqlock.h>
#include <uapi/asm/vsyscall.h>
+#include <asm/page_types.h>
#ifdef CONFIG_X86_VSYSCALL_EMULATION
extern void map_vsyscall(void);
@@ -24,4 +25,13 @@ static inline bool emulate_vsyscall(unsigned long error_code,
}
#endif
+/*
+ * The (legacy) vsyscall page is the lone page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
#endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index cc130b57542a..1d85cb7071cb 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -403,7 +403,7 @@ noinstr void BUG_func(void)
{
BUG();
}
-EXPORT_SYMBOL_GPL(BUG_func);
+EXPORT_SYMBOL(BUG_func);
#define CALL_RIP_REL_OPCODE 0xff
#define CALL_RIP_REL_MODRM 0x15
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9f42d1c59e09..f3abca334199 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -538,7 +538,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
/* Figure out Zen generations: */
switch (c->x86) {
- case 0x17: {
+ case 0x17:
switch (c->x86_model) {
case 0x00 ... 0x2f:
case 0x50 ... 0x5f:
@@ -554,8 +554,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
goto warn;
}
break;
- }
- case 0x19: {
+
+ case 0x19:
switch (c->x86_model) {
case 0x00 ... 0x0f:
case 0x20 ... 0x5f:
@@ -569,7 +569,20 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
goto warn;
}
break;
- }
+
+ case 0x1a:
+ switch (c->x86_model) {
+ case 0x00 ... 0x0f:
+ case 0x20 ... 0x2f:
+ case 0x40 ... 0x4f:
+ case 0x70 ... 0x7f:
+ setup_force_cpu_cap(X86_FEATURE_ZEN5);
+ break;
+ default:
+ goto warn;
+ }
+ break;
+
default:
break;
}
@@ -1039,6 +1052,11 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}
+static void init_amd_zen5(struct cpuinfo_x86 *c)
+{
+ init_amd_zen_common();
+}
+
static void init_amd(struct cpuinfo_x86 *c)
{
u64 vm_cr;
@@ -1084,6 +1102,8 @@ static void init_amd(struct cpuinfo_x86 *c)
init_amd_zen3(c);
else if (boot_cpu_has(X86_FEATURE_ZEN4))
init_amd_zen4(c);
+ else if (boot_cpu_has(X86_FEATURE_ZEN5))
+ init_amd_zen5(c);
/*
* Enable workaround for FXSAVE leak on CPUs
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bb0ab8466b91..48d049cd74e7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -111,9 +111,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
-/* Control MDS CPU buffer clear before returning to user space */
-DEFINE_STATIC_KEY_FALSE(mds_user_clear);
-EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -252,7 +249,7 @@ static void __init mds_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV;
- static_branch_enable(&mds_user_clear);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
(mds_nosmt || cpu_mitigations_auto_nosmt()))
@@ -356,7 +353,7 @@ static void __init taa_select_mitigation(void)
* For guests that can't determine whether the correct microcode is
* present on host, enable the mitigation for UCODE_NEEDED as well.
*/
- static_branch_enable(&mds_user_clear);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (taa_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
@@ -424,7 +421,7 @@ static void __init mmio_select_mitigation(void)
*/
if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
boot_cpu_has(X86_FEATURE_RTM)))
- static_branch_enable(&mds_user_clear);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
static_branch_enable(&mmio_stale_data_clear);
@@ -484,12 +481,12 @@ static void __init md_clear_update_mitigation(void)
if (cpu_mitigations_off())
return;
- if (!static_key_enabled(&mds_user_clear))
+ if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
goto out;
/*
- * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
- * mitigation, if necessary.
+ * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
+ * Stale Data mitigation, if necessary.
*/
if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0b97bcde70c6..fbc4e60d027c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1589,6 +1589,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
get_cpu_vendor(c);
get_cpu_cap(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
+ get_cpu_address_sizes(c);
cpu_parse_early_param();
if (this_cpu->c_early_init)
@@ -1601,10 +1602,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
this_cpu->c_bsp_init(c);
} else {
setup_clear_cpu_cap(X86_FEATURE_CPUID);
+ get_cpu_address_sizes(c);
}
- get_cpu_address_sizes(c);
-
setup_force_cpu_cap(X86_FEATURE_ALWAYS);
cpu_set_bug_bits(c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a927a8fc9624..40dec9b56f87 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -184,6 +184,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
return false;
}
+#define MSR_IA32_TME_ACTIVATE 0x982
+
+/* Helpers to access TME_ACTIVATE MSR */
+#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
+#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
+
+#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
+#define TME_ACTIVATE_POLICY_AES_XTS_128 0
+
+#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
+
+#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
+#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
+
+/* Values for mktme_status (SW only construct) */
+#define MKTME_ENABLED 0
+#define MKTME_DISABLED 1
+#define MKTME_UNINITIALIZED 2
+static int mktme_status = MKTME_UNINITIALIZED;
+
+static void detect_tme_early(struct cpuinfo_x86 *c)
+{
+ u64 tme_activate, tme_policy, tme_crypto_algs;
+ int keyid_bits = 0, nr_keyids = 0;
+ static u64 tme_activate_cpu0 = 0;
+
+ rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+
+ if (mktme_status != MKTME_UNINITIALIZED) {
+ if (tme_activate != tme_activate_cpu0) {
+ /* Broken BIOS? */
+ pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+ pr_err_once("x86/tme: MKTME is not usable\n");
+ mktme_status = MKTME_DISABLED;
+
+ /* Proceed. We may need to exclude bits from x86_phys_bits. */
+ }
+ } else {
+ tme_activate_cpu0 = tme_activate;
+ }
+
+ if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+ pr_info_once("x86/tme: not enabled by BIOS\n");
+ mktme_status = MKTME_DISABLED;
+ return;
+ }
+
+ if (mktme_status != MKTME_UNINITIALIZED)
+ goto detect_keyid_bits;
+
+ pr_info("x86/tme: enabled by BIOS\n");
+
+ tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+ if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+ pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+
+ tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+ if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+ pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+ tme_crypto_algs);
+ mktme_status = MKTME_DISABLED;
+ }
+detect_keyid_bits:
+ keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+ nr_keyids = (1UL << keyid_bits) - 1;
+ if (nr_keyids) {
+ pr_info_once("x86/mktme: enabled by BIOS\n");
+ pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+ } else {
+ pr_info_once("x86/mktme: disabled by BIOS\n");
+ }
+
+ if (mktme_status == MKTME_UNINITIALIZED) {
+ /* MKTME is usable */
+ mktme_status = MKTME_ENABLED;
+ }
+
+ /*
+ * KeyID bits effectively lower the number of physical address
+ * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
+ */
+ c->x86_phys_bits -= keyid_bits;
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -322,6 +406,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*/
if (detect_extended_topology_early(c) < 0)
detect_ht_early(c);
+
+ /*
+ * Adjust the number of physical bits early because it affects the
+ * valid bits of the MTRR mask registers.
+ */
+ if (cpu_has(c, X86_FEATURE_TME))
+ detect_tme_early(c);
}
static void bsp_init_intel(struct cpuinfo_x86 *c)
@@ -482,90 +573,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
-#define MSR_IA32_TME_ACTIVATE 0x982
-
-/* Helpers to access TME_ACTIVATE MSR */
-#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
-#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
-
-#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128 0
-
-#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
-
-#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
-
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED 0
-#define MKTME_DISABLED 1
-#define MKTME_UNINITIALIZED 2
-static int mktme_status = MKTME_UNINITIALIZED;
-
-static void detect_tme(struct cpuinfo_x86 *c)
-{
- u64 tme_activate, tme_policy, tme_crypto_algs;
- int keyid_bits = 0, nr_keyids = 0;
- static u64 tme_activate_cpu0 = 0;
-
- rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
-
- if (mktme_status != MKTME_UNINITIALIZED) {
- if (tme_activate != tme_activate_cpu0) {
- /* Broken BIOS? */
- pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
- pr_err_once("x86/tme: MKTME is not usable\n");
- mktme_status = MKTME_DISABLED;
-
- /* Proceed. We may need to exclude bits from x86_phys_bits. */
- }
- } else {
- tme_activate_cpu0 = tme_activate;
- }
-
- if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
- pr_info_once("x86/tme: not enabled by BIOS\n");
- mktme_status = MKTME_DISABLED;
- return;
- }
-
- if (mktme_status != MKTME_UNINITIALIZED)
- goto detect_keyid_bits;
-
- pr_info("x86/tme: enabled by BIOS\n");
-
- tme_policy = TME_ACTIVATE_POLICY(tme_activate);
- if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
- pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
-
- tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
- if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
- pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
- tme_crypto_algs);
- mktme_status = MKTME_DISABLED;
- }
-detect_keyid_bits:
- keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
- nr_keyids = (1UL << keyid_bits) - 1;
- if (nr_keyids) {
- pr_info_once("x86/mktme: enabled by BIOS\n");
- pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
- } else {
- pr_info_once("x86/mktme: disabled by BIOS\n");
- }
-
- if (mktme_status == MKTME_UNINITIALIZED) {
- /* MKTME is usable */
- mktme_status = MKTME_ENABLED;
- }
-
- /*
- * KeyID bits effectively lower the number of physical address
- * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
- */
- c->x86_phys_bits -= keyid_bits;
-}
-
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
@@ -702,9 +709,6 @@ static void init_intel(struct cpuinfo_x86 *c)
init_ia32_feat_ctl(c);
- if (cpu_has(c, X86_FEATURE_TME))
- detect_tme(c);
-
init_intel_misc_features(c);
split_lock_init();
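The relocated helpers decode TME_ACTIVATE with plain shifts and masks. As a
worked example, a hypothetical MSR value of 0x0003000200000003 decodes as:

	TME_ACTIVATE_LOCKED(x)		/* bit 0      = 1 -> locked             */
	TME_ACTIVATE_ENABLED(x)		/* bit 1      = 1 -> enabled            */
	TME_ACTIVATE_POLICY(x)		/* bits 7:4   = 0 -> AES-XTS-128        */
	TME_ACTIVATE_KEYID_BITS(x)	/* bits 35:32 = 2 -> 2^2 - 1 = 3 KeyIDs */
	TME_ACTIVATE_CRYPTO_ALGS(x)	/* bits 63:48 = 3 -> AES-XTS-128 known  */

so detect_tme_early() would subtract 2 KeyID bits from c->x86_phys_bits,
which is exactly why it must now run before the MTRR mask bits are computed.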
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index fb8cf953380d..b66f540de054 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
/*
- * SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
- * to be reserved.
+ * SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
+ * kexec and do not need to be reserved.
*/
- if (data->type != SETUP_EFI && data->type != SETUP_IMA)
+ if (data->type != SETUP_EFI &&
+ data->type != SETUP_IMA &&
+ data->type != SETUP_RNG_SEED)
e820__range_update_kexec(pa_data,
sizeof(*data) + data->len,
E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 558076dbde5b..247f2225aa9f 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -274,12 +274,13 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures,
* Attempt to restore the FPU registers directly from user memory.
* Pagefaults are handled and any errors returned are fatal.
*/
-static bool restore_fpregs_from_user(void __user *buf, u64 xrestore,
- bool fx_only, unsigned int size)
+static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
{
struct fpu *fpu = &current->thread.fpu;
int ret;
+ /* Restore enabled features only. */
+ xrestore &= fpu->fpstate->user_xfeatures;
retry:
fpregs_lock();
/* Ensure that XFD is up to date */
@@ -309,7 +310,7 @@ retry:
if (ret != X86_TRAP_PF)
return false;
- if (!fault_in_readable(buf, size))
+ if (!fault_in_readable(buf, fpu->fpstate->user_size))
goto retry;
return false;
}
@@ -339,7 +340,6 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
struct user_i387_ia32_struct env;
bool success, fx_only = false;
union fpregs_state *fpregs;
- unsigned int state_size;
u64 user_xfeatures = 0;
if (use_xsave()) {
@@ -349,17 +349,14 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
return false;
fx_only = !fx_sw_user.magic1;
- state_size = fx_sw_user.xstate_size;
user_xfeatures = fx_sw_user.xfeatures;
} else {
user_xfeatures = XFEATURE_MASK_FPSSE;
- state_size = fpu->fpstate->user_size;
}
if (likely(!ia32_fxstate)) {
/* Restore the FPU registers directly from user memory. */
- return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
- state_size);
+ return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
}
/*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index dfe9945b9bec..428ee74002e1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -434,7 +434,8 @@ static void __init sev_map_percpu_data(void)
{
int cpu;
- if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ if (cc_vendor != CC_VENDOR_AMD ||
+ !cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
return;
for_each_possible_cpu(cpu) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 17e955ab69fe..3082cf24b69e 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -563,9 +563,6 @@ nmi_restart:
}
if (this_cpu_dec_return(nmi_state))
goto nmi_restart;
-
- if (user_mode(regs))
- mds_user_clear_cpu_buffers();
}
#if IS_ENABLED(CONFIG_KVM_INTEL)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 4943f6b2bbee..8a47f8541eab 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1322,6 +1322,56 @@ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
return false;
}
+#define KVM_HV_WIN2016_GUEST_ID 0x1040a00003839
+#define KVM_HV_WIN2016_GUEST_ID_MASK (~GENMASK_ULL(23, 16)) /* mask out the service version */
+
+/*
+ * Hyper-V enabled Windows Server 2016 SMP VMs fail to boot in a !XSAVES &&
+ * XSAVEC configuration. Such a configuration can result from, for example,
+ * the AMD Erratum 1386 workaround.
+ *
+ * Print a notice so users aren't left wondering what's suddenly gone wrong.
+ */
+static void __kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_hv *hv = to_kvm_hv(kvm);
+
+ /* Check again under the hv_lock. */
+ if (hv->xsaves_xsavec_checked)
+ return;
+
+ if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) !=
+ KVM_HV_WIN2016_GUEST_ID)
+ return;
+
+ hv->xsaves_xsavec_checked = true;
+
+ /* UP configurations aren't affected */
+ if (atomic_read(&kvm->online_vcpus) < 2)
+ return;
+
+ if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVES) ||
+ !guest_cpuid_has(vcpu, X86_FEATURE_XSAVEC))
+ return;
+
+ pr_notice_ratelimited("Booting SMP Windows KVM VM with !XSAVES && XSAVEC. "
+ "If it fails to boot try disabling XSAVEC in the VM config.\n");
+}
+
+void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu)
+{
+ struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
+
+ if (!vcpu->arch.hyperv_enabled ||
+ hv->xsaves_xsavec_checked)
+ return;
+
+ mutex_lock(&hv->hv_lock);
+ __kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
+ mutex_unlock(&hv->hv_lock);
+}
+
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{
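KVM_HV_WIN2016_GUEST_ID_MASK clears bits 23:16 (the service version) before
the comparison, so every Server 2016 servicing level matches the same base
ID. A hypothetical example:

	/* Windows Server 2016 guest reporting service version 4 */
	u64 guest_os_id = 0x1040a00043839ULL;	/* bits 23:16 = 0x04 */

	/* masking drops the service version, leaving 0x1040a00003839,
	 * which equals KVM_HV_WIN2016_GUEST_ID -> the check applies */
	guest_os_id &= KVM_HV_WIN2016_GUEST_ID_MASK;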
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 1dc0b6604526..923e64903da9 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -182,6 +182,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);
+void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu);
+
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
@@ -267,6 +269,7 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
static inline void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock) {}
static inline void kvm_hv_request_tsc_page_update(struct kvm *kvm) {}
+static inline void kvm_hv_xsaves_xsavec_maybe_warn(struct kvm_vcpu *vcpu) {}
static inline void kvm_hv_init_vm(struct kvm *kvm) {}
static inline void kvm_hv_destroy_vm(struct kvm *kvm) {}
static inline int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h
index 36c8af87a707..4e725854c63a 100644
--- a/arch/x86/kvm/svm/svm_ops.h
+++ b/arch/x86/kvm/svm/svm_ops.h
@@ -8,7 +8,7 @@
#define svm_asm(insn, clobber...) \
do { \
- asm_volatile_goto("1: " __stringify(insn) "\n\t" \
+ asm goto("1: " __stringify(insn) "\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
::: clobber : fault); \
return; \
@@ -18,7 +18,7 @@ fault: \
#define svm_asm1(insn, op1, clobber...) \
do { \
- asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \
+ asm goto("1: " __stringify(insn) " %0\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
:: op1 : clobber : fault); \
return; \
@@ -28,7 +28,7 @@ fault: \
#define svm_asm2(insn, op1, op2, clobber...) \
do { \
- asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
+ asm goto("1: " __stringify(insn) " %1, %0\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
:: op1, op2 : clobber : fault); \
return; \
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index a6216c874729..315c7c2ba89b 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -71,7 +71,7 @@ static int fixed_pmc_events[] = {
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
struct kvm_pmc *pmc;
- u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
+ u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
int i;
pmu->fixed_ctr_ctrl = data;
diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index edc3f16cc189..6a9bfdfbb6e5 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,7 +2,10 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H
-#define VMX_RUN_VMRESUME (1 << 0)
-#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
+#define VMX_RUN_VMRESUME_SHIFT 0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
+
+#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 906ecd001511..2bfbf758d061 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -139,7 +139,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov (%_ASM_SP), %_ASM_AX
/* Check if vmlaunch or vmresume is needed */
- test $VMX_RUN_VMRESUME, %ebx
+ bt $VMX_RUN_VMRESUME_SHIFT, %ebx
/* Load guest registers. Don't clobber flags. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -161,8 +161,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
- /* Check EFLAGS.ZF from 'test VMX_RUN_VMRESUME' above */
- jz .Lvmlaunch
+ /* Clobbers EFLAGS.ZF */
+ CLEAR_CPU_BUFFERS
+
+ /* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
+ jnc .Lvmlaunch
/*
* After a successful VMRESUME/VMLAUNCH, control flow "magically"
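The switch from test to bt in the hunk above is load-bearing: the newly
inserted CLEAR_CPU_BUFFERS runs VERW, which clobbers EFLAGS.ZF, so a
ZF-based test/jz pair spanning it would read a stale flag. bt deposits its
result in CF, which VERW leaves intact:

	bt	$VMX_RUN_VMRESUME_SHIFT, %ebx	/* result lands in CF     */
	...					/* guest registers loaded */
	CLEAR_CPU_BUFFERS			/* VERW: clobbers only ZF */
	jnc	.Lvmlaunch			/* CF is still valid here */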
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e262bc2ba4e5..88a4ff200d04 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -388,7 +388,16 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
- vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
+ /*
+ * Disable VERW's behavior of clearing CPU buffers for the guest if the
+ * CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
+ * the mitigation. Disabling the clearing behavior provides a
+ * performance boost for guests that aren't aware that manually clearing
+ * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
+ * and VM-Exit.
+ */
+ vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
+ (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
!boot_cpu_has_bug(X86_BUG_MDS) &&
!boot_cpu_has_bug(X86_BUG_TAA);
@@ -738,7 +747,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
*/
static int kvm_cpu_vmxoff(void)
{
- asm_volatile_goto("1: vmxoff\n\t"
+ asm goto("1: vmxoff\n\t"
_ASM_EXTABLE(1b, %l[fault])
::: "cc", "memory" : fault);
@@ -2784,7 +2793,7 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer)
cr4_set_bits(X86_CR4_VMXE);
- asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t"
+ asm goto("1: vmxon %[vmxon_pointer]\n\t"
_ASM_EXTABLE(1b, %l[fault])
: : [vmxon_pointer] "m"(vmxon_pointer)
: : fault);
@@ -7224,11 +7233,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_state_enter_irqoff();
- /* L1D Flush includes CPU buffer clear to mitigate MDS */
+ /*
+ * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
+ * mitigation for MDS is done late in VMentry and is still
+ * executed in spite of L1D Flush. This is because an extra VERW
+ * should not matter much after the big hammer L1D Flush.
+ */
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
- else if (static_branch_unlikely(&mds_user_clear))
- mds_clear_cpu_buffers();
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index f41ce3c24123..8060e5fc6dbd 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -94,7 +94,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
- asm_volatile_goto("1: vmread %[field], %[output]\n\t"
+ asm_goto_output("1: vmread %[field], %[output]\n\t"
"jna %l[do_fail]\n\t"
_ASM_EXTABLE(1b, %l[do_exception])
@@ -188,7 +188,7 @@ static __always_inline unsigned long vmcs_readl(unsigned long field)
#define vmx_asm1(insn, op1, error_args...) \
do { \
- asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \
+ asm goto("1: " __stringify(insn) " %0\n\t" \
".byte 0x2e\n\t" /* branch not taken hint */ \
"jna %l[error]\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
@@ -205,7 +205,7 @@ fault: \
#define vmx_asm2(insn, op1, op2, error_args...) \
do { \
- asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \
+ asm goto("1: " __stringify(insn) " %1, %0\n\t" \
".byte 0x2e\n\t" /* branch not taken hint */ \
"jna %l[error]\n\t" \
_ASM_EXTABLE(1b, %l[fault]) \
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 363b1c080205..48a61d283406 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1704,22 +1704,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
struct kvm_msr_entry msr;
int r;
+ /* Unconditionally clear the output for simplicity */
+ msr.data = 0;
msr.index = index;
r = kvm_get_msr_feature(&msr);
- if (r == KVM_MSR_RET_INVALID) {
- /* Unconditionally clear the output for simplicity */
- *data = 0;
- if (kvm_msr_ignored_check(index, 0, false))
- r = 0;
- }
-
- if (r)
- return r;
+ if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
+ r = 0;
*data = msr.data;
- return 0;
+ return r;
}
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -1782,6 +1777,10 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
kvm_mmu_reset_context(vcpu);
+ if (!static_cpu_has(X86_FEATURE_XSAVES) &&
+ (efer & EFER_SVME))
+ kvm_hv_xsaves_xsavec_maybe_warn(vcpu);
+
return 0;
}
@@ -2507,7 +2506,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
}
#ifdef CONFIG_X86_64
-static inline int gtod_is_based_on_tsc(int mode)
+static inline bool gtod_is_based_on_tsc(int mode)
{
return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
}
@@ -5454,7 +5453,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
vcpu->arch.nmi_pending = 0;
atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
- kvm_make_request(KVM_REQ_NMI, vcpu);
+ if (events->nmi.pending)
+ kvm_make_request(KVM_REQ_NMI, vcpu);
}
static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
@@ -7016,6 +7016,9 @@ set_identity_unlock:
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
+ r = -ENOENT;
+ if (!pic_in_kernel(kvm))
+ goto create_pit_unlock;
r = -ENOMEM;
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 20ef350a60fb..10d5ed8b5990 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -163,23 +163,23 @@ SYM_CODE_END(__get_user_8_handle_exception)
#endif
/* get_user */
- _ASM_EXTABLE(1b, __get_user_handle_exception)
- _ASM_EXTABLE(2b, __get_user_handle_exception)
- _ASM_EXTABLE(3b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(1b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(2b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(3b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
- _ASM_EXTABLE(4b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(4b, __get_user_handle_exception)
#else
- _ASM_EXTABLE(4b, __get_user_8_handle_exception)
- _ASM_EXTABLE(5b, __get_user_8_handle_exception)
+ _ASM_EXTABLE_UA(4b, __get_user_8_handle_exception)
+ _ASM_EXTABLE_UA(5b, __get_user_8_handle_exception)
#endif
/* __get_user */
- _ASM_EXTABLE(6b, __get_user_handle_exception)
- _ASM_EXTABLE(7b, __get_user_handle_exception)
- _ASM_EXTABLE(8b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(6b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(7b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(8b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
- _ASM_EXTABLE(9b, __get_user_handle_exception)
+ _ASM_EXTABLE_UA(9b, __get_user_handle_exception)
#else
- _ASM_EXTABLE(9b, __get_user_8_handle_exception)
- _ASM_EXTABLE(10b, __get_user_8_handle_exception)
+ _ASM_EXTABLE_UA(9b, __get_user_8_handle_exception)
+ _ASM_EXTABLE_UA(10b, __get_user_8_handle_exception)
#endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 2877f5934177..975c9c18263d 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -133,15 +133,15 @@ SYM_CODE_START_LOCAL(__put_user_handle_exception)
RET
SYM_CODE_END(__put_user_handle_exception)
- _ASM_EXTABLE(1b, __put_user_handle_exception)
- _ASM_EXTABLE(2b, __put_user_handle_exception)
- _ASM_EXTABLE(3b, __put_user_handle_exception)
- _ASM_EXTABLE(4b, __put_user_handle_exception)
- _ASM_EXTABLE(5b, __put_user_handle_exception)
- _ASM_EXTABLE(6b, __put_user_handle_exception)
- _ASM_EXTABLE(7b, __put_user_handle_exception)
- _ASM_EXTABLE(9b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(1b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(2b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(3b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(4b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(5b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(6b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(7b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(9b, __put_user_handle_exception)
#ifdef CONFIG_X86_32
- _ASM_EXTABLE(8b, __put_user_handle_exception)
- _ASM_EXTABLE(10b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(8b, __put_user_handle_exception)
+ _ASM_EXTABLE_UA(10b, __put_user_handle_exception)
#endif
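Both user-copy stubs switch their fixup entries from _ASM_EXTABLE() to _ASM_EXTABLE_UA(), which tags each fault site as a legitimate user-memory access so the page-fault handler can apply the stricter uaccess checks (for instance, warning when such an instruction faults on a kernel address). Conceptually, each macro emits one table entry along these lines (a simplified model; the real x86 entries pack 32-bit relative offsets and a type/data word):

    /* Simplified model of an x86 exception-table entry. */
    struct exception_table_entry {
        long insn;   /* address of the instruction that may fault */
        long fixup;  /* handler to resume at if it does */
        int  type;   /* EX_TYPE_DEFAULT vs. EX_TYPE_UACCESS, ... */
    };

    /*
     * _ASM_EXTABLE(addr, handler)    -> type = EX_TYPE_DEFAULT
     * _ASM_EXTABLE_UA(addr, handler) -> type = EX_TYPE_UACCESS, i.e. the
     * faulting instruction is a user access and is handled as such.
     */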
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 679b09cfe241..d6375b3c633b 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -798,15 +798,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
show_opcodes(regs, loglvl);
}
-/*
- * The (legacy) vsyscall page is the long page in the kernel portion
- * of the address space that has user-accessible permissions.
- */
-static bool is_vsyscall_vaddr(unsigned long vaddr)
-{
- return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
-}
-
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, u32 pkey, int si_code)
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 968d7005f4a7..f50cc210a981 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -26,18 +26,31 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
for (; addr < end; addr = next) {
pud_t *pud = pud_page + pud_index(addr);
pmd_t *pmd;
+ bool use_gbpage;
next = (addr & PUD_MASK) + PUD_SIZE;
if (next > end)
next = end;
- if (info->direct_gbpages) {
- pud_t pudval;
+ /* if this is already a gbpage, this portion is already mapped */
+ if (pud_large(*pud))
+ continue;
+
+ /* Is using a gbpage allowed? */
+ use_gbpage = info->direct_gbpages;
- if (pud_present(*pud))
- continue;
+ /* Don't use gbpage if it maps more than the requested region. */
+	/* at the beginning: */
+ use_gbpage &= ((addr & ~PUD_MASK) == 0);
+ /* ... or at the end: */
+ use_gbpage &= ((next & ~PUD_MASK) == 0);
+
+ /* Never overwrite existing mappings */
+ use_gbpage &= !pud_present(*pud);
+
+ if (use_gbpage) {
+ pud_t pudval;
- addr &= PUD_MASK;
pudval = __pud((addr - info->offset) | info->page_flag);
set_pud(pud, pudval);
continue;
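The rewritten ident_pud_init() loop only installs a 1 GiB page when the candidate range covers the PUD slot exactly and nothing is mapped there yet. The alignment test is worth spelling out on its own (standalone sketch with the x86-64 1 GiB PUD constants):

    #include <stdbool.h>
    #include <stdint.h>

    #define PUD_SHIFT 30
    #define PUD_SIZE  (1ULL << PUD_SHIFT)  /* 1 GiB */
    #define PUD_MASK  (~(PUD_SIZE - 1))

    /* A gbpage may be used only if [addr, next) fills the PUD slot exactly
     * and no mapping exists there yet. */
    static bool can_use_gbpage(uint64_t addr, uint64_t next,
                               bool direct_gbpages, bool pud_present)
    {
        bool use_gbpage = direct_gbpages;

        use_gbpage &= (addr & ~PUD_MASK) == 0;  /* aligned at the beginning */
        use_gbpage &= (next & ~PUD_MASK) == 0;  /* ... and at the end */
        use_gbpage &= !pud_present;             /* never overwrite mappings */
        return use_gbpage;
    }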
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
index 6993f026adec..42115ac079cf 100644
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -3,6 +3,8 @@
#include <linux/uaccess.h>
#include <linux/kernel.h>
+#include <asm/vsyscall.h>
+
#ifdef CONFIG_X86_64
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
@@ -16,6 +18,14 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
return false;
/*
+	 * Reading from the vsyscall page may cause an unhandled fault in
+	 * certain cases. Although it sits at an address above TASK_SIZE_MAX,
+	 * it is usually treated as a user-space address.
+ */
+ if (is_vsyscall_vaddr(vaddr))
+ return false;
+
+ /*
* Allow everything during early boot before 'x86_virt_bits'
* is initialized. Needed for instruction decoding in early
* exception handlers.
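is_vsyscall_vaddr(), now shared via <asm/vsyscall.h>, is a single page-masked compare. A userspace model with the fixed x86-64 constants:

    #include <stdbool.h>

    #define PAGE_MASK     (~0xfffUL)            /* 4 KiB pages */
    #define VSYSCALL_ADDR 0xffffffffff600000UL  /* legacy vsyscall page */

    static bool is_vsyscall_vaddr(unsigned long vaddr)
    {
        return (vaddr & PAGE_MASK) == VSYSCALL_ADDR;
    }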
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index adc497b93f03..65e9a6e391c0 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -934,7 +934,7 @@ static int __init cmp_memblk(const void *a, const void *b)
const struct numa_memblk *ma = *(const struct numa_memblk **)a;
const struct numa_memblk *mb = *(const struct numa_memblk **)b;
- return ma->start - mb->start;
+ return (ma->start > mb->start) - (ma->start < mb->start);
}
static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
@@ -944,14 +944,12 @@ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
* @start: address to begin fill
* @end: address to end fill
*
- * Find and extend numa_meminfo memblks to cover the @start-@end
- * physical address range, such that the first memblk includes
- * @start, the last memblk includes @end, and any gaps in between
- * are filled.
+ * Find and extend numa_meminfo memblks to cover the physical
+ * address range @start-@end
*
* RETURNS:
* 0 : Success
- * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
+ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
*/
int __init numa_fill_memblks(u64 start, u64 end)
@@ -963,17 +961,14 @@ int __init numa_fill_memblks(u64 start, u64 end)
/*
* Create a list of pointers to numa_meminfo memblks that
- * overlap start, end. Exclude (start == bi->end) since
- * end addresses in both a CFMWS range and a memblk range
- * are exclusive.
- *
- * This list of pointers is used to make in-place changes
- * that fill out the numa_meminfo memblks.
+ * overlap start, end. The list is used to make in-place
+ * changes that fill out the numa_meminfo memblks.
*/
for (int i = 0; i < mi->nr_blks; i++) {
struct numa_memblk *bi = &mi->blk[i];
- if (start < bi->end && end >= bi->start) {
+ if (memblock_addrs_overlap(start, end - start, bi->start,
+ bi->end - bi->start)) {
blk[count] = &mi->blk[i];
count++;
}
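The cmp_memblk() change replaces the classic subtraction comparator, which truncates the u64 difference into an int and misorders blocks whose start addresses differ by a multiple of 2^32; (a > b) - (a < b) always yields -1, 0 or 1. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    static int cmp_bad(uint64_t a, uint64_t b)  { return (int)(a - b); }
    static int cmp_good(uint64_t a, uint64_t b) { return (a > b) - (a < b); }

    int main(void)
    {
        uint64_t a = 0x100000000ULL, b = 0;  /* starts differ by 4 GiB */

        /* a > b, yet the subtraction truncates to 0: "equal" to the sort */
        printf("bad: %d good: %d\n", cmp_bad(a, b), cmp_good(a, b));
        return 0;
    }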
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index e9b448d1b1b7..102880404046 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -755,10 +755,14 @@ pmd_t *lookup_pmd_address(unsigned long address)
* areas on 32-bit NUMA systems. The percpu areas can
* end up in this kind of memory, for instance.
*
- * This could be optimized, but it is only intended to be
- * used at initialization time, and keeping it
- * unoptimized should increase the testing coverage for
- * the more obscure platforms.
+ * Note that as long as the PTEs are well-formed with correct PFNs, this
+ * works without checking the PRESENT bit in the leaf PTE. This is unlike
+ * the similar vmalloc_to_page() and derivatives. Callers may depend on
+ * this behavior.
+ *
+ * This could be optimized, but it is only used in paths that are not perf
+ * sensitive, and keeping it unoptimized should increase the testing coverage
+ * for the more obscure platforms.
*/
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
@@ -2041,17 +2045,12 @@ int set_mce_nospec(unsigned long pfn)
return rc;
}
-static int set_memory_p(unsigned long *addr, int numpages)
-{
- return change_page_attr_set(addr, numpages, __pgprot(_PAGE_PRESENT), 0);
-}
-
/* Restore full speculative operation to the pfn. */
int clear_mce_nospec(unsigned long pfn)
{
unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
- return set_memory_p(&addr, 1);
+ return set_memory_p(addr, 1);
}
EXPORT_SYMBOL_GPL(clear_mce_nospec);
#endif /* CONFIG_X86_64 */
@@ -2104,6 +2103,11 @@ int set_memory_np_noalias(unsigned long addr, int numpages)
CPA_NO_CHECK_ALIAS, NULL);
}
+int set_memory_p(unsigned long addr, int numpages)
+{
+ return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
+}
+
int set_memory_4k(unsigned long addr, int numpages)
{
return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 919f647c740f..e1390d1e331b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -3242,3 +3242,8 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
BUG_ON(ret < 0);
}
}
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+ return true;
+}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4b0d6fff88de..1fb9a1644d94 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu)
char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+ if (!resched_name)
+ goto fail_mem;
per_cpu(xen_resched_irq, cpu).name = resched_name;
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
cpu,
@@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu)
per_cpu(xen_resched_irq, cpu).irq = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
cpu,
@@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu)
if (!xen_fifo_events) {
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+ if (!debug_name)
+ goto fail_mem;
+
per_cpu(xen_debug_irq, cpu).name = debug_name;
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
xen_debug_interrupt,
@@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu)
}
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
+
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
cpu,
@@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu)
return 0;
+ fail_mem:
+ rc = -ENOMEM;
fail:
xen_smp_intr_free(cpu);
return rc;
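Each kasprintf() in xen_smp_intr_init() now gets the allocation check it was missing, unwinding through a common fail_mem label. A minimal userspace analogue of the pattern using asprintf(3):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    static int setup_names(int cpu, char **resched, char **callfunc)
    {
        if (asprintf(resched, "resched%d", cpu) < 0)
            return -1;                /* nothing to unwind yet */

        if (asprintf(callfunc, "callfunc%d", cpu) < 0) {
            free(*resched);           /* unwind the earlier allocation */
            return -1;                /* -ENOMEM in the kernel */
        }
        return 0;
    }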
diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h
index c812bf85021c..46c8596259d2 100644
--- a/arch/xtensa/include/asm/jump_label.h
+++ b/arch/xtensa/include/asm/jump_label.h
@@ -13,7 +13,7 @@
static __always_inline bool arch_static_branch(struct static_key *key,
bool branch)
{
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
"_nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".word 1b, %l[l_yes], %c0\n\t"
@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key,
* make it reachable and wrap both into a no-transform block
* to avoid any assembler interference with this.
*/
- asm_volatile_goto("1:\n\t"
+ asm goto("1:\n\t"
".begin no-transform\n\t"
"_j %l[l_yes]\n\t"
"2:\n\t"
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index feef615e2c9c..c9a16fba58b9 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -336,7 +336,7 @@ int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
if (nr_vecs > BIO_MAX_VECS)
return -E2BIG;
if (nr_vecs > UIO_FASTIOV) {
- bvec = kcalloc(sizeof(*bvec), nr_vecs, GFP_KERNEL);
+ bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
if (!bvec)
return -ENOMEM;
pages = NULL;
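The bio-integrity fix only swaps the first two kcalloc() arguments: the convention is kcalloc(count, element_size, flags), mirroring calloc(3). The allocated size is the same either way, but the swapped order defeats the documented convention and the static checkers keyed to it. Userspace analogue:

    #include <stdlib.h>

    struct bio_vec { void *bv_page; unsigned int bv_len, bv_offset; };

    /* Correct order: number of elements first, element size second. */
    static struct bio_vec *alloc_bvecs(size_t nr_vecs)
    {
        return calloc(nr_vecs, sizeof(struct bio_vec));
    }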
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e303fd317313..ff93c385ba5a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -300,7 +300,7 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
* @disk: gendisk the new blkg is associated with
* @gfp_mask: allocation mask to use
*
- * Allocate a new blkg assocating @blkcg and @q.
+ * Allocate a new blkg associating @blkcg and @disk.
*/
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
gfp_t gfp_mask)
diff --git a/block/blk-core.c b/block/blk-core.c
index 11342af420d0..de771093b526 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -49,6 +49,7 @@
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
+#include "blk-ioprio.h"
struct dentry *blk_debugfs_root;
@@ -833,6 +834,14 @@ end_io:
}
EXPORT_SYMBOL(submit_bio_noacct);
+static void bio_set_ioprio(struct bio *bio)
+{
+ /* Nobody set ioprio so far? Initialize it based on task's nice value */
+ if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
+ bio->bi_ioprio = get_current_ioprio();
+ blkcg_set_ioprio(bio);
+}
+
/**
* submit_bio - submit a bio to the block device layer for I/O
* @bio: The &struct bio which describes the I/O
@@ -855,6 +864,7 @@ void submit_bio(struct bio *bio)
count_vm_events(PGPGOUT, bio_sectors(bio));
}
+ bio_set_ioprio(bio);
submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
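bio_set_ioprio() moves from the blk-mq submission path into the generic submit_bio(), so bio-based drivers also get a priority assigned. The defaulting rule is simply "fill in the task's priority only while the class is still IOPRIO_CLASS_NONE"; with the uapi encoding (class in the top three bits of a 16-bit value):

    #include <stdint.h>

    #define IOPRIO_CLASS_SHIFT 13
    #define IOPRIO_CLASS_NONE  0

    static inline unsigned int ioprio_class(uint16_t ioprio)
    {
        return ioprio >> IOPRIO_CLASS_SHIFT;
    }

    /* Default the bio's priority from the submitting task only if unset. */
    static void set_ioprio(uint16_t *bi_ioprio, uint16_t task_ioprio)
    {
        if (ioprio_class(*bi_ioprio) == IOPRIO_CLASS_NONE)
            *bi_ioprio = task_ioprio;
    }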
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 089fcb9cfce3..04d44f0bcbc8 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1261,7 +1261,7 @@ static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
{
struct ioc *ioc = iocg->ioc;
- u64 last_period, cur_period;
+ u64 __maybe_unused last_period, cur_period;
u64 vtime, vtarget;
int i;
@@ -1353,6 +1353,13 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
lockdep_assert_held(&iocg->waitq.lock);
+ /*
+	 * If the delay was set by another CPU, we may be in the past. In that
+	 * case there is nothing to change, and bailing out also avoids an
+	 * underflow in the decay calculation below.
+ */
+ if (time_before64(now->now, iocg->delay_at))
+ return false;
+
/* calculate the current delay in effect - 1/2 every second */
tdelta = now->now - iocg->delay_at;
if (iocg->delay)
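time_before64() is the usual wraparound-safe timestamp comparison: take the difference and test its sign. The new guard bails out while now->now has not yet caught up with delay_at, which also keeps the tdelta subtraction below from underflowing. The idiom, as in the kernel's jiffies helpers:

    #include <stdbool.h>
    #include <stdint.h>

    /* "a is before b", correct across wraparound of the 64-bit counter. */
    static bool time_before64(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }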
diff --git a/block/blk-map.c b/block/blk-map.c
index 8584babf3ea0..71210cdb3442 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,12 +205,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
/*
* success
*/
- if ((iov_iter_rw(iter) == WRITE &&
- (!map_data || !map_data->null_mapped)) ||
- (map_data && map_data->from_user)) {
+ if (iov_iter_rw(iter) == WRITE &&
+ (!map_data || !map_data->null_mapped)) {
ret = bio_copy_from_iter(bio, iter);
if (ret)
goto cleanup;
+ } else if (map_data && map_data->from_user) {
+ struct iov_iter iter2 = *iter;
+
+ /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
+ iter2.data_source = ITER_SOURCE;
+ ret = bio_copy_from_iter(bio, &iter2);
+ if (ret)
+ goto cleanup;
} else {
if (bmd->is_our_pages)
zero_fill_bio(bio);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 5cbeb9344f2f..94668e72ab09 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -479,23 +479,6 @@ out:
return res;
}
-static int hctx_run_show(void *data, struct seq_file *m)
-{
- struct blk_mq_hw_ctx *hctx = data;
-
- seq_printf(m, "%lu\n", hctx->run);
- return 0;
-}
-
-static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct blk_mq_hw_ctx *hctx = data;
-
- hctx->run = 0;
- return count;
-}
-
static int hctx_active_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
@@ -624,7 +607,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
{"tags_bitmap", 0400, hctx_tags_bitmap_show},
{"sched_tags", 0400, hctx_sched_tags_show},
{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
- {"run", 0600, hctx_run_show, hctx_run_write},
{"active", 0400, hctx_active_show},
{"dispatch_busy", 0400, hctx_dispatch_busy_show},
{"type", 0400, hctx_type_show},
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 67c95f31b15b..451a2c1f1f32 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -324,8 +324,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
return;
- hctx->run++;
-
/*
* A return of -EAGAIN is an indication that hctx->dispatch is not
* empty and we must run again in order to avoid starving flushes.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c11c97afa0bc..2dc01551e27c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -40,7 +40,6 @@
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
-#include "blk-ioprio.h"
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
@@ -772,11 +771,16 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
/*
* Partial zone append completions cannot be supported as the
* BIO fragments may end up not being written sequentially.
+		 * In that case, force the completed nbytes to be equal to
+ * the BIO size so that bio_advance() sets the BIO remaining
+ * size to 0 and we end up calling bio_endio() before returning.
*/
- if (bio->bi_iter.bi_size != nbytes)
+ if (bio->bi_iter.bi_size != nbytes) {
bio->bi_status = BLK_STS_IOERR;
- else
+ nbytes = bio->bi_iter.bi_size;
+ } else {
bio->bi_iter.bi_sector = rq->__sector;
+ }
}
bio_advance(bio, nbytes);
@@ -1860,6 +1864,22 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
__add_wait_queue(wq, wait);
/*
+	 * Add an explicit barrier, since blk_mq_get_driver_tag() does not
+	 * imply one when it fails.
+	 *
+	 * Order adding us to the wait queue against allocating the driver tag.
+	 *
+	 * The pairing barrier is the one implied by sbitmap_queue_wake_up(),
+	 * which orders clearing the sbitmap tag bits against the lockless
+	 * waitqueue_active() check in __sbitmap_queue_wake_up().
+	 *
+	 * Otherwise, reordering of adding to the wait queue and getting the
+	 * driver tag may cause __sbitmap_queue_wake_up() to wake up nothing,
+	 * because waitqueue_active() may not observe us in the wait queue.
+ */
+ smp_mb();
+
+ /*
* It's possible that a tag was freed in the window between the
* allocation failure and adding the hardware queue to the wait
* queue.
@@ -2891,8 +2911,11 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
return NULL;
}
-/* return true if this @rq can be used for @bio */
-static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+/*
+ * Check if we can use the passed-in request for submitting the passed-in
+ * bio, and remove it from the request list if it can be used.
+ */
+static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
struct bio *bio)
{
enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
@@ -2920,14 +2943,6 @@ static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
return true;
}
-static void bio_set_ioprio(struct bio *bio)
-{
- /* Nobody set ioprio so far? Initialize it based on task's nice value */
- if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
- bio->bi_ioprio = get_current_ioprio();
- blkcg_set_ioprio(bio);
-}
-
/**
* blk_mq_submit_bio - Create and send a request to block device.
* @bio: Bio pointer.
@@ -2952,13 +2967,6 @@ void blk_mq_submit_bio(struct bio *bio)
blk_status_t ret;
bio = blk_queue_bounce(bio, q);
- if (bio_may_exceed_limits(bio, &q->limits)) {
- bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
- if (!bio)
- return;
- }
-
- bio_set_ioprio(bio);
if (plug) {
rq = rq_list_peek(&plug->cached_rq);
@@ -2966,16 +2974,26 @@ void blk_mq_submit_bio(struct bio *bio)
rq = NULL;
}
if (rq) {
+ if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ if (!bio)
+ return;
+ }
if (!bio_integrity_prep(bio))
return;
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
return;
- if (blk_mq_can_use_cached_rq(rq, plug, bio))
+ if (blk_mq_use_cached_rq(rq, plug, bio))
goto done;
percpu_ref_get(&q->q_usage_counter);
} else {
if (unlikely(bio_queue_enter(bio)))
return;
+ if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
+ bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+ if (!bio)
+ goto fail;
+ }
if (!bio_integrity_prep(bio))
goto fail;
}
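The new smp_mb() in blk_mq_mark_tag_wait() pairs with the barrier implied on the tag-freeing side: the submitter orders "add self to wait queue" before "recheck for a tag", while the freer orders "clear tag bit" before "check waitqueue_active()". This is the classic store-buffering pattern; a standalone model with C11 fences (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool on_waitqueue;  /* "we are on the wait queue" */
    static atomic_bool tag_free;      /* "a driver tag is available" */

    /* Submitter: enqueue first, fence, then recheck for a free tag. */
    static bool submitter_sleeps(void)
    {
        atomic_store_explicit(&on_waitqueue, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* the new smp_mb() */
        return !atomic_load_explicit(&tag_free, memory_order_relaxed);
    }

    /* Tag freer: publish the tag first, fence, then look for waiters. */
    static bool freer_wakes(void)
    {
        atomic_store_explicit(&tag_free, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);  /* wake-up side barrier */
        return atomic_load_explicit(&on_waitqueue, memory_order_relaxed);
    }

    /* With both fences in place, at least one side observes the other:
     * either the submitter sees the freed tag, or the freer sees the
     * waiter and wakes it. Drop either fence and both can miss. */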
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 5ba3cd574eac..0c0e270a8265 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -163,9 +163,9 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
*/
static bool wb_recent_wait(struct rq_wb *rwb)
{
- struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
+ struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
- return time_before(jiffies, wb->dirty_sleep + HZ);
+ return time_before(jiffies, bdi->last_bdp_sleep + HZ);
}
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
diff --git a/block/ioctl.c b/block/ioctl.c
index 9c73a763ef88..438f79c564cf 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -20,8 +20,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
struct blkpg_partition p;
sector_t start, length;
- if (disk->flags & GENHD_FL_NO_PART)
- return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
diff --git a/block/ioprio.c b/block/ioprio.c
index b5a942519a79..73301a261429 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -139,32 +139,6 @@ out:
return ret;
}
-/*
- * If the task has set an I/O priority, use that. Otherwise, return
- * the default I/O priority.
- *
- * Expected to be called for current task or with task_lock() held to keep
- * io_context stable.
- */
-int __get_task_ioprio(struct task_struct *p)
-{
- struct io_context *ioc = p->io_context;
- int prio;
-
- if (p != current)
- lockdep_assert_held(&p->alloc_lock);
- if (ioc)
- prio = ioc->ioprio;
- else
- prio = IOPRIO_DEFAULT;
-
- if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
- prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
- task_nice_ioprio(p));
- return prio;
-}
-EXPORT_SYMBOL_GPL(__get_task_ioprio);
-
static int get_task_ioprio(struct task_struct *p)
{
int ret;
diff --git a/block/opal_proto.h b/block/opal_proto.h
index dec7ce3a3edb..d247a457bf6e 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -71,6 +71,7 @@ enum opal_response_token {
#define SHORT_ATOM_BYTE 0xBF
#define MEDIUM_ATOM_BYTE 0xDF
#define LONG_ATOM_BYTE 0xE3
+#define EMPTY_ATOM_BYTE 0xFF
#define OPAL_INVAL_PARAM 12
#define OPAL_MANUFACTURED_INACTIVE 0x08
diff --git a/block/partitions/core.c b/block/partitions/core.c
index e6ac73617f3e..5f5ed5c75f04 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -439,6 +439,11 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
goto out;
}
+ if (disk->flags & GENHD_FL_NO_PART) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (partition_overlaps(disk, start, length, -1)) {
ret = -EBUSY;
goto out;
@@ -562,8 +567,8 @@ static bool blk_add_partition(struct gendisk *disk,
part = add_partition(disk, p, from, size, state->parts[p].flags,
&state->parts[p].info);
if (IS_ERR(part) && PTR_ERR(part) != -ENXIO) {
- printk(KERN_ERR " %s: p%d could not be added: %ld\n",
- disk->disk_name, p, -PTR_ERR(part));
+ printk(KERN_ERR " %s: p%d could not be added: %pe\n",
+ disk->disk_name, p, part);
return true;
}
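The partition error message now hands the ERR_PTR-encoded pointer straight to %pe, which prints a symbolic errno such as -EBUSY instead of a hand-negated number. The underlying convention, sketched outside the kernel:

    /* The ERR_PTR convention: small negative errnos encoded in a pointer. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* The kernel's %pe prints IS_ERR() pointers as "-EBUSY" and the like;
     * with %ld the old code had to negate PTR_ERR(part) by hand and lost
     * the symbolic name. */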
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 3d9e9cd250bd..fa4dba5d8531 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -1056,16 +1056,20 @@ static int response_parse(const u8 *buf, size_t length,
token_length = response_parse_medium(iter, pos);
else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */
token_length = response_parse_long(iter, pos);
+ else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */
+ token_length = 1;
else /* TOKEN */
token_length = response_parse_token(iter, pos);
if (token_length < 0)
return token_length;
+ if (pos[0] != EMPTY_ATOM_BYTE)
+ num_entries++;
+
pos += token_length;
total -= token_length;
iter++;
- num_entries++;
}
resp->num = num_entries;
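An Opal response stream may contain 0xFF padding bytes ("empty atoms"); the parser now consumes them one byte at a time without producing a token, so num_entries only counts real tokens. The loop shape, heavily simplified (real atom decoding omitted):

    #include <stddef.h>

    #define EMPTY_ATOM_BYTE 0xFF

    static int count_tokens(const unsigned char *pos, size_t total)
    {
        int num_entries = 0;

        while (total > 0) {
            size_t token_length = 1;  /* placeholder for atom decoding */

            if (pos[0] != EMPTY_ATOM_BYTE)
                num_entries++;        /* empty atoms yield no token */

            pos += token_length;
            total -= token_length;
        }
        return num_entries;
    }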
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 82c44d4899b9..e24c829d7a01 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -91,13 +91,13 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if (!(msg->msg_flags & MSG_MORE)) {
err = hash_alloc_result(sk, ctx);
if (err)
- goto unlock_free;
+ goto unlock_free_result;
ahash_request_set_crypt(&ctx->req, NULL,
ctx->result, 0);
err = crypto_wait_req(crypto_ahash_final(&ctx->req),
&ctx->wait);
if (err)
- goto unlock_free;
+ goto unlock_free_result;
}
goto done_more;
}
@@ -170,6 +170,7 @@ unlock:
unlock_free:
af_alg_free_sg(&ctx->sgl);
+unlock_free_result:
hash_free_result(sk, ctx);
ctx->more = false;
goto unlock;
diff --git a/crypto/cbc.c b/crypto/cbc.c
index eedddef9ce40..e81918ca68b7 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -148,6 +148,9 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
goto out_free_inst;
+ if (inst->alg.co.statesize)
+ goto out_free_inst;
+
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 0b6dd8aa21f2..0f1bd7dcde24 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -212,13 +212,12 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
ivsize = crypto_lskcipher_ivsize(tfm);
ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);
+ memcpy(ivs, req->iv, ivsize);
flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT)
flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
- else
- memcpy(ivs, req->iv, ivsize);
if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;
@@ -234,8 +233,7 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
}
- if (flags & CRYPTO_LSKCIPHER_FLAG_FINAL)
- memcpy(req->iv, ivs, ivsize);
+ memcpy(req->iv, ivs, ivsize);
return err;
}
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 19035230563d..7cb962e21453 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -102,7 +102,7 @@ static int reset_pending_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = seq_to_ivpu(s);
- seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+ seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
return 0;
}
@@ -130,7 +130,9 @@ dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size
fw->dvfs_mode = dvfs_mode;
- ivpu_pm_schedule_recovery(vdev);
+ ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret)
+ return ret;
return size;
}
@@ -190,7 +192,10 @@ fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
return ret;
ivpu_hw_profiling_freq_drive(vdev, enable);
- ivpu_pm_schedule_recovery(vdev);
+
+ ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret)
+ return ret;
return size;
}
@@ -301,11 +306,18 @@ static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
+ int ret;
if (!size)
return -EINVAL;
- ivpu_pm_schedule_recovery(vdev);
+ ret = ivpu_rpm_get(vdev);
+ if (ret)
+ return ret;
+
+ ivpu_pm_trigger_recovery(vdev, "debugfs");
+ flush_work(&vdev->pm->recovery_work);
+ ivpu_rpm_put(vdev);
return size;
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 64927682161b..4b0640226986 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -6,6 +6,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_accel.h>
#include <drm/drm_file.h>
@@ -17,6 +18,7 @@
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@@ -65,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
return file_priv;
}
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
- struct ivpu_file_priv *file_priv;
-
- xa_lock_irq(&vdev->context_xa);
- file_priv = xa_load(&vdev->context_xa, id);
- /* file_priv may still be in context_xa during file_priv_release() */
- if (file_priv && !kref_get_unless_zero(&file_priv->ref))
- file_priv = NULL;
- xa_unlock_irq(&vdev->context_xa);
-
- if (file_priv)
- ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
- file_priv->ctx.id, kref_read(&file_priv->ref));
-
- return file_priv;
+ mutex_lock(&file_priv->lock);
+ if (file_priv->bound) {
+ ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
+
+ ivpu_cmdq_release_all_locked(file_priv);
+ ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+ ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
+ ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ file_priv->bound = false;
+ drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
+ }
+ mutex_unlock(&file_priv->lock);
}
static void file_priv_release(struct kref *ref)
@@ -88,13 +88,15 @@ static void file_priv_release(struct kref *ref)
struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
struct ivpu_device *vdev = file_priv->vdev;
- ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+ ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
+ file_priv->ctx.id, (bool)file_priv->bound);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+ mutex_lock(&vdev->context_list_lock);
+ file_priv_unbind(vdev, file_priv);
+ mutex_unlock(&vdev->context_list_lock);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
- ivpu_cmdq_release_all(file_priv);
- ivpu_jsm_context_release(vdev, file_priv->ctx.id);
- ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
- drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
}
@@ -176,9 +178,6 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user.start;
break;
- case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- args->value = file_priv->priority;
- break;
case DRM_IVPU_PARAM_CONTEXT_ID:
args->value = file_priv->ctx.id;
break;
@@ -218,17 +217,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_param *args = data;
int ret = 0;
switch (args->param) {
- case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
- file_priv->priority = args->value;
- else
- ret = -EINVAL;
- break;
default:
ret = -EINVAL;
}
@@ -241,50 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_file_priv *file_priv;
u32 ctx_id;
- void *old;
- int ret;
+ int idx, ret;
- ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
- return ret;
- }
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv) {
ret = -ENOMEM;
- goto err_xa_erase;
+ goto err_dev_exit;
}
file_priv->vdev = vdev;
- file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+ file_priv->bound = true;
kref_init(&file_priv->ref);
mutex_init(&file_priv->lock);
+ mutex_lock(&vdev->context_list_lock);
+
+ ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
+ vdev->context_xa_limit, GFP_KERNEL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+ goto err_unlock;
+ }
+
ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
if (ret)
- goto err_mutex_destroy;
+ goto err_xa_erase;
- old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
- if (xa_is_err(old)) {
- ret = xa_err(old);
- ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
- goto err_ctx_fini;
- }
+ mutex_unlock(&vdev->context_list_lock);
+ drm_dev_exit(idx);
+
+ file->driver_priv = file_priv;
ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
ctx_id, current->comm, task_pid_nr(current));
- file->driver_priv = file_priv;
return 0;
-err_ctx_fini:
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-err_mutex_destroy:
- mutex_destroy(&file_priv->lock);
- kfree(file_priv);
err_xa_erase:
xa_erase_irq(&vdev->context_xa, ctx_id);
+err_unlock:
+ mutex_unlock(&vdev->context_list_lock);
+ mutex_destroy(&file_priv->lock);
+ kfree(file_priv);
+err_dev_exit:
+ drm_dev_exit(idx);
return ret;
}
@@ -340,8 +335,6 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
if (!ret)
ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
- else
- ivpu_hw_diagnose_failure(vdev);
return ret;
}
@@ -369,6 +362,9 @@ int ivpu_boot(struct ivpu_device *vdev)
ret = ivpu_wait_for_ready(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_mmu_evtq_dump(vdev);
+ ivpu_fw_log_dump(vdev);
return ret;
}
@@ -484,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
- /* VPU 37XX does not require 10m D3hot delay */
- if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
- pdev->d3hot_delay = 0;
+	/* NPU does not require the 10 ms D3hot delay */
+ pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
if (ret) {
@@ -540,6 +535,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
+ ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret)
goto err_xa_destroy;
@@ -611,14 +610,30 @@ err_xa_destroy:
return ret;
}
+static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+{
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+
+ mutex_lock(&vdev->context_list_lock);
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+ file_priv_unbind(vdev, file_priv);
+
+ mutex_unlock(&vdev->context_list_lock);
+}
+
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
+ ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev);
ivpu_pm_cancel_recovery(vdev);
+ ivpu_bo_unbind_all_user_contexts(vdev);
ivpu_ipc_fini(vdev);
ivpu_fw_fini(vdev);
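The ivpu rework splits file_priv teardown into an idempotent unbind step, guarded by a bound flag under context_list_lock, so it can run from either the file-release path or the device-wide loop above without double-freeing. The shape of the pattern (pthread sketch, names illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct file_ctx {
        pthread_mutex_t lock;  /* init with pthread_mutex_init() */
        bool bound;
    };

    /* Idempotent unbind in the style of file_priv_unbind(): callable from
     * both the per-file release path and the device teardown loop; only
     * the first caller does the actual work. */
    static void ctx_unbind(struct file_ctx *ctx)
    {
        pthread_mutex_lock(&ctx->lock);
        if (ctx->bound) {
            /* release command queues, MMU context, buffers ... */
            ctx->bound = false;
        }
        pthread_mutex_unlock(&ctx->lock);
    }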
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index ebc4b84f27b2..069ace4adb2d 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -56,6 +56,7 @@
#define IVPU_DBG_JSM BIT(10)
#define IVPU_DBG_KREF BIT(11)
#define IVPU_DBG_RPM BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
#define ivpu_err(vdev, fmt, ...) \
drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -114,6 +115,7 @@ struct ivpu_device {
struct ivpu_mmu_context gctx;
struct ivpu_mmu_context rctx;
+ struct mutex context_list_lock; /* Protects user context addition/removal */
struct xarray context_xa;
struct xa_limit context_xa_limit;
@@ -145,8 +147,8 @@ struct ivpu_file_priv {
struct mutex lock; /* Protects cmdq */
struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
struct ivpu_mmu_context ctx;
- u32 priority;
bool has_mmu_faults;
+ bool bound;
};
extern int ivpu_dbg_mask;
@@ -162,7 +164,6 @@ extern bool ivpu_disable_mmu_cont_pages;
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 6576232f3e67..5fa8bd4603d5 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
- (ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
(ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
vdev->wa.disable_d0i3_msg = true;
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 1dda4f38ea25..e9ddbe9f50eb 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -24,14 +24,11 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
- if (bo->ctx)
- ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n",
- action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
- bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
- else
- ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n",
- action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
- bo->handle);
+ ivpu_dbg(vdev, BO,
+ "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+ (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+ (bool)bo->base.base.import_attach);
}
/*
@@ -49,12 +46,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
mutex_lock(&bo->lock);
ivpu_dbg_bo(vdev, bo, "pin");
-
- if (!bo->ctx) {
- ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle);
- ret = -EINVAL;
- goto unlock;
- }
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
if (!bo->mmu_mapped) {
struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
@@ -85,7 +77,10 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
const struct ivpu_addr_range *range)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- int ret;
+ int idx, ret;
+
+ if (!drm_dev_enter(&vdev->drm, &idx))
+ return -ENODEV;
mutex_lock(&bo->lock);
@@ -101,6 +96,8 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
mutex_unlock(&bo->lock);
+ drm_dev_exit(idx);
+
return ret;
}
@@ -108,11 +105,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert_held(&bo->lock);
-
- ivpu_dbg_bo(vdev, bo, "unbind");
-
- /* TODO: dma_unmap */
+ lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -124,19 +117,23 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
if (bo->ctx) {
ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
- bo->vpu_addr = 0;
bo->ctx = NULL;
}
-}
-static void ivpu_bo_unbind(struct ivpu_bo *bo)
-{
- mutex_lock(&bo->lock);
- ivpu_bo_unbind_locked(bo);
- mutex_unlock(&bo->lock);
+ if (bo->base.base.import_attach)
+ return;
+
+ dma_resv_lock(bo->base.base.resv, NULL);
+ if (bo->base.sgt) {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+ }
+ dma_resv_unlock(bo->base.base.resv);
}
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
struct ivpu_bo *bo;
@@ -146,8 +143,10 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
mutex_lock(&bo->lock);
- if (bo->ctx == ctx)
+ if (bo->ctx == ctx) {
+ ivpu_dbg_bo(vdev, bo, "unbind");
ivpu_bo_unbind_locked(bo);
+ }
mutex_unlock(&bo->lock);
}
mutex_unlock(&vdev->bo_list_lock);
@@ -199,9 +198,6 @@ ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
- ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n",
- bo->vpu_addr, bo->base.base.size, flags);
-
return bo;
}
@@ -212,6 +208,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
struct ivpu_bo *bo = to_ivpu_bo(obj);
struct ivpu_addr_range *range;
+ if (bo->ctx) {
+ ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+ file_priv->ctx.id, bo->ctx->id);
+ return -EALREADY;
+ }
+
if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
range = &vdev->hw->ranges.shave;
else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -227,62 +229,24 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
struct ivpu_device *vdev = to_ivpu_device(obj->dev);
struct ivpu_bo *bo = to_ivpu_bo(obj);
+ ivpu_dbg_bo(vdev, bo, "free");
+
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
mutex_unlock(&vdev->bo_list_lock);
drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
- ivpu_dbg_bo(vdev, bo, "free");
-
- ivpu_bo_unbind(bo);
+ ivpu_bo_unbind_locked(bo);
mutex_destroy(&bo->lock);
drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
drm_gem_shmem_free(&bo->base);
}
-static const struct dma_buf_ops ivpu_bo_dmabuf_ops = {
- .cache_sgt_mapping = true,
- .attach = drm_gem_map_attach,
- .detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
- .release = drm_gem_dmabuf_release,
- .mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
-};
-
-static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
-{
- struct drm_device *dev = obj->dev;
- struct dma_buf_export_info exp_info = {
- .exp_name = KBUILD_MODNAME,
- .owner = dev->driver->fops->owner,
- .ops = &ivpu_bo_dmabuf_ops,
- .size = obj->size,
- .flags = flags,
- .priv = obj,
- .resv = obj->resv,
- };
- void *sgt;
-
- /*
- * Make sure that pages are allocated and dma-mapped before exporting the bo.
- * DMA-mapping is required if the bo will be imported to the same device.
- */
- sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
- if (IS_ERR(sgt))
- return sgt;
-
- return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_bo_free,
.open = ivpu_bo_open,
- .export = ivpu_bo_export,
.print_info = drm_gem_shmem_object_print_info,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
@@ -315,11 +279,9 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
return PTR_ERR(bo);
}
- ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle);
- if (!ret) {
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (!ret)
args->vpu_addr = bo->vpu_addr;
- args->handle = bo->handle;
- }
drm_gem_object_put(&bo->base.base);
@@ -361,7 +323,9 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
if (ret)
goto err_put;
+ dma_resv_lock(bo->base.base.resv, NULL);
ret = drm_gem_shmem_vmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
if (ret)
goto err_put;
@@ -376,7 +340,10 @@ void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
+ dma_resv_lock(bo->base.base.resv, NULL);
drm_gem_shmem_vunmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
+
drm_gem_object_put(&bo->base.base);
}
@@ -432,19 +399,11 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
- unsigned long dma_refcount = 0;
-
mutex_lock(&bo->lock);
- if (bo->base.base.dma_buf && bo->base.base.dma_buf->file)
- dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count);
-
- drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu",
- bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size,
- bo->flags, kref_read(&bo->base.base.refcount), dma_refcount);
-
- if (bo->base.base.import_attach)
- drm_printf(p, " imported");
+ drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+ bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+ bo->flags, kref_read(&bo->base.base.refcount));
if (bo->base.pages)
drm_printf(p, " has_pages");
@@ -452,6 +411,9 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
+ if (bo->base.base.import_attach)
+ drm_printf(p, " imported");
+
drm_printf(p, "\n");
mutex_unlock(&bo->lock);
@@ -462,8 +424,8 @@ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_bo *bo;
- drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n",
- "ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs");
+ drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
+ "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index d75cad0d3c74..a8559211c70d 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -19,14 +19,13 @@ struct ivpu_bo {
struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
- u32 handle;
u32 flags;
u32 job_status; /* Valid only for command buffer */
bool mmu_mapped;
};
int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 574cdeefb66b..89af1006df55 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -510,22 +510,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
-{
- ivpu_boot_dpu_active_drive(vdev, false);
- ivpu_boot_pwr_island_isolation_drive(vdev, true);
- ivpu_boot_pwr_island_trickle_drive(vdev, false);
- ivpu_boot_pwr_island_drive(vdev, false);
-
- return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
-}
-
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -616,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
return 0;
}
+static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret = 0;
- if (ivpu_boot_pwr_domain_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable power domain\n");
+ if (ivpu_hw_37xx_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO;
}
@@ -661,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
+ /* PLL requests may fail when powering down, so issue WP 0 here */
+ ret = ivpu_pll_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);
+
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -875,24 +895,18 @@ static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
-
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
-
ivpu_hw_wdt_disable(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
-
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
/* Handler for IRQs from VPU core (irqV) */
@@ -970,7 +984,7 @@ static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
return true;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index eba2fdef2ace..a1523d0b1ef3 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -24,7 +24,7 @@
#define SKU_HW_ID_SHIFT 16u
#define SKU_HW_ID_MASK 0xffff0000u
-#define PLL_CONFIG_DEFAULT 0x1
+#define PLL_CONFIG_DEFAULT 0x0
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ (50 * 1000000)
@@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 tile_disable;
- u32 tile_enable;
u32 fuse;
fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
@@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
- tile_enable = (~tile_disable) & TILE_MAX_MASK;
-
- hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
- hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
hw->tile_fuse = tile_disable;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
@@ -746,7 +741,7 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
return 0;
}
-static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
@@ -768,6 +763,23 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
return ret;
}
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (ivpu_hw_40xx_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset VPU IP\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
@@ -913,7 +925,7 @@ static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
- if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+ if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
ivpu_warn(vdev, "Failed to reset the VPU\n");
if (ivpu_pll_disable(vdev)) {
@@ -1032,18 +1044,18 @@ static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
/* TODO: For LNN hang consider engine reset instead of full recovery */
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_hw_wdt_disable(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
/* Handler for IRQs from VPU core (irqV) */
@@ -1137,7 +1149,7 @@ static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
return true;
}
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index e86621f16f85..fa66c39b57ec 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -343,10 +343,8 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
vdev->timeout.jsm);
- if (hb_ret == -ETIMEDOUT) {
- ivpu_hw_diagnose_failure(vdev);
- ivpu_pm_schedule_recovery(vdev);
- }
+ if (hb_ret == -ETIMEDOUT)
+ ivpu_pm_trigger_recovery(vdev, "IPC timeout");
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 7206cf9cdb4a..e70cfb859339 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -112,22 +112,20 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
}
}
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
int i;
- mutex_lock(&file_priv->lock);
+ lockdep_assert_held(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_release_locked(file_priv, i);
-
- mutex_unlock(&file_priv->lock);
}
/*
* Mark the doorbell as unregistered and reset job queue pointers.
* This function needs to be called when the VPU hardware is restarted
- * and FW looses job queue state. The next time job queue is used it
+ * and FW loses job queue state. The next time job queue is used it
* will be registered again.
*/
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
@@ -161,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
- if (!file_priv)
- continue;
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv)
ivpu_cmdq_reset_all(file_priv);
- ivpu_file_priv_put(&file_priv);
- }
+ mutex_unlock(&vdev->context_list_lock);
+
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -243,60 +239,32 @@ static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
return &fence->base;
}
-static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+static void ivpu_job_destroy(struct ivpu_job *job)
{
struct ivpu_device *vdev = job->vdev;
-
- kref_get(&job->ref);
- *link = job;
-
- ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-}
-
-static void job_release(struct kref *ref)
-{
- struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
- struct ivpu_device *vdev = job->vdev;
u32 i;
+ ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
+ job->job_id, job->file_priv->ctx.id, job->engine_idx);
+
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
drm_gem_object_put(&job->bos[i]->base.base);
dma_fence_put(job->done_fence);
ivpu_file_priv_put(&job->file_priv);
-
- ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
kfree(job);
-
- /* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
- ivpu_rpm_put(vdev);
-}
-
-static void job_put(struct ivpu_job *job)
-{
- struct ivpu_device *vdev = job->vdev;
-
- ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
- kref_put(&job->ref, job_release);
}
static struct ivpu_job *
-ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
- int ret;
-
- ret = ivpu_rpm_get(vdev);
- if (ret < 0)
- return NULL;
job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
if (!job)
- goto err_rpm_put;
-
- kref_init(&job->ref);
+ return NULL;
job->vdev = vdev;
job->engine_idx = engine_idx;
@@ -310,17 +278,14 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->file_priv = ivpu_file_priv_get(file_priv);
ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
-
return job;
err_free_job:
kfree(job);
-err_rpm_put:
- ivpu_rpm_put(vdev);
return NULL;
}
-static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
struct ivpu_job *job;
@@ -329,7 +294,7 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
return -ENOENT;
if (job->file_priv->has_mmu_faults)
- job_status = VPU_JSM_STATUS_ABORTED;
+ job_status = DRM_IVPU_JOB_STATUS_ABORTED;
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
@@ -337,9 +302,10 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+ ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
- job_put(job);
+ ivpu_rpm_put(vdev);
return 0;
}
@@ -349,10 +315,10 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
unsigned long id;
xa_for_each(&vdev->submitted_jobs_xa, id, job)
- ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}
-static int ivpu_direct_job_submission(struct ivpu_job *job)
+static int ivpu_job_submit(struct ivpu_job *job)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
@@ -360,53 +326,65 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
struct ivpu_cmdq *cmdq;
int ret;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
if (!cmdq) {
- ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
- file_priv->ctx.id, job->engine_idx);
+ ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
+ file_priv->ctx.id, job->engine_idx);
ret = -EINVAL;
- goto err_unlock;
+ goto err_unlock_file_priv;
}
job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
- job_get(job, &job);
- ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+ xa_lock(&vdev->submitted_jobs_xa);
+ ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
if (ret) {
- ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
- goto err_job_put;
+ ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+ file_priv->ctx.id);
+ ret = -EBUSY;
+ goto err_unlock_submitted_jobs_xa;
}
ret = ivpu_cmdq_push_job(cmdq, job);
if (ret)
- goto err_xa_erase;
+ goto err_erase_xa;
ivpu_start_job_timeout_detection(vdev);
- ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
- job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
- job->engine_idx, cmdq->jobq->header.tail);
-
- if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) {
- ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
cmdq->jobq->header.head = cmdq->jobq->header.tail;
wmb(); /* Flush WC buffer for jobq header */
} else {
ivpu_cmdq_ring_db(vdev, cmdq);
}
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
+ job->job_id, file_priv->ctx.id, job->engine_idx,
+ job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+
+ xa_unlock(&vdev->submitted_jobs_xa);
+
mutex_unlock(&file_priv->lock);
+
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+
return 0;
-err_xa_erase:
- xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_job_put:
- job_put(job);
-err_unlock:
+err_erase_xa:
+ __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs_xa:
+ xa_unlock(&vdev->submitted_jobs_xa);
+err_unlock_file_priv:
mutex_unlock(&file_priv->lock);
+ ivpu_rpm_put(vdev);
return ret;
}
@@ -488,6 +466,9 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (params->engine > DRM_IVPU_ENGINE_COPY)
return -EINVAL;
+ if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
return -EINVAL;
@@ -509,44 +490,49 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
params->buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
- goto free_handles;
+ goto err_free_handles;
}
if (!drm_dev_enter(&vdev->drm, &idx)) {
ret = -ENODEV;
- goto free_handles;
+ goto err_free_handles;
}
ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
file_priv->ctx.id, params->buffer_count);
- job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+ job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
if (!job) {
ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
- goto dev_exit;
+ goto err_exit_dev;
}
ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
params->commands_offset);
if (ret) {
- ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
- goto job_put;
+ ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+ goto err_destroy_job;
}
- ret = ivpu_direct_job_submission(job);
- if (ret) {
- dma_fence_signal(job->done_fence);
- ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
- }
+ down_read(&vdev->pm->reset_lock);
+ ret = ivpu_job_submit(job);
+ up_read(&vdev->pm->reset_lock);
+ if (ret)
+ goto err_signal_fence;
-job_put:
- job_put(job);
-dev_exit:
drm_dev_exit(idx);
-free_handles:
kfree(buf_handles);
+ return ret;
+err_signal_fence:
+ dma_fence_signal(job->done_fence);
+err_destroy_job:
+ ivpu_job_destroy(job);
+err_exit_dev:
+ drm_dev_exit(idx);
+err_free_handles:
+ kfree(buf_handles);
return ret;
}
@@ -568,7 +554,7 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
}
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
- ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+ ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
ivpu_start_job_timeout_detection(vdev);
}
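
The job-lifetime rework above removes the per-job kref entirely: a job now lives from ivpu_job_create() to exactly one ivpu_job_signal_and_destroy() call, the runtime-PM reference moves from allocation time into ivpu_job_submit(), and the submitted_jobs XArray lock is held across ID allocation and doorbell ring so a completion interrupt can never observe a half-submitted job. A minimal sketch of that submit-side pattern follows (a hypothetical condensation, not the driver's full path); note that __xa_alloc() is the caller-locked variant of xa_alloc() and may drop and retake the lock to allocate nodes when the GFP flags allow sleeping.

#include <linux/xarray.h>

/*
 * Sketch only: publish the job in the lookup structure and start the
 * hardware inside one xa_lock section, so an interrupt handler that
 * looks the ID up sees either nothing or a fully submitted job.
 */
static int demo_submit(struct xarray *jobs, struct ivpu_job *job,
		       struct xa_limit range)
{
	int ret;

	xa_lock(jobs);
	ret = __xa_alloc(jobs, &job->job_id, job, range, GFP_KERNEL);
	if (ret) {
		xa_unlock(jobs);
		return -EBUSY;	/* ID range for this context exhausted */
	}
	/* ...push to the command queue and ring the doorbell here... */
	xa_unlock(jobs);
	return 0;
}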
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 45a2f2ec82e5..ca4984071cc7 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -43,7 +43,6 @@ struct ivpu_cmdq {
will update the job status
*/
struct ivpu_job {
- struct kref ref;
struct ivpu_device *vdev;
struct ivpu_file_priv *file_priv;
struct dma_fence *done_fence;
@@ -56,7 +55,7 @@ struct ivpu_job {
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 2228c44b115f..91bd640655ab 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -7,6 +7,7 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
+#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
@@ -71,10 +72,10 @@
#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
-#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
-#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
-#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
+#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
+#define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT)
#define IVPU_MMU_CMDQ_CMD_SIZE 16
#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
@@ -474,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
return 0;
}
+static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
+}
+
+static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
+}
+
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
- struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
- u64 *queue_buffer = q->base;
- int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
+ struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+ u64 *queue_buffer = cmdq->base;
+ int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
- if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
+ if (ivpu_mmu_queue_is_full(cmdq)) {
ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
return -EBUSY;
}
queue_buffer[idx] = data0;
queue_buffer[idx + 1] = data1;
- q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
+ cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
@@ -518,6 +531,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
ivpu_mmu_cmdq_err_to_str(err));
+ ivpu_hw_diagnose_failure(vdev);
}
return ret;
@@ -558,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->cmdq.cons = 0;
memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
- clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
@@ -872,20 +885,15 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
- if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ if (ivpu_mmu_queue_is_empty(evtq))
return NULL;
- clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
-
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
-
return evt;
}
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
- bool schedule_recovery = false;
u32 *event;
u32 ssid;
@@ -895,14 +903,22 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
ivpu_mmu_dump_event(vdev, event);
ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
- schedule_recovery = true;
- else
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ ivpu_pm_trigger_recovery(vdev, "MMU event");
+ return;
+ }
+
+ ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
}
+}
- if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+{
+ u32 *event;
+
+ while ((event = ivpu_mmu_get_event(vdev)) != NULL)
+ ivpu_mmu_dump_event(vdev, event);
}
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
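
The queue-accounting change above deserves a closer look: CIRC_SPACE()/CIRC_CNT() treat prod == cons as "empty", so they can never represent a completely full ring, whereas the new macros keep one extra wrap bit (bit IVPU_MMU_Q_COUNT_LOG2) in prod and cons, SMMU-style. Equal indexes with different wrap bits mean full; equal indexes with equal wrap bits mean empty. A standalone model of the arithmetic (plain C, hypothetical 16-entry queue mirroring the macros above):

#include <assert.h>

#define Q_COUNT_LOG2	4
#define Q_COUNT		(1u << Q_COUNT_LOG2)	/* 16 entries */
#define Q_WRAP_MASK	((Q_COUNT << 1) - 1)	/* index bits + wrap bit */
#define Q_IDX(v)	((v) & (Q_COUNT - 1))
#define Q_WRP(v)	((v) & Q_COUNT)		/* the wrap bit itself */

static int is_full(unsigned int p, unsigned int c)
{
	return Q_IDX(p) == Q_IDX(c) && Q_WRP(p) != Q_WRP(c);
}

static int is_empty(unsigned int p, unsigned int c)
{
	return Q_IDX(p) == Q_IDX(c) && Q_WRP(p) == Q_WRP(c);
}

int main(void)
{
	unsigned int prod = 0, cons = 0, i;

	assert(is_empty(prod, cons));
	for (i = 0; i < Q_COUNT; i++)		/* produce 16 entries */
		prod = (prod + 1) & Q_WRAP_MASK;
	assert(is_full(prod, cons));		/* same index, wrap differs */
	cons = (cons + 1) & Q_WRAP_MASK;	/* consume one entry */
	assert(!is_full(prod, cons) && !is_empty(prod, cons));
	return 0;
}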
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index cb551126806b..6fa35c240710 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -46,5 +46,6 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 12a8c09d4547..fe6161299236 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -355,6 +355,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
+ ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+ ctx->id, dma_addr, vpu_addr, size);
+
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
@@ -366,6 +369,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
+
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -388,14 +392,19 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
mutex_lock(&ctx->lock);
for_each_sgtable_dma_sg(sgt, sg, i) {
+ dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
+ ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+ ctx->id, dma_addr, vpu_addr, size);
+
ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
vpu_addr += size;
}
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
+
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 0af8864cb3b5..5f73854234ba 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -13,6 +13,7 @@
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
@@ -57,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
+
ret = ivpu_shutdown(vdev);
- if (ret) {
+ if (ret)
ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
- return ret;
- }
+
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
return ret;
}
@@ -70,6 +74,9 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
+
retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
@@ -111,22 +118,37 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret;
-retry:
- ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
- if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
- cond_resched();
- goto retry;
- }
+ ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
- if (ret && ret != -EAGAIN)
- ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
+ ret = pm_runtime_resume_and_get(vdev->drm.dev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
+
+ ivpu_fw_log_dump(vdev);
+
+ atomic_inc(&vdev->pm->reset_counter);
+ atomic_set(&vdev->pm->reset_pending, 1);
+ down_write(&vdev->pm->reset_lock);
+
+ ivpu_suspend(vdev);
+ ivpu_pm_prepare_cold_boot(vdev);
+ ivpu_jobs_abort_all(vdev);
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+
+ up_write(&vdev->pm->reset_lock);
+ atomic_set(&vdev->pm->reset_pending, 0);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
}
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
{
- struct ivpu_pm_info *pm = vdev->pm;
+ ivpu_err(vdev, "Recovery triggered by %s\n", reason);
if (ivpu_disable_recovery) {
ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
@@ -138,10 +160,11 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
return;
}
- /* Schedule recovery if it's not in progress */
- if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
- ivpu_hw_irq_disable(vdev);
- queue_work(system_long_wq, &pm->recovery_work);
+ /* Trigger recovery if it's not in progress */
+ if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
+ queue_work(system_long_wq, &vdev->pm->recovery_work);
}
}
@@ -149,12 +172,8 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
- unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
-
- ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms);
- ivpu_hw_diagnose_failure(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "TDR");
}
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
@@ -192,9 +211,6 @@ int ivpu_pm_suspend_cb(struct device *dev)
ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev);
- pci_save_state(to_pci_dev(dev));
- pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
-
ivpu_dbg(vdev, PM, "Suspend done.\n");
return 0;
@@ -208,9 +224,6 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_dbg(vdev, PM, "Resume..\n");
- pci_set_power_state(to_pci_dev(dev), PCI_D0);
- pci_restore_state(to_pci_dev(dev));
-
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
@@ -227,6 +240,9 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
bool hw_is_idle = true;
int ret;
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+ drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
@@ -247,7 +263,8 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
if (!hw_is_idle) {
- ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_fw_log_dump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
@@ -308,11 +325,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- pm_runtime_get_sync(vdev->drm.dev);
-
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->in_reset, 1);
+ atomic_set(&vdev->pm->reset_pending, 1);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+ down_write(&vdev->pm->reset_lock);
ivpu_prepare_for_reset(vdev);
ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev);
@@ -329,9 +347,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- atomic_set(&vdev->pm->in_reset, 0);
+ up_write(&vdev->pm->reset_lock);
+ atomic_set(&vdev->pm->reset_pending, 0);
ivpu_dbg(vdev, PM, "Post-reset done.\n");
+ pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
@@ -344,7 +364,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm->vdev = vdev;
pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
- atomic_set(&pm->in_reset, 0);
+ init_rwsem(&pm->reset_lock);
+ atomic_set(&pm->reset_pending, 0);
+ atomic_set(&pm->reset_counter, 0);
+
INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index 97c6e0b0aa42..ec60fbeefefc 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -6,6 +6,7 @@
#ifndef __IVPU_PM_H__
#define __IVPU_PM_H__
+#include <linux/rwsem.h>
#include <linux/types.h>
struct ivpu_device;
@@ -14,8 +15,9 @@ struct ivpu_pm_info {
struct ivpu_device *vdev;
struct delayed_work job_timeout_work;
struct work_struct recovery_work;
- atomic_t in_reset;
+ struct rw_semaphore reset_lock;
atomic_t reset_counter;
+ atomic_t reset_pending;
bool is_warmboot;
u32 suspend_reschedule_counter;
};
@@ -37,7 +39,7 @@ int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev);
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
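
Replacing the in_reset atomic with the reset_lock rw_semaphore changes the semantics, not just the spelling: submission paths take the lock shared (any number of jobs may enter the hardware concurrently) while recovery and the PCI reset callbacks take it exclusive, so a reset now waits out in-flight submitters instead of racing them, and reset_pending merely flags the state. A minimal sketch of the pattern, with hypothetical names (init_rwsem() at probe time is assumed):

#include <linux/rwsem.h>

struct npu {
	struct rw_semaphore reset_lock;
};

static int demo_submit_one(struct npu *npu)
{
	int ret = 0;

	down_read(&npu->reset_lock);	/* shared: many submitters at once */
	/* ...allocate an ID, ring the doorbell... */
	up_read(&npu->reset_lock);
	return ret;
}

static void demo_recover(struct npu *npu)
{
	down_write(&npu->reset_lock);	/* exclusive: drains all submitters */
	/* ...suspend, abort jobs, prepare cold boot, resume... */
	up_write(&npu->reset_lock);
}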
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 7b7c605166e0..ab2a82cb1b0b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -674,78 +673,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
-/*
- * Only a single callback can be registered for CXL CPER events.
- */
-static DECLARE_RWSEM(cxl_cper_rw_sem);
-static cxl_cper_callback cper_callback;
-
-/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
-
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-#define CPER_SEC_CXL_GEN_MEDIA_GUID \
- GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
- 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
-
-/*
- * DRAM Event Record
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
- */
-#define CPER_SEC_CXL_DRAM_GUID \
- GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
- 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-#define CPER_SEC_CXL_MEM_MODULE_GUID \
- GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
- 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
-
-static void cxl_cper_post_event(enum cxl_event_type event_type,
- struct cxl_cper_event_rec *rec)
-{
- if (rec->hdr.length <= sizeof(rec->hdr) ||
- rec->hdr.length > sizeof(*rec)) {
- pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
- rec->hdr.length);
- return;
- }
-
- if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
- pr_err(FW_WARN "CXL CPER invalid event\n");
- return;
- }
-
- guard(rwsem_read)(&cxl_cper_rw_sem);
- if (cper_callback)
- cper_callback(event_type, rec);
-}
-
-int cxl_cper_register_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (cper_callback)
- return -EINVAL;
- cper_callback = callback;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
-
-int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (callback != cper_callback)
- return -EINVAL;
- cper_callback = NULL;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
-
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -780,22 +707,6 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
- } else if (guid_equal(sec_type,
- &CPER_SEC_CXL_MEM_MODULE_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index dbdee2924594..02255795b800 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,10 +525,12 @@ static void acpi_ec_clear(struct acpi_ec *ec)
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/* Drain additional events if hardware requires that */
if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -544,9 +546,11 @@ static void __acpi_ec_flush_work(void)
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/*
* When ec_freeze_events is true, we need to flush events in
@@ -567,9 +571,10 @@ void acpi_ec_flush_work(void)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
+ unsigned long flags;
bool guarded;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
@@ -585,29 +590,31 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state != EC_EVENT_READY &&
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
@@ -749,6 +756,7 @@ static int ec_guard(struct acpi_ec *ec)
static int ec_poll(struct acpi_ec *ec)
{
+ unsigned long flags;
int repeat = 5; /* number of command restarts */
while (repeat--) {
@@ -757,14 +765,14 @@ static int ec_poll(struct acpi_ec *ec)
do {
if (!ec_guard(ec))
return 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec, false);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
@@ -772,10 +780,11 @@ static int ec_poll(struct acpi_ec *ec)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
struct transaction *t)
{
+ unsigned long tmp;
int ret = 0;
/* start transaction */
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
@@ -786,11 +795,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = t;
ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -799,7 +808,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease command");
unlock:
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -927,7 +936,9 @@ EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
@@ -937,28 +948,31 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
}
ec_log_drv("EC started");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static bool acpi_ec_stopped(struct acpi_ec *ec)
{
+ unsigned long flags;
bool flushed;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
flushed = acpi_ec_flushed(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec)) {
ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
wait_event(ec->wait, acpi_ec_stopped(ec));
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
acpi_ec_complete_request(ec);
@@ -969,25 +983,29 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
ec_log_drv("EC stopped");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = true;
ec->polling_guard = 0;
ec_log_drv("interrupt blocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = ec_busy_polling;
ec->polling_guard = ec_polling_guard;
ec_log_drv("interrupt unblocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
@@ -1119,9 +1137,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_put_query_handler(handler);
kfree(q);
@@ -1184,12 +1202,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress++;
queue_work(ec_query_wq, &q->work);
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
return 0;
@@ -1205,14 +1223,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event started");
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
while (ec->events_to_process) {
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_submit_query(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->events_to_process--;
}
@@ -1229,11 +1247,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event stopped");
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
guard_timeout = !!ec_guard(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
if (guard_timeout && !ec->curr)
@@ -1246,7 +1264,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec->events_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
}
static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1271,11 +1289,13 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
clear_gpe_and_advance_transaction(ec, true);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -2085,7 +2105,7 @@ bool acpi_ec_dispatch_gpe(void)
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
if (acpi_ec_gpe_status_set(first_ec)) {
pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2094,7 +2114,7 @@ bool acpi_ec_dispatch_gpe(void)
work_in_progress = acpi_ec_work_in_progress(first_ec);
}
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
if (!work_in_progress)
return false;
@@ -2107,11 +2127,11 @@ bool acpi_ec_dispatch_gpe(void)
pm_pr_dbg("ACPI EC work flushed\n");
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
work_in_progress = acpi_ec_work_in_progress(first_ec);
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
return false;
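
Every EC conversion above follows the same rule: ec->lock is also taken from the EC GPE/interrupt path, so a process-context holder must disable local interrupts while holding it, or the handler can fire on the same CPU and spin on a lock that will never be released. spin_lock_irqsave() is used where the current IRQ state is unknown, spin_lock_irq() where the path is known to run with IRQs enabled, and the interrupt handler itself may keep a plain spin_lock() because IRQs are already off there. The generic shape, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process context, prior IRQ state unknown: save and restore flags. */
static void demo_update_from_task(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ...touch state shared with the interrupt handler... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Hard-IRQ context already runs with local interrupts disabled. */
static irqreturn_t demo_irq(int irq, void *data)
{
	spin_lock(&demo_lock);
	/* ...same shared state... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}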
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8dd23b19e997..eca24f41556d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -478,6 +478,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
{
WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
+
+ /* (e)poll-based threads require an explicit wakeup signal when
+ * queuing their own work; they rely on these events to consume
+ * messages without blocking on I/O. Without it, threads risk waiting
+ * indefinitely without handling the work.
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ thread->pid == current->pid && !thread->process_todo)
+ wake_up_interruptible_sync(&thread->wait);
+
thread->process_todo = true;
}
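
The race the comment describes is narrow: a looper thread registered for (e)poll can queue work to itself while its process_todo flag is still false, and since self-queued work generates no other signal, the thread would go back to sleep in poll() with the work pending forever. The added wake_up_interruptible_sync() covers exactly that window. Reduced to a sketch with hypothetical names (locking omitted):

#include <linux/wait.h>

struct demo_worker {
	wait_queue_head_t wait;
	bool process_todo;
};

static void demo_enqueue_self(struct demo_worker *w)
{
	/* Nothing else will signal a poll()-style waiter for its own
	 * work, so wake it explicitly before marking the work visible. */
	if (!w->process_todo)
		wake_up_interruptible_sync(&w->wait);
	w->process_todo = true;
}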
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 3a5f3255f51b..682ff550ccfb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -48,6 +48,7 @@ enum {
enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
+ board_ahci_43bit_dma,
board_ahci_ign_iferr,
board_ahci_low_power,
board_ahci_no_debounce_delay,
@@ -128,6 +129,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_43bit_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_ign_iferr] = {
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
@@ -597,14 +605,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
- /* Asmedia */
- { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
- { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
- { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
- { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
- { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
- { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
+ /* ASMedia */
+ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
+ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
+ { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
+ { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
+ { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
{ PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
{ PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
{ PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */
@@ -663,6 +671,19 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
+ switch (pdev->device) {
+ case 0x1166:
+ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+ hpriv->saved_port_map = 0x3f;
+ break;
+ case 0x1064:
+ dev_info(&pdev->dev, "ASM1064 has only four ports\n");
+ hpriv->saved_port_map = 0xf;
+ break;
+ }
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
@@ -949,11 +970,20 @@ static int ahci_pci_device_resume(struct device *dev)
#endif /* CONFIG_PM */
-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+static int ahci_configure_dma_masks(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- const int dma_bits = using_dac ? 64 : 32;
+ int dma_bits;
int rc;
+ if (hpriv->cap & HOST_CAP_64) {
+ dma_bits = 64;
+ if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
+ dma_bits = 43;
+ } else {
+ dma_bits = 32;
+ }
+
/*
* If the device fixup already set the dma_mask to some non-standard
* value, don't extend it here. This happens on STA2X11, for example.
@@ -1926,7 +1956,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_gtf_filter_workaround(host);
/* initialize adapter */
- rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+ rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
return rc;
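
The new board id exists because these ASMedia parts advertise HOST_CAP_64 yet only decode 43 address bits, so ahci_configure_dma_masks() now derives the mask width from the capability and the new hflag together before handing it to the DMA core. A minimal sketch of that derivation, assuming the flag semantics from the hunk above:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Pick the widest mask the device can actually decode. */
static int demo_set_dma_mask(struct pci_dev *pdev, bool cap_64, bool only_43bit)
{
	int bits = cap_64 ? (only_43bit ? 43 : 64) : 32;

	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bits));
}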
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 4bae95b06ae3..df8f8a1a3a34 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -247,6 +247,7 @@ enum {
AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
suspend/resume */
AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
+ AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 64f7f7d6ba84..11a2c199a7c2 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
u32 axicc;
bool is_cci_enabled;
int flags;
- struct reset_control *rst;
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
@@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
+static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+ int rc, i;
+
+ rc = ahci_platform_enable_regulators(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_regulator;
+
+ /* Assert the controller reset */
+ rc = ahci_platform_assert_rsts(hpriv);
+ if (rc)
+ goto disable_clks;
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_rsts;
+ }
+
+ /* De-assert the controller reset */
+ ahci_platform_deassert_rsts(hpriv);
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_power_on(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ return 0;
+
+disable_rsts:
+ ahci_platform_deassert_rsts(hpriv);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+disable_clks:
+ ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+ ahci_platform_disable_regulators(hpriv);
+
+ return rc;
+}
+
static int ceva_ahci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
return -ENOMEM;
cevapriv->ahci_pdev = pdev;
-
- cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
- NULL);
- if (IS_ERR(cevapriv->rst))
- dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
- "failed to get reset\n");
-
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
- if (!cevapriv->rst) {
- rc = ahci_platform_enable_resources(hpriv);
- if (rc)
- return rc;
- } else {
- int i;
+ hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ NULL);
+ if (IS_ERR(hpriv->rsts))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
+ "failed to get reset\n");
- rc = ahci_platform_enable_clks(hpriv);
- if (rc)
- return rc;
- /* Assert the controller reset */
- reset_control_assert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_init(hpriv->phys[i]);
- if (rc)
- return rc;
- }
-
- /* De-assert the controller reset */
- reset_control_deassert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_power_on(hpriv->phys[i]);
- if (rc) {
- phy_exit(hpriv->phys[i]);
- return rc;
- }
- }
- }
+ rc = ceva_ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
(u8 *)&cevapriv->pp2c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
(u8 *)&cevapriv->pp2c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read OOB timing value for COMWAKE from device-tree*/
if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
(u8 *)&cevapriv->pp3c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
(u8 *)&cevapriv->pp3c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy BURST timing value from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-burst-params",
(u8 *)&cevapriv->pp4c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-burst-params",
(u8 *)&cevapriv->pp4c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy RETRY interval timing value from device-tree */
if (of_property_read_u16_array(np, "ceva,p0-retry-params",
(u16 *)&cevapriv->pp5c[0], 2) < 0) {
dev_warn(dev, "ceva,p0-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u16_array(np, "ceva,p1-retry-params",
(u16 *)&cevapriv->pp5c[1], 2) < 0) {
dev_warn(dev, "ceva,p1-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/*
@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
struct ahci_host_priv *hpriv = host->private_data;
int rc;
- rc = ahci_platform_enable_resources(hpriv);
+ rc = ceva_ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
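
Besides moving the reset line into hpriv->rsts, where ahci_platform_assert_rsts()/ahci_platform_deassert_rsts() and devres manage it, the new ceva_ahci_platform_enable_resources() is a textbook case of the kernel's goto-unwind idiom that the rest of the probe rework also adopts: acquire resources in order, and on failure release only what was acquired, in exact reverse order, one label per step. In miniature, with hypothetical helpers:

struct demo;

static int demo_enable_a(struct demo *d) { return 0; }
static int demo_enable_b(struct demo *d) { return 0; }
static int demo_enable_c(struct demo *d) { return 0; }
static void demo_disable_a(struct demo *d) { }
static void demo_disable_b(struct demo *d) { }

static int demo_enable_all(struct demo *d)
{
	int rc;

	rc = demo_enable_a(d);
	if (rc)
		return rc;

	rc = demo_enable_b(d);
	if (rc)
		goto err_disable_a;

	rc = demo_enable_c(d);
	if (rc)
		goto err_disable_b;

	return 0;

err_disable_b:
	demo_disable_b(d);
err_disable_a:
	demo_disable_a(d);
	return rc;
}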
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 09ed67772fae..be3412cdb22e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2001,6 +2001,33 @@ bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
return true;
}
+static bool ata_dev_power_is_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_CHK_POWER;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask) {
+ ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
+ err_mask);
+ /*
+ * Assume we are in standby mode so that we always force a
+ * spinup in ata_dev_power_set_active().
+ */
+ return false;
+ }
+
+ ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
+
+ /* Active or idle */
+ return tf.nsect == 0xff;
+}
+
/**
* ata_dev_power_set_standby - Set a device power mode to standby
* @dev: target device
@@ -2017,6 +2044,11 @@ void ata_dev_power_set_standby(struct ata_device *dev)
struct ata_taskfile tf;
unsigned int err_mask;
+ /* If the device is already sleeping or in standby, do nothing. */
+ if ((dev->flags & ATA_DFLAG_SLEEPING) ||
+ !ata_dev_power_is_active(dev))
+ return;
+
/*
* Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
* causing some drives to spin up and down again. For these, do nothing
@@ -2042,33 +2074,6 @@ void ata_dev_power_set_standby(struct ata_device *dev)
err_mask);
}
-static bool ata_dev_power_is_active(struct ata_device *dev)
-{
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- ata_tf_init(dev, &tf);
- tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
- tf.protocol = ATA_PROT_NODATA;
- tf.command = ATA_CMD_CHK_POWER;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- if (err_mask) {
- ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
- err_mask);
- /*
- * Assume we are in standby mode so that we always force a
- * spinup in ata_dev_power_set_active().
- */
- return false;
- }
-
- ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
-
- /* Active or idle */
- return tf.nsect == 0xff;
-}
-
/**
* ata_dev_power_set_active - Set a device power mode to active
* @dev: target device
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index b6656c287175..0fb1934875f2 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -784,7 +784,7 @@ bool sata_lpm_ignore_phy_events(struct ata_link *link)
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
static const char *ata_lpm_policy_names[] = {
- [ATA_LPM_UNKNOWN] = "max_performance",
+ [ATA_LPM_UNKNOWN] = "keep_firmware_settings",
[ATA_LPM_MAX_POWER] = "max_performance",
[ATA_LPM_MED_POWER] = "medium_power",
[ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 50d8ce20ae5b..9fb1575f8d88 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2550,14 +2550,12 @@ static int fore200e_sba_probe(struct platform_device *op)
return 0;
}
-static int fore200e_sba_remove(struct platform_device *op)
+static void fore200e_sba_remove(struct platform_device *op)
{
struct fore200e *fore200e = dev_get_drvdata(&op->dev);
fore200e_shutdown(fore200e);
kfree(fore200e);
-
- return 0;
}
static const struct of_device_id fore200e_sba_match[] = {
@@ -2574,7 +2572,7 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove = fore200e_sba_remove,
+ .remove_new = fore200e_sba_remove,
};
#endif
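
The .remove -> .remove_new change is part of the tree-wide conversion of platform drivers to a void-returning remove callback: the old int return was misleading because the device core unbinds the device no matter what was returned, so the new signature makes drivers complete their cleanup unconditionally. The converted shape, as a hypothetical skeleton:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv { int dummy; };

static void demo_quiesce(struct demo_priv *p) { /* stop the hardware */ }

static void demo_remove(struct platform_device *op)
{
	struct demo_priv *p = dev_get_drvdata(&op->dev);

	demo_quiesce(p);
	kfree(p);	/* no status to return; cleanup must not fail */
}

static struct platform_driver demo_driver = {
	.driver = { .name = "demo" },
	.remove_new = demo_remove,
};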
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e327a0229dc1..e7f713cd70d3 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card)
vc->scq = alloc_scq(card, vc->class);
if (!vc->scq) {
printk("%s: can't get SCQ.\n", card->name);
+ kfree(card->vcs[0]);
+ card->vcs[0] = NULL;
return -ENOMEM;
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 018ac202de34..024b78a0cfc1 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -431,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (!raw_capacity)
- return 0;
-
if (val != CPUFREQ_CREATE_POLICY)
return 0;
@@ -450,9 +447,11 @@ init_cpu_capacity_callback(struct notifier_block *nb,
}
if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- schedule_work(&update_topology_flags_work);
- free_raw_capacity();
+ if (raw_capacity) {
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ free_raw_capacity();
+ }
pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work);
}
@@ -472,7 +471,7 @@ static int __init register_cpufreq_notifier(void)
* On ACPI-based systems skip registering cpufreq notifier as cpufreq
* information is not needed for cpu capacity initialization.
*/
- if (!acpi_disabled || !raw_capacity)
+ if (!acpi_disabled)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 14d46af40f9a..9828da9b933c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -125,7 +125,7 @@ static void __fwnode_link_del(struct fwnode_link *link)
*/
static void __fwnode_link_cycle(struct fwnode_link *link)
{
- pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ pr_debug("%pfwf: cycle: depends on %pfwf\n",
link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE;
}
@@ -284,10 +284,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false;
}
+#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE | \
+ DL_FLAG_MANAGED)
static inline bool device_link_flag_is_sync_state_only(u32 flags)
{
- return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+ return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
/**
@@ -1943,6 +1945,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
/* Termination condition. */
if (sup_dev == con) {
+ pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
@@ -1974,8 +1977,11 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
+ par_dev->fwnode);
ret = true;
+ }
if (!sup_dev)
goto out;
@@ -1991,6 +1997,8 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (__fw_devlink_relax_cycles(con,
dev_link->supplier->fwnode)) {
+ pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
+ dev_link->supplier->fwnode);
fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE;
ret = true;
@@ -2058,13 +2066,19 @@ static int fw_devlink_create_devlink(struct device *con,
/*
* SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So cycle detection isn't necessary and shouldn't be done.
+ * So, one might expect that cycle detection isn't necessary for them.
+ * However, if the device link was marked as SYNC_STATE_ONLY because
+ * it's part of a cycle, then we still need to do cycle detection. This
+ * is because the consumer and supplier might be part of multiple cycles
+ * and we need to detect all those cycles.
*/
- if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ if (!device_link_flag_is_sync_state_only(flags) ||
+ flags & DL_FLAG_CYCLE) {
device_links_write_lock();
if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link);
flags = fw_devlink_get_flags(link->flags);
+ pr_debug("----- cycle: end -----\n");
dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
sup_handle);
}
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 026bdcb45127..0d957c5f1bcc 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -9,6 +9,23 @@
#define BLOCK_TEST_SIZE 12
+static void get_changed_bytes(void *orig, void *new, size_t size)
+{
+ char *o = orig;
+ char *n = new;
+ int i;
+
+ get_random_bytes(new, size);
+
+ /*
+ * This could be nicer and more efficient but we shouldn't
+ * super care.
+ */
+ for (i = 0; i < size; i++)
+ while (n[i] == o[i])
+ get_random_bytes(&n[i], 1);
+}
+
static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
@@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- unsigned int val, val_test, val_last;
+ unsigned int val;
+ u16 val_test, val_last;
u16 val_array[BLOCK_TEST_SIZE];
config = raw_regmap_config;
@@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- u16 val[2];
+ u16 val[3];
u16 *hw_buf;
unsigned int rval;
int i;
@@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test)
hw_buf = (u16 *)data->vals;
- get_random_bytes(&val, sizeof(val));
+ get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
- if (config.val_format_endian == REGMAP_ENDIAN_BIG)
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- be16_to_cpu(val[0])));
- else
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- le16_to_cpu(val[0])));
+ KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
+ sizeof(u16) * 2));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
@@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test)
switch (i) {
case 2:
case 3:
- case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu(val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu(val[i - 2]));
}
break;
+ case 4:
+ KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
+ break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
+
+ /*
+ * The value written via _write() was translated by the core,
+ * translate the original copy for comparison purposes.
+ */
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG)
+ val[2] = cpu_to_be16(val[2]);
+ else
+ val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
@@ -1312,8 +1336,7 @@ static void raw_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
regmap_exit(map);
}
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7061d3ee836a..6b5d34919c72 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -68,7 +68,7 @@ static struct attribute *bcma_device_attrs[] = {
};
ATTRIBUTE_GROUPS(bcma_device);
-static struct bus_type bcma_bus_type = {
+static const struct bus_type bcma_bus_type = {
.name = "bcma",
.match = bcma_bus_match,
.probe = bcma_device_probe,
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index d2dbf8aaccb5..b1b47d88f5db 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
struct gendisk *gd;
mempool_t *mp;
struct blk_mq_tag_set *set;
+ sector_t ssize;
ulong flags;
int late = 0;
int err;
@@ -396,7 +397,7 @@ aoeblk_gdalloc(void *vp)
gd->minors = AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
- set_capacity(gd, d->ssize);
+ ssize = d->ssize;
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
@@ -405,6 +406,8 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
+ set_capacity(gd, ssize);
+
err = device_add_disk(NULL, gd, aoe_attr_groups);
if (err)
goto out_disk_cleanup;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 146b32fa7b47..f8145499da38 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -165,39 +165,37 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}
+/*
+ * We support direct I/O only if lo_offset is aligned with the logical I/O size
+ * of backing device, and the logical block size of loop is bigger than that of
+ * the backing device.
+ */
+static bool lo_bdev_can_use_dio(struct loop_device *lo,
+ struct block_device *backing_bdev)
+{
+ unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
+
+ if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
+ return false;
+ if (lo->lo_offset & (sb_bsize - 1))
+ return false;
+ return true;
+}
+
static void __loop_update_dio(struct loop_device *lo, bool dio)
{
struct file *file = lo->lo_backing_file;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- unsigned short sb_bsize = 0;
- unsigned dio_align = 0;
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *backing_bdev = NULL;
bool use_dio;
- if (inode->i_sb->s_bdev) {
- sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
- dio_align = sb_bsize - 1;
- }
+ if (S_ISBLK(inode->i_mode))
+ backing_bdev = I_BDEV(inode);
+ else if (inode->i_sb->s_bdev)
+ backing_bdev = inode->i_sb->s_bdev;
- /*
- * We support direct I/O only if lo_offset is aligned with the
- * logical I/O size of backing device, and the logical block
- * size of loop is bigger than the backing device's.
- *
- * TODO: the above condition may be loosed in the future, and
- * direct I/O may be switched runtime at that time because most
- * of requests in sane applications should be PAGE_SIZE aligned
- */
- if (dio) {
- if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
- !(lo->lo_offset & dio_align) &&
- (file->f_mode & FMODE_CAN_ODIRECT))
- use_dio = true;
- else
- use_dio = false;
- } else {
- use_dio = false;
- }
+ use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
+ (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
if (lo->use_dio == use_dio)
return;
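
lo_bdev_can_use_dio() also makes explicit where the backing block device comes from: a loop device stacked directly on another block device (S_ISBLK) uses I_BDEV(inode), a file-backed one falls back to the filesystem's s_bdev, and with no backing bdev at all only FMODE_CAN_ODIRECT decides. The alignment test itself relies on logical block sizes being powers of two, so the AND is an exact modulo; a standalone model with hypothetical values:

#include <assert.h>
#include <stdbool.h>

/* (offset & (bsize - 1)) == 0 iff offset is a multiple of bsize,
 * provided bsize is a power of two (logical block sizes always are). */
static bool offset_aligned(unsigned long long offset, unsigned int bsize)
{
	return (offset & (bsize - 1ULL)) == 0;
}

int main(void)
{
	assert(offset_aligned(4096, 512));
	assert(offset_aligned(0, 4096));
	assert(!offset_aligned(4096 + 512, 4096));	/* 512 % 4096 != 0 */
	return 0;
}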
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4e72ec4e25ac..33a8f37bb6a1 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -508,7 +508,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
struct iov_iter *iter, int msg_flags, int *sent)
{
int result;
- struct msghdr msg;
+ struct msghdr msg = {};
unsigned int noreclaim_flag;
if (unlikely(!sock)) {
@@ -524,10 +524,6 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
do {
sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
sock->sk->sk_use_task_frag = false;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (send)
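The replacement relies on C zero-initialization: an empty initializer clears every member, so only the field that differs per call still needs explicit assignment. A tiny userspace check of that assumption:

#include <assert.h>
#include <stddef.h>
#include <sys/socket.h>

int main(void)
{
	struct msghdr msg = {};	/* all members zero-initialized */

	assert(msg.msg_name == NULL && msg.msg_namelen == 0);
	assert(msg.msg_control == NULL && msg.msg_controllen == 0);
	return 0;
}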
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 9f7695f00c2d..36755f263e8e 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1840,7 +1840,7 @@ static void null_del_dev(struct nullb *nullb)
dev = nullb->dev;
- ida_simple_remove(&nullb_indexes, nullb->index);
+ ida_free(&nullb_indexes, nullb->index);
list_del_init(&nullb->list);
@@ -2174,7 +2174,7 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
mutex_lock(&lock);
- rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
if (rv < 0) {
mutex_unlock(&lock);
goto out_cleanup_zone;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index a999b698b131..12b5d53ec856 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
- bool need_wakeup;
+ bool need_wakeup = false;
lockdep_assert_held(&rbd_dev->lock_rwsem);
spin_lock(&rbd_dev->lock_lists_lock);
- rbd_assert(!list_empty(&img_req->lock_item));
- list_del_init(&img_req->lock_item);
- need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
- list_empty(&rbd_dev->running_list));
+ if (!list_empty(&img_req->lock_item)) {
+ list_del_init(&img_req->lock_item);
+ need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+ list_empty(&rbd_dev->running_list));
+ }
spin_unlock(&rbd_dev->lock_lists_lock);
if (need_wakeup)
complete(&rbd_dev->releasing_wait);
@@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
return;
}
- list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
+ while (!list_empty(&rbd_dev->acquiring_list)) {
+ img_req = list_first_entry(&rbd_dev->acquiring_list,
+ struct rbd_img_request, lock_item);
mutex_lock(&img_req->state_mutex);
rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
+ if (!result)
+ list_move_tail(&img_req->lock_item,
+ &rbd_dev->running_list);
+ else
+ list_del_init(&img_req->lock_item);
rbd_img_schedule(img_req, result);
mutex_unlock(&img_req->state_mutex);
}
-
- list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
static bool locker_equal(const struct ceph_locker *lhs,
@@ -5326,7 +5332,7 @@ static void rbd_dev_release(struct device *dev)
if (need_put) {
destroy_workqueue(rbd_dev->task_wq);
- ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+ ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
}
rbd_dev_free(rbd_dev);
@@ -5402,9 +5408,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return NULL;
/* get an id and fill in device name */
- rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
- minor_to_rbd_dev_id(1 << MINORBITS),
- GFP_KERNEL);
+ rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
+ minor_to_rbd_dev_id(1 << MINORBITS) - 1,
+ GFP_KERNEL);
if (rbd_dev->dev_id < 0)
goto fail_rbd_dev;
@@ -5425,7 +5431,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return rbd_dev;
fail_dev_id:
- ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+ ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
rbd_dev_free(rbd_dev);
return NULL;
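The ida_simple_*() conversions in this hunk and the null_blk one above follow a single mapping, with one subtlety: ida_simple_get()'s end argument is exclusive while ida_alloc_max()'s max is inclusive, hence the "- 1" in rbd_dev_create(). A hedged kernel-style sketch of the equivalence (my_ida is a hypothetical allocator):

#include <linux/idr.h>

static DEFINE_IDA(my_ida);

/* Old API: ids in [0, 128), i.e. 0..127 -- 'end' is exclusive. */
static int old_get(void)
{
	return ida_simple_get(&my_ida, 0, 128, GFP_KERNEL);
}

/* New API: ids 0..127 -- 'max' is inclusive, so pass 128 - 1. */
static int new_get(void)
{
	return ida_alloc_max(&my_ida, 127, GFP_KERNEL);
}

/* ida_simple_remove() becomes ida_free(). */
static void put_id(int id)
{
	ida_free(&my_ida, id);
}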
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3b6b9abb8ce1..2bf14a0e2815 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -367,8 +367,6 @@ static void virtblk_done(struct virtqueue *vq)
blk_mq_complete_request(req);
req_done = true;
}
- if (unlikely(virtqueue_is_broken(vq)))
- break;
} while (!virtqueue_enable_cb(vq));
/* In case queue is stopped waiting for more buffers. */
@@ -1595,14 +1593,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ /* Ensure no requests in virtqueues before deleting vqs. */
+ blk_mq_freeze_queue(vblk->disk->queue);
+
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
- blk_mq_quiesce_queue(vblk->disk->queue);
-
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@@ -1620,7 +1619,7 @@ static int virtblk_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
- blk_mq_unquiesce_queue(vblk->disk->queue);
+ blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif
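The reordering matters: the queue must be frozen (in-flight requests drained, new ones blocked) before the device is reset and the virtqueues deleted, and restore must unfreeze rather than merely unquiesce. A minimal sketch of the resulting pairing, assuming the driver's single request queue and omitting vq re-creation:

/* Sketch only -- mirrors the freeze/restore ordering after the fix. */
static int my_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	blk_mq_freeze_queue(vblk->disk->queue);	/* drain all requests first */
	virtio_reset_device(vdev);		/* then stop interrupts */
	vdev->config->del_vqs(vdev);		/* safe: nothing in flight */
	return 0;
}

static int my_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	virtio_device_ready(vdev);
	blk_mq_unfreeze_queue(vblk->disk->queue);	/* pairs with freeze */
	return 0;
}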
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index fdb0fae88d1c..b40b32fa7f1c 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
bt_dev_dbg(hdev, "QCA Patch config");
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
- cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ cmd, 0, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index a61757835695..9a7243d5db71 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -1417,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
kfree_skb(skb);
return 0;
@@ -2368,7 +2368,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
hdev->setup = bcm4377_hci_setup;
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
if (bcm4377->hw->broken_mws_transport_config)
set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
if (bcm4377->hw->broken_ext_scan)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 94b8c406f0c0..edd2a81b4d5e 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -1806,13 +1807,12 @@ static int qca_power_on(struct hci_dev *hdev)
static void hci_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+ bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
@@ -1904,7 +1904,17 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ /* Set the BDA quirk bit for reading the BDA value from the fwnode
+ * property only if that property exists in the DT.
+ */
+ if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
+ } else {
+ bt_dev_dbg(hdev, "local-bd-address is not present in the devicetree so not setting quirk bit for BDA");
+ }
+
hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 6b5da73c8541..837bf9d51c6e 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -120,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
i++;
}
- if (i == 0 || i % 4)
+ if (i == 0)
goto err;
for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
index 57186c58dc84..1d7dd3d2c101 100644
--- a/drivers/cache/ax45mp_cache.c
+++ b/drivers/cache/ax45mp_cache.c
@@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
unsigned long line_size;
unsigned long flags;
+ if (unlikely(start == end))
+ return;
+
line_size = ax45mp_priv.ax45mp_cache_line_size;
start = start & (~(line_size - 1));
+ end = ((end + line_size - 1) & (~(line_size - 1)));
local_irq_save(flags);
ax45mp_cpu_dcache_wb_range(start, end);
local_irq_restore(flags);
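The fix rounds start down and end up to cache-line boundaries, so a partial trailing line is no longer skipped by the writeback (and a zero-length range is rejected early). A self-contained demonstration of the arithmetic with an assumed 64-byte line:

#include <stdio.h>

int main(void)
{
	unsigned long line_size = 64;			/* power of two */
	unsigned long start = 0x1003, end = 0x1041;	/* [start, end) */

	start &= ~(line_size - 1);			/* round down: 0x1000 */
	end = (end + line_size - 1) & ~(line_size - 1);	/* round up: 0x1080 */
	printf("wb range: %#lx-%#lx\n", start, end);
	return 0;
}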
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 74db7fef237b..d7182d6e9783 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -4,8 +4,9 @@
*/
#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,x1e80100-gcc.h>
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 0964bb11657f..782993951fff 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -2475,7 +2475,7 @@ static const struct samsung_cmu_info misc_cmu_info __initconst = {
.nr_clk_ids = CLKS_NR_MISC,
.clk_regs = misc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(misc_clk_regs),
- .clk_name = "dout_cmu_misc_bus",
+ .clk_name = "bus",
};
/* ---- platform_driver ----------------------------------------------------- */
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 32daaac9b132..ca7a06489c40 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -69,7 +69,7 @@
* @base_addr: Base address of timer
* @freq: Timer input clock frequency
* @clk: Associated clock source
- * @clk_rate_change_nb Notifier block for clock rate changes
+ * @clk_rate_change_nb: Notifier block for clock rate changes
*/
struct ttc_timer {
void __iomem *base_addr;
@@ -134,7 +134,7 @@ static void ttc_set_interval(struct ttc_timer *timer,
* @irq: IRQ number of the Timer
* @dev_id: void pointer to the ttc_timer instance
*
- * returns: Always IRQ_HANDLED - success
+ * Returns: Always IRQ_HANDLED - success
**/
static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
{
@@ -151,8 +151,9 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
/**
* __ttc_clocksource_read - Reads the timer counter register
+ * @cs: &clocksource to read from
*
- * returns: Current timer counter register value
+ * Returns: Current timer counter register value
**/
static u64 __ttc_clocksource_read(struct clocksource *cs)
{
@@ -173,7 +174,7 @@ static u64 notrace ttc_sched_clock_read(void)
* @cycles: Timer interval ticks
* @evt: Address of clock event instance
*
- * returns: Always 0 - success
+ * Returns: Always %0 - success
**/
static int ttc_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
@@ -186,9 +187,12 @@ static int ttc_set_next_event(unsigned long cycles,
}
/**
- * ttc_set_{shutdown|oneshot|periodic} - Sets the state of timer
- *
+ * ttc_shutdown - Sets the state of timer
* @evt: Address of clock event instance
+ *
+ * Used for shutdown or oneshot.
+ *
+ * Returns: Always %0 - success
**/
static int ttc_shutdown(struct clock_event_device *evt)
{
@@ -202,6 +206,12 @@ static int ttc_shutdown(struct clock_event_device *evt)
return 0;
}
+/**
+ * ttc_set_periodic - Sets the state of timer
+ * @evt: Address of clock event instance
+ *
+ * Returns: Always %0 - success
+ */
static int ttc_set_periodic(struct clock_event_device *evt)
{
struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
diff --git a/drivers/clocksource/timer-ep93xx.c b/drivers/clocksource/timer-ep93xx.c
index bc0ca6e12334..6981ff3ac8a9 100644
--- a/drivers/clocksource/timer-ep93xx.c
+++ b/drivers/clocksource/timer-ep93xx.c
@@ -155,9 +155,8 @@ static int __init ep93xx_timer_of_init(struct device_node *np)
ep93xx_tcu = tcu;
irq = irq_of_parse_and_map(np, 0);
- if (irq == 0)
- irq = -EINVAL;
- if (irq < 0) {
+ if (!irq) {
+ ret = -EINVAL;
pr_err("EP93XX Timer Can't parse IRQ %d", irq);
goto out_free;
}
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 57857c0dfba9..e66dcbd66566 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -61,12 +61,19 @@ static int riscv_clock_next_event(unsigned long delta,
return 0;
}
+static int riscv_clock_shutdown(struct clock_event_device *evt)
+{
+ riscv_clock_event_stop();
+ return 0;
+}
+
static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.set_next_event = riscv_clock_next_event,
+ .set_state_shutdown = riscv_clock_shutdown,
};
/*
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 5f60f6bd3386..56acf2617262 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -183,7 +183,7 @@ static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
* dmtimer_write - write timer registers in posted and non-posted mode
* @timer: timer pointer over which write operation is to perform
* @reg: lowest byte holds the register offset
- * @value: data to write into the register
+ * @val: data to write into the register
*
* The posted mode bit is encoded in reg. Note that in posted mode, the write
* pending bit must be checked. Otherwise a write on a register which has a
@@ -949,7 +949,7 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
/**
* omap_dm_timer_set_int_disable - disable timer interrupts
- * @timer: pointer to timer handle
+ * @cookie: pointer to timer cookie
* @mask: bit mask of interrupts to be disabled
*
* Disables the specified timer interrupts for a timer.
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 3d5e6d705fc6..44b19e696176 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -108,9 +108,8 @@ static inline void send_msg(struct cn_msg *msg)
filter_data[1] = 0;
}
- if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
- cn_filter, (void *)filter_data) == -ESRCH)
- atomic_set(&proc_event_num_listeners, 0);
+ cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
+ cn_filter, (void *)filter_data);
local_unlock(&local_event.lock);
}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1f6186475715..1791d37fbc53 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1232,14 +1232,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
-
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
value = READ_ONCE(cpudata->cppc_req_cached);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
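The hunk only moves the two WRITE_ONCE() calls, but the effect is that clamp_t() now sees the limits computed in this invocation rather than the previous one's. A toy illustration of the difference:

#include <stdio.h>

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int min_limit = 2, max_limit = 4;	/* previous call's window */
	unsigned int new_min = 8, new_max = 16, perf = 6;

	/* Old order: clamp against the stale window -> 4. */
	printf("stale: %u\n", clamp_u(perf, min_limit, max_limit));

	/* Fixed order: publish the new window first, then clamp -> 8. */
	min_limit = new_min;
	max_limit = new_max;
	printf("fresh: %u\n", clamp_u(perf, min_limit, max_limit));
	return 0;
}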
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2ca70b0b5fdc..79619227ea51 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -529,6 +529,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
}
#endif /* CONFIG_ACPI_CPPC_LIB */
+static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
+ unsigned int relation)
+{
+ if (freq == cpu->pstate.turbo_freq)
+ return cpu->pstate.turbo_pstate;
+
+ if (freq == cpu->pstate.max_freq)
+ return cpu->pstate.max_pstate;
+
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ return freq / cpu->pstate.scaling;
+ case CPUFREQ_RELATION_C:
+ return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
+ }
+
+ return DIV_ROUND_UP(freq, cpu->pstate.scaling);
+}
+
+static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
+{
+ return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
+}
+
/**
* intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
* @cpu: Target CPU.
@@ -546,6 +570,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
int scaling = cpu->pstate.scaling;
+ int freq;
pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
@@ -559,16 +584,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
perf_ctl_scaling);
- cpu->pstate.max_pstate_physical =
- DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
- scaling);
+ freq = perf_ctl_max_phys * perf_ctl_scaling;
+ cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
- cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ cpu->pstate.min_freq = freq;
/*
* Cast the min P-state value retrieved via pstate_funcs.get_min() to
* the effective range of HWP performance levels.
*/
- cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+ cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}
static inline void update_turbo_state(void)
@@ -2528,13 +2553,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* abstract values to represent performance rather than pure ratios.
*/
if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
- int scaling = cpu->pstate.scaling;
int freq;
freq = max_policy_perf * perf_ctl_scaling;
- max_policy_perf = DIV_ROUND_UP(freq, scaling);
+ max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
freq = min_policy_perf * perf_ctl_scaling;
- min_policy_perf = DIV_ROUND_UP(freq, scaling);
+ min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
}
pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -2908,18 +2932,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
cpufreq_freq_transition_begin(policy, &freqs);
- switch (relation) {
- case CPUFREQ_RELATION_L:
- target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
- break;
- case CPUFREQ_RELATION_H:
- target_pstate = freqs.new / cpu->pstate.scaling;
- break;
- default:
- target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
- break;
- }
-
+ target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2937,7 +2950,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
update_turbo_state();
- target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2974,6 +2987,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
+ if (min_pstate > cpu->max_perf_ratio)
+ min_pstate = cpu->max_perf_ratio;
+
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;
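intel_pstate_freq_to_hwp_rel() centralizes the three cpufreq rounding relations that were previously open-coded: RELATION_L picks the lowest P-state at or above the frequency (round up), RELATION_H the highest at or below it (round down), RELATION_C the closest. A userspace sketch with an assumed scaling factor:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int scaling = 78741;	/* assumed kHz-per-pstate factor */
	unsigned int freq = 2000000;	/* requested frequency in kHz */

	printf("RELATION_L: %u\n", DIV_ROUND_UP(freq, scaling));	/* 26 */
	printf("RELATION_H: %u\n", freq / scaling);			/* 25 */
	printf("RELATION_C: %u\n", DIV_ROUND_CLOSEST(freq, scaling));	/* 25 */
	return 0;
}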
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 1262a7773ef3..de50c00ba218 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -299,22 +299,6 @@ theend:
return err;
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
void *async_req)
{
@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+ int flow, err;
+
+ flow = rctx->flow;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ sun8i_ce_cipher_unprepare(engine, areq);
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
+}
+
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
int err = sun8i_ce_cipher_prepare(engine, areq);
@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
return err;
sun8i_ce_cipher_run(engine, areq);
- sun8i_ce_cipher_unprepare(engine, areq);
return 0;
}
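This reordering (and the matching rk3288 hunk below) enforces one rule: DMA mappings must be torn down before the request is finalized, because finalize may complete the request and let its buffers be reused while still mapped. A hedged sketch of the pattern; my_run_hw_task() and my_unprepare() are illustrative stand-ins:

static void my_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq =
		container_of(areq, struct skcipher_request, base);
	int err;

	err = my_run_hw_task(breq);	/* hypothetical hardware run */
	my_unprepare(engine, areq);	/* dma_unmap_*() while buffers are ours */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);	/* now complete */
	local_bh_enable();
}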
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a148ff1f0872..a4f6884416a0 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -4545,6 +4545,7 @@ struct caam_hash_alg {
struct list_head entry;
struct device *dev;
int alg_type;
+ bool is_hmac;
struct ahash_alg ahash_alg;
};
@@ -4571,7 +4572,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev;
- if (alg->setkey) {
+ if (caam_hash->is_hmac) {
ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
ARRAY_SIZE(ctx->key),
DMA_TO_DEVICE,
@@ -4611,7 +4612,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
- return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+ return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -4646,12 +4647,14 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
+ t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
t_alg->ahash_alg.setkey = NULL;
+ t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 290c8500c247..fdd724228c2f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1753,6 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
int alg_type;
+ bool is_hmac;
struct ahash_engine_alg ahash_alg;
};
@@ -1804,7 +1805,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
} else {
if (priv->era >= 6) {
ctx->dir = DMA_BIDIRECTIONAL;
- ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
+ ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
} else {
ctx->dir = DMA_TO_DEVICE;
ctx->key_dir = DMA_NONE;
@@ -1862,7 +1863,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
- return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+ return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1915,12 +1916,14 @@ caam_hash_alloc(struct caam_hash_template *template,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
+ t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
halg->setkey = NULL;
+ t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e4d3f45242f6..b04bc1d3d627 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -534,10 +534,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
static int __sev_platform_shutdown_locked(int *error)
{
- struct sev_device *sev = psp_master->sev_data;
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
int ret;
- if (!sev || sev->state == SEV_STATE_UNINIT)
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 479062aa5e6b..94a0ebb03d8c 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -463,6 +463,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
break;
case ADF_401XX_PCI_DEVICE_ID:
hw_data->fw_name = ADF_4XXX_FW;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 1b13b4aa16ec..a235e6c300f1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
theend:
pm_runtime_put_autosuspend(rkc->dev);
+ rk_hash_unprepare(engine, breq);
+
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
- rk_hash_unprepare(engine, breq);
-
return 0;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2621ff8a9376..de53eddf6796 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
- struct virtio_crypto_ctrl_header *header, void *para,
+ struct virtio_crypto_ctrl_header *header,
+ struct virtio_crypto_akcipher_session_para *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
- memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index dcf2b39e1048..1a3e6aafbdcc 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -316,31 +316,27 @@ static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
- const unsigned long end)
+static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
+ struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
- struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
- struct acpi_cedt_cfmws *cfmws;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
struct resource *res;
int rc;
- cfmws = (struct acpi_cedt_cfmws *) header;
-
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc) {
dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
- return 0;
+ return rc;
}
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
@@ -376,7 +372,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
- return 0;
+ return PTR_ERR(cxlrd);
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@@ -420,16 +416,7 @@ err_xormap:
put_device(&cxld->dev);
else
rc = cxl_decoder_autoremove(dev, cxld);
- if (rc) {
- dev_err(dev, "Failed to add decode range: %pr", res);
- return rc;
- }
- dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
- dev_name(&cxld->dev),
- phys_to_target_node(cxld->hpa_range.start),
- cxld->hpa_range.start, cxld->hpa_range.end);
-
- return 0;
+ return rc;
err_insert:
kfree(res->name);
@@ -438,6 +425,29 @@ err_name:
return -ENOMEM;
}
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
+ struct cxl_cfmws_context *ctx = arg;
+ struct device *dev = ctx->dev;
+ int rc;
+
+ rc = __cxl_parse_cfmws(cfmws, ctx);
+ if (rc)
+ dev_err(dev,
+ "Failed to add decode range: [%#llx - %#llx] (%d)\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1, rc);
+ else
+ dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
+ phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+
+ /* never fail cxl_acpi load for a single window failure */
+ return 0;
+}
+
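The refactor splits parsing into a worker that returns precise error codes and a subtable callback that logs them but always returns 0, so one malformed window can no longer abort the whole CEDT scan. A self-contained restatement of the pattern (window fields and names assumed):

#include <errno.h>
#include <stdio.h>

struct window { unsigned long long base, size; };

/* Worker: reports a real error for one entry. */
static int parse_one(const struct window *w)
{
	return w->size ? 0 : -ENXIO;
}

/* Callback: logs the failure, keeps iterating -- never fails the scan. */
static int parse_cb(const struct window *w)
{
	int rc = parse_one(w);

	if (rc)
		fprintf(stderr, "window %#llx: failed (%d)\n", w->base, rc);
	return 0;
}

int main(void)
{
	struct window ws[] = { { 0x1000, 0x100 }, { 0x2000, 0 } };

	for (unsigned int i = 0; i < 2; i++)
		parse_cb(&ws[i]);
	return 0;
}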
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
struct device *dev)
{
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 6fe11546889f..08fd0baea7a0 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
return 0;
}
-static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
- struct list_head *list)
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+ struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf;
-
- dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
- if (!dpa_perf)
- return;
-
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
- list_add_tail(&dpa_perf->list, list);
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
@@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
dent->coord.read_latency, dent->coord.write_latency);
}
-static void free_perf_ents(void *data)
-{
- struct cxl_memdev_state *mds = data;
- struct cxl_dpa_perf *dpa_perf, *n;
- LIST_HEAD(discard);
-
- list_splice_tail_init(&mds->ram_perf_list, &discard);
- list_splice_tail_init(&mds->pmem_perf_list, &discard);
- list_for_each_entry_safe(dpa_perf, n, &discard, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
@@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
xa_for_each(dsmas_xa, index, dent) {
if (resource_size(&cxlds->ram_res) &&
range_contains(&ram_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->ram_perf_list);
+ update_perf_entry(dev, dent, &mds->ram_perf);
else if (resource_size(&cxlds->pmem_res) &&
range_contains(&pmem_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->pmem_perf_list);
+ update_perf_entry(dev, dent, &mds->pmem_perf);
else
dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
dent->dpa_range.start);
}
-
- devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
}
static int match_cxlrd_qos_class(struct device *dev, void *data)
@@ -293,24 +270,24 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
return 0;
}
-static void cxl_qos_match(struct cxl_port *root_port,
- struct list_head *work_list,
- struct list_head *discard_list)
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf, *n;
+ *dpa_perf = (struct cxl_dpa_perf) {
+ .qos_class = CXL_QOS_CLASS_INVALID,
+ };
+}
- list_for_each_entry_safe(dpa_perf, n, work_list, list) {
- int rc;
+static bool cxl_qos_match(struct cxl_port *root_port,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+ return false;
- if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
- return;
+ if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
+ match_cxlrd_qos_class))
+ return false;
- rc = device_for_each_child(&root_port->dev,
- (void *)&dpa_perf->qos_class,
- match_cxlrd_qos_class);
- if (!rc)
- list_move_tail(&dpa_perf->list, discard_list);
- }
+ return true;
}
static int match_cxlrd_hb(struct device *dev, void *data)
@@ -334,23 +311,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
return 0;
}
-static void discard_dpa_perf(struct list_head *list)
-{
- struct cxl_dpa_perf *dpa_perf, *n;
-
- list_for_each_entry_safe(dpa_perf, n, list, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
-
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- LIST_HEAD(__discard);
- struct list_head *discard __free(dpa_perf) = &__discard;
struct cxl_port *root_port;
int rc;
@@ -363,16 +327,17 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
root_port = &cxl_root->port;
/* Check that the QTG IDs are all sane between end device and root decoders */
- cxl_qos_match(root_port, &mds->ram_perf_list, discard);
- cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
+ if (!cxl_qos_match(root_port, &mds->ram_perf))
+ reset_dpa_perf(&mds->ram_perf);
+ if (!cxl_qos_match(root_port, &mds->pmem_perf))
+ reset_dpa_perf(&mds->pmem_perf);
/* Check to make sure that the device's host bridge is under a root decoder */
rc = device_for_each_child(&root_port->dev,
- (void *)cxlmd->endpoint->host_bridge,
- match_cxlrd_hb);
+ cxlmd->endpoint->host_bridge, match_cxlrd_hb);
if (!rc) {
- list_splice_tail_init(&mds->ram_perf_list, discard);
- list_splice_tail_init(&mds->pmem_perf_list, discard);
+ reset_dpa_perf(&mds->ram_perf);
+ reset_dpa_perf(&mds->pmem_perf);
}
return rc;
@@ -417,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_memdev_set_qos_class(cxlds, dsmas_xa);
cxl_qos_class_verify(cxlmd);
+ cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
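reset_dpa_perf() above relies on compound-literal assignment, which zeroes every member except those explicitly named. A small userspace check of that idiom (struct layout and values assumed):

#include <stdio.h>

struct perf { int qos_class; long start, end; };

#define QOS_INVALID (-1)

static void reset_perf(struct perf *p)
{
	/* Everything not named in the literal becomes zero. */
	*p = (struct perf) { .qos_class = QOS_INVALID };
}

int main(void)
{
	struct perf p = { 3, 0x1000, 0x2000 };

	reset_perf(&p);
	printf("%d %ld %ld\n", p.qos_class, p.start, p.end);	/* -1 0 0 */
	return 0;
}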
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 27166a411705..9adda4795eb7 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1391,8 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.reg_map.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- INIT_LIST_HEAD(&mds->ram_perf_list);
- INIT_LIST_HEAD(&mds->pmem_perf_list);
+ mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+ mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
return mds;
}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index dae8802ecdb0..d4e259f3a7e9 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static ssize_t pmem_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+ __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_pmem_attributes[] = {
&dev_attr_pmem_size.attr,
+ &dev_attr_pmem_qos_class.attr,
NULL,
};
+static ssize_t ram_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+ __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_ram_attributes[] = {
&dev_attr_ram_size.attr,
+ &dev_attr_ram_qos_class.attr,
NULL,
};
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
.is_visible = cxl_memdev_visible,
};
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_ram_qos_class.attr)
+ if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_ram_attribute_group = {
.name = "ram",
.attrs = cxl_memdev_ram_attributes,
+ .is_visible = cxl_ram_visible,
};
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_pmem_qos_class.attr)
+ if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_pmem_attribute_group = {
.name = "pmem",
.attrs = cxl_memdev_pmem_attributes,
+ .is_visible = cxl_pmem_visible,
};
static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
NULL,
};
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
+{
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
.release = cxl_memdev_release,
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 6c9c8d92f8f7..e9e6c81ce034 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -477,9 +477,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
- if (!allowed) {
- cxl_set_mem_enable(cxlds, 0);
- info->mem_enabled = 0;
+ if (!allowed && info->mem_enabled) {
+ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+ return -ENXIO;
}
/*
@@ -932,11 +932,21 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
void cxl_cor_error_detected(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct device *dev = &cxlds->cxlmd->dev;
+
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return;
+ }
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
- cxl_handle_endpoint_cor_ras(cxlds);
+ cxl_handle_endpoint_cor_ras(cxlds);
+ }
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
@@ -948,16 +958,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
struct device *dev = &cxlmd->dev;
bool ue;
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+ /*
+ * A frozen channel indicates an impending reset which is fatal to
+ * CXL.mem operation, and will likely crash the system. On the off
+ * chance the situation is recoverable dump the status of the RAS
+ * capability registers and bounce the active state of the memdev.
+ */
+ ue = cxl_handle_endpoint_ras(cxlds);
+ }
- /*
- * A frozen channel indicates an impending reset which is fatal to
- * CXL.mem operation, and will likely crash the system. On the off
- * chance the situation is recoverable dump the status of the RAS
- * capability registers and bounce the active state of the memdev.
- */
- ue = cxl_handle_endpoint_ras(cxlds);
switch (state) {
case pci_channel_io_normal:
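Both error handlers now wrap their work in scoped_guard(device, dev), which takes the device lock for the block and drops it on every exit path, making the !dev->driver check atomic with respect to driver unbind. A hedged sketch of the shape; handle_ras() is a hypothetical stand-in:

static void my_error_detected(struct device *dev)
{
	scoped_guard(device, dev) {
		if (!dev->driver)
			return;		/* lock dropped automatically */
		handle_ras(dev);	/* runs under the device lock */
	}
}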
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 0f05692bfec3..4c7fd2d5cccb 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -525,7 +525,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_region_params *p = &cxlr->params;
struct resource *res;
- u32 remainder = 0;
+ u64 remainder = 0;
lockdep_assert_held_write(&cxl_region_rwsem);
@@ -545,7 +545,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
(cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
return -ENXIO;
- div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+ div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
if (remainder)
return -EINVAL;
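The switch to a u64 remainder and div64_u64_rem() avoids a 32-bit overflow: SZ_256M times the interleave-way count wraps a 32-bit divisor to 0 once there are 16 ways (and overflows a signed int already at 8). A quick demonstration of the wrap:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_256M 0x10000000u

int main(void)
{
	uint32_t ways = 16;
	uint32_t bad = SZ_256M * ways;		/* wraps to 0 */
	uint64_t good = (uint64_t)SZ_256M * ways;	/* 4 GiB */

	printf("u32 divisor: %" PRIu32 "\n", bad);
	printf("u64 divisor: %" PRIu64 "\n", good);
	return 0;
}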
@@ -730,12 +730,17 @@ static int match_auto_decoder(struct device *dev, void *data)
return 0;
}
-static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_region *cxlr)
+static struct cxl_decoder *
+cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
+ if (port == cxled_to_port(cxled))
+ return &cxled->cxld;
+
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
@@ -753,8 +758,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
return to_cxl_decoder(dev);
}
-static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
- struct cxl_region *cxlr)
+static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
+ struct cxl_decoder *cxld_iter = rr->decoder;
+
+ /*
+ * Allow the out of order assembly of auto-discovered regions.
+ * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
+ * in HPA order. Confirm that the decoder with the lesser HPA
+ * starting address has the lesser id.
+ */
+ dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
+ dev_name(&cxld->dev), cxld->id,
+ dev_name(&cxld_iter->dev), cxld_iter->id);
+
+ if (cxld_iter->id > cxld->id)
+ return true;
+
+ return false;
+}
+
+static struct cxl_region_ref *
+alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -764,16 +792,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
xa_for_each(&port->regions, index, iter) {
struct cxl_region_params *ip = &iter->region->params;
- if (!ip->res)
+ if (!ip->res || ip->res->start < p->res->start)
continue;
- if (ip->res->start > p->res->start) {
- dev_dbg(&cxlr->dev,
- "%s: HPA order violation %s:%pr vs %pr\n",
- dev_name(&port->dev),
- dev_name(&iter->region->dev), ip->res, p->res);
- return ERR_PTR(-EBUSY);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
+ if (auto_order_ok(port, iter->region, cxld))
+ continue;
}
+ dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+
+ return ERR_PTR(-EBUSY);
}
cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
@@ -853,10 +886,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
{
struct cxl_decoder *cxld;
- if (port == cxled_to_port(cxled))
- cxld = &cxled->cxld;
- else
- cxld = cxl_region_find_decoder(port, cxlr);
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (!cxld) {
dev_dbg(&cxlr->dev, "%s: no decoder available\n",
dev_name(&port->dev));
@@ -953,7 +983,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr);
+ cxl_rr = alloc_region_ref(port, cxlr, cxled);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 89445435303a..bdf117a33744 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -338,7 +338,7 @@ TRACE_EVENT(cxl_general_media,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;
/* General Media */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -425,7 +425,7 @@ TRACE_EVENT(cxl_dram,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_DRAM_UUID;
/* DRAM */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -573,7 +573,7 @@ TRACE_EVENT(cxl_memory_module,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;
/* Memory Module Event */
__entry->event_type = rec->event_type;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6017c0c57b4..003feebab79b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -880,6 +880,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 5303d6942b88..20fb3b35e89e 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -395,13 +395,11 @@ enum cxl_devtype {
/**
* struct cxl_dpa_perf - DPA performance property entry
- * @list - list entry
* @dpa_range - range for DPA address
* @coord - QoS performance data (i.e. latency, bandwidth)
* @qos_class - QoS Class cookies
*/
struct cxl_dpa_perf {
- struct list_head list;
struct range dpa_range;
struct access_coordinate coord;
int qos_class;
@@ -471,8 +469,8 @@ struct cxl_dev_state {
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf_list: performance data entries matched to RAM
- * @pmem_perf_list: performance data entries matched to PMEM
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -494,8 +492,8 @@ struct cxl_memdev_state {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
- struct list_head ram_perf_list;
- struct list_head pmem_perf_list;
+ struct cxl_dpa_perf ram_perf;
+ struct cxl_dpa_perf pmem_perf;
struct cxl_event_state event;
struct cxl_poison_state poison;
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index c5c9d8e0d88d..0c79d9ce877c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -215,52 +215,6 @@ static ssize_t trigger_poison_list_store(struct device *dev,
}
static DEVICE_ATTR_WO(trigger_poison_list);
-static ssize_t ram_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->ram_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_ram_qos_class =
- __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
-
-static ssize_t pmem_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->pmem_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_pmem_qos_class =
- __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
-
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
@@ -272,21 +226,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
mds->poison.enabled_cmds))
return 0;
- if (a == &dev_attr_pmem_qos_class.attr)
- if (list_empty(&mds->pmem_perf_list))
- return 0;
-
- if (a == &dev_attr_ram_qos_class.attr)
- if (list_empty(&mds->ram_perf_list))
- return 0;
-
return a->mode;
}
static struct attribute *cxl_mem_attrs[] = {
&dev_attr_trigger_poison_list.attr,
- &dev_attr_ram_qos_class.attr,
- &dev_attr_pmem_qos_class.attr,
NULL
};
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 4fd1f207c84e..2ff361e756d6 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -382,7 +382,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
return rc;
}
-static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
@@ -441,7 +441,7 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
/* background command interrupts are optional */
- if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
return 0;
msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
@@ -588,7 +588,7 @@ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
}
-static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
{
int nvecs;
@@ -605,9 +605,9 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nvecs < 1) {
dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
- return -ENXIO;
+ return false;
}
- return 0;
+ return true;
}
static irqreturn_t cxl_event_thread(int irq, void *id)
@@ -743,7 +743,7 @@ static bool cxl_event_int_is_fw(u8 setting)
}
static int cxl_event_config(struct pci_host_bridge *host_bridge,
- struct cxl_memdev_state *mds)
+ struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_event_interrupt_policy policy;
int rc;
@@ -755,6 +755,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
if (!host_bridge->native_cxl_error)
return 0;
+ if (!irq_avail) {
+ dev_info(mds->cxlds.dev, "No interrupt support, disabling event processing.\n");
+ return 0;
+ }
+
rc = cxl_mem_alloc_event_buf(mds);
if (rc)
return rc;
@@ -789,6 +794,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
int i, rc, pmu_count;
+ bool irq_avail;
/*
* Double check the anonymous union trickery in struct cxl_regs
@@ -846,11 +852,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
- rc = cxl_alloc_irq_vectors(pdev);
- if (rc)
- return rc;
+ irq_avail = cxl_alloc_irq_vectors(pdev);
- rc = cxl_pci_setup_mailbox(mds);
+ rc = cxl_pci_setup_mailbox(mds, irq_avail);
if (rc)
return rc;
@@ -909,7 +913,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- rc = cxl_event_config(host_bridge, mds);
+ rc = cxl_event_config(host_bridge, mds, irq_avail);
if (rc)
return rc;
@@ -970,61 +974,6 @@ static struct pci_driver cxl_pci_driver = {
},
};
-#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
-static void cxl_cper_event_call(enum cxl_event_type ev_type,
- struct cxl_cper_event_rec *rec)
-{
- struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
- struct pci_dev *pdev __free(pci_dev_put) = NULL;
- enum cxl_event_log_type log_type;
- struct cxl_dev_state *cxlds;
- unsigned int devfn;
- u32 hdr_flags;
-
- devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
- pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
- device_id->bus_num, devfn);
- if (!pdev)
- return;
-
- guard(pci_dev)(pdev);
- if (pdev->driver != &cxl_pci_driver)
- return;
-
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
- /* Fabricate a log type */
- hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
- log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
-
- cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
- &uuid_null, &rec->event);
-}
-
-static int __init cxl_pci_driver_init(void)
-{
- int rc;
-
- rc = cxl_cper_register_callback(cxl_cper_event_call);
- if (rc)
- return rc;
-
- rc = pci_register_driver(&cxl_pci_driver);
- if (rc)
- cxl_cper_unregister_callback(cxl_cper_event_call);
-
- return rc;
-}
-
-static void __exit cxl_pci_driver_exit(void)
-{
- pci_unregister_driver(&cxl_pci_driver);
- cxl_cper_unregister_callback(cxl_cper_event_call);
-}
-
-module_init(cxl_pci_driver_init);
-module_exit(cxl_pci_driver_exit);
+module_pci_driver(cxl_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index ee899f8e6721..4a63567e93ba 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -168,10 +168,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
if (vmf->pgoff > buffer->pagecount)
return VM_FAULT_SIGBUS;
- vmf->page = buffer->pages[vmf->pgoff];
- get_page(vmf->page);
-
- return 0;
+ return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}
static const struct vm_operations_struct dma_heap_vm_ops = {
@@ -185,6 +182,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;
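The heap now maps buffers as raw PFNs instead of taking page references in the fault path, with the mmap hook marking the VMA accordingly. A condensed sketch of the resulting handler, assuming the buffer's pages[]/pagecount layout:

/* Sketch of a VM_PFNMAP fault handler in the style of the fix above. */
static vm_fault_t my_vm_fault(struct vm_fault *vmf)
{
	struct cma_heap_buffer *buffer = vmf->vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;
	/* No get_page(): the PTE carries the PFN, no refcount is taken. */
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      page_to_pfn(buffer->pages[vmf->pgoff]));
}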
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 70ba506dabab..e928f2ca0f1e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -378,6 +378,20 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config LS2X_APB_DMA
+ tristate "Loongson LS2X APB DMA support"
+ depends on LOONGARCH || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the Loongson LS2X APB DMA controller driver. The
+ DMA controller has a single DMA channel that can be configured
+ for peripherals on the APB bus, such as audio, NAND and SDIO.
+
+ This DMA controller transfers data from memory to a peripheral FIFO.
+ It does not support memory-to-memory data transfers.
+
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 83553a97a010..dfd40d14e408 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 5b63996640d9..9588773dd2eb 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -57,6 +57,8 @@
#define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+#define BUS_WIDTH_WORD_SIZE GENMASK(3, 0)
+#define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)
#define BUS_WIDTH_8BIT 0x00
#define BUS_WIDTH_16BIT 0x01
#define BUS_WIDTH_32BIT 0x02
@@ -740,7 +742,8 @@ static int admac_device_config(struct dma_chan *chan,
struct admac_data *ad = adchan->host;
bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
int wordsize = 0;
- u32 bus_width = 0;
+ u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
+ ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
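
The fix above seeds bus_width from the live register with the word-size and frame-size fields masked out, so bits the function does not own survive the update. The underlying read-modify-write idiom, sketched with hypothetical register and field names:

    u32 val;

    val = readl_relaxed(base + REG_EXAMPLE);       /* start from current value */
    val &= ~EXAMPLE_FIELD_MASK;                    /* clear only our field */
    val |= FIELD_PREP(EXAMPLE_FIELD_MASK, new_setting);
    writel_relaxed(val, base + REG_EXAMPLE);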
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fb89ecbf0cc5..40052d1bd0b5 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -222,8 +222,14 @@ struct atdma_sg {
* @vd: pointer to the virtual dma descriptor.
* @atchan: pointer to the atmel dma channel.
* @total_len: total transaction byte count
- * @sg_len: number of sg entries.
+ * @sglen: number of sg entries.
* @sg: array of sgs.
+ * @boundary: number of transfers to perform before the automatic address increment operation
+ * @dst_hole: value to add to the destination address when the boundary has been reached
+ * @src_hole: value to add to the source address when the boundary has been reached
+ * @memset_buffer: buffer used for the memset operation
+ * @memset_paddr: physical address of the buffer used for the memset operation
+ * @memset_vaddr: virtual address of the buffer used for the memset operation
*/
struct at_desc {
struct virt_dma_desc vd;
@@ -245,7 +251,10 @@ struct at_desc {
/*-- Channels --------------------------------------------------------*/
/**
- * atc_status - information bits stored in channel status flag
+ * enum atc_status - information bits stored in channel status flag
+ *
+ * @ATC_IS_PAUSED: If channel is paused
+ * @ATC_IS_CYCLIC: If channel is cyclic
*
* Manipulated with atomic operations.
*/
@@ -282,7 +291,6 @@ struct at_dma_chan {
u32 save_cfg;
u32 save_dscr;
struct dma_slave_config dma_sconfig;
- bool cyclic;
struct at_desc *desc;
};
@@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @dma_device: dmaengine dma_device object members
- * @atdma_devtype: identifier of DMA controller compatibility
- * @ch_regs: memory mapped register base
+ * @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
+ * @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
*/
struct at_dma {
@@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
/**
* atc_get_llis_residue - Get residue for a hardware linked list transfer
+ * @atchan: pointer to an atmel hdmac channel.
+ * @desc: pointer to the descriptor for which the residue is calculated.
+ * @residue: residue to be set to dma_tx_state.
*
* Calculate the residue by removing the length of the Linked List Item (LLI)
* already transferred from the total length. To get the current LLI we can use
@@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
* two DSCR values are different, we read again the CTRLA then the DSCR till two
* consecutive read values from DSCR are equal or till the maximum trials is
* reach. This algorithm is very unlikely not to find a stable value for DSCR.
- * @atchan: pointer to an atmel hdmac channel.
- * @desc: pointer to the descriptor for which the residue is calculated.
- * @residue: residue to be set to dma_tx_state.
- * Returns 0 on success, -errno otherwise.
+ *
+ * Returns: %0 on success, -errno otherwise.
*/
static int atc_get_llis_residue(struct at_dma_chan *atchan,
struct at_desc *desc, u32 *residue)
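
A sketch of the stable-read loop that the comment describes (simplified; the accessor and trial bound follow the driver's channel_readl() and ATC_MAX_DSCR_TRIALS, so treat the exact shape as an assumption):

    dscr = channel_readl(atchan, DSCR);
    for (i = 0; i < ATC_MAX_DSCR_TRIALS; i++) {
            ctrla = channel_readl(atchan, CTRLA);
            new_dscr = channel_readl(atchan, DSCR);
            if (new_dscr == dscr)
                    break;  /* CTRLA and DSCR refer to the same LLI */
            dscr = new_dscr;
    }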
@@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @residue: residue to be updated.
- * Return 0 on success, -errono otherwise.
+ *
+ * Return: %0 on success, -errno otherwise.
*/
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
u32 *residue)
@@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
- * return - the number of allocated descriptors
+ * Return: the number of allocated descriptors
*/
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 2457a420c13d..4e339c04fc1e 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -81,9 +81,13 @@
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450
+#define AXI_DMAC_REG_CURRENT_SG_ID 0x454
+#define AXI_DMAC_REG_SG_ADDRESS 0x47c
+#define AXI_DMAC_REG_SG_ADDRESS_HIGH 0x4bc
#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)
+#define AXI_DMAC_CTRL_ENABLE_SG BIT(2)
#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)
@@ -97,20 +101,35 @@
/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
+/* Flags for axi_dmac_hw_desc.flags */
+#define AXI_DMAC_HW_FLAG_LAST BIT(0)
+#define AXI_DMAC_HW_FLAG_IRQ BIT(1)
+
+struct axi_dmac_hw_desc {
+ u32 flags;
+ u32 id;
+ u64 dest_addr;
+ u64 src_addr;
+ u64 next_sg_addr;
+ u32 y_len;
+ u32 x_len;
+ u32 src_stride;
+ u32 dst_stride;
+ u64 __pad[2];
+};
+
struct axi_dmac_sg {
- dma_addr_t src_addr;
- dma_addr_t dest_addr;
- unsigned int x_len;
- unsigned int y_len;
- unsigned int dest_stride;
- unsigned int src_stride;
- unsigned int id;
unsigned int partial_len;
bool schedule_when_free;
+
+ struct axi_dmac_hw_desc *hw;
+ dma_addr_t hw_phys;
};
struct axi_dmac_desc {
struct virt_dma_desc vdesc;
+ struct axi_dmac_chan *chan;
+
bool cyclic;
bool have_partial_xfer;
@@ -139,6 +158,7 @@ struct axi_dmac_chan {
bool hw_partial_xfer;
bool hw_cyclic;
bool hw_2d;
+ bool hw_sg;
};
struct axi_dmac {
@@ -213,9 +233,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
unsigned int flags = 0;
unsigned int val;
- val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
- if (val) /* Queue is full, wait for the next SOT IRQ */
- return;
+ if (!chan->hw_sg) {
+ val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+ if (val) /* Queue is full, wait for the next SOT IRQ */
+ return;
+ }
desc = chan->next_desc;
@@ -229,14 +251,15 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
sg = &desc->sg[desc->num_submitted];
/* Already queued in cyclic mode. Wait for it to finish */
- if (sg->id != AXI_DMAC_SG_UNUSED) {
+ if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
sg->schedule_when_free = true;
return;
}
- desc->num_submitted++;
- if (desc->num_submitted == desc->num_sgs ||
- desc->have_partial_xfer) {
+ if (chan->hw_sg) {
+ chan->next_desc = NULL;
+ } else if (++desc->num_submitted == desc->num_sgs ||
+ desc->have_partial_xfer) {
if (desc->cyclic)
desc->num_submitted = 0; /* Start again */
else
@@ -246,32 +269,42 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
chan->next_desc = desc;
}
- sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+ sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
- if (axi_dmac_dest_is_mem(chan)) {
- axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
- axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
- }
+ if (!chan->hw_sg) {
+ if (axi_dmac_dest_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
+ }
- if (axi_dmac_src_is_mem(chan)) {
- axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
- axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+ if (axi_dmac_src_is_mem(chan)) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
+ axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
+ }
}
/*
* If the hardware supports cyclic transfers and there is no callback to
- * call and only a single segment, enable hw cyclic mode to avoid
- * unnecessary interrupts.
+ * call, enable hw cyclic mode to avoid unnecessary interrupts.
*/
- if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
- desc->num_sgs == 1)
- flags |= AXI_DMAC_FLAG_CYCLIC;
+ if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
+ if (chan->hw_sg)
+ desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
+ else if (desc->num_sgs == 1)
+ flags |= AXI_DMAC_FLAG_CYCLIC;
+ }
if (chan->hw_partial_xfer)
flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
- axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
- axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+ if (chan->hw_sg) {
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
+ (u64)sg->hw_phys >> 32);
+ } else {
+ axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
+ axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
+ }
axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
@@ -286,9 +319,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
struct axi_dmac_sg *sg)
{
if (chan->hw_2d)
- return sg->x_len * sg->y_len;
+ return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
else
- return sg->x_len;
+ return (sg->hw->x_len + 1);
}
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
@@ -307,9 +340,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
for (i = 0; i < desc->num_sgs; i++) {
sg = &desc->sg[i];
- if (sg->id == AXI_DMAC_SG_UNUSED)
+ if (sg->hw->id == AXI_DMAC_SG_UNUSED)
continue;
- if (sg->id == id) {
+ if (sg->hw->id == id) {
desc->have_partial_xfer = true;
sg->partial_len = len;
found_sg = true;
@@ -348,6 +381,9 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
rslt->result = DMA_TRANS_NOERROR;
rslt->residue = 0;
+ if (chan->hw_sg)
+ return;
+
/*
* We get here if the last completed segment is partial, which
* means we can compute the residue from that segment onwards
@@ -374,36 +410,47 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
(completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
axi_dmac_dequeue_partial_xfers(chan);
- do {
- sg = &active->sg[active->num_completed];
- if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
- break;
- if (!(BIT(sg->id) & completed_transfers))
- break;
- active->num_completed++;
- sg->id = AXI_DMAC_SG_UNUSED;
- if (sg->schedule_when_free) {
- sg->schedule_when_free = false;
- start_next = true;
+ if (chan->hw_sg) {
+ if (active->cyclic) {
+ vchan_cyclic_callback(&active->vdesc);
+ } else {
+ list_del(&active->vdesc.node);
+ vchan_cookie_complete(&active->vdesc);
+ active = axi_dmac_active_desc(chan);
+ start_next = !!active;
}
+ } else {
+ do {
+ sg = &active->sg[active->num_completed];
+ if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+ break;
+ if (!(BIT(sg->hw->id) & completed_transfers))
+ break;
+ active->num_completed++;
+ sg->hw->id = AXI_DMAC_SG_UNUSED;
+ if (sg->schedule_when_free) {
+ sg->schedule_when_free = false;
+ start_next = true;
+ }
- if (sg->partial_len)
- axi_dmac_compute_residue(chan, active);
+ if (sg->partial_len)
+ axi_dmac_compute_residue(chan, active);
- if (active->cyclic)
- vchan_cyclic_callback(&active->vdesc);
+ if (active->cyclic)
+ vchan_cyclic_callback(&active->vdesc);
- if (active->num_completed == active->num_sgs ||
- sg->partial_len) {
- if (active->cyclic) {
- active->num_completed = 0; /* wrap around */
- } else {
- list_del(&active->vdesc.node);
- vchan_cookie_complete(&active->vdesc);
- active = axi_dmac_active_desc(chan);
+ if (active->num_completed == active->num_sgs ||
+ sg->partial_len) {
+ if (active->cyclic) {
+ active->num_completed = 0; /* wrap around */
+ } else {
+ list_del(&active->vdesc.node);
+ vchan_cookie_complete(&active->vdesc);
+ active = axi_dmac_active_desc(chan);
+ }
}
- }
- } while (active);
+ } while (active);
+ }
return start_next;
}
@@ -467,8 +514,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac *dmac = chan_to_axi_dmac(chan);
unsigned long flags;
+ u32 ctrl = AXI_DMAC_CTRL_ENABLE;
- axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+ if (chan->hw_sg)
+ ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan))
@@ -476,22 +527,58 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
-static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+static struct axi_dmac_desc *
+axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
+ struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+ struct device *dev = dmac->dma_dev.dev;
+ struct axi_dmac_hw_desc *hws;
struct axi_dmac_desc *desc;
+ dma_addr_t hw_phys;
unsigned int i;
desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
desc->num_sgs = num_sgs;
+ desc->chan = chan;
+
+ hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
+ &hw_phys, GFP_ATOMIC);
+ if (!hws) {
+ kfree(desc);
+ return NULL;
+ }
- for (i = 0; i < num_sgs; i++)
- desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+ for (i = 0; i < num_sgs; i++) {
+ desc->sg[i].hw = &hws[i];
+ desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
+
+ hws[i].id = AXI_DMAC_SG_UNUSED;
+ hws[i].flags = 0;
+
+ /* Link hardware descriptors */
+ hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
+ }
+
+ /* The last hardware descriptor will trigger an interrupt */
+ desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
return desc;
}
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+ struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
+ struct device *dev = dmac->dma_dev.dev;
+ struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
+ dma_addr_t hw_phys = desc->sg[0].hw_phys;
+
+ dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
+ hw, hw_phys);
+ kfree(desc);
+}
+
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
enum dma_transfer_direction direction, dma_addr_t addr,
unsigned int num_periods, unsigned int period_len,
@@ -508,26 +595,24 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
for (i = 0; i < num_periods; i++) {
- len = period_len;
-
- while (len > segment_size) {
+ for (len = period_len; len > segment_size; sg++) {
if (direction == DMA_DEV_TO_MEM)
- sg->dest_addr = addr;
+ sg->hw->dest_addr = addr;
else
- sg->src_addr = addr;
- sg->x_len = segment_size;
- sg->y_len = 1;
- sg++;
+ sg->hw->src_addr = addr;
+ sg->hw->x_len = segment_size - 1;
+ sg->hw->y_len = 0;
+ sg->hw->flags = 0;
addr += segment_size;
len -= segment_size;
}
if (direction == DMA_DEV_TO_MEM)
- sg->dest_addr = addr;
+ sg->hw->dest_addr = addr;
else
- sg->src_addr = addr;
- sg->x_len = len;
- sg->y_len = 1;
+ sg->hw->src_addr = addr;
+ sg->hw->x_len = len - 1;
+ sg->hw->y_len = 0;
sg++;
addr += len;
}
@@ -554,7 +639,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i)
num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
- desc = axi_dmac_alloc_desc(num_sgs);
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
@@ -563,7 +648,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
!axi_dmac_check_len(chan, sg_dma_len(sg))) {
- kfree(desc);
+ axi_dmac_free_desc(desc);
return NULL;
}
@@ -583,7 +668,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
- unsigned int num_periods, num_segments;
+ unsigned int num_periods, num_segments, num_sgs;
if (direction != chan->direction)
return NULL;
@@ -597,11 +682,16 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
num_periods = buf_len / period_len;
num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+ num_sgs = num_periods * num_segments;
- desc = axi_dmac_alloc_desc(num_periods * num_segments);
+ desc = axi_dmac_alloc_desc(chan, num_sgs);
if (!desc)
return NULL;
+ /* Chain the last descriptor to the first, and remove its "last" flag */
+ desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
+ desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;
+
axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
period_len, desc->sg);
@@ -653,26 +743,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
return NULL;
}
- desc = axi_dmac_alloc_desc(1);
+ desc = axi_dmac_alloc_desc(chan, 1);
if (!desc)
return NULL;
if (axi_dmac_src_is_mem(chan)) {
- desc->sg[0].src_addr = xt->src_start;
- desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+ desc->sg[0].hw->src_addr = xt->src_start;
+ desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
}
if (axi_dmac_dest_is_mem(chan)) {
- desc->sg[0].dest_addr = xt->dst_start;
- desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+ desc->sg[0].hw->dest_addr = xt->dst_start;
+ desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
}
if (chan->hw_2d) {
- desc->sg[0].x_len = xt->sgl[0].size;
- desc->sg[0].y_len = xt->numf;
+ desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
+ desc->sg[0].hw->y_len = xt->numf - 1;
} else {
- desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
- desc->sg[0].y_len = 1;
+ desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
+ desc->sg[0].hw->y_len = 0;
}
if (flags & DMA_CYCLIC)
@@ -688,7 +778,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
- kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+ axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
@@ -714,6 +804,9 @@ static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
case AXI_DMAC_REG_CURRENT_DEST_ADDR:
case AXI_DMAC_REG_PARTIAL_XFER_LEN:
case AXI_DMAC_REG_PARTIAL_XFER_ID:
+ case AXI_DMAC_REG_CURRENT_SG_ID:
+ case AXI_DMAC_REG_SG_ADDRESS:
+ case AXI_DMAC_REG_SG_ADDRESS_HIGH:
return true;
default:
return false;
@@ -866,6 +959,10 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
chan->hw_cyclic = true;
+ axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
+ if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
+ chan->hw_sg = true;
+
axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
chan->hw_2d = true;
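
The new hw_sg detection reuses the probe-by-write pattern already used for the cyclic and 2D capabilities: write all-ones to an optional register and check whether anything sticks, since unimplemented registers read back as zero. As a standalone sketch:

    static bool reg_is_implemented(void __iomem *base, unsigned int reg)
    {
            writel(0xffffffff, base + reg);
            return readl(base + reg) != 0;  /* hard-wired zero = absent */
    }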
@@ -911,6 +1008,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct axi_dmac *dmac;
struct regmap *regmap;
unsigned int version;
+ u32 irq_mask = 0;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -966,6 +1064,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
dma_dev->directions = BIT(dmac->chan.direction);
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
INIT_LIST_HEAD(&dma_dev->channels);
dmac->chan.vchan.desc_free = axi_dmac_desc_free;
@@ -977,7 +1076,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
- axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+ if (dmac->chan.hw_sg)
+ irq_mask |= AXI_DMAC_IRQ_SOT;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);
if (of_dma_is_coherent(pdev->dev.of_node)) {
ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b7388ae62d7f..491b22240221 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1103,6 +1103,9 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
static void __dma_async_device_channel_unregister(struct dma_device *device,
struct dma_chan *chan)
{
+ if (chan->local == NULL)
+ return;
+
WARN_ONCE(!device->device_release && chan->client_count,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ffe621695e47..a4f608837849 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -21,6 +21,10 @@
#include <linux/slab.h>
#include <linux/wait.h>
+static bool nobounce;
+module_param(nobounce, bool, 0644);
+MODULE_PARM_DESC(nobounce, "Prevent using swiotlb buffer (default: use swiotlb buffer)");
+
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
@@ -90,6 +94,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
* struct dmatest_params - test parameters.
+ * @nobounce: prevent using swiotlb buffer
* @buf_size: size of the memcpy test buffer
* @channel: bus ID of the channel to test
* @device: bus ID of the DMA Engine to test
@@ -106,6 +111,7 @@ MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
* @polled: use polling for completion instead of interrupts
*/
struct dmatest_params {
+ bool nobounce;
unsigned int buf_size;
char channel[20];
char device[32];
@@ -215,6 +221,7 @@ struct dmatest_done {
struct dmatest_data {
u8 **raw;
u8 **aligned;
+ gfp_t gfp_flags;
unsigned int cnt;
unsigned int off;
};
@@ -533,7 +540,7 @@ static int dmatest_alloc_test_data(struct dmatest_data *d,
goto err;
for (i = 0; i < d->cnt; i++) {
- d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
+ d->raw[i] = kmalloc(buf_size + align, d->gfp_flags);
if (!d->raw[i])
goto err;
@@ -655,6 +662,13 @@ static int dmatest_func(void *data)
goto err_free_coefs;
}
+ src->gfp_flags = GFP_KERNEL;
+ dst->gfp_flags = GFP_KERNEL;
+ if (params->nobounce) {
+ src->gfp_flags = GFP_DMA;
+ dst->gfp_flags = GFP_DMA;
+ }
+
if (dmatest_alloc_test_data(src, buf_size, align) < 0)
goto err_free_coefs;
@@ -1093,6 +1107,7 @@ static void add_threaded_test(struct dmatest_info *info)
struct dmatest_params *params = &info->params;
/* Copy test parameters */
+ params->nobounce = nobounce;
params->buf_size = test_buf_size;
strscpy(params->channel, strim(test_channel), sizeof(params->channel));
strscpy(params->device, strim(test_device), sizeof(params->device));
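
With nobounce plumbed through, a swiotlb-free run might be started like this (illustrative invocation; channel and run are existing dmatest parameters):

    # modprobe dmatest nobounce=1 test_buf_size=16384 iterations=5
    # echo dma0chan0 > /sys/module/dmatest/parameters/channel
    # echo 1 > /sys/module/dmatest/parameters/run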
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index b38786f0ad79..b75fdaffad9a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+ * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing a dummy read from the remote
+	 * LL memory in the hope that the MRd TLP will return only after the
+	 * last MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
}
+
+ dw_edma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
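
The dummy read works because a PCIe memory read must push earlier posted writes ahead of it, so by the time the MRd completes, the preceding MWr TLPs have landed. Reduced to a sketch with hypothetical names:

    writel(last_ll_word, ll_base + last_off);  /* posted MWr TLPs */
    (void)readl(ll_base + last_off);           /* MRd drains the posted writes */
    writel(doorbell_val, db_reg);              /* now safe to ring the doorbell */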
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 0745d9e7d259..406f169b09a7 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -176,7 +176,7 @@ dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
};
struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
- char name[16];
+ char name[32];
regs_dent = debugfs_create_dir(WRITE_STR, dent);
@@ -239,7 +239,7 @@ static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
};
struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
- char name[16];
+ char name[32];
regs_dent = debugfs_create_dir(READ_STR, dent);
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
index 00b735a0202a..10e8f0715114 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
- u32 num_ch = 0;
- int id;
-
- for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
- if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
- num_ch++;
- }
-
- if (num_ch > HDMA_V0_MAX_NR_CH)
- num_ch = HDMA_V0_MAX_NR_CH;
-
- return (u16)num_ch;
+ /*
+	 * The HDMA IP has no way to report the number of hardware channels
+	 * available, so we set it to the maximum number of channels and let
+	 * the platform set the right number of channels.
+ */
+ return HDMA_V0_MAX_NR_CH;
}
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+ * over different buses. Ensure LL-data reaches the memory before the
+	 * doorbell register is toggled by issuing a dummy read from the remote
+	 * LL memory in the hope that the MRd TLP will return only after the
+	 * last MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Interrupt enable&unmask - done, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+ HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
/* Channel control */
SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Set consumer cycle */
SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+ dw_hdma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
index 520c81978b08..dcdc57fe976c 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
@@ -116,7 +116,7 @@ static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
struct dentry *regs_dent, *ch_dent;
- char name[16];
+ char name[32];
int i;
regs_dent = debugfs_create_dir(WRITE_STR, dent);
@@ -133,7 +133,7 @@ static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
{
struct dentry *regs_dent, *ch_dent;
- char name[16];
+ char name[32];
int i;
regs_dent = debugfs_create_dir(READ_STR, dent);
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
index a974abdf8aaf..eab5fd7177e5 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
@@ -15,7 +15,7 @@
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
#define HDMA_V0_ABORT_INT_MASK BIT(2)
#define HDMA_V0_STOP_INT_MASK BIT(0)
#define HDMA_V0_LINKLIST_EN BIT(0)
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 7958ac33e36c..5a8061a307cd 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
if (!dpaa2_chan->fd_pool)
goto err;
- dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
- sizeof(struct dpaa2_fl_entry),
- sizeof(struct dpaa2_fl_entry), 0);
+ dpaa2_chan->fl_pool =
+ dma_pool_create("fl_pool", dev,
+ sizeof(struct dpaa2_fl_entry) * 3,
+ sizeof(struct dpaa2_fl_entry), 0);
+
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
- sizeof(struct dpaa2_qdma_sd_d),
+ sizeof(struct dpaa2_qdma_sd_d) * 2,
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b53f46245c37..793f1a7ad5e3 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (fsl_chan->is_multi_fifo) {
/* set mloff to support multiple fifo */
burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_addr_width : cfg->dst_addr_width;
+ cfg->src_maxburst : cfg->dst_maxburst;
nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
/* enable DMLOE/SMLOE */
if (cfg->direction == DMA_MEM_TO_DEV) {
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index bb5221158a77..f5e216b157c7 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -30,8 +30,9 @@
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 238a69bd0d6f..d36e28b9c767 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -9,6 +9,8 @@
* Vybrid and Layerscape SoCs.
*/
+#include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -21,10 +23,6 @@
#include "fsl-edma-common.h"
-#define ARGS_RX BIT(0)
-#define ARGS_REMOTE BIT(1)
-#define ARGS_MULTI_FIFO BIT(2)
-
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@@ -153,9 +151,15 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
i = fsl_chan - fsl_edma->chans;
fsl_chan->priority = dma_spec->args[1];
- fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
- fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
- fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+ fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
+ fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
+
+ if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
+ continue;
+
+ if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
+ continue;
if (!b_chmux && i == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
@@ -579,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
- dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+ FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
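
FIELD_GET(m, m) is a compact spelling of "the largest value the field can hold"; for the 15-bit CITER mask that is 0x7fff. A one-line check of the idiom:

    #include <linux/bits.h>
    #include <linux/bitfield.h>

    static_assert(FIELD_GET(GENMASK(14, 0), GENMASK(14, 0)) == 0x7fff);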
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 47cb28468049..5005e138fc23 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
u8 __reserved1[2];
u8 cfg8b_w1;
} __packed;
+ struct {
+ __le32 __reserved2;
+ __le32 cmd;
+ } __packed;
__le64 data;
};
} __packed;
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
dma_addr_t dst, dma_addr_t src, u32 len)
{
- u32 cmd;
struct fsl_qdma_format *sdf, *ddf;
struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
/* This entry is the last entry. */
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- sdf->data = QDMA_SDDF_CMD(cmd);
-
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
- ddf->data = QDMA_SDDF_CMD(cmd);
+ sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF);
+
+ ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
}
/*
@@ -514,11 +515,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- queue_size[i],
- &queue_temp->bus_addr,
- GFP_KERNEL);
+ dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ &queue_temp->bus_addr,
+ GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@@ -563,15 +564,14 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
- status_head->cq = dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- status_size,
- &status_head->bus_addr,
- GFP_KERNEL);
- if (!status_head->cq) {
- devm_kfree(&pdev->dev, status_head);
+ status_head->cq = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr,
+ GFP_KERNEL);
+ if (!status_head->cq)
return NULL;
- }
+
status_head->n_cq = status_size;
status_head->virt_head = status_head->cq;
status_head->virt_tail = status_head->cq;
@@ -625,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
- void *block,
+ __iomem void *block,
int id)
{
bool duplicate;
@@ -805,7 +805,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
int i;
int cpu;
int ret;
- char irq_name[20];
+ char irq_name[32];
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
@@ -1197,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
- if (ret)
- return ret;
-
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1239,16 +1235,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
- ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
- dev_err(&pdev->dev,
- "Can't register NXP Layerscape qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
- ret = fsl_qdma_reg_init(fsl_qdma);
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
@@ -1268,8 +1267,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static void fsl_qdma_remove(struct platform_device *pdev)
{
- int i;
- struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@@ -1277,12 +1274,6 @@ static void fsl_qdma_remove(struct platform_device *pdev)
fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
-
- for (i = 0; i < fsl_qdma->block_number; i++) {
- status = fsl_qdma->status[i];
- dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
- status->n_cq, status->cq, status->bus_addr);
- }
}
static const struct of_device_id fsl_qdma_dt_ids[] = {
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 1d918d45d9f6..e5a94a93a3cc 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -165,7 +165,7 @@ static void idxd_cdev_dev_release(struct device *dev)
struct idxd_wq *wq = idxd_cdev->wq;
cdev_ctx = &ictx[wq->idxd->data->type];
- ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
+ ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
kfree(idxd_cdev);
}
@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
spin_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
- h = evl->head;
+ h = status.head;
size = evl->size;
while (h != t) {
@@ -463,7 +463,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
cdev = &idxd_cdev->cdev;
dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
- minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
+ minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
if (minor < 0) {
kfree(idxd_cdev);
return minor;
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index 9cfbd9b14c4c..f3f25ee676f3 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
spin_lock(&evl->lock);
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
evl_size = evl->size;
seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index f43d81128b96..ecfdf4a8f1f8 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -807,6 +807,9 @@ err_bmap:
static void idxd_device_evl_free(struct idxd_device *idxd)
{
+ void *evl_log;
+ unsigned int evl_log_size;
+ dma_addr_t evl_dma;
union gencfg_reg gencfg;
union genctrl_reg genctrl;
struct device *dev = &idxd->pdev->dev;
@@ -827,11 +830,15 @@ static void idxd_device_evl_free(struct idxd_device *idxd)
iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
- dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
bitmap_free(evl->bmap);
+ evl_log = evl->log;
+ evl_log_size = evl->log_size;
+ evl_dma = evl->dma;
evl->log = NULL;
evl->size = IDXD_EVL_SIZE_MIN;
spin_unlock(&evl->lock);
+
+ dma_free_coherent(dev, evl_log_size, evl_log, evl_dma);
}
static void idxd_group_config_write(struct idxd_group *group)
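
The reshuffle in idxd_device_evl_free() stashes the buffer coordinates under the lock and only calls dma_free_coherent() after dropping it, since that call may sleep and so cannot run under the spinlock. The general shape, as a sketch with hypothetical names:

    spin_lock(&st->lock);
    log  = st->log;        /* stash under the lock */
    size = st->log_size;
    dma  = st->dma;
    st->log = NULL;
    spin_unlock(&st->lock);

    dma_free_coherent(dev, size, log, dma);  /* sleepable context only */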
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 47de3f93ff1e..d0f5db6cf1ed 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -300,7 +300,6 @@ struct idxd_evl {
unsigned int log_size;
/* The number of entries in the event log. */
u16 size;
- u16 head;
unsigned long *bmap;
bool batch_fail[IDXD_MAX_BATCH_IDENT];
};
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 14df1f1347a8..4954adc6bb60 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
static int idxd_init_evl(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ unsigned int evl_cache_size;
struct idxd_evl *evl;
+ const char *idxd_name;
if (idxd->hw.gen_cap.evl_support == 0)
return 0;
@@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
spin_lock_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
- idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
- sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
- 0, 0, NULL);
+ idxd_name = dev_name(idxd_confdev(idxd));
+ evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
+ /*
+	 * Since completion records in evl_cache will be copied to user space
+	 * when handling a completion record page fault, the cache must be
+	 * created with usercopy support.
+ */
+ idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
+ 0, 0, 0, evl_cache_size,
+ NULL);
if (!idxd->evl_cache) {
kfree(evl);
return -ENOMEM;
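
kmem_cache_create_usercopy() extends kmem_cache_create() with a (useroffset, usersize) window naming the only region that hardened usercopy will allow across the user boundary; here the whole object is whitelisted. The call shape, for a hypothetical cache:

    cache = kmem_cache_create_usercopy("demo_cache",
                                       obj_size,      /* object size */
                                       0, 0,          /* align, slab flags */
                                       0, obj_size,   /* usercopy window */
                                       NULL);         /* no constructor */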
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index c8a0aa874b11..348aa21389a9 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
size = idxd->evl->size;
while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
h = (h + 1) % size;
}
- evl->head = h;
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
spin_unlock(&evl->lock);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f81ecf5863e8..9b42f5e96b1e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -421,9 +421,7 @@ struct sdma_desc {
* @shp_addr: value for gReg[6]
* @per_addr: value for gReg[2]
* @status: status of dma channel
- * @context_loaded: ensure context is only loaded once
* @data: specific sdma interface structure
- * @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
* @terminated: terminated list
* @is_ram_script: flag for script in ram
@@ -486,8 +484,6 @@ struct sdma_channel {
* @num_script_addrs: Number of script addresses in this image
* @ram_code_start: offset of SDMA ram image in this firmware image
* @ram_code_size: size of SDMA ram image
- * @script_addrs: Stores the start address of the SDMA scripts
- * (in SDMA memory space)
*/
struct sdma_firmware_header {
u32 magic;
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/ls2x-apb-dma.c
new file mode 100644
index 000000000000..a49913f3ed3f
--- /dev/null
+++ b/drivers/dma/ls2x-apb-dma.c
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for the Loongson LS2X APB DMA Controller
+ *
+ * Copyright (C) 2017-2023 Loongson Corporation
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* Global Configuration Register */
+#define LDMA_ORDER_ERG 0x0
+
+/* Bitfield definitions */
+
+/* Bitfields in Global Configuration Register */
+#define LDMA_64BIT_EN BIT(0) /* 1: 64 bit support */
+#define LDMA_UNCOHERENT_EN BIT(1) /* 0: cache, 1: uncache */
+#define LDMA_ASK_VALID BIT(2)
+#define LDMA_START BIT(3) /* DMA start operation */
+#define LDMA_STOP BIT(4) /* DMA stop operation */
+#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
+
+/* Bitfields in ndesc_addr field of HW descriptor */
+#define LDMA_DESC_EN BIT(0) /* 1: The next descriptor is valid */
+#define LDMA_DESC_ADDR_LOW GENMASK(31, 1)
+
+/* Bitfields in cmd field of HW descriptor */
+#define LDMA_INT BIT(1) /* Enable DMA interrupts */
+#define LDMA_DATA_DIRECTION BIT(12) /* 1: write to device, 0: read from device */
+
+#define LDMA_SLAVE_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define LDMA_MAX_TRANS_LEN U32_MAX
+
+/*-- descriptors -----------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_hw_desc - DMA HW descriptor
+ * @ndesc_addr: the next descriptor low address.
+ * @mem_addr: memory low address.
+ * @apb_addr: device buffer address.
+ * @len: length of a piece of carried content, in words.
+ * @step_len: length between two moved memory data blocks.
+ * @step_times: number of blocks to be carried in a single DMA operation.
+ * @cmd: descriptor command or state.
+ * @stats: DMA status.
+ * @high_ndesc_addr: the next descriptor high address.
+ * @high_mem_addr: memory high address.
+ * @reserved: reserved
+ */
+struct ls2x_dma_hw_desc {
+ u32 ndesc_addr;
+ u32 mem_addr;
+ u32 apb_addr;
+ u32 len;
+ u32 step_len;
+ u32 step_times;
+ u32 cmd;
+ u32 stats;
+ u32 high_ndesc_addr;
+ u32 high_mem_addr;
+ u32 reserved[2];
+} __packed;
+
+/*
+ * struct ls2x_dma_sg - ls2x dma scatter gather entry
+ * @hw: the pointer to DMA HW descriptor.
+ * @llp: physical address of the DMA HW descriptor.
+ * @phys: destination or source address(mem).
+ * @len: number of bytes to read.
+ */
+struct ls2x_dma_sg {
+ struct ls2x_dma_hw_desc *hw;
+ dma_addr_t llp;
+ dma_addr_t phys;
+ u32 len;
+};
+
+/*
+ * struct ls2x_dma_desc - software descriptor
+ * @vdesc: pointer to the virtual dma descriptor.
+ * @cyclic: flag for cyclic DMA
+ * @burst_size: burst size of transaction, in words.
+ * @desc_num: number of sg entries.
+ * @direction: transfer direction, to or from device.
+ * @status: dma controller status.
+ * @sg: array of sgs.
+ */
+struct ls2x_dma_desc {
+ struct virt_dma_desc vdesc;
+ bool cyclic;
+ size_t burst_size;
+ u32 desc_num;
+ enum dma_transfer_direction direction;
+ enum dma_status status;
+ struct ls2x_dma_sg sg[] __counted_by(desc_num);
+};
+
+/*-- Channels --------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
+ * @vchan: virtual dma channel entry.
+ * @desc: pointer to the ls2x sw dma descriptor.
+ * @pool: hw desc table
+ * @irq: irq line
+ * @sconfig: configuration for slave transfers, passed via .device_config
+ */
+struct ls2x_dma_chan {
+ struct virt_dma_chan vchan;
+ struct ls2x_dma_desc *desc;
+ void *pool;
+ int irq;
+ struct dma_slave_config sconfig;
+};
+
+/*-- Controller ------------------------------------------------------*/
+
+/*
+ * struct ls2x_dma_priv - LS2X APB DMAC specific information
+ * @ddev: dmaengine dma_device object members
+ * @dma_clk: DMAC clock source
+ * @regs: memory mapped register base
+ * @lchan: channel to store ls2x_dma_chan structures
+ */
+struct ls2x_dma_priv {
+ struct dma_device ddev;
+ struct clk *dma_clk;
+ void __iomem *regs;
+ struct ls2x_dma_chan lchan;
+};
+
+/*-- Helper functions ------------------------------------------------*/
+
+static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct ls2x_dma_desc, vdesc);
+}
+
+static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ls2x_dma_chan, vchan.chan);
+}
+
+static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
+{
+ return container_of(ddev, struct ls2x_dma_priv, ddev);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
+ struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
+ int i;
+
+ for (i = 0; i < desc->desc_num; i++) {
+ if (desc->sg[i].hw)
+ dma_pool_free(lchan->pool, desc->sg[i].hw,
+ desc->sg[i].llp);
+ }
+
+ kfree(desc);
+}
+
+static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ u64 val;
+
+ val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
+ val |= LDMA_64BIT_EN | cmd;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
+{
+ struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
+ struct ls2x_dma_sg *ldma_sg;
+ struct virt_dma_desc *vdesc;
+ u64 val;
+
+ /* Get the next descriptor */
+ vdesc = vchan_next_desc(&lchan->vchan);
+ if (!vdesc) {
+ lchan->desc = NULL;
+ return;
+ }
+
+ list_del(&vdesc->node);
+ lchan->desc = to_ldma_desc(vdesc);
+ ldma_sg = &lchan->desc->sg[0];
+
+ /* Start DMA */
+ lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
+ val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
+ lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
+}
+
+static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
+{
+ u32 maxburst, buswidth;
+
+ /* Reject definitely invalid configurations */
+ if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
+ (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
+ return 0;
+
+ if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
+ maxburst = lchan->sconfig.dst_maxburst;
+ buswidth = lchan->sconfig.dst_addr_width;
+ } else {
+ maxburst = lchan->sconfig.src_maxburst;
+ buswidth = lchan->sconfig.src_addr_width;
+ }
+
+ /* If maxburst is zero, fall back to LDMA_MAX_TRANS_LEN */
+ return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
+}
+
+static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
+ struct ls2x_dma_desc *desc)
+{
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
+ u32 num_segments, segment_size;
+
+ if (desc->direction == DMA_MEM_TO_DEV) {
+ ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
+ ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
+ } else {
+ ldma_sg->hw->cmd = LDMA_INT;
+ ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
+ }
+
+ ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
+ ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);
+
+ /* Split into multiple equally sized segments if necessary */
+ num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
+ segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);
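+
+	/*
+	 * Worked example (illustrative): a 4096-byte sg entry is 1024 words;
+	 * with a burst_size of 256 words this yields num_segments = 4 and
+	 * segment_size = 256.
+	 */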
+
+ /* Word count register takes input in words */
+ ldma_sg->hw->len = segment_size;
+ ldma_sg->hw->step_times = num_segments;
+ ldma_sg->hw->step_len = 0;
+
+ /* Let's make a linked list */
+ if (sg_index) {
+ desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
+ desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
+ }
+}
+
+/*-- DMA Engine API --------------------------------------------------*/
+
+/*
+ * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
+ * @chan: allocate descriptor resources for this channel
+ *
+ * return - the number of allocated descriptors
+ */
+static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
+ chan->device->dev, PAGE_SIZE,
+ __alignof__(struct ls2x_dma_hw_desc), 0);
+ if (!lchan->pool) {
+ dev_err(chan2dev(chan), "No memory for descriptors\n");
+ return -ENOMEM;
+ }
+
+ return 1;
+}
+
+/*
+ * ls2x_dma_free_chan_resources - free all channel resources
+ * @chan: DMA channel
+ */
+static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_free_chan_resources(to_virt_chan(chan));
+ dma_pool_destroy(lchan->pool);
+ lchan->pool = NULL;
+}
+
+/*
+ * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: tx descriptor status flags
+ * @context: transaction context (ignored)
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ u32 sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ struct scatterlist *sg;
+ size_t burst_size;
+ int i;
+
+ if (unlikely(!sg_len || !is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = sg_len;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = sg_dma_address(sg);
+ ldma_sg->len = sg_dma_len(sg);
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+ /* Clear the enable bit in the last descriptor to terminate the list */
+ desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ * @flags: tx descriptor status flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ struct ls2x_dma_desc *desc;
+ size_t burst_size;
+ u32 num_periods;
+ int i;
+
+ if (unlikely(!buf_len || !period_len))
+ return NULL;
+
+ if (unlikely(!is_slave_direction(direction)))
+ return NULL;
+
+ burst_size = ls2x_dmac_detect_burst(lchan);
+ if (!burst_size)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+ desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc_num = num_periods;
+ desc->direction = direction;
+ desc->burst_size = burst_size;
+
+ /* Build cyclic linked list */
+ for (i = 0; i < num_periods; i++) {
+ struct ls2x_dma_sg *ldma_sg = &desc->sg[i];
+
+ /* Allocate DMA capable memory for hardware descriptor */
+ ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
+ if (!ldma_sg->hw) {
+ desc->desc_num = i;
+ ls2x_dma_desc_free(&desc->vdesc);
+ return NULL;
+ }
+
+ ldma_sg->phys = buf_addr + period_len * i;
+ ldma_sg->len = period_len;
+
+ ls2x_dma_fill_desc(lchan, i, desc);
+ }
+
+ /* Close the loop: link the last descriptor back to the first */
+ desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
+ desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
+ desc->cyclic = true;
+ desc->status = DMA_IN_PROGRESS;
+
+ return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
+}
+
+/*
+ * ls2x_dma_slave_config - set slave configuration for channel
+ * @chan: DMA channel
+ * @config: slave configuration
+ *
+ * Sets slave configuration for channel
+ */
+static int ls2x_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ memcpy(&lchan->sconfig, config, sizeof(*config));
+ return 0;
+}
+
+/*
+ * ls2x_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ls2x_dma_issue_pending(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
+ ls2x_dma_start_transfer(lchan);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+}
+
+/*
+ * ls2x_dma_terminate_all - terminate all transactions
+ * @chan: channel
+ *
+ * Stops all DMA transactions.
+ */
+static int ls2x_dma_terminate_all(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ /* Setting stop cmd */
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ if (lchan->desc) {
+ vchan_terminate_vdesc(&lchan->desc->vdesc);
+ lchan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&lchan->vchan, &head);
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&lchan->vchan, &head);
+ return 0;
+}
+
+/*
+ * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ */
+static void ls2x_dma_synchronize(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+
+ vchan_synchronize(&lchan->vchan);
+}
+
+static int ls2x_dma_pause(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ lchan->desc->status = DMA_PAUSED;
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+static int ls2x_dma_resume(struct dma_chan *chan)
+{
+ struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchan->vchan.lock, flags);
+ if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
+ lchan->desc->status = DMA_IN_PROGRESS;
+ ls2x_dma_write_cmd(lchan, LDMA_START);
+ }
+ spin_unlock_irqrestore(&lchan->vchan.lock, flags);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_isr - LS2X DMA Interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to ls2x_dma_chan
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
+{
+ struct ls2x_dma_chan *lchan = dev_id;
+ struct ls2x_dma_desc *desc;
+
+ spin_lock(&lchan->vchan.lock);
+ desc = lchan->desc;
+ if (desc) {
+ if (desc->cyclic) {
+ vchan_cyclic_callback(&desc->vdesc);
+ } else {
+ desc->status = DMA_COMPLETE;
+ vchan_cookie_complete(&desc->vdesc);
+ ls2x_dma_start_transfer(lchan);
+ }
+
+ /* ls2x_dma_start_transfer() updates lchan->desc */
+ if (!lchan->desc)
+ ls2x_dma_write_cmd(lchan, LDMA_STOP);
+ }
+ spin_unlock(&lchan->vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ls2x_dma_chan_init(struct platform_device *pdev,
+ struct ls2x_dma_priv *priv)
+{
+ struct ls2x_dma_chan *lchan = &priv->lchan;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ lchan->irq = platform_get_irq(pdev, 0);
+ if (lchan->irq < 0)
+ return lchan->irq;
+
+ ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), lchan);
+ if (ret)
+ return ret;
+
+ /* Initialize channel-related values */
+ INIT_LIST_HEAD(&priv->ddev.channels);
+ lchan->vchan.desc_free = ls2x_dma_desc_free;
+ vchan_init(&lchan->vchan, &priv->ddev);
+
+ return 0;
+}
+
+/*
+ * ls2x_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ls2x_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ls2x_dma_priv *priv;
+ struct dma_device *ddev;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return dev_err_probe(dev, PTR_ERR(priv->regs),
+ "devm_platform_ioremap_resource failed.\n");
+
+ priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->dma_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");
+
+ ret = clk_prepare_enable(priv->dma_clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");
+
+ ret = ls2x_dma_chan_init(pdev, priv);
+ if (ret)
+ goto disable_clk;
+
+ ddev = &priv->ddev;
+ ddev->dev = dev;
+ dma_cap_zero(ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+
+ ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = ls2x_dma_issue_pending;
+ ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
+ ddev->device_config = ls2x_dma_slave_config;
+ ddev->device_terminate_all = ls2x_dma_terminate_all;
+ ddev->device_synchronize = ls2x_dma_synchronize;
+ ddev->device_pause = ls2x_dma_pause;
+ ddev->device_resume = ls2x_dma_resume;
+
+ ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
+ ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ ret = dma_async_device_register(&priv->ddev);
+ if (ret < 0)
+ goto disable_clk;
+
+ ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
+ if (ret < 0)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(&priv->ddev);
+disable_clk:
+ clk_disable_unprepare(priv->dma_clk);
+
+ return ret;
+}
+
+/*
+ * ls2x_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ */
+static void ls2x_dma_remove(struct platform_device *pdev)
+{
+ struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&priv->ddev);
+ clk_disable_unprepare(priv->dma_clk);
+}
+
+static const struct of_device_id ls2x_dma_of_match_table[] = {
+ { .compatible = "loongson,ls2k1000-apbdma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
+
+static struct platform_driver ls2x_dmac_driver = {
+ .probe = ls2x_dma_probe,
+ .remove_new = ls2x_dma_remove,
+ .driver = {
+ .name = "ls2x-apbdma",
+ .of_match_table = ls2x_dma_of_match_table,
+ },
+};
+module_platform_driver(ls2x_dmac_driver);
+
+MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
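[Editor's note: the new controller plugs into the generic dmaengine slave API, so a peripheral driver consumes it like any other provider. A minimal consumer sketch follows (not part of the patch; the device function, the "rx" channel label and the FIFO address are hypothetical placeholders):]

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int my_dev_start_rx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0x1fe00000,	/* hypothetical device FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "rx");	/* matched via "dmas"/"dma-names" */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);	/* lands in ls2x_dma_slave_config() */

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);		/* queued, not yet running */
	dma_async_issue_pending(chan);		/* triggers ls2x_dma_issue_pending() */
	return dma_submit_error(cookie);
}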
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
index 1b0a95892627..7b41c670970a 100644
--- a/drivers/dma/milbeaut-hdmac.c
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -531,7 +531,7 @@ disable_clk:
return ret;
}
-static int milbeaut_hdmac_remove(struct platform_device *pdev)
+static void milbeaut_hdmac_remove(struct platform_device *pdev)
{
struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -546,16 +546,21 @@ static int milbeaut_hdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
milbeaut_hdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id milbeaut_hdmac_match[] = {
@@ -566,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
static struct platform_driver milbeaut_hdmac_driver = {
.probe = milbeaut_hdmac_probe,
- .remove = milbeaut_hdmac_remove,
+ .remove_new = milbeaut_hdmac_remove,
.driver = {
.name = "milbeaut-m10v-hdmac",
.of_match_table = milbeaut_hdmac_match,
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index d29d01e730aa..2cce529b448e 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -368,7 +368,7 @@ disable_xdmac:
return ret;
}
-static int milbeaut_xdmac_remove(struct platform_device *pdev)
+static void milbeaut_xdmac_remove(struct platform_device *pdev)
{
struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -383,8 +383,15 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
milbeaut_xdmac_free_chan_resources(chan);
}
@@ -392,8 +399,6 @@ static int milbeaut_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&mdev->ddev);
disable_xdmac(mdev);
-
- return 0;
}
static const struct of_device_id milbeaut_xdmac_match[] = {
@@ -404,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
static struct platform_driver milbeaut_xdmac_driver = {
.probe = milbeaut_xdmac_probe,
- .remove = milbeaut_xdmac_remove,
+ .remove_new = milbeaut_xdmac_remove,
.driver = {
.name = "milbeaut-m10v-xdmac",
.of_match_table = milbeaut_xdmac_match,
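[Editor's note: both milbeaut conversions above follow the same tree-wide pattern: the int-returning .remove callback becomes a void .remove_new, since the driver core ignored the return value and unbound the device regardless. A sketch of the shape, with hypothetical names:]

static void my_dmac_remove(struct platform_device *pdev)
{
	struct my_dmac *mdev = platform_get_drvdata(pdev);

	if (my_dmac_quiesce_channels(mdev)) {	/* hypothetical helper */
		/* Deliberately leak rather than free memory still in use. */
		dev_alert(&pdev->dev, "failed to quiesce, leaking resources\n");
		return;
	}
	/* ...normal teardown... */
}

static struct platform_driver my_dmac_driver = {
	.probe		= my_dmac_probe,
	.remove_new	= my_dmac_remove,	/* void-returning variant */
	.driver		= { .name = "my-dmac" },
};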
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 3cf0b38387ae..c29744bfdf2c 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1053,6 +1053,9 @@ static bool _trigger(struct pl330_thread *thrd)
thrd->req_running = idx;
+ if (desc->rqtype == DMA_MEM_TO_DEV || desc->rqtype == DMA_DEV_TO_MEM)
+ UNTIL(thrd, PL330_STATE_WFP);
+
return true;
}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index 1aa65e5de0f3..f79240734807 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
- dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 3125a2f162b4..428473611115 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -20,10 +20,13 @@
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/slab.h>
#include "sf-pdma.h"
+#define PDMA_QUIRK_NO_STRICT_ORDERING BIT(0)
+
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
@@ -65,7 +68,7 @@ static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
u64 dst, u64 src, u64 size)
{
- desc->xfer_type = PDMA_FULL_SPEED;
+ desc->xfer_type = desc->chan->pdma->transfer_type;
desc->xfer_size = size;
desc->dst_addr = dst;
desc->src_addr = src;
@@ -492,6 +495,7 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
+ const struct sf_pdma_driver_platdata *ddata;
struct sf_pdma *pdma;
int ret, n_chans;
const enum dma_slave_buswidth widths =
@@ -517,6 +521,14 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
+ pdma->transfer_type = PDMA_FULL_SPEED | PDMA_STRICT_ORDERING;
+
+ ddata = device_get_match_data(&pdev->dev);
+ if (ddata) {
+ if (ddata->quirks & PDMA_QUIRK_NO_STRICT_ORDERING)
+ pdma->transfer_type &= ~PDMA_STRICT_ORDERING;
+ }
+
pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);
@@ -563,7 +575,20 @@ static int sf_pdma_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id, pdma);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Can't register SiFive Platform OF_DMA. (%d)\n", ret);
+ goto err_unregister;
+ }
+
return 0;
+
+err_unregister:
+ dma_async_device_unregister(&pdma->dma_dev);
+
+ return ret;
}
static void sf_pdma_remove(struct platform_device *pdev)
@@ -583,12 +608,25 @@ static void sf_pdma_remove(struct platform_device *pdev)
tasklet_kill(&ch->err_tasklet);
}
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
dma_async_device_unregister(&pdma->dma_dev);
}
+static const struct sf_pdma_driver_platdata mpfs_pdma = {
+ .quirks = PDMA_QUIRK_NO_STRICT_ORDERING,
+};
+
static const struct of_device_id sf_pdma_dt_ids[] = {
- { .compatible = "sifive,fu540-c000-pdma" },
- { .compatible = "sifive,pdma0" },
+ {
+ .compatible = "sifive,fu540-c000-pdma",
+ }, {
+ .compatible = "sifive,pdma0",
+ }, {
+ .compatible = "microchip,mpfs-pdma",
+ .data = &mpfs_pdma,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index d05772b5d8d3..215e07183d7e 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -48,7 +48,8 @@
#define PDMA_ERR_STATUS_MASK GENMASK(31, 31)
/* Transfer Type */
-#define PDMA_FULL_SPEED 0xFF000008
+#define PDMA_FULL_SPEED 0xFF000000
+#define PDMA_STRICT_ORDERING BIT(3)
/* Error Recovery */
#define MAX_RETRY 1
@@ -112,8 +113,13 @@ struct sf_pdma {
struct dma_device dma_dev;
void __iomem *membase;
void __iomem *mappedbase;
+ u32 transfer_type;
u32 n_chans;
struct sf_pdma_chan chans[] __counted_by(n_chans);
};
+struct sf_pdma_driver_platdata {
+ u32 quirks;
+};
+
#endif /* _SF_PDMA_H */
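[Editor's note: the MPFS quirk above travels through the standard OF match-data mechanism. A generic sketch of that pattern, with hypothetical names and flag values:]

#include <linux/bits.h>
#include <linux/platform_device.h>
#include <linux/property.h>

#define MY_FULL_SPEED			0xFF000000
#define MY_STRICT_ORDERING		BIT(3)
#define MY_QUIRK_NO_STRICT_ORDERING	BIT(0)

struct my_platdata {
	u32 quirks;
};

static const struct my_platdata my_quirky_data = {
	.quirks = MY_QUIRK_NO_STRICT_ORDERING,
};

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "vendor,plain-dma" },	/* .data stays NULL: no quirks */
	{ .compatible = "vendor,quirky-dma", .data = &my_quirky_data },
	{ }
};

static u32 my_transfer_type(struct platform_device *pdev)
{
	const struct my_platdata *ddata = device_get_match_data(&pdev->dev);
	u32 xfer = MY_FULL_SPEED | MY_STRICT_ORDERING;

	if (ddata && (ddata->quirks & MY_QUIRK_NO_STRICT_ORDERING))
		xfer &= ~MY_STRICT_ORDERING;	/* relax ordering on quirky parts */

	return xfer;
}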
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index fea5bda34bc2..1f1e86ba5c66 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -755,11 +755,11 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
static int rz_dmac_chan_probe(struct rz_dmac *dmac,
struct rz_dmac_chan *channel,
- unsigned int index)
+ u8 index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
struct rz_lmdesc *lmdesc;
- char pdev_irqname[5];
+ char pdev_irqname[6];
char *irqname;
int ret;
@@ -767,7 +767,7 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
channel->mid_rid = -EINVAL;
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (channel->irq < 0)
return channel->irq;
@@ -845,9 +845,9 @@ static int rz_dmac_probe(struct platform_device *pdev)
struct dma_device *engine;
struct rz_dmac *dmac;
int channel_num;
- unsigned int i;
int ret;
int irq;
+ u8 i;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 9c121a4b33ad..f97d80343aea 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -25,7 +25,7 @@ struct sh_dmae_chan {
const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
void __iomem *base;
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[32]; /* unique name per DMAC of channel */
int pm_error;
dma_addr_t slave_addr;
};
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index a9b4302f6050..f7cd0cad056c 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -706,10 +706,10 @@ static const struct dev_pm_ops usb_dmac_pm = {
static int usb_dmac_chan_probe(struct usb_dmac *dmac,
struct usb_dmac_chan *uchan,
- unsigned int index)
+ u8 index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
- char pdev_irqname[5];
+ char pdev_irqname[6];
char *irqname;
int ret;
@@ -717,7 +717,7 @@ static int usb_dmac_chan_probe(struct usb_dmac *dmac,
uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
/* Request the channel interrupt. */
- sprintf(pdev_irqname, "ch%u", index);
+ scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (uchan->irq < 0)
return -ENODEV;
@@ -768,8 +768,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
struct dma_device *engine;
struct usb_dmac *dmac;
- unsigned int i;
int ret;
+ u8 i;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
@@ -869,7 +869,7 @@ static void usb_dmac_chan_remove(struct usb_dmac *dmac,
static void usb_dmac_remove(struct platform_device *pdev)
{
struct usb_dmac *dmac = platform_get_drvdata(pdev);
- int i;
+ u8 i;
for (i = 0; i < dmac->n_channels; ++i)
usb_dmac_chan_remove(dmac, &dmac->channels[i]);
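[Editor's note: the rz-dmac and usb-dmac changes above fix the same latent overflow: with the index now a u8, "ch%u" can expand to "ch255", which needs six bytes including the terminating NUL, one more than the old char[5] buffer held. A sketch of the safe form:]

/*
 * "ch" + at most three digits + '\0' = 6 bytes; scnprintf() truncates
 * safely instead of writing past the end the way sprintf() could.
 */
static void build_irq_name(char *buf, size_t len, u8 index)
{
	scnprintf(buf, len, "ch%u", index);	/* e.g. "ch255" for index 255 */
}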
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 002833fb1fa0..2c489299148e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -31,13 +31,11 @@
/**
* struct stedma40_platform_data - Configuration struct for the dma device.
*
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
* @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver.
* @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
* which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
+ * SoftLLI introduces relink overhead that could impact performance for
* certain use cases.
* @num_of_soft_lli_chans: The number of channels that needs to be configured
* to use SoftLLI.
@@ -184,7 +182,7 @@ static __maybe_unused u32 d40_backup_regs[] = {
/*
* since 9540 and 8540 has the same HW revision
- * use v4a for 9540 or ealier
+ * use v4a for 9540 or earlier
* use v4b for 8540 or later
* HW revision:
* DB8500ed has revision 0
@@ -411,7 +409,7 @@ struct d40_desc {
*
* @base: The virtual address of LCLA. 18 bit aligned.
* @dma_addr: DMA address, if mapped
- * @base_unaligned: The orignal kmalloc pointer, if kmalloc is used.
+ * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
* This pointer is only there for clean-up on error.
* @pages: The number of pages needed for all physical channels.
* Only used later for clean-up on error
@@ -1655,7 +1653,7 @@ static void dma_tasklet(struct tasklet_struct *t)
return;
check_pending_tx:
- /* Rescue manouver if receiving double interrupts */
+ /* Rescue maneuver if receiving double interrupts */
if (d40c->pending_tx > 0)
d40c->pending_tx--;
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -3412,7 +3410,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
base->lcla_pool.base = (void *)page_list[i];
} else {
/*
- * After many attempts and no succees with finding the correct
+ * After many attempts and no success with finding the correct
* alignment, try with allocating a big buffer.
*/
dev_warn(base->dev,
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 7a0586633bf3..24ad7077c53b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -153,6 +153,7 @@ struct tegra_adma {
void __iomem *base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
+ unsigned long *dma_chan_mask;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@@ -741,6 +742,10 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!tdc->tdma)
+ continue;
+
ch_reg = &tdc->ch_regs;
ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
/* skip if channel is not active */
@@ -779,6 +784,9 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!tdc->tdma)
+ continue;
ch_reg = &tdc->ch_regs;
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
@@ -867,10 +875,31 @@ static int tegra_adma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->ahub_clk);
}
+ tdma->dma_chan_mask = devm_kzalloc(&pdev->dev,
+ BITS_TO_LONGS(tdma->nr_channels) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!tdma->dma_chan_mask)
+ return -ENOMEM;
+
+ /* Enable all channels by default */
+ bitmap_fill(tdma->dma_chan_mask, tdma->nr_channels);
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "dma-channel-mask",
+ (u32 *)tdma->dma_chan_mask,
+ BITS_TO_U32(tdma->nr_channels));
+ if (ret < 0 && (ret != -EINVAL)) {
+ dev_err(&pdev->dev, "dma-channel-mask is not complete.\n");
+ return ret;
+ }
+
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
+ /* skip for reserved channels */
+ if (!test_bit(i, tdma->dma_chan_mask))
+ continue;
+
tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+ (cdata->ch_reg_size * i);
@@ -957,8 +986,10 @@ static void tegra_adma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
- for (i = 0; i < tdma->nr_channels; ++i)
- irq_dispose_mapping(tdma->channels[i].irq);
+ for (i = 0; i < tdma->nr_channels; ++i) {
+ if (tdma->channels[i].irq)
+ irq_dispose_mapping(tdma->channels[i].irq);
+ }
pm_runtime_disable(&pdev->dev);
}
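[Editor's note: the tegra210-adma probe change treats "dma-channel-mask" as optional: the bitmap defaults to all ones, and -EINVAL (property absent) is tolerated while any other failure aborts probe. A condensed sketch of that pattern:]

#include <linux/bitmap.h>
#include <linux/of.h>

static int read_chan_mask(struct device *dev, unsigned long *mask,
			  unsigned int nr_channels)
{
	int ret;

	bitmap_fill(mask, nr_channels);	/* default: every channel usable */

	ret = of_property_read_u32_array(dev->of_node, "dma-channel-mask",
					 (u32 *)mask,
					 BITS_TO_U32(nr_channels));
	if (ret == -EINVAL)		/* property absent: keep the default */
		return 0;

	return ret;			/* 0 on success, other errors are fatal */
}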
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index acc950bf609c..d376c117cecf 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -12,6 +12,7 @@ k3-psil-lib-objs := k3-psil.o \
k3-psil-j721s2.o \
k3-psil-am62.o \
k3-psil-am62a.o \
- k3-psil-j784s4.o
+ k3-psil-j784s4.o \
+ k3-psil-am62p.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index f1f920861fa9..5f8d2e93ff3f 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {
diff --git a/drivers/dma/ti/k3-psil-am62p.c b/drivers/dma/ti/k3-psil-am62p.c
new file mode 100644
index 000000000000..0f338e16d971
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62p.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62p_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+ /* CSIRX 1-3 (only for J722S) */
+ PSIL_CSI2RX(0x5100),
+ PSIL_CSI2RX(0x5101),
+ PSIL_CSI2RX(0x5102),
+ PSIL_CSI2RX(0x5103),
+ PSIL_CSI2RX(0x5104),
+ PSIL_CSI2RX(0x5105),
+ PSIL_CSI2RX(0x5106),
+ PSIL_CSI2RX(0x5107),
+ PSIL_CSI2RX(0x5108),
+ PSIL_CSI2RX(0x5109),
+ PSIL_CSI2RX(0x510a),
+ PSIL_CSI2RX(0x510b),
+ PSIL_CSI2RX(0x510c),
+ PSIL_CSI2RX(0x510d),
+ PSIL_CSI2RX(0x510e),
+ PSIL_CSI2RX(0x510f),
+ PSIL_CSI2RX(0x5110),
+ PSIL_CSI2RX(0x5111),
+ PSIL_CSI2RX(0x5112),
+ PSIL_CSI2RX(0x5113),
+ PSIL_CSI2RX(0x5114),
+ PSIL_CSI2RX(0x5115),
+ PSIL_CSI2RX(0x5116),
+ PSIL_CSI2RX(0x5117),
+ PSIL_CSI2RX(0x5118),
+ PSIL_CSI2RX(0x5119),
+ PSIL_CSI2RX(0x511a),
+ PSIL_CSI2RX(0x511b),
+ PSIL_CSI2RX(0x511c),
+ PSIL_CSI2RX(0x511d),
+ PSIL_CSI2RX(0x511e),
+ PSIL_CSI2RX(0x511f),
+ PSIL_CSI2RX(0x5200),
+ PSIL_CSI2RX(0x5201),
+ PSIL_CSI2RX(0x5202),
+ PSIL_CSI2RX(0x5203),
+ PSIL_CSI2RX(0x5204),
+ PSIL_CSI2RX(0x5205),
+ PSIL_CSI2RX(0x5206),
+ PSIL_CSI2RX(0x5207),
+ PSIL_CSI2RX(0x5208),
+ PSIL_CSI2RX(0x5209),
+ PSIL_CSI2RX(0x520a),
+ PSIL_CSI2RX(0x520b),
+ PSIL_CSI2RX(0x520c),
+ PSIL_CSI2RX(0x520d),
+ PSIL_CSI2RX(0x520e),
+ PSIL_CSI2RX(0x520f),
+ PSIL_CSI2RX(0x5210),
+ PSIL_CSI2RX(0x5211),
+ PSIL_CSI2RX(0x5212),
+ PSIL_CSI2RX(0x5213),
+ PSIL_CSI2RX(0x5214),
+ PSIL_CSI2RX(0x5215),
+ PSIL_CSI2RX(0x5216),
+ PSIL_CSI2RX(0x5217),
+ PSIL_CSI2RX(0x5218),
+ PSIL_CSI2RX(0x5219),
+ PSIL_CSI2RX(0x521a),
+ PSIL_CSI2RX(0x521b),
+ PSIL_CSI2RX(0x521c),
+ PSIL_CSI2RX(0x521d),
+ PSIL_CSI2RX(0x521e),
+ PSIL_CSI2RX(0x521f),
+ PSIL_CSI2RX(0x5300),
+ PSIL_CSI2RX(0x5301),
+ PSIL_CSI2RX(0x5302),
+ PSIL_CSI2RX(0x5303),
+ PSIL_CSI2RX(0x5304),
+ PSIL_CSI2RX(0x5305),
+ PSIL_CSI2RX(0x5306),
+ PSIL_CSI2RX(0x5307),
+ PSIL_CSI2RX(0x5308),
+ PSIL_CSI2RX(0x5309),
+ PSIL_CSI2RX(0x530a),
+ PSIL_CSI2RX(0x530b),
+ PSIL_CSI2RX(0x530c),
+ PSIL_CSI2RX(0x530d),
+ PSIL_CSI2RX(0x530e),
+ PSIL_CSI2RX(0x530f),
+ PSIL_CSI2RX(0x5310),
+ PSIL_CSI2RX(0x5311),
+ PSIL_CSI2RX(0x5312),
+ PSIL_CSI2RX(0x5313),
+ PSIL_CSI2RX(0x5314),
+ PSIL_CSI2RX(0x5315),
+ PSIL_CSI2RX(0x5316),
+ PSIL_CSI2RX(0x5317),
+ PSIL_CSI2RX(0x5318),
+ PSIL_CSI2RX(0x5319),
+ PSIL_CSI2RX(0x531a),
+ PSIL_CSI2RX(0x531b),
+ PSIL_CSI2RX(0x531c),
+ PSIL_CSI2RX(0x531d),
+ PSIL_CSI2RX(0x531e),
+ PSIL_CSI2RX(0x531f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62p_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-2 */
+ PSIL_PDMA_XY_PKT(0xc300),
+ PSIL_PDMA_XY_PKT(0xc301),
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62p_ep_map = {
+ .name = "am62p",
+ .src = am62p_src_ep_map,
+ .src_count = ARRAY_SIZE(am62p_src_ep_map),
+ .dst = am62p_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62p_dst_ep_map),
+};
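[Editor's note: these tables are plain lookup data; the K3 DMA code resolves one entry at a time by PSI-L thread ID. A minimal illustration of the consumer side, using the real psil_get_ep_config() helper:]

#include <linux/dma/k3-psil.h>
#include <linux/err.h>

static bool thread_uses_pkt_mode(u32 thread_id)
{
	struct psil_endpoint_config *ep;

	/* Searches the ep_map registered for the running SoC. */
	ep = psil_get_ep_config(thread_id);
	if (IS_ERR(ep))
		return false;	/* unknown thread ID */

	return ep->pkt_mode;
}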
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index c383723d1c8f..a577be97e344 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -45,5 +45,6 @@ extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
extern struct psil_ep_map am62a_ep_map;
extern struct psil_ep_map j784s4_ep_map;
+extern struct psil_ep_map am62p_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index c11389d67a3f..25148d952472 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -26,6 +26,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am62_ep_map },
{ .family = "AM62AX", .data = &am62a_ep_map },
{ .family = "J784S4", .data = &j784s4_ep_map },
+ { .family = "AM62PX", .data = &am62p_ep_map },
+ { .family = "J722S", .data = &am62p_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 30fd2f386f36..6400d06588a2 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
+ u8 status;
if (!vd)
return;
@@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
- /* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ /* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
- result->result = DMA_TRANS_NOERROR;
+ /* Propagate TR Response errors to the client */
+ status = d->hwdesc[0].tr_resp_base->status;
+ if (status)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
}
}
}
@@ -4441,6 +4447,8 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM62X", .data = &am64_soc_data },
{ .family = "AM62AX", .data = &am64_soc_data },
{ .family = "J784S4", .data = &j721e_soc_data },
+ { .family = "AM62PX", .data = &am64_soc_data },
+ { .family = "J722S", .data = &am64_soc_data },
{ /* sentinel */ }
};
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index 618839df0748..ad7125f6e2ca 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -453,7 +453,7 @@ disable_clk:
return ret;
}
-static int uniphier_mdmac_remove(struct platform_device *pdev)
+static void uniphier_mdmac_remove(struct platform_device *pdev)
{
struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
struct dma_chan *chan;
@@ -468,16 +468,21 @@ static int uniphier_mdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *mdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
uniphier_mdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&mdev->ddev);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id uniphier_mdmac_match[] = {
@@ -488,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
static struct platform_driver uniphier_mdmac_driver = {
.probe = uniphier_mdmac_probe,
- .remove = uniphier_mdmac_remove,
+ .remove_new = uniphier_mdmac_remove,
.driver = {
.name = "uniphier-mio-dmac",
.of_match_table = uniphier_mdmac_match,
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 3a8ee2b173b5..3ce2dc2ad9de 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -563,7 +563,7 @@ out_unregister_dmac:
return ret;
}
-static int uniphier_xdmac_remove(struct platform_device *pdev)
+static void uniphier_xdmac_remove(struct platform_device *pdev)
{
struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
struct dma_device *ddev = &xdev->ddev;
@@ -579,15 +579,20 @@ static int uniphier_xdmac_remove(struct platform_device *pdev)
*/
list_for_each_entry(chan, &ddev->channels, device_node) {
ret = dmaengine_terminate_sync(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /*
+ * This results in resource leakage and maybe also
+ * use-after-free errors as e.g. *xdev is kfreed.
+ */
+ dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
+ chan->chan_id, ERR_PTR(ret));
+ return;
+ }
uniphier_xdmac_free_chan_resources(chan);
}
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(ddev);
-
- return 0;
}
static const struct of_device_id uniphier_xdmac_match[] = {
@@ -598,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
static struct platform_driver uniphier_xdmac_driver = {
.probe = uniphier_xdmac_probe,
- .remove = uniphier_xdmac_remove,
+ .remove_new = uniphier_xdmac_remove,
.driver = {
.name = "uniphier-xdmac",
.of_match_table = uniphier_xdmac_match,
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
index e641a5083e14..98f5f6fb9ff9 100644
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -64,9 +64,10 @@ struct xdma_hw_desc {
__le64 next_desc;
};
-#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
-#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
-#define XDMA_DESC_BLOCK_ALIGN 4096
+#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN 32
+#define XDMA_DESC_BLOCK_BOUNDARY 4096
/*
* Channel registers
@@ -76,6 +77,7 @@ struct xdma_hw_desc {
#define XDMA_CHAN_CONTROL_W1S 0x8
#define XDMA_CHAN_CONTROL_W1C 0xc
#define XDMA_CHAN_STATUS 0x40
+#define XDMA_CHAN_STATUS_RC 0x44
#define XDMA_CHAN_COMPLETED_DESC 0x48
#define XDMA_CHAN_ALIGNMENTS 0x4c
#define XDMA_CHAN_INTR_ENABLE 0x90
@@ -101,6 +103,7 @@ struct xdma_hw_desc {
#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define CHAN_CTRL_IE_WRITE_ERROR GENMASK(18, 14)
#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
#define CHAN_CTRL_POLL_MODE_WB BIT(26)
@@ -111,8 +114,17 @@ struct xdma_hw_desc {
CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
CHAN_CTRL_IE_MAGIC_STOPPED | \
CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
CHAN_CTRL_IE_DESC_ERROR)
+#define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
+
+#define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_WRITE_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
/* bits of the channel interrupt enable mask */
#define CHAN_IM_DESC_ERROR BIT(19)
#define CHAN_IM_READ_ERROR BIT(9)
@@ -134,18 +146,6 @@ struct xdma_hw_desc {
#define XDMA_SGDMA_DESC_ADJ 0x4088
#define XDMA_SGDMA_DESC_CREDIT 0x408c
-/* bits of the SG DMA control register */
-#define XDMA_CTRL_RUN_STOP BIT(0)
-#define XDMA_CTRL_IE_DESC_STOPPED BIT(1)
-#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2)
-#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
-#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4)
-#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6)
-#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9)
-#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19)
-#define XDMA_CTRL_NON_INCR_ADDR BIT(25)
-#define XDMA_CTRL_POLL_MODE_WB BIT(26)
-
/*
* interrupt registers
*/
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 84a88029226f..170017ff2aad 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -78,27 +78,31 @@ struct xdma_chan {
* @vdesc: Virtual DMA descriptor
* @chan: DMA channel pointer
* @dir: Transferring direction of the request
- * @dev_addr: Physical address on DMA device side
* @desc_blocks: Hardware descriptor blocks
* @dblk_num: Number of hardware descriptor blocks
* @desc_num: Number of hardware descriptors
* @completed_desc_num: Completed hardware descriptors
* @cyclic: Cyclic transfer vs. scatter-gather
+ * @interleaved_dma: Interleaved DMA transfer
* @periods: Number of periods in the cyclic transfer
* @period_size: Size of a period in bytes in cyclic transfers
+ * @frames_left: Number of frames left in interleaved DMA transfer
+ * @error: tx error flag
*/
struct xdma_desc {
struct virt_dma_desc vdesc;
struct xdma_chan *chan;
enum dma_transfer_direction dir;
- u64 dev_addr;
struct xdma_desc_block *desc_blocks;
u32 dblk_num;
u32 desc_num;
u32 completed_desc_num;
bool cyclic;
+ bool interleaved_dma;
u32 periods;
u32 period_size;
+ u32 frames_left;
+ bool error;
};
#define XDMA_DEV_STATUS_REG_DMA BIT(0)
@@ -276,6 +280,7 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
sw_desc->chan = chan;
sw_desc->desc_num = desc_num;
sw_desc->cyclic = cyclic;
+ sw_desc->error = false;
dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
GFP_NOWAIT);
@@ -371,6 +376,31 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
return ret;
xchan->busy = true;
+
+ return 0;
+}
+
+/**
+ * xdma_xfer_stop - Stop DMA transfer
+ * @xchan: DMA channel pointer
+ */
+static int xdma_xfer_stop(struct xdma_chan *xchan)
+{
+ int ret;
+ u32 val;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+
+ /* clear run stop bit to prevent any further auto-triggering */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ /* Clear the channel status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -476,6 +506,84 @@ static void xdma_issue_pending(struct dma_chan *chan)
}
/**
+ * xdma_terminate_all - Terminate all transactions
+ * @chan: DMA channel pointer
+ */
+static int xdma_terminate_all(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ xdma_xfer_stop(xdma_chan);
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+ xdma_chan->busy = false;
+ vd = vchan_next_desc(&xdma_chan->vchan);
+ if (vd) {
+ list_del(&vd->node);
+ dma_cookie_complete(&vd->tx);
+ vchan_terminate_vdesc(vd);
+ }
+ vchan_get_all_descriptors(&xdma_chan->vchan, &head);
+ list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);
+
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+ return 0;
+}
+
+/**
+ * xdma_synchronize - Synchronize terminated transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_synchronize(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ vchan_synchronize(&xdma_chan->vchan);
+}
+
+/**
+ * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
+ * @sw_desc: tx descriptor state container
+ * @src_addr: Value for the ->src_addr field of the first descriptor
+ * @dst_addr: Value for the ->dst_addr field of the first descriptor
+ * @size: Total size of a contiguous memory block
+ * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+ u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+ u32 left = size, len, desc_num = filled_descs_num;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+
+ dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+ desc = dblk->virt_addr;
+ desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+ do {
+ len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(src_addr);
+ desc->dst_addr = cpu_to_le64(dst_addr);
+ if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+ desc = (++dblk)->virt_addr;
+ else
+ desc++;
+
+ src_addr += len;
+ dst_addr += len;
+ left -= len;
+ } while (left);
+
+ return desc_num - filled_descs_num;
+}
+
+/**
* xdma_prep_device_sg - prepare a descriptor for a DMA transaction
* @chan: DMA channel pointer
* @sgl: Transfer scatter gather list
@@ -491,13 +599,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
{
struct xdma_chan *xdma_chan = to_xdma_chan(chan);
struct dma_async_tx_descriptor *tx_desc;
- u32 desc_num = 0, i, len, rest;
- struct xdma_desc_block *dblk;
- struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
- u64 dev_addr, *src, *dst;
+ u32 desc_num = 0, i;
+ u64 addr, dev_addr, *src, *dst;
struct scatterlist *sg;
- u64 addr;
for_each_sg(sgl, sg, sg_len, i)
desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
@@ -506,6 +611,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!sw_desc)
return NULL;
sw_desc->dir = dir;
+ sw_desc->cyclic = false;
+ sw_desc->interleaved_dma = false;
if (dir == DMA_MEM_TO_DEV) {
dev_addr = xdma_chan->cfg.dst_addr;
@@ -517,32 +624,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
dst = &addr;
}
- dblk = sw_desc->desc_blocks;
- desc = dblk->virt_addr;
- desc_num = 1;
+ desc_num = 0;
for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg);
- rest = sg_dma_len(sg);
-
- do {
- len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
- /* set hardware descriptor */
- desc->bytes = cpu_to_le32(len);
- desc->src_addr = cpu_to_le64(*src);
- desc->dst_addr = cpu_to_le64(*dst);
-
- if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
- dblk++;
- desc = dblk->virt_addr;
- } else {
- desc++;
- }
-
- desc_num++;
- dev_addr += len;
- addr += len;
- rest -= len;
- } while (rest);
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+ dev_addr += sg_dma_len(sg);
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -576,9 +662,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
struct xdma_device *xdev = xdma_chan->xdev_hdl;
unsigned int periods = size / period_size;
struct dma_async_tx_descriptor *tx_desc;
- struct xdma_desc_block *dblk;
- struct xdma_hw_desc *desc;
struct xdma_desc *sw_desc;
+ u64 addr, dev_addr, *src, *dst;
+ u32 desc_num;
unsigned int i;
/*
@@ -602,22 +688,23 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
sw_desc->periods = periods;
sw_desc->period_size = period_size;
sw_desc->dir = dir;
+ sw_desc->interleaved_dma = false;
- dblk = sw_desc->desc_blocks;
- desc = dblk->virt_addr;
+ addr = address;
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ dev_addr = xdma_chan->cfg.src_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
- /* fill hardware descriptor */
+ desc_num = 0;
for (i = 0; i < periods; i++) {
- desc->bytes = cpu_to_le32(period_size);
- if (dir == DMA_MEM_TO_DEV) {
- desc->src_addr = cpu_to_le64(address + i * period_size);
- desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
- } else {
- desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
- desc->dst_addr = cpu_to_le64(address + i * period_size);
- }
-
- desc++;
+ desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
+ addr += period_size;
}
tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -633,6 +720,57 @@ failed:
}
/**
+ * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
+ * @chan: DMA channel
+ * @xt: DMA transfer template
+ * @flags: tx flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_interleaved_dma(struct dma_chan *chan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ int i;
+ u32 desc_num = 0, period_size = 0;
+ struct dma_async_tx_descriptor *tx_desc;
+ struct xdma_chan *xchan = to_xdma_chan(chan);
+ struct xdma_desc *sw_desc;
+ u64 src_addr, dst_addr;
+
+ for (i = 0; i < xt->frame_size; ++i)
+ desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xchan, desc_num, false);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = xt->dir;
+ sw_desc->interleaved_dma = true;
+ sw_desc->cyclic = flags & DMA_PREP_REPEAT;
+ sw_desc->frames_left = xt->numf;
+ sw_desc->periods = xt->numf;
+
+ desc_num = 0;
+ src_addr = xt->src_start;
+ dst_addr = xt->dst_start;
+ for (i = 0; i < xt->frame_size; ++i) {
+ desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
+ src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
+ xt->sgl[i].size : 0);
+ dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
+ xt->sgl[i].size : 0);
+ period_size += xt->sgl[i].size;
+ }
+ sw_desc->period_size = period_size;
+
+ tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
+ if (tx_desc)
+ return tx_desc;
+
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
* xdma_device_config - Configure the DMA channel
* @chan: DMA channel
* @cfg: channel configuration
@@ -677,9 +815,8 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
return -EINVAL;
}
- xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
- dev, XDMA_DESC_BLOCK_SIZE,
- XDMA_DESC_BLOCK_ALIGN, 0);
+ xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
+ XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
if (!xdma_chan->desc_pool) {
xdma_err(xdev, "unable to allocate descriptor pool");
return -ENOMEM;
@@ -706,20 +843,20 @@ static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie
spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
vd = vchan_find_desc(&xdma_chan->vchan, cookie);
- if (vd)
- desc = to_xdma_desc(vd);
- if (!desc || !desc->cyclic) {
- spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
- return ret;
- }
-
- period_idx = desc->completed_desc_num % desc->periods;
- residue = (desc->periods - period_idx) * desc->period_size;
+ if (!vd)
+ goto out;
+ desc = to_xdma_desc(vd);
+ if (desc->error) {
+ ret = DMA_ERROR;
+ } else if (desc->cyclic) {
+ period_idx = desc->completed_desc_num % desc->periods;
+ residue = (desc->periods - period_idx) * desc->period_size;
+ dma_set_residue(state, residue);
+ }
+out:
spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
- dma_set_residue(state, residue);
-
return ret;
}
@@ -732,11 +869,12 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
struct xdma_chan *xchan = dev_id;
u32 complete_desc_num = 0;
- struct xdma_device *xdev;
- struct virt_dma_desc *vd;
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct virt_dma_desc *vd, *next_vd;
struct xdma_desc *desc;
int ret;
u32 st;
+ bool repeat_tx;
spin_lock(&xchan->vchan.lock);
@@ -745,45 +883,76 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
if (!vd)
goto out;
- xchan->busy = false;
+ /* Clear-on-read the status register */
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
+ if (ret)
+ goto out;
+
desc = to_xdma_desc(vd);
- xdev = xchan->xdev_hdl;
+
+ st &= XDMA_CHAN_STATUS_MASK;
+ if ((st & XDMA_CHAN_ERROR_MASK) ||
+ !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
+ desc->error = true;
+ xdma_err(xdev, "channel error, status register value: 0x%x", st);
+ goto out;
+ }
ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
&complete_desc_num);
if (ret)
goto out;
- desc->completed_desc_num += complete_desc_num;
+ if (desc->interleaved_dma) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+ if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
+ xdma_xfer_start(xchan);
+ goto out;
+ }
- if (desc->cyclic) {
- ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
- &st);
- if (ret)
+ /* last desc of any frame */
+ desc->frames_left--;
+ if (desc->frames_left)
+ goto out;
+
+ /* last desc of the last frame */
+ repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
+ next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
+ if (next_vd)
+ repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
+ if (repeat_tx) {
+ desc->frames_left = desc->periods;
+ desc->completed_desc_num = 0;
+ vchan_cyclic_callback(vd);
+ } else {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ }
+ /* start (or continue) the tx of the first desc on the vc.desc_issued list, if any */
+ xdma_xfer_start(xchan);
+ } else if (!desc->cyclic) {
+ xchan->busy = false;
+ desc->completed_desc_num += complete_desc_num;
+
+ /* if all data blocks are transferred, remove and complete the request */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
goto out;
+ }
- regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+ goto out;
+ /* transfer the rest of data */
+ xdma_xfer_start(xchan);
+ } else {
+ desc->completed_desc_num = complete_desc_num;
vchan_cyclic_callback(vd);
- goto out;
- }
-
- /*
- * if all data blocks are transferred, remove and complete the request
- */
- if (desc->completed_desc_num == desc->desc_num) {
- list_del(&vd->node);
- vchan_cookie_complete(vd);
- goto out;
}
- if (desc->completed_desc_num > desc->desc_num ||
- complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
- goto out;
-
- /* transfer the rest of data (SG only) */
- xdma_xfer_start(xchan);
-
out:
spin_unlock(&xchan->vchan.lock);
return IRQ_HANDLED;
@@ -1080,6 +1249,9 @@ static int xdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);
xdev->dma_dev.dev = &pdev->dev;
xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -1089,10 +1261,13 @@ static int xdma_probe(struct platform_device *pdev)
xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
xdev->dma_dev.device_config = xdma_device_config;
xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.device_terminate_all = xdma_terminate_all;
+ xdev->dma_dev.device_synchronize = xdma_synchronize;
xdev->dma_dev.filter.map = pdata->device_map;
xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
xdev->dma_dev.filter.fn = xdma_filter_fn;
xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
+ xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;
ret = dma_async_device_register(&xdev->dma_dev);
if (ret) {
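[Editor's note: on the consumer side, the new interleaved capability is driven through the generic dma_interleaved_template. A hedged sketch (the buffer handling and single-chunk layout are illustrative, not mandated by the driver):]

#include <linux/dmaengine.h>
#include <linux/slab.h>

static int queue_repeated_frame(struct dma_chan *chan, dma_addr_t src,
				dma_addr_t dst, size_t size)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_start = src;
	xt->dst_start = dst;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->numf = 1;		/* one frame... */
	xt->frame_size = 1;	/* ...made of a single chunk */
	xt->sgl[0].size = size;
	xt->sgl[0].icg = 0;

	/* DMA_PREP_REPEAT re-queues the frame until an EOT transfer lands. */
	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_INTERRUPT | DMA_PREP_REPEAT);
	kfree(xt);		/* template is consumed synchronously by prep */
	if (!tx)
		return -EINVAL;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}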
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 69587d85a7cd..b82815e64d24 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -309,7 +309,7 @@ static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
- out_str_len);
+ out_str_len + 1);
snprintf(buf, out_str_len, "%d",
dpdma_debugfs.xilinx_dpdma_irq_done_count);
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 1eca8cc271f8..6b2a6236f5e3 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -29,7 +29,7 @@ static u32 dpll_pin_xa_id;
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
-#define ASSERT_PIN_REGISTERED(p) \
+#define ASSERT_DPLL_PIN_REGISTERED(p) \
WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
@@ -425,6 +425,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
}
EXPORT_SYMBOL_GPL(dpll_device_unregister);
+static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
+{
+ kfree(prop->package_label);
+ kfree(prop->panel_label);
+ kfree(prop->board_label);
+ kfree(prop->freq_supported);
+}
+
+static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
+ struct dpll_pin_properties *dst)
+{
+ memcpy(dst, src, sizeof(*dst));
+ if (src->freq_supported && src->freq_supported_num) {
+ size_t freq_size = src->freq_supported_num *
+ sizeof(*src->freq_supported);
+ dst->freq_supported = kmemdup(src->freq_supported,
+ freq_size, GFP_KERNEL);
+ if (!dst->freq_supported)
+ return -ENOMEM;
+ }
+ if (src->board_label) {
+ dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
+ if (!dst->board_label)
+ goto err_board_label;
+ }
+ if (src->panel_label) {
+ dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
+ if (!dst->panel_label)
+ goto err_panel_label;
+ }
+ if (src->package_label) {
+ dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
+ if (!dst->package_label)
+ goto err_package_label;
+ }
+
+ return 0;
+
+err_package_label:
+ kfree(dst->panel_label);
+err_panel_label:
+ kfree(dst->board_label);
+err_board_label:
+ kfree(dst->freq_supported);
+ return -ENOMEM;
+}
+
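The dup/free pair above is the standard deep-copy-with-goto-unwind idiom: duplicate each member in order and, on failure, free exactly the members duplicated so far, in reverse. A minimal user-space sketch of the same shape (hypothetical struct and field names, not kernel code):

        #include <stdlib.h>
        #include <string.h>

        struct props { char *a; char *b; };

        static int props_dup(const struct props *src, struct props *dst)
        {
                memcpy(dst, src, sizeof(*dst));
                if (src->a) {
                        dst->a = strdup(src->a);
                        if (!dst->a)
                                return -1;
                }
                if (src->b) {
                        dst->b = strdup(src->b);
                        if (!dst->b)
                                goto err_b;     /* unwind what was copied so far */
                }
                return 0;

        err_b:
                free(dst->a);
                return -1;
        }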
static struct dpll_pin *
dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
const struct dpll_pin_properties *prop)
@@ -441,24 +488,48 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
prop->type > DPLL_PIN_TYPE_MAX)) {
ret = -EINVAL;
- goto err;
+ goto err_pin_prop;
}
- pin->prop = prop;
+ ret = dpll_pin_prop_dup(prop, &pin->prop);
+ if (ret)
+ goto err_pin_prop;
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
if (ret)
- goto err;
+ goto err_xa_alloc;
return pin;
-err:
+err_xa_alloc:
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ dpll_pin_prop_free(&pin->prop);
+err_pin_prop:
kfree(pin);
return ERR_PTR(ret);
}
+static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ rtnl_lock();
+ rcu_assign_pointer(dev->dpll_pin, dpll_pin);
+ rtnl_unlock();
+}
+
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ WARN_ON(!dpll_pin);
+ dpll_netdev_pin_assign(dev, dpll_pin);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_set);
+
+void dpll_netdev_pin_clear(struct net_device *dev)
+{
+ dpll_netdev_pin_assign(dev, NULL);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_clear);
+
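Writers publish dev->dpll_pin under RTNL via rcu_assign_pointer(); readers can then use rcu_dereference_rtnl(), which is satisfied by holding either RTNL or an RCU read-side critical section (dpll_netdev_pin() in dpll_netlink.c below relies on this). A hedged reader sketch (hypothetical helper name):

        static u32 netdev_pin_id_peek(const struct net_device *dev)
        {
                struct dpll_pin *pin;
                u32 id = 0;

                rcu_read_lock();
                pin = rcu_dereference_rtnl(dev->dpll_pin);
                if (pin)
                        id = pin->id;   /* consume the pointer inside the section */
                rcu_read_unlock();
                return id;
        }

The kfree_rcu() added to dpll_pin_put() below is what keeps pin valid for the duration of such a read-side section.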
/**
* dpll_pin_get - find existing or create new dpll pin
* @clock_id: clock_id of creator
@@ -511,10 +582,11 @@ void dpll_pin_put(struct dpll_pin *pin)
{
mutex_lock(&dpll_lock);
if (refcount_dec_and_test(&pin->refcount)) {
+ xa_erase(&dpll_pin_xa, pin->id);
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
- xa_erase(&dpll_pin_xa, pin->id);
- kfree(pin);
+ dpll_pin_prop_free(&pin->prop);
+ kfree_rcu(pin, rcu);
}
mutex_unlock(&dpll_lock);
}
@@ -564,8 +636,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
WARN_ON(!ops->state_on_dpll_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
- if (ASSERT_DPLL_REGISTERED(dpll))
- return -EINVAL;
mutex_lock(&dpll_lock);
if (WARN_ON(!(dpll->module == pin->module &&
@@ -583,6 +653,7 @@ static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv)
{
+ ASSERT_DPLL_PIN_REGISTERED(pin);
dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
if (xa_empty(&pin->dpll_refs))
@@ -636,15 +707,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
unsigned long i, stop;
int ret;
- if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
+ if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
return -EINVAL;
if (WARN_ON(!ops) ||
WARN_ON(!ops->state_on_pin_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
- if (ASSERT_PIN_REGISTERED(parent))
- return -EINVAL;
mutex_lock(&dpll_lock);
ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 5585873c5c1b..2b6d8ef1cdf3 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -44,9 +44,10 @@ struct dpll_device {
* @module: module of creator
* @dpll_refs: hold references to dplls the pin was registered with
* @parent_refs: hold references to parent pins the pin was registered with
- * @prop: pointer to pin properties given by registerer
+ * @prop: pin properties copied from the registerer
* @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
+ * @rcu: rcu_head for kfree_rcu()
**/
struct dpll_pin {
u32 id;
@@ -55,8 +56,9 @@ struct dpll_pin {
struct module *module;
struct xarray dpll_refs;
struct xarray parent_refs;
- const struct dpll_pin_properties *prop;
+ struct dpll_pin_properties prop;
refcount_t refcount;
+ struct rcu_head rcu;
};
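The new rcu_head member provides the storage kfree_rcu() needs: dpll_pin_put() can unpublish the pin and defer the actual kfree() past an RCU grace period, so lockless readers never touch freed memory. The pattern in miniature (hypothetical type and global):

        struct foo {
                int val;
                struct rcu_head rcu;    /* used by kfree_rcu() below */
        };

        static struct foo __rcu *global_foo;

        static void foo_release(struct foo *f)
        {
                /* Unpublish first, then defer the free past a grace period. */
                rcu_assign_pointer(global_foo, NULL);
                kfree_rcu(f, rcu);      /* second argument names the rcu_head member */
        }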
/**
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 3370dbddb86b..98e6ad8528d3 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <net/genetlink.h>
#include "dpll_core.h"
#include "dpll_netlink.h"
@@ -48,18 +49,6 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
}
/**
- * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin
- * @pin: pin pointer
- *
- * Return: byte size of pin handle attribute for given pin.
- */
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
-{
- return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
-}
-EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
-
-/**
* dpll_msg_add_pin_handle - attach pin handle attribute to a given message
* @msg: pointer to sk_buff message to attach a pin handle
* @pin: pin pointer
@@ -68,7 +57,7 @@ EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
* * 0 - success
* * -EMSGSIZE - no space in message to attach pin handle
*/
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
{
if (!pin)
return 0;
@@ -76,7 +65,28 @@ int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
return -EMSGSIZE;
return 0;
}
-EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle);
+
+static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->dpll_pin);
+}
+
+/**
+ * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev
+ * @dev: netdev from which to get the pin
+ *
+ * Return: byte size of pin handle attribute, or 0 if @dev has no pin.
+ */
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
+}
+
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev)
+{
+ return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev));
+}
static int
dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
@@ -121,14 +131,21 @@ dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_lock_status_error status_error = 0;
enum dpll_lock_status status;
int ret;
- ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status, extack);
+ ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status,
+ &status_error, extack);
if (ret)
return ret;
if (nla_put_u32(msg, DPLL_A_LOCK_STATUS, status))
return -EMSGSIZE;
+ if (status_error &&
+ (status == DPLL_LOCK_STATUS_UNLOCKED ||
+ status == DPLL_LOCK_STATUS_HOLDOVER) &&
+ nla_put_u32(msg, DPLL_A_LOCK_STATUS_ERROR, status_error))
+ return -EMSGSIZE;
return 0;
}
@@ -303,17 +320,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
DPLL_A_PIN_PAD))
return -EMSGSIZE;
- for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
+ for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
if (!nest)
return -EMSGSIZE;
- freq = pin->prop->freq_supported[fs].min;
+ freq = pin->prop.freq_supported[fs].min;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
return -EMSGSIZE;
}
- freq = pin->prop->freq_supported[fs].max;
+ freq = pin->prop.freq_supported[fs].max;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
@@ -329,9 +346,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
- for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
- if (freq >= pin->prop->freq_supported[fs].min &&
- freq <= pin->prop->freq_supported[fs].max)
+ for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
+ if (freq >= pin->prop.freq_supported[fs].min &&
+ freq <= pin->prop.freq_supported[fs].max)
return true;
return false;
}
@@ -421,7 +438,7 @@ static int
dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
struct netlink_ext_ack *extack)
{
- const struct dpll_pin_properties *prop = pin->prop;
+ const struct dpll_pin_properties *prop = &pin->prop;
struct dpll_pin_ref *ref;
int ret;
@@ -553,6 +570,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+ struct dpll_pin_ref *par_ref;
+ unsigned long i;
+
+ if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+ return false;
+ xa_for_each(&pin->parent_refs, i, par_ref)
+ if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+ DPLL_REGISTERED))
+ return true;
+ xa_for_each(&pin->dpll_refs, i, par_ref)
+ if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+ DPLL_REGISTERED))
+ return true;
+ return false;
+}
+
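dpll_pin_available() treats a pin as usable only if its own DPLL_REGISTERED xarray mark is set and at least one referenced parent pin or dpll device is still marked; the mark is a per-entry flag bit that the registration paths toggle. A sketch of the mark API involved (DPLL_REGISTERED is one of the xarray per-entry mark bits):

        /* Registration side: */
        xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);    /* register */
        xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);  /* unregister */

        /* Query side, as used above: */
        if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
                return false;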
/**
* dpll_device_change_ntf - notify that the dpll device has been changed
* @dpll: registered dpll pointer
@@ -579,7 +614,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
int ret = -ENOMEM;
void *hdr;
- if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
+ if (!dpll_pin_available(pin))
return -ENODEV;
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
@@ -717,7 +752,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@@ -753,7 +788,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@@ -780,7 +815,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "prio changing is not allowed");
return -EOPNOTSUPP;
}
@@ -808,7 +843,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
int ret;
if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "direction changing is not allowed");
return -EOPNOTSUPP;
}
@@ -838,8 +873,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
int ret;
phase_adj = nla_get_s32(phase_adj_attr);
- if (phase_adj > pin->prop->phase_range.max ||
- phase_adj < pin->prop->phase_range.min) {
+ if (phase_adj > pin->prop.phase_range.max ||
+ phase_adj < pin->prop.phase_range.min) {
NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
"phase adjust value not supported");
return -EINVAL;
@@ -1023,7 +1058,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
unsigned long i;
xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
- prop = pin->prop;
+ prop = &pin->prop;
cid_match = clock_id ? pin->clock_id == clock_id : true;
mod_match = mod_name_attr && module_name(pin->module) ?
!nla_strcmp(mod_name_attr,
@@ -1130,6 +1165,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
+ if (!dpll_pin_available(pin)) {
+ nlmsg_free(msg);
+ return -ENODEV;
+ }
ret = dpll_msg_add_pin_handle(msg, pin);
if (ret) {
nlmsg_free(msg);
@@ -1177,8 +1216,11 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
+ mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
ctx->idx) {
+ if (!dpll_pin_available(pin))
+ continue;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
&dpll_nl_family, NLM_F_MULTI,
@@ -1194,6 +1236,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
+ mutex_unlock(&dpll_lock);
+
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@@ -1349,6 +1393,7 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
+ mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED,
ctx->idx) {
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
@@ -1365,6 +1410,8 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
+ mutex_unlock(&dpll_lock);
+
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@@ -1415,20 +1462,6 @@ dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
mutex_unlock(&dpll_lock);
}
-int dpll_lock_dumpit(struct netlink_callback *cb)
-{
- mutex_lock(&dpll_lock);
-
- return 0;
-}
-
-int dpll_unlock_dumpit(struct netlink_callback *cb)
-{
- mutex_unlock(&dpll_lock);
-
- return 0;
-}
-
int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
@@ -1441,7 +1474,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
}
info->user_ptr[0] = xa_load(&dpll_pin_xa,
nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
- if (!info->user_ptr[0]) {
+ if (!info->user_ptr[0] ||
+ !dpll_pin_available(info->user_ptr[0])) {
NL_SET_ERR_MSG(info->extack, "pin not found");
ret = -ENODEV;
goto unlock_dev;
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index eaee5be7aa64..1e95f5397cfc 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -95,9 +95,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_DEVICE_GET,
- .start = dpll_lock_dumpit,
.dumpit = dpll_nl_device_get_dumpit,
- .done = dpll_unlock_dumpit,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
},
{
@@ -129,9 +127,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_PIN_GET,
- .start = dpll_lock_dumpit,
.dumpit = dpll_nl_pin_get_dumpit,
- .done = dpll_unlock_dumpit,
.policy = dpll_pin_get_dump_nl_policy,
.maxattr = DPLL_A_PIN_ID,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index 92d4c9c4f788..f491262bee4f 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -30,8 +30,6 @@ dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
void
dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
-int dpll_lock_dumpit(struct netlink_callback *cb);
-int dpll_unlock_dumpit(struct netlink_callback *cb);
int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info);
int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 6ac5ff20a2fe..401a77e3b5fa 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
*/
card->bm_generation = generation;
- if (root_device == NULL) {
+ if (card->gap_count == 0) {
+ /*
+ * If self IDs have inconsistent gap counts, do a
+ * bus reset ASAP. The config rom read might never
+ * complete, so don't wait for it. However, still
+ * send a PHY configuration packet prior to the
+ * bus reset. The PHY configuration packet might
+ * fail, but 1394-2008 8.4.5.2 explicitly permits
+ * it in this case, so it should be safe to try.
+ */
+ new_root_id = local_id;
+ /*
+ * We must always send a bus reset if the gap count
+ * is inconsistent, so bypass the 5-reset limit.
+ */
+ card->bm_retries = 0;
+ } else if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
@@ -484,7 +500,19 @@ static void bm_work(struct work_struct *work)
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- reset_bus(card, true);
+ /*
+ * Where possible, use a short bus reset to minimize
+ * disruption to isochronous transfers. But in the event
+ * of a gap count inconsistency, use a long bus reset.
+ *
+ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+ * may set different gap counts after a bus reset. On a mixed
+ * 1394/1394a bus, a short bus reset can get doubled. Some
+ * nodes may treat the double reset as one bus reset and others
+ * may treat it as two, causing a gap count inconsistency
+ * again. Using a long bus reset prevents this.
+ */
+ reset_bus(card, card->gap_count != 0);
/* Will allocate broadcast channel after the reset. */
goto out;
}
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 0547253d16fe..7d3346b3a2bf 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -118,10 +118,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
* @buf: where to put the string
* @size: size of @buf, in bytes
*
- * The string is taken from a minimal ASCII text descriptor leaf after
- * the immediate entry with @key. The string is zero-terminated.
- * An overlong string is silently truncated such that it and the
- * zero byte fit into @size.
+ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
+ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
+ * and the zero byte fit into @size.
*
* Returns strlen(buf) or a negative error code.
*/
@@ -368,8 +367,17 @@ static ssize_t show_text_leaf(struct device *dev,
for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) {
int result = fw_csr_string(directories[i], attr->key, buf, bufsize);
// Detected.
- if (result >= 0)
+ if (result >= 0) {
ret = result;
+ } else if (i == 0 && attr->key == CSR_VENDOR) {
+ // Sony DVMC-DA1 has configuration ROM such that the descriptor leaf entry
+ // in the root directory follows to the directory entry for vendor ID
+ // instead of the immediate value for vendor ID.
+ result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf,
+ bufsize);
+ if (result >= 0)
+ ret = result;
+ }
}
if (ret >= 0) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 6146b2927d5c..f2556a8e9401 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -107,12 +107,12 @@ struct ffa_drv_info {
struct work_struct notif_pcpu_work;
struct work_struct irq_work;
struct xarray partition_info;
- unsigned int partition_count;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */
};
static struct ffa_drv_info *drv_info;
+static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -733,6 +733,11 @@ static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
void *cb_data;
partition = xa_load(&drv_info->partition_info, part_id);
+ if (!partition) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ return;
+ }
+
read_lock(&partition->rw_lock);
callback = partition->callback;
cb_data = partition->cb_data;
@@ -915,6 +920,11 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
return -EOPNOTSUPP;
partition = xa_load(&drv_info->partition_info, part_id);
+ if (!partition) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ return -EINVAL;
+ }
+
write_lock(&partition->rw_lock);
cb_valid = !!partition->callback;
@@ -1186,9 +1196,9 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
kfree(pbuf);
}
-static void ffa_setup_partitions(void)
+static int ffa_setup_partitions(void)
{
- int count, idx;
+ int count, idx, ret;
uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
@@ -1197,7 +1207,7 @@ static void ffa_setup_partitions(void)
count = ffa_partition_probe(&uuid_null, &pbuf);
if (count <= 0) {
pr_info("%s: No partitions found, error %d\n", __func__, count);
- return;
+ return -EINVAL;
}
xa_init(&drv_info->partition_info);
@@ -1226,40 +1236,53 @@ static void ffa_setup_partitions(void)
ffa_device_unregister(ffa_dev);
continue;
}
- xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
+ rwlock_init(&info->rw_lock);
+ ret = xa_insert(&drv_info->partition_info, tpbuf->id,
+ info, GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
+ __func__, tpbuf->id, ret);
+ ffa_device_unregister(ffa_dev);
+ kfree(info);
+ }
}
- drv_info->partition_count = count;
kfree(pbuf);
/* Allocate for the host */
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
- xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
- drv_info->partition_count++;
+ if (!info) {
+ pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
+ __func__, drv_info->vm_id);
+ /* Already registered devices are freed on bus_exit */
+ ffa_partitions_cleanup();
+ return -ENOMEM;
+ }
+
+ rwlock_init(&info->rw_lock);
+ ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
+ info, GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
+ __func__, drv_info->vm_id, ret);
+ kfree(info);
+ /* Already registered devices are freed on bus_exit */
+ ffa_partitions_cleanup();
+ }
+
+ return ret;
}
static void ffa_partitions_cleanup(void)
{
- struct ffa_dev_part_info **info;
- int idx, count = drv_info->partition_count;
-
- if (!count)
- return;
-
- info = kcalloc(count, sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
- xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
- count, XA_PRESENT);
+ struct ffa_dev_part_info *info;
+ unsigned long idx;
- for (idx = 0; idx < count; idx++)
- kfree(info[idx]);
- kfree(info);
+ xa_for_each(&drv_info->partition_info, idx, info) {
+ xa_erase(&drv_info->partition_info, idx);
+ kfree(info);
+ }
- drv_info->partition_count = 0;
xa_destroy(&drv_info->partition_info);
}
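The switch from xa_store() to xa_insert() throughout this hunk is behavioral, not cosmetic: xa_store() silently replaces an existing entry (leaking the old info on a duplicate partition ID), while xa_insert() refuses with -EBUSY, which is what enables the new error paths. A minimal sketch of the difference (hypothetical demo function):

        static int xa_insert_demo(void *first, void *second, void *third)
        {
                struct xarray xa;
                int err;

                xa_init(&xa);
                xa_store(&xa, 1, first, GFP_KERNEL);    /* slot 1 = first */
                xa_store(&xa, 1, second, GFP_KERNEL);   /* silently replaces it */
                err = xa_insert(&xa, 1, third, GFP_KERNEL);     /* -EBUSY */
                xa_destroy(&xa);
                return err;
        }

The same xa_store()-to-xa_insert() conversion appears again below in the SCMI perf and raw-mode hunks.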
@@ -1508,7 +1531,11 @@ static int __init ffa_init(void)
ffa_notifications_setup();
- ffa_setup_partitions();
+ ret = ffa_setup_partitions();
+ if (ret) {
+ pr_err("failed to setup partitions\n");
+ goto cleanup_notifs;
+ }
ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
drv_info, true);
@@ -1516,6 +1543,9 @@ static int __init ffa_init(void)
pr_info("Failed to register driver sched callback %d\n", ret);
return 0;
+
+cleanup_notifs:
+ ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
@@ -1535,7 +1565,6 @@ static void __exit ffa_exit(void)
ffa_rxtx_unmap(drv_info->vm_id);
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
- xa_destroy(&drv_info->partition_info);
kfree(drv_info);
arm_ffa_bus_exit();
}
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index c0644558042a..e2050adbf85c 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -13,7 +13,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
@@ -954,8 +954,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
scmi_clock_describe_rates_get(ph, clkid, clk);
}
- if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
- PROTOCOL_REV_MINOR(version) >= 0x1) {
+ if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
cinfo->clock_config_set = scmi_clock_config_set_v2;
cinfo->clock_config_get = scmi_clock_config_get_v2;
} else {
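The old gate (major >= 0x2 && minor >= 0x1) also misclassifies later majors: clock protocol v3.0 has major 3 and minor 0, so the minor test fails and the v2 config handlers would wrongly be skipped. SCMI packs the version as major in bits 31:16 and minor in bits 15:0 (macro shapes approximated from the SCMI protocols header):

        /* 0x20000 -> v2.0, 0x20001 -> v2.1, 0x30000 -> v3.0 */
        #define PROTOCOL_REV_MAJOR(x)   ((u16)(((x) >> 16) & 0xffff))
        #define PROTOCOL_REV_MINOR(x)   ((u16)((x) & 0xffff))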
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c46dc5215af7..00b165d1f502 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
/* declarations for message passing transports */
struct scmi_msg_payld;
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 19246ed1f01f..b8d470417e8f 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+ /*
+ * An A2P IRQ is NOT valid when received while the platform still has
+ * the ownership of the channel, because the platform at first releases
+ * the SMT channel and then sends the completion interrupt.
+ *
+ * This addresses a possible race condition in which a spurious IRQ from
+ * a previous timed-out reply which arrived late could be wrongly
+ * associated with the next pending transaction.
+ */
+ if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
+ dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
+ return;
+ }
+
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 8ea2a7b3d35d..211e8e0aef2c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -350,8 +350,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
}
static inline void
-process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
- unsigned int loop_idx,
+process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
@@ -362,10 +362,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
/* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
if (dom->level_indexing_mode) {
+ int ret;
+
opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
- xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
- xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+ GFP_KERNEL);
+ if (ret)
+ dev_warn(dev,
+ "Failed to add opps_by_idx at %d - ret:%d\n",
+ opp->level_index, ret);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret)
+ dev_warn(dev,
+ "Failed to add opps_by_lvl at %d - ret:%d\n",
+ opp->perf, ret);
+
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
}
@@ -382,7 +395,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
process_response_opp(opp, st->loop_idx, response);
else
- process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
+ process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
response);
p->perf_dom->opp_count++;
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 0493aa3c12bf..350573518503 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
int i;
for (i = 0; i < num_chans; i++) {
- void *xret;
struct scmi_raw_queue *q;
q = scmi_raw_queue_init(raw);
@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
goto err_xa;
}
- xret = xa_store(&raw->chans_q, channels[i], q,
+ ret = xa_insert(&raw->chans_q, channels[i], q,
GFP_KERNEL);
- if (xa_err(xret)) {
+ if (ret) {
dev_err(dev,
"Fail to allocate Raw queue 0x%02X\n",
channels[i]);
- ret = xa_err(xret);
goto err_xa;
}
}
@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
dev = raw->handle->dev;
q = scmi_raw_queue_select(raw, idx,
SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+ if (!q) {
+ dev_warn(dev,
+ "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
+ idx, chan_id);
+ return;
+ }
/*
* Grab the msg_q_lock upfront to avoid a possible race between
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 87b4f4d35f06..8bf495bcad09 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -10,7 +10,7 @@
#include <linux/processor.h>
#include <linux/types.h>
-#include <asm-generic/bug.h>
+#include <linux/bug.h>
#include "common.h"
@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
+
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
+{
+ return (ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 83f5bb57fa4c..83092d93f36a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))
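The int-to-u64 change matters because md->num_pages is a u64 and EFI_PAGE_SHIFT is 12: any memory description of 2^19 pages (2 GiB) or more no longer fits in a 32-bit int, so the old code truncated the size (typically to a negative value) before it was ever used. Illustration:

        static void md_size_overflow_demo(void)
        {
                u64 num_pages = 0x80000;        /* 524288 pages x 4 KiB = 2 GiB */
                int bad  = num_pages << EFI_PAGE_SHIFT; /* truncates to INT_MIN */
                u64 good = num_pages << EFI_PAGE_SHIFT; /* 0x80000000, correct */
        }

The identical fix appears below for riscv-runtime.c.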
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 3e8d4b51a814..97bafb5f7038 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 35c37f667781..9b3884ff81e6 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx,
}
}
+struct ignore_section {
+ guid_t guid;
+ const char *name;
+};
+
+static const struct ignore_section ignore_sections[] = {
+ { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" },
+ { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" },
+ { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" },
+};
+
static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
@@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) {
+ if (guid_equal(sec_type, &ignore_sections[i].guid)) {
+ printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name);
+ return;
+ }
+ }
+
if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index d4987d013080..a00e07b853f2 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -144,15 +144,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
- * Special purpose memory is 'soft reserved', which means it
- * is set aside initially, but can be hotplugged back in or
- * be assigned to the dax driver after boot.
- */
- if (efi_soft_reserve_enabled() &&
- (md->attribute & EFI_MEMORY_SP))
- return false;
-
- /*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
* them as System RAM if they can be mapped writeback cacheable.
@@ -196,6 +187,16 @@ static __init void reserve_regions(void)
size = npages << PAGE_SHIFT;
if (is_memory(md)) {
+ /*
+ * Special purpose memory is 'soft reserved', which
+ * means it is set aside initially. Don't add a memblock
+ * for it now so that it can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ continue;
+
early_init_dt_add_memory_arch(paddr, size);
if (!is_usable_memory(md))
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 06964a3c130f..73f4810f6db3 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE
+cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax
cflags-$(CONFIG_LOONGARCH) += -fpie
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
@@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# exist.
STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX
# For LoongArch, keep all the symbols in .init section and make sure that no
# absolute symbols references exist.
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
index 6b83c492c3b8..31928bd87e0f 100644
--- a/drivers/firmware/efi/libstub/alignedmem.c
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -14,6 +14,7 @@
* @max: the address that the last allocated memory page shall not
* exceed
* @align: minimum alignment of the base of the allocation
+ * @memory_type: the type of memory to allocate
*
* Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
* to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 212687c30d79..c04b82ea40f2 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed,
- int memory_type, unsigned long alloc_limit);
+ int memory_type, unsigned long alloc_min,
+ unsigned long alloc_max);
efi_status_t efi_random_get_seed(void);
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 62d63f7a2645..1a9808012abd 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed,
- EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 674a064b8f7a..4e96a855fdf4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -17,7 +17,7 @@
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
unsigned long align_shift,
- u64 alloc_limit)
+ u64 alloc_min, u64 alloc_max)
{
unsigned long align = 1UL << align_shift;
u64 first_slot, last_slot, region_end;
@@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
return 0;
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
- alloc_limit);
+ alloc_max);
if (region_end < size)
return 0;
- first_slot = round_up(md->phys_addr, align);
+ first_slot = round_up(max(md->phys_addr, alloc_min), align);
last_slot = round_down(region_end - size + 1, align);
if (first_slot > last_slot)
@@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long *addr,
unsigned long random_seed,
int memory_type,
- unsigned long alloc_limit)
+ unsigned long alloc_min,
+ unsigned long alloc_max)
{
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
@@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
- slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
+ slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
+ alloc_max);
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
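With the new lower bound, a worked example of the slot arithmetic in get_entry_num_slots(), assuming the function still returns ((last_slot - first_slot) >> align_shift) + 1 as in mainline. Take a region [0x10000000, 0x80000000), size = align = 2 MiB (align_shift = 21), alloc_min = 0x40000000, alloc_max = 0x7fffffff:

        u64 first_slot = round_up(max(0x10000000ULL, 0x40000000ULL), 1ULL << 21);
                                                /* = 0x40000000: alloc_min wins */
        u64 last_slot  = round_down(0x7fffffffULL - (1ULL << 21) + 1, 1ULL << 21);
                                                /* = 0x7fe00000 */
        u64 slots = ((last_slot - first_slot) >> 21) + 1;       /* = 512 */

Before this change first_slot would have been 0x10000000, so KASLR could pick a slot below the minimum load address.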
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 0d510c9a06a4..99429bc4b0c7 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
-void efi_adjust_memory_range_protection(unsigned long start,
- unsigned long size)
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size)
{
efi_status_t status;
efi_gcd_memory_space_desc_t desc;
@@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start,
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
- efi_call_proto(memattr, clear_memory_attributes, rounded_start,
- rounded_end - rounded_start, EFI_MEMORY_XP);
- return;
+ status = efi_call_proto(memattr, clear_memory_attributes,
+ rounded_start,
+ rounded_end - rounded_start,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
+ return status;
}
if (efi_dxe_table == NULL)
- return;
+ return EFI_SUCCESS;
/*
* Don't modify memory region attributes, they are
@@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start,
status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
if (status != EFI_SUCCESS)
- return;
+ break;
next = desc.base_address + desc.length;
@@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start,
unprotect_start,
unprotect_start + unprotect_size,
status);
+ break;
}
}
+ return EFI_SUCCESS;
}
static void setup_unaccepted_memory(void)
@@ -793,6 +799,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
seed[0], EFI_LOADER_CODE,
+ LOAD_PHYSICAL_ADDR,
EFI_X86_KERNEL_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
return status;
@@ -805,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
- efi_adjust_memory_range_protection(addr, kernel_total_size);
-
- return EFI_SUCCESS;
+ return efi_adjust_memory_range_protection(addr, kernel_total_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
index 37c5a36b9d8c..1c20e99a6494 100644
--- a/drivers/firmware/efi/libstub/x86-stub.h
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -5,8 +5,8 @@
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
-void efi_adjust_memory_range_protection(unsigned long start,
- unsigned long size);
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size);
#ifdef CONFIG_X86_64
efi_status_t efi_setup_5level_paging(void);
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index bdb17eac0cb4..1ceace956758 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
}
status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
- seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
goto free_cmdline;
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index 09525fb5c240..01f0f90ea418 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 81f5f62e34fc..fbeeaee4ac85 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -167,7 +167,7 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
u32 *response_msg;
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+ response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
GFP_KERNEL);
if (!response_msg)
return -ENOMEM;
@@ -384,7 +384,8 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
u32 *response_msg;
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+ response_msg = devm_kzalloc(priv->dev,
+ AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
GFP_KERNEL);
if (!response_msg)
return -ENOMEM;
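Both mpfs hunks fix the same classic sizeof slip: sizeof(response_msg) is the size of the pointer (8 bytes on 64-bit), not of the u32 it addresses, so the old code allocated twice the needed space — harmless here, but the sizeof(*ptr) spelling is the idiom that stays correct if the element type ever changes (N and dev below stand in for the real arguments):

        u32 *response_msg;

        /* Wrong: pointer size, 8 * N bytes on 64-bit. */
        response_msg = devm_kzalloc(dev, N * sizeof(response_msg), GFP_KERNEL);

        /* Right: element size, 4 * N bytes, tracks the type of *response_msg. */
        response_msg = devm_kzalloc(dev, N * sizeof(*response_msg), GFP_KERNEL);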
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 19706bd2642a..3c197db42c9d 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(sysfb_disable);
static __init int sysfb_init(void)
{
- const struct screen_info *si = &screen_info;
+ struct screen_info *si = &screen_info;
struct simplefb_platform_data mode;
const char *name;
bool compatible;
@@ -119,18 +119,6 @@ static __init int sysfb_init(void)
if (ret)
goto err;
- /*
- * The firmware framebuffer is now maintained by the created
- * device. Disable screen_info after we've consumed it. Prevents
- * invalid access during kexec reboots.
- *
- * TODO: Vgacon still relies on the global screen_info. Make
- * vgacon work with the platform device, so we can clear
- * the screen_info unconditionally.
- */
- if (strcmp(name, "platform-framebuffer"))
- screen_info.orig_video_isVGA = 0;
-
goto unlock_mutex;
err:
platform_device_put(pd);
@@ -140,4 +128,4 @@ unlock_mutex:
}
/* must execute after PCI subsystem for EFI quirks */
-subsys_initcall_sync(sysfb_init);
+device_initcall(sysfb_init);
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e00c33310517..753e7be039e4 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index be7f2fa5aa7b..806b88d8dfb7 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -330,20 +330,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
state = sprd_eic_get(chip, offset);
- if (state)
+ if (state) {
sprd_eic_update(chip, offset,
SPRD_EIC_DBNC_IEV, 0);
- else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IC, 1);
+ } else {
sprd_eic_update(chip, offset,
SPRD_EIC_DBNC_IEV, 1);
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IC, 1);
+ }
break;
default:
return -ENOTSUPP;
@@ -355,20 +362,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
state = sprd_eic_get(chip, offset);
- if (state)
+ if (state) {
sprd_eic_update(chip, offset,
SPRD_EIC_LATCH_INTPOL, 0);
- else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTCLR, 1);
+ } else {
sprd_eic_update(chip, offset,
SPRD_EIC_LATCH_INTPOL, 1);
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTCLR, 1);
+ }
break;
default:
return -ENOTSUPP;
@@ -382,29 +396,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
default:
@@ -417,29 +436,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
default:
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 88066826d8e5..cd3e9657cc36 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1651,6 +1651,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "INT33FC:00@3",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 0.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 44c8f5743a24..75be4a3ca7f8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -968,11 +968,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_free_hogs;
ret = gpiochip_irqchip_init_hw(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_remove_irqchip_mask;
ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
@@ -997,23 +997,23 @@ err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(gc);
-err_remove_acpi_chip:
+err_free_hogs:
+ gpiochip_free_hogs(gc);
acpi_gpiochip_remove(gc);
+ gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
- gpiochip_free_hogs(gc);
of_gpiochip_remove(gc);
err_free_gpiochip_mask:
- gpiochip_remove_pin_ranges(gc);
gpiochip_free_valid_mask(gc);
+err_remove_from_list:
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&gdev->list);
+ spin_unlock_irqrestore(&gpio_lock, flags);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
gpio_device_put(gdev);
goto err_print_message;
}
-err_remove_from_list:
- spin_lock_irqsave(&gpio_lock, flags);
- list_del(&gdev->list);
- spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
kfree_const(gdev->label);
err_free_descs:
@@ -2042,6 +2042,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return -ENOTSUPP;
+#endif
+
return pinctrl_gpio_set_config(gc, offset, config);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_config);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2520db0b776e..c7edba18a6f0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -199,7 +199,7 @@ config DRM_TTM
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
- depends on DRM && KUNIT && MMU
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
@@ -207,7 +207,8 @@ config DRM_TTM_KUNIT_TEST
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
- developers.
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9da14436a373..79827a6dcd7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -200,6 +200,7 @@ extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
+extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
@@ -254,8 +255,6 @@ extern int amdgpu_agp;
extern int amdgpu_wbrf;
-extern int fw_bo_location;
-
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -1080,6 +1079,8 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ /* indicate amdgpu suspension status */
+ bool suspend_complete;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1146,6 +1147,7 @@ struct amdgpu_device {
bool debug_vm;
bool debug_largebar;
bool debug_disable_soft_recovery;
+ bool debug_use_vram_fw_buf;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1548,9 +1550,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 2deebece810e..7099ff9cf8c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1519,4 +1519,22 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
+/**
+ * amdgpu_choose_low_power_state
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Choose the target low power state for the GPU
+ */
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
+{
+ if (adev->in_runpm)
+ return;
+
+ if (amdgpu_acpi_is_s0ix_active(adev))
+ adev->in_s0ix = true;
+ else if (amdgpu_acpi_is_s3_active(adev))
+ adev->in_s3 = true;
+}
+
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 067690ba7bff..41db030ddc4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -138,11 +138,34 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
+static const struct drm_client_funcs kfd_client_funcs = {
+ .unregister = drm_client_release,
+};
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
+ &kfd_client_funcs);
+ if (ret) {
+ dev_err(adev->dev, "Failed to init DRM client: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_client_register(&adev->kfd.client);
+
+ return 0;
+}
+
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
int last_valid_bit;
- int ret;
amdgpu_amdkfd_gpuvm_init_mem_limits();
@@ -161,12 +184,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.enable_mes = adev->enable_mes,
};
- ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", NULL);
- if (ret) {
- dev_err(adev->dev, "Failed to init DRM client: %d\n", ret);
- return;
- }
-
/* this is going to have a few of the MSBs set that we need to
* clear
*/
@@ -205,10 +222,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
&gpu_resources);
- if (adev->kfd.init_complete)
- drm_client_register(&adev->kfd.client);
- else
- drm_client_release(&adev->kfd.client);
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -695,10 +708,8 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- /* Temporary workaround to fix issues observed in some
- * compute applications when GFXOFF is enabled on GFX11.
- */
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) {
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index cf6ed5fce291..27c61c535e29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm,
struct svm_range_bo *svm_bo);
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
@@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
@@ -311,7 +313,7 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
- struct dma_fence **ef);
+ struct dma_fence __rcu **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 899e31e3a5e8..3a3f3ce09f00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
/* stop scheduler and drain ring. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d17b2452cb1f..231fd927dcfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2085,21 +2085,35 @@ out:
return ret;
}
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
struct kfd_mem_attachment *entry;
struct amdgpu_vm *vm;
+ int ret;
vm = drm_priv_to_vm(drm_priv);
mutex_lock(&mem->lock);
+ ret = amdgpu_bo_reserve(mem->bo, true);
+ if (ret)
+ goto out;
+
list_for_each_entry(entry, &mem->attachments, list) {
- if (entry->bo_va->base.vm == vm)
- kfd_mem_dmaunmap_attachment(mem, entry);
+ if (entry->bo_va->base.vm != vm)
+ continue;
+ if (entry->bo_va->base.bo->tbo.ttm &&
+ !entry->bo_va->base.bo->tbo.ttm->sg)
+ continue;
+
+ kfd_mem_dmaunmap_attachment(mem, entry);
}
+ amdgpu_bo_unreserve(mem->bo);
+out:
mutex_unlock(&mem->lock);
+
+ return ret;
}
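The new reserve/unreserve bracket is the standard amdgpu BO locking idiom, so
the attachment walk cannot race with buffer moves or ttm changes. Reduced to
its general shape (the work done under the reservation is a placeholder):

    static int example_with_reserved_bo(struct amdgpu_bo *bo)
    {
            int ret;

            /* second argument is no_intr: true means an uninterruptible wait */
            ret = amdgpu_bo_reserve(bo, true);
            if (ret)
                    return ret;

            /* ... safely touch bo->tbo.ttm and per-VM attachments here ... */

            amdgpu_bo_unreserve(bo);
            return 0;
    }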
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
@@ -2802,7 +2816,7 @@ unlock_out:
put_task_struct(usertask);
}
-static void replace_eviction_fence(struct dma_fence **ef,
+static void replace_eviction_fence(struct dma_fence __rcu **ef,
struct dma_fence *new_ef)
{
struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
@@ -2837,7 +2851,7 @@ static void replace_eviction_fence(struct dma_fence **ef,
* 7. Add fence to all PD and PT BOs.
* 8. Unreserve all BOs
*/
-int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
{
struct amdkfd_process_info *process_info = info;
struct amdgpu_vm *peer_vm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e485dd3357c6..1afbb2e932c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1678,7 +1678,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_stop(&ring->sched);
}
@@ -1694,7 +1694,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_start(&ring->sched);
}
@@ -1916,8 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
ring = adev->rings[val];
- if (!ring || !ring->funcs->preempt_ib ||
- !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring) ||
+ !ring->funcs->preempt_ib)
return -EINVAL;
/* the last preemption failed */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5bb444bb36ce..94bdb5fa6ebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1544,6 +1544,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+ release_firmware(adev->pm.fw);
if (fw_ver < 0x00160e00)
return true;
}
@@ -4120,23 +4121,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
} else {
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 0):
- case IP_VERSION(13, 0, 7):
- case IP_VERSION(13, 0, 10):
- r = psp_gpu_reset(adev);
- break;
- default:
- tmp = amdgpu_reset_method;
- /* It should do a default reset when loading or reloading the driver,
- * regardless of the module parameter reset_method.
- */
- amdgpu_reset_method = AMD_RESET_METHOD_NONE;
- r = amdgpu_asic_reset(adev);
- amdgpu_reset_method = tmp;
- break;
- }
-
+ tmp = amdgpu_reset_method;
+ /* Do a default reset when loading or reloading the driver,
+ * regardless of the reset_method module parameter.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@@ -4523,13 +4514,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
+ amdgpu_choose_low_power_state(adev);
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- return r;
+ goto unprepare;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
@@ -4538,10 +4531,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
if (r)
- return r;
+ goto unprepare;
}
return 0;
+
+unprepare:
+ adev->in_s0ix = adev->in_s3 = false;
+
+ return r;
}
/**
@@ -4578,7 +4576,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
- flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);
@@ -5030,7 +5027,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
spin_lock(&ring->sched.job_list_lock);
@@ -5169,7 +5166,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
/* Clear job fence from fence drv to avoid force_completion
@@ -5245,7 +5242,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
- bool gpu_reset_for_dev_remove = 0;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -5265,10 +5261,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
- gpu_reset_for_dev_remove =
- test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
- test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
-
/*
* ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
@@ -5311,18 +5303,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_ras_intr_cleared();
}
- /* Since the mode1 reset affects base ip blocks, the
- * phase1 ip blocks need to be resumed. Otherwise there
- * will be a BIOS signature error and the psp bootloader
- * can't load kdb on the next amdgpu install.
- */
- if (gpu_reset_for_dev_remove) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list)
- amdgpu_device_ip_resume_phase1(tmp_adev);
-
- goto end;
- }
-
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
@@ -5559,11 +5539,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
- bool gpu_reset_for_dev_remove = false;
-
- gpu_reset_for_dev_remove =
- test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
- test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
/*
* Special case: RAS triggered and full reset isn't supported
@@ -5601,7 +5576,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
- if (gpu_reset_for_dev_remove && adev->shutdown)
+ if (adev->shutdown)
tmp_adev->shutdown = true;
}
if (!list_is_first(&adev->reset_list, &device_list))
@@ -5658,7 +5633,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5686,10 +5661,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (gpu_reset_for_dev_remove) {
- /* Workaroud for ASICs need to disable SMC first */
- amdgpu_device_smu_fini_early(tmp_adev);
- }
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
/*TODO Should we stop ?*/
if (r) {
@@ -5721,9 +5692,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
if (r && r == -EAGAIN)
goto retry;
-
- if (!r && gpu_reset_for_dev_remove)
- goto recover_end;
}
skip_hw_reset:
@@ -5734,7 +5702,7 @@ skip_hw_reset:
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
@@ -5779,7 +5747,6 @@ skip_sched_resume:
amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
-recover_end:
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
@@ -6090,7 +6057,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, NULL);
@@ -6218,7 +6185,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 0431eafa86b5..c7d60dd0fb97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1963,8 +1963,6 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
break;
case IP_VERSION(9, 4, 3):
- if (!amdgpu_exp_hw_support)
- return -EINVAL;
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 852cec98ff26..586f4d03039d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -128,6 +128,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_VM = BIT(0),
AMDGPU_DEBUG_LARGEBAR = BIT(1),
AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+ AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -210,7 +211,7 @@ int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
-int fw_bo_location = -1;
+int amdgpu_damage_clips = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -860,6 +861,18 @@ MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (defau
module_param_named(backlight, amdgpu_backlight, bint, 0444);
/**
+ * DOC: damageclips (int)
+ * Enable or disable damage clips support. If damage clips support is disabled,
+ * we will force full frame updates, irrespective of what user space sends to
+ * us.
+ *
+ * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
+ */
+MODULE_PARM_DESC(damageclips,
+ "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
+module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
+
+/**
* DOC: tmz (int)
* Trusted Memory Zone (TMZ) is a method to protect data being written
* to or read from memory.
@@ -990,10 +1003,6 @@ MODULE_PARM_DESC(wbrf,
"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
-MODULE_PARM_DESC(fw_bo_location,
- "location to put firmware bo for frontdoor loading (-1 = auto (default), 0 = on ram, 1 = on vram");
-module_param(fw_bo_location, int, 0644);
-
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
@@ -2122,6 +2131,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: soft reset for GPU recovery disabled\n");
adev->debug_disable_soft_recovery = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+ pr_info("debug: place fw in vram for frontdoor loading\n");
+ adev->debug_use_vram_fw_buf = true;
+ }
}
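Since AMDGPU_DEBUG_USE_VRAM_FW_BUF is BIT(3), the removed fw_bo_location
parameter is replaced by a bit in the generic debug mask. Illustrative usage,
assuming the mask is exposed as the debug_mask module option:

    /*
     * BIT(3) == 0x8, so on the kernel command line:
     *
     *     amdgpu.debug_mask=0x8
     *
     * replaces the old amdgpu.fw_bo_location=1.
     */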
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2233,6 +2247,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, ddev);
+ amdgpu_init_debug_options(adev);
+
ret = amdgpu_driver_load_kms(adev, flags);
if (ret)
goto err_pci;
@@ -2252,6 +2268,10 @@ retry_init:
if (ret)
goto err_pci;
+ ret = amdgpu_amdkfd_drm_client_create(adev);
+ if (ret)
+ goto err_pci;
+
/*
* 1. don't init fbdev on hw without DCE
* 2. don't init fbdev if there are no connectors
@@ -2313,8 +2333,6 @@ retry_init:
amdgpu_get_secondary_funcs(adev);
}
- amdgpu_init_debug_options(adev);
-
return 0;
err_pci:
@@ -2336,38 +2354,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(dev->dev);
}
- if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
- !amdgpu_sriov_vf(adev)) {
- bool need_to_reset_gpu = false;
-
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- struct amdgpu_hive_info *hive;
-
- hive = amdgpu_get_xgmi_hive(adev);
- if (hive->device_remove_count == 0)
- need_to_reset_gpu = true;
- hive->device_remove_count++;
- amdgpu_put_xgmi_hive(hive);
- } else {
- need_to_reset_gpu = true;
- }
-
- /* Workaround for ASICs need to reset SMU.
- * Called only when the first device is removed.
- */
- if (need_to_reset_gpu) {
- struct amdgpu_reset_context reset_context;
-
- adev->shutdown = true;
- memset(&reset_context, 0, sizeof(reset_context));
- reset_context.method = AMD_RESET_METHOD_NONE;
- reset_context.reset_req_dev = adev;
- set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
- set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
- amdgpu_device_gpu_recover(adev, NULL, &reset_context);
- }
- }
-
amdgpu_driver_unload_kms(dev);
/*
@@ -2503,6 +2489,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@@ -2517,6 +2504,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 73b8cca35bab..c623e23049d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -121,6 +121,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
struct amdgpu_bo_param bp;
dma_addr_t dma_addr;
struct page *p;
+ unsigned long x;
int ret;
if (adev->gart.bo != NULL)
@@ -130,6 +131,10 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
if (!p)
return -ENOMEM;
+ /* assign pages to this device */
+ for (x = 0; x < (1UL << order); x++)
+ p[x].mapping = adev->mman.bdev.dev_mapping;
+
/* If the hardware does not support UTCL2 snooping of the CPU caches
* then set_memory_wc() could be used as a workaround to mark the pages
* as write combine memory.
@@ -223,6 +228,7 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
unsigned int order = get_order(adev->gart.table_size);
struct sg_table *sg = adev->gart.bo->tbo.sg;
struct page *p;
+ unsigned long x;
int ret;
ret = amdgpu_bo_reserve(adev->gart.bo, false);
@@ -234,6 +240,8 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
sg_free_table(sg);
kfree(sg);
p = virt_to_page(adev->gart.ptr);
+ for (x = 0; x < (1UL << order); x++)
+ p[x].mapping = NULL;
__free_pages(p, order);
adev->gart.ptr = NULL;
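Setting page->mapping on allocation and clearing it again before
__free_pages() is a paired obligation — returning pages with a stale mapping
pointer can trip the page allocator's sanity checks. The pairing in outline
(allocation flags illustrative):

    static struct page *example_alloc(struct amdgpu_device *adev,
                                      unsigned int order)
    {
            struct page *p = alloc_pages(GFP_KERNEL, order);
            unsigned long x;

            if (!p)
                    return NULL;
            /* assign every page in the block to this device */
            for (x = 0; x < (1UL << order); x++)
                    p[x].mapping = adev->mman.bdev.dev_mapping;
            return p;
    }

    static void example_free(struct page *p, unsigned int order)
    {
            unsigned long x;

            /* must be cleared before the pages go back to the allocator */
            for (x = 0; x < (1UL << order); x++)
                    p[x].mapping = NULL;
            __free_pages(p, order);
    }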
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index b9674c57c436..6ddc8e3360e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -723,8 +723,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ /* If going to s2idle, no need to wait */
+ if (adev->in_s0ix) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GFX, true))
+ adev->gfx.gfx_off_state = true;
+ } else {
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
+ }
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index d2f273d77e59..55784a9f26c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1045,21 +1045,28 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
* seconds, so here, we just pick up three parts for emulation.
*/
ret = memcmp(vram_ptr, cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
ret = memcmp(vram_ptr + (size / 2), cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
ret = memcmp(vram_ptr + size - 10, cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
+release_buffer:
amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
&vram_ptr);
- return 0;
+ return ret;
}
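The rework converts three early returns — which leaked the VRAM and GTT test
buffers — into a single exit through release_buffer, and normalizes memcmp()'s
nonzero result to -EIO. The idiom, reduced to essentials (the predicates and
release helper below are placeholders):

    static int example_check(void)
    {
            int ret = 0;

            if (step_one_failed()) {
                    ret = -EIO;
                    goto release;
            }
            if (step_two_failed()) {
                    ret = -EIO;
                    goto release;
            }

    release:
            release_buffers();      /* runs on success and failure alike */
            return ret;
    }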
static ssize_t current_memory_partition_show(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b5ebafd4a3ad..bf4f48fe438d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1105,7 +1105,12 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_AVG_POWER,
(void *)&ui32, &ui32_size)) {
- return -EINVAL;
+ /* fall back to input power for backwards compat */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
}
ui32 >>= 8;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 2addbdf88394..0328616473f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -466,7 +466,7 @@ static int psp_sw_init(void *handle)
}
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- (amdgpu_sriov_vf(adev) || fw_bo_location == 1) ?
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 468a67b302d4..ca5c86e5f7cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
}
}
- if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
+ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
ret = -EFAULT;
err_free_shared_buf:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index fc42fb6ee191..31823a30dea2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -305,11 +305,13 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return -EINVAL;
data->head.block = block_id;
- /* only ue and ce errors are supported */
+ /* only ue, ce and poison errors are supported */
if (!memcmp("ue", err, 2))
data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
else if (!memcmp("ce", err, 2))
data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ else if (!memcmp("poison", err, 6))
+ data->head.type = AMDGPU_RAS_ERROR__POISON;
else
return -EINVAL;
@@ -431,9 +433,10 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
* The block is one of: umc, sdma, gfx, etc.
* see ras_block_string[] for details
*
- * The error type is one of: ue, ce, where,
+ * The error type is one of: ue, ce or poison, where,
* ue is multi-uncorrectable
* ce is single-correctable
+ * poison is a poison error
*
* The sub-block is the sub-block index; pass 0 if there is no sub-block.
* The address and value are hexadecimal numbers, leading 0x is optional.
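Putting the pieces together, an inject request with the new type could look
like this — the dri index, sub-block, address and value are illustrative, and
availability depends on the ASIC's RAS support:

    echo "inject umc ue 0x0 0x0 0x0" > /sys/kernel/debug/dri/0/ras/ras_ctrl
    echo "inject umc poison 0x0 0x0 0x0" > /sys/kernel/debug/dri/0/ras/ras_ctrl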
@@ -1067,8 +1070,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
mcm_info = &err_info->mcm_info;
if (err_info->ce_count) {
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld new correctable hardware errors detected in %s block, "
- "no user action is needed\n",
+ "%lld new correctable hardware errors detected in %s block\n",
mcm_info->socket_id,
mcm_info->die_id,
err_info->ce_count,
@@ -1080,8 +1082,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
dev_info(adev->dev, "socket: %d, die: %d, "
- "%lld correctable hardware errors detected in total in %s block, "
- "no user action is needed\n",
+ "%lld correctable hardware errors detected in total in %s block\n",
mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
}
}
@@ -1108,16 +1109,14 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
"%ld correctable hardware errors "
- "detected in %s block, no user "
- "action is needed.\n",
+ "detected in %s block\n",
adev->smuio.funcs->get_socket_id(adev),
adev->smuio.funcs->get_die_id(adev),
ras_mgr->err_data.ce_count,
blk_name);
} else {
dev_info(adev->dev, "%ld correctable hardware errors "
- "detected in %s block, no user "
- "action is needed.\n",
+ "detected in %s block\n",
ras_mgr->err_data.ce_count,
blk_name);
}
@@ -1920,7 +1919,7 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj
struct amdgpu_iv_entry *entry)
{
dev_info(obj->adev->dev,
- "Poison is created, no user action is needed.\n");
+ "Poison is created\n");
}
static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
@@ -2920,6 +2919,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_query_poison_mode(adev);
+ /* Pack socket_id into ras feature mask bits[31:29] */
+ if (adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id)
+ con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+
/* Get RAS schema for particular SOC */
con->schema = amdgpu_get_ras_schema(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index b0335a1c5e90..19899f6b9b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -32,7 +32,6 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
- AMDGPU_RESET_FOR_DEVICE_REMOVE = 2,
};
struct amdgpu_reset_context {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 45424ebf9681..5505d646f43a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -635,6 +635,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
ring->name);
ring->sched.ready = !r;
+
return r;
}
@@ -717,3 +718,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
+
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
+{
+ if (!ring)
+ return false;
+
+ if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
+ return false;
+
+ return true;
+}
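amdgpu_ring_sched_ready() centralizes three checks that were open-coded at
every call site converted in this patch: a NULL ring, a ring without a
scheduler, and a scheduler whose work queue is not ready. The canonical loop
now reads:

    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
            struct amdgpu_ring *ring = adev->rings[i];

            if (!amdgpu_ring_sched_ready(ring))
                    continue;

            /* ... operate on ring->sched ... */
    }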
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bbb53720a018..fe1a61eb6e4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index d334e42fe0eb..3e12763e477a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1062,7 +1062,7 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
- (amdgpu_sriov_vf(adev) || fw_bo_location == 1) ?
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.fw_buf,
&adev->firmware.fw_buf_mc,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b6cd565562ad..4740dd65b99d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -116,7 +116,7 @@ struct amdgpu_mem_stats;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-/* Reserve 4MB VRAM for page tables */
+/* How much VRAM is reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20)
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 6f149b54d4d3..b9a15d51eb5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -59,11 +59,8 @@ static inline uint16_t complete_integer_division_u16(
static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
{
- bool arg1_negative = numerator < 0;
- bool arg2_negative = denominator < 0;
-
- uint16_t arg1_value = (uint16_t)(arg1_negative ? -numerator : numerator);
- uint16_t arg2_value = (uint16_t)(arg2_negative ? -denominator : denominator);
+ u16 arg1_value = numerator;
+ u16 arg2_value = denominator;
uint16_t remainder;
@@ -100,9 +97,6 @@ static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
res_value += summand;
}
- if (arg1_negative ^ arg2_negative)
- res_value = -res_value;
-
return res_value;
}
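Dropping the sign handling is sound because both parameters are unsigned
16-bit values, so the negativity tests could never fire. As a sanity check,
the routine computes a U1.8 fixed-point quotient — approximately the following
reference model (assumed semantics with round-to-nearest, not the driver's
digit-by-digit implementation):

    static u16 u1_8_from_fraction_ref(u16 numerator, u16 denominator)
    {
            /* U1.8 has 8 fractional bits, so scale by 256 and round */
            return (u16)(((u32)numerator * 256u + denominator / 2) / denominator);
    }

    /* e.g. u1_8_from_fraction_ref(1, 2) == 0x80 (0.5)
     *      u1_8_from_fraction_ref(3, 4) == 0xC0 (0.75)
     */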
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 08916538a615..8db880244324 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -221,8 +221,23 @@ static struct attribute *amdgpu_vram_mgr_attributes[] = {
NULL
};
+static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_mem_info_vram_vendor.attr &&
+ !adev->gmc.vram_vendor)
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vram_mgr_attr_group = {
- .attrs = amdgpu_vram_mgr_attributes
+ .attrs = amdgpu_vram_mgr_attributes,
+ .is_visible = amdgpu_vram_attrs_is_visible
};
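An is_visible hook is the standard sysfs mechanism for hiding an attribute per
device instead of registering different attribute groups. In outline — the
attribute name and capability test below are illustrative:

    static umode_t example_attrs_is_visible(struct kobject *kobj,
                                            struct attribute *attr, int i)
    {
            struct device *dev = kobj_to_dev(kobj);

            /* return 0 to hide the attribute on this device */
            if (attr == &dev_attr_example.attr && !example_supported(dev))
                    return 0;

            /* otherwise keep the attribute's default permissions */
            return attr->mode;
    }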
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 6cab882e8061..1592c63b3099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -43,7 +43,6 @@ struct amdgpu_hive_info {
} pstate;
struct amdgpu_reset_domain *reset_domain;
- uint32_t device_remove_count;
atomic_t ras_recovery;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
index f0737fb3a999..d1bba9c64e16 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -30,6 +30,8 @@
#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+#define regATHUB_MISC_CNTL_V3_3_0 0x00d8
+#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX 0
static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
@@ -40,6 +42,9 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
break;
+ case IP_VERSION(3, 3, 0):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0);
+ break;
default:
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
break;
@@ -53,6 +58,9 @@ static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
case IP_VERSION(3, 0, 1):
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
break;
+ case IP_VERSION(3, 3, 0):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data);
+ break;
default:
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 6f7c031dd197..f24e34dc33d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(mmIH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(mmIH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}
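The same two-step sequence is applied to every IH block touched in this patch
(cz, iceland, ih_v6_0/6_1, navi10, si, tonga, vega10, vega20):
WPTR_OVERFLOW_CLEAR appears to act as a level control, so leaving it set after
acknowledging one overflow would mask every later one. The pattern, in
outline:

    tmp = RREG32(mmIH_RB_CNTL);
    tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;    /* ack current overflow */
    WREG32(mmIH_RB_CNTL, tmp);

    tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;   /* re-arm detection */
    WREG32(mmIH_RB_CNTL, tmp);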
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index b8c47e0cf37a..c19681492efa 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 73f6d7e72c73..dcdecb18b230 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3996,16 +3996,13 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
- /* don't check this. There are apparently firmwares in the wild with
- * incorrect size in the header
- */
- if (err == -ENODEV)
- goto out;
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
- dev_dbg(adev->dev,
- "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
- fw_name);
+ goto out;
+
+ /* don't validate this firmware. There are apparently firmwares
+ * in the wild with incorrect size in the header
+ */
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
@@ -4030,8 +4027,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = 0;
adev->gfx.mec2_fw = NULL;
}
- amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
- amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
gfx_v10_0_check_fw_write_wait(adev);
out:
@@ -6592,7 +6587,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 2fbcd9765980..4f3bfdc75b37 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -107,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};
-static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
- SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x8000b007),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f)
-};
-
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -304,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
- case IP_VERSION(11, 5, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_5_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0));
- break;
default:
break;
}
@@ -3846,7 +3824,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
@@ -6383,6 +6361,9 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ bitmap = i * adev->gfx.config.max_sh_per_se + j;
+ if (!((gfx_v11_0_get_sa_active_bitmap(adev) >> bitmap) & 1))
+ continue;
mask = 1;
counter = 0;
gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69c500910746..3bc6943365a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
+ /* For now, limit this quirk to the gfx9 APU series; it is already
+ * confirmed that gfx10/gfx11 APUs do not need this update.
+ */
+ if (adev->flags & AMD_IS_APU &&
+ adev->in_s3 && !adev->suspend_complete) {
+ DRM_INFO(" Will skip the CSB packet resubmit\n");
+ return 0;
+ }
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index 95d06da544e2..49aecdcee006 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -456,10 +456,12 @@ static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
- tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
- WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
+ WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 42e103d7077d..59d9215e5556 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -915,8 +915,8 @@ static int gmc_v6_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v6_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index efc16e580f1e..45a2f8e031a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1099,8 +1099,8 @@ static int gmc_v7_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v7_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ff4ae73d27ec..4422b27a3cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1219,8 +1219,8 @@ static int gmc_v8_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v8_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f9039d64ff2d..e67a62db9e12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1947,13 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
- static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
- u32 vram_info;
-
- if (!amdgpu_sriov_vf(adev)) {
- vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
- adev->gmc.vram_vendor = vram_info & 0xF;
- }
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
}
@@ -2340,8 +2333,8 @@ static int gmc_v9_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index aecad530b10a..2c02ae69883d 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index d9ed7332d805..ad4ad39f128f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -418,6 +418,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 8fb05eae340a..b8da0fc29378 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index bc38b90f8cf8..88ea58d5c4ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
@@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
- .set = jpeg_v4_0_set_interrupt_state,
.process = jpeg_v4_0_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 6ede85b28cc8..78b74daf4eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
- amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
return 0;
}
@@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
- .set = jpeg_v4_0_5_set_interrupt_state,
.process = jpeg_v4_0_5_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index e64b33115848..de93614726c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 6d24c84924cb..19986ff6a48d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -401,8 +401,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
+ "errors detected in %s block\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 25a3da83e0fb..b4723d68eab0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
u32 inst_mask;
int i;
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset =
+ SOC15_REG_OFFSET(
+ NBIO, 0,
+ regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+ << 2;
WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
0xff & ~(adev->gfx.xcc_mask));
@@ -597,8 +603,7 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
+ "errors detected in %s block\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 9a24f17a5750..cada9f300a7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(IH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 15033efec2ba..1c614451dead 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
return AMD_RESET_METHOD_MODE1;
}
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ /* Will reset for the following suspend abort cases.
+ * 1) Only APUs are reset for now; dGPUs have not been checked yet.
+ * 2) S3 suspend aborted and TOS already launched.
+ */
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+ !adev->suspend_complete &&
+ sol_reg)
+ return true;
+
+ return false;
+}
+
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
- (adev->apu_flags & AMD_APU_IS_RAVEN2))
+ /* On the latest Raven variants, GPU reset completes
+ * successfully, so temporarily enable it for the
+ * S3 suspend abort case.
+ */
+ if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
+ !soc15_need_reset_on_resume(adev))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -1302,6 +1325,10 @@ static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc15_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+ soc15_asic_reset(adev);
+ }
return soc15_common_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 48c6efcdeac9..4d7188912edf 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 917707bba7f3..450b6e831509 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 530549314ce4..a3ee3c4c650f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -64,7 +64,7 @@ static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
uint64_t reg_value;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
- dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+ dev_info(adev->dev, "Deferred error\n");
if (mc_umc_status)
dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 169ed400ee7b..8ab01ae919d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -2018,22 +2018,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
}
/**
- * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
- unsigned type, enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
-/**
* vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
*
* @adev: amdgpu_device pointer
@@ -2097,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
- .set = vcn_v4_0_set_interrupt_state,
.process = vcn_v4_0_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 2eda30e78f61..49e4c3c09aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle)
vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
-
- amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
}
return 0;
@@ -1669,22 +1667,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s
}
/**
- * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
- unsigned type, enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
-/**
* vcn_v4_0_5_process_interrupt - process VCN block interrupt
*
* @adev: amdgpu_device pointer
@@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = {
- .set = vcn_v4_0_5_set_interrupt_state,
.process = vcn_v4_0_5_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index d364c6dd152c..bf68e18e3824 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index ddfc6941f9d5..db66e6cccaf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index df75863393fc..d1caaf0e6a7c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9eef807, 0x876dff6d,
0x0000ffff, 0x87fe7e7e,
0x87ea6a6a, 0xb9faf802,
- 0xbe80226c, 0xbf810000,
+ 0xbe80226c, 0xbf9b0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
@@ -2065,7 +2065,7 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@@ -2500,7 +2500,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x876dff6d, 0x0000ffff,
0x87fe7e7e, 0x87ea6a6a,
0xb9faf802, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0xbf9b0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
};
@@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xb8eef802, 0xbf0d866e,
0xbfa20002, 0xb97af802,
0xbe80486c, 0xb97af802,
- 0xbe804a6c, 0xbfb00000,
+ 0xbe804a6c, 0xbfb10000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index e0140df0b0ec..71b3dc0c7363 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -1104,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV:
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
- s_endpgm
+ s_endpgm_saved
end
function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index e506411ad28a..bb26338204f4 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -921,7 +921,7 @@ L_RESTORE:
/* the END */
/**************************************************************************/
L_END_PGM:
- s_endpgm
+ s_endpgm_saved
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index ce4c52ec34d8..80e90fdef291 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
- amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+ err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+ if (err)
+ goto sync_memory_failed;
}
mutex_unlock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index d630100b9e91..bdc01ca9609a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -574,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
- addr = prange->start << PAGE_SHIFT;
+ addr = migrate->start;
src = (uint64_t *)(scratch + npages);
dst = scratch;
@@ -1026,7 +1026,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
} else {
res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
if (IS_ERR(res))
- return -ENOMEM;
+ return PTR_ERR(res);
pgmap->range.start = res->start;
pgmap->range.end = res->end;
pgmap->type = MEMORY_DEVICE_PRIVATE;
@@ -1042,10 +1042,10 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
- /* Disable SVM support capability */
- pgmap->type = 0;
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
devm_release_mem_region(adev->dev, res->start, resource_size(res));
+ /* Disable SVM support capability */
+ pgmap->type = 0;
return PTR_ERR(r);
}
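
The second kfd_migrate.c hunk is an ordering fix: the old error path zeroed pgmap->type before testing it, so the MEMORY_DEVICE_PRIVATE region was never released. A reduced sketch of the corrected ordering, with hypothetical stand-ins for the devm helpers:

#include <errno.h>

enum pgmap_type { PGMAP_NONE = 0, PGMAP_DEVICE_PRIVATE };

struct pgmap_like { enum pgmap_type type; };

extern int remap_pages(struct pgmap_like *pgmap);		/* hypothetical */
extern void release_private_region(struct pgmap_like *pgmap);	/* hypothetical */

static int init_zone_device_sketch(struct pgmap_like *pgmap)
{
	int r = remap_pages(pgmap);

	if (r) {
		/* Consult pgmap->type before clobbering it ... */
		if (pgmap->type == PGMAP_DEVICE_PRIVATE)
			release_private_region(pgmap);
		/* ... and only then disable the SVM support capability. */
		pgmap->type = PGMAP_NONE;
		return r;
	}
	return 0;
}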
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 8b7fed913526..22cbfa1bdadd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 15277f1d5cf0..826bc4f6c8a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
if (has_wa_flag) {
- uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
- 0xffff : 0xffffffff;
+ uint32_t wa_mask =
+ (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;
m->compute_static_thread_mgmt_se0 = wa_mask;
m->compute_static_thread_mgmt_se1 = wa_mask;
@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 42d881809dc7..697b6d530d12 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
+ if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
+ if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
+ m->compute_resource_limits |=
+ COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ else
+ m->compute_resource_limits &=
+ ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ }
+
q->is_active = QUEUE_IS_ACTIVE(*q);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 745024b31340..80320b8603fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -532,6 +532,7 @@ struct queue_properties {
enum mqd_update_flag {
UPDATE_FLAG_DBG_WA_ENABLE = 1,
UPDATE_FLAG_DBG_WA_DISABLE = 2,
+ UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};
struct mqd_update_info {
@@ -917,7 +918,7 @@ struct kfd_process {
* fence will be triggered during eviction and new one will be created
* during restore
*/
- struct dma_fence *ef;
+ struct dma_fence __rcu *ef;
/* Work items for evicting and restoring BOs */
struct delayed_work eviction_work;
@@ -1488,10 +1489,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
-static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
+static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
- struct drm_device *ddev = adev_to_drm(kfd->adev);
+ struct drm_device *ddev;
+
+ if (node->xcp)
+ ddev = node->xcp->ddev;
+ else
+ ddev = adev_to_drm(node->adev);
return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 71df51fcc1b0..717a60d7a4ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1110,6 +1110,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct dma_fence *ef;
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
@@ -1118,7 +1119,9 @@ static void kfd_process_wq_release(struct work_struct *work)
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
*/
- dma_fence_signal(p->ef);
+ synchronize_rcu();
+ ef = rcu_access_pointer(p->ef);
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
@@ -1127,7 +1130,7 @@ static void kfd_process_wq_release(struct work_struct *work)
svm_range_list_fini(p);
kfd_process_destroy_pdds(p);
- dma_fence_put(p->ef);
+ dma_fence_put(ef);
kfd_event_free_process(p);
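
The kfd_process.c hunks pair with the __rcu annotation on kfd_process::ef in kfd_priv.h: readers dereference the eviction fence under RCU, so the release path must wait out those readers before signalling and dropping it. A sketch of the release-side pattern, assuming a structure shaped like kfd_process:

#include <linux/rcupdate.h>
#include <linux/dma-fence.h>

struct proc_like {
	struct dma_fence __rcu *ef;	/* eviction fence, RCU-protected */
};

static void release_eviction_fence(struct proc_like *p)
{
	struct dma_fence *ef;

	/* Wait for in-flight RCU readers that may still dereference p->ef. */
	synchronize_rcu();

	/* No updaters remain either, so a plain annotated load suffices. */
	ef = rcu_access_pointer(p->ef);
	dma_fence_signal(ef);

	/* ... teardown that must run between signal and the final put ... */

	dma_fence_put(ef);
}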
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 43eff221eae5..4858112f9a53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
+ struct mqd_update_info minfo = {0};
struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
@@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
}
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
+ minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q, NULL);
+ pqn->q, &minfo);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
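
This hunk is the producer side of the gfx9 GWS quirk: pqm_set_gws() now tells update_mqd() (see the kfd_mqd_manager_v9.c hunk above) whether the queue holds a GWS allocation, so the MQD can force SIMD distribution. A reduced sketch of the flag flow, with UPDATE_FLAG_IS_GWS from kfd_priv.h and a hypothetical mask bit:

#include <stdint.h>

#define UPDATE_FLAG_IS_GWS	0x4
#define FORCE_SIMD_DIST_MASK	(1u << 16)	/* hypothetical bit position */

struct mqd_update_info_like { uint32_t update_flag; };
struct mqd_like { uint32_t compute_resource_limits; };

/* Producer: build the update info from whether GWS is attached. */
static struct mqd_update_info_like make_gws_update(void *gws)
{
	struct mqd_update_info_like minfo = {0};

	minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;
	return minfo;
}

/* Consumer: toggle the SIMD distribution workaround accordingly. */
static void apply_gws_quirk(struct mqd_like *m,
			    const struct mqd_update_info_like *minfo)
{
	if (!minfo)
		return;
	if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
		m->compute_resource_limits |= FORCE_SIMD_DIST_MASK;
	else
		m->compute_resource_limits &= ~FORCE_SIMD_DIST_MASK;
}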
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index ac84c4a2ca07..c50a0dc9c9c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -404,14 +404,9 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
- if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
- /* We're not in the eviction worker.
- * Signal the fence and synchronize with any
- * pending eviction work.
- */
+ if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
+ /* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
- cancel_work_sync(&svm_bo->eviction_work);
- }
dma_fence_put(&svm_bo->eviction_fence->base);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
@@ -2345,8 +2340,10 @@ retry:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
- /* Pairs with mmget in svm_range_add_list_work */
- mmput(mm);
+ /* Pairs with mmget in svm_range_add_list_work. If dropping the
+ * last mm refcount, schedule release work to avoid circular locking
+ */
+ mmput_async(mm);
spin_lock(&svms->deferred_list_lock);
}
@@ -2657,6 +2654,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
{
struct vm_area_struct *vma;
struct interval_tree_node *node;
+ struct rb_node *rb_node;
unsigned long start_limit, end_limit;
vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
@@ -2676,16 +2674,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
if (node) {
end_limit = min(end_limit, node->start);
/* Last range that ends before the fault address */
- node = container_of(rb_prev(&node->rb),
- struct interval_tree_node, rb);
+ rb_node = rb_prev(&node->rb);
} else {
/* Last range must end before addr because
* there was no range after addr
*/
- node = container_of(rb_last(&p->svms.objects.rb_root),
- struct interval_tree_node, rb);
+ rb_node = rb_last(&p->svms.objects.rb_root);
}
- if (node) {
+ if (rb_node) {
+ node = container_of(rb_node, struct interval_tree_node, rb);
if (node->last >= addr) {
WARN(1, "Overlap with prev node and page fault addr\n");
return -EFAULT;
@@ -3432,13 +3429,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
- if (!fence)
- return -EINVAL;
-
- if (dma_fence_is_signaled(&fence->base))
- return 0;
-
- if (fence->svm_bo) {
+ /* Dereferencing fence->svm_bo is safe here because the fence hasn't
+ * signaled yet and we're under the protection of the fence->lock.
+ * After the fence is signaled in svm_range_bo_release, we cannot get
+ * here any more.
+ *
+ * Reference is dropped in svm_range_evict_svm_bo_worker.
+ */
+ if (svm_bo_ref_unless_zero(fence->svm_bo)) {
WRITE_ONCE(fence->svm_bo->evicting, 1);
schedule_work(&fence->svm_bo->eviction_work);
}
@@ -3453,8 +3451,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
int r = 0;
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
- if (!svm_bo_ref_unless_zero(svm_bo))
- return; /* svm_bo was freed while eviction was pending */
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
mm = svm_bo->eviction_fence->mm;
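
The kfd_svm.c hunks move the reference acquisition from the eviction worker into the fence callback: taking the reference before schedule_work() means a zero-refcount svm_bo is simply never queued, and the cancel_work_sync() in svm_range_bo_release becomes unnecessary. A sketch of the scheduling side with a stand-in structure:

#include <linux/kref.h>
#include <linux/workqueue.h>

struct svm_bo_like {
	struct kref refcount;
	struct work_struct eviction_work;
	int evicting;
};

static void schedule_eviction(struct svm_bo_like *bo)
{
	/*
	 * Take the reference here, before queuing; it is dropped by the
	 * eviction worker. If the refcount already hit zero, the object
	 * is being torn down and must not be queued at all.
	 */
	if (bo && kref_get_unless_zero(&bo->refcount)) {
		WRITE_ONCE(bo->evicting, 1);
		schedule_work(&bo->eviction_work);
	}
}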
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e5f7c92eebcb..6ed2ec381aaa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
- if (pcache->cache_level == 2)
- pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
- else if (mode)
- pcache->cache_size = pcache_info[cache_type].cache_size / mode;
- else
- pcache->cache_size = pcache_info[cache_type].cache_size;
+ pcache->cache_size = pcache_info[cache_type].cache_size;
+ /* Partition mode only affects L3 cache size */
+ if (mode && pcache->cache_level == 3)
+ pcache->cache_size /= mode;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
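
The rewritten branch collapses three cases into one rule: the base size is always taken from the cache table, and only an L3 in a partitioned memory mode is scaled down. A minimal sketch of that rule:

#include <stdint.h>

/* Partition mode only affects L3 cache size; L2 is reported as-is. */
static uint32_t effective_cache_size_kb(uint32_t base_size_kb,
					uint32_t cache_level,
					uint32_t partition_mode)
{
	uint32_t size = base_size_kb;

	if (partition_mode && cache_level == 3)
		size /= partition_mode;

	return size;
}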
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b569b6eda4e3..5853cf022917 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -272,6 +272,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
u32 v_blank_start, v_blank_end, h_position, v_position;
struct amdgpu_crtc *acrtc = NULL;
+ struct dc *dc = adev->dm.dc;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@@ -284,6 +285,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return 0;
}
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
/*
* TODO rework base driver to use values directly.
* for now parse it back into reg-format
@@ -1715,7 +1719,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
- init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
+ init_data.flags.disable_ips_in_vpb = 1;
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
@@ -1836,21 +1843,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- }
-
- /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
- * It is expected that DMUB will resend any pending notifications at this point, for
- * example HPD from DPIA.
- */
- if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point. Note
+ * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to
+ * align with the legacy interface initialization sequence. Connection status will be
+ * proactively detected once in amdgpu_dm_initialize_drm_device.
+ */
dc_enable_dmub_outbox(adev->dm.dc);
/* DPIA trace goes to dmesg logs only if outbox is enabled */
@@ -1949,7 +1947,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
- if (adev->dm.hpd_rx_offload_wq) {
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
if (adev->dm.hpd_rx_offload_wq[i].wq) {
destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
@@ -2280,6 +2278,7 @@ static int dm_sw_fini(void *handle)
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}
@@ -3529,6 +3528,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ }
+
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
@@ -3557,10 +3564,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
handle_hpd_rx_irq,
(void *) aconnector);
}
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
- aconnector;
}
}
@@ -4554,6 +4557,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
@@ -5212,6 +5219,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *new_plane_state,
struct drm_crtc_state *crtc_state,
struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
bool *dirty_regions_changed)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
@@ -5236,6 +5244,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
if (!dm_crtc_state->mpo_requested) {
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
goto ffu;
@@ -6187,7 +6199,9 @@ create_stream_for_sink(struct drm_connector *connector,
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode, scale);
@@ -6520,10 +6534,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6531,7 +6550,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -6572,12 +6591,18 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6585,7 +6610,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -8291,6 +8316,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
&dirty_rects_changed);
/*
@@ -8976,16 +9003,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
trace_amdgpu_dm_atomic_commit_tail_begin(state);
- if (dm->dc->caps.ips_support) {
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- if (new_con_state->crtc &&
- new_con_state->crtc->state->active &&
- drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
- dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
- break;
- }
- }
- }
+ if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dm->dc, false);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_dp_mst_atomic_wait_for_dependencies(state);
@@ -9188,6 +9207,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ if (!dummy_updates) {
+ DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ continue;
+ }
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
@@ -9292,10 +9315,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!new_con_state->writeback_job)
continue;
- new_crtc_state = NULL;
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- if (acrtc)
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ if (!new_crtc_state)
+ continue;
if (acrtc->wb_enabled)
continue;
@@ -10728,11 +10751,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
- if (ret) {
- DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
- goto fail;
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
}
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
@@ -10752,7 +10777,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
goto fail;
}
- status = dc_validate_global_state(dc, dm_state->context, false);
+ status = dc_validate_global_state(dc, dm_state->context, true);
if (status != DC_OK) {
DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
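
Several amdgpu_dm.c hunks (dm_crtc_get_scanoutpos, the atomic commit tail, and dm_irq_state in amdgpu_dm_irq.c below) repeat one guard: before touching display registers, drop out of IPS, but only when the hardware supports IPS and idle optimizations are actually enabled, so no redundant DMUB round trip is issued. A sketch of the guard with stand-in types for dc and dc_allow_idle_optimizations():

#include <stdbool.h>

struct dc_like {
	struct { bool ips_support; } caps;
	bool idle_optimizations_allowed;
};

extern void allow_idle_optimizations(struct dc_like *dc, bool allow); /* hypothetical */

static void exit_ips_for_hw_access(struct dc_like *dc)
{
	/* Only wake the hardware when IPS is supported and currently engaged. */
	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
		allow_idle_optimizations(dc, false);
}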
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 9b527bffe11a..c87b64e464ed 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1239,7 +1239,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
if (has_crtc_cm_degamma && ret != -EINVAL) {
drm_dbg_kms(crtc->base.crtc->dev,
"doesn't support plane and CRTC degamma at the same time\n");
- return -EINVAL;
+ return -EINVAL;
}
/* If we are here, it means we don't have plane degamma settings, check
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index eaf8d9f48244..c27063305a13 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+ case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
+ case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
+ apply_edid_quirks(edid_buf, edid_caps);
+
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
return result;
@@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
- apply_edid_quirks(edid_buf, edid_caps);
-
kfree(sads);
kfree(sadb);
@@ -979,6 +981,11 @@ int dm_helper_dmub_aux_transfer_sync(
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
+ if (!link->hpd_status) {
+ *operation_result = AUX_RET_ERROR_HPD_DISCON;
+ return -1;
+ }
+
return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
operation_result);
}
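
The dm_helper_dmub_aux_transfer_sync() hunk fails AUX transactions up front when HPD reports the sink as disconnected, rather than letting the request time out in DMUB. A sketch of the guard with stand-in types and hypothetical return values:

#include <stdbool.h>

enum aux_ret_like { AUX_OK = 0, AUX_ERROR_HPD_DISCON = 1 }; /* hypothetical values */

struct link_like { bool hpd_status; };

extern int do_aux_transfer(struct link_like *link); /* hypothetical */

static int aux_transfer_sync(struct link_like *link, enum aux_ret_like *result)
{
	/* Don't talk to a sink that HPD says is gone; report it immediately. */
	if (!link->hpd_status) {
		*result = AUX_ERROR_HPD_DISCON;
		return -1;
	}
	return do_aux_transfer(link);
}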
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 58b880acb087..3390f0d8420a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -711,7 +711,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
{
bool st;
enum dc_irq_source irq_source;
-
+ struct dc *dc = adev->dm.dc;
struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
if (!acrtc) {
@@ -729,6 +729,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
st = (state == AMDGPU_IRQ_STATE_ENABLE);
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
dc_interrupt_set(adev->dm.dc, irq_source, st);
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index f2dfa96f9ef5..39530b2ea495 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -94,7 +94,7 @@ static void calculate_bandwidth(
const uint32_t s_high = 7;
const uint32_t dmif_chunk_buff_margin = 1;
- uint32_t max_chunks_fbc_mode;
+ uint32_t max_chunks_fbc_mode = 0;
int32_t num_cursor_lines;
int32_t i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 960c4b4f6ddf..05f392501c0a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
} else if (revision.minor == 3) {
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
}
@@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v11->gpucapinfo);
/*
@@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_1->gpucapinfo);
/*
@@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
-
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_2->gpucapinfo);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index a5489fe6875f..aa9fd1dc550a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
int i;
for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
+ if (i >= VG_NUM_DCFCLK_DPM_LEVELS)
+ break;
if (clock_table->SocVoltage[i] == voltage)
return clock_table->DcfClocks[i];
}
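
The vg_clk_mgr.c fix matters because the loop walks the voltage table (VG_NUM_SOC_VOLTAGE_LEVELS entries) while indexing the shorter DCF clock table (VG_NUM_DCFCLK_DPM_LEVELS entries) with the same counter. A reduced sketch with hypothetical table sizes:

#include <stdint.h>

#define N_VOLTAGE_LEVELS 8	/* hypothetical table sizes */
#define N_DCFCLK_LEVELS  4

static uint32_t find_dcfclk_for_voltage_sketch(const uint32_t soc_voltage[N_VOLTAGE_LEVELS],
					       const uint32_t dcf_clocks[N_DCFCLK_LEVELS],
					       uint32_t voltage)
{
	int i;

	for (i = 0; i < N_VOLTAGE_LEVELS; i++) {
		if (i >= N_DCFCLK_LEVELS)
			break;		/* don't overrun the shorter table */
		if (soc_voltage[i] == voltage)
			return dcf_clocks[i];
	}
	return 0;
}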
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 757528256326..a84f1e376dee 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -87,6 +87,20 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK2_BYPASS_CNTL 0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+
+#define regCLK6_0_CLK6_spll_field_8 0x464b
+#define regCLK6_0_CLK6_spll_field_8_BASE_IDX 0
+
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK6_0_CLK6_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
@@ -131,35 +145,63 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
- struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
-
if (disable) {
- if (stream_enc && stream_enc->funcs->disable_fifo)
- pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
} else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
-
- if (stream_enc && stream_enc->funcs->enable_fifo)
- pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
}
}
}
}
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint32_t ssc_enable;
+
+ REG_GET(CLK6_0_CLK6_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+ // Adjust the dp_dto reference clock if SSC is enabled; otherwise apply dprefclk
+ if (dcn314_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+}
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -252,11 +294,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn314_disable_otg_wa(clk_mgr_base, context, true);
+ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn314_disable_otg_wa(clk_mgr_base, context, false);
+ dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
@@ -436,6 +478,11 @@ static DpmClocks314_t dummy_clocks;
static struct dcn314_watermarks dummy_wms = { 0 };
+static struct dcn314_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
static void dcn314_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn314_watermarks *table)
{
int i, num_valid_sets;
@@ -708,13 +755,31 @@ static struct clk_mgr_funcs dcn314_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn314_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn314_init_clocks,
.enable_pme_wa = dcn314_enable_pme_wa,
.are_clock_states_equal = dcn314_are_clock_states_equal,
.notify_wm_ranges = dcn314_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+static void dcn314_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+
+ if (dcn314_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
void dcn314_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn314 *clk_mgr,
@@ -782,6 +847,7 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
+ dcn314_read_ss_info_from_lut(&clk_mgr->base);
/*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/
clk_mgr->base.base.bw_params = &dcn314_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 171f84340eb2..002c28e80720 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -28,6 +28,8 @@
#define __DCN314_CLK_MGR_H__
#include "clk_mgr_internal.h"
+#define DCN314_NUM_CLOCK_SOURCES 5
+
struct dcn314_watermarks;
struct dcn314_smu_watermark_set {
@@ -40,9 +42,18 @@ struct clk_mgr_dcn314 {
struct dcn314_smu_watermark_set smu_wm_set;
};
+struct dcn314_ss_info_table {
+ uint32_t ss_divider;
+ uint32_t ss_percentage[DCN314_NUM_CLOCK_SOURCES];
+};
+
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b);
+bool dcn314_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
+
+void dcn314_init_clocks(struct clk_mgr *clk_mgr);
+
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower);
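
dcn314_read_ss_info_from_lut() in the .c hunk reads the active clock source from CLK1_CLK2_BYPASS_CNTL and, when the SPLL reports SSC enabled, indexes the hard-coded table declared here. A sketch of that lookup, assuming the same 0.375% values for sources 2..4:

#include <stdint.h>
#include <stdbool.h>

#define N_CLOCK_SOURCES 5

struct ss_info_like {
	uint32_t ss_divider;
	uint32_t ss_percentage[N_CLOCK_SOURCES];
};

static const struct ss_info_like ss_info = {
	.ss_divider = 1000,			/* percentage is in 1/1000 % */
	.ss_percentage = {0, 0, 375, 375, 375},
};

/* Returns true and fills out the SS parameters when spread spectrum applies. */
static bool lookup_dprefclk_ss(uint32_t clock_source, bool spll_ssc_en,
			       uint32_t *percentage, uint32_t *divider)
{
	if (!spll_ssc_en || clock_source >= N_CLOCK_SOURCES)
		return false;
	if (ss_info.ss_percentage[clock_source] == 0)
		return false;

	*percentage = ss_info.ss_percentage[clock_source];
	*divider = ss_info.ss_divider;
	return true;
}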
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 9c660d1facc7..e64890259235 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -437,32 +437,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -474,32 +474,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -655,10 +655,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
+ uint32_t num_memps, num_fclk, num_dcfclk;
int i;
/* Determine min/max p-state values. */
- for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
+ num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
+ clock_table->NumMemPstatesEnabled;
+ for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
@@ -670,7 +673,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
min_dram_speed_mts = max_dram_speed_mts;
min_pstate = max_pstate;
- for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
+ for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
@@ -699,9 +702,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
- max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
+ num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
+ clock_table->NumFclkLevelsEnabled;
+ max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ clock_table->NumDcfClkLevelsEnabled;
+ for (i = 0; i < num_dcfclk; i++) {
int j;
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
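
The dcn35_clk_mgr.c hunks all apply the same defensive pattern: a level count reported by PMFW is clamped to the driver-side array length before it is used as a loop bound, so a misbehaving firmware table cannot drive an out-of-bounds read. The pattern in isolation:

#include <stdint.h>

/* Clamp a firmware-reported DPM level count to the driver array size. */
static uint32_t clamp_levels(uint32_t reported, uint32_t array_size)
{
	return reported > array_size ? array_size : reported;
}

/* e.g. num_memps = clamp_levels(clock_table->NumMemPstatesEnabled,
 *                               NUM_MEM_PSTATE_LEVELS);
 */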
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2d7205058c64..2c424e435962 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -411,12 +411,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required)
+ if (dc->optimized_required || dc->wm_optimized_required)
return false;
- if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
- return true;
-
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -2230,6 +2227,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
+ dc->wm_optimized_required = false;
}
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2652,6 +2650,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
+
+ dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -2859,6 +2859,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
+ if (update->crtc_timing_adjust)
+ stream->adjust = *update->crtc_timing_adjust;
+
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -3519,7 +3522,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
-
+ ASSERT(top_pipe_to_program != NULL);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -3814,7 +3817,9 @@ static void commit_planes_for_stream(struct dc *dc,
* programming has completed (we turn on phantom OTG in order
* to complete the plane disable for phantom pipes).
*/
- dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
@@ -4288,7 +4293,8 @@ static bool full_update_required(struct dc *dc,
stream_update->mst_bw_update ||
stream_update->func_shaper ||
stream_update->lut3d_func ||
- stream_update->pending_test_pattern))
+ stream_update->pending_test_pattern ||
+ stream_update->crtc_timing_adjust))
return true;
if (stream) {
@@ -4341,6 +4347,8 @@ static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!cur_pipe || !new_pipe)
+ return false;
cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
if (cur_is_odm_in_use == new_is_odm_in_use)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 57f0ddd15923..9fbdb09697fd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2194,6 +2194,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
otg_master = resource_get_otg_master_for_stream(
&state->res_ctx, state->streams[stream_idx]);
+ if (!otg_master || otg_master->stream_res.tg == NULL) {
+ DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx);
+ return;
+ }
slice_count = resource_get_opp_heads_for_otg_master(otg_master,
&state->res_ctx, opp_heads);
for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
@@ -4986,20 +4990,6 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
return DC_OK;
}
-bool resource_subvp_in_use(struct dc *dc,
- struct dc_state *context)
-{
- uint32_t i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
- return true;
- }
- return false;
-}
-
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
{
if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 460a8010c79f..180ac47868c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -267,7 +267,8 @@ void dc_state_construct(struct dc *dc, struct dc_state *state)
state->clk_mgr = dc->clk_mgr;
/* Initialise DIG link encoder resource tracking variables. */
- link_enc_cfg_init(dc, state);
+ if (dc->res_pool)
+ link_enc_cfg_init(dc, state);
}
void dc_state_destruct(struct dc_state *state)
@@ -290,11 +291,14 @@ void dc_state_destruct(struct dc_state *state)
dc_stream_release(state->phantom_streams[i]);
state->phantom_streams[i] = NULL;
}
+ state->phantom_stream_count = 0;
for (i = 0; i < state->phantom_plane_count; i++) {
dc_plane_state_release(state->phantom_planes[i]);
state->phantom_planes[i] = NULL;
}
+ state->phantom_plane_count = 0;
+
state->stream_mask = 0;
memset(&state->res_ctx, 0, sizeof(state->res_ctx));
memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
@@ -433,8 +437,9 @@ bool dc_state_add_plane(
otg_master_pipe = resource_get_otg_master_for_stream(
&state->res_ctx, stream);
- added = resource_append_dpp_pipes_for_plane_composition(state,
- dc->current_state, pool, otg_master_pipe, plane_state);
+ if (otg_master_pipe)
+ added = resource_append_dpp_pipes_for_plane_composition(state,
+ dc->current_state, pool, otg_master_pipe, plane_state);
if (added) {
stream_status->plane_states[stream_status->plane_count] =
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index f30a341bc090..c9317ea0258e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -51,7 +51,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.265"
+#define DC_VER "3.2.266"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -434,6 +434,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_in_vpb;
};
enum visual_confirm {
@@ -1036,6 +1037,7 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2b79a0e5638e..363d522603a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -125,7 +125,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
struct dmub_srv *dmub;
enum dmub_status status;
int i;
@@ -133,6 +133,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
+ dc_ctx = dc_dmub_srv->ctx;
dmub = dc_dmub_srv->dmub;
for (i = 0 ; i < count; i++) {
@@ -1161,7 +1162,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
enum dmub_status status;
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
@@ -1170,6 +1171,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
return true;
+ dc_ctx = dc_dmub_srv->ctx;
+
if (wait) {
if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
do {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index a23eebd9933b..ee10941caa59 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -139,6 +139,7 @@ union stream_update_flags {
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
+ uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
} bits;
@@ -325,6 +326,7 @@ struct dc_stream_update {
struct dc_3dlut *lut3d_func;
struct test_pattern *pending_test_pattern;
+ struct dc_crtc_timing_adjust *crtc_timing_adjust;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 4f276169e05a..9900dda2eef5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1034,6 +1034,7 @@ enum replay_FW_Message_type {
Replay_Msg_Not_Support = -1,
Replay_Set_Timing_Sync_Supported,
Replay_Set_Residency_Frameupdate_Timer,
+ Replay_Set_Pseudo_VTotal,
};
union replay_error_status {
@@ -1089,6 +1090,10 @@ struct replay_settings {
uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
enum replay_link_off_frame_count_level link_off_frame_count_level;
+ /* Replay pseudo vtotal for ABM + IPS on full-screen video, which can improve IPS residency */
+ uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay last pseudo vtotal set to DMUB */
+ uint16_t last_pseudo_vtotal;
};
/* To split out "global" and "per-panel" config settings.
@@ -1140,23 +1145,25 @@ struct dc_panel_config {
} ilr;
};
+#define MAX_SINKS_PER_LINK 4
+
/*
* USB4 DPIA BW ALLOCATION STRUCTS
*/
struct dc_dpia_bw_alloc {
- int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
- int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
- int sink_max_bw; // The Max BW that sink can require/support
+ int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
+ int link_verified_bw; // The verified BW that the link can allocate and use
+ int link_max_bw; // The Max BW that link can require/support
+ int allocated_bw; // The Actual Allocated BW for this DPIA
int estimated_bw; // The estimated available BW for this DPIA
int bw_granularity; // BW Granularity
+ int dp_overhead; // DP overhead in DP tunneling
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
bool response_ready; // Response ready from the CM side
uint8_t nrd_max_lane_count; // Non-reduced max lane count
uint8_t nrd_max_link_rate; // Non-reduced max link rate
};
-#define MAX_SINKS_PER_LINK 4
-
enum dc_hpd_enable_select {
HPD_EN_FOR_ALL_EDP = 0,
HPD_EN_FOR_PRIMARY_EDP_ONLY,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 140598f18bbd..f0458b8f00af 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -782,7 +782,7 @@ static void get_azalia_clock_info_dp(
/*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
* [khz] ->[100Hz] */
azalia_clock_info->audio_dto_module =
- pll_info->dp_dto_source_clock_in_khz * 10;
+ pll_info->audio_dto_source_clock_in_khz * 10;
}
void dce_aud_wall_dto_setup(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 5d3f6fa1011e..970644b695cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -975,6 +975,9 @@ static bool dcn31_program_pix_clk(
look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
+
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
// For these signal types Driver to program DP_DTO without calling VBIOS Command table
if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
@@ -1088,6 +1091,10 @@ static bool get_pixel_clk_frequency_100hz(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
unsigned int clock_hz = 0;
unsigned int modulo_hz = 0;
+ unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
+
+ if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+ dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
clock_hz = REG_READ(PHASE[inst]);
@@ -1100,7 +1107,7 @@ static bool get_pixel_clk_frequency_100hz(
modulo_hz = REG_READ(MODULO[inst]);
if (modulo_hz)
*pixel_clk_khz = div_u64((uint64_t)clock_hz*
- clock_source->ctx->dc->clk_mgr->dprefclk_khz*10,
+ dp_dto_ref_khz*10,
modulo_hz);
else
*pixel_clk_khz = 0;
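
Both dce_clock_source.c hunks consult clk_mgr->dp_dto_source_clock_in_khz, which dcn314_init_clocks now populates (SSC-adjusted when spread spectrum is on), and fall back to dprefclk when it is zero. The selection in isolation:

#include <stdint.h>

/* Prefer the SSC-adjusted DP DTO source clock when the clock manager
 * provides one; a value of 0 means "not set", so use dprefclk instead.
 */
static uint32_t pick_dp_dto_ref_khz(uint32_t dp_dto_source_clock_in_khz,
				    uint32_t dprefclk_khz)
{
	return dp_dto_source_clock_in_khz ? dp_dto_source_clock_in_khz
					  : dprefclk_khz;
}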
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e8570060d007..5bca67407c5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -290,4 +290,5 @@ void dce_panel_cntl_construct(
dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
dce_panel_cntl->base.ctx = init_data->ctx;
dce_panel_cntl->base.inst = init_data->inst;
+ dce_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index e43f77c11c00..5f97a868ada3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -56,16 +56,13 @@ static void dpp3_enable_cm_block(
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
- enum dc_lut_mode mode;
+ enum dc_lut_mode mode = LUT_BYPASS;
uint32_t state_mode;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
- if (state_mode == 0)
- mode = LUT_BYPASS;
-
if (state_mode == 2) {//Programmable RAM LUT
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
if (lut_mode == 0)
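The rewrite above initializes mode to LUT_BYPASS, so the function can no longer return an uninitialized value when the hardware reports a state other than 0 or 2. The same shape in a self-contained sketch (enum values are illustrative):

#include <stdio.h>

enum lut_mode { LUT_BYPASS, LUT_RAM_A, LUT_RAM_B }; /* illustrative values */

static enum lut_mode decode_gamcor_mode(unsigned state_mode, unsigned lut_select)
{
	enum lut_mode mode = LUT_BYPASS; /* covers state 0 and unknown states */

	if (state_mode == 2) /* programmable RAM LUT */
		mode = (lut_select == 0) ? LUT_RAM_A : LUT_RAM_B;
	return mode;
}

int main(void)
{
	printf("%d\n", (int)decode_gamcor_mode(3, 0)); /* 0: falls back to bypass */
	return 0;
}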
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
index ad0df1a72a90..9e96a3ace207 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
@@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct(
dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs;
dcn301_panel_cntl->base.ctx = init_data->ctx;
dcn301_panel_cntl->base.inst = init_data->inst;
+ dcn301_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 03248422d6ff..281be20b1a10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
+ uint8_t pwrseq_inst = 0xF;
+
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
- dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
+
+ switch (init_data->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+ ASSERT(false);
+ break;
+ }
+
+ dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index 501388014855..d761b0df2878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -203,12 +203,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.transmitter = init_data->transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index e4a328b45c8a..87760600e154 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -183,6 +183,20 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
return true;
}
+bool dcn32_subvp_in_use(struct dc *dc,
+ struct dc_state *context)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
+ return true;
+ }
+ return false;
+}
+
bool dcn32_mpo_in_use(struct dc_state *context)
{
uint32_t i;
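dcn32_subvp_in_use() reintroduces a DCN32-local scan now that the shared resource_subvp_in_use() helper is dropped from resource.h later in this series. A stand-alone model of the scan, with stub types for illustration:

#include <stdbool.h>

enum subvp_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };
struct pipe_model { enum subvp_type subvp; };

static bool any_subvp(const struct pipe_model *pipes, int count)
{
	for (int i = 0; i < count; i++)
		if (pipes[i].subvp != SUBVP_NONE)
			return true;	/* first SubVP pipe is enough */
	return false;
}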
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index da94e5309fba..81e349d5835b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,8 +184,6 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -240,6 +238,8 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 6042a5a6a44f..59ade76ffb18 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index aa68d010cbfd..a0a65e099104 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -33,7 +33,6 @@
#include "dcn30/dcn30_resource.h"
#include "link.h"
#include "dc_state_priv.h"
-#include "resource.h"
#define DC_LOGGER_INIT(logger)
@@ -292,7 +291,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
- resource_subvp_in_use(dc, context))
+ dcn32_subvp_in_use(dc, context))
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
@@ -1113,7 +1112,7 @@ struct pipe_slice_table {
struct pipe_ctx *pri_pipe;
struct dc_plane_state *plane;
int slice_count;
- } mpc_combines[MAX_SURFACES];
+ } mpc_combines[MAX_PLANES];
int mpc_combine_count;
};
@@ -1289,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex
return updated;
}
-static bool should_allow_odm_power_optimization(struct dc *dc,
+static bool should_apply_odm_power_optimization(struct dc *dc,
struct dc_state *context, struct vba_vars_st *v, int *split,
bool *merge)
{
@@ -1393,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate(
{
int i;
unsigned int new_vlevel;
+ unsigned int cur_policy[MAX_PIPES];
- for (i = 0; i < pipe_cnt; i++)
+ for (i = 0; i < pipe_cnt; i++) {
+ cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -1404,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate(
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
+ } else {
+ for (i = 0; i < pipe_cnt; i++)
+ pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
}
}
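The hunk above snapshots each pipe's ODM combine policy into cur_policy[] before forcing 2:1 and restores the snapshot when the revalidated voltage level is rejected, so a failed optimization no longer leaves the policy clobbered. The try/rollback pattern in isolation (types and names are illustrative):

#include <stdbool.h>
#include <string.h>

#define MAX_PIPES 6

static bool try_forced_policy(int *policy, int pipe_cnt, int forced,
			      bool (*revalidate)(const int *, int))
{
	int saved[MAX_PIPES];

	memcpy(saved, policy, sizeof(int) * pipe_cnt);	/* snapshot first */
	for (int i = 0; i < pipe_cnt; i++)
		policy[i] = forced;

	if (revalidate(policy, pipe_cnt))
		return true;

	/* revalidation failed: roll back to the previous policies */
	memcpy(policy, saved, sizeof(int) * pipe_cnt);
	return false;
}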
@@ -1581,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
}
- if (should_allow_odm_power_optimization(dc, context, vba, split, merge))
+ if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
try_odm_power_optimization_and_revalidate(
dc, context, pipes, split, merge, vlevel, *pipe_cnt);
@@ -2210,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int i;
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
- dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
+ if (!dc->config.enable_windowed_mpo_odm)
+ dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
@@ -2273,7 +2279,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
unsigned int dummy_latency_index = 0;
int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- bool subvp_active = resource_subvp_in_use(dc, context);
+ bool subvp_in_use = dcn32_subvp_in_use(dc, context);
unsigned int min_dram_speed_mts_margin;
bool need_fclk_lat_as_dummy = false;
bool is_subvp_p_drr = false;
@@ -2282,7 +2288,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc_assert_fp_enabled();
/* need to find dummy latency index for subvp */
- if (subvp_active) {
+ if (subvp_in_use) {
/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching its VBLANK */
if (!pstate_en) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2468,7 +2474,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
}
- if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_active) {
+ if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
/* find largest table entry that is lower than dram speed,
* but lower than DPM0 still uses DPM0
*/
@@ -3528,7 +3534,7 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
{
// WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
- if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || resource_subvp_in_use(dc, context)) &&
+ if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
dc->dml.soc.num_chans <= 8) {
int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 3d12dabd39e4..7ea2bd5374d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -164,11 +164,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
- .sr_exit_z8_time_us = 525.0,
- .sr_enter_plus_exit_z8_time_us = 715.0,
- .fclk_change_latency_us = 20.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .sr_exit_z8_time_us = 210.0,
+ .sr_enter_plus_exit_z8_time_us = 320.0,
+ .fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index b95bf27f2fe2..9be5ebf3a8c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -6229,7 +6229,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.NoOfDPPThisState,
mode_lib->ms.dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
mode_lib->ms.MetaRowBytes[j][k],
mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
locals->dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],
@@ -9446,13 +9446,13 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateWatermarks_params->CompressedBufferSizeInkByte = locals->CompressedBufferSizeInkByte;
// Output
- CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
- CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[0];
- CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
- CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[]
- CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[0];
- CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported
- CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[0];
+ CalculateWatermarks_params->Watermark = &locals->Watermark; // Watermarks *Watermark
+ CalculateWatermarks_params->DRAMClockChangeSupport = &locals->DRAMClockChangeSupport;
+ CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = locals->MaxActiveDRAMClockChangeLatencySupported; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
+ CalculateWatermarks_params->SubViewportLinesNeededInMALL = locals->SubViewportLinesNeededInMALL; // dml_uint_t SubViewportLinesNeededInMALL[]
+ CalculateWatermarks_params->FCLKChangeSupport = &locals->FCLKChangeSupport;
+ CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &locals->MaxActiveFCLKChangeLatencySupported; // dml_float_t *MaxActiveFCLKChangeLatencySupported
+ CalculateWatermarks_params->USRRetrainingSupport = &locals->USRRetrainingSupport;
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
&mode_lib->scratch,
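Every hostvm hunk in this file applies the same one-line unit fix: the SoC bounding box stores hostvm_min_page_size_kbytes in kilobytes, while the DML parameters expect bytes. For the common 4 kB minimum page this now passes 4096 instead of 4. A trivial sketch of the conversion:

/* Sketch of the unit fix: kilobytes (bounding box) -> bytes (DML parameter). */
static unsigned int hostvm_min_page_size_bytes(unsigned int kbytes)
{
	return kbytes * 1024; /* e.g. 4 kB -> 4096 bytes */
}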
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index fa6a93dd9629..1ba6933d2b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -341,9 +341,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}
- if (dml2->config.bbox_overrides.clks_table.num_states)
- p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
-
/* Override from passed values, if available */
for (i = 0; i < p->in_states->num_states; i++) {
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
@@ -400,7 +397,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
-
+ p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
}
@@ -439,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ // Override last out_state with data from last in_state
+ // This will ensure that out_state contains max fclk
+ memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
+ &p->in_states->state_array[p->in_states->num_states - 1],
+ sizeof(struct soc_state_bounding_box_st));
+ }
}
void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out)
@@ -626,8 +631,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
if (is_dp2p0_output_encoder(pipe))
out->OutputEncoder[location] = dml_dp2p0;
break;
- out->OutputEncoder[location] = dml_edp;
case SIGNAL_TYPE_EDP:
+ out->OutputEncoder[location] = dml_edp;
break;
case SIGNAL_TYPE_HDMI_TYPE_A:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
@@ -793,35 +798,28 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-/*TODO no support for mpc combine, need rework - should calculate scaling params based on plane+stream*/
-static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, const struct dc_state *context)
+static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context)
{
int i;
- struct scaler_data data = { 0 };
+ struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
+
+ memset(temp_pipe, 0, sizeof(struct pipe_ctx));
for (i = 0; i < MAX_PIPES; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state == in && !pipe->prev_odm_pipe) {
- const struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
-
- data = context->res_ctx.pipe_ctx[i].plane_res.scl_data;
- while (next_pipe) {
- data.h_active += next_pipe->plane_res.scl_data.h_active;
- data.recout.width += next_pipe->plane_res.scl_data.recout.width;
- if (in->rotation == ROTATION_ANGLE_0 || in->rotation == ROTATION_ANGLE_180) {
- data.viewport.width += next_pipe->plane_res.scl_data.viewport.width;
- } else {
- data.viewport.height += next_pipe->plane_res.scl_data.viewport.height;
- }
- next_pipe = next_pipe->next_odm_pipe;
- }
+ temp_pipe->stream = pipe->stream;
+ temp_pipe->plane_state = pipe->plane_state;
+ temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
+
+ resource_build_scaling_params(temp_pipe);
break;
}
}
ASSERT(i < MAX_PIPES);
- return data;
+ return temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
@@ -866,7 +864,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->ScalerEnabled[location] = false;
}
-static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, const struct dc_state *context)
+static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context)
{
const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context);
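get_scaler_data_for_plane() no longer hand-accumulates h_active, recout and viewport across ODM siblings; it copies the matched pipe's stream, plane and taps into the new res_ctx.temp_pipe scratch and lets the shared builder compute everything. A reduced model of that flow, where only resource_build_scaling_params() is the real DC helper and the types are stubs:

#include <string.h>

struct scl { int h_active; };		/* stand-in for scaler_data        */
struct scratch { struct scl scl_data; };	/* stand-in for the temp pipe */

static struct scl scaler_data_via_scratch(struct scratch *tmp,
		void (*build_scaling_params)(struct scratch *))
{
	memset(tmp, 0, sizeof(*tmp));	/* always start from a clean scratch */
	build_scaling_params(tmp);	/* shared builder fills the scl data */
	return tmp->scl_data;		/* by value, so the scratch is reusable */
}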
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 26307e599614..2a58a7687bdb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
}
for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
+ if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
+ dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
+ __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ break;
+ }
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
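The added guard refuses to index the fixed-size dml_to_dc_pipe_mapping arrays once i reaches __DML2_WRAPPER_MAX_STREAMS_PLANES__, logging and breaking instead of writing past the end. The generic defensive-indexing pattern, as a sketch:

#include <stdio.h>

#define MAX_SLOTS 6	/* stand-in for __DML2_WRAPPER_MAX_STREAMS_PLANES__ */

static int map_slot(int *table, int i, int value)
{
	if (i >= MAX_SLOTS) {
		fprintf(stderr, "index out of bounds: i=%d max=%d\n", i, MAX_SLOTS);
		return -1;	/* bail out instead of corrupting memory */
	}
	table[i] = value;
	return 0;
}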
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index fb328cd06cea..01493c49bd7a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1183,9 +1183,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
if (dccg) {
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -1354,7 +1354,7 @@ static void build_audio_output(
if (state->clk_mgr &&
(pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
- audio_output->pll_info.dp_dto_source_clock_in_khz =
+ audio_output->pll_info.audio_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
}
@@ -1476,7 +1476,7 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
-static enum dc_status apply_single_controller_ctx_to_hw(
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -2302,7 +2302,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
continue;
- status = apply_single_controller_ctx_to_hw(
+ status = dce110_apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
dc);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 08028a1779ae..ed3cc3648e8e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 51dd2ae09b2a..6dd479e8a348 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -3076,7 +3076,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index bc71a9b058fe..931ac8ed7069 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1882,42 +1882,6 @@ static void dcn20_program_pipe(
}
}
-static void update_vmin_vmax_fams(struct dc *dc,
- struct dc_state *context)
-{
- uint32_t i;
- struct drr_params params = {0};
- bool subvp_in_use = resource_subvp_in_use(dc, context);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- ((subvp_in_use && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM &&
- pipe->stream->allow_freesync) || (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && pipe->stream->fpo_in_use))) {
- if (!pipe->stream->vrr_active_variable && !pipe->stream->vrr_active_fixed) {
- struct timing_generator *tg = context->res_ctx.pipe_ctx[i].stream_res.tg;
-
- /* DRR should be configured already if we're in active variable
- * or active fixed, so only program if we're not in this state
- */
- params.vertical_total_min = pipe->stream->timing.v_total;
- params.vertical_total_max = pipe->stream->timing.v_total;
- tg->funcs->set_drr(tg, &params);
- }
- } else {
- if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- !pipe->stream->vrr_active_variable &&
- !pipe->stream->vrr_active_fixed) {
- struct timing_generator *tg = context->res_ctx.pipe_ctx[i].stream_res.tg;
- params.vertical_total_min = 0;
- params.vertical_total_max = 0;
- tg->funcs->set_drr(tg, &params);
- }
- }
- }
-}
-
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
@@ -1994,7 +1958,6 @@ void dcn20_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
- update_vmin_vmax_fams(dc, context);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -2196,10 +2159,10 @@ void dcn20_prepare_bandwidth(
}
/* program dchubbub watermarks:
- * For assigning optimized_required, use |= operator since we don't want
+ * For assigning wm_optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2212,10 +2175,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -2598,7 +2561,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
-static void dcn20_reset_back_end_for_pipe(
+void dcn20_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -2827,18 +2790,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
}
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
-
- phyd32clk = get_phyd32clk_src(link);
- dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
-
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
} else {
if (dccg->funcs->enable_symclk_se)
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
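dcn20_enable_stream() now programs the DTBCLK DTO before turning on the DP stream clock and SYMCLK32, mirroring in reverse the disable-path reorder in dce110_disable_stream() earlier in this patch. A sketch of the intended sequencing with stubbed callbacks:

struct dccg_ops {
	void (*set_dtbclk_dto)(void);
	void (*set_dpstreamclk)(void);
	void (*enable_symclk32_se)(void);
};

static void enable_hpo_stream_clocks(const struct dccg_ops *ops)
{
	ops->set_dtbclk_dto();		/* 1: make the source DTO valid first */
	ops->set_dpstreamclk();		/* 2: then route the stream clock     */
	ops->enable_symclk32_se();	/* 3: finally ungate the symbol clock */
}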
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index b94c85340abf..d950b3e54ec2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing(
void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 8e88dcaf88f5..7252f5f781f0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+ uint32_t otg_inst;
+
+ if (!abm || !tg || !panel_cntl)
+ return;
+
+ otg_inst = tg->inst;
if (dmcu) {
dce110_set_pipe(pipe_ctx);
return;
}
- if (abm && panel_cntl) {
- if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm,
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm,
otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- } else {
- dmub_abm_set_pipe(abm, otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- }
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
}
@@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
{
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ uint32_t otg_inst;
+
+ if (!abm || !tg || !panel_cntl)
+ return false;
+
+ otg_inst = tg->inst;
if (dc->dc->res_pool->dmcu) {
dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
return true;
}
- if (abm != NULL) {
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
-
- if (abm && panel_cntl) {
- if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm,
- otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- } else {
- dmub_abm_set_pipe(abm,
- otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- }
- }
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
+ } else {
+ dmub_abm_set_pipe(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
- if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
+ if (abm->funcs && abm->funcs->set_backlight_level_pwm)
abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
frame_ramp, 0, panel_cntl->inst);
else
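Both dcn21_set_pipe() and dcn21_set_backlight_level() are flattened here: abm, tg and panel_cntl are validated once at the top with an early return, so the happy path no longer nests inside if (abm && panel_cntl) blocks. The shape of the refactor as a sketch:

#include <stdbool.h>

struct res { void *abm, *tg, *panel; };

static bool do_set_pipe(const struct res *r)
{
	if (!r->abm || !r->tg || !r->panel)
		return false;	/* guard clause: no deep nesting below */

	/* ... single, unindented happy path ... */
	return true;
}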
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 6c9299c7683d..aa36d7a56ca8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) ||
+ (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ if (hws->funcs.reset_back_end_for_pipe)
+ hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
+
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
{
unsigned int i;
+ enum dc_status status = DC_OK;
+ struct dce_hwseq *hws = dc->hwseq;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream &&
+ pipe_ctx->stream->link->link_state_valid) {
+ continue;
}
+
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
+ continue;
+
+ if (hws->funcs.apply_single_controller_ctx_to_hw)
+ status = hws->funcs.apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ ASSERT(status == DC_OK);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (hws->funcs.resync_fifo_dccg_dio)
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
}
}
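dcn32_enable_phantom_streams() now applies hardware state per phantom pipe instead of calling apply_ctx_to_hw() for the whole context, and the new dcn32_disable_phantom_streams() tears down back ends for pipes that stop being phantom. A stand-alone model of the teardown filter; the real checks use dc_state_get_pipe_subvp_type() and pipe_need_reprogram(), everything here is stubbed:

#include <stdbool.h>

enum vp_type { VP_NONE, VP_MAIN, VP_PHANTOM };
struct pipe_model { bool has_stream; enum vp_type type; bool is_secondary; };

static bool needs_phantom_teardown(const struct pipe_model *old_p,
				   const struct pipe_model *new_p)
{
	/* only primary pipes that were phantom in the old state qualify */
	if (!old_p->has_stream || old_p->type != VP_PHANTOM || old_p->is_secondary)
		return false;
	/* tear down when the pipe is gone or repurposed for a real stream */
	return !new_p->has_stream || new_p->type != VP_PHANTOM;
}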
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index cecf7f0f5671..069e20bc87c0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
+
void dcn32_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 427cfc8c24a4..e8ac94a005b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.enable_phantom_streams = dcn32_enable_phantom_streams,
+ .disable_phantom_streams = dcn32_disable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
.resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+ .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 9c806385ecbd..8b6c49622f3b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -680,7 +680,7 @@ void dcn35_power_down_on_boot(struct dc *dc)
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ int i, edp_num;
if (dc->debug.dmcub_emulation)
return true;
@@ -688,6 +688,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 0 || edp_num > 1)
return false;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+ return false;
+ }
}
// TODO: review other cases when idle optimization is allowed
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index a54399383318..64ca7c66509b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -379,6 +379,7 @@ struct hw_sequencer_funcs {
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+ void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 6137cf09aa54..b3c62a82cb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -165,8 +165,15 @@ struct hwseq_private_funcs {
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
struct dc_state *context);
+ enum dc_status (*apply_single_controller_ctx_to_hw)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
#endif
+ void (*reset_back_end_for_pipe)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f74ae0d41d3c..3a6bf77a6873 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -469,6 +469,8 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
/* solely used for building scaler data in dml2 */
+ struct pipe_ctx temp_pipe;
};
struct dce_bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index cbba39d251e5..17e014d3bdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -333,6 +333,7 @@ struct clk_mgr {
bool force_smu_not_present;
bool dc_mode_softmax_enabled;
int dprefclk_khz; // Used when programming the pixel clock in clock source funcs; need to figure out where this goes
+ int dp_dto_source_clock_in_khz; // Used to program DP DTO with ss adjustment on DCN314
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index 5dcbaa2db964..e97d964a1791 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -57,7 +57,7 @@ struct panel_cntl_funcs {
struct panel_cntl_init_data {
struct dc_context *ctx;
uint32_t inst;
- uint32_t pwrseq_inst;
+ uint32_t eng_id;
};
struct panel_cntl {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 1d51fed12e20..77a60aa9f27b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe);
int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe);
/*
- * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
- * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for a plane with MPCC combine. otherwise
- * the number of MPC "cuts" for the plane.
+ * Get the number of MPC slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an MPC combine
+ * pipe topology.
*/
-int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head);
+int resource_get_mpc_slice_count(const struct pipe_ctx *pipe);
/*
- * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
- * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for ODM combine. otherwise
- * the number of ODM "cuts" for the timing.
+ * Get the number of ODM slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an ODM combine
+ * pipe topology.
*/
-int resource_get_odm_slice_count(const struct pipe_ctx *otg_master);
+int resource_get_odm_slice_count(const struct pipe_ctx *pipe);
/* Get the ODM slice index counting from 0 from left most slice */
int resource_get_odm_slice_index(const struct pipe_ctx *opp_head);
@@ -609,9 +605,6 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
struct pipe_ctx *sec_pipe,
bool odm);
-bool resource_subvp_in_use(struct dc *dc,
- struct dc_state *context);
-
/* A test harness interface that modifies dp encoder resources in the given dc
* state and bypasses the need to revalidate. The interface assumes that the
* test harness interface is called with pre-validated link config stored in the
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 5fe8b4871c77..3cbfbf8d107e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -900,11 +900,15 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc_stream_state *stream = pipe_ctx->stream;
- DC_LOGGER_INIT(dsc->ctx->logger);
- if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+ if (!pipe_ctx->stream->timing.flags.DSC)
return false;
+ if (!dsc)
+ return false;
+
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
if (enable) {
struct dsc_config dsc_cfg;
uint8_t dsc_packed_pps[128];
@@ -2005,17 +2009,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
}
}
- /*
- * If the link is DP-over-USB4 do the following:
- * - Train with fallback when enabling DPIA link. Conventional links are
+ /* Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
- * - Allocate only what the stream needs for bw in Gbps. Inform the CM
- * in case stream needs more or less bw from what has been allocated
- * earlier at plug time.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
do_fallback = true;
- }
/*
* Temporary w/a to get DP2.0 link rates to work with SST.
@@ -2197,6 +2195,32 @@ static enum dc_status enable_link(
return status;
}
+static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
+{
+ return true;
+}
+
+static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+ bool ret;
+
+ int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(stream->sink->link));
+
+ ret = allocate_usb4_bandwidth_for_stream(stream, bw);
+
+ return ret;
+}
+
+static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
+{
+ bool ret;
+
+ ret = allocate_usb4_bandwidth_for_stream(stream, 0);
+
+ return ret;
+}
+
void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
@@ -2232,6 +2256,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ deallocate_usb4_bandwidth(pipe_ctx->stream);
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
@@ -2474,6 +2501,9 @@ void link_set_dpms_on(
}
}
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ allocate_usb4_bandwidth(pipe_ctx->stream);
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
allocate_mst_payload(pipe_ctx);
else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
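link_set_dpms_on()/off() now bracket DPIA streams with a bandwidth allocation and a release; note that allocate_usb4_bandwidth_for_stream() is still a stub returning true in this patch, so only the call shape is fixed here. A minimal model of that shape, where request_tunnel_bw() is a hypothetical stand-in for the eventual CM request:

#include <stdbool.h>

/* hypothetical stand-in for the CM request; the patch stubs the real one too */
static bool request_tunnel_bw(int bw_kbps)
{
	(void)bw_kbps;
	return true;
}

static bool set_stream_tunnel_bw(int stream_kbps, bool enable)
{
	/* deallocation is just an allocation of zero bandwidth */
	return request_tunnel_bw(enable ? stream_kbps : 0);
}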
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 37d3027c32dc..cf22b8f28ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
-{
- uint8_t pwrseq_inst = 0xF;
- struct dc_context *dc_ctx = link->dc->ctx;
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- switch (link->eng_id) {
- case ENGINE_ID_DIGA:
- pwrseq_inst = 0;
- break;
- case ENGINE_ID_DIGB:
- pwrseq_inst = 1;
- break;
- default:
- DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
- ASSERT(false);
- break;
- }
-
- return pwrseq_inst;
-}
-
-
static void link_destruct(struct dc_link *link)
{
int i;
@@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link,
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
- panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
+ panel_cntl_init_data.eng_id = link->eng_id;
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index b45fda96eaf6..5b0bc7f6a188 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
+/*
+ * This function calculates the bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective dpia link
+ *
+ * @stream: pointer to the dc_stream_state struct instance
+ * @num_streams: number of streams to be validated
+ *
+ * return: true if validation succeeds
+ */
bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
{
- bool ret = true;
- int bw_needed[MAX_DPIA_NUM];
- struct dc_link *link[MAX_DPIA_NUM];
+ int bw_needed[MAX_DPIA_NUM] = {0};
+ struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
+ int num_dpias = 0;
+
+ for (unsigned int i = 0; i < num_streams; ++i) {
+ if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ /* new dpia sst stream, check whether it exceeds max dpia */
+ if (num_dpias >= MAX_DPIA_NUM)
+ return false;
- if (!num_streams || num_streams > MAX_DPIA_NUM)
- return ret;
+ dpia_link[num_dpias] = stream[i].link;
+ bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+ dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
+ num_dpias++;
+ } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ uint8_t j = 0;
+ /* check whether it's a known dpia link */
+ for (; j < num_dpias; ++j) {
+ if (dpia_link[j] == stream[i].link)
+ break;
+ }
+
+ if (j == num_dpias) {
+ /* new dpia mst stream, check whether it exceeds max dpia */
+ if (num_dpias >= MAX_DPIA_NUM)
+ return false;
+ else {
+ dpia_link[j] = stream[i].link;
+ num_dpias++;
+ }
+ }
+
+ bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+ dc_link_get_highest_encoding_format(dpia_link[j]));
+ }
+ }
- for (uint8_t i = 0; i < num_streams; ++i) {
+ /* Include dp overheads */
+ for (uint8_t i = 0; i < num_dpias; ++i) {
+ int dp_overhead = 0;
- link[i] = stream[i].link;
- bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(link[i]));
+ dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
+ bw_needed[i] += dp_overhead;
}
- ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
-
- return ret;
+ return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 982eda3c46f5..6af42ba9885c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -82,25 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link)
{
union dmub_rb_cmd cmd = {0};
struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
- bool is_hpd_high = false;
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
- /* Return HPD status reported by DMUB if query successfully executed. */
- if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
- cmd.query_hpd.data.status == AUX_RET_SUCCESS)
- is_hpd_high = cmd.query_hpd.data.result;
-
- DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
- __func__,
- link->link_index,
- link->link_id.enum_id - ENUM_ID_1,
- cmd.query_hpd.data.status,
- cmd.query_hpd.data.result);
-
- return is_hpd_high;
+ /* Query dpia hpd status from dmub */
+ if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ cmd.query_hpd.data.status == AUX_RET_SUCCESS) {
+ DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->hpd_status,
+ cmd.query_hpd.data.result);
+ link->hpd_status = cmd.query_hpd.data.result;
+ } else {
+ DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ cmd.query_hpd.data.status,
+ link->hpd_status);
+ link->hpd_status = false;
+ }
+
+ return link->hpd_status;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index a7aa8c9da868..5491b707cec8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -54,12 +54,18 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
static void reset_bw_alloc_struct(struct dc_link *link)
{
link->dpia_bw_alloc_config.bw_alloc_enabled = false;
- link->dpia_bw_alloc_config.sink_verified_bw = 0;
- link->dpia_bw_alloc_config.sink_max_bw = 0;
+ link->dpia_bw_alloc_config.link_verified_bw = 0;
+ link->dpia_bw_alloc_config.link_max_bw = 0;
+ link->dpia_bw_alloc_config.allocated_bw = 0;
link->dpia_bw_alloc_config.estimated_bw = 0;
link->dpia_bw_alloc_config.bw_granularity = 0;
+ link->dpia_bw_alloc_config.dp_overhead = 0;
link->dpia_bw_alloc_config.response_ready = false;
- link->dpia_bw_alloc_config.sink_allocated_bw = 0;
+ link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
+ link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
+ for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
+ link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
+ DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
}
#define BW_GRANULARITY_0 4 // 0.25 Gbps
@@ -190,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
- for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
+ for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
@@ -210,8 +216,8 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
(link_dpia_secondary->hpd_status &&
link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
- total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
- link_dpia_secondary->dpia_bw_alloc_config.sink_allocated_bw;
+ total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+ link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
} else if (link_dpia_primary->hpd_status &&
link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
@@ -264,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
/* Error check whether requested and allocated are equal */
req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
- if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) {
+ if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
__func__, link->link_index);
}
@@ -387,9 +393,9 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
__func__, link->link_index);
DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
- __func__, link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed);
+ __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
- link->dpia_bw_alloc_config.sink_allocated_bw = bw_needed;
+ link->dpia_bw_alloc_config.allocated_bw = bw_needed;
link->dpia_bw_alloc_config.response_ready = true;
break;
@@ -427,8 +433,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
if (link->hpd_status && peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
- link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+ link->dpia_bw_alloc_config.link_max_bw = peak_bw;
+ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
do {
if (timeout > 0)
@@ -440,8 +446,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
if (!timeout)
ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
- else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
- ret = link->dpia_bw_alloc_config.sink_allocated_bw;
+ else if (link->dpia_bw_alloc_config.allocated_bw > 0)
+ ret = link->dpia_bw_alloc_config.allocated_bw;
}
//2. Cold Unplug
else if (!link->hpd_status)
@@ -450,7 +456,6 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
out:
return ret;
}
-
bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
{
bool ret = false;
@@ -458,7 +463,7 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
__func__, link->link_index, link->hpd_status,
- link->dpia_bw_alloc_config.sink_allocated_bw, req_bw);
+ link->dpia_bw_alloc_config.allocated_bw, req_bw);
if (!get_bw_alloc_proceed_flag(link))
goto out;
@@ -523,3 +528,30 @@ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const
return ret;
}
+
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
+{
+ int dp_overhead = 0, link_mst_overhead = 0;
+
+ if (!get_bw_alloc_proceed_flag((link)))
+ return dp_overhead;
+
+ /* if it's an MST link, add MTPH overhead */
+ if ((link->type == dc_connection_mst_branch) &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ /* For 8b/10b encoding: an MTP is 64 time slots long and slot 0 carries the
+ * MTPH, so MST overhead is 1/64 of link bandwidth (excluding any other overhead)
+ */
+ const struct dc_link_settings *link_cap =
+ dc_link_get_link_cap(link);
+ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
+ (uint32_t)link_cap->lane_count *
+ LINK_RATE_REF_FREQ_IN_KHZ * 8;
+ link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
+ }
+
+ /* add all the overheads */
+ dp_overhead = link_mst_overhead;
+
+ return dp_overhead;
+}
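
To put a number on the MTPH term above: a standalone sketch with an illustrative HBR2 x4 configuration (the link_rate value and the 27 MHz reference are assumptions mirroring DC's conventions).

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000	/* assumed 27 MHz DP reference */

int main(void)
{
	uint32_t link_rate = 20;	/* HBR2: 20 x 270 Mbps per lane (illustrative) */
	uint32_t lane_count = 4;
	uint32_t link_bw_in_kbps = link_rate * lane_count *
				   LINK_RATE_REF_FREQ_IN_KHZ * 8;
	/* 1/64 of link bandwidth, rounded up, exactly as in the function above */
	uint32_t mtph = link_bw_in_kbps / 64 + (link_bw_in_kbps % 64 ? 1 : 0);

	/* prints: link 17280000 kbps -> MTPH overhead 270000 kbps */
	printf("link %u kbps -> MTPH overhead %u kbps\n", link_bw_in_kbps, mtph);
	return 0;
}
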
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 981bc4eb6120..3b6d8494f9d5 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
*/
bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
+/*
+ * Obtain all the DP overheads in DP tunneling for the DPIA link
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: DP overheads in DP tunneling
+ */
+int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
+
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 5a0b04518956..16a62e018712 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
union lane_status lane_status;
+ union lane_align_status_updated dpcd_lane_status_updated;
uint8_t dpcd_buf[6] = {0};
uint32_t lane;
@@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status(
* check lanes status
*/
lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
+ dpcd_lane_status_updated.raw = dpcd_buf[4];
if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
!lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
+ !lane_status.bits.SYMBOL_LOCKED_0 ||
+ !dp_is_interlane_aligned(dpcd_lane_status_updated)) {
/* if one of the channel equalization, clock
* recovery or symbol lock is dropped
* consider it as (link has been
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index e8dda44b23cb..5d36bab0029c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
uint32_t retries_eq = 0;
enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
- uint32_t wait_time_microsec;
+ uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index 5c9a30211c10..fc50931c2aec 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd(
uint32_t extended_size;
/* size of the remaining partitioned address space */
uint32_t size_left_to_read;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
/* size of the next partition to be read from */
uint32_t partition_size;
uint32_t data_index = 0;
@@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd(
{
uint32_t partition_size;
uint32_t data_index = 0;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
while (size) {
partition_size = dpcd_get_next_partition_size(address, size);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 7f1196528218..046d3e205415 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -930,8 +930,8 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
/* To-do: Setup Replay */
- struct dc *dc = link->ctx->dc;
- struct dmub_replay *replay = dc->res_pool->replay;
+ struct dc *dc;
+ struct dmub_replay *replay;
int i;
unsigned int panel_inst;
struct replay_context replay_context = { 0 };
@@ -947,6 +947,10 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
if (!link)
return false;
+ dc = link->ctx->dc;
+
+ replay = dc->res_pool->replay;
+
if (!replay)
return false;
@@ -975,8 +979,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
replay_context.line_time_in_ns = lineTimeInNs;
- if (replay)
- link->replay_settings.replay_feature_enabled =
+ link->replay_settings.replay_feature_enabled =
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
if (link->replay_settings.replay_feature_enabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 91ea0d4da06a..823493543325 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -166,12 +166,6 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* disable otg request until end of the first line
- * in the vertical blank region
- */
- REG_UPDATE(OTG_CONTROL,
- OTG_MASTER_EN, 0);
-
REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
OPTC_SEG0_SRC_SEL, 0xf,
OPTC_SEG1_SRC_SEL, 0xf,
@@ -179,6 +173,15 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
OPTC_SEG3_SRC_SEL, 0xf,
OPTC_NUM_OF_INPUT_SEGMENT, 0);
+ REG_UPDATE(OPTC_MEMORY_CONFIG,
+ OPTC_MEM_SEL, 0);
+
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+ REG_UPDATE(OTG_CONTROL,
+ OTG_MASTER_EN, 0);
+
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
@@ -205,6 +208,13 @@ static void optc32_disable_phantom_otg(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, 0xf,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 08a59cf449ca..5b1547508850 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -138,12 +138,6 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- /* disable otg request until end of the first line
- * in the vertical blank region
- */
- REG_UPDATE(OTG_CONTROL,
- OTG_MASTER_EN, 0);
-
REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
OPTC_SEG0_SRC_SEL, 0xf,
OPTC_SEG1_SRC_SEL, 0xf,
@@ -151,6 +145,15 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
OPTC_SEG3_SRC_SEL, 0xf,
OPTC_NUM_OF_INPUT_SEGMENT, 0);
+ REG_UPDATE(OPTC_MEMORY_CONFIG,
+ OPTC_MEM_SEL, 0);
+
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+ REG_UPDATE(OTG_CONTROL,
+ OTG_MASTER_EN, 0);
+
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 511ff6b5b985..7538b548c572 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt) {
+ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index ac04a9c9a3d8..6f10052caeef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context(
dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (dc->config.enable_windowed_mpo_odm &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ switch (resource_get_odm_slice_count(pipe)) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ break;
+ case 4:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
+ } else {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
@@ -1899,7 +1913,7 @@ int dcn32_populate_dml_pipes_from_context(
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
- .get_subvp_en = resource_subvp_in_use,
+ .get_subvp_en = dcn32_subvp_in_use,
};
void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 62611acd4bcb..0c87b0fabba7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -131,6 +131,9 @@ void dcn32_merge_pipes_for_subvp(struct dc *dc,
bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct dc_state *context);
+bool dcn32_subvp_in_use(struct dc *dc,
+ struct dc_state *context);
+
bool dcn32_mpo_in_use(struct dc_state *context);
bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index e1ab207c46f1..74412e5f03fe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1574,7 +1574,7 @@ static void dcn321_destroy_resource_pool(struct resource_pool **pool)
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
- .get_subvp_en = resource_subvp_in_use,
+ .get_subvp_en = dcn32_subvp_in_use,
};
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 761ec9891875..5fdcda8f8602 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -780,8 +780,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400,
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
.static_screen_wait_frames = 2,
};
@@ -2130,6 +2130,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c64b6c848ef7..e699731ee68e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -2832,6 +2832,7 @@ struct dmub_rb_cmd_psr_set_power_opt {
#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_MODE_IPS 0x10
#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
@@ -2894,6 +2895,10 @@ enum dmub_cmd_replay_type {
* Set Residency Frameupdate Timer.
*/
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER = 6,
+ /**
+ * Set pseudo vtotal
+ */
+ DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
};
/**
@@ -3077,6 +3082,26 @@ struct dmub_cmd_replay_set_timing_sync_data {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_cmd_replay_set_pseudo_vtotal {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which replay_state to use
+ * Currently only panel instance 0 or 1 is supported
+ */
+ uint8_t panel_inst;
+ /**
+ * Source vtotal used by Replay + IPS + ABM full screen video
+ */
+ uint16_t vtotal;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
+/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
*/
struct dmub_rb_cmd_replay_set_power_opt {
@@ -3157,6 +3182,20 @@ struct dmub_rb_cmd_replay_set_timing_sync {
};
/**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_pseudo_vtotal {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal data;
+};
+
+/**
* Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_cmd_replay_frameupdate_timer_data {
@@ -3207,6 +3246,10 @@ union dmub_replay_cmd_set {
* Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data.
*/
struct dmub_cmd_replay_frameupdate_timer_data timer_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
};
/**
@@ -4358,6 +4401,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+ struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
};
/**
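
For orientation, this is roughly how the driver side would package the new replay command. A hedged sketch only: the fill-and-execute call follows dc_wake_and_execute_dmub_cmd() as used in the dpia_query_hpd_status() hunk above, and the header type/sub_type wiring is assumed to follow the usual DMUB replay convention.

/* Sketch: send DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL for one panel. */
static void replay_send_pseudo_vtotal(struct dc *dc, uint8_t panel_inst,
				      uint16_t vtotal)
{
	union dmub_rb_cmd cmd = {0};

	cmd.replay_set_pseudo_vtotal.header.type = DMUB_CMD__REPLAY;
	cmd.replay_set_pseudo_vtotal.header.sub_type =
		DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
	cmd.replay_set_pseudo_vtotal.header.payload_bytes =
		sizeof(cmd.replay_set_pseudo_vtotal.data);
	cmd.replay_set_pseudo_vtotal.data.panel_inst = panel_inst;
	cmd.replay_set_pseudo_vtotal.data.vtotal = vtotal;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
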
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
index 66a54da0641c..915a031a43cb 100644
--- a/drivers/gpu/drm/amd/display/include/audio_types.h
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -64,7 +64,7 @@ enum audio_dto_source {
/* PLL information required for AZALIA DTO calculation */
struct audio_pll_info {
- uint32_t dp_dto_source_clock_in_khz;
+ uint32_t audio_dto_source_clock_in_khz;
uint32_t feed_back_divider;
enum audio_dto_source dto_source;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index ad98e504c00d..e304e8435fb8 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -980,6 +980,11 @@ void set_replay_coasting_vtotal(struct dc_link *link,
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+{
+ link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+}
+
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal)
{
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index c17bbc6fb38c..bef4815e1703 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -57,6 +57,7 @@ void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint16_t vtotal);
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 1dc5dd9b7bf7..df2c7ffe190f 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -258,6 +258,7 @@ enum DC_DEBUG_MASK {
DC_ENABLE_DML2 = 0x100,
DC_DISABLE_PSR_SU = 0x200,
DC_DISABLE_REPLAY = 0x400,
+ DC_DISABLE_IPS = 0x800,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/amdgpu_reg_state.h b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
index be519c8edf49..335980e2afbf 100644
--- a/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
+++ b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
@@ -138,7 +138,7 @@ static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size,
}
#define amdgpu_asic_get_reg_state_supported(adev) \
- ((adev)->asic_funcs->get_reg_state ? 1 : 0)
+ (((adev)->asic_funcs && (adev)->asic_funcs->get_reg_state) ? 1 : 0)
#define amdgpu_asic_get_reg_state(adev, state, buf, size) \
((adev)->asic_funcs->get_reg_state ? \
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
index 7ee3d291120d..6f80bfa7e41a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
@@ -8707,10 +8707,10 @@
#define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX 2
#define regBIF_BX1_BX_RESET_CNTL 0x00f0
#define regBIF_BX1_BX_RESET_CNTL_BASE_IDX 2
-#define regBIF_BX1_INTERRUPT_CNTL 0x8e11
-#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 5
-#define regBIF_BX1_INTERRUPT_CNTL2 0x8e12
-#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 5
+#define regBIF_BX1_INTERRUPT_CNTL 0x00f1
+#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 2
+#define regBIF_BX1_INTERRUPT_CNTL2 0x00f2
+#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 2
#define regBIF_BX1_CLKREQB_PAD_CNTL 0x00f8
#define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX 2
#define regBIF_BX1_BIF_FEATURES_CONTROL_MISC 0x00fb
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f3cb490fe79b..087d57850304 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -4349,11 +4349,19 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
+ if (adev->flags & AMD_IS_APU)
+ seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
+ else
+ seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
+ }
size = sizeof(uint32_t);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
- seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
+ if (adev->flags & AMD_IS_APU)
+ seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
+ else
+ seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
+ }
size = sizeof(value);
seq_printf(m, "\n");
@@ -4379,9 +4387,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* VCN clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "VCN: Disabled\n");
+ seq_printf(m, "VCN: Powered down\n");
} else {
- seq_printf(m, "VCN: Enabled\n");
+ seq_printf(m, "VCN: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4393,9 +4401,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* UVD clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "UVD: Disabled\n");
+ seq_printf(m, "UVD: Powered down\n");
} else {
- seq_printf(m, "UVD: Enabled\n");
+ seq_printf(m, "UVD: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
@@ -4407,9 +4415,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
/* VCE clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
if (!value) {
- seq_printf(m, "VCE: Disabled\n");
+ seq_printf(m, "VCE: Powered down\n");
} else {
- seq_printf(m, "VCE: Enabled\n");
+ seq_printf(m, "VCE: Powered up\n");
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
}
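
The `query >> 8, query & 0xff` pairs above decode the sensor's fixed-point watt reading: the high bits are whole watts and the low byte is the fractional field printed as two digits. A standalone sketch with a made-up raw value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t query = (37u << 8) | 0x40;	/* made-up raw sensor reading */

	/* same decode as the seq_printf() calls above; prints "37.64 W" */
	printf("%u.%02u W\n", query >> 8, query & 0xff);
	return 0;
}
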
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index df4f20293c16..eb4da3666e05 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = si_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ ret = si_set_temperature_range(adev);
+ if (ret)
+ return ret;
+#if 0 //TODO ?
+ si_dpm_powergate_uvd(adev, true);
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index f2a55c1413f5..17882f8dfdd3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
struct pp_hwmgr *hwmgr,
ATOM_Tonga_PPM_Table *atom_ppm_table)
{
- struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
+ struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index b1a8799e2dee..aa91730e4eaf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3999,6 +3999,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
uint32_t sclk, mclk, activity_percent;
uint32_t offset, val_vid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -4042,7 +4043,21 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
- return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
+ else
+ return -EOPNOTSUPP;
+ case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA))
+ return -EOPNOTSUPP;
+ else
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
(VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index c16703868e5c..0ad947df777a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/power_supply.h>
#include <linux/reboot.h>
#include "amdgpu.h"
@@ -733,7 +734,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_NONE;
+ smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -817,16 +818,8 @@ static int smu_late_init(void *handle)
* handle the switch automatically. Driver involvement
* is unnecessary.
*/
- if (!smu->dc_controlled_by_gpio) {
- ret = smu_set_power_source(smu,
- adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
- SMU_POWER_SOURCE_DC);
- if (ret) {
- dev_err(adev->dev, "Failed to switch to %s mode!\n",
- adev->pm.ac_power ? "AC" : "DC");
- return ret;
- }
- }
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
+ smu_set_ac_dc(smu);
if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
(amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
@@ -1961,31 +1954,10 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
-static int smu_reset_mp1_state(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- if ((!adev->in_runpm) && (!adev->in_suspend) &&
- (!amdgpu_in_reset(adev)))
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 0):
- case IP_VERSION(13, 0, 7):
- case IP_VERSION(13, 0, 10):
- ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -2003,15 +1975,7 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- ret = smu_smc_hw_cleanup(smu);
- if (ret)
- return ret;
-
- ret = smu_reset_mp1_state(smu);
- if (ret)
- return ret;
-
- return 0;
+ return smu_smc_hw_cleanup(smu);
}
static void smu_late_fini(void *handle)
@@ -2710,6 +2674,7 @@ int smu_get_power_limit(void *handle,
case SMU_PPT_LIMIT_CURRENT:
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 2aa4fea87314..66e84defd0b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -424,7 +424,6 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
- SMU_BACO_STATE_NONE,
};
struct smu_baco_context {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 4cd43bbec910..bcad42534da4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 8d1d29ffb0f1..ed189a3878eb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 21fc033528fa..e2ad2b972ab0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 5a314d0316c1..c7bfa68bf00f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1442,10 +1442,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
schedule_work(&smu->interrupt_work);
+ adev->pm.ac_power = true;
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
schedule_work(&smu->interrupt_work);
+ adev->pm.ac_power = false;
break;
case 0x7:
/*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 771a3d457c33..c486182ff275 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1379,10 +1379,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
+ adev->pm.ac_power = true;
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
+ adev->pm.ac_power = false;
break;
case 0x7:
/*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a9b25faa63e4..9b80f18ea6c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2357,6 +2357,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2368,19 +2369,18 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}
@@ -2747,13 +2747,7 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_PrepareMp1ForUnload,
- 0x55, NULL);
-
- if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
- ret = smu_v13_0_disable_pmfw_state(smu);
-
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */
@@ -2949,7 +2943,7 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
return smu->smc_fw_version >= 0x004e6300;
case IP_VERSION(13, 0, 10):
@@ -2959,6 +2953,55 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
}
}
+static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3013,7 +3056,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_0_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_0_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
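
The limit plumbing added to this file reduces to two formulas: the advertised maximum scales the firmware message limit by the overdrive headroom, and a requested limit above that message limit becomes a Ppt overdrive percentage. A standalone arithmetic sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg_limit = 300;	/* firmware PPT0 AC limit in watts (made up) */
	uint32_t od_percent_upper = 15;	/* overdrive headroom from the pptable */
	uint32_t limit = 330;		/* user-requested limit in watts */

	/* advertised max, as in smu_v13_0_0_get_power_limit() above */
	uint32_t max_power_limit = msg_limit * (100 + od_percent_upper) / 100;

	/* limit above msg_limit -> Ppt percent, as in smu_v13_0_0_set_power_limit() */
	int32_t ppt = (int32_t)(limit * 100 / msg_limit) - 100;

	/* prints: max 345 W, Ppt +10% for 330 W */
	printf("max %u W, Ppt +%d%% for %u W\n", max_power_limit, ppt, limit);
	return 0;
}
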
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index cc2a02ceea85..7e1941cf1796 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -160,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
- MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
- MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
@@ -970,7 +970,9 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
if (i < (clocks.num_levels - 1))
clk2 = clocks.data[i + 1].clocks_in_khz / 1000;
- if (curr_clk >= clk1 && curr_clk < clk2) {
+ if (curr_clk == clk1) {
+ level = i;
+ } else if (curr_clk >= clk1 && curr_clk < clk2) {
level = (curr_clk - clk1) <= (clk2 - curr_clk) ?
i :
i + 1;
@@ -2234,17 +2236,18 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
continue;
}
- if (ret) {
- dev_err(adev->dev,
- "failed to send mode2 message \tparam: 0x%08x error code %d\n",
- SMU_RESET_MODE_2, ret);
+ if (ret)
goto out;
- }
+
} while (ret == -ETIME && timeout);
out:
mutex_unlock(&smu->message_lock);
+ if (ret)
+ dev_err(adev->dev, "failed to send mode2 reset, error code %d\n",
+ ret);
+
return ret;
}
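
The exact-match branch added to smu_v13_0_6_print_clks() pins the level index when the current clock sits exactly on a DPM level instead of relying on the half-open range test. A standalone sketch of the selection rule with made-up levels:

#include <stdint.h>
#include <stdio.h>

/* Pick the index of the level closest to curr_clk, preferring an exact
 * match -- levels are assumed to ascend, mirroring the loop above. */
static int pick_level(const uint32_t *levels, int n, uint32_t curr_clk)
{
	for (int i = 0; i < n - 1; ++i) {
		uint32_t clk1 = levels[i], clk2 = levels[i + 1];

		if (curr_clk == clk1)
			return i;
		if (curr_clk >= clk1 && curr_clk < clk2)
			return (curr_clk - clk1) <= (clk2 - curr_clk) ? i : i + 1;
	}
	return n - 1;
}

int main(void)
{
	const uint32_t levels[] = {500, 800, 1200};	/* MHz, made up */

	/* prints: 1 1 2 */
	printf("%d %d %d\n", pick_level(levels, 3, 800),
	       pick_level(levels, 3, 950), pick_level(levels, 3, 1300));
	return 0;
}
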
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 59606a19e3d2..3dc7b60cb075 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2321,6 +2321,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2332,19 +2333,18 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}
@@ -2504,13 +2504,7 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_PrepareMp1ForUnload,
- 0x55, NULL);
-
- if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
- ret = smu_v13_0_disable_pmfw_state(smu);
-
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */
@@ -2545,6 +2539,55 @@ static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
return smu->smc_fw_version > 0x00524600;
}
+static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2596,7 +2639,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_7_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_7_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index ef31033439bc..29d91493b101 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1762,6 +1762,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
u8 request = msg->request & ~DP_AUX_I2C_MOT;
int ret = 0;
+ mutex_lock(&ctx->aux_lock);
pm_runtime_get_sync(dev);
msg->reply = 0;
switch (request) {
@@ -1778,6 +1779,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
msg->size, msg->buffer);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&ctx->aux_lock);
return ret;
}
@@ -2474,7 +2476,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
ctx->connector = NULL;
anx7625_dp_stop(ctx);
- pm_runtime_put_sync(dev);
+ mutex_lock(&ctx->aux_lock);
+ pm_runtime_put_sync_suspend(dev);
+ mutex_unlock(&ctx->aux_lock);
}
static enum drm_connector_status
@@ -2668,6 +2672,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
mutex_init(&platform->lock);
mutex_init(&platform->hdcp_wq_lock);
+ mutex_init(&platform->aux_lock);
INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 66ebee7f3d83..39ed35d33836 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -475,6 +475,8 @@ struct anx7625_data {
struct workqueue_struct *hdcp_workqueue;
/* Lock for hdcp work queue */
struct mutex hdcp_wq_lock;
+ /* Lock for aux transfer and disable */
+ struct mutex aux_lock;
char edid_block;
struct display_timing dt;
u8 display_timing_valid;
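
anx7625 above and ps8640 below gain the same shape of fix: the AUX transfer path and the bridge disable path now serialize on one mutex, so disable cannot force a runtime suspend while a transfer still holds the device awake. A hedged userspace analog of that ordering, with a pthread mutex standing in for the kernel mutex and a counter for the runtime PM usage count:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t aux_lock = PTHREAD_MUTEX_INITIALIZER;
static int pm_refcount;			/* stands in for runtime PM usage count */

static void aux_transfer(void)
{
	pthread_mutex_lock(&aux_lock);
	pm_refcount++;			/* pm_runtime_get_sync() */
	puts("transfer with device awake");
	pm_refcount--;			/* pm_runtime_put_autosuspend() */
	pthread_mutex_unlock(&aux_lock);
}

static void bridge_disable(void)
{
	pthread_mutex_lock(&aux_lock);	/* waits out any in-flight transfer */
	if (pm_refcount == 0)		/* pm_runtime_put_sync_suspend() is safe */
		puts("suspending");
	pthread_mutex_unlock(&aux_lock);
}

int main(void)
{
	aux_transfer();
	bridge_disable();
	return 0;
}
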
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index bb55f697a181..6886db2d9e00 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev)
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
kfree(adev);
}
-static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+static void drm_aux_hpd_bridge_free_adev(void *_adev)
{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
+ auxiliary_device_uninit(_adev);
}
/**
- * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * devm_drm_dp_hpd_bridge_alloc - allocate an HPD DisplayPort bridge
* @parent: device instance providing this bridge
* @np: device node pointer corresponding to this bridge instance
*
@@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
* DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
* able to send the HPD events.
*
- * Return: device instance that will handle created bridge or an error code
- * encoded into the pointer.
+ * Return: bridge auxiliary device pointer or an error pointer
*/
-struct device *drm_dp_hpd_bridge_register(struct device *parent,
- struct device_node *np)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
{
struct auxiliary_device *adev;
int ret;
@@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
kfree(adev);
return ERR_PTR(ret);
}
- ret = auxiliary_device_add(adev);
- if (ret) {
- auxiliary_device_uninit(adev);
+ ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
+ if (ret)
return ERR_PTR(ret);
- }
- ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+ return adev;
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
+
+static void drm_aux_hpd_bridge_del_adev(void *_adev)
+{
+ auxiliary_device_delete(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_add - register an HPD DisplayPort bridge
+ * @dev: struct device to tie registration lifetime to
+ * @adev: bridge auxiliary device to be registered
+ *
+ * Returns: zero on success or a negative errno
+ */
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+ int ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
+
+/**
+ * drm_dp_hpd_bridge_register - allocate and register an HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Return: device instance that will handle created bridge or an error pointer
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
+ if (IS_ERR(adev))
+ return ERR_CAST(adev);
+
+ ret = devm_drm_dp_hpd_bridge_add(parent, adev);
if (ret)
return ERR_PTR(ret);
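
The alloc/add split lets a consumer reserve the auxiliary device early and publish it only once its own state is ready, with both steps devm-managed. A hedged usage sketch — the probe function is hypothetical; only the two exported helpers come from this patch:

/* Hypothetical consumer probe using the new two-step API. */
static int example_probe(struct device *dev, struct device_node *np)
{
	struct auxiliary_device *adev;

	adev = devm_drm_dp_hpd_bridge_alloc(dev, np);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	/* ... set up driver state the bridge will rely on ... */

	return devm_drm_dp_hpd_bridge_add(dev, adev);
}
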
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 541e4f5afc4c..14d4dcf239da 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -107,6 +107,7 @@ struct ps8640 {
struct device_link *link;
bool pre_enabled;
bool need_post_hpd_delay;
+ struct mutex aux_lock;
};
static const struct regmap_config ps8640_regmap_config[] = {
@@ -345,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
int ret;
+ mutex_lock(&ps_bridge->aux_lock);
pm_runtime_get_sync(dev);
+ ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
+ if (ret) {
+ pm_runtime_put_sync_suspend(dev);
+ goto exit;
+ }
ret = ps8640_aux_transfer_msg(aux, msg);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+exit:
+ mutex_unlock(&ps_bridge->aux_lock);
+
return ret;
}
@@ -470,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
ps_bridge->pre_enabled = false;
ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+
+ /*
+ * The bridge seems to expect everything to be power cycled during
+ * the disable process, so grab a lock here to make sure
+ * ps8640_aux_transfer() is not holding a runtime PM reference and
+ * preventing the bridge from suspending.
+ */
+ mutex_lock(&ps_bridge->aux_lock);
+
pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
+
+ mutex_unlock(&ps_bridge->aux_lock);
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
@@ -619,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps_bridge)
return -ENOMEM;
+ mutex_init(&ps_bridge->aux_lock);
+
ps_bridge->supplies[0].supply = "vdd12";
ps_bridge->supplies[1].supply = "vdd33";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index be5914caa17d..63a1a0c88be4 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -969,10 +969,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
reg &= ~DSIM_STOP_STATE_CNT_MASK;
reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
-
- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
- reg |= DSIM_FORCE_STOP_STATE;
-
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
@@ -1431,18 +1427,6 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
disable_irq(dsi->irq);
}
-static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
-{
- u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
-
- if (enable)
- reg |= DSIM_FORCE_STOP_STATE;
- else
- reg &= ~DSIM_FORCE_STOP_STATE;
-
- samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
-}
-
static int samsung_dsim_init(struct samsung_dsim *dsi)
{
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1492,9 +1476,6 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
ret = samsung_dsim_init(dsi);
if (ret)
return;
-
- samsung_dsim_set_display_mode(dsi);
- samsung_dsim_set_display_enable(dsi, true);
}
}
@@ -1503,12 +1484,8 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
- samsung_dsim_set_display_mode(dsi);
- samsung_dsim_set_display_enable(dsi, true);
- } else {
- samsung_dsim_set_stop_state(dsi, false);
- }
+ samsung_dsim_set_display_mode(dsi);
+ samsung_dsim_set_display_enable(dsi, true);
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1521,9 +1498,6 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
- samsung_dsim_set_stop_state(dsi, true);
-
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1828,8 +1802,6 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
if (ret)
return ret;
- samsung_dsim_set_stop_state(dsi, false);
-
ret = mipi_dsi_create_packet(&xfer.packet, msg);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 2bdc5b439beb..4560ae9cbce1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x)
return ret;
}
+ ret = sii902x_audio_codec_init(sii902x, dev);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(sii902x->i2c, sii902x);
+
+ sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
+ 1, 0, I2C_MUX_GATE,
+ sii902x_i2c_bypass_select,
+ sii902x_i2c_bypass_deselect);
+ if (!sii902x->i2cmux) {
+ ret = -ENOMEM;
+ goto err_unreg_audio;
+ }
+
+ sii902x->i2cmux->priv = sii902x;
+ ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+ if (ret)
+ goto err_unreg_audio;
+
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x)
drm_bridge_add(&sii902x->bridge);
- sii902x_audio_codec_init(sii902x, dev);
-
- i2c_set_clientdata(sii902x->i2c, sii902x);
+ return 0;
- sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
- 1, 0, I2C_MUX_GATE,
- sii902x_i2c_bypass_select,
- sii902x_i2c_bypass_deselect);
- if (!sii902x->i2cmux)
- return -ENOMEM;
+err_unreg_audio:
+ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+ platform_device_unregister(sii902x->audio.pdev);
- sii902x->i2cmux->priv = sii902x;
- return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+ return ret;
}
static int sii902x_probe(struct i2c_client *client)
@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client)
}
static void sii902x_remove(struct i2c_client *client)
-
{
struct sii902x *sii902x = i2c_get_clientdata(client);
- i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
+ i2c_mux_del_adapters(sii902x->i2cmux);
+
+ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+ platform_device_unregister(sii902x->audio.pdev);
}
static const struct of_device_id sii902x_dt_ids[] = {
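Note the recurring guard in the error path and in sii902x_remove(): sii902x_audio_codec_init() is assumed to leave an ERR_PTR in audio.pdev when codec registration fails, so !PTR_ERR_OR_ZERO() skips platform_device_unregister() in exactly that case. The teardown also now runs in reverse order of the reordered init above (bridge, then i2c mux, then audio codec).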
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index bd6c24d4213c..f7c6b60629c2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -5491,6 +5491,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
* - 0 if the new state is valid
* - %-ENOSPC, if the new state is invalid, because of BW limitation
* @failing_port is set to:
+ *
* - The non-root port where a BW limit check failed
* with all the ports downstream of @failing_port passing
* the BW limit check.
@@ -5499,6 +5500,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
* - %NULL if the BW limit check failed at the root port
* with all the ports downstream of the root port passing
* the BW limit check.
+ *
* - %-EINVAL, if the new state is invalid, because the root port has
* too many payloads.
*/
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index f57e6d74fb0e..5ebdd6f8f36e 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
+ u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
+ if (block_start < start || block_end > end) {
+ u64 adjusted_start = max(block_start, start);
+ u64 adjusted_end = min(block_end, end);
+
+ if (round_down(adjusted_end + 1, req_size) <=
+ round_up(adjusted_start, req_size))
+ continue;
+ }
+
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
@@ -538,7 +548,13 @@ static int __alloc_range(struct drm_buddy *mm,
list_add(&block->left->tmp_link, dfs);
} while (1);
+ if (total_allocated < size) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
list_splice_tail(&allocated, blocks);
+
return 0;
err_undo:
@@ -755,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return -EINVAL;
/* Actual range allocation */
- if (start + size == end)
+ if (start + size == end) {
+ if (!IS_ALIGNED(start | end, min_block_size))
+ return -EINVAL;
+
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+ }
original_size = size;
original_min_size = min_block_size;
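The bias check added to alloc_range_bias() above can be read with concrete numbers. A standalone sketch of the same arithmetic, with illustrative values (a one-chunk request against a block that merely overlaps the bias window):

	#include <stdint.h>
	#include <stdio.h>

	#define round_up(x, y)   ((((x) + (y) - 1) / (y)) * (y))
	#define round_down(x, y) (((x) / (y)) * (y))

	int main(void)
	{
		uint64_t req_size = 0x1000;            /* mm->chunk_size << order */
		uint64_t start = 0x800, end = 0x17ff;  /* bias range */
		uint64_t block_start = 0x0, block_end = 0xfff;
		uint64_t as = block_start > start ? block_start : start;
		uint64_t ae = block_end < end ? block_end : end;

		/* round_down(0x1000, 0x1000) <= round_up(0x800, 0x1000),
		 * i.e. 0x1000 <= 0x1000: no aligned req_size chunk fits,
		 * so the block is skipped. */
		printf("skip=%d\n",
		       round_down(ae + 1, req_size) <= round_up(as, req_size));
		return 0;
	}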
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index cb90e70d85e8..65f9f66933bb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -904,6 +904,7 @@ out:
connector_set = NULL;
fb = NULL;
mode = NULL;
+ num_connectors = 0;
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 834a5e28abbe..7352bde299d5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -820,7 +820,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
if (max_segment == 0)
max_segment = UINT_MAX;
err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
+ (unsigned long)nr_pages << PAGE_SHIFT,
max_segment, GFP_KERNEL);
if (err) {
kfree(sg);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 84101baeecc6..a6c19de46292 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1040,7 +1040,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@@ -1109,7 +1110,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fall through and try a 0 timeout wait!
*/
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
@@ -1416,10 +1418,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
/* This happens inside the syncobj lock */
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence)
+ return;
+
ret = dma_fence_chain_find_seqno(&fence, entry->point);
- if (ret != 0 || !fence) {
+ if (ret != 0) {
+ /* The given seqno has not been submitted yet. */
dma_fence_put(fence);
return;
+ } else if (!fence) {
+ /* If dma_fence_chain_find_seqno returns 0 but sets the fence
+ * to NULL, it implies that the given seqno is signaled and a
+ * later seqno has already been submitted. Assign a stub fence
+ * so that the eventfd still gets signaled below.
+ */
+ fence = dma_fence_get_stub();
}
list_del_init(&entry->node);
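Taken together, syncobj_eventfd_entry_func() now distinguishes three outcomes: no fence installed at all (return and wait for a later trigger), dma_fence_chain_find_seqno() failing (the requested point is ahead of anything submitted, so drop the reference and return), and find_seqno() succeeding but yielding a NULL fence (the point is already signaled, so substitute dma_fence_get_stub() and fall through to signal the eventfd).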
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 776f2f0b602d..0ef7bc8848b0 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -319,9 +319,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
- struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
struct exynos_drm_plane_state *state =
- to_exynos_plane_state(plane.base.state);
+ to_exynos_plane_state(plane->base.state);
unsigned int alpha = state->base.alpha;
unsigned int pixel_alpha;
unsigned long val;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a9f1c5c05894..f2145227a1e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -480,7 +480,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
struct fimd_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
const struct fimd_driver_data *driver_data = ctx->driver_data;
- void *timing_base = ctx->regs + driver_data->timing_base;
+ void __iomem *timing_base = ctx->regs + driver_data->timing_base;
u32 val;
if (ctx->suspended)
@@ -661,9 +661,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
struct drm_framebuffer *fb, int width)
{
- struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
struct exynos_drm_plane_state *state =
- to_exynos_plane_state(plane.base.state);
+ to_exynos_plane_state(plane->base.state);
uint32_t pixel_format = fb->format->format;
unsigned int alpha = state->base.alpha;
u32 val = WINCONx_ENWIN;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index e9a769590415..180507a47700 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1341,7 +1341,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
for (i = 0; i < ctx->num_clocks; i++) {
ret = clk_prepare_enable(ctx->clocks[i]);
if (ret) {
- while (--i > 0)
+ while (--i >= 0)
clk_disable_unprepare(ctx->clocks[i]);
return ret;
}
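The old `while (--i > 0)` never unwound clocks[0]; with `>= 0` the cleanup reaches index 0. A standalone trace, assuming the failure hits at i == 2:

	#include <stdio.h>

	int main(void)
	{
		int i = 2; /* clk_prepare_enable(ctx->clocks[2]) just failed */

		while (--i >= 0)
			printf("clk_disable_unprepare(clocks[%d])\n", i);
		/* prints indices 1 and 0; the old `--i > 0` stopped after 1,
		 * leaking the enable count on clocks[0] */
		return 0;
	}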
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index b5d6e3352071..3089029abba4 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT
Note that this driver only supports newer devices from Broadwell onwards.
For further information and setup guide, you can visit:
- http://01.org/igvt-g.
+ https://github.com/intel/gvt-linux/wiki.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e777686190ca..c13f14edb508 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
# The following turn off the warnings enabled by -Wextra
ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ac456a2275db..eda4a8b88590 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1155,6 +1155,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1255,8 +1256,6 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
-
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f5ef95da5534..ae647d03af25 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2355,6 +2355,9 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_rate = intel_dp_common_rate(intel_dp, 0);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
+ /* FIXME 128b/132b SST support missing */
+ limits->max_rate = min(limits->max_rate, 810000);
+
limits->min_lane_count = 1;
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 8f702c3fc62d..57bbf3e3af92 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1525,8 +1525,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* can rely on frontbuffer tracking.
*/
mask = EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP;
+ EDP_PSR_DEBUG_MASK_HPD;
+
+ /*
+ * For some unknown reason on HSW non-ULT (or at least on
+ * Dell Latitude E6540) external displays start to flicker
+ * when PSR is enabled on the eDP. SR/PC6 residency is much
+ * higher than should be possible with an external display.
+ * As a workaround leave LPSP unmasked to prevent PSR entry
+ * when external displays are active.
+ */
+ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_LPSP;
if (DISPLAY_VER(dev_priv) < 20)
mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index acc6b6804105..2915d7afe5cc 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1209,7 +1209,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_tv_format format;
u32 format_map;
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
@@ -2298,7 +2298,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
* Read the list of supported input resolutions for the selected TV
* format.
*/
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memcpy(&tv_res, &format_map,
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
@@ -2363,7 +2363,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
int i;
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
- if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+ if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
*val = i;
return 0;
@@ -2419,7 +2419,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
- state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+ state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
if (state->crtc) {
struct drm_crtc_state *crtc_state =
@@ -3076,7 +3076,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
drm_property_add_enum(intel_sdvo_connector->tv_format, i,
tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
- intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+ intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d4386cb3569e..992a725de751 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
- int format = conn_state->tv.mode;
+ int format = conn_state->tv.legacy_mode;
return &tv_modes[format];
}
@@ -1704,7 +1704,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
break;
}
- connector->state->tv.mode = i;
+ connector->state->tv.legacy_mode = i;
}
static int
@@ -1859,7 +1859,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
old_state = drm_atomic_get_old_connector_state(state, connector);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
- if (old_state->tv.mode != new_state->tv.mode ||
+ if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
@@ -1896,7 +1896,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
conn_state->tv.margins.right = 46;
conn_state->tv.margins.bottom = 37;
- conn_state->tv.mode = 0;
+ conn_state->tv.legacy_mode = 0;
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@@ -1910,7 +1910,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
drm_object_attach_property(&connector->base,
i915->drm.mode_config.legacy_tv_mode_property,
- conn_state->tv.mode);
+ conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
conn_state->tv.margins.left);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index 64f440fdc22b..8b21dc8e26d5 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -51,8 +51,8 @@
#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
#define _DSCA_PPS_0 0x6B200
#define _DSCC_PPS_0 0x6BA00
-#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + (pps) * 4)
-#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + (pps) * 4)
+#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
+#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
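Worked through, the conditional encodes a 12-dword hole in the register block: DSCA_PPS(11) = 0x6B200 + 11 * 4 = 0x6B22C, but DSCA_PPS(12) = 0x6B200 + (12 + 12) * 4 = 0x6B260, so PPS registers 12 and up sit 0x30 bytes beyond where the old linear layout would have put them.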
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f6c1ece57d..efcb00472be2 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
for (i = start; i < end; i += 4) {
p = intel_gvt_find_mmio_info(gvt, i);
if (p) {
- WARN(1, "dup mmio definition offset %x\n",
- info->offset);
+ WARN(1, "dup mmio definition offset %x\n", i);
/* We return -EEXIST here to make GVT-g load fail.
* So duplicated MMIO can be found as soon as
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index e98b6d69a91a..9b6d87c8b583 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -41,7 +41,7 @@
* To virtualize GPU resources GVT-g driver depends on hypervisor technology
* e.g. KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
* and be virtualized within GVT-g device module. More architectural design
- * doc is available on https://01.org/group/2230/documentation-list.
+ * doc is available on https://github.com/intel/gvt-linux/wiki.
*/
static LIST_HEAD(intel_gvt_devices);
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 3f73b211fa8e..3407450435e2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
- drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 3f93c70488ca..311b91630fbe 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_DSI]) {
meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
drm_bridge_remove(&meson_encoder_dsi->bridge);
- drm_bridge_remove(meson_encoder_dsi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 25ea76558690..c4686568c9ca 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
- drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c0bc924cd302..c9c55e2ea584 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1287,7 +1287,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
+ gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.min_acc_len = 1;
gpu->ubwc_config.ubwc_mode = 1;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 83380bc92a00..6a4b489d44e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -144,10 +144,6 @@ enum dpu_enc_rc_states {
* to track crtc in the disable() hook which is called
* _after_ encoder_mask is cleared.
* @connector: If a mode is set, cached pointer to the active connector
- * @crtc_kickoff_cb: Callback into CRTC that will flush & start
- * all CTL paths
- * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
- * @debugfs_root: Debug file system root file node
* @enc_lock: Lock around physical encoder
* create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
@@ -2072,7 +2068,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
/* reset the merge 3D HW block */
- if (phys_enc->hw_pp->merge_3d) {
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
@@ -2103,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_wb)
intf_cfg.wb = phys_enc->hw_wb->idx;
- if (phys_enc->hw_pp->merge_3d)
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
if (ctl->ops.reset_intf_cfg)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index b58a9c2ae326..724537ab776d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -29,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @topology: selected topology for the display
- * @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
struct msm_display_topology topology;
@@ -204,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
* _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
* @rm: dpu resource manager handle
* @primary_idx: index of primary mixer in rm->mixer_blks[]
+ *
+ * Returns: lm peer mixer id on success or %-EINVAL on error
*/
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 77a8d9366ed7..fb588fde298a 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
tbd = dp_link_get_test_bits_depth(ctrl->link,
ctrl->panel->dp_mode.bpp);
- if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
- pr_debug("BIT_DEPTH not set. Configure default\n");
- tbd = DP_TEST_BIT_DEPTH_8;
- }
-
config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
/* Num of Lanes */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index d37d599aec27..4c72124ffb5d 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -329,10 +329,26 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ struct drm_connector *connector;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ connector = dp->dp_display.connector;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- struct drm_bridge *bridge = dp->dp_display.bridge;
+ if ((hpd && dp->dp_display.link_ready) ||
+ (!hpd && !dp->dp_display.link_ready)) {
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+ (hpd ? "on" : "off"));
+ return 0;
+ }
/* reset video pattern flag on disconnect */
if (!hpd) {
@@ -348,7 +364,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
- drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
+ dp_display_send_hpd_event(&dp->dp_display);
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 98427d45e9a7..49dfac1fd1ef 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -7,6 +7,7 @@
#include <drm/drm_print.h>
+#include "dp_reg.h"
#include "dp_link.h"
#include "dp_panel.h"
@@ -1082,7 +1083,7 @@ int dp_link_process_request(struct dp_link *dp_link)
int dp_link_get_colorimetry_config(struct dp_link *dp_link)
{
- u32 cc;
+ u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
struct dp_link_private *link;
if (!dp_link) {
@@ -1096,10 +1097,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
* Unless a video pattern CTS test is ongoing, use RGB_VESA
* Only RGB_VESA and RGB_CEA supported for now
*/
- if (dp_link_is_video_pattern_requested(link))
- cc = link->dp_link.test_video.test_dyn_range;
- else
- cc = DP_TEST_DYNAMIC_RANGE_VESA;
+ if (dp_link_is_video_pattern_requested(link)) {
+ if (link->dp_link.test_video.test_dyn_range &
+ DP_TEST_DYNAMIC_RANGE_CEA)
+ cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
+ }
return cc;
}
@@ -1179,6 +1181,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
{
u32 tbd;
+ struct dp_link_private *link;
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
/*
* A few simplistic rules and assumptions are made here:
@@ -1196,12 +1201,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
tbd = DP_TEST_BIT_DEPTH_10;
break;
default:
- tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+ drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
+ bpp);
+ tbd = DP_TEST_BIT_DEPTH_8;
break;
}
- if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
- tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
return tbd;
}
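With this change dp_link_get_test_bits_depth() always returns a usable value: bpp 18 maps to 6 bpc, 24 to 8, and 30 to 10 (assuming the usual packed-RGB bpp = 3 × bpc relation for the cases elided from this hunk), while anything else now logs a debug message and falls back to 8 bpc instead of propagating DP_TEST_BIT_DEPTH_UNKNOWN — which is what lets the dp_ctrl_config_ctrl() hunk earlier drop its own fallback.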
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index ea85a691e72b..78785ed4b40c 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -143,6 +143,9 @@
#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0)
+#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04)
+
#define REG_DP_VALID_BOUNDARY (0x00000030)
#define REG_DP_VALID_BOUNDARY_2 (0x00000034)
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 5f68e31a3e4e..0915f3b68752 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
void *vaddr;
- vaddr = msm_gem_get_vaddr(obj);
+ vaddr = msm_gem_get_vaddr_locked(obj);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
iosys_map_set_vaddr(map, vaddr);
@@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- msm_gem_put_vaddr(obj);
+ msm_gem_put_vaddr_locked(obj);
}
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 095390774f22..655002b21b0d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
- pm_runtime_get_sync(&gpu->pdev->dev);
+ WARN_ON(!mutex_is_locked(&gpu->lock));
- mutex_lock(&gpu->lock);
+ pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
+ submit->seqno = submit->hw_fence->seqno;
+
update_sw_cntrs(gpu);
/*
@@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
- hangcheck_timer_reset(gpu);
-
- mutex_unlock(&gpu->lock);
-
pm_runtime_put(&gpu->pdev->dev);
+ hangcheck_timer_reset(gpu);
}
/*
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 5cc8d358cc97..d5512037c38b 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
+ const struct iommu_flush_ops *tlb;
+ struct device *iommu_dev;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
@@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
static void msm_iommu_tlb_flush_all(void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
{
}
-static const struct iommu_flush_ops null_tlb_ops = {
+static const struct iommu_flush_ops tlb_ops = {
.tlb_flush_all = msm_iommu_tlb_flush_all,
.tlb_flush_walk = msm_iommu_tlb_flush_walk,
.tlb_add_page = msm_iommu_tlb_add_page,
@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* The incoming cfg will have the TTBR1 quirk enabled */
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
- ttbr0_cfg.tlb = &null_tlb_ops;
+ ttbr0_cfg.tlb = &tlb_ops;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
- &ttbr0_cfg, iommu->domain);
+ &ttbr0_cfg, pagetable);
if (!pagetable->pgtbl_ops) {
kfree(pagetable);
@@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
+ pagetable->tlb = ttbr1_cfg->tlb;
+ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
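Two details make the new TLB callbacks safe. First, the io-pgtable cookie is now the pagetable itself (see the alloc_io_pgtable_ops() change above), which is how the flush hooks reach the parent's adreno_smmu_priv, saved tlb ops, and iommu_dev. Second, pm_runtime_get_if_in_use() ensures SMMU registers are only touched while the device is demonstrably powered; if it is suspended the flush can be skipped, on the assumption that resume re-establishes a clean TLB.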
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 455b2e3a0cdd..35423d10aafa 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -562,6 +562,7 @@ static const struct msm_mdss_data sdm670_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sdm845_data = {
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 4bc13f7d005a..9d6655f96f0c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_fence_init(submit->hw_fence, fctx);
- submit->seqno = submit->hw_fence->seqno;
-
mutex_lock(&priv->lru.lock);
for (i = 0; i < submit->nr_bos; i++) {
@@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
mutex_unlock(&priv->lru.lock);
+ /* TODO move submit path over to using a per-ring lock.. */
+ mutex_lock(&gpu->lock);
+
msm_gpu_submit(gpu, submit);
+ mutex_unlock(&gpu->lock);
+
return dma_fence_get(submit->hw_fence);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1e6aaf95ff7c..ceef470c9fbf 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM
help
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
+
+config DRM_NOUVEAU_GSP_DEFAULT
+ bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to use the GSP codepaths by default on
+ Turing and Ampere GPUs.
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index d1437c08645f..6f5d376d8fcc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -9,7 +9,7 @@
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
struct nvkm_gsp_mem {
- u32 size;
+ size_t size;
void *data;
dma_addr_t addr;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a04156ca8390..80f74ee0fc78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -128,12 +128,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
struct nouveau_abi16_ntfy *ntfy, *temp;
/* Cancel all jobs from the entity's queue. */
- drm_sched_entity_fini(&chan->sched.entity);
+ if (chan->sched)
+ drm_sched_entity_fini(&chan->sched->entity);
if (chan->chan)
nouveau_channel_idle(chan->chan);
- nouveau_sched_fini(&chan->sched);
+ if (chan->sched)
+ nouveau_sched_destroy(&chan->sched);
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
@@ -197,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
+ struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -261,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
break;
}
+ case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ break;
+ case NOUVEAU_GETPARAM_VRAM_USED: {
+ struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+ getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
+ break;
+ }
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
@@ -337,10 +348,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq,
- chan->chan->dma.ib_max);
- if (ret)
- goto done;
+ /* If we're not using the VM_BIND uAPI, we don't need a scheduler.
+ *
+ * The client lock is already acquired by nouveau_abi16_get().
+ */
+ if (nouveau_cli_uvmm(cli)) {
+ ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
+ chan->chan->dma.ib_max);
+ if (ret)
+ goto done;
+ }
init->channel = chan->chan->chid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 1f5e243c0c75..11c8c4a80079 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
- struct nouveau_sched sched;
+ struct nouveau_sched *sched;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6f6c31a9937b..a947e1d5f309 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -201,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
- nouveau_sched_fini(&cli->sched);
+ if (cli->sched)
+ nouveau_sched_destroy(&cli->sched);
if (uvmm)
nouveau_uvmm_fini(uvmm);
nouveau_vmm_fini(&cli->svm);
@@ -311,7 +312,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->mem = &mems[ret];
/* Don't pass in the (shared) sched_wq in order to let
- * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
+ * nouveau_sched_create() create a dedicated one for VM_BIND jobs.
*
* This is required to ensure that for VM_BIND jobs free_job() work and
* run_job() work can always run concurrently and hence, free_job() work
@@ -320,7 +321,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
* locks which indirectly or directly are held for allocations
* elsewhere.
*/
- ret = nouveau_sched_init(&cli->sched, drm, NULL, 1);
+ ret = nouveau_sched_create(&cli->sched, drm, NULL, 1);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8a6d94c8b163..e239c6bf4afa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -98,7 +98,7 @@ struct nouveau_cli {
bool disabled;
} uvmm;
- struct nouveau_sched sched;
+ struct nouveau_sched *sched;
const struct nvif_mclass *mem;
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index bc5d71b79ab2..e65c0ef23bc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -389,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (ret)
goto out;
- args.sched = &chan16->sched;
+ args.sched = chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 5057d976fa57..93f08f9479d8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- if (atomic_dec_and_test(&fctx->notify_ref))
+ if (!--fctx->notify_ref)
drop = 1;
}
@@ -103,7 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
- cancel_work_sync(&fctx->allow_block_work);
+ cancel_work_sync(&fctx->uevent_work);
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
@@ -146,12 +146,13 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc
return drop;
}
-static int
-nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
+static void
+nouveau_fence_uevent_work(struct work_struct *work)
{
- struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
+ struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
+ uevent_work);
unsigned long flags;
- int ret = NVIF_EVENT_KEEP;
+ int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
if (!list_empty(&fctx->pending)) {
@@ -161,23 +162,20 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
if (nouveau_fence_update(chan, fctx))
- ret = NVIF_EVENT_DROP;
+ drop = 1;
}
- spin_unlock_irqrestore(&fctx->lock, flags);
+ if (drop)
+ nvif_event_block(&fctx->event);
- return ret;
+ spin_unlock_irqrestore(&fctx->lock, flags);
}
-static void
-nouveau_fence_work_allow_block(struct work_struct *work)
+static int
+nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
{
- struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
- allow_block_work);
-
- if (atomic_read(&fctx->notify_ref) == 0)
- nvif_event_block(&fctx->event);
- else
- nvif_event_allow(&fctx->event);
+ struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
+ schedule_work(&fctx->uevent_work);
+ return NVIF_EVENT_KEEP;
}
void
@@ -191,7 +189,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
} args;
int ret;
- INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
+ INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
@@ -535,19 +533,15 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
- bool do_work;
- if (atomic_inc_return(&fctx->notify_ref) == 0)
- do_work = true;
+ if (!fctx->notify_ref++)
+ nvif_event_allow(&fctx->event);
ret = nouveau_fence_no_signaling(f);
if (ret)
set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
- else if (atomic_dec_and_test(&fctx->notify_ref))
- do_work = true;
-
- if (do_work)
- schedule_work(&fctx->allow_block_work);
+ else if (!--fctx->notify_ref)
+ nvif_event_block(&fctx->event);
return ret;
}
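The reshuffle inverts the old scheme: instead of the IRQ-level handler deciding whether to drop the event and a separate work item toggling it, the handler now just schedules uevent_work and always keeps the event, while nvif_event_block() — which may need to sleep — runs from the work item. With that, notify_ref can be demoted from atomic_t to a plain int, since its remaining users are presumably already serialized by fctx->lock.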
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 28f5cf013b89..8bc065acfe35 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -3,7 +3,6 @@
#define __NOUVEAU_FENCE_H__
#include <linux/dma-fence.h>
-#include <linux/workqueue.h>
#include <nvif/event.h>
struct nouveau_drm;
@@ -45,10 +44,9 @@ struct nouveau_fence_chan {
u32 context;
char name[32];
+ struct work_struct uevent_work;
struct nvif_event event;
- struct work_struct allow_block_work;
- atomic_t notify_ref;
- int dead, killed;
+ int notify_ref, dead, killed;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index dd98f6910f9c..32fa2e273965 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -398,7 +398,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = {
.free_job = nouveau_sched_free_job,
};
-int
+static int
nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
struct workqueue_struct *wq, u32 credit_limit)
{
@@ -453,7 +453,30 @@ fail_wq:
return ret;
}
-void
+int
+nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit)
+{
+ struct nouveau_sched *sched;
+ int ret;
+
+ sched = kzalloc(sizeof(*sched), GFP_KERNEL);
+ if (!sched)
+ return -ENOMEM;
+
+ ret = nouveau_sched_init(sched, drm, wq, credit_limit);
+ if (ret) {
+ kfree(sched);
+ return ret;
+ }
+
+ *psched = sched;
+
+ return 0;
+}
+
+static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
@@ -471,3 +494,14 @@ nouveau_sched_fini(struct nouveau_sched *sched)
if (sched->wq)
destroy_workqueue(sched->wq);
}
+
+void
+nouveau_sched_destroy(struct nouveau_sched **psched)
+{
+ struct nouveau_sched *sched = *psched;
+
+ nouveau_sched_fini(sched);
+ kfree(sched);
+
+ *psched = NULL;
+}
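Callers now treat the scheduler as optional, which the pointer-based API makes explicit. A hypothetical usage sketch mirroring the abi16 path above — needs_sched, wq, and credit_limit are illustrative names, not part of this patch:

	struct nouveau_sched *sched = NULL;
	int ret;

	if (needs_sched) { /* e.g. only for VM_BIND clients */
		ret = nouveau_sched_create(&sched, drm, wq, credit_limit);
		if (ret)
			return ret;
	}

	/* ... use sched ... */

	if (sched)
		nouveau_sched_destroy(&sched); /* frees and NULLs the pointer */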
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index a6528f5981e6..e1f01a23e6f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -111,8 +111,8 @@ struct nouveau_sched {
} job;
};
-int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
- struct workqueue_struct *wq, u32 credit_limit);
-void nouveau_sched_fini(struct nouveau_sched *sched);
+int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit);
+void nouveau_sched_destroy(struct nouveau_sched **psched);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index cc03e0c22ff3..5e4565c5011a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -1011,7 +1011,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
if (ret)
return ret;
- buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
+ buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 4f223c972c6a..0a0a11dc9ec0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1740,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched = &cli->sched;
+ args.sched = cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index a6602c012671..3dda885df5b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -108,6 +108,9 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
} else {
ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
mem->mem.size, &tmp);
+ if (ret)
+ goto done;
+
vma->addr = tmp.addr;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
index 4135690326f4..3a30bea30e36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
@@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
rm->flush = r535_bar_flush;
ret = gf100_bar_new_(rm, device, type, inst, &bar);
- *pbar = bar;
if (ret) {
- if (!bar)
- kfree(rm);
+ kfree(rm);
return ret;
}
+ *pbar = bar;
bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 19188683c8fc..8c2bf1c16f2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
+static void
+shadow_fw_release(void *fw)
+{
+ release_firmware(fw);
+}
+
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
- .fini = (void(*)(void *))release_firmware,
+ .fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 9ee58e2a0eb2..a73a5b589790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+{
+ if (mem->data) {
+ /*
+ * Poison the buffer to catch any unexpected access from
+ * GSP-RM if the buffer was prematurely freed.
+ */
+ memset(mem->data, 0xFF, mem->size);
+
+ dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+ memset(mem, 0, sizeof(*mem));
+ }
+}
+
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
+{
+ mem->size = size;
+ mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+ if (WARN_ON(!mem->data))
+ return -ENOMEM;
+
+ return 0;
+}
+
static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
@@ -1024,6 +1050,11 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
nvkm_inth_allow(&gsp->subdev.inth);
nvkm_wr32(device, 0x110004, 0x00000040);
+
+ /* Release the DMA buffers that were needed only for boot and init */
+ nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
+ nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+
return ret;
}
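The relocated ctor/dtor pair is unchanged apart from the size_t size and the new poisoning: any late DMA access from GSP-RM to a freed buffer now reads 0xFF bytes rather than stale-but-plausible contents, which matters now that r535_gsp_postinit() above frees the boot and libos buffers while GSP-RM keeps running.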
@@ -1078,7 +1109,6 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
if (IS_ERR(rpc))
return PTR_ERR(rpc);
- rpc->size = sizeof(*rpc);
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
@@ -1094,6 +1124,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
strings += name_len;
str_offset += name_len;
}
+ rpc->size = str_offset;
return nvkm_gsp_rpc_wr(gsp, rpc, false);
}
@@ -1532,27 +1563,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
-static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
-{
- if (mem->data) {
- dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
- mem->data = NULL;
- }
-}
-
-static int
-nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
-{
- mem->size = size;
- mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
- if (WARN_ON(!mem->data))
- return -ENOMEM;
-
- return 0;
-}
-
-
static int
r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@@ -1938,20 +1948,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
* See kgspCreateRadix3_IMPL
*/
static int
-nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
+nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
{
u64 addr;
for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
u64 *ptes;
- int idx;
+ size_t bufsize;
+ int ret, idx;
- rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
- rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
- &rx3->mem[i].addr, GFP_KERNEL);
- if (WARN_ON(!rx3->mem[i].data))
- return -ENOMEM;
+ bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+ ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
+ if (ret)
+ return ret;
ptes = rx3->mem[i].data;
if (i == 2) {
@@ -1991,7 +2001,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
if (ret)
return ret;
- ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);
+ ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
if (ret)
return ret;
@@ -2150,6 +2160,13 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
mutex_destroy(&gsp->cmdq.mutex);
r535_gsp_dtor_fws(gsp);
+
+ nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+ nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
+ nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
+ nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
+ nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
+ nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
}
int
@@ -2194,7 +2211,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
memcpy(gsp->sig.data, data, size);
/* Build radix3 page table for ELF image. */
- ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+ ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
if (ret)
return ret;
@@ -2295,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
int ret;
+ bool enable_gsp = fwif->enable;
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
return -EINVAL;
if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
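Either way the user keeps the final say: nvkm_boolopt() still parses NvGspRm from the device's cfgopt string (typically supplied through the nouveau.config= module option — an assumption about the usual plumbing, not shown in this hunk), so the Kconfig default added above only changes what happens when the option is absent.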
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index dad938cf6dec..8f3783742208 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -539,6 +539,8 @@ config DRM_PANEL_RAYDIUM_RM692E5
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for Raydium RM692E5-based
display panels, such as the one found in the Fairphone 5 smartphone.
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index ea5a85779382..f23d8832a1ad 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
.off_func = s6d7aa0_lsl080al02_off,
.drm_mode = &s6d7aa0_lsl080al02_mode,
.mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_flags = 0,
.has_backlight = false,
.use_passwd3 = false,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 2214cb09678c..d493ee735c73 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3948,6 +3948,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
static const struct panel_desc tianma_tm070jvhg33 = {
@@ -3960,6 +3961,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 85b3b4871a1d..fdd768bbd487 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1985,8 +1985,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
}
- if (!clock)
+ if (!clock) {
+ vop2_unlock(vop2);
return;
+ }
if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
!(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT))
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 550492a7a031..d442b893275b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1184,14 +1184,16 @@ static void drm_sched_run_job_work(struct work_struct *w)
if (READ_ONCE(sched->pause_submit))
return;
+ /* Find entity with a ready job */
entity = drm_sched_select_entity(sched);
if (!entity)
- return;
+ return; /* No more work */
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
complete_all(&entity->entity_idle);
- return; /* No more work */
+ drm_sched_run_job_queue(sched);
+ return;
}
s_fence = sched_job->s_fence;
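
A schematic of the requeue rule this hunk restores: when an entity is selected but its job queue turns out to be empty, the worker must re-arm itself, otherwise jobs from other ready entities can stall. This is a userspace stand-in, not the scheduler code; queue_work() here is a same-thread substitute for the kernel workqueue.

#include <stdbool.h>
#include <stdio.h>

static int jobs = 2;
static bool raced_empty = true;	/* first pop finds the entity drained */

static void queue_work(void (*fn)(void));

static void run_job_work(void)
{
	if (jobs == 0)
		return;			/* nothing ready: genuinely done */

	if (raced_empty) {
		raced_empty = false;
		queue_work(run_job_work); /* re-check instead of stalling */
		return;
	}

	jobs--;
	printf("ran a job, %d left\n", jobs);
	queue_work(run_job_work);
}

static void queue_work(void (*fn)(void)) { fn(); } /* same-thread stand-in */

int main(void)
{
	queue_work(run_job_work);
	return 0;
}
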
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ff36171c8fb7..03d1c76aec2d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client)
* not the shared IOMMU domain, don't try to attach it to a different
* domain. This allows using the IOMMU-backed DMA API.
*/
- if (domain && domain != tegra->domain)
+ if (domain && domain->type != IOMMU_DOMAIN_IDENTITY &&
+ domain != tegra->domain)
return 0;
if (tegra->domain) {
@@ -1242,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
- err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
- if (err < 0)
- goto hub;
+ /*
+ * Only take over from a potential firmware framebuffer if any CRTCs
+ * have been registered. This must not be a fatal error because there
+ * are other accelerators that are exposed via this driver.
+ *
+ * Another case where this happens is on Tegra234 where the display
+ * hardware is no longer part of the host1x complex, so this driver
+ * will not expose any modesetting features.
+ */
+ if (drm->mode_config.num_crtc > 0) {
+ err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ if (err < 0)
+ goto hub;
+ } else {
+ /*
+ * Indicate to userspace that this doesn't expose any display
+ * capabilities.
+ */
+ drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ }
err = drm_dev_register(drm, 0);
if (err < 0)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index ea2af6bd9abe..be2d9d7764be 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -8,16 +8,308 @@
#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
+#include <linux/sizes.h>
#include <drm/drm_buddy.h>
#include "../lib/drm_random.h"
+static unsigned int random_seed;
+
static inline u64 get_size(int order, u64 chunk_size)
{
return (1 << order) * chunk_size;
}
+static void drm_test_buddy_alloc_range_bias(struct kunit *test)
+{
+ u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+ DRM_RND_STATE(prng, random_seed);
+ unsigned int i, count, *order;
+ struct drm_buddy mm;
+ LIST_HEAD(allocated);
+
+ bias_size = SZ_1M;
+ ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
+ ps = max(SZ_4K, ps);
+ mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
+
+ kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ count = mm_size / bias_size;
+ order = drm_random_order(count, &prng);
+ KUNIT_EXPECT_TRUE(test, order);
+
+ /*
+ * Idea is to split the address space into uniform bias ranges, and then
+ * in some random order allocate within each bias, using various
+ * patterns within. This should detect if allocations leak out from a
+ * given bias, for example.
+ */
+
+ for (i = 0; i < count; i++) {
+ LIST_HEAD(tmp);
+ u32 size;
+
+ bias_start = order[i] * bias_size;
+ bias_end = bias_start + bias_size;
+ bias_rem = bias_size;
+
+ /* internal round_up too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, bias_size,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+
+ /* size too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size + ps, ps);
+
+ /* bias range too small for size */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end, bias_size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end, bias_size, ps);
+
+ /* bias misaligned */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end - ps,
+ bias_size >> 1, bias_size >> 1,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
+
+ /* single big page */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* single page with internal round_up */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, ps, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, ps, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* random size within */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ if (size)
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+
+ bias_rem -= size;
+ /* too big for current avail */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_rem + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_rem + ps, ps);
+
+ if (bias_rem) {
+ /* random fill of the remainder */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ /*
+ * Intentionally allow some space to be left
+ * unallocated, and ideally not always on the bias
+ * boundaries.
+ */
+ drm_buddy_free_list(&mm, &tmp);
+ } else {
+ list_splice_tail(&tmp, &allocated);
+ }
+ }
+
+ kfree(order);
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+
+ /*
+ * Something more free-form. Idea is to pick a random starting bias
+ * range within the address space and then start filling it up. Also
+ * randomly grow the bias range in both directions as we go along. This
+ * should give us bias start/end which is not always uniform like above,
+ * and in some cases will require the allocator to jump over already
+ * allocated nodes in the middle of the address space.
+ */
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+ bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+ bias_end = max(bias_end, bias_start + ps);
+ bias_rem = bias_end - bias_start;
+
+ do {
+ u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ bias_rem -= size;
+
+ /*
+ * Try to randomly grow the bias range in both directions, or
+ * only one, or perhaps don't grow at all.
+ */
+ do {
+ u32 old_bias_start = bias_start;
+ u32 old_bias_end = bias_end;
+
+ if (bias_start)
+ bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
+ if (bias_end != mm_size)
+ bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
+
+ bias_rem += old_bias_start - bias_start;
+ bias_rem += bias_end - old_bias_end;
+ } while (!bias_rem && (bias_start || bias_end != mm_size));
+ } while (bias_rem);
+
+ KUNIT_ASSERT_EQ(test, bias_start, 0);
+ KUNIT_ASSERT_EQ(test, bias_end, mm_size);
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
+ ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc passed with bias(%x-%x), size=%u\n",
+ bias_start, bias_end, ps);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
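A minimal userspace sketch of the partitioning strategy described in the comment above: split the space into uniform bias windows and visit them in random order. The Fisher-Yates shuffle stands in for drm_random_order(); everything here is illustrative, not the kunit helpers.

#include <stdio.h>
#include <stdlib.h>

static void shuffle(unsigned int *order, unsigned int count, unsigned int seed)
{
	srand(seed);
	for (unsigned int i = count - 1; i > 0; i--) {
		unsigned int j = rand() % (i + 1);
		unsigned int tmp = order[i];

		order[i] = order[j];
		order[j] = tmp;
	}
}

int main(void)
{
	const unsigned int bias_size = 1024 * 1024, mm_size = 8 * bias_size;
	unsigned int count = mm_size / bias_size, order[8];

	for (unsigned int i = 0; i < count; i++)
		order[i] = i;
	shuffle(order, count, 42);

	/* Each window [start, end) is then exercised in isolation, so an
	 * allocation escaping its window is immediately detectable. */
	for (unsigned int i = 0; i < count; i++)
		printf("bias window %u: [%#x, %#x)\n", order[i],
		       order[i] * bias_size, (order[i] + 1) * bias_size);
	return 0;
}
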
+static void drm_test_buddy_alloc_contiguous(struct kunit *test)
+{
+ const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
+ unsigned long i, n_pages, total;
+ struct drm_buddy_block *block;
+ struct drm_buddy mm;
+ LIST_HEAD(left);
+ LIST_HEAD(middle);
+ LIST_HEAD(right);
+ LIST_HEAD(allocated);
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+ /*
+ * Idea is to fragment the address space by alternating block
+ * allocations between three different lists; one for left, middle and
+ * right. We can then free a list to simulate fragmentation. In
+ * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
+ * including the try_harder path.
+ */
+
+ i = 0;
+ n_pages = mm_size / ps;
+ do {
+ struct list_head *list;
+ int slot = i % 3;
+
+ if (slot == 0)
+ list = &left;
+ else if (slot == 1)
+ list = &middle;
+ else
+ list = &right;
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list, 0),
+ "buddy_alloc hit an error size=%u\n",
+ ps);
+ } while (++i < n_pages);
+
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 3 * ps);
+
+ drm_buddy_free_list(&mm, &middle);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 3 * ps);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 2 * ps);
+
+ drm_buddy_free_list(&mm, &right);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 3 * ps);
+ /*
+ * At this point we should have enough contiguous space for 2 blocks,
+ * however they are never buddies (since we freed middle and right) so
+ * will require the try_harder logic to find them.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", 2 * ps);
+
+ drm_buddy_free_list(&mm, &left);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", 3 * ps);
+
+ total = 0;
+ list_for_each_entry(block, &allocated, link)
+ total += drm_buddy_block_size(&mm, block);
+
+ KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
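The try_harder remark rests on a checkable property of binary buddy systems: a block only merges with the block at offset ^ size, so two free neighbours are often not mergeable. A standalone illustration of that rule (generic buddy semantics assumed, not drm_buddy internals):

#include <stdio.h>

/* Block at offset o (aligned to its power-of-two size s) pairs only
 * with the block at o ^ s. */
static unsigned long buddy_of(unsigned long o, unsigned long s)
{
	return o ^ s;
}

int main(void)
{
	const unsigned long ps = 4096;

	for (unsigned long i = 0; i < 6; i++) {
		unsigned long o = i * ps;

		printf("page @%#07lx: buddy @%#07lx%s\n", o, buddy_of(o, ps),
		       buddy_of(o, ps) == o + ps ?
		       "" : "  (the next page is NOT its buddy)");
	}
	return 0;
}
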
static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
u64 mm_size, size, start = 0;
@@ -275,16 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_fini(&mm);
}
+static int drm_buddy_suite_init(struct kunit_suite *suite)
+{
+ while (!random_seed)
+ random_seed = get_random_u32();
+
+ kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
+ random_seed);
+
+ return 0;
+}
+
static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_limit),
KUNIT_CASE(drm_test_buddy_alloc_optimistic),
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
+ KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+ KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
static struct kunit_suite drm_buddy_test_suite = {
.name = "drm_buddy",
+ .suite_init = drm_buddy_suite_init,
.test_cases = drm_buddy_tests,
};
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 4e9247cf9977..1eb0c304f960 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -188,13 +188,13 @@ out:
static void drm_test_mm_debug(struct kunit *test)
{
+ struct drm_printer p = drm_debug_printer(test->name);
struct drm_mm mm;
struct drm_mm_node nodes[2];
/* Create a small drm_mm with a couple of nodes and a few holes, and
* check that the debug iterator doesn't explode over a trivial drm_mm.
*/
-
drm_mm_init(&mm, 0, 4096);
memset(nodes, 0, sizeof(nodes));
@@ -209,6 +209,9 @@ static void drm_test_mm_debug(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
"failed to reserve node[0] {start=%lld, size=%lld)\n",
nodes[0].start, nodes[0].size);
+
+ drm_mm_print(&mm, &p);
+ KUNIT_SUCCEED(test);
}
static bool expect_insert(struct kunit *test, struct drm_mm *mm,
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index f5187b384ae9..76027960054f 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -95,11 +95,17 @@ static int ttm_global_init(void)
ttm_pool_mgr_init(num_pages);
ttm_tt_mgr_init(num_pages, num_dma32);
- glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
+ __GFP_NOWARN);
+ /* Retry without GFP_DMA32 on platforms where DMA32 is not available */
if (unlikely(glob->dummy_read_page == NULL)) {
- ret = -ENOMEM;
- goto out;
+ glob->dummy_read_page = alloc_page(__GFP_ZERO);
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
}
INIT_LIST_HEAD(&glob->device_list);
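
The shape of this fix is the common constrained-then-relaxed allocation pattern: attempt the strict pool quietly, and only fail hard when the relaxed retry also comes up empty. A hedged userspace analogue (the flag plumbing is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_page_like(int constrained)
{
	/* Stand-in for alloc_page(); pretend the DMA32 pool is absent. */
	return constrained ? NULL : calloc(1, 4096);
}

int main(void)
{
	void *page = alloc_page_like(1);	/* strict attempt, no warning */

	if (!page) {
		page = alloc_page_like(0);	/* relaxed retry */
		if (!page)
			return 1;		/* only now report -ENOMEM */
		fprintf(stderr, "falling back to unconstrained pool\n");
	}
	free(page);
	return 0;
}
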
@@ -195,7 +201,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bool use_dma_alloc, bool use_dma32)
{
struct ttm_global *glob = &ttm_glob;
- int ret;
+ int ret, nid;
if (WARN_ON(vma_manager == NULL))
return -EINVAL;
@@ -215,7 +221,12 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
ttm_sys_man_init(bdev);
- ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
+ if (dev)
+ nid = dev_to_node(dev);
+ else
+ nid = NUMA_NO_NODE;
+
+ ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index b62f420a9f96..112438d965ff 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
- struct page **pages = tt->pages;
+ struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;
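
The one-line change above fixes a classic subrange bug: indices computed relative to start_page were applied to the unrebased array base, so the loop touched entries [0, nr) instead of [start_page, end_page). A reduced illustration (not the kernel code):

#include <assert.h>
#include <stddef.h>

static void clear_range(int **pages, size_t start, size_t end)
{
	int **p = &pages[start];	/* rebase the cursor at 'start'... */
	size_t n = end - start;

	for (size_t i = 0; i < n; i++)
		p[i] = NULL;		/* ...leaving earlier slots intact */
}

int main(void)
{
	int a, b, c;
	int *pages[3] = { &a, &b, &c };

	clear_range(pages, 1, 3);
	assert(pages[0] == &a);		/* clobbered by the original bug */
	assert(!pages[1] && !pages[2]);
	return 0;
}
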
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index fcff41dd2315..88f63d526b22 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -147,6 +147,13 @@ v3d_job_allocate(void **container, size_t size)
return 0;
}
+static void
+v3d_job_deallocate(void **container)
+{
+ kfree(*container);
+ *container = NULL;
+}
+
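v3d_job_deallocate() pairs the free with nulling the caller's pointer, so later unwinding cannot double-free or reuse the stale container. A minimal sketch of the pattern (userspace stand-ins):

#include <stdlib.h>

static void job_deallocate(void **container)
{
	free(*container);
	*container = NULL;	/* later cleanup sees NULL, not a dangler */
}

int main(void)
{
	void *job = malloc(64);

	if (job) {			/* pretend job_init() failed here */
		job_deallocate(&job);
		job_deallocate(&job);	/* second pass: free(NULL), a no-op */
	}
	return 0;
}
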
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
struct v3d_job *job, void (*free)(struct kref *ref),
@@ -273,8 +280,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
ret = v3d_job_init(v3d, file_priv, &(*job)->base,
v3d_job_free, args->in_sync, se, V3D_CSD);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)job);
return ret;
+ }
ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
if (ret)
@@ -282,8 +291,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
ret = v3d_job_init(v3d, file_priv, *clean_job,
v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)clean_job);
return ret;
+ }
(*job)->args = *args;
@@ -860,8 +871,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &render->base,
v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&render);
goto fail;
+ }
render->start = args->rcl_start;
render->end = args->rcl_end;
@@ -874,8 +887,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&bin);
goto fail;
+ }
bin->start = args->bcl_start;
bin->end = args->bcl_end;
@@ -892,8 +907,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, clean_job,
v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&clean_job);
goto fail;
+ }
last_job = clean_job;
} else {
@@ -1015,8 +1032,10 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &job->base,
v3d_job_free, args->in_sync, &se, V3D_TFU);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&job);
goto fail;
+ }
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
sizeof(*job->base.bo), GFP_KERNEL);
@@ -1233,8 +1252,10 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
v3d_job_free, 0, &se, V3D_CPU);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&cpu_job);
goto fail;
+ }
clean_job = cpu_job->indirect_csd.clean_job;
csd_job = cpu_job->indirect_csd.job;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f8e9abe647b9..9539aa28937f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -94,6 +94,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
goto err_free;
}
+ dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
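
The `?:` above is the GNU C "elvis" operator: `a ?: b` evaluates a once and yields it when nonzero, else b. So the added line keeps the device's reported maximum mapping size when there is one and falls back to UINT_MAX otherwise:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int reported = 0;	/* pretend the device reports nothing */
	unsigned int seg = reported ?: UINT_MAX;	/* GNU extension */

	printf("max segment size: %u\n", seg);	/* 4294967295 */
	return 0;
}
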
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 1cced50d8d8c..e36ae1f0d885 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -47,7 +47,7 @@ config DRM_XE
config DRM_XE_DISPLAY
bool "Enable display support"
- depends on DRM_XE && EXPERT && DRM_XE=m
+ depends on DRM_XE && DRM_XE=m
select FB_IOMEM_HELPERS
select I2C
select I2C_ALGOBIT
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 53bd2a8ba1ae..efcf0ab7a1a6 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
# The following turn off the warnings enabled by -Wextra
ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 3062e0e0d467..79ba98a169f9 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -50,8 +50,8 @@
#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffu << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffffu << 0)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index 811add10c30d..c165e26c0976 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -242,8 +242,8 @@ struct slpc_shared_data {
(HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \
HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8)
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xffu << 8)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
index 3b83f907ece4..0b1146d0c997 100644
--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -82,11 +82,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
#define GUC_CTB_HDR_LEN 1u
#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
#define GUC_CTB_MSG_MAX_LEN 256u
-#define GUC_CTB_MSG_0_FENCE (0xffff << 16)
-#define GUC_CTB_MSG_0_FORMAT (0xf << 12)
+#define GUC_CTB_MSG_0_FENCE (0xffffu << 16)
+#define GUC_CTB_MSG_0_FORMAT (0xfu << 12)
#define GUC_CTB_FORMAT_HXG 0u
-#define GUC_CTB_MSG_0_RESERVED (0xf << 8)
-#define GUC_CTB_MSG_0_NUM_DWORDS (0xff << 0)
+#define GUC_CTB_MSG_0_RESERVED (0xfu << 8)
+#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0)
/**
* DOC: CTB HXG Message
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 47094b9b044c..0400bc0fccdc 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -31,9 +31,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
diff --git a/drivers/gpu/drm/xe/abi/guc_messages_abi.h b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
index 3d199016cf88..29e414c82d56 100644
--- a/drivers/gpu/drm/xe/abi/guc_messages_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
@@ -40,18 +40,18 @@
*/
#define GUC_HXG_MSG_MIN_LEN 1u
-#define GUC_HXG_MSG_0_ORIGIN (0x1 << 31)
+#define GUC_HXG_MSG_0_ORIGIN (0x1u << 31)
#define GUC_HXG_ORIGIN_HOST 0u
#define GUC_HXG_ORIGIN_GUC 1u
-#define GUC_HXG_MSG_0_TYPE (0x7 << 28)
+#define GUC_HXG_MSG_0_TYPE (0x7u << 28)
#define GUC_HXG_TYPE_REQUEST 0u
#define GUC_HXG_TYPE_EVENT 1u
#define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
#define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
#define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
#define GUC_HXG_TYPE_RESPONSE_SUCCESS 7u
-#define GUC_HXG_MSG_0_AUX (0xfffffff << 0)
-#define GUC_HXG_MSG_n_PAYLOAD (0xffffffff << 0)
+#define GUC_HXG_MSG_0_AUX (0xfffffffu << 0)
+#define GUC_HXG_MSG_n_PAYLOAD (0xffffffffu << 0)
/**
* DOC: HXG Request
@@ -85,8 +85,8 @@
*/
#define GUC_HXG_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfff << 16)
-#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffffu << 0)
#define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/**
@@ -117,8 +117,8 @@
*/
#define GUC_HXG_EVENT_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfff << 16)
-#define GUC_HXG_EVENT_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_EVENT_MSG_0_ACTION (0xffffu << 0)
#define GUC_HXG_EVENT_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/**
@@ -188,8 +188,8 @@
*/
#define GUC_HXG_FAILURE_MSG_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_FAILURE_MSG_0_HINT (0xfff << 16)
-#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffff << 0)
+#define GUC_HXG_FAILURE_MSG_0_HINT (0xfffu << 16)
+#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffffu << 0)
/**
* DOC: HXG Response
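
All of these u suffixes address the same pitfall: shifting a plain int literal toward bit 31 is signed arithmetic, so `0xffff << 16` overflows into the sign bit (undefined behavior) and the negative result sign-extends when the mask lands in a 64-bit field. A compact demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* int shift into the sign bit: UB, typically a negative value
	 * that sign-extends on widening... */
	uint64_t bad  = (uint64_t)(0xffff << 16);
	/* ...while the u suffix keeps the arithmetic unsigned. */
	uint64_t good = (uint64_t)(0xffffu << 16);

	printf("bad  = %#llx\n", (unsigned long long)bad);  /* 0xffffffffffff0000 */
	printf("good = %#llx\n", (unsigned long long)good); /* 0xffff0000 */
	return 0;
}
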
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
index 5f19550cc845..777c20ceabab 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -10,7 +10,7 @@
#include "xe_bo.h"
-#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT)
+#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */
static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
{
@@ -35,12 +35,10 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
u32 ofs, u64 *ptr, u32 size)
{
struct ttm_bo_kmap_obj map;
- void *virtual;
+ void *src;
bool is_iomem;
int ret;
- XE_WARN_ON(size != 8);
-
ret = xe_bo_lock(bo, true);
if (ret)
return ret;
@@ -50,11 +48,12 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
goto out_unlock;
ofs &= ~PAGE_MASK;
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+ src = ttm_kmap_obj_virtual(&map, &is_iomem);
+ src += ofs;
if (is_iomem)
- *ptr = readq((void __iomem *)(virtual + ofs));
+ memcpy_fromio(ptr, (void __iomem *)src, size);
else
- *ptr = *(u64 *)(virtual + ofs);
+ memcpy(ptr, src, size);
ttm_bo_kunmap(&map);
out_unlock:
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 412b2e7ce40c..3436fd9cf2b2 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -125,14 +125,13 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
ttm_bo_type_device, bo_flags);
-
- xe_bo_lock(bo, false);
-
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
}
+ xe_bo_lock(bo, false);
+
kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
test);
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 7a32faa2f688..a6523df0f1d3 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -331,7 +331,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
- &src_it, XE_PAGE_SIZE, pt);
+ &src_it, XE_PAGE_SIZE, pt->ttm.resource);
run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index ef56bd517b28..421b819fd4ba 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -21,4 +21,5 @@ kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index a53c22a19582..b4715b78ef3b 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -74,9 +74,6 @@ static const struct platform_test_case cases[] = {
SUBPLATFORM_CASE(DG2, G11, B1),
SUBPLATFORM_CASE(DG2, G12, A0),
SUBPLATFORM_CASE(DG2, G12, A1),
- PLATFORM_CASE(PVC, B0),
- PLATFORM_CASE(PVC, B1),
- PLATFORM_CASE(PVC, C0),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 8e4a3b1f6b93..4d3b80ec906d 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -28,6 +28,14 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
+ [XE_PL_SYSTEM] = "system",
+ [XE_PL_TT] = "gtt",
+ [XE_PL_VRAM0] = "vram0",
+ [XE_PL_VRAM1] = "vram1",
+ [XE_PL_STOLEN] = "stolen"
+};
+
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
@@ -125,9 +133,9 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
-
if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+ xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_TT,
};
@@ -145,6 +153,8 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct xe_mem_region *vram;
u64 io_size;
+ xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
xe_assert(xe, vram && vram->usable_size);
io_size = vram->io_size;
@@ -175,8 +185,6 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
-
if (bo->props.preferred_gt == XE_GT1) {
if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
@@ -193,9 +201,9 @@ static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
- xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
-
if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+ xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
+
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
@@ -442,7 +450,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
if (vram->mapping &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)
- mem->bus.addr = (u8 *)vram->mapping +
+ mem->bus.addr = (u8 __force *)vram->mapping +
mem->bus.offset;
mem->bus.offset += vram->io_start;
@@ -713,8 +721,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
migrate = xe->tiles[0].migrate;
xe_assert(xe, migrate);
-
- trace_xe_bo_move(bo);
+ trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
xe_device_mem_access_get(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
@@ -734,7 +741,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
/* Create a new VMAP once the kernel BO is back in VRAM */
if (!ret && resource_is_vram(new_mem)) {
struct xe_mem_region *vram = res_to_mem_region(new_mem);
- void *new_addr = vram->mapping +
+ void __iomem *new_addr = vram->mapping +
(new_mem->start << PAGE_SHIFT);
if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9b1279aca127..8be42ac6cd07 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -243,6 +243,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
extern struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index d9ae77fe7382..5176c27e4b6a 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -83,9 +83,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef);
-
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -102,8 +99,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&xef->exec_queue.lock);
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
- device_kill_persistent_exec_queues(xe, xef);
-
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
@@ -255,9 +250,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
- INIT_LIST_HEAD(&xe->persistent_engines.list);
-
spin_lock_init(&xe->pinned.lock);
INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
INIT_LIST_HEAD(&xe->pinned.external_vram);
@@ -484,7 +476,7 @@ int xe_device_probe(struct xe_device *xe)
err = xe_device_set_has_flat_ccs(xe);
if (err)
- return err;
+ goto err_irq_shutdown;
err = xe_mmio_probe_vram(xe);
if (err)
@@ -570,37 +562,6 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- if (!list_empty(&q->persistent.link))
- list_del(&q->persistent.link);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef)
-{
- struct xe_exec_queue *q, *next;
-
- mutex_lock(&xe->persistent_engines.lock);
- list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
- persistent.link)
- if (q->persistent.xef == xef) {
- xe_exec_queue_kill(q);
- list_del_init(&q->persistent.link);
- }
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
@@ -613,7 +574,7 @@ void xe_device_wmb(struct xe_device *xe)
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
return xe_device_has_flat_ccs(xe) ?
- DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
+ DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}
bool xe_device_mem_access_ongoing(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 3da83b233206..08d8b72c7731 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q);
-
void xe_device_wmb(struct xe_device *xe);
static inline struct xe_file *to_xe_file(const struct drm_file *file)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index c45ef17b3473..e8491979a6f2 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -97,7 +97,7 @@ struct xe_mem_region {
*/
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
- void *__iomem mapping;
+ void __iomem *mapping;
};
/**
@@ -146,7 +146,7 @@ struct xe_tile {
size_t size;
/** @regs: pointer to tile's MMIO space (starting with registers) */
- void *regs;
+ void __iomem *regs;
} mmio;
/**
@@ -159,7 +159,7 @@ struct xe_tile {
size_t size;
/** @regs: pointer to tile's additional MMIO-extension space */
- void *regs;
+ void __iomem *regs;
} mmio_ext;
/** @mem: memory management info for tile */
@@ -301,7 +301,7 @@ struct xe_device {
/** @size: size of MMIO space for device */
size_t size;
/** @regs: pointer to MMIO space for device */
- void *regs;
+ void __iomem *regs;
} mmio;
/** @mem: memory info for device */
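
The recurring fix in this file is declaration syntax: the sparse annotation binds to what follows it, so `void __iomem *regs` marks the pointed-to MMIO space, whereas the old `void *__iomem regs` annotated the pointer variable itself and defeated sparse's address-space checking. A sketch with the annotation stubbed out the way non-sparse builds do (the attribute form is illustrative):

#ifdef __CHECKER__
#define __iomem __attribute__((noderef, address_space(2)))
#else
#define __iomem	/* plain builds compile it away */
#endif

struct mmio_region {
	void __iomem *regs;	/* pointer to I/O memory: intended */
	/* void *__iomem regs;	   annotated pointer variable: wrong */
};

int main(void)
{
	struct mmio_region r = { 0 };

	return r.regs != 0;
}
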
@@ -341,14 +341,6 @@ struct xe_device {
struct mutex lock;
} usm;
- /** @persistent_engines: engines that are closed but still running */
- struct {
- /** @lock: protects persistent engines */
- struct mutex lock;
- /** @list: list of persistent engines */
- struct list_head list;
- } persistent_engines;
-
/** @pinned: pinned BO state */
struct {
/** @lock: protected pinned BO list state */
diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/xe_display.c
index 74391d9b11ae..e4db069f0db3 100644
--- a/drivers/gpu/drm/xe/xe_display.c
+++ b/drivers/gpu/drm/xe/xe_display.c
@@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
int xe_display_init_nommio(struct xe_device *xe)
{
- int err;
-
if (!xe->info.enable_display)
return 0;
@@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(xe);
- err = intel_power_domains_init(xe);
- if (err)
- return err;
-
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 64ed303728fd..da2627ed6ae7 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -175,7 +175,7 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return 0;
}
-const struct dma_buf_ops xe_dmabuf_ops = {
+static const struct dma_buf_ops xe_dmabuf_ops = {
.attach = xe_dma_buf_attach,
.detach = xe_dma_buf_detach,
.pin = xe_dma_buf_pin,
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 82d1305e831f..6040e4d22b28 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
- static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
- [XE_PL_SYSTEM] = "system",
- [XE_PL_TT] = "gtt",
- [XE_PL_VRAM0] = "vram0",
- [XE_PL_VRAM1] = "vram1",
- [4 ... 6] = NULL,
- [XE_PL_STOLEN] = "stolen"
- };
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
struct xe_file *xef = file->driver_priv;
struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
spin_unlock(&client->bos_lock);
for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
- if (!mem_type_to_name[mem_type])
+ if (!xe_mem_type_to_name[mem_type])
continue;
man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
- mem_type_to_name[mem_type]);
+ xe_mem_type_to_name[mem_type]);
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index d30c0d0689bc..17f26952e665 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -111,11 +111,11 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
- u32 i, num_syncs = 0;
+ u32 i, num_syncs = 0, num_ufence = 0;
struct xe_sched_job *job;
struct dma_fence *rebind_fence;
struct xe_vm *vm;
- bool write_locked;
+ bool write_locked, skip_retry = false;
ktime_t end = 0;
int err = 0;
@@ -157,6 +157,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
SYNC_PARSE_FLAG_LR_MODE : 0));
if (err)
goto err_syncs;
+
+ if (xe_sync_is_ufence(&syncs[i]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+ err = -EINVAL;
+ goto err_syncs;
}
if (xe_exec_queue_is_parallel(q)) {
@@ -227,7 +235,8 @@ retry:
}
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
- err = -EWOULDBLOCK;
+ err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
+ skip_retry = true;
goto err_exec;
}
@@ -337,7 +346,7 @@ err_unlock_list:
up_write(&vm->lock);
else
up_read(&vm->lock);
- if (err == -EAGAIN)
+ if (err == -EAGAIN && !skip_retry)
goto retry;
err_syncs:
for (i = 0; i < num_syncs; i++)
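
The skip_retry flag exists because, on Linux, EWOULDBLOCK and EAGAIN are the same value: once the ring-full path returns -EWOULDBLOCK, the generic `err == -EAGAIN` retry above cannot tell the two cases apart without the extra flag. A quick check:

#include <errno.h>
#include <stdio.h>

int main(void)
{
	/* Aliases on Linux (both 11 on most architectures), so an
	 * out-of-band flag is the only way to suppress the retry. */
	printf("EAGAIN=%d EWOULDBLOCK=%d same=%d\n",
	       EAGAIN, EWOULDBLOCK, EAGAIN == EWOULDBLOCK);
	return 0;
}
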
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 44fe8097b7cd..49223026c89f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -60,13 +60,17 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
- INIT_LIST_HEAD(&q->persistent.link);
INIT_LIST_HEAD(&q->compute.link);
INIT_LIST_HEAD(&q->multi_gt_link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
q->sched_props.preempt_timeout_us =
hwe->eclass->sched_props.preempt_timeout_us;
+ if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
+ q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
+ q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
+ else
+ q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
if (xe_exec_queue_is_parallel(q)) {
q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
@@ -305,102 +309,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
return q->ops->set_timeslice(q, value);
}
-static int exec_queue_set_preemption_timeout(struct xe_device *xe,
- struct xe_exec_queue *q, u64 value,
- bool create)
-{
- u32 min = 0, max = 0;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_preempt_timeout(q, value);
-}
-
-static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
- return -EINVAL;
-
- if (value)
- q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
- else
- q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
-
- return 0;
-}
-
-static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- u32 min = 0, max = 0;
-
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_job_timeout(q, value);
-}
-
-static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_trigger = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_notify = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- if (value > DRM_XE_ACC_GRANULARITY_64M)
- return -EINVAL;
-
- q->usm.acc_granularity = value;
-
- return 0;
-}
-
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value, bool create);
@@ -408,12 +316,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -432,10 +334,15 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, ext.property >=
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
- XE_IOCTL_DBG(xe, ext.pad))
+ XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+ if (!exec_queue_set_property_funcs[idx])
+ return -EINVAL;
+
return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}
@@ -699,9 +606,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe,
- xe_vm_in_lr_mode(vm) ? 0 :
- EXEC_QUEUE_FLAG_PERSISTENT);
+ args->width, hwe, 0);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(q))
@@ -723,8 +628,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
goto kill_exec_queue;
}
- q->persistent.xef = xef;
-
mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock);
@@ -867,10 +770,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
- if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
- xe_exec_queue_kill(q);
- else
- xe_device_add_persistent_exec_queues(xe, q);
+ xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);
@@ -921,20 +821,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
* @q: The exec queue
* @vm: The VM the engine does a bind or exec for
*
- * Get last fence, does not take a ref
+ * Get last fence, takes a ref
*
* Returns: last fence if not signaled, dma fence stub if signaled
*/
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
struct xe_vm *vm)
{
+ struct dma_fence *fence;
+
xe_exec_queue_last_fence_lockdep_assert(q, vm);
if (q->last_fence &&
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
xe_exec_queue_last_fence_put(q, vm);
- return q->last_fence ? q->last_fence : dma_fence_get_stub();
+ fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
}
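
Returning the fence with its refcount already raised closes the window in which the last reference could drop between the lookup and the caller's use. A reduced refcounting sketch (names hypothetical, not the dma_fence API):

#include <stdio.h>

struct fence { int refcount; };

static struct fence *fence_get(struct fence *f)
{
	f->refcount++;		/* caller now owns a reference */
	return f;
}

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		printf("fence freed\n");
}

/* Safe getter: takes the reference itself, so the fence cannot vanish
 * before the caller's matching fence_put(). */
static struct fence *last_fence_get(struct fence *last)
{
	return fence_get(last);
}

int main(void)
{
	struct fence f = { .refcount = 1 };
	struct fence *ref = last_fence_get(&f);

	fence_put(ref);		/* getter's reference */
	fence_put(&f);		/* original reference; now freed */
	return 0;
}
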
/**
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 3d7e704ec3d9..36f4901d8d7e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -52,8 +52,6 @@ struct xe_exec_queue {
struct xe_vm *vm;
/** @class: class of this exec queue */
enum xe_engine_class class;
- /** @priority: priority of this exec queue */
- enum xe_exec_queue_priority priority;
/**
* @logical_mask: logical mask of where job submitted to exec queue can run
*/
@@ -84,6 +82,8 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_VM BIT(4)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
+/* kernel exec_queue only, set priority to highest level */
+#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(6)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -105,16 +105,6 @@ struct xe_exec_queue {
struct xe_guc_exec_queue *guc;
};
- /**
- * @persistent: persistent exec queue state
- */
- struct {
- /** @xef: file which this exec queue belongs to */
- struct xe_file *xef;
- /** @link: link in list of persistent exec queues */
- struct list_head link;
- } persistent;
-
union {
/**
* @parallel: parallel submission state
@@ -142,6 +132,8 @@ struct xe_exec_queue {
u32 timeslice_us;
/** @preempt_timeout_us: preemption timeout in micro-seconds */
u32 preempt_timeout_us;
+ /** @priority: priority of this exec queue */
+ enum xe_exec_queue_priority priority;
} sched_props;
/** @compute: compute exec queue state */
@@ -158,16 +150,6 @@ struct xe_exec_queue {
spinlock_t lock;
} compute;
- /** @usm: unified shared memory state */
- struct {
- /** @acc_trigger: access counter trigger */
- u32 acc_trigger;
- /** @acc_notify: access counter notify */
- u32 acc_notify;
- /** @acc_granularity: access counter granularity */
- u32 acc_granularity;
- } usm;
-
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 96b5224eb478..acb4d9f38fd7 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
{
struct xe_execlist_port *port = exl->port;
- enum xe_exec_queue_priority priority = exl->active_priority;
+ enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
XE_WARN_ON(priority < 0);
@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(xe, q);
drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched);
kfree(exl);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3af2adec1295..35474ddbaf97 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
* USM has its own SA pool so it does not block behind user operations
*/
if (gt_to_xe(gt)->info.has_usm) {
- gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+ struct xe_device *xe = gt_to_xe(gt);
+
+ gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+ IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
if (IS_ERR(gt->usm.bb_pool)) {
err = PTR_ERR(gt->usm.bb_pool);
goto err_force_wake;
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 3adfa6686e7c..e5b0f4ecdbe8 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -196,6 +196,9 @@ void xe_gt_freq_init(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
int err;
+ if (xe->info.skip_guc_pc)
+ return;
+
gt->freq = kobject_create_and_add("freq0", gt->sysfs);
if (!gt->freq) {
drm_warn(&xe->drm, "failed to add freq0 directory to %s\n",
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9358f7336889..9fcae65b6469 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
}
if (xe_gt_is_media_type(gt)) {
- sprintf(gtidle->name, "gt%d-mc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
} else {
- sprintf(gtidle->name, "gt%d-rc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-rc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_rc6_residency;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 77925b35cf8d..8546cd3cc50d 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -480,7 +480,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
* to synchronize with external clients (e.g., firmware), so a semaphore
* register will also need to be taken.
*/
-static void mcr_lock(struct xe_gt *gt)
+static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
{
struct xe_device *xe = gt_to_xe(gt);
int ret = 0;
@@ -500,7 +500,7 @@ static void mcr_lock(struct xe_gt *gt)
drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
}
-static void mcr_unlock(struct xe_gt *gt)
+static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
{
/* Release hardware semaphore - this is done by writing 1 to the register */
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 59a70d2e0a7a..73f08f1924df 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -165,7 +165,8 @@ retry_userptr:
goto unlock_vm;
}
- if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) {
+ if (!xe_vma_is_userptr(vma) ||
+ !xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
downgrade_write(&vm->lock);
write_locked = false;
}
@@ -181,11 +182,13 @@ retry_userptr:
/* TODO: Validate fault */
if (xe_vma_is_userptr(vma) && write_locked) {
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+
spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&vma->userptr.invalidate_link);
+ list_del_init(&uvma->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
- ret = xe_vma_userptr_pin_pages(vma);
+ ret = xe_vma_userptr_pin_pages(uvma);
if (ret)
goto unlock_vm;
@@ -220,7 +223,7 @@ retry_userptr:
dma_fence_put(fence);
if (xe_vma_is_userptr(vma))
- ret = xe_vma_userptr_check_repin(vma);
+ ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
vma->usm.tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
@@ -332,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
return -EPROTO;
asid = FIELD_GET(PFD_ASID, msg[1]);
- pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE];
+ pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 7eef23a00d77..f4c485289dbe 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -247,6 +247,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
xe_gt_assert(gt, vma);
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist) {
+ if (fence)
+ __invalidation_fence_signal(fence);
+
+ return 0;
+ }
+
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
if (!xe->info.has_range_tlb_invalidation) {
@@ -317,6 +325,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
struct drm_printer p = drm_err_printer(__func__);
int ret;
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist)
+ return 0;
+
/*
* XXX: See above, this algorithm only works if seqno are always in
* order
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 482cb0df9f15..0a61390c64a7 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -60,7 +60,12 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
- return GUC_CTL_ENABLE_SLPC;
+ u32 flags = 0;
+
+ if (!guc_to_xe(guc)->info.skip_guc_pc)
+ flags |= GUC_CTL_ENABLE_SLPC;
+
+ return flags;
}
static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index f71085228cb3..d91702592520 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -963,7 +963,9 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc)
struct xe_device *xe = pc_to_xe(pc);
if (xe->info.skip_guc_pc) {
+ xe_device_mem_access_get(xe);
xe_gt_idle_disable_c6(pc_to_gt(pc));
+ xe_device_mem_access_put(xe);
return;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 21ac68e3246f..f22ae717b0b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -421,7 +421,7 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct exec_queue_policy policy;
struct xe_device *xe = guc_to_xe(guc);
- enum xe_exec_queue_priority prio = q->priority;
+ enum xe_exec_queue_priority prio = q->sched_props.priority;
u32 timeslice_us = q->sched_props.timeslice_us;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
@@ -1028,8 +1028,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
@@ -1231,7 +1229,6 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
err = xe_sched_entity_init(&ge->entity, sched);
if (err)
goto err_sched;
- q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
if (xe_exec_queue_is_lr(q))
INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
@@ -1301,15 +1298,15 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
{
struct xe_sched_msg *msg;
- if (q->priority == priority || exec_queue_killed_or_banned(q))
+ if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
return 0;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
+ q->sched_props.priority = priority;
guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
- q->priority = priority;
return 0;
}
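
The reordering in guc_exec_queue_set_priority() above is subtle: the SET_SCHED_PROPS message is consumed asynchronously by init_policies(), which now reads q->sched_props.priority, so the new value must be stored before the message is queued. A minimal userspace sketch of the pattern (names are hypothetical stand-ins for the GuC plumbing):

#include <stdio.h>

struct queue { int priority; };

/* stands in for the asynchronous SET_SCHED_PROPS handler */
static void handle_msg(struct queue *q)
{
	printf("handler sees priority %d\n", q->priority);
}

static int set_priority(struct queue *q, int prio)
{
	q->priority = prio;	/* publish the new value first... */
	handle_msg(q);		/* ...then kick the consumer, as the patch now does */
	return 0;
}

int main(void)
{
	struct queue q = { 0 };

	return set_priority(&q, 2);
}
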
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index a6094c81f2ad..a5de3e7b0bd6 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -217,13 +217,13 @@ struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
if (!fence)
return ERR_PTR(-ENOMEM);
- dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
- ctx->dma_fence_ctx, ctx->next_seqno++);
-
fence->ctx = ctx;
fence->seqno_map = seqno_map;
INIT_LIST_HEAD(&fence->irq_link);
+ dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
+ ctx->dma_fence_ctx, ctx->next_seqno++);
+
trace_xe_hw_fence_create(fence);
return fence;
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 6ef2aa1eae8b..174ed2185481 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -419,7 +419,7 @@ static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0),
- uval, 0);
+ uval, NULL);
}
static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index b7fa3831b684..b38319d2801e 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -21,10 +21,10 @@
#include "xe_map.h"
#include "xe_vm.h"
-#define CTX_VALID (1 << 0)
-#define CTX_PRIVILEGE (1 << 8)
-#define CTX_ADDRESSING_MODE_SHIFT 3
-#define LEGACY_64B_CONTEXT 3
+#define LRC_VALID (1 << 0)
+#define LRC_PRIVILEGE (1 << 8)
+#define LRC_ADDRESSING_MODE_SHIFT 3
+#define LRC_LEGACY_64B_CONTEXT 3
#define ENGINE_CLASS_SHIFT 61
#define ENGINE_INSTANCE_SHIFT 48
@@ -682,8 +682,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
-#define ACC_GRANULARITY_S 20
-#define ACC_NOTIFY_S 16
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -754,23 +752,17 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
if (xe->info.has_asid && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
- (q->usm.acc_granularity <<
- ACC_GRANULARITY_S) | vm->usm.asid);
- if (xe->info.has_usm && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
- (q->usm.acc_notify << ACC_NOTIFY_S) |
- q->usm.acc_trigger);
-
- lrc->desc = CTX_VALID;
- lrc->desc |= LEGACY_64B_CONTEXT << CTX_ADDRESSING_MODE_SHIFT;
+ xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
+
+ lrc->desc = LRC_VALID;
+ lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
/* TODO: Priority */
/* While this appears to have something about privileged batches or
* some such, it really just means PPGTT mode.
*/
if (vm)
- lrc->desc |= CTX_PRIVILEGE;
+ lrc->desc |= LRC_PRIVILEGE;
if (GRAPHICS_VERx100(xe) < 1250) {
lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
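
With the CTX_* defines renamed to LRC_*, the descriptor for a VM-backed context is LRC_VALID, legacy 64-bit addressing, and LRC_PRIVILEGE (which, per the comment above, really means PPGTT mode). A quick standalone check of the resulting bit pattern, using the values from the hunk (engine class/instance bits omitted):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LRC_VALID                 (1 << 0)
#define LRC_PRIVILEGE             (1 << 8)
#define LRC_ADDRESSING_MODE_SHIFT 3
#define LRC_LEGACY_64B_CONTEXT    3

int main(void)
{
	uint64_t desc = LRC_VALID;

	desc |= (uint64_t)LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
	desc |= LRC_PRIVILEGE;	/* only when a vm is attached */
	printf("desc = 0x%" PRIx64 "\n", desc);	/* prints 0x119 */
	return 0;
}
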
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index adf1dab5eba2..70480c305602 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -62,6 +62,8 @@ struct xe_migrate {
* out of the pt_bo.
*/
struct drm_suballoc_manager vm_update_sa;
+ /** @min_chunk_size: For dgfx, minimum chunk size */
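placeholder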
+ u64 min_chunk_size;
};
#define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
@@ -168,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
- if (xe->info.has_usm) {
- batch = tile->primary_gt->usm.bb_pool->bo;
- m->usm_batch_base_ofs = m->batch_base_ofs;
- }
-
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
@@ -183,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
entry);
level++;
}
+ if (xe->info.has_usm) {
+ xe_tile_assert(tile, batch->size == SZ_1M);
+
+ batch = tile->primary_gt->usm.bb_pool->bo;
+ m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+ xe_tile_assert(tile, batch->size == SZ_512K);
+
+ for (i = 0; i < batch->size;
+ i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+ XE_PAGE_SIZE) {
+ entry = vm->pt_ops->pte_encode_bo(batch, i,
+ pat_index, 0);
+
+ xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+ entry);
+ level++;
+ }
+ }
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
@@ -344,7 +359,8 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT);
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY);
} else {
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
@@ -355,8 +371,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
xe_vm_close_and_put(vm);
return ERR_CAST(m->q);
}
- if (xe->info.has_usm)
- m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
mutex_init(&m->job_mutex);
@@ -364,6 +378,19 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (err)
return ERR_PTR(err);
+ if (IS_DGFX(xe)) {
+ if (xe_device_has_flat_ccs(xe))
+ /* min chunk size corresponds to 4K of CCS Metadata */
+ m->min_chunk_size = SZ_4K * SZ_64K /
+ xe_device_ccs_bytes(xe, SZ_64K);
+ else
+ /* Somewhat arbitrary to avoid a huge number of blits */
+ m->min_chunk_size = SZ_64K;
+ m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
+ drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
+ (unsigned long long)m->min_chunk_size);
+ }
+
return m;
}
@@ -375,16 +402,35 @@ static u64 max_mem_transfer_per_pass(struct xe_device *xe)
return MAX_PREEMPTDISABLE_TRANSFER;
}
-static u64 xe_migrate_res_sizes(struct xe_device *xe, struct xe_res_cursor *cur)
+static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
{
- /*
- * For VRAM we use identity mapped pages so we are limited to current
- * cursor size. For system we program the pages ourselves so we have no
- * such limitation.
- */
- return min_t(u64, max_mem_transfer_per_pass(xe),
- mem_type_is_vram(cur->mem_type) ? cur->size :
- cur->remaining);
+ struct xe_device *xe = tile_to_xe(m->tile);
+ u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
+
+ if (mem_type_is_vram(cur->mem_type)) {
+ /*
+ * VRAM we want to blit in chunks with sizes aligned to
+ * min_chunk_size in order for the offset to CCS metadata to be
+ * page-aligned. If it's the last chunk it may be smaller.
+ *
+ * Another constraint is that we need to limit the blit to
+ * the VRAM block size, unless size is smaller than
+ * min_chunk_size.
+ */
+ u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
+
+ size = min_t(u64, size, chunk);
+ if (size > m->min_chunk_size)
+ size = round_down(size, m->min_chunk_size);
+ }
+
+ return size;
+}
+
+static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
+{
+ /* If the chunk is not fragmented, allow identity map. */
+ return cur->size >= size;
}
static u32 pte_update_size(struct xe_migrate *m,
@@ -397,7 +443,12 @@ static u32 pte_update_size(struct xe_migrate *m,
u32 cmds = 0;
*L0_pt = pt_ofs;
- if (!is_vram) {
+ if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
+ /* Offset into identity map. */
+ *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
+ cur->start + vram_region_gpu_offset(res));
+ cmds += cmd_size;
+ } else {
/* Clip L0 to available size */
u64 size = min(*L0, (u64)avail_pts * SZ_2M);
u64 num_4k_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
@@ -413,11 +464,6 @@ static u32 pte_update_size(struct xe_migrate *m,
/* Each chunk has a single blit command */
cmds += cmd_size;
- } else {
- /* Offset into identity map. */
- *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
- cur->start + vram_region_gpu_offset(res));
- cmds += cmd_size;
}
return cmds;
@@ -427,10 +473,10 @@ static void emit_pte(struct xe_migrate *m,
struct xe_bb *bb, u32 at_pt,
bool is_vram, bool is_comp_pte,
struct xe_res_cursor *cur,
- u32 size, struct xe_bo *bo)
+ u32 size, struct ttm_resource *res)
{
struct xe_device *xe = tile_to_xe(m->tile);
-
+ struct xe_vm *vm = m->q->vm;
u16 pat_index;
u32 ptes;
u64 ofs = at_pt * XE_PAGE_SIZE;
@@ -439,17 +485,10 @@ static void emit_pte(struct xe_migrate *m,
/* Indirect access needs compression enabled uncached PAT index */
if (GRAPHICS_VERx100(xe) >= 2000)
pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
- xe->pat.idx[XE_CACHE_NONE];
+ xe->pat.idx[XE_CACHE_WB];
else
pat_index = xe->pat.idx[XE_CACHE_WB];
- /*
- * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently
- * we're only emitting VRAM PTEs during sanity tests, so when
- * that's moved to a Kunit test, we should condition VRAM PTEs
- * on running tests.
- */
-
ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
while (ptes) {
@@ -469,20 +508,22 @@ static void emit_pte(struct xe_migrate *m,
addr = xe_res_dma(cur) & PAGE_MASK;
if (is_vram) {
- /* Is this a 64K PTE entry? */
- if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
- !(cur_ofs & (16 * 8 - 1))) {
- xe_tile_assert(m->tile, IS_ALIGNED(addr, SZ_64K));
+ if (vm->flags & XE_VM_FLAG_64K) {
+ u64 va = cur_ofs * XE_PAGE_SIZE / 8;
+
+ xe_assert(xe, (va & (SZ_64K - 1)) ==
+ (addr & (SZ_64K - 1)));
+
flags |= XE_PTE_PS64;
}
- addr += vram_region_gpu_offset(bo->ttm.resource);
+ addr += vram_region_gpu_offset(res);
devmem = true;
}
- addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
- addr, pat_index,
- 0, devmem, flags);
+ addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, pat_index,
+ 0, devmem, flags);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
@@ -694,8 +735,8 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
- src_L0 = xe_migrate_res_sizes(xe, &src_it);
- dst_L0 = xe_migrate_res_sizes(xe, &dst_it);
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+ dst_L0 = xe_migrate_res_sizes(m, &dst_it);
drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
pass++, src_L0, dst_L0);
@@ -716,6 +757,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
&ccs_ofs, &ccs_pt, 0,
2 * avail_pts,
avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
}
/* Add copy commands size here */
@@ -728,20 +770,20 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
goto err_sync;
}
- if (!src_is_vram)
- emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
- src_bo);
- else
+ if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
xe_res_next(&src_it, src_L0);
-
- if (!dst_is_vram)
- emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
- dst_bo);
else
+ emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
+ &src_it, src_L0, src);
+
+ if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
xe_res_next(&dst_it, src_L0);
+ else
+ emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
+ &dst_it, src_L0, dst);
if (copy_system_ccs)
- emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);
+ emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
@@ -950,7 +992,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
bool usm = xe->info.has_usm;
u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
- clear_L0 = xe_migrate_res_sizes(xe, &src_it);
+ clear_L0 = xe_migrate_res_sizes(m, &src_it);
drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
@@ -977,12 +1019,12 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
size -= clear_L0;
/* Preemption is enabled again by the ring ops. */
- if (!clear_vram) {
- emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
- bo);
- } else {
+ if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
xe_res_next(&src_it, clear_L0);
- }
+ else
+ emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+ &src_it, clear_L0, dst);
+
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
@@ -1175,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
}
if (q) {
fence = xe_exec_queue_last_fence_get(q, vm);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ dma_fence_put(fence);
return false;
+ }
+ dma_fence_put(fence);
}
return true;
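
The min_chunk_size computed in xe_migrate_init() keys off how much VRAM 4K of CCS metadata covers. Assuming the common 1:256 main-memory-to-CCS ratio (i.e. xe_device_ccs_bytes(xe, SZ_64K) returning 256 — an assumption here, the real value is hardware-dependent), the formula works out to 1 MiB:

#include <stdio.h>

#define SZ_4K  4096ULL
#define SZ_64K 65536ULL

int main(void)
{
	unsigned long long ccs_bytes_per_64k = 256;	/* assumed 1:256 ratio */
	unsigned long long min_chunk = SZ_4K * SZ_64K / ccs_bytes_per_64k;

	/* xe_migrate_res_sizes() then rounds VRAM blits down to a
	 * multiple of this so the CCS offset stays page-aligned. */
	printf("min_chunk_size = 0x%llx (%llu MiB)\n", min_chunk, min_chunk >> 20);
	return 0;
}
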
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index f660cfb79f50..02f7808f28ca 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -105,7 +105,7 @@ static void xe_resize_vram_bar(struct xe_device *xe)
pci_bus_for_each_resource(root, root_res, i) {
if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
- root_res->start > 0x100000000ull)
+ (u64)root_res->start > 0x100000000ul)
break;
}
@@ -272,8 +272,8 @@ int xe_mmio_probe_vram(struct xe_device *xe)
drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
- &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + tile->mem.vram.actual_physical_size,
- &tile->mem.vram.io_start, tile->mem.vram.io_start + tile->mem.vram.io_size);
+ &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
+ &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
/* calculate total size using tile size to get the correct HW sizing */
total_size += tile_size;
@@ -303,7 +303,7 @@ void xe_mmio_probe_tiles(struct xe_device *xe)
u8 id, tile_count = xe->info.tile_count;
struct xe_gt *gt = xe_root_mmio_gt(xe);
struct xe_tile *tile;
- void *regs;
+ void __iomem *regs;
u32 mtcfg;
if (tile_count == 1)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index de1030a47588..6653c045f3c9 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -20,8 +20,8 @@
struct xe_pt_dir {
struct xe_pt pt;
- /** @dir: Directory structure for the xe_pt_walk functionality */
- struct xe_ptw_dir dir;
+ /** @children: Array of page-table child nodes */
+ struct xe_ptw *children[XE_PDES];
};
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -44,7 +44,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->dir.entries[index], struct xe_pt, base);
+ return container_of(pt_dir->children[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -65,6 +65,14 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
XE_PTE_NULL;
}
+static void xe_pt_free(struct xe_pt *pt)
+{
+ if (pt->level)
+ kfree(as_xe_pt_dir(pt));
+ else
+ kfree(pt);
+}
+
/**
* xe_pt_create() - Create a page-table.
* @vm: The vm to create for.
@@ -85,15 +93,19 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
- size_t size;
int err;
- size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) +
- XE_PDES * sizeof(struct xe_ptw *);
- pt = kzalloc(size, GFP_KERNEL);
+ if (level) {
+ struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+
+ pt = (dir) ? &dir->pt : NULL;
+ } else {
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ }
if (!pt)
return ERR_PTR(-ENOMEM);
+ pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -106,8 +118,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
goto err_kfree;
}
pt->bo = bo;
- pt->level = level;
- pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
+ pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -116,7 +127,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
return pt;
err_kfree:
- kfree(pt);
+ xe_pt_free(pt);
return ERR_PTR(err);
}
@@ -193,7 +204,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
deferred);
}
}
- kfree(pt);
+ xe_pt_free(pt);
}
/**
@@ -358,7 +369,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
struct iosys_map *map = &parent->bo->vmap;
if (unlikely(xe_child))
- parent->base.dir->entries[offset] = &xe_child->base;
+ parent->base.children[offset] = &xe_child->base;
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -488,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
* this device *requires* 64K PTE size for VRAM, fail.
*/
if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
pte |= XE_PTE_PS64;
- else if (XE_WARN_ON(xe_walk->needs_64K))
+ } else if (XE_WARN_ON(xe_walk->needs_64K)) {
return -EINVAL;
+ }
}
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
@@ -534,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
*child = &xe_child->base;
/*
- * Prefer the compact pagetable layout for L0 if possible.
+ * Prefer the compact pagetable layout for L0 if possible. Only
+ * possible if VMA covers entire 2MB region as compact 64k and
+ * 4k pages cannot be mixed within a 2MB region.
* TODO: Suballocate the pt bo to avoid wasting a lot of
* memory.
*/
if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
flags |= XE_PDE_64K;
xe_child->is_compact = true;
}
@@ -618,8 +634,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (!xe_vma_is_null(vma)) {
if (xe_vma_is_userptr(vma))
- xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
- &curs);
+ xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+ xe_vma_size(vma), &curs);
else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
@@ -853,7 +869,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
xe_pt_destroy(xe_pt_entry(pt_dir, j_),
xe_vma_vm(vma)->flags, deferred);
- pt_dir->dir.entries[j_] = &newpte->base;
+ pt_dir->children[j_] = &newpte->base;
}
kfree(entries[i].pt_entries);
}
@@ -906,17 +922,17 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
- u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2;
+ u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
static u32 count;
if (count++ % divisor == divisor - 1) {
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- vma->userptr.divisor = divisor << 1;
+ uvma->userptr.divisor = divisor << 1;
spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&vma->userptr.invalidate_link,
+ list_move_tail(&uvma->userptr.invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
return true;
@@ -927,7 +943,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
#else
-static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
return false;
}
@@ -1000,9 +1016,9 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
struct xe_pt_migrate_pt_update *userptr_update =
container_of(pt_update, typeof(*userptr_update), base);
- struct xe_vma *vma = pt_update->vma;
- unsigned long notifier_seq = vma->userptr.notifier_seq;
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
+ unsigned long notifier_seq = uvma->userptr.notifier_seq;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
int err = xe_pt_vm_dependencies(pt_update->job,
&vm->rftree[pt_update->tile_id],
pt_update->start,
@@ -1023,7 +1039,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
*/
do {
down_read(&vm->userptr.notifier_lock);
- if (!mmu_interval_read_retry(&vma->userptr.notifier,
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
notifier_seq))
break;
@@ -1032,11 +1048,11 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
if (userptr_update->bind)
return -EAGAIN;
- notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
+ notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
} while (true);
/* Inject errors to test whether they are handled correctly */
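placeholder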
- if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) {
+ if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
up_read(&vm->userptr.notifier_lock);
return -EAGAIN;
}
@@ -1297,7 +1313,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
vma->tile_present |= BIT(tile->id);
if (bind_pt_update.locked) {
- vma->userptr.initial_bind = true;
+ to_userptr_vma(vma)->userptr.initial_bind = true;
up_read(&vm->userptr.notifier_lock);
xe_bo_put_commit(&deferred);
}
@@ -1507,7 +1523,7 @@ xe_pt_commit_unbind(struct xe_vma *vma,
xe_pt_destroy(xe_pt_entry(pt_dir, i),
xe_vma_vm(vma)->flags, deferred);
- pt_dir->dir.entries[i] = NULL;
+ pt_dir->children[i] = NULL;
}
}
}
@@ -1642,7 +1658,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
if (!vma->tile_present) {
spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&vma->userptr.invalidate_link);
+ list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
}
up_read(&vm->userptr.notifier_lock);
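
The xe_pt_dir rework above trades the old trailing xe_ptw_dir allocation for a fixed children[XE_PDES] array embedded in the subclass, recovered with container_of(). A self-contained userspace model of the layout (XE_PDES is 512 upstream, one entry per 8-byte PTE in a 4 KiB page):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define XE_PDES 512
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct xe_ptw { struct xe_ptw **children; };
struct xe_pt { struct xe_ptw base; unsigned int level; };
struct xe_pt_dir { struct xe_pt pt; struct xe_ptw *children[XE_PDES]; };

static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
{
	return container_of(pt, struct xe_pt_dir, pt);
}

int main(void)
{
	struct xe_pt_dir *dir = calloc(1, sizeof(*dir));

	if (!dir)
		return 1;
	dir->pt.level = 1;
	dir->pt.base.children = dir->children;	/* as xe_pt_create() now wires it */
	printf("round-trip ok: %d\n", as_xe_pt_dir(&dir->pt) == dir);
	free(dir);
	return 0;
}
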
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index 8f6c8d063f39..b8b3d2aea492 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,7 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->dir ? parent->dir->entries : NULL;
+ struct xe_ptw **entries = parent->children ? parent->children : NULL;
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index ec3d1e9efa6d..5ecc4d2f0f65 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -8,28 +8,15 @@
#include <linux/pagewalk.h>
#include <linux/types.h>
-struct xe_ptw_dir;
-
/**
* struct xe_ptw - base class for driver pagetable subclassing.
- * @dir: Pointer to an array of children if any.
+ * @children: Pointer to an array of children if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
- * embed the xe_ptw_dir::entries array in the same allocation.
+ * embed an array of xe_ptw pointers.
*/
struct xe_ptw {
- struct xe_ptw_dir *dir;
-};
-
-/**
- * struct xe_ptw_dir - page directory structure
- * @entries: Array holding page directory children.
- *
- * It is the responsibility of the user to ensure @entries is
- * correctly sized.
- */
-struct xe_ptw_dir {
- struct xe_ptw *entries[0];
+ struct xe_ptw **children;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 9b35673b286c..7e924faeeea0 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -459,21 +459,21 @@ static size_t calc_topo_query_size(struct xe_device *xe)
sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
}
-static void __user *copy_mask(void __user *ptr,
- struct drm_xe_query_topology_mask *topo,
- void *mask, size_t mask_size)
+static int copy_mask(void __user **ptr,
+ struct drm_xe_query_topology_mask *topo,
+ void *mask, size_t mask_size)
{
topo->num_bytes = mask_size;
- if (copy_to_user(ptr, topo, sizeof(*topo)))
- return ERR_PTR(-EFAULT);
- ptr += sizeof(topo);
+ if (copy_to_user(*ptr, topo, sizeof(*topo)))
+ return -EFAULT;
+ *ptr += sizeof(topo);
- if (copy_to_user(ptr, mask, mask_size))
- return ERR_PTR(-EFAULT);
- ptr += mask_size;
+ if (copy_to_user(*ptr, mask, mask_size))
+ return -EFAULT;
+ *ptr += mask_size;
- return ptr;
+ return 0;
}
static int query_gt_topology(struct xe_device *xe,
@@ -493,28 +493,28 @@ static int query_gt_topology(struct xe_device *xe,
}
for_each_gt(gt, xe, id) {
+ int err;
+
topo.gt_id = id;
topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
- query_ptr = copy_mask(query_ptr, &topo,
- gt->fuse_topo.g_dss_mask,
- sizeof(gt->fuse_topo.g_dss_mask));
- if (IS_ERR(query_ptr))
- return PTR_ERR(query_ptr);
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
+ sizeof(gt->fuse_topo.g_dss_mask));
+ if (err)
+ return err;
topo.type = DRM_XE_TOPO_DSS_COMPUTE;
- query_ptr = copy_mask(query_ptr, &topo,
- gt->fuse_topo.c_dss_mask,
- sizeof(gt->fuse_topo.c_dss_mask));
- if (IS_ERR(query_ptr))
- return PTR_ERR(query_ptr);
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
+ sizeof(gt->fuse_topo.c_dss_mask));
+ if (err)
+ return err;
topo.type = DRM_XE_TOPO_EU_PER_DSS;
- query_ptr = copy_mask(query_ptr, &topo,
- gt->fuse_topo.eu_mask_per_dss,
- sizeof(gt->fuse_topo.eu_mask_per_dss));
- if (IS_ERR(query_ptr))
- return PTR_ERR(query_ptr);
+ err = copy_mask(&query_ptr, &topo,
+ gt->fuse_topo.eu_mask_per_dss,
+ sizeof(gt->fuse_topo.eu_mask_per_dss));
+ if (err)
+ return err;
}
return 0;
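
The copy_mask() change fixes a type abuse: the old version funneled errors through a __user pointer via ERR_PTR(), but user pointers can legitimately hold values in the error range and the cast discards the __user annotation. The new shape — cursor out-parameter plus int return — is sketched here with memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_out(char **cursor, const char *end, const void *src, size_t n)
{
	if ((size_t)(end - *cursor) < n)
		return -EFAULT;		/* copy_to_user() failure path */
	memcpy(*cursor, src, n);
	*cursor += n;			/* advance the caller's cursor on success */
	return 0;
}

int main(void)
{
	char buf[16], *p = buf;
	int err;

	err = copy_out(&p, buf + sizeof(buf), "topo", 4);
	if (!err)
		err = copy_out(&p, buf + sizeof(buf), "mask", 4);
	printf("err=%d, wrote %zu bytes\n", err, (size_t)(p - buf));
	return 0;
}
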
diff --git a/drivers/gpu/drm/xe/xe_range_fence.c b/drivers/gpu/drm/xe/xe_range_fence.c
index d35d9ec58e86..372378e89e98 100644
--- a/drivers/gpu/drm/xe/xe_range_fence.c
+++ b/drivers/gpu/drm/xe/xe_range_fence.c
@@ -151,6 +151,11 @@ xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last)
return xe_range_fence_tree_iter_next(rfence, start, last);
}
+static void xe_range_fence_free(struct xe_range_fence *rfence)
+{
+ kfree(rfence);
+}
+
const struct xe_range_fence_ops xe_range_fence_kfree_ops = {
- .free = (void (*)(struct xe_range_fence *rfence)) kfree,
+ .free = xe_range_fence_free,
};
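
Casting kfree to void (*)(struct xe_range_fence *) and calling through that pointer is undefined behaviour in C, and kernel control-flow-integrity schemes reject such mismatched indirect calls at runtime even when the ABI happens to line up; hence the one-line typed wrapper. The same hazard, modeled in userspace with free():

#include <stdio.h>
#include <stdlib.h>

struct xe_range_fence { int placeholder; };

/* typed wrapper: prototype matches the ops slot exactly */
static void xe_range_fence_free(struct xe_range_fence *rfence)
{
	free(rfence);	/* kfree() in the kernel original */
}

struct ops { void (*free)(struct xe_range_fence *); };

static const struct ops kfree_ops = { .free = xe_range_fence_free };

int main(void)
{
	struct xe_range_fence *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	kfree_ops.free(f);
	puts("freed through a correctly typed pointer");
	return 0;
}
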
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 01106a1156ad..4e2ccad0e52f 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
struct dma_fence *fence;
fence = xe_exec_queue_last_fence_get(job->q, vm);
- dma_fence_get(fence);
return drm_sched_job_add_dependency(&job->drm, fence);
}
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index e4c220cf9115..02c9577fe418 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -19,7 +19,7 @@
#include "xe_macros.h"
#include "xe_sched_job_types.h"
-struct user_fence {
+struct xe_user_fence {
struct xe_device *xe;
struct kref refcount;
struct dma_fence_cb cb;
@@ -27,31 +27,32 @@ struct user_fence {
struct mm_struct *mm;
u64 __user *addr;
u64 value;
+ int signalled;
};
static void user_fence_destroy(struct kref *kref)
{
- struct user_fence *ufence = container_of(kref, struct user_fence,
+ struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
refcount);
mmdrop(ufence->mm);
kfree(ufence);
}
-static void user_fence_get(struct user_fence *ufence)
+static void user_fence_get(struct xe_user_fence *ufence)
{
kref_get(&ufence->refcount);
}
-static void user_fence_put(struct user_fence *ufence)
+static void user_fence_put(struct xe_user_fence *ufence)
{
kref_put(&ufence->refcount, user_fence_destroy);
}
-static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
- u64 value)
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+ u64 value)
{
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
@@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
static void user_fence_worker(struct work_struct *w)
{
- struct user_fence *ufence = container_of(w, struct user_fence, worker);
+ struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
@@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w)
}
wake_up_all(&ufence->xe->ufence_wq);
+ WRITE_ONCE(ufence->signalled, 1);
user_fence_put(ufence);
}
-static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
INIT_WORK(&ufence->worker, user_fence_worker);
queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- struct user_fence *ufence = container_of(cb, struct user_fence, cb);
+ struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
kick_ufence(ufence, fence);
}
@@ -307,7 +309,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
/* Easy case... */
if (!num_in_fence) {
fence = xe_exec_queue_last_fence_get(q, vm);
- dma_fence_get(fence);
return fence;
}
@@ -322,7 +323,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
}
}
fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
- dma_fence_get(fences[current_fence - 1]);
cf = dma_fence_array_create(num_in_fence, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
@@ -342,3 +342,39 @@ err_out:
return ERR_PTR(-ENOMEM);
}
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+ user_fence_get(sync->ufence);
+
+ return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+ user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 if not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+ return READ_ONCE(ufence->signalled);
+}
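
The three helpers exported above give xe_vm.c a way to pin a user fence to a VMA across a bind, poll it, and release it on unbind. A compact userspace model of that lifecycle — a refcount plus a tear-free signalled flag, with C11 atomics standing in for kref and the WRITE_ONCE()/READ_ONCE() pair:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ufence {
	atomic_int refcount;
	atomic_int signalled;
};

static struct ufence *ufence_get(struct ufence *f)
{
	atomic_fetch_add_explicit(&f->refcount, 1, memory_order_relaxed);
	return f;
}

static void ufence_put(struct ufence *f)
{
	if (atomic_fetch_sub_explicit(&f->refcount, 1, memory_order_acq_rel) == 1)
		free(f);
}

int main(void)
{
	struct ufence *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	atomic_store(&f->refcount, 1);
	ufence_get(f);				/* VMA takes a reference at bind */
	atomic_store(&f->signalled, 1);		/* worker signals after the write */
	printf("status=%d\n", atomic_load(&f->signalled));	/* unbind may proceed */
	ufence_put(f);				/* VMA drops its reference */
	ufence_put(f);				/* creator's reference */
	return 0;
}
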
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index d284afbe917c..0fd0d51208e6 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -33,4 +33,13 @@ struct dma_fence *
xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
struct xe_exec_queue *q, struct xe_vm *vm);
+static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
+{
+ return !!sync->ufence;
+}
+
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
+void xe_sync_ufence_put(struct xe_user_fence *ufence);
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h
index 852db5e7884f..30ac3f51993b 100644
--- a/drivers/gpu/drm/xe/xe_sync_types.h
+++ b/drivers/gpu/drm/xe/xe_sync_types.h
@@ -18,7 +18,7 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
u32 type;
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 95163c303f3e..4ddc55527f9a 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -12,6 +12,7 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
+#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(u64, fence)
+ __field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
- __entry->fence = (u64)fence;
+ __entry->fence = fence;
__entry->seqno = fence->seqno;
),
- TP_printk("fence=0x%016llx, seqno=%d",
+ TP_printk("fence=%p, seqno=%d",
__entry->fence, __entry->seqno)
);
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_STRUCT__entry(
__field(size_t, size)
__field(u32, flags)
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
),
TP_fast_assign(
__entry->size = bo->size;
__entry->flags = bo->flags;
- __entry->vm = (unsigned long)bo->vm;
+ __entry->vm = bo->vm;
),
- TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+ TP_printk("size=%zu, flags=0x%02x, vm=%p",
__entry->size, __entry->flags, __entry->vm)
);
@@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
-DEFINE_EVENT(xe_bo, xe_bo_move,
- TP_PROTO(struct xe_bo *bo),
- TP_ARGS(bo)
+TRACE_EVENT(xe_bo_move,
+ TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
+ bool move_lacks_source),
+ TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
+ TP_STRUCT__entry(
+ __field(struct xe_bo *, bo)
+ __field(size_t, size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ __array(char, device_id, 12)
+ __field(bool, move_lacks_source)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->size = bo->size;
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+ __entry->move_lacks_source = move_lacks_source;
+ ),
+ TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+ __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+ xe_mem_type_to_name[__entry->old_placement],
+ xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);
DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_STRUCT__entry(
__field(u64, ctx)
__field(u32, seqno)
- __field(u64, fence)
+ __field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
- __entry->fence = (unsigned long)fence;
+ __entry->fence = fence;
),
- TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+ TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
__entry->ctx, __entry->fence, __entry->seqno)
);
@@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_ARGS(vma),
TP_STRUCT__entry(
- __field(u64, vma)
+ __field(struct xe_vma *, vma)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -373,14 +396,14 @@ DECLARE_EVENT_CLASS(xe_vma,
),
TP_fast_assign(
- __entry->vma = (unsigned long)vma;
+ __entry->vma = vma;
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+ TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
__entry->vma, __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
),
TP_fast_assign(
- __entry->vm = (unsigned long)vm;
+ __entry->vm = vm;
__entry->asid = vm->usm.asid;
),
- TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
+ TP_printk("vm=%p, asid=0x%05x", __entry->vm,
__entry->asid)
);
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index d2b00d0bf1e2..e5d7d5e2bec1 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -31,7 +31,7 @@ struct xe_ttm_stolen_mgr {
/* GPU base offset */
resource_size_t stolen_base;
- void *__iomem mapping;
+ void __iomem *mapping;
};
static inline struct xe_ttm_stolen_mgr *
@@ -275,7 +275,7 @@ static int __xe_ttm_stolen_io_mem_reserve_bar2(struct xe_device *xe,
drm_WARN_ON(&xe->drm, !(mem->placement & TTM_PL_FLAG_CONTIGUOUS));
if (mem->placement & TTM_PL_FLAG_CONTIGUOUS && mgr->mapping)
- mem->bus.addr = (u8 *)mgr->mapping + mem->bus.offset;
+ mem->bus.addr = (u8 __force *)mgr->mapping + mem->bus.offset;
mem->bus.offset += mgr->io_base;
mem->bus.is_iomem = true;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 0cfe7289b97e..3b21afe5b488 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -37,8 +37,6 @@
#include "generated/xe_wa_oob.h"
#include "xe_wa.h"
-#define TEST_VM_ASYNC_OPS_ERROR
-
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
return vm->gpuvm.r_obj;
@@ -46,7 +44,7 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
/**
* xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @vma: The userptr vma
+ * @uvma: The userptr vma
*
* Check if the userptr vma has been invalidated since last successful
* repin. The check is advisory only and the function can be called
@@ -56,15 +54,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
*
* Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
*/
-int xe_vma_userptr_check_repin(struct xe_vma *vma)
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
- return mmu_interval_check_retry(&vma->userptr.notifier,
- vma->userptr.notifier_seq) ?
+ return mmu_interval_check_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq) ?
-EAGAIN : 0;
}
-int xe_vma_userptr_pin_pages(struct xe_vma *vma)
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_device *xe = vm->xe;
const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
@@ -80,30 +80,30 @@ retry:
if (vma->gpuva.flags & XE_VMA_DESTROYED)
return 0;
- notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
- if (notifier_seq == vma->userptr.notifier_seq)
+ notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+ if (notifier_seq == userptr->notifier_seq)
return 0;
pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
- if (vma->userptr.sg) {
+ if (userptr->sg) {
dma_unmap_sgtable(xe->drm.dev,
- vma->userptr.sg,
+ userptr->sg,
read_only ? DMA_TO_DEVICE :
DMA_BIDIRECTIONAL, 0);
- sg_free_table(vma->userptr.sg);
- vma->userptr.sg = NULL;
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
}
pinned = ret = 0;
if (in_kthread) {
- if (!mmget_not_zero(vma->userptr.notifier.mm)) {
+ if (!mmget_not_zero(userptr->notifier.mm)) {
ret = -EFAULT;
goto mm_closed;
}
- kthread_use_mm(vma->userptr.notifier.mm);
+ kthread_use_mm(userptr->notifier.mm);
}
while (pinned < num_pages) {
@@ -112,43 +112,40 @@ retry:
num_pages - pinned,
read_only ? 0 : FOLL_WRITE,
&pages[pinned]);
- if (ret < 0) {
- if (in_kthread)
- ret = 0;
+ if (ret < 0)
break;
- }
pinned += ret;
ret = 0;
}
if (in_kthread) {
- kthread_unuse_mm(vma->userptr.notifier.mm);
- mmput(vma->userptr.notifier.mm);
+ kthread_unuse_mm(userptr->notifier.mm);
+ mmput(userptr->notifier.mm);
}
mm_closed:
if (ret)
goto out;
- ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+ ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
pinned, 0,
(u64)pinned << PAGE_SHIFT,
xe_sg_segment_size(xe->drm.dev),
GFP_KERNEL);
if (ret) {
- vma->userptr.sg = NULL;
+ userptr->sg = NULL;
goto out;
}
- vma->userptr.sg = &vma->userptr.sgt;
+ userptr->sg = &userptr->sgt;
- ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
+ ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
read_only ? DMA_TO_DEVICE :
DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC |
DMA_ATTR_NO_KERNEL_MAPPING);
if (ret) {
- sg_free_table(vma->userptr.sg);
- vma->userptr.sg = NULL;
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
goto out;
}
@@ -167,8 +164,8 @@ out:
kvfree(pages);
if (!(ret < 0)) {
- vma->userptr.notifier_seq = notifier_seq;
- if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
+ userptr->notifier_seq = notifier_seq;
+ if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
goto retry;
}
@@ -335,13 +332,13 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
down_write(&vm->lock);
err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
- return err;
+ goto out_up_write;
pfence = xe_preempt_fence_create(q, q->compute.context,
++q->compute.seqno);
if (!pfence) {
err = -ENOMEM;
- goto out_unlock;
+ goto out_fini;
}
list_add(&q->compute.link, &vm->preempt.exec_queues);
@@ -364,8 +361,9 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
up_read(&vm->userptr.notifier_lock);
-out_unlock:
+out_fini:
drm_exec_fini(exec);
+out_up_write:
up_write(&vm->lock);
return err;
@@ -634,7 +632,9 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
+ struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+ struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -650,7 +650,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
mmu_interval_set_seq(mni, cur_seq);
/* No need to stop gpu access if the userptr is not yet bound. */
- if (!vma->userptr.initial_bind) {
+ if (!userptr->initial_bind) {
up_write(&vm->userptr.notifier_lock);
return true;
}
@@ -662,7 +662,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
if (!xe_vm_in_fault_mode(vm) &&
!(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&vma->userptr.invalidate_link,
+ list_move_tail(&userptr->invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
}
@@ -702,7 +702,7 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
int xe_vm_userptr_pin(struct xe_vm *vm)
{
- struct xe_vma *vma, *next;
+ struct xe_userptr_vma *uvma, *next;
int err = 0;
LIST_HEAD(tmp_evict);
@@ -710,22 +710,23 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
- list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
+ list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
- list_del_init(&vma->userptr.invalidate_link);
- list_move_tail(&vma->combined_links.userptr,
+ list_del_init(&uvma->userptr.invalidate_link);
+ list_move_tail(&uvma->userptr.repin_link,
&vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
/* Pin and move to temporary list */
- list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
- combined_links.userptr) {
- err = xe_vma_userptr_pin_pages(vma);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ err = xe_vma_userptr_pin_pages(uvma);
if (err < 0)
return err;
- list_move_tail(&vma->combined_links.userptr, &vm->rebind_list);
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
}
return 0;
@@ -781,6 +782,14 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
return fence;
}
+static void xe_vma_free(struct xe_vma *vma)
+{
+ if (xe_vma_is_userptr(vma))
+ kfree(to_userptr_vma(vma));
+ else
+ kfree(vma);
+}
+
#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
#define VMA_CREATE_FLAG_IS_NULL BIT(1)
@@ -799,14 +808,26 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
- if (!bo && !is_null) /* userptr */
+ /*
+ * Allocate and ensure that the xe_vma_is_userptr() return
+ * matches what was allocated.
+ */
+ if (!bo && !is_null) {
+ struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
+
+ if (!uvma)
+ return ERR_PTR(-ENOMEM);
+
+ vma = &uvma->vma;
+ } else {
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- else
- vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
- GFP_KERNEL);
- if (!vma) {
- vma = ERR_PTR(-ENOMEM);
- return vma;
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_null)
+ vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+ if (bo)
+ vma->gpuva.gem.obj = &bo->ttm.base;
}
INIT_LIST_HEAD(&vma->combined_links.rebind);
@@ -817,8 +838,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.va.range = end - start + 1;
if (read_only)
vma->gpuva.flags |= XE_VMA_READ_ONLY;
- if (is_null)
- vma->gpuva.flags |= DRM_GPUVA_SPARSE;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -835,35 +854,35 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
if (IS_ERR(vm_bo)) {
- kfree(vma);
+ xe_vma_free(vma);
return ERR_CAST(vm_bo);
}
drm_gpuvm_bo_extobj_add(vm_bo);
drm_gem_object_get(&bo->ttm.base);
- vma->gpuva.gem.obj = &bo->ttm.base;
vma->gpuva.gem.offset = bo_offset_or_userptr;
drm_gpuva_link(&vma->gpuva, vm_bo);
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
if (!is_null) {
+ struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
u64 size = end - start + 1;
int err;
- INIT_LIST_HEAD(&vma->userptr.invalidate_link);
+ INIT_LIST_HEAD(&userptr->invalidate_link);
+ INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
- err = mmu_interval_notifier_insert(&vma->userptr.notifier,
+ err = mmu_interval_notifier_insert(&userptr->notifier,
current->mm,
xe_vma_userptr(vma), size,
&vma_userptr_notifier_ops);
if (err) {
- kfree(vma);
- vma = ERR_PTR(err);
- return vma;
+ xe_vma_free(vma);
+ return ERR_PTR(err);
}
- vma->userptr.notifier_seq = LONG_MAX;
+ userptr->notifier_seq = LONG_MAX;
}
xe_vm_get(vm);
@@ -878,14 +897,21 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
struct xe_device *xe = vm->xe;
bool read_only = xe_vma_read_only(vma);
+ if (vma->ufence) {
+ xe_sync_ufence_put(vma->ufence);
+ vma->ufence = NULL;
+ }
+
if (xe_vma_is_userptr(vma)) {
- if (vma->userptr.sg) {
+ struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+
+ if (userptr->sg) {
dma_unmap_sgtable(xe->drm.dev,
- vma->userptr.sg,
+ userptr->sg,
read_only ? DMA_TO_DEVICE :
DMA_BIDIRECTIONAL, 0);
- sg_free_table(vma->userptr.sg);
- vma->userptr.sg = NULL;
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
}
/*
@@ -893,7 +919,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
* the notifier until we're sure the GPU is not accessing
* them anymore
*/
- mmu_interval_notifier_remove(&vma->userptr.notifier);
+ mmu_interval_notifier_remove(&userptr->notifier);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma)) {
xe_vm_put(vm);
@@ -901,7 +927,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
xe_bo_put(xe_vma_bo(vma));
}
- kfree(vma);
+ xe_vma_free(vma);
}
static void vma_destroy_work_func(struct work_struct *w)
@@ -932,7 +958,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
- list_del(&vma->userptr.invalidate_link);
+ list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
} else if (!xe_vma_is_null(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
@@ -974,9 +1000,16 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
int err;
XE_WARN_ON(!vm);
- err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
- if (!err && bo && !bo->vm)
- err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
+ else
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm) {
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+ else
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
+ }
return err;
}
@@ -1580,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
trace_xe_vma_unbind(vma);
+ if (vma->ufence) {
+ struct xe_user_fence * const f = vma->ufence;
+
+ if (!xe_sync_ufence_get_status(f))
+ return ERR_PTR(-EBUSY);
+
+ vma->ufence = NULL;
+ xe_sync_ufence_put(f);
+ }
+
if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
@@ -1713,6 +1756,21 @@ err_fences:
return ERR_PTR(err);
}
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_syncs; i++) {
+ struct xe_sync_entry *e = &syncs[i];
+
+ if (xe_sync_is_ufence(e))
+ return xe_sync_ufence_get(e);
+ }
+
+ return NULL;
+}
+
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool immediate, bool first_op,
@@ -1720,9 +1778,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
{
struct dma_fence *fence;
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ struct xe_user_fence *ufence;
xe_vm_assert_held(vm);
+ ufence = find_ufence_get(syncs, num_syncs);
+ if (vma->ufence && ufence)
+ xe_sync_ufence_put(vma->ufence);
+
+ vma->ufence = ufence ?: vma->ufence;
+
if (immediate) {
fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
last_op);
@@ -1854,10 +1919,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
mutex_lock(&xef->vm.lock);
err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->vm.lock);
- if (err) {
- xe_vm_close_and_put(vm);
- return err;
- }
+ if (err)
+ goto err_close_and_put;
if (xe->info.has_asid) {
mutex_lock(&xe->usm.lock);
@@ -1865,11 +1928,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
XA_LIMIT(1, XE_MAX_ASID - 1),
&xe->usm.next_asid, GFP_KERNEL);
mutex_unlock(&xe->usm.lock);
- if (err < 0) {
- xe_vm_close_and_put(vm);
- return err;
- }
- err = 0;
+ if (err < 0)
+ goto err_free_id;
+
vm->usm.asid = asid;
}
@@ -1887,6 +1948,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
#endif
return 0;
+
+err_free_id:
+ mutex_lock(&xef->vm.lock);
+ xa_erase(&xef->vm.xa, id);
+ mutex_unlock(&xef->vm.lock);
+err_close_and_put:
+ xe_vm_close_and_put(vm);
+
+ return err;
}
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1953,6 +2023,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
xe_exec_queue_last_fence_get(wait_exec_queue, vm);
xe_sync_entry_signal(&syncs[i], NULL, fence);
+ dma_fence_put(fence);
}
}
@@ -2033,7 +2104,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
- struct xe_vma_op *op;
struct drm_gpuvm_bo *vm_bo;
int err;
@@ -2063,9 +2133,11 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
if (err)
return ERR_PTR(err);
- vm_bo = drm_gpuvm_bo_find(&vm->gpuvm, obj);
- if (!vm_bo)
- break;
+ vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj);
+ if (IS_ERR(vm_bo)) {
+ xe_bo_unlock(bo);
+ return ERR_CAST(vm_bo);
+ }
ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
drm_gpuvm_bo_put(vm_bo);
@@ -2078,23 +2150,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
if (IS_ERR(ops))
return ops;
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- if (operation & FORCE_ASYNC_OP_ERROR) {
- op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
- base.entry);
- if (op)
- op->inject_error = true;
- }
-#endif
-
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.immediate =
- flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2142,7 +2201,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
drm_exec_fini(&exec);
if (xe_vma_is_userptr(vma)) {
- err = xe_vma_userptr_pin_pages(vma);
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
if (err) {
prep_vma_destroy(vm, vma, false);
xe_vma_destroy_unlocked(vma);
@@ -2164,13 +2223,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
if (vma->gpuva.flags & XE_VMA_PTE_1G)
return SZ_1G;
- else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+ else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
return SZ_2M;
+ else if (vma->gpuva.flags & XE_VMA_PTE_64K)
+ return SZ_64K;
+ else if (vma->gpuva.flags & XE_VMA_PTE_4K)
+ return SZ_4K;
- return SZ_4K;
+ return SZ_1G; /* Uninitialized, use max size */
}
-static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
switch (size) {
case SZ_1G:
@@ -2179,9 +2242,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
case SZ_2M:
vma->gpuva.flags |= XE_VMA_PTE_2M;
break;
+ case SZ_64K:
+ vma->gpuva.flags |= XE_VMA_PTE_64K;
+ break;
+ case SZ_4K:
+ vma->gpuva.flags |= XE_VMA_PTE_4K;
+ break;
}
-
- return SZ_4K;
}
static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
@@ -2279,8 +2346,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
@@ -2411,7 +2476,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
- op->map.immediate || !xe_vm_in_fault_mode(vm),
+ !xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2497,13 +2562,25 @@ retry_userptr:
}
drm_exec_fini(&exec);
- if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
+ if (err == -EAGAIN) {
lockdep_assert_held_write(&vm->lock);
- err = xe_vma_userptr_pin_pages(vma);
- if (!err)
- goto retry_userptr;
- trace_xe_vma_fail(vma);
+ if (op->base.op == DRM_GPUVA_OP_REMAP) {
+ if (!op->remap.unmap_done)
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ else if (op->remap.prev)
+ vma = op->remap.prev;
+ else
+ vma = op->remap.next;
+ }
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (!err)
+ goto retry_userptr;
+
+ trace_xe_vma_fail(vma);
+ }
}
return err;
@@ -2515,13 +2592,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
lockdep_assert_held_write(&vm->lock);
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- if (op->inject_error) {
- op->inject_error = false;
- return -ENOMEM;
- }
-#endif
-
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
ret = __xe_vma_op_execute(vm, op->map.vma, op);
@@ -2636,7 +2706,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
{
int i;
- for (i = num_ops_list - 1; i; ++i) {
+ for (i = num_ops_list - 1; i >= 0; --i) {
struct drm_gpuva_ops *__ops = ops[i];
struct drm_gpuva_op *__op;
@@ -2681,21 +2751,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
-#ifdef TEST_VM_ASYNC_OPS_ERROR
-#define SUPPORTED_FLAGS \
- (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
-#else
-#define SUPPORTED_FLAGS \
- (DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
- 0xffff)
-#endif
+#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
+ DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
-#define MAX_BINDS 512 /* FIXME: Picking random upper limit */
-
static int vm_bind_ioctl_check_args(struct xe_device *xe,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
@@ -2707,16 +2767,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions) ||
- XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
+ if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
if (args->num_binds > 1) {
u64 __user *bind_user =
u64_to_user_ptr(args->vector_of_binds);
- *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds, GFP_KERNEL);
+ *bind_ops = kvmalloc_array(args->num_binds,
+ sizeof(struct drm_xe_vm_bind_op),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
return -ENOMEM;
@@ -2806,7 +2866,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
free_bind_ops:
if (args->num_binds > 1)
- kfree(*bind_ops);
+ kvfree(*bind_ops);
return err;
}
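
Dropping the arbitrary MAX_BINDS cap is safe only because of the allocator switch in the same hunk: the array size is user-controlled, kvmalloc_array() checks the multiplication for overflow and falls back to vmalloc when the request is too large for the page allocator, and __GFP_ACCOUNT charges the memory to the caller's cgroup. A minimal sketch of the pattern, with a hypothetical struct op standing in for drm_xe_vm_bind_op:

	#include <linux/slab.h>		/* kvmalloc_array(), kvfree() */
	#include <linux/types.h>

	struct op { u64 addr, range; };	/* stand-in element type */

	static struct op *alloc_user_ops(u32 num_ops)
	{
		/* overflow-checked multiply, vmalloc fallback for large
		 * user-controlled counts, memcg-accounted allocation */
		return kvmalloc_array(num_ops, sizeof(struct op),
				      GFP_KERNEL | __GFP_ACCOUNT);
	}

Anything allocated this way must be released with kvfree(), never kfree(), since the memory may have come from vmalloc, which is exactly why the later hunks convert every kfree() of bind_ops, bos and ops to kvfree().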
@@ -2843,7 +2903,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_gpuva_ops **ops = NULL;
struct xe_vm *vm;
struct xe_exec_queue *q = NULL;
- u32 num_syncs;
+ u32 num_syncs, num_ufence = 0;
struct xe_sync_entry *syncs = NULL;
struct drm_xe_vm_bind_op *bind_ops;
LIST_HEAD(ops_list);
@@ -2894,13 +2954,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (args->num_binds) {
- bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
+ bos = kvcalloc(args->num_binds, sizeof(*bos),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
- ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
+ ops = kvcalloc(args->num_binds, sizeof(*ops),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -2980,6 +3042,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
if (err)
goto free_syncs;
+
+ if (xe_sync_is_ufence(&syncs[num_syncs]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+ err = -EINVAL;
+ goto free_syncs;
}
if (!args->num_binds) {
@@ -3033,10 +3103,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; bos && i < args->num_binds; ++i)
xe_bo_put(bos[i]);
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
@@ -3060,10 +3130,10 @@ put_exec_queue:
if (q)
xe_exec_queue_put(q);
free_objs:
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
}
@@ -3122,8 +3192,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
WARN_ON_ONCE(!mmu_interval_check_retry
- (&vma->userptr.notifier,
- vma->userptr.notifier_seq));
+ (&to_userptr_vma(vma)->userptr.notifier,
+ to_userptr_vma(vma)->userptr.notifier_seq));
WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
DMA_RESV_USAGE_BOOKKEEP));
@@ -3184,11 +3254,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
if (is_null) {
addr = 0;
} else if (is_userptr) {
+ struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
struct xe_res_cursor cur;
- if (vma->userptr.sg) {
- xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
- &cur);
+ if (sg) {
+ xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
addr = xe_res_dma(&cur);
} else {
addr = 0;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index cf2f96e8c1ab..9654a0612fc2 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -160,6 +160,18 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}
+/**
+ * to_userptr_vma() - Return a pointer to an embedding userptr vma
+ * @vma: Pointer to the embedded struct xe_vma
+ *
+ * Return: Pointer to the embedding userptr vma
+ */
+static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
+{
+ xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
+ return container_of(vma, struct xe_userptr_vma, vma);
+}
+
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -224,9 +236,9 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
}
}
-int xe_vma_userptr_pin_pages(struct xe_vma *vma);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
-int xe_vma_userptr_check_repin(struct xe_vma *vma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
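
The new to_userptr_vma() helper is the standard container_of() subclassing idiom: struct xe_userptr_vma embeds a struct xe_vma, so a base pointer can be converted back to the enclosing object as long as the assertion guarding it holds. A self-contained userspace sketch of the same pattern, with hypothetical types:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base { int id; };

	struct derived {
		struct base base;	/* embedded base object */
		int extra;
	};

	static struct derived *to_derived(struct base *b)
	{
		/* valid only if @b is really embedded in a struct derived */
		return container_of(b, struct derived, base);
	}

This is also the point of the xe_vm_types.h changes below: the userptr state moves out of struct xe_vma into the subclass, so only userptr VMAs pay for it.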
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 63e8a50b88e9..7300eea5394b 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -19,11 +19,9 @@
struct xe_bo;
struct xe_sync_entry;
+struct xe_user_fence;
struct xe_vm;
-#define TEST_VM_ASYNC_OPS_ERROR
-#define FORCE_ASYNC_OP_ERROR BIT(31)
-
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
@@ -32,11 +30,15 @@ struct xe_vm;
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
/** struct xe_userptr - User pointer */
struct xe_userptr {
/** @invalidate_link: Link for the vm::userptr.invalidated list */
struct list_head invalidate_link;
+	/** @repin_link: link into VM repin list if userptr. */
+ struct list_head repin_link;
/**
* @notifier: MMU notifier for user pointer (invalidation call back)
*/
@@ -68,8 +70,6 @@ struct xe_vma {
* resv.
*/
union {
- /** @userptr: link into VM repin list if userptr. */
- struct list_head userptr;
/** @rebind: link into VM if this VMA needs rebinding. */
struct list_head rebind;
/** @destroy: link to contested list when VM is being closed. */
@@ -107,9 +107,19 @@ struct xe_vma {
u16 pat_index;
/**
- * @userptr: user pointer state, only allocated for VMAs that are
- * user pointers
+ * @ufence: The user fence that was provided with MAP.
+ * Needs to be signalled before UNMAP can be processed.
*/
+ struct xe_user_fence *ufence;
+};
+
+/**
+ * struct xe_userptr_vma - A userptr vma subclass
+ * @vma: The vma.
+ * @userptr: Additional userptr information.
+ */
+struct xe_userptr_vma {
+ struct xe_vma vma;
struct xe_userptr userptr;
};
@@ -285,10 +295,6 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
- /** @immediate: Immediate bind */
- bool immediate;
- /** @read_only: Read only */
- bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @pat_index: The pat index to use for this operation. */
@@ -356,11 +362,6 @@ struct xe_vma_op {
/** @flags: operation flags */
enum xe_vma_op_flags flags;
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- /** @inject_error: inject error to test async op error handling */
- bool inject_error;
-#endif
-
union {
/** @map: VMA map operation specific data */
struct xe_vma_op_map map;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 42fd504abbcd..89983d7d73ca 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = {
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
.reserve_vblank_syncpts = false,
+ .skip_reset_assert = true,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev)
host1x_intr_stop(host);
host1x_syncpt_save(host);
- err = reset_control_bulk_assert(host->nresets, host->resets);
- if (err) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- goto resume_host1x;
- }
+ if (!host->info->skip_reset_assert) {
+ err = reset_control_bulk_assert(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ goto resume_host1x;
+ }
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
+ }
clk_disable_unprepare(host->clk);
reset_control_bulk_release(host->nresets, host->resets);
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index c8e302de7625..925a118db23f 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -116,6 +116,12 @@ struct host1x_info {
* the display driver disables VBLANK increments.
*/
bool reserve_vblank_syncpts;
+ /*
+ * On Tegra186, secure world applications may require access to
+ * host1x during suspend/resume. To allow this, we need to leave
+	 * host1x out of reset.
+ */
+ bool skip_reset_assert;
};
struct host1x {
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index d9ef45fcaeab..e630caf644e8 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -143,6 +143,9 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
/**
* hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
*
@@ -152,7 +155,7 @@ EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
*
* @returns %NULL on error, an %__u8 memory pointer on success
*/
-noinline __u8 *
+__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
struct hid_bpf_ctx_kern *ctx_kern;
@@ -167,14 +170,15 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr
return ctx_kern->data + offset;
}
+__bpf_kfunc_end_defs();
/*
* The following set contains all functions we agree BPF programs
* can use.
*/
-BTF_SET8_START(hid_bpf_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
-BTF_SET8_END(hid_bpf_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
.owner = THIS_MODULE,
@@ -241,6 +245,42 @@ int hid_bpf_reconnect(struct hid_device *hdev)
return 0;
}
+static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
+ __u32 flags)
+{
+ int fd, err, prog_type;
+
+ prog_type = hid_bpf_get_prog_attach_type(prog);
+ if (prog_type < 0)
+ return prog_type;
+
+ if (prog_type >= HID_BPF_PROG_TYPE_MAX)
+ return -EINVAL;
+
+ if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
+ err = hid_bpf_allocate_event_data(hdev);
+ if (err)
+ return err;
+ }
+
+ fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
+ if (fd < 0)
+ return fd;
+
+ if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
+ err = hid_bpf_reconnect(hdev);
+ if (err) {
+ close_fd(fd);
+ return err;
+ }
+ }
+
+ return fd;
+}
+
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
/**
* hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
*
@@ -253,22 +293,17 @@ int hid_bpf_reconnect(struct hid_device *hdev)
* is pinned to the BPF file system).
*/
/* called from syscall */
-noinline int
+__bpf_kfunc int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
struct hid_device *hdev;
+ struct bpf_prog *prog;
struct device *dev;
- int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
+ int err, fd;
if (!hid_bpf_ops)
return -EINVAL;
- if (prog_type < 0)
- return prog_type;
-
- if (prog_type >= HID_BPF_PROG_TYPE_MAX)
- return -EINVAL;
-
if ((flags & ~HID_BPF_FLAG_MASK))
return -EINVAL;
@@ -278,25 +313,29 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
hdev = to_hid_device(dev);
- if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
- err = hid_bpf_allocate_event_data(hdev);
- if (err)
- return err;
+ /*
+ * take a ref on the prog itself, it will be released
+ * on errors or when it'll be detached
+ */
+ prog = bpf_prog_get(prog_fd);
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto out_dev_put;
}
- fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
- if (fd < 0)
- return fd;
-
- if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
- err = hid_bpf_reconnect(hdev);
- if (err) {
- close_fd(fd);
- return err;
- }
+ fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
+ if (fd < 0) {
+ err = fd;
+ goto out_prog_put;
}
return fd;
+
+ out_prog_put:
+ bpf_prog_put(prog);
+ out_dev_put:
+ put_device(dev);
+ return err;
}
/**
@@ -306,7 +345,7 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
*
* @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
*/
-noinline struct hid_bpf_ctx *
+__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
struct hid_device *hdev;
@@ -323,8 +362,10 @@ hid_bpf_allocate_context(unsigned int hid_id)
hdev = to_hid_device(dev);
ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
- if (!ctx_kern)
+ if (!ctx_kern) {
+ put_device(dev);
return NULL;
+ }
ctx_kern->ctx.hid = hdev;
@@ -337,14 +378,19 @@ hid_bpf_allocate_context(unsigned int hid_id)
* @ctx: the HID-BPF context to release
*
*/
-noinline void
+__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
struct hid_bpf_ctx_kern *ctx_kern;
+ struct hid_device *hid;
ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+ hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */
kfree(ctx_kern);
+
+ /* get_device() is called by bus_find_device() */
+ put_device(&hid->dev);
}
/**
@@ -358,7 +404,7 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
*
* @returns %0 on success, a negative error code otherwise.
*/
-noinline int
+__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
enum hid_report_type rtype, enum hid_class_request reqtype)
{
@@ -426,6 +472,7 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
kfree(dma_data);
return ret;
}
+__bpf_kfunc_end_defs();
/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
@@ -440,12 +487,12 @@ static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
};
/* for syscall HID-BPF */
-BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
-BTF_SET8_END(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.h b/drivers/hid/bpf/hid_bpf_dispatch.h
index 63dfc8605cd2..fbe0639d09f2 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.h
+++ b/drivers/hid/bpf/hid_bpf_dispatch.h
@@ -12,9 +12,9 @@ struct hid_bpf_ctx_kern {
int hid_bpf_preload_skel(void);
void hid_bpf_free_links_and_skel(void);
-int hid_bpf_get_prog_attach_type(int prog_fd);
+int hid_bpf_get_prog_attach_type(struct bpf_prog *prog);
int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd,
- __u32 flags);
+ struct bpf_prog *prog, __u32 flags);
void __hid_bpf_destroy_device(struct hid_device *hdev);
int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
struct hid_bpf_ctx_kern *ctx_kern);
diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c
index eca34b7372f9..aa8e1c79cdf5 100644
--- a/drivers/hid/bpf/hid_bpf_jmp_table.c
+++ b/drivers/hid/bpf/hid_bpf_jmp_table.c
@@ -196,6 +196,7 @@ static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
static void hid_bpf_release_progs(struct work_struct *work)
{
int i, j, n, map_fd = -1;
+ bool hdev_destroyed;
if (!jmp_table.map)
return;
@@ -220,6 +221,12 @@ static void hid_bpf_release_progs(struct work_struct *work)
if (entry->hdev) {
hdev = entry->hdev;
type = entry->type;
+ /*
+ * hdev is still valid, even if we are called after hid_destroy_device():
+			 * when hid_bpf_attach_prog() gets called, it takes a ref on the dev through
+ * bus_find_device()
+ */
+ hdev_destroyed = hdev->bpf.destroyed;
hid_bpf_populate_hdev(hdev, type);
@@ -232,12 +239,19 @@ static void hid_bpf_release_progs(struct work_struct *work)
if (test_bit(next->idx, jmp_table.enabled))
continue;
- if (next->hdev == hdev && next->type == type)
+ if (next->hdev == hdev && next->type == type) {
+ /*
+ * clear the hdev reference and decrement the device ref
+ * that was taken during bus_find_device() while calling
+			 * hid_bpf_attach_prog()
+ */
next->hdev = NULL;
+ put_device(&hdev->dev);
+ }
}
- /* if type was rdesc fixup, reconnect device */
- if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP)
+ /* if type was rdesc fixup and the device is not gone, reconnect device */
+ if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP && !hdev_destroyed)
hid_bpf_reconnect(hdev);
}
}
@@ -333,15 +347,10 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
return err;
}
-int hid_bpf_get_prog_attach_type(int prog_fd)
+int hid_bpf_get_prog_attach_type(struct bpf_prog *prog)
{
- struct bpf_prog *prog = NULL;
- int i;
int prog_type = HID_BPF_PROG_TYPE_UNDEF;
-
- prog = bpf_prog_get(prog_fd);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
+ int i;
for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
@@ -350,8 +359,6 @@ int hid_bpf_get_prog_attach_type(int prog_fd)
}
}
- bpf_prog_put(prog);
-
return prog_type;
}
@@ -388,19 +395,13 @@ static const struct bpf_link_ops hid_bpf_link_lops = {
/* called from syscall */
noinline int
__hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
- int prog_fd, __u32 flags)
+ int prog_fd, struct bpf_prog *prog, __u32 flags)
{
struct bpf_link_primer link_primer;
struct hid_bpf_link *link;
- struct bpf_prog *prog = NULL;
struct hid_bpf_prog_entry *prog_entry;
int cnt, err = -EINVAL, prog_table_idx = -1;
- /* take a ref on the prog itself */
- prog = bpf_prog_get(prog_fd);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
-
mutex_lock(&hid_bpf_attach_lock);
link = kzalloc(sizeof(*link), GFP_USER);
@@ -467,7 +468,6 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
err_unlock:
mutex_unlock(&hid_bpf_attach_lock);
- bpf_prog_put(prog);
kfree(link);
return err;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index fb30e228d35f..828a5c022c64 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -298,6 +298,9 @@
#define USB_VENDOR_ID_CIDC 0x1677
+#define I2C_VENDOR_ID_CIRQUE 0x0488
+#define I2C_PRODUCT_ID_CIRQUE_1063 0x1063
+
#define USB_VENDOR_ID_CJTOUCH 0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index fd6d8f1d9b8f..d2f3f234f29d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -203,6 +203,8 @@ struct hidpp_device {
struct hidpp_scroll_counter vertical_wheel_counter;
u8 wireless_feature_index;
+
+ bool connected_once;
};
/* HID++ 1.0 error codes */
@@ -988,8 +990,13 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
hidpp->protocol_minor = response.rap.params[1];
print_version:
- hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
+ if (!hidpp->connected_once) {
+ hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ hidpp->connected_once = true;
+	} else {
+		hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+			hidpp->protocol_major, hidpp->protocol_minor);
+	}
return 0;
}
@@ -4184,7 +4191,7 @@ static void hidpp_connect_event(struct work_struct *work)
/* Get device version to check if it is connected */
ret = hidpp_root_get_protocol_version(hidpp);
if (ret) {
- hid_info(hidpp->hid_dev, "Disconnected\n");
+ hid_dbg(hidpp->hid_dev, "Disconnected\n");
if (hidpp->battery.ps) {
hidpp->battery.online = false;
hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN;
@@ -4610,6 +4617,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
{ /* Logitech G Pro X Superlight Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
+ { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
{ /* G935 Gaming Headset */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fd5b0637dad6..3e91e4d6ba6f 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
+
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
index 82d0a77359c4..58b15750dbb0 100644
--- a/drivers/hid/hid-nvidia-shield.c
+++ b/drivers/hid/hid-nvidia-shield.c
@@ -800,6 +800,8 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
"thunderstrike%d:blue:led", ts->id);
+ if (!led->name)
+ return -ENOMEM;
led->max_brightness = 1;
led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
led->brightness_get = &thunderstrike_led_get_brightness;
@@ -831,6 +833,8 @@ static inline int thunderstrike_psy_create(struct shield_device *shield_dev)
shield_dev->battery_dev.desc.name =
devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
"thunderstrike_%d", ts->id);
+ if (!shield_dev->battery_dev.desc.name)
+ return -ENOMEM;
shield_dev->battery_dev.psy = power_supply_register(
&hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg);
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index b3c4e50e248a..b08a5ab58528 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -1109,10 +1109,9 @@ static int steam_probe(struct hid_device *hdev,
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL);
- if (!steam) {
- ret = -ENOMEM;
- goto steam_alloc_fail;
- }
+ if (!steam)
+ return -ENOMEM;
+
steam->hdev = hdev;
hid_set_drvdata(hdev, steam);
spin_lock_init(&steam->lock);
@@ -1129,14 +1128,14 @@ static int steam_probe(struct hid_device *hdev,
*/
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW);
if (ret)
- goto hid_hw_start_fail;
+ goto err_cancel_work;
ret = hid_hw_open(hdev);
if (ret) {
hid_err(hdev,
"%s:hid_hw_open\n",
__func__);
- goto hid_hw_open_fail;
+ goto err_hw_stop;
}
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
@@ -1152,36 +1151,37 @@ static int steam_probe(struct hid_device *hdev,
hid_err(hdev,
"%s:steam_register failed with error %d\n",
__func__, ret);
- goto input_register_fail;
+ goto err_hw_close;
}
}
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {
ret = PTR_ERR(steam->client_hdev);
- goto client_hdev_fail;
+		goto err_steam_unregister;
}
steam->client_hdev->driver_data = steam;
ret = hid_add_device(steam->client_hdev);
if (ret)
- goto client_hdev_add_fail;
+ goto err_destroy;
return 0;
-client_hdev_add_fail:
- hid_hw_stop(hdev);
-client_hdev_fail:
+err_destroy:
hid_destroy_device(steam->client_hdev);
-input_register_fail:
-hid_hw_open_fail:
-hid_hw_start_fail:
+err_steam_unregister:
+ if (steam->connected)
+ steam_unregister(steam);
+err_hw_close:
+ hid_hw_close(hdev);
+err_hw_stop:
+ hid_hw_stop(hdev);
+err_cancel_work:
cancel_work_sync(&steam->work_connect);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->rumble_work);
-steam_alloc_fail:
- hid_err(hdev, "%s: failed with error %d\n",
- __func__, ret);
+
return ret;
}
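
The reworked steam_probe() turns a pile of ad-hoc labels into the canonical kernel unwind ladder: each label undoes exactly one successful step, and control falls through the labels in reverse order of acquisition, so a failure at any point releases everything taken so far and nothing else. A condensed sketch of the shape, with hypothetical setup/teardown helpers:

	/* stand-ins for real acquire/release steps */
	static int setup_a(void) { return 0; }
	static int setup_b(void) { return 0; }
	static void undo_a(void) { }

	static int probe_sketch(void)
	{
		int ret;

		ret = setup_a();
		if (ret)
			return ret;	/* nothing to undo yet */

		ret = setup_b();
		if (ret)
			goto err_undo_a;

		return 0;

	err_undo_a:
		undo_a();
		return ret;
	}

Note how the fixed ladder also adds hid_hw_close() and hid_hw_stop() steps that the old error path missed on several branches.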
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 13c8dd8cd350..2bc762d31ac7 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -357,8 +357,11 @@ static int hidraw_release(struct inode * inode, struct file * file)
down_write(&minors_rwsem);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
- for (int i = list->tail; i < list->head; i++)
- kfree(list->buffer[i].value);
+ while (list->tail != list->head) {
+ kfree(list->buffer[list->tail].value);
+ list->buffer[list->tail].value = NULL;
+ list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
+ }
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
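
The old cleanup loop walked for (i = list->tail; i < list->head; i++), which frees nothing, or the wrong slots, once the ring has wrapped and head is numerically below tail. The replacement drains the ring exactly as the reader does, advancing tail with a power-of-two mask. A minimal userspace sketch of the idiom, with a hypothetical 64-entry ring:

	#include <stdlib.h>

	#define RING_SIZE 64	/* must be a power of two for the mask to work */

	struct ring {
		void *slot[RING_SIZE];
		unsigned int head;	/* next slot the writer fills */
		unsigned int tail;	/* next slot the reader drains */
	};

	static void ring_drain(struct ring *r)
	{
		/* correct even when head has wrapped around below tail */
		while (r->tail != r->head) {
			free(r->slot[r->tail]);
			r->slot[r->tail] = NULL;
			r->tail = (r->tail + 1) & (RING_SIZE - 1);
		}
	}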
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90f316ae9819..2df1ab3c31cc 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -49,6 +49,7 @@
#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(2)
#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3)
#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
+#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
/* Command opcodes */
#define I2C_HID_OPCODE_RESET 0x01
@@ -131,6 +132,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_RESET_ON_RESUME },
{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
I2C_HID_QUIRK_BAD_INPUT_SIZE },
+ { I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
+ I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
/*
* Sending the wakeup after reset actually break ELAN touchscreen controller
*/
@@ -956,7 +959,8 @@ static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
return ret;
/* Save some power */
- i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND))
+ i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
disable_irq(client->irq);
diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
index c4e1fa0273c8..8be4d576da77 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of.c
@@ -87,6 +87,7 @@ static int i2c_hid_of_probe(struct i2c_client *client)
if (!ihid_of)
return -ENOMEM;
+ ihid_of->client = client;
ihid_of->ops.power_up = i2c_hid_of_power_up;
ihid_of->ops.power_down = i2c_hid_of_power_down;
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index aa6cb033bb06..03d5601ce807 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -722,6 +722,8 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
cl->state = ISHTP_CL_DISCONNECTED;
+ if (warm_reset && cl->device->reference_count)
+ continue;
/*
* Wake any pending process. The waiter would check dev->state
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 82c907f01bd3..8a7f2f6a4f86 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -49,7 +49,9 @@ static void ishtp_read_list_flush(struct ishtp_cl *cl)
list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
list_del(&rb->list);
- ishtp_io_rb_free(rb);
+ spin_lock(&cl->free_list_spinlock);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock(&cl->free_list_spinlock);
}
spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b613f11ed949..2bc45b24075c 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2087,7 +2087,7 @@ static int wacom_allocate_inputs(struct wacom *wacom)
return 0;
}
-static int wacom_register_inputs(struct wacom *wacom)
+static int wacom_setup_inputs(struct wacom *wacom)
{
struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
@@ -2106,10 +2106,6 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(pen_input_dev);
wacom_wac->pen_input = NULL;
pen_input_dev = NULL;
- } else {
- error = input_register_device(pen_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
@@ -2118,10 +2114,6 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(touch_input_dev);
wacom_wac->touch_input = NULL;
touch_input_dev = NULL;
- } else {
- error = input_register_device(touch_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
@@ -2130,7 +2122,34 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(pad_input_dev);
wacom_wac->pad_input = NULL;
pad_input_dev = NULL;
- } else {
+ }
+
+ return 0;
+}
+
+static int wacom_register_inputs(struct wacom *wacom)
+{
+ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int error = 0;
+
+ pen_input_dev = wacom_wac->pen_input;
+ touch_input_dev = wacom_wac->touch_input;
+ pad_input_dev = wacom_wac->pad_input;
+
+ if (pen_input_dev) {
+ error = input_register_device(pen_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (touch_input_dev) {
+ error = input_register_device(touch_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (pad_input_dev) {
error = input_register_device(pad_input_dev);
if (error)
goto fail;
@@ -2383,6 +2402,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_setup_inputs(wacom);
+ if (error)
+ goto fail;
+
+ if (features->type == HID_GENERIC)
+ connect_mask |= HID_CONNECT_DRIVER;
+
+ /* Regular HID work starts now */
+ error = hid_hw_start(hdev, connect_mask);
+ if (error) {
+ hid_err(hdev, "hw start failed\n");
+ goto fail;
+ }
+
error = wacom_register_inputs(wacom);
if (error)
goto fail;
@@ -2397,16 +2430,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail;
}
- if (features->type == HID_GENERIC)
- connect_mask |= HID_CONNECT_DRIVER;
-
- /* Regular HID work starts now */
- error = hid_hw_start(hdev, connect_mask);
- if (error) {
- hid_err(hdev, "hw start failed\n");
- goto fail;
- }
-
if (!wireless) {
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(wacom);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index da8a01fedd39..fbe10fbc5769 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2575,7 +2575,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], sense);
if (wacom_wac->serial[0]) {
- input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ /*
+ * xf86-input-wacom does not accept a serial number
+ * of '0'. Report the low 32 bits if possible, but
+ * if they are zero, report the upper ones instead.
+ */
+ __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu;
+ __u32 serial_hi = wacom_wac->serial[0] >> 32;
+ input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi));
input_report_abs(input, ABS_MISC, sense ? id : 0);
}
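
input_event() takes a 32-bit value, so the 64-bit tool serial has to be folded before reporting; the patch prefers the low word and only falls back to the high word when the low word is zero, matching what xf86-input-wacom can digest. The fold in isolation, as a runnable sketch:

	#include <stdint.h>
	#include <stdio.h>

	/* low 32 bits when non-zero, otherwise the high 32 bits */
	static uint32_t fold_serial(uint64_t serial)
	{
		uint32_t lo = serial & 0xFFFFFFFFu;
		uint32_t hi = serial >> 32;

		return lo ? lo : hi;
	}

	int main(void)
	{
		/* low word is zero, so the high word gets reported */
		printf("%#x\n", fold_serial(0xAABBCCDD00000000ull));
		return 0;
	}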
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 56f7e06c673e..adbf674355b2 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -322,125 +322,89 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
- /* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
sizeof(struct vmbus_channel_gpadl_header) -
sizeof(struct gpa_range);
+ pfncount = umin(pagecount, pfnsize / sizeof(u64));
+
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pfncount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgheader)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+ for (i = 0; i < pfncount; i++)
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
+ *msginfo = msgheader;
+
+ pfnsum = pfncount;
+ pfnleft = pagecount - pfncount;
+
+ /* how many pfns can we fit in a body message */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_body);
pfncount = pfnsize / sizeof(u64);
- if (pagecount > pfncount) {
- /* we need a gpadl body */
- /* fill in the header */
+ /*
+ * If pfnleft is zero, everything fits in the header and no body
+ * messages are needed
+ */
+ while (pfnleft) {
+ pfncurr = umin(pfncount, pfnleft);
msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pfncount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (!msgheader)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
- *msginfo = msgheader;
-
- pfnsum = pfncount;
- pfnleft = pagecount - pfncount;
-
- /* how many pfns can we fit */
- pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
- sizeof(struct vmbus_channel_gpadl_body);
- pfncount = pfnsize / sizeof(u64);
-
- /* fill in the body */
- while (pfnleft) {
- if (pfnleft > pfncount)
- pfncurr = pfncount;
- else
- pfncurr = pfnleft;
-
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_body) +
- pfncurr * sizeof(u64);
- msgbody = kzalloc(msgsize, GFP_KERNEL);
-
- if (!msgbody) {
- struct vmbus_channel_msginfo *pos = NULL;
- struct vmbus_channel_msginfo *tmp = NULL;
- /*
- * Free up all the allocated messages.
- */
- list_for_each_entry_safe(pos, tmp,
- &msgheader->submsglist,
- msglistentry) {
-
- list_del(&pos->msglistentry);
- kfree(pos);
- }
-
- goto nomem;
- }
-
- msgbody->msgsize = msgsize;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)msgbody->msg;
+ sizeof(struct vmbus_channel_gpadl_body) +
+ pfncurr * sizeof(u64);
+ msgbody = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
/*
- * Gpadl is u32 and we are using a pointer which could
- * be 64-bit
- * This is governed by the guest/host protocol and
- * so the hypervisor guarantees that this is ok.
+ * Free up all the allocated messages.
*/
- for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
- kbuffer, size, send_offset, pfnsum + i);
-
- /* add to msg header */
- list_add_tail(&msgbody->msglistentry,
- &msgheader->submsglist);
- pfnsum += pfncurr;
- pfnleft -= pfncurr;
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+ kfree(msgheader);
+ return -ENOMEM;
}
- } else {
- /* everything fits in a header */
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pagecount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (msgheader == NULL)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
-
- *msginfo = msgheader;
+
+ msgbody->msgsize = msgsize;
+ gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+ /*
+ * Gpadl is u32 and we are using a pointer which could
+ * be 64-bit
+ * This is governed by the guest/host protocol and
+ * so the hypervisor guarantees that this is ok.
+ */
+ for (i = 0; i < pfncurr; i++)
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
+
+ /* add to msg header */
+ list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
+ pfnsum += pfncurr;
+ pfnleft -= pfncurr;
}
return 0;
-nomem:
- kfree(msgheader);
- kfree(msgbody);
- return -ENOMEM;
}
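
The rewrite inverts the old structure: the header message is now built unconditionally, packing as many PFNs as fit after the fixed header overhead, and body messages are emitted only for the remainder, so the former else branch disappears. The split arithmetic is easier to see with concrete numbers; a sketch with made-up sizes (the real MAX_SIZE_CHANNEL_MESSAGE and struct overheads differ):

	#include <stdint.h>
	#include <stdio.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int main(void)
	{
		uint32_t msg_max = 240;		/* illustrative bytes per message */
		uint32_t hdr_overhead = 40;	/* gpadl header + gpa_range */
		uint32_t body_overhead = 8;	/* gpadl body header */
		uint32_t pagecount = 100;	/* PFNs to describe */

		uint32_t hdr_pfns = MIN(pagecount,
					(uint32_t)((msg_max - hdr_overhead) / sizeof(uint64_t)));
		uint32_t left = pagecount - hdr_pfns;
		uint32_t per_body = (msg_max - body_overhead) / sizeof(uint64_t);
		uint32_t bodies = (left + per_body - 1) / per_body;

		/* 25 PFNs ride in the header, 75 remain, 29 fit per body,
		 * so 3 body messages follow */
		printf("header: %u pfns, bodies: %u\n", hdr_pfns, bodies);
		return 0;
	}

The error path also gets simpler: a failed body allocation now frees the submessage list and the header directly instead of funneling through the shared nomem label.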
/*
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 42aec2c5606a..9c97c4065fe7 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -296,6 +296,11 @@ static struct {
spinlock_t lock;
} host_ts;
+static bool timesync_implicit;
+
+module_param(timesync_implicit, bool, 0644);
+MODULE_PARM_DESC(timesync_implicit, "If set treat SAMPLE as SYNC when clock is behind");
+
static inline u64 reftime_to_ns(u64 reftime)
{
return (reftime - WLTIMEDELTA) * 100;
@@ -345,6 +350,29 @@ static void hv_set_host_time(struct work_struct *work)
}
/*
+ * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume.
+ * Force a sync if the guest is behind.
+ */
+static inline bool hv_implicit_sync(u64 host_time)
+{
+ struct timespec64 new_ts;
+ struct timespec64 threshold_ts;
+
+ new_ts = ns_to_timespec64(reftime_to_ns(host_time));
+ ktime_get_real_ts64(&threshold_ts);
+
+ threshold_ts.tv_sec += 5;
+
+ /*
+	 * Sync if the guest is behind the host by 5 or more seconds.
+ */
+ if (timespec64_compare(&new_ts, &threshold_ts) >= 0)
+ return true;
+
+ return false;
+}
+
+/*
* Synchronize time with host after reboot, restore, etc.
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
@@ -384,7 +412,8 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
spin_unlock_irqrestore(&host_ts.lock, flags);
/* Schedule work to do do_settimeofday64() */
- if (adj_flags & ICTIMESYNCFLAG_SYNC)
+ if ((adj_flags & ICTIMESYNCFLAG_SYNC) ||
+ (timesync_implicit && hv_implicit_sync(host_ts.host_time)))
schedule_work(&adj_time_work);
}
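
Since timesync_implicit is declared with mode 0644, the knob should also be adjustable at runtime, presumably via /sys/module/hv_utils/parameters/timesync_implicit (hv_util.c is linked into the hv_utils module), in addition to being set as a module load parameter.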
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b33d5abd9beb..7f7965f3d187 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -988,7 +988,7 @@ static const struct dev_pm_ops vmbus_pm = {
};
/* The one and only one */
-static struct bus_type hv_bus = {
+static const struct bus_type hv_bus = {
.name = "vmbus",
.match = vmbus_match,
.shutdown = vmbus_shutdown,
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index f6e1e55e8292..4acc1858d8ac 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -195,6 +195,8 @@ struct aspeed_pwm_tacho_data {
u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS];
struct aspeed_cooling_device *cdev[8];
const struct attribute_group *groups[3];
+ /* protects access to shared ASPEED_PTCR_RESULT */
+ struct mutex tach_lock;
};
enum type { TYPEM, TYPEN, TYPEO };
@@ -529,6 +531,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
u8 fan_tach_ch_source, type, mode, both;
int ret;
+ mutex_lock(&priv->tach_lock);
+
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
@@ -546,6 +550,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
ASPEED_RPM_STATUS_SLEEP_USEC,
usec);
+ mutex_unlock(&priv->tach_lock);
+
/* return -ETIMEDOUT if we didn't get an answer. */
if (ret)
return ret;
@@ -915,6 +921,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ mutex_init(&priv->tach_lock);
priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index ba82d1e79c13..b8fc8d1ef20d 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
+#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
@@ -419,7 +419,7 @@ static ssize_t show_temp(struct device *dev,
}
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
- int attr_no)
+ int index)
{
int i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -431,13 +431,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
};
for (i = 0; i < tdata->attr_size; i++) {
+ /*
+ * We map the attr number to core id of the CPU
+ * The attr number is always core id + 2
+ * The Pkgtemp will always show up as temp1_*, if available
+ */
+ int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
+
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
"temp%d_%s", attr_no, suffixes[i]);
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
- tdata->sd_attrs[i].index = attr_no;
+ tdata->sd_attrs[i].index = index;
tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
}
tdata->attr_group.attrs = tdata->attrs;
@@ -495,30 +502,25 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
- int err, index, attr_no;
+ int err, index;
if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
return 0;
/*
- * Find attr number for sysfs:
- * We map the attr number to core id of the CPU
- * The attr number is always core id + 2
- * The Pkgtemp will always show up as temp1_*, if available
+ * Get the index of tdata in pdata->core_data[]
+ * tdata for package: pdata->core_data[1]
+ * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
*/
if (pkg_flag) {
- attr_no = PKG_SYSFS_ATTR_NO;
+ index = PKG_SYSFS_ATTR_NO;
} else {
- index = ida_alloc(&pdata->ida, GFP_KERNEL);
+ index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
if (index < 0)
return index;
- pdata->cpu_map[index] = topology_core_id(cpu);
- attr_no = index + BASE_SYSFS_ATTR_NO;
- }
- if (attr_no > MAX_CORE_DATA - 1) {
- err = -ERANGE;
- goto ida_free;
+ pdata->cpu_map[index] = topology_core_id(cpu);
+ index += BASE_SYSFS_ATTR_NO;
}
tdata = init_temp_data(cpu, pkg_flag);
@@ -544,20 +546,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
if (get_ttarget(tdata, &pdev->dev) >= 0)
tdata->attr_size++;
- pdata->core_data[attr_no] = tdata;
+ pdata->core_data[index] = tdata;
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
+ err = create_core_attrs(tdata, pdata->hwmon_dev, index);
if (err)
goto exit_free;
return 0;
exit_free:
- pdata->core_data[attr_no] = NULL;
+ pdata->core_data[index] = NULL;
kfree(tdata);
ida_free:
if (!pkg_flag)
- ida_free(&pdata->ida, index);
+ ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
return err;
}
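
The subtle part of this change is that two numbers that used to be the same now diverge: index is the slot in pdata->core_data[] (IDA-allocated, bounded by NUM_REAL_CORES), while the sysfs attribute number is derived from the stable core id inside create_core_attrs(). A worked example, assuming the first hotplugged core has core id 3:

	/* illustrative mapping after this patch:
	 *
	 *   package sensor:  attr_no = 1             -> temp1_input
	 *                    slot    = core_data[1]
	 *
	 *   core id 3, first core seen:
	 *                    attr_no = 3 + 2          -> temp5_input
	 *                    index   = ida_alloc_max() = 0
	 *                    slot    = core_data[0 + BASE_SYSFS_ATTR_NO]
	 *                            = core_data[2]
	 */

Before the patch the attribute name also came from the IDA slot, so names could shift with hotplug order; now the name follows the stable core id while the storage stays dense.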
diff --git a/drivers/hwmon/gigabyte_waterforce.c b/drivers/hwmon/gigabyte_waterforce.c
index 85e523775714..8129d7b3ceaf 100644
--- a/drivers/hwmon/gigabyte_waterforce.c
+++ b/drivers/hwmon/gigabyte_waterforce.c
@@ -146,7 +146,7 @@ static int waterforce_get_status(struct waterforce_data *priv)
/* Send command for getting status */
ret = waterforce_write_expanded(priv, get_status_cmd, GET_STATUS_CMD_LENGTH);
if (ret < 0)
- return ret;
+ goto unlock_and_return;
ret = wait_for_completion_interruptible_timeout(&priv->status_report_received,
msecs_to_jiffies(STATUS_VALIDITY));
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 8d2ef3145bca..9fbab8f02334 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
+ int num_reg_temp_config;
struct device *hwmon_dev;
struct sensor_template_group tsi_temp_tg;
@@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6775_REG_TEMP_CRIT;
@@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6776_REG_TEMP_CRIT;
@@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6798_REG_TEMP_OVER;
reg_temp_hyst = NCT6798_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6798_REG_TEMP_CRIT;
@@ -4204,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
= reg_temp_crit[src - 1];
if (reg_temp_crit_l && reg_temp_crit_l[i])
data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
- data->reg_temp_config[src - 1] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[src - 1] = reg_temp_config[i];
data->temp_src[src - 1] = src;
continue;
}
@@ -4217,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
data->reg_temp[0][s] = reg_temp[i];
data->reg_temp[1][s] = reg_temp_over[i];
data->reg_temp[2][s] = reg_temp_hyst[i];
- data->reg_temp_config[s] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[s] = reg_temp_config[i];
if (reg_temp_crit_h && reg_temp_crit_h[i])
data->reg_temp[3][s] = reg_temp_crit_h[i];
else if (reg_temp_crit[src - 1])
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index b9bb469e2d8f..e5fa10b3b8bc 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -126,6 +126,21 @@ static const struct regulator_desc __maybe_unused mp2975_reg_desc[] = {
#define to_mp2975_data(x) container_of(x, struct mp2975_data, info)
+static int mp2975_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * Report direct format as configured by MFR_DC_LOOP_CTRL.
+ * Unlike on MP2971/MP2973 the reported VOUT_MODE isn't automatically
+ * internally updated, but always reads as PB_VOUT_MODE_VID.
+ */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
static int
mp2975_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg,
u16 mask)
@@ -869,6 +884,7 @@ static struct pmbus_driver_info mp2975_info = {
PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT | PMBUS_PHASE_VIRTUAL,
+ .read_byte_data = mp2975_read_byte_data,
.read_word_data = mp2975_read_word_data,
#if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR)
.num_regulators = 1,
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3757b9391e60..aa0ee8ecd6f2 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -90,10 +90,8 @@ obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
obj-$(CONFIG_I2C_OWL) += i2c-owl.o
-i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o
-obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
-i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o
-obj-$(CONFIG_I2C_APPLE) += i2c-apple.o
+obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi-core.o i2c-pasemi-pci.o
+obj-$(CONFIG_I2C_APPLE) += i2c-pasemi-core.o i2c-pasemi-platform.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3932e8d96a17..2c36b36d7d51 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -498,11 +498,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
/* Set block buffer mode */
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
-
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
outb_p(len, SMBHSTDAT0(priv));
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
outb_p(data->block[i+1], SMBBLKDAT(priv));
}
@@ -520,6 +519,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
}
data->block[0] = len;
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
data->block[i + 1] = inb_p(SMBBLKDAT(priv));
}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 88a053987403..60e813137f84 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+ /* flag the last byte as processed */
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_PROCESSED, &value);
+
i2c_imx_slave_finish_op(i2c_imx);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index 7d54a9f34c74..bd8becbdeeb2 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -369,6 +369,7 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
return 0;
}
+EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe);
irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
{
@@ -378,3 +379,8 @@ irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
complete(&smbus->irq_completion);
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(pasemi_irq_handler);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver");
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 0d2e7171e3a6..da94df466e83 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -613,20 +613,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
peripheral.addr = msgs[i].addr;
+ ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+ &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
+ if (ret)
+ goto err;
+
if (msgs[i].flags & I2C_M_RD) {
ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
&rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
if (ret)
goto err;
- }
-
- ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
- &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
- if (ret)
- goto err;
- if (msgs[i].flags & I2C_M_RD)
dma_async_issue_pending(gi2c->rx_c);
+ }
+
dma_async_issue_pending(gi2c->tx_c);
timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 91adcac875a4..c9d7afe489e8 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -219,10 +219,12 @@ config BMA400
config BMA400_I2C
tristate
+ select REGMAP_I2C
depends on BMA400
config BMA400_SPI
tristate
+ select REGMAP_SPI
depends on BMA400
config BMC150_ACCEL
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index feb86fe6c422..62490424b6ae 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -1821,7 +1821,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
{
struct device *dev = &st->spi->dev;
struct device_node *of_node = dev_of_node(dev);
- struct clk_init_data init;
+ struct clk_init_data init = {};
const char *clk_name;
int ret;
@@ -1891,10 +1891,14 @@ static int ad4130_setup(struct iio_dev *indio_dev)
return ret;
/*
- * Configure all GPIOs for output. If configured, the interrupt function
- * of P2 takes priority over the GPIO out function.
+ * Configure unused GPIOs for output. If configured, the interrupt
+ * function of P2 takes priority over the GPIO out function.
*/
- val = AD4130_IO_CONTROL_GPIO_CTRL_MASK;
+ val = 0;
+ for (i = 0; i < AD4130_MAX_GPIOS; i++)
+ if (st->pins_fn[i + AD4130_AIN2_P1] == AD4130_PIN_FN_NONE)
+ val |= FIELD_PREP(AD4130_IO_CONTROL_GPIO_CTRL_MASK, BIT(i));
+
val |= FIELD_PREP(AD4130_IO_CONTROL_INT_PIN_SEL_MASK, st->int_pin_sel);
ret = regmap_write(st->regmap, AD4130_IO_CONTROL_REG, val);
diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c
index 57700f124803..700564305057 100644
--- a/drivers/iio/adc/ad7091r8.c
+++ b/drivers/iio/adc/ad7091r8.c
@@ -195,7 +195,7 @@ static int ad7091r8_gpio_setup(struct ad7091r_state *st)
st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(st->reset_gpio))
- return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio),
+ return dev_err_probe(st->dev, PTR_ERR(st->reset_gpio),
"Error on requesting reset GPIO\n");
if (st->reset_gpio) {
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 2de5494e7c22..b15b7a3b66d5 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -48,6 +48,18 @@ config HDC2010
To compile this driver as a module, choose M here: the module
will be called hdc2010.
+config HDC3020
+ tristate "TI HDC3020 relative humidity and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ Say yes here to build support for the Texas Instruments
+ HDC3020, HDC3021 and HDC3022 relative humidity and temperature
+ sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hdc3020.
+
config HID_SENSOR_HUMIDITY
tristate "HID Environmental humidity sensor"
depends on HID_SENSOR_HUB
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index f19ff3de97c5..5fbeef299f61 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HDC2010) += hdc2010.o
+obj-$(CONFIG_HDC3020) += hdc3020.o
obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o
hts221-y := hts221_core.o \
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index 4e3311170725..ed70415512f6 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -322,7 +322,7 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev,
if (chan->type != IIO_TEMP)
return -EINVAL;
- *val = 16852;
+ *val = -16852;
return IIO_VAL_INT;
default:
diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
index 83e53acfbe88..c7f5866a177d 100644
--- a/drivers/iio/imu/bno055/Kconfig
+++ b/drivers/iio/imu/bno055/Kconfig
@@ -8,6 +8,7 @@ config BOSCH_BNO055
config BOSCH_BNO055_SERIAL
tristate "Bosch BNO055 attached via UART"
depends on SERIAL_DEV_BUS
+ select REGMAP
select BOSCH_BNO055
help
Enable this to support Bosch BNO055 IMUs attached via UART.
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 9a85752124dd..173dc00762a1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1584,10 +1584,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
ret = iio_device_register_sysfs_group(indio_dev,
&iio_dev_opaque->chan_attr_group);
if (ret)
- goto error_clear_attrs;
+ goto error_free_chan_attrs;
return 0;
+error_free_chan_attrs:
+ kfree(iio_dev_opaque->chan_attr_group.attrs);
+ iio_dev_opaque->chan_attr_group.attrs = NULL;
error_clear_attrs:
iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
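The industrialio-core hunk above adds a dedicated error label so that a failed group registration frees chan_attr_group.attrs before falling through to the existing list cleanup. The general shape of that goto-ladder unwind, as a user-space sketch:

    #include <stdlib.h>

    static int register_groups(void)
    {
        void *list = malloc(16);   /* stands in for channel_attr_list */
        void *attrs;

        if (!list)
            return -1;
        attrs = malloc(16);        /* stands in for chan_attr_group.attrs */
        if (!attrs)
            goto err_free_list;
        if (0 /* imagine the final registration step failing here */)
            goto err_free_attrs;
        free(attrs);
        free(list);
        return 0;

    err_free_attrs:
        free(attrs);               /* new label: undo the later step first */
    err_free_list:
        free(list);
        return -1;
    }

    int main(void) { return register_groups(); }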
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 5cd27f04b45e..b6c4bef2a7bb 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -226,6 +226,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 69938204456f..42b70cd42b39 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
struct rm3100_data *data;
unsigned int tmp;
int ret;
+ int samp_rate_index;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
@@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp);
if (ret < 0)
return ret;
+
+ samp_rate_index = tmp - RM3100_TMRC_OFFSET;
+ if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) {
+ dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n");
+ return -EINVAL;
+ }
/* Initializing max wait time, which is double conversion time. */
- data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2]
- * 2;
+ data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2;
/* Cycle count values may not be what we want. */
if ((tmp - RM3100_TMRC_OFFSET) == 0)
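The rm3100 change above is a bounds check on data read back from hardware: the TMRC value comes from the device over the bus, so it must be validated before being used as an index into rm3100_samp_rates[]. The pattern in isolation (the offset and table size here are placeholders, not the driver's real constants):

    #include <stdio.h>

    #define TMRC_OFFSET 0x92   /* placeholder */
    #define SAMP_NUM    13     /* placeholder */

    static const int samp_rates[SAMP_NUM] = { 600, 300, 150 /* ... */ };

    static int lookup(unsigned int reg)
    {
        int idx = (int)reg - TMRC_OFFSET;

        if (idx < 0 || idx >= SAMP_NUM)  /* reject anything outside the table */
            return -1;                   /* the driver returns -EINVAL */
        return samp_rates[idx];
    }

    int main(void)
    {
        printf("%d %d\n", lookup(0x92), lookup(0x80));  /* 600 -1 */
        return 0;
    }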
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 433d6fac83c4..e8a5fed07e88 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -87,6 +87,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
static const struct spi_device_id bmp280_spi_id[] = {
+ { "bmp085", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp180", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp181", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp280", (kernel_ulong_t)&bmp280_chip_info },
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 824349659d69..ce9c5bae83bf 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -401,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -455,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
@@ -486,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct ib_mw *mw;
int rc;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
@@ -1817,7 +1827,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
/* SRQ resize is not supported */
- break;
+ return -EINVAL;
case IB_SRQ_LIMIT:
/* Change the SRQ threshold */
if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@@ -1832,13 +1842,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
- break;
+ return 0;
default:
ibdev_err(&rdev->ibdev,
"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
- return 0;
}
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
@@ -2556,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
- /* Need unconditional fence for local invalidate
- * opcode to work as expected.
- */
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2583,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- /* Need unconditional fence for reg_mr
- * opcode to function as expected.
- */
-
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
@@ -2719,6 +2717,18 @@ bad:
return rc;
}
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+ /* Need an unconditional fence for non-wire memory opcodes
+ * to work as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
@@ -2798,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = -EINVAL;
goto bad;
}
- if (!rc)
+ if (!rc) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
bad:
if (rc) {
ibdev_err(&qp->rdev->ibdev,
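The bnxt_re changes above consolidate the unconditional-fence workaround: instead of hard-coding the flag in each WQE builder, one helper derives it from the WQE type, and the caller applies it only on pre-P5 silicon. A compact model of that dispatch (the enum values and flag bit are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum wqe_type { WQE_SEND, WQE_LOCAL_INV, WQE_REG_MR, WQE_BIND_MW };
    #define FLAG_UC_FENCE 0x1u

    struct wqe { enum wqe_type type; unsigned int flags; };

    /* mirrors bnxt_re_legacy_set_uc_fence(): fence non-wire memory opcodes */
    static void legacy_set_uc_fence(struct wqe *wqe)
    {
        if (wqe->type == WQE_LOCAL_INV || wqe->type == WQE_REG_MR ||
            wqe->type == WQE_BIND_MW)
            wqe->flags |= FLAG_UC_FENCE;
    }

    int main(void)
    {
        bool chip_is_gen_p5_p7 = false;  /* legacy chip needs the workaround */
        struct wqe wqe = { .type = WQE_REG_MR, .flags = 0 };

        if (!chip_is_gen_p5_p7)
            legacy_set_uc_fence(&wqe);
        printf("flags=0x%x\n", wqe.flags);  /* 0x1 on legacy, 0x0 on P5/P7 */
        return 0;
    }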
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f022c922fae5..54b4d2f3a5d8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -280,9 +280,6 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
-
- if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
- return;
rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index c98e04fe2ddd..439d0c7c5d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -744,7 +744,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
- srq->threshold = le16_to_cpu(sb->srq_limit);
+ if (!rc)
+ srq->threshold = le16_to_cpu(sb->srq_limit);
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 68c621ff59d0..5a91cbda4aee 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
- goto done;
+ goto free_cr_base;
}
}
set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
ret = 0;
done:
return ret;
+
+free_cr_base:
+ free_credit_return(dd);
+ goto done;
}
void free_credit_return(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6e5ac2023328..b67d23b1f286 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
- if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
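The hfi1 sdma one-liner above is a capacity-check fix: the descriptor array must be extended when it is actually full (num_desc == desc_limit), whereas the old num_desc + 1 == desc_limit test compared against the wrong boundary. In isolation:

    #include <stdio.h>

    static int need_extend(int num_desc, int desc_limit)
    {
        return num_desc == desc_limit;  /* was: num_desc + 1 == desc_limit */
    }

    int main(void)
    {
        /* with a limit of 4: extend exactly when all 4 slots are used */
        printf("%d %d\n", need_extend(3, 4), need_extend(4, 4));  /* 0 1 */
        return 0;
    }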
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index 8fb752f2eda2..2cb4b96db721 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index bd4b2b896444..ad50b77282f8 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
@@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
+ if (rf == dev_id) {
+ tasklet_kill(&rf->dpc_tasklet);
+ } else {
+ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+ tasklet_kill(&iwceq->dpc_tasklet);
+ }
}
/**
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index b5eb8d421988..0b046c061742 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
- init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
@@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
info.cq_base_pa = iwcq->kmem.pa;
}
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
- (u32)IRDMA_MAX_CQ_READ_THRESH);
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index f87531318feb..a78a067e3ce7 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
+ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
+ if (!MLX5_CAP_GEN(mdev, roce) ||
+ !MLX5_CAP_ROCE(mdev, roce_cc_general))
+ continue;
+
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
dbg_cc_params->params[i].port_num = port_num;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 869369cb5b5f..253fea374a72 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
UVERBS_IDR_ANY_OBJECT,
- UVERBS_ACCESS_WRITE,
+ UVERBS_ACCESS_READ,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index df1d1b0a3ef7..9947feb7fb8a 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
*/
copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
+ memcpy(eseg->inline_hdr.data, pdata, copysz);
stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
sizeof(eseg->inline_hdr.start) + copysz, 16);
*size += stride / 16;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7887a6786ed4..f118ce0a9a61 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, true, 0, alloc_and_init);
- if (rc)
+ if (rc) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info,
+ qp->usq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ }
return rc;
+ }
}
memset(&in_params, 0, sizeof(in_params));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7a5be705d718..6f2a688fccbf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1272,10 +1272,10 @@ static int ipoib_get_iflink(const struct net_device *dev)
/* parent interface */
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
/* child/vlan interface */
- return priv->parent->ifindex;
+ return READ_ONCE(priv->parent->ifindex);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 58f70cfec45a..040234c01be4 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");
+static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
+{
+ return kstrtou64(buffer, 16, (u64 *)kp->arg);
+}
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
- 0444);
+module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
+ &srpt_service_guid, 0444);
MODULE_PARM_DESC(srpt_service_guid,
"Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
/**
* srpt_qp_event - QP event callback function
* @event: Description of the event that occurred.
- * @ch: SRPT RDMA channel.
+ * @ptr: SRPT RDMA channel.
*/
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+static void srpt_qp_event(struct ib_event *event, void *ptr)
{
+ struct srpt_rdma_ch *ch = ptr;
+
pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
event->event, ch, ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
@@ -1807,8 +1813,7 @@ retry:
ch->cq_size = ch->rq_size + sq_size;
qp_init->qp_context = (void *)ch;
- qp_init->event_handler
- = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->event_handler = srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
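Two things happen in the ib_srpt diff above: srpt_service_guid becomes writable through a setter that parses hex via kstrtou64(buffer, 16, ...), and the QP event handler drops its function-pointer cast by taking void * as the callback type expects. A user-space equivalent of the setter's parsing (minus kstrtou64's tolerance for a trailing newline):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static int parse_u64_x(const char *s, unsigned long long *out)
    {
        char *end;

        errno = 0;
        *out = strtoull(s, &end, 16);
        if (errno || end == s || *end)   /* overflow, empty, or trailing junk */
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        unsigned long long v;

        printf("%d\n", parse_u64_x("0x5054junk", &v));   /* rejected */
        if (!parse_u64_x("0x2c903000a0b73", &v))
            printf("0x%016llx\n", v);
        return 0;
    }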
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index b1244d7df6cc..7c4b2a5cc1b5 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -294,6 +294,7 @@ static const struct xpad_device {
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -491,6 +492,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x15e4), /* Numark Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 13ef6284223d..7f67f9f2946b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -811,7 +811,6 @@ static int atkbd_probe(struct atkbd *atkbd)
{
struct ps2dev *ps2dev = &atkbd->ps2dev;
unsigned char param[2];
- bool skip_getid;
/*
* Some systems, where the bit-twiddling when testing the io-lines of the
@@ -825,6 +824,11 @@ static int atkbd_probe(struct atkbd *atkbd)
"keyboard reset failed on %s\n",
ps2dev->serio->phys);
+ if (atkbd_skip_getid(atkbd)) {
+ atkbd->id = 0xab83;
+ goto deactivate_kbd;
+ }
+
/*
* Then we check the keyboard ID. We should get 0xab83 under normal conditions.
* Some keyboards report different values, but the first byte is always 0xab or
@@ -833,18 +837,17 @@ static int atkbd_probe(struct atkbd *atkbd)
*/
param[0] = param[1] = 0xa5; /* initialize with invalid values */
- skip_getid = atkbd_skip_getid(atkbd);
- if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+ if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
/*
- * If the get ID command was skipped or failed, we check if we can at least set
+ * If the get ID command failed, we check if we can at least set
* the LEDs on the keyboard. This should work on every keyboard out there.
* It also turns the LEDs off, which we want anyway.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
return -1;
- atkbd->id = skip_getid ? 0xab83 : 0xabba;
+ atkbd->id = 0xabba;
return 0;
}
@@ -860,6 +863,7 @@ static int atkbd_probe(struct atkbd *atkbd)
return -1;
}
+deactivate_kbd:
/*
* Make sure nothing is coming from the keyboard and disturbs our
* internal state.
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ca150618d32f..953992b458e9 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -19,6 +19,7 @@
* Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
*/
+#include "linux/usb.h"
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -193,6 +194,8 @@ enum tp_type {
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
+/* maximum number of supported endpoints (currently trackpad and button) */
+#define MAX_ENDPOINTS 2
/* trackpad finger data block size */
#define FSIZE_TYPE1 (14 * sizeof(__le16))
@@ -891,6 +894,18 @@ static int bcm5974_resume(struct usb_interface *iface)
return error;
}
+static bool bcm5974_check_endpoints(struct usb_interface *iface,
+ const struct bcm5974_config *cfg)
+{
+ u8 ep_addr[MAX_ENDPOINTS + 1] = {0};
+
+ ep_addr[0] = cfg->tp_ep;
+ if (cfg->tp_type == TYPE1)
+ ep_addr[1] = cfg->bt_ep;
+
+ return usb_check_int_endpoints(iface, ep_addr);
+}
+
static int bcm5974_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
@@ -903,6 +918,11 @@ static int bcm5974_probe(struct usb_interface *iface,
/* find the product index */
cfg = bcm5974_get_config(udev);
+ if (!bcm5974_check_endpoints(iface, cfg)) {
+ dev_err(&iface->dev, "Unexpected non-int endpoint\n");
+ return -ENODEV;
+ }
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
input_dev = input_allocate_device();
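The bcm5974 probe hardening above validates endpoint types before use. usb_check_int_endpoints() takes a zero-terminated array of endpoint addresses (hence the MAX_ENDPOINTS + 1 sizing) and succeeds only if each listed endpoint exists on the interface as an interrupt endpoint. A mock of just the list-walking convention; the real helper also inspects the interface's descriptors:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ENDPOINTS 2

    static bool check_int_endpoints(const unsigned char *ep_addr,
                                    const unsigned char *int_eps, int n)
    {
        for (; *ep_addr; ep_addr++) {        /* stop at the zero terminator */
            bool found = false;

            for (int i = 0; i < n; i++)
                if (int_eps[i] == *ep_addr)
                    found = true;
            if (!found)
                return false;                /* probe bails out with -ENODEV */
        }
        return true;
    }

    int main(void)
    {
        unsigned char want[MAX_ENDPOINTS + 1] = { 0x81, 0x84, 0 };
        unsigned char have[] = { 0x81, 0x84 };  /* pretend interrupt endpoints */

        printf("%d\n", check_int_endpoints(want, have, 2));  /* 1 */
        return 0;
    }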
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index b585b1dab870..dfc6c581873b 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -635,6 +635,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
+ /* Fujitsu Lifebook U728 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
+ {
/* Gigabyte M912 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
@@ -1210,6 +1218,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index af32fbe57b63..b068ff8afbc9 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
}
}
- if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
+ /* Some devices with gpio_int_idx 0 list a third unused GPIO */
+ if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) {
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_first_gpios;
} else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index 20331e119beb..03d626776ba1 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -1372,6 +1372,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
+ .keepalive = true,
.num_nodes = 1,
.nodes = { &slv_qns_cdsp_mem_noc }
};
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index 629faa4c9aae..fc22cecf650f 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -2223,6 +2223,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8550",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c
index b83de54577b6..b962e6c233ef 100644
--- a/drivers/interconnect/qcom/sm8650.c
+++ b/drivers/interconnect/qcom/sm8650.c
@@ -1160,7 +1160,7 @@ static struct qcom_icc_node qns_gemnoc_sf = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
- .enable_mask = BIT(3),
+ .enable_mask = BIT(0),
.num_nodes = 1,
.nodes = { &ebi },
};
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index d19501d913b3..cbaf4f9c41be 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -1586,6 +1586,7 @@ static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.num_nodes = 1,
.nodes = { &ebi },
};
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 05722121f00e..4a27fbdb2d84 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
- unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
- struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
- cd);
- if (ret) {
- list_for_each_entry_from_reverse(
- master, &smmu_domain->devices, domain_head)
- arm_smmu_write_ctx_desc(
- master, mm_get_enqcmd_pasid(mm), NULL);
- break;
- }
- }
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (ret)
- goto err_put_notifier;
-
list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
return smmu_mn;
-err_put_notifier:
- /* Frees smmu_mn */
- mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
arm_smmu_free_shared_cd(cd);
return ERR_PTR(ret);
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
list_del(&smmu_mn->list);
- arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
- NULL);
-
/*
* If we went through clear(), we've already invalidated, and no
* new TLB entry can have been formed.
@@ -381,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+ struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
@@ -404,9 +381,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
goto err_free_bond;
}
+ ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+ if (ret)
+ goto err_put_notifier;
+
list_add(&bond->list, &master->bonds);
return 0;
+err_put_notifier:
+ arm_smmu_mmu_notifier_put(bond->smmu_mn);
err_free_bond:
kfree(bond);
return ret;
@@ -568,6 +551,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
mutex_lock(&sva_lock);
+
+ arm_smmu_write_ctx_desc(master, id, NULL);
+
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
bond = t;
@@ -590,7 +576,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
- ret = __arm_smmu_sva_bind(dev, mm);
+ ret = __arm_smmu_sva_bind(dev, id, mm);
mutex_unlock(&sva_lock);
return ret;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 68b6bc5e7c71..6317aaf7b3ab 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED) {
+ if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
+ return NULL;
+ }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
- if (dev) {
- struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
-
- if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
- kfree(smmu_domain);
- return NULL;
- }
- }
-
return &smmu_domain->domain;
}
@@ -1600,7 +1595,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc_paging = arm_smmu_domain_alloc_paging,
+ .domain_alloc = arm_smmu_domain_alloc,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6fb5f6fceea1..11652e0bcab3 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -396,8 +396,6 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
-static void domain_update_iotlb(struct dmar_domain *domain);
-
/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{
@@ -1218,7 +1216,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
return NULL;
}
-static void domain_update_iotlb(struct dmar_domain *domain)
+void domain_update_iotlb(struct dmar_domain *domain)
{
struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
@@ -1368,6 +1366,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_unlock_irqrestore(&domain->lock, flags);
}
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ unsigned long pfn, unsigned int pages,
+ int ih)
+{
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned long bitmask = aligned_pages - 1;
+ unsigned int mask = ilog2(aligned_pages);
+ u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
+ /*
+ * Fallback to domain selective flush if no PSI support or
+ * the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+}
+
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1384,42 +1422,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
- if (domain->use_first_level) {
+ if (domain->use_first_level)
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
- } else {
- unsigned long bitmask = aligned_pages - 1;
-
- /*
- * PSI masks the low order bits of the base address. If the
- * address isn't aligned to the mask, then compute a mask value
- * needed to ensure the target range is flushed.
- */
- if (unlikely(bitmask & pfn)) {
- unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
- /*
- * Since end_pfn <= pfn + bitmask, the only way bits
- * higher than bitmask can differ in pfn and end_pfn is
- * by carrying. This means after masking out bitmask,
- * high bits starting with the first set bit in
- * shared_bits are all equal in both pfn and end_pfn.
- */
- shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
- mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
- }
-
- /*
- * Fallback to domain selective flush if no PSI support or
- * the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ else
+ __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
/*
* In caching mode, changes of pages from non-present to present require
@@ -1443,6 +1449,46 @@ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *
iommu_flush_write_buffer(iommu);
}
+/*
+ * Flush the relevant caches in nested translation if the domain
+ * also serves as a parent.
+ */
+static void parent_domain_flush(struct dmar_domain *domain,
+ unsigned long pfn,
+ unsigned long pages, int ih)
+{
+ struct dmar_domain *s1_domain;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ struct device_domain_info *device_info;
+ struct iommu_domain_info *info;
+ unsigned long flags;
+ unsigned long i;
+
+ xa_for_each(&s1_domain->iommu_array, i, info)
+ __iommu_flush_iotlb_psi(info->iommu, info->did,
+ pfn, pages, ih);
+
+ if (!s1_domain->has_iotlb_device)
+ continue;
+
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ list_for_each_entry(device_info, &s1_domain->devices, link)
+ /*
+ * Address translation cache in device side caches the
+ * result of nested translation. There is no easy way
+ * to identify the exact set of nested translations
+ * affected by a change in S2. So just flush the entire
+ * device cache.
+ */
+ __iommu_flush_dev_iotlb(device_info, 0,
+ MAX_AGAW_PFN_WIDTH);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+}
+
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -1462,6 +1508,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
+
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, 0, -1, 0);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1985,6 +2034,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
+ if (domain->nested_parent)
+ parent_domain_flush(domain, start_pfn,
+ lvl_pages, 0);
}
pte++;
@@ -3883,6 +3935,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
/* Must be NESTING domain */
@@ -3908,11 +3961,16 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
if (!domain)
return ERR_PTR(-ENOMEM);
- if (nested_parent)
- to_dmar_domain(domain)->nested_parent = true;
+ dmar_domain = to_dmar_domain(domain);
+
+ if (nested_parent) {
+ dmar_domain->nested_parent = true;
+ INIT_LIST_HEAD(&dmar_domain->s1_domains);
+ spin_lock_init(&dmar_domain->s1_lock);
+ }
if (dirty_tracking) {
- if (to_dmar_domain(domain)->use_first_level) {
+ if (dmar_domain->use_first_level) {
iommu_domain_free(domain);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -3924,8 +3982,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ WARN_ON(dmar_domain->nested_parent &&
+ !list_empty(&dmar_domain->s1_domains));
if (domain != &si_domain->domain)
- domain_exit(to_dmar_domain(domain));
+ domain_exit(dmar_domain);
}
int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -4107,6 +4169,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, start_pfn, nrpages,
+ list_empty(&gather->freelist));
put_pages_list(&gather->freelist);
}
@@ -4664,21 +4729,70 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
return vtd;
}
+/*
+ * Set dirty tracking for the device list of a domain. The caller must
+ * hold the domain->lock when calling it.
+ */
+static int device_set_dirty_tracking(struct list_head *devices, bool enable)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, devices, link) {
+ ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
+ IOMMU_NO_PASID, enable);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
+ bool enable)
+{
+ struct dmar_domain *s1_domain;
+ unsigned long flags;
+ int ret;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ ret = device_set_dirty_tracking(&s1_domain->devices, enable);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ if (ret)
+ goto err_unwind;
+ }
+ spin_unlock(&domain->s1_lock);
+ return 0;
+
+err_unwind:
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ device_set_dirty_tracking(&s1_domain->devices,
+ domain->dirty_tracking);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+ return ret;
+}
+
static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct device_domain_info *info;
int ret;
spin_lock(&dmar_domain->lock);
if (dmar_domain->dirty_tracking == enable)
goto out_unlock;
- list_for_each_entry(info, &dmar_domain->devices, link) {
- ret = intel_pasid_setup_dirty_tracking(info->iommu,
- info->domain, info->dev,
- IOMMU_NO_PASID, enable);
+ ret = device_set_dirty_tracking(&dmar_domain->devices, enable);
+ if (ret)
+ goto err_unwind;
+
+ if (dmar_domain->nested_parent) {
+ ret = parent_domain_set_dirty_tracking(dmar_domain, enable);
if (ret)
goto err_unwind;
}
@@ -4690,10 +4804,8 @@ out_unlock:
return 0;
err_unwind:
- list_for_each_entry(info, &dmar_domain->devices, link)
- intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain,
- info->dev, IOMMU_NO_PASID,
- dmar_domain->dirty_tracking);
+ device_set_dirty_tracking(&dmar_domain->devices,
+ dmar_domain->dirty_tracking);
spin_unlock(&dmar_domain->lock);
return ret;
}
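The factored-out __iommu_flush_iotlb_psi() above keeps the subtle mask-widening math, and its shared_bits comment is easiest to see with concrete numbers: PSI can only flush a naturally aligned power-of-two range, so when pfn is misaligned the mask grows until the aligned block covers [pfn, pfn + pages). Worked example:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pfn = 0x1003, pages = 2;   /* misaligned 2-page flush */
        unsigned long bitmask = 1;               /* roundup_pow_of_two(2) - 1 */
        unsigned int mask = 1;                   /* ilog2(2) */

        if (bitmask & pfn) {
            unsigned long end_pfn = pfn + pages - 1;           /* 0x1004 */
            unsigned long shared = ~(pfn ^ end_pfn) & ~bitmask;

            /* pfn^end_pfn = 0x7, so bits 3 and up agree: flush 2^3 pages */
            mask = shared ? __builtin_ctzl(shared) : 64;
        }
        printf("mask=%u, flush 0x%lx..0x%lx\n", mask,
               pfn & ~((1ul << mask) - 1),
               (pfn & ~((1ul << mask) - 1)) + (1ul << mask) - 1);
        return 0;  /* mask=3, flush 0x1000..0x1007, covering 0x1003-0x1004 */
    }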
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d02f916d8e59..4145c04cb1c6 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -627,6 +627,10 @@ struct dmar_domain {
int agaw;
/* maximum mapped address */
u64 max_addr;
+ /* Protect the s1_domains list */
+ spinlock_t s1_lock;
+ /* Track s1_domains nested on this domain */
+ struct list_head s1_domains;
};
/* Nested user domain */
@@ -637,6 +641,8 @@ struct dmar_domain {
unsigned long s1_pgtbl;
/* page table attributes */
struct iommu_hwpt_vtd_s1 s1_cfg;
+ /* link to parent domain siblings */
+ struct list_head s2_link;
};
};
@@ -1060,6 +1066,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
+void domain_update_iotlb(struct dmar_domain *domain);
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index f26c7f1c46cc..a7d68f3d518a 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -65,12 +65,20 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ domain_update_iotlb(dmar_domain);
+
return 0;
}
static void intel_nested_domain_free(struct iommu_domain *domain)
{
- kfree(to_dmar_domain(domain));
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct dmar_domain *s2_domain = dmar_domain->s2_domain;
+
+ spin_lock(&s2_domain->s1_lock);
+ list_del(&dmar_domain->s2_link);
+ spin_unlock(&s2_domain->s1_lock);
+ kfree(dmar_domain);
}
static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
@@ -95,7 +103,7 @@ static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
}
static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
- unsigned long npages, bool ih)
+ u64 npages, bool ih)
{
struct iommu_domain_info *info;
unsigned int mask;
@@ -201,5 +209,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
spin_lock_init(&domain->lock);
xa_init(&domain->iommu_array);
+ spin_lock(&s2_domain->s1_lock);
+ list_add(&domain->s2_link, &s2_domain->s1_domains);
+ spin_unlock(&s2_domain->s1_lock);
+
return &domain->domain;
}
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 3239cefa4c33..108158e2b907 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -428,7 +428,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Set up dirty tracking on a second only or nested translation type.
*/
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled)
{
@@ -445,7 +444,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
return -ENODEV;
}
- did = domain_id_iommu(domain, iommu);
+ did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
pgtt != PASID_ENTRY_PGTT_NESTED) {
@@ -658,6 +657,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, s2_domain->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (s2_domain->dirty_tracking)
+ pasid_set_ssade(pte);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 8d40d4c66e31..487ede039bdd 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -307,7 +307,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index c3fc9201d0be..65814cbc8402 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
}
iommu_mm->pasid = pasid;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
+ INIT_LIST_HEAD(&iommu_mm->sva_handles);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
* initialization to iommu_mm fields. If it does, readers may see a
@@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
goto out_unlock;
}
+ list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
+ if (handle->dev == dev) {
+ refcount_inc(&handle->users);
+ mutex_unlock(&iommu_sva_lock);
+ return handle;
+ }
+ }
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
ret = -ENOMEM;
@@ -111,6 +120,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
list_add(&domain->next, &mm->iommu_mm->sva_domains);
out:
+ refcount_set(&handle->users, 1);
+ list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
mutex_unlock(&iommu_sva_lock);
handle->dev = dev;
handle->domain = domain;
@@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
+ if (!refcount_dec_and_test(&handle->users)) {
+ mutex_unlock(&iommu_sva_lock);
+ return;
+ }
+ list_del(&handle->handle_item);
+
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
if (--domain->users == 0) {
list_del(&domain->next);
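The iommu-sva change above makes repeated binds of the same (device, mm) pair share one handle: the bind path first searches sva_handles and takes a reference, and only the final unbind detaches the PASID. The lookup-or-create shape in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct handle { int dev; int users; struct handle *next; };
    static struct handle *handles;

    static struct handle *bind(int dev)
    {
        for (struct handle *h = handles; h; h = h->next)
            if (h->dev == dev) { h->users++; return h; }  /* reuse */

        struct handle *h = calloc(1, sizeof(*h));

        if (!h)
            return NULL;
        h->dev = dev;
        h->users = 1;
        h->next = handles;
        handles = h;
        return h;
    }

    static void unbind(struct handle *h)
    {
        if (--h->users)
            return;   /* another binder still holds it */
        /* unlink from the list and free; detach happens only here */
    }

    int main(void)
    {
        struct handle *a = bind(1), *b = bind(1);

        printf("%d\n", a == b);  /* 1: the second bind reused the handle */
        unbind(a);
        unbind(b);
        return 0;
    }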
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 68e648b55767..d14413916f93 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1799,7 +1799,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
* domain. Do not use in new drivers.
*/
if (ops->default_domain) {
- if (req_type)
+ if (req_type != ops->default_domain->type)
return ERR_PTR(-EINVAL);
return ops->default_domain;
}
@@ -1871,10 +1871,18 @@ static int iommu_get_def_domain_type(struct iommu_group *group,
const struct iommu_ops *ops = dev_iommu_ops(dev);
int type;
- if (!ops->def_domain_type)
- return cur_type;
-
- type = ops->def_domain_type(dev);
+ if (ops->default_domain) {
+ /*
+ * Drivers that declare a global static default_domain will
+ * always choose that.
+ */
+ type = ops->default_domain->type;
+ } else {
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+ else
+ return cur_type;
+ }
if (!type || cur_type == type)
return cur_type;
if (!cur_type)
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 3f3f1fa1a0a9..33d142f8057d 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -263,7 +263,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
if (cmd->__reserved)
return -EOPNOTSUPP;
- if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
+ if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
+ (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
return -EINVAL;
idev = iommufd_get_device(ucmd, cmd->dev_id);
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 504ac1b01b2d..05fd9d3abf1b 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -1330,20 +1330,23 @@ out_unlock:
int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
{
+ u32 new_id;
int rc;
down_write(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
- rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
- xa_limit_16b, GFP_KERNEL_ACCOUNT);
+ rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
+ GFP_KERNEL_ACCOUNT);
+
if (rc)
goto out_unlock;
rc = iopt_calculate_iova_alignment(iopt);
if (rc) {
- xa_erase(&iopt->access_list, access->iopt_access_list_id);
+ xa_erase(&iopt->access_list, new_id);
goto out_unlock;
}
+ access->iopt_access_list_id = new_id;
out_unlock:
up_write(&iopt->iova_rwsem);
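The io_pagetable fix above is a publish-on-success pattern: the allocated ID goes into a local new_id, and access->iopt_access_list_id is written only after iopt_calculate_iova_alignment() succeeds, so an error path never leaves a stale ID visible through the access object. Minimal form:

    #include <stdio.h>

    struct access { unsigned int id; };

    static int add_access(struct access *a, int fail)
    {
        unsigned int new_id = 42;  /* stand-in for the xa_alloc() result */

        if (fail) {
            /* erase new_id from the table; a->id was never touched */
            return -1;
        }
        a->id = new_id;            /* commit only after every step passed */
        return 0;
    }

    int main(void)
    {
        struct access a = { 0 };

        printf("%d %u\n", add_access(&a, 1), a.id);  /* -1 0 */
        return 0;
    }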
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index 482d4059f5db..e854d3f67205 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -45,6 +45,7 @@ enum {
enum {
MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
+ MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
};
enum {
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 0a92c9eeaf7f..db8c46bee155 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -100,7 +100,7 @@ struct iova_bitmap {
struct iova_bitmap_map mapped;
/* userspace address of the bitmap */
- u64 __user *bitmap;
+ u8 __user *bitmap;
/* u64 index that @mapped points to */
unsigned long mapped_base_index;
@@ -113,6 +113,9 @@ struct iova_bitmap {
/* length of the IOVA range for the whole bitmap */
size_t length;
+
+ /* length of the IOVA range set ahead the pinned pages */
+ unsigned long set_ahead_length;
};
/*
@@ -162,7 +165,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
{
struct iova_bitmap_map *mapped = &bitmap->mapped;
unsigned long npages;
- u64 __user *addr;
+ u8 __user *addr;
long ret;
/*
@@ -176,17 +179,18 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
sizeof(*bitmap->bitmap), PAGE_SIZE);
/*
- * We always cap at max number of 'struct page' a base page can fit.
- * This is, for example, on x86 means 2M of bitmap data max.
- */
- npages = min(npages, PAGE_SIZE / sizeof(struct page *));
-
- /*
* Bitmap address to be pinned is calculated via pointer arithmetic
* with bitmap u64 word index.
*/
addr = bitmap->bitmap + bitmap->mapped_base_index;
+ /*
+ * We always cap at max number of 'struct page' a base page can fit.
+ * This is, for example, on x86 means 2M of bitmap data max.
+ */
+ npages = min(npages + !!offset_in_page(addr),
+ PAGE_SIZE / sizeof(struct page *));
+
ret = pin_user_pages_fast((unsigned long)addr, npages,
FOLL_WRITE, mapped->pages);
if (ret <= 0)
@@ -247,7 +251,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
mapped = &bitmap->mapped;
mapped->pgshift = __ffs(page_size);
- bitmap->bitmap = data;
+ bitmap->bitmap = (u8 __user *)data;
bitmap->mapped_total_index =
iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
bitmap->iova = iova;
@@ -304,7 +308,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
remaining = min_t(unsigned long, remaining,
- bytes / sizeof(*bitmap->bitmap));
+ DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
return remaining;
}
@@ -341,6 +345,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap)
return bitmap->mapped_base_index >= bitmap->mapped_total_index;
}
+static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap,
+ size_t set_ahead_length)
+{
+ int ret = 0;
+
+ while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) {
+ unsigned long length = iova_bitmap_mapped_length(bitmap);
+ unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+
+ ret = iova_bitmap_get(bitmap);
+ if (ret)
+ break;
+
+ length = min(length, set_ahead_length);
+ iova_bitmap_set(bitmap, iova, length);
+
+ set_ahead_length -= length;
+ bitmap->mapped_base_index +=
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ iova_bitmap_put(bitmap);
+ }
+
+ bitmap->set_ahead_length = 0;
+ return ret;
+}
+
/*
* Advances to the next range, releases the current pinned
* pages and pins the next set of bitmap pages.
@@ -357,6 +387,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap)
if (iova_bitmap_done(bitmap))
return 0;
+ /* Iterate, set and skip any bits requested for next iteration */
+ if (bitmap->set_ahead_length) {
+ int ret;
+
+ ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length);
+ if (ret)
+ return ret;
+ }
+
/* When advancing the index we pin the next set of bitmap pages */
return iova_bitmap_get(bitmap);
}
@@ -409,6 +448,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ unsigned long last_page_idx = mapped->npages - 1;
do {
unsigned int page_idx = cur_bit / BITS_PER_PAGE;
@@ -417,10 +457,18 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
last_bit - cur_bit + 1);
void *kaddr;
+ if (unlikely(page_idx > last_page_idx))
+ break;
+
kaddr = kmap_local_page(mapped->pages[page_idx]);
bitmap_set(kaddr, offset, nbits);
kunmap_local(kaddr);
cur_bit += nbits;
} while (cur_bit <= last_bit);
+
+ if (unlikely(cur_bit <= last_bit)) {
+ bitmap->set_ahead_length =
+ ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift);
+ }
}
EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD);
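Part of the iova_bitmap rework above switches the user pointer to u8 * and adds one page to the pin count when the bitmap address is not page aligned, because the same byte length can then straddle an extra page. The arithmetic:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul

    int main(void)
    {
        unsigned long addr = 0x10000 + 4090;  /* 6 bytes shy of a page edge */
        unsigned long bytes = 16;
        unsigned long npages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;  /* 1 */

        npages += !!(addr & (PAGE_SIZE - 1));  /* straddles: pin 2 pages */
        printf("pin %lu page(s)\n", npages);
        return 0;
    }

The companion set_ahead_length logic records bits that fell past the pinned window in iova_bitmap_set() so that iova_bitmap_advance() can set them once the next window is pinned.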
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index d9e9920c7eba..7a2199470f31 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -36,11 +36,12 @@ static struct mock_bus_type iommufd_mock_bus_type = {
},
};
-static atomic_t mock_dev_num;
+static DEFINE_IDA(mock_dev_ida);
enum {
MOCK_DIRTY_TRACK = 1,
MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
+ MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
/*
* Like a real page table alignment requires the low bits of the address
@@ -53,6 +54,7 @@ enum {
MOCK_PFN_START_IOVA = _MOCK_PFN_START,
MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
+ MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
/*
@@ -61,8 +63,8 @@ enum {
* In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
* value. This has a much smaller randomization space and syzkaller can hit it.
*/
-static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
- u64 *iova)
+static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
+ u64 *iova)
{
struct syz_layout {
__u32 nth_area;
@@ -86,6 +88,21 @@ static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
return 0;
}
+static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
+ u64 *iova)
+{
+ unsigned long ret;
+
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return 0;
+ }
+ ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
+ mutex_unlock(&access->ioas_lock);
+ return ret;
+}
+
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id, u64 *iova, u32 *flags)
{
@@ -98,7 +115,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
if (IS_ERR(ioas))
return;
- *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
+ *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
iommufd_put_object(ucmd->ictx, &ioas->obj);
}
@@ -121,6 +138,7 @@ enum selftest_obj_type {
struct mock_dev {
struct device dev;
unsigned long flags;
+ int id;
};
struct selftest_obj {
@@ -191,6 +209,34 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
+static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
+ unsigned long iova, size_t page_size,
+ unsigned long flags)
+{
+ unsigned long cur, end = iova + page_size - 1;
+ bool dirty = false;
+ void *ent, *old;
+
+ for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
+ ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+ if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
+ continue;
+
+ dirty = true;
+ /* Clear dirty */
+ if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
+ unsigned long val;
+
+ val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+ old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+ xa_mk_value(val), GFP_KERNEL);
+ WARN_ON_ONCE(ent != old);
+ }
+ }
+
+ return dirty;
+}
+
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
@@ -198,31 +244,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
{
struct mock_iommu_domain *mock =
container_of(domain, struct mock_iommu_domain, domain);
- unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
- void *ent, *old;
+ unsigned long end = iova + size;
+ void *ent;
if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
return -EINVAL;
- for (i = 0; i < max; i++) {
- unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+ do {
+ unsigned long pgsize = MOCK_IO_PAGE_SIZE;
+ unsigned long head;
- ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
- if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
- /* Clear dirty */
- if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
- unsigned long val;
-
- val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns,
- cur / MOCK_IO_PAGE_SIZE,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- }
- iommu_dirty_bitmap_record(dirty, cur,
- MOCK_IO_PAGE_SIZE);
+ ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+ if (!ent) {
+ iova += pgsize;
+ continue;
}
- }
+
+ if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
+ pgsize = MOCK_HUGE_PAGE_SIZE;
+ head = iova & ~(pgsize - 1);
+
+ /* Clear dirty */
+ if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
+ iommu_dirty_bitmap_record(dirty, head, pgsize);
+ iova = head + pgsize;
+ } while (iova < end);
return 0;
}
@@ -234,6 +280,7 @@ const struct iommu_dirty_ops dirty_ops = {
static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
+ struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
struct mock_iommu_domain *mock;
mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -242,6 +289,8 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
mock->domain.ops = mock_ops.default_domain_ops;
mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
xa_init(&mock->pfns);
@@ -287,7 +336,7 @@ mock_domain_alloc_user(struct device *dev, u32 flags,
return ERR_PTR(-EOPNOTSUPP);
if (user_data || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(NULL);
+ domain = mock_domain_alloc_paging(dev);
if (!domain)
return ERR_PTR(-ENOMEM);
if (has_dirty_flag)
@@ -350,6 +399,9 @@ static int mock_domain_map_pages(struct iommu_domain *domain,
if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
flags = MOCK_PFN_LAST_IOVA;
+ if (pgsize != MOCK_IO_PAGE_SIZE)
+ flags |= MOCK_PFN_HUGE_IOVA;
old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
flags),
@@ -394,20 +446,27 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
/*
* iommufd generates unmaps that must be a strict
- * superset of the map's performend So every starting
- * IOVA should have been an iova passed to map, and the
+ * superset of the maps performed, so every
+ * starting/ending IOVA should have been an iova passed
+ * to map.
*
- * First IOVA must be present and have been a first IOVA
- * passed to map_pages
+ * This simple logic doesn't work when the HUGE_PAGE is
+ * turned on, since the core code will automatically
+ * switch between the two page sizes, creating a break in
+ * the unmap calls. The break can land in the middle of a
+ * contiguous IOVA range.
*/
- if (first) {
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_START_IOVA));
- first = false;
+ if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
+ if (first) {
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_START_IOVA));
+ first = false;
+ }
+ if (pgcount == 1 &&
+ cur + MOCK_IO_PAGE_SIZE == pgsize)
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_LAST_IOVA));
}
- if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_LAST_IOVA));
iova += MOCK_IO_PAGE_SIZE;
ret += MOCK_IO_PAGE_SIZE;
@@ -595,7 +654,7 @@ static void mock_dev_release(struct device *dev)
{
struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
- atomic_dec(&mock_dev_num);
+ ida_free(&mock_dev_ida, mdev->id);
kfree(mdev);
}
@@ -604,7 +663,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
struct mock_dev *mdev;
int rc;
- if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY))
+ if (dev_flags &
+ ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
return ERR_PTR(-EINVAL);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
@@ -616,8 +676,12 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
mdev->dev.release = mock_dev_release;
mdev->dev.bus = &iommufd_mock_bus_type.bus;
- rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
- atomic_inc_return(&mock_dev_num));
+ rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err_put;
+ mdev->id = rc;
+
+ rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
if (rc)
goto err_put;
@@ -1119,7 +1183,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
+ iova = iommufd_test_syz_conv_iova(staccess->access,
&cmd->access_pages.iova);
npages = (ALIGN(iova + length, PAGE_SIZE) -
@@ -1221,8 +1285,8 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
- &cmd->access_rw.iova);
+ iova = iommufd_test_syz_conv_iova(staccess->access,
+ &cmd->access_rw.iova);
rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
if (rc)
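
A note on the dirty-walk hunk above: read_and_clear now advances at a variable stride, aligning each step down to the start of the page that covers the current IOVA. A minimal userspace sketch of that head-alignment logic; PAGE_SZ, HUGE_SZ and is_huge() are hypothetical stand-ins for MOCK_IO_PAGE_SIZE, MOCK_HUGE_PAGE_SIZE and the MOCK_PFN_HUGE_IOVA xarray lookup:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ 0x800UL            /* stand-in for MOCK_IO_PAGE_SIZE */
#define HUGE_SZ (4 * PAGE_SZ)      /* real ratio is 512x; 4x keeps output short */

/* Toy policy standing in for the MOCK_PFN_HUGE_IOVA bit in the xarray. */
static bool is_huge(unsigned long iova)
{
	return iova >= HUGE_SZ;
}

int main(void)
{
	unsigned long iova = 3 * PAGE_SZ, end = HUGE_SZ + 2 * PAGE_SZ;

	do {
		unsigned long pgsize = is_huge(iova) ? HUGE_SZ : PAGE_SZ;
		unsigned long head = iova & ~(pgsize - 1);	/* align down */

		printf("record [%#lx, %#lx)\n", head, head + pgsize);
		iova = head + pgsize;	/* step over the whole page */
	} while (iova < end);
	return 0;
}

Run as-is, this records one small page and then one whole huge page, matching how the mock domain reports dirty ranges at the larger granularity.
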
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 5559c943f03f..2b0b3175cea0 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -2,7 +2,7 @@
/*
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
generic_handle_domain_irq(b->domain, irq);
} while (status);
out:
+ /* Don't ack parent before all device writes are done */
+ wmb();
+
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d097001c1e3e..b822752c4261 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
}
+static bool rdists_support_shareable(void)
+{
+ return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
+}
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void)
break;
}
val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
- val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
- FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
- val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
- FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ if (rdists_support_shareable()) {
+ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ }
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
return val;
@@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void)
WARN_ON(!IS_ALIGNED(pa, psz));
val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
- val |= GICR_VPROPBASER_RaWb;
- val |= GICR_VPROPBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ }
val |= GICR_VPROPBASER_4_1_Z;
val |= GICR_VPROPBASER_4_1_VALID;
@@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void)
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ if (!rdists_support_shareable())
tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
@@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void)
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ if (!rdists_support_shareable())
tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
@@ -3172,6 +3181,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
+out:
if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@@ -3207,7 +3217,6 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
-out:
gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
smp_processor_id(),
@@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- int from, cpu = cpumask_first(mask_val);
+ struct cpumask common, *table_mask;
unsigned long flags;
+ int from, cpu;
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
* taken on any vLPI handling path that evaluates vpe->col_idx.
*/
from = vpe_to_cpuid_lock(vpe, &flags);
- if (from == cpu)
- goto out;
-
- vpe->col_idx = cpu;
+ table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
/*
- * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
- * is sharing its VPE table with the current one.
+ * If we are offered another CPU in the same GICv4.1 ITS
+ * affinity, pick this one. Otherwise, any CPU will do.
*/
- if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
- cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+ if (table_mask && cpumask_and(&common, mask_val, table_mask))
+ cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (from == cpu)
goto out;
+ vpe->col_idx = cpu;
+
its_send_vmovp(vpe);
its_vpe_db_proxy_move(vpe, from, cpu);
@@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
GENMASK_ULL(51, 12);
val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
- val |= GICR_VPROPBASER_RaWb;
- val |= GICR_VPROPBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ }
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
- val |= GICR_VPENDBASER_RaWaWb;
- val |= GICR_VPENDBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPENDBASER_RaWaWb;
+ val |= GICR_VPENDBASER_InnerShareable;
+ }
/*
* There is no good way of finding out if the pending table is
* empty as we can race against the doorbell interrupt very
@@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its)
u32 ctlr;
int err;
+ its_enable_quirks(its);
+
if (is_v4(its)) {
if (!(its->typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(its);
@@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node)
if (!its)
return -ENOMEM;
- its_enable_quirks(its);
err = its_probe_one(its);
if (err) {
its_node_destroy(its);
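
On the its_vpe_set_affinity() rework above: the new logic intersects the requested mask with the vPE table mask and stays on the current CPU when it is in that intersection, skipping an expensive VMOVP. A userspace sketch of just the selection step, with plain bitmasks standing in for cpumasks (names are illustrative):

#include <stdio.h>

/* requested/table play the roles of mask_val and vpe_table_mask. */
static int pick_cpu(unsigned long requested, unsigned long table, int from)
{
	unsigned long common = requested & table;

	if (common)
		return (common >> from) & 1 ? from : __builtin_ctzl(common);
	return __builtin_ctzl(requested);	/* any requested CPU will do */
}

int main(void)
{
	printf("%d\n", pick_cpu(0xc /* {2,3} */, 0x6 /* {1,2} */, 2)); /* 2: stay put */
	printf("%d\n", pick_cpu(0x8 /* {3} */,   0x6 /* {1,2} */, 2)); /* 3: must move */
	return 0;
}
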
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 1623cd779175..b3736bdd4b9f 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -241,7 +241,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
int ret;
unsigned int i, type;
unsigned long hwirq = 0;
- struct eiointc *priv = domain->host_data;
+ struct eiointc_priv *priv = domain->host_data;
ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
if (ret)
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 5101a3fb11df..58881d313979 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -235,22 +235,17 @@ static const struct irq_domain_ops mbigen_domain_ops = {
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
- struct device *parent;
struct platform_device *child;
struct irq_domain *domain;
struct device_node *np;
u32 num_pins;
int ret = 0;
- parent = bus_get_dev_root(&platform_bus_type);
- if (!parent)
- return -ENODEV;
-
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
- child = of_platform_device_create(np, NULL, parent);
+ child = of_platform_device_create(np, NULL, NULL);
if (!child) {
ret = -ENOMEM;
break;
@@ -273,7 +268,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
}
}
- put_device(parent);
if (ret)
of_node_put(np);
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index cda5838d2232..7942d8eb3d00 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -389,8 +389,8 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
/* Don't use devm_ioremap_resource, as we're accessing a shared region. */
priv->base = devm_ioremap(dev, res.start, resource_size(&res));
of_node_put(msgram_np);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ if (!priv->base)
+ return -ENOMEM;
} else {
/* Otherwise, fall back to simple MMIO. */
priv->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 5b7bc4fd9517..bf0b40b0fad4 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -148,7 +148,13 @@ static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ if (unlikely(irqd_irq_disabled(d))) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+ } else {
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ }
}
#ifdef CONFIG_SMP
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6e80d7bd3c4d..3ed257334562 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -49,7 +49,9 @@ MODULE_LICENSE("GPL");
/* -------- driver information -------------------------------------- */
static DEFINE_MUTEX(capi_mutex);
-static struct class *capi_class;
+static const struct class capi_class = {
+ .name = "capi",
+};
static int capi_major = 68; /* allocated */
module_param_named(major, capi_major, uint, 0);
@@ -1393,18 +1395,19 @@ static int __init capi_init(void)
kcapi_exit();
return major_ret;
}
- capi_class = class_create("capi");
- if (IS_ERR(capi_class)) {
+
+ ret = class_register(&capi_class);
+ if (ret) {
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
- return PTR_ERR(capi_class);
+ return ret;
}
- device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
+ device_create(&capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
if (capinc_tty_init() < 0) {
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
return -ENOMEM;
@@ -1427,8 +1430,8 @@ static void __exit capi_exit(void)
{
proc_exit();
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
capinc_tty_exit();
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 09b72f14d4b7..b4ed0bb8ddfb 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -31,7 +31,9 @@ struct dsp_element_entry {
static LIST_HEAD(dsp_elements);
/* sysfs */
-static struct class *elements_class;
+static const struct class elements_class = {
+ .name = "dsp_pipeline",
+};
static ssize_t
attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
@@ -80,7 +82,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
- entry->dev.class = elements_class;
+ entry->dev.class = &elements_class;
entry->dev.release = mISDN_dsp_dev_release;
dev_set_drvdata(&entry->dev, elem);
dev_set_name(&entry->dev, "%s", elem->name);
@@ -131,9 +133,11 @@ EXPORT_SYMBOL(mISDN_dsp_element_unregister);
int dsp_pipeline_module_init(void)
{
- elements_class = class_create("dsp_pipeline");
- if (IS_ERR(elements_class))
- return PTR_ERR(elements_class);
+ int err;
+
+ err = class_register(&elements_class);
+ if (err)
+ return err;
dsp_hwec_init();
@@ -146,7 +150,7 @@ void dsp_pipeline_module_exit(void)
dsp_hwec_exit();
- class_destroy(elements_class);
+ class_unregister(&elements_class);
list_for_each_entry_safe(entry, n, &dsp_elements, list) {
list_del(&entry->list);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 095b9b49aa82..e6757a30dcca 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -22,6 +22,8 @@
#include "dm-ima.h"
#define DM_RESERVED_MAX_IOS 1024
+#define DM_MAX_TARGETS 1048576
+#define DM_MAX_TARGET_PARAMS 1024
struct dm_io;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 855b482cbff1..59445763e55a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -53,15 +53,17 @@
struct convert_context {
struct completion restart;
struct bio *bio_in;
- struct bio *bio_out;
struct bvec_iter iter_in;
+ struct bio *bio_out;
struct bvec_iter iter_out;
- u64 cc_sector;
atomic_t cc_pending;
+ u64 cc_sector;
union {
struct skcipher_request *req;
struct aead_request *req_aead;
} r;
+ bool aead_recheck;
+ bool aead_failed;
};
@@ -73,10 +75,8 @@ struct dm_crypt_io {
struct bio *base_bio;
u8 *integrity_metadata;
bool integrity_metadata_from_pool:1;
- bool in_tasklet:1;
struct work_struct work;
- struct tasklet_struct tasklet;
struct convert_context ctx;
@@ -84,6 +84,8 @@ struct dm_crypt_io {
blk_status_t error;
sector_t sector;
+ struct bvec_iter saved_bi_iter;
+
struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
@@ -1372,10 +1374,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
if (r == -EBADMSG) {
sector_t s = le64_to_cpu(*sector);
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
}
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
@@ -1759,10 +1764,11 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->base_bio = bio;
io->sector = sector;
io->error = 0;
+ io->ctx.aead_recheck = false;
+ io->ctx.aead_failed = false;
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
- io->in_tasklet = false;
atomic_set(&io->io_pending, 0);
}
@@ -1771,12 +1777,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
-static void kcryptd_io_bio_endio(struct work_struct *work)
-{
- struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
- bio_endio(io->base_bio);
-}
+static void kcryptd_queue_read(struct dm_crypt_io *io);
/*
* One of the bios was finished. Check for completion of
@@ -1791,6 +1792,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->io_pending))
return;
+ if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
+ cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
+ io->ctx.aead_recheck = true;
+ io->ctx.aead_failed = false;
+ io->error = 0;
+ kcryptd_queue_read(io);
+ return;
+ }
+
if (io->ctx.r.req)
crypt_free_req(cc, io->ctx.r.req, base_bio);
@@ -1801,20 +1811,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
base_bio->bi_status = error;
- /*
- * If we are running this function from our tasklet,
- * we can't call bio_endio() here, because it will call
- * clone_endio() from dm.c, which in turn will
- * free the current struct dm_crypt_io structure with
- * our tasklet. In this case we need to delay bio_endio()
- * execution to after the tasklet is done and dequeued.
- */
- if (io->in_tasklet) {
- INIT_WORK(&io->work, kcryptd_io_bio_endio);
- queue_work(cc->io_queue, &io->work);
- return;
- }
-
bio_endio(base_bio);
}
@@ -1840,15 +1836,19 @@ static void crypt_endio(struct bio *clone)
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned int rw = bio_data_dir(clone);
- blk_status_t error;
+ blk_status_t error = clone->bi_status;
+
+ if (io->ctx.aead_recheck && !error) {
+ kcryptd_queue_crypt(io);
+ return;
+ }
/*
* free the processed pages
*/
- if (rw == WRITE)
+ if (rw == WRITE || io->ctx.aead_recheck)
crypt_free_buffer_pages(cc, clone);
- error = clone->bi_status;
bio_put(clone);
if (rw == READ && !error) {
@@ -1869,6 +1869,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
struct crypt_config *cc = io->cc;
struct bio *clone;
+ if (io->ctx.aead_recheck) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ return 1;
+ crypt_inc_pending(io);
+ clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+ if (unlikely(!clone)) {
+ crypt_dec_pending(io);
+ return 1;
+ }
+ clone->bi_iter.bi_sector = cc->start + io->sector;
+ crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
+ io->saved_bi_iter = clone->bi_iter;
+ dm_submit_bio_remap(io->base_bio, clone);
+ return 0;
+ }
+
/*
* We need the original biovec array in order to decrypt the whole bio
* data *afterwards* -- thanks to immutable biovecs we don't need to
@@ -2095,6 +2111,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.bio_out = clone;
io->ctx.iter_out = clone->bi_iter;
+ if (crypt_integrity_aead(cc)) {
+ bio_copy_data(clone, io->base_bio);
+ io->ctx.bio_in = clone;
+ io->ctx.iter_in = clone->bi_iter;
+ }
+
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -2131,6 +2153,14 @@ dec:
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
+ if (io->ctx.aead_recheck) {
+ if (!io->error) {
+ io->ctx.bio_in->bi_iter = io->saved_bi_iter;
+ bio_copy_data(io->base_bio, io->ctx.bio_in);
+ }
+ crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
+ bio_put(io->ctx.bio_in);
+ }
crypt_dec_pending(io);
}
@@ -2160,11 +2190,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
- crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
- io->sector);
+ if (io->ctx.aead_recheck) {
+ io->ctx.cc_sector = io->sector + cc->iv_offset;
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ } else {
+ crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+ io->sector);
- r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ }
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
@@ -2206,10 +2242,13 @@ static void kcryptd_async_done(void *data, int error)
if (error == -EBADMSG) {
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
io->error = BLK_STS_IOERR;
@@ -2246,11 +2285,6 @@ static void kcryptd_crypt(struct work_struct *work)
kcryptd_crypt_write_convert(io);
}
-static void kcryptd_crypt_tasklet(unsigned long work)
-{
- kcryptd_crypt((struct work_struct *)work);
-}
-
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -2262,15 +2296,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
* irqs_disabled(): the kernel may run some IO completion from the idle thread, but
* it is being executed with irqs disabled.
*/
- if (in_hardirq() || irqs_disabled()) {
- io->in_tasklet = true;
- tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
- tasklet_schedule(&io->tasklet);
+ if (!(in_hardirq() || irqs_disabled())) {
+ kcryptd_crypt(&io->work);
return;
}
-
- kcryptd_crypt(&io->work);
- return;
}
INIT_WORK(&io->work, kcryptd_crypt);
@@ -3144,7 +3173,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
if (!strcasecmp(sval, "aead")) {
set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
- } else if (strcasecmp(sval, "none")) {
+ } else if (strcasecmp(sval, "none")) {
ti->error = "Unknown integrity profile";
return -EINVAL;
}
@@ -3673,7 +3702,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 24, 0},
+ .version = {1, 25, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
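
On the dm-crypt aead_recheck flow above: a failed AEAD read is retried once with a fresh, synchronous read, and only a failure on that second pass is reported (and logged) as an integrity error. A compact userspace sketch of the completion logic under those assumptions; requeue_read() and the struct are hypothetical stand-ins:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct io { bool recheck; bool failed; };

/* Stand-in for kcryptd_queue_read(); the reread succeeds here. */
static void requeue_read(struct io *io)
{
	io->failed = false;
}

static int complete(struct io *io)
{
	if (!io->recheck && io->failed) {
		io->recheck = true;	/* second pass: a failure is final */
		io->failed = false;
		requeue_read(io);
		return -EINPROGRESS;	/* completion deferred */
	}
	return io->failed ? -EILSEQ : 0;
}

int main(void)
{
	struct io io = { .failed = true };

	if (complete(&io) == -EINPROGRESS)	/* first failure: recheck */
		printf("final status: %d\n", complete(&io));	/* 0: transient */
	return 0;
}
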
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c5f03aab4552..1fc901df84eb 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -278,6 +278,8 @@ struct dm_integrity_c {
atomic64_t number_of_mismatches;
+ mempool_t recheck_pool;
+
struct notifier_block reboot_notifier;
};
@@ -1689,6 +1691,77 @@ failed:
get_random_bytes(result, ic->tag_size);
}
+static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ struct dm_integrity_c *ic = dio->ic;
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ sector_t sector, logical_sector, area, offset;
+ struct page *page;
+ void *buffer;
+
+ get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
+ dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
+ &dio->metadata_offset);
+ sector = get_data_sector(ic, area, offset);
+ logical_sector = dio->range.logical_sector;
+
+ page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+ unsigned pos = 0;
+
+ do {
+ char *mem;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = sector;
+ io_loc.count = ic->sectors_per_block;
+
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+ r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
+ &dio->metadata_offset, ic->tag_size, TAG_CMP);
+ if (r) {
+ if (r > 0) {
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ bio->bi_bdev, logical_sector);
+ atomic64_inc(&ic->number_of_mismatches);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+ bio, logical_sector, 0);
+ r = -EILSEQ;
+ }
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ mem = bvec_kmap_local(&bv);
+ memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+ kunmap_local(mem);
+
+ pos += ic->sectors_per_block << SECTOR_SHIFT;
+ sector += ic->sectors_per_block;
+ logical_sector += ic->sectors_per_block;
+ } while (pos < bv.bv_len);
+ }
+free_ret:
+ mempool_free(page, &ic->recheck_pool);
+}
+
static void integrity_metadata(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
@@ -1776,15 +1849,8 @@ again:
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- sector_t s;
-
- s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
- DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
- bio->bi_bdev, s);
- r = -EILSEQ;
- atomic64_inc(&ic->number_of_mismatches);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
- bio, s, 0);
+ integrity_recheck(dio, checksums);
+ goto skip_io;
}
if (likely(checksums != checksums_onstack))
kfree(checksums);
@@ -4261,6 +4327,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
goto bad;
}
+ r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
+ if (r) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
if (!ic->metadata_wq) {
@@ -4609,6 +4681,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
if (ic->io)
dm_io_client_destroy(ic->io);
@@ -4661,7 +4734,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e65058e0ed06..3b1ad7127cb8 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1941,7 +1941,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
minimum_data_size - sizeof(param_kernel->version)))
return -EFAULT;
- if (param_kernel->data_size < minimum_data_size) {
+ if (unlikely(param_kernel->data_size < minimum_data_size) ||
+ unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
DMERR("Invalid data size in the ioctl structure: %u",
param_kernel->data_size);
return -EINVAL;
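
The data_size bound added above caps a user-controlled ioctl size at DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS before anything is allocated. A quick userspace sketch of the same check; the constants mirror the new dm-core.h limits, the function name is illustrative:

#include <stdint.h>
#include <stdio.h>

#define MAX_TARGETS		1048576u	/* DM_MAX_TARGETS */
#define MAX_TARGET_PARAMS	1024u		/* DM_MAX_TARGET_PARAMS */

static int validate_data_size(uint32_t data_size, uint32_t minimum)
{
	if (data_size < minimum ||
	    (uint64_t)data_size > (uint64_t)MAX_TARGETS * MAX_TARGET_PARAMS)
		return -1;	/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_data_size(4096, 305));		/* 0: accepted */
	printf("%d\n", validate_data_size(UINT32_MAX, 305));	/* -1: rejected */
	return 0;
}
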
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index bdc14ec99814..1e5d988f44da 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -66,6 +66,9 @@ struct dm_stats_last_position {
unsigned int last_rw;
};
+#define DM_STAT_MAX_ENTRIES 8388608
+#define DM_STAT_MAX_HISTOGRAM_ENTRIES 134217728
+
/*
* A typo on the command line could possibly make the kernel run out of memory
* and crash. To prevent the crash we account all used memory. We fail if we
@@ -285,6 +288,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
return -EOVERFLOW;
+ if (n_entries > DM_STAT_MAX_ENTRIES)
+ return -EOVERFLOW;
+
shared_alloc_size = struct_size(s, stat_shared, n_entries);
if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
return -EOVERFLOW;
@@ -297,6 +303,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
return -EOVERFLOW;
+ if ((n_histogram_entries + 1) * (size_t)n_entries > DM_STAT_MAX_HISTOGRAM_ENTRIES)
+ return -EOVERFLOW;
+
if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
return -ENOMEM;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 260b5b8f2b0d..41f1d731ae5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -129,7 +129,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
int dm_table_create(struct dm_table **result, blk_mode_t mode,
unsigned int num_targets, struct mapped_device *md)
{
- struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct dm_table *t;
+
+ if (num_targets > DM_MAX_TARGETS)
+ return -EOVERFLOW;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
@@ -144,7 +149,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
if (!num_targets) {
kfree(t);
- return -ENOMEM;
+ return -EOVERFLOW;
}
if (alloc_targets(t, num_targets)) {
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 14e58ae70521..1b591bfa90d5 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -482,6 +482,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
return 0;
}
+static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+{
+ memcpy(data, io->recheck_buffer, len);
+ io->recheck_buffer += len;
+
+ return 0;
+}
+
+static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter start, sector_t cur_block)
+{
+ struct page *page;
+ void *buffer;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+
+ page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = v->io;
+ io_loc.bdev = v->data_dev->bdev;
+ io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
+ io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = verity_hash(v, verity_io_hash_req(v, io), buffer,
+ 1 << v->data_dev_block_bits,
+ verity_io_real_digest(v, io), true);
+ if (unlikely(r))
+ goto free_ret;
+
+ if (memcmp(verity_io_real_digest(v, io),
+ verity_io_want_digest(v, io), v->digest_size)) {
+ r = -EIO;
+ goto free_ret;
+ }
+
+ io->recheck_buffer = buffer;
+ r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = 0;
+free_ret:
+ mempool_free(page, &v->recheck_pool);
+
+ return r;
+}
+
static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len)
{
@@ -508,9 +565,7 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
-#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
-#endif
struct bvec_iter iter_copy;
struct bvec_iter *iter;
struct crypto_wait wait;
@@ -561,10 +616,7 @@ static int verity_verify_io(struct dm_verity_io *io)
if (unlikely(r < 0))
return r;
-#if defined(CONFIG_DM_VERITY_FEC)
- if (verity_fec_is_enabled(v))
- start = *iter;
-#endif
+ start = *iter;
r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -586,6 +638,10 @@ static int verity_verify_io(struct dm_verity_io *io)
* tasklet since it may sleep, so fallback to work-queue.
*/
return -EAGAIN;
+ } else if (verity_recheck(v, io, start, cur_block) == 0) {
+ if (v->validated_blocks)
+ set_bit(cur_block, v->validated_blocks);
+ continue;
#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0) {
@@ -645,23 +701,6 @@ static void verity_work(struct work_struct *w)
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
-static void verity_tasklet(unsigned long data)
-{
- struct dm_verity_io *io = (struct dm_verity_io *)data;
- int err;
-
- io->in_tasklet = true;
- err = verity_verify_io(io);
- if (err == -EAGAIN || err == -ENOMEM) {
- /* fallback to retrying with work-queue */
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
- return;
- }
-
- verity_finish_io(io, errno_to_blk_status(err));
-}
-
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -674,13 +713,8 @@ static void verity_end_io(struct bio *bio)
return;
}
- if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
- tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
- tasklet_schedule(&io->tasklet);
- } else {
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
- }
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
}
/*
@@ -963,6 +997,10 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
+ mempool_exit(&v->recheck_pool);
+ if (v->io)
+ dm_io_client_destroy(v->io);
+
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -1401,6 +1439,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->hash_blocks = hash_position;
+ r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
+ if (unlikely(r)) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
+ v->io = dm_io_client_create();
+ if (IS_ERR(v->io)) {
+ r = PTR_ERR(v->io);
+ v->io = NULL;
+ ti->error = "Cannot allocate dm io";
+ goto bad;
+ }
+
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
dm_bufio_alloc_callback, NULL,
@@ -1508,7 +1560,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
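
Both the dm-verity recheck above and the dm-integrity one earlier bounce data through a page drawn from a one-element mempool, so the error path never depends on a fresh allocation succeeding. A userspace analog of that forward-progress guarantee, reduced to a one-slot pool (real mempools sleep until a slot frees instead of handing back an empty pool):

#include <stdio.h>
#include <stdlib.h>

/* One preallocated slot: alloc cannot fail once the pool is set up. */
static void *pool_slot;

static void *pool_alloc(void)
{
	void *p = pool_slot;
	pool_slot = NULL;
	return p;
}

static void pool_free(void *p)
{
	pool_slot = p;
}

int main(void)
{
	pool_slot = malloc(4096);	/* mempool_init_page_pool(&pool, 1, 0) */

	void *buf = pool_alloc();	/* recheck bounce buffer */
	printf("recheck buffer: %p\n", buf);
	pool_free(buf);
	free(pool_slot);
	return 0;
}
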
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index f9d522c870e6..db93a91169d5 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -11,6 +11,7 @@
#ifndef DM_VERITY_H
#define DM_VERITY_H
+#include <linux/dm-io.h>
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
#include <linux/interrupt.h>
@@ -68,6 +69,9 @@ struct dm_verity {
unsigned long *validated_blocks; /* bitset blocks validated */
char *signature_key_desc; /* signature keyring reference */
+
+ struct dm_io_client *io;
+ mempool_t recheck_pool;
};
struct dm_verity_io {
@@ -76,14 +80,15 @@ struct dm_verity_io {
/* original value of bio->bi_end_io */
bio_end_io_t *orig_bi_end_io;
+ struct bvec_iter iter;
+
sector_t block;
unsigned int n_blocks;
bool in_tasklet;
- struct bvec_iter iter;
-
struct work_struct work;
- struct tasklet_struct tasklet;
+
+ char *recheck_buffer;
/*
* Three variably-sized fields follow this struct:
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 074cb785eafc..b463c28c39ad 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -299,7 +299,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
long i;
wc->memory_map = NULL;
- pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
+ pages = vmalloc_array(p, sizeof(struct page *));
if (!pages) {
r = -ENOMEM;
goto err2;
@@ -330,7 +330,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
r = -ENOMEM;
goto err3;
}
- kvfree(pages);
+ vfree(pages);
wc->memory_vmapped = true;
}
@@ -341,7 +341,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
return 0;
err3:
- kvfree(pages);
+ vfree(pages);
err2:
dax_read_unlock(id);
err1:
@@ -962,7 +962,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
if (wc->entries)
return 0;
- wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
+ wc->entries = vmalloc_array(wc->n_blocks, sizeof(struct wc_entry));
if (!wc->entries)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0a2bd72a6d76..9e41a9aaba8b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -579,8 +579,12 @@ static void submit_flushes(struct work_struct *ws)
rcu_read_lock();
}
rcu_read_unlock();
- if (atomic_dec_and_test(&mddev->flush_pending))
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pair is percpu_ref_get() from md_flush_request() */
+ percpu_ref_put(&mddev->active_io);
+
queue_work(md_wq, &mddev->flush_work);
+ }
}
static void md_submit_flush_data(struct work_struct *ws)
@@ -8132,6 +8136,19 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "\n");
}
+static void status_personalities(struct seq_file *seq)
+{
+ struct md_personality *pers;
+
+ seq_puts(seq, "Personalities : ");
+ spin_lock(&pers_lock);
+ list_for_each_entry(pers, &pers_list, list)
+ seq_printf(seq, "[%s] ", pers->name);
+
+ spin_unlock(&pers_lock);
+ seq_puts(seq, "\n");
+}
+
static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
@@ -8273,20 +8290,10 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(&all_mddevs_lock)
{
- struct md_personality *pers;
-
- seq_puts(seq, "Personalities : ");
- spin_lock(&pers_lock);
- list_for_each_entry(pers, &pers_list, list)
- seq_printf(seq, "[%s] ", pers->name);
-
- spin_unlock(&pers_lock);
- seq_puts(seq, "\n");
seq->poll_event = atomic_read(&md_event_count);
-
spin_lock(&all_mddevs_lock);
- return seq_list_start(&all_mddevs, *pos);
+ return seq_list_start_head(&all_mddevs, *pos);
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -8297,16 +8304,23 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void md_seq_stop(struct seq_file *seq, void *v)
__releases(&all_mddevs_lock)
{
- status_unused(seq);
spin_unlock(&all_mddevs_lock);
}
static int md_seq_show(struct seq_file *seq, void *v)
{
- struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
+ struct mddev *mddev;
sector_t sectors;
struct md_rdev *rdev;
+ if (v == &all_mddevs) {
+ status_personalities(seq);
+ if (list_empty(&all_mddevs))
+ status_unused(seq);
+ return 0;
+ }
+
+ mddev = list_entry(v, struct mddev, all_mddevs);
if (!mddev_get(mddev))
return 0;
@@ -8382,6 +8396,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
}
spin_unlock(&mddev->lock);
spin_lock(&all_mddevs_lock);
+
+ if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
+ status_unused(seq);
+
if (atomic_dec_and_test(&mddev->active))
__mddev_put(mddev);
@@ -8774,12 +8792,16 @@ void md_do_sync(struct md_thread *thread)
int ret;
/* just in case thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
- test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
- if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ goto skip;
+
+ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+ !md_is_rdwr(mddev)) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- return;
+ goto skip;
}
if (mddev_is_clustered(mddev)) {
@@ -9354,13 +9376,19 @@ static void md_start_sync(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, sync_work);
int spares = 0;
bool suspend = false;
+ char *name;
- if (md_spares_need_change(mddev))
+ /*
+ * If reshape is still in progress, spares won't be added or removed
+ * from conf until reshape is done.
+ */
+ if (mddev->reshape_position == MaxSector &&
+ md_spares_need_change(mddev)) {
suspend = true;
+ mddev_suspend(mddev, false);
+ }
- suspend ? mddev_suspend_and_lock_nointr(mddev) :
- mddev_lock_nointr(mddev);
-
+ mddev_lock_nointr(mddev);
if (!md_is_rdwr(mddev)) {
/*
* On a read-only array we can:
@@ -9386,8 +9414,10 @@ static void md_start_sync(struct work_struct *ws)
if (spares)
md_bitmap_write_all(mddev->bitmap);
+ name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
+ "reshape" : "resync";
rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "resync"));
+ md_register_thread(md_do_sync, mddev, name));
if (!mddev->sync_thread) {
pr_warn("%s: could not start resync thread...\n",
mdname(mddev));
@@ -9431,6 +9461,20 @@ not_running:
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
+static void unregister_sync_thread(struct mddev *mddev)
+{
+ if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
+ /* resync/recovery still happening */
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ return;
+ }
+
+ if (WARN_ON_ONCE(!mddev->sync_thread))
+ return;
+
+ md_reap_sync_thread(mddev);
+}
+
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
@@ -9455,9 +9499,6 @@ not_running:
*/
void md_check_recovery(struct mddev *mddev)
{
- if (READ_ONCE(mddev->suspended))
- return;
-
if (mddev->bitmap)
md_bitmap_daemon_work(mddev);
@@ -9471,7 +9512,8 @@ void md_check_recovery(struct mddev *mddev)
}
if (!md_is_rdwr(mddev) &&
- !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if ( ! (
(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
@@ -9493,8 +9535,7 @@ void md_check_recovery(struct mddev *mddev)
struct md_rdev *rdev;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- /* sync_work already queued. */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ unregister_sync_thread(mddev);
goto unlock;
}
@@ -9557,16 +9598,7 @@ void md_check_recovery(struct mddev *mddev)
* still set.
*/
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
- /* resync/recovery still happening */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- goto unlock;
- }
-
- if (WARN_ON_ONCE(!mddev->sync_thread))
- goto unlock;
-
- md_reap_sync_thread(mddev);
+ unregister_sync_thread(mddev);
goto unlock;
}
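
On the /proc/mdstat rework above: md_seq_start() now uses seq_list_start_head(), so the first ->show() call sees the list head itself and prints the personalities banner exactly once, while the "unused devices" trailer moves to the last entry. A userspace sketch of that iteration shape (device names are made up):

#include <stdio.h>

static const char *devs[] = { "md0", "md1" };
#define NDEVS (sizeof(devs) / sizeof(devs[0]))

static void show(long v)	/* v == -1 plays the role of &all_mddevs */
{
	if (v == -1) {
		puts("Personalities : [raid1] [raid10]");
		if (NDEVS == 0)
			puts("unused devices: <none>");
		return;
	}
	printf("%s : active\n", devs[v]);
	if (v == NDEVS - 1)	/* last entry prints the trailer */
		puts("unused devices: <none>");
}

int main(void)
{
	for (long v = -1; v < (long)NDEVS; v++)
		show(v);
	return 0;
}
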
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index aaa434f0c175..286f8b16c7bd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1968,12 +1968,12 @@ static void end_sync_write(struct bio *bio)
}
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
- int sectors, struct page *page, int rw)
+ int sectors, struct page *page, blk_opf_t rw)
{
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE) {
+ if (rw == REQ_OP_WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement,
&rdev->flags))
@@ -2090,7 +2090,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (r1_sync_page_io(rdev, sect, s,
pages[idx],
- WRITE) == 0) {
+ REQ_OP_WRITE) == 0) {
r1_bio->bios[d]->bi_end_io = NULL;
rdev_dec_pending(rdev, mddev);
}
@@ -2105,7 +2105,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (r1_sync_page_io(rdev, sect, s,
pages[idx],
- READ) != 0)
+ REQ_OP_READ) != 0)
atomic_add(s, &rdev->corrected_errors);
}
sectors -= s;
@@ -2262,7 +2262,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
int sectors = r1_bio->sectors;
int read_disk = r1_bio->read_disk;
struct mddev *mddev = conf->mddev;
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[read_disk].rdev);
+ struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
if (exceed_read_errors(mddev, rdev)) {
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
@@ -2321,7 +2321,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
r1_sync_page_io(rdev, sect, s,
- conf->tmppage, WRITE);
+ conf->tmppage, REQ_OP_WRITE);
rdev_dec_pending(rdev, mddev);
}
}
@@ -2335,7 +2335,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
if (r1_sync_page_io(rdev, sect, s,
- conf->tmppage, READ)) {
+ conf->tmppage, REQ_OP_READ)) {
atomic_add(s, &rdev->corrected_errors);
pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7412066ea22c..a5f8419e2df1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4175,11 +4175,7 @@ static int raid10_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto out_free_conf;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
return 0;
@@ -4573,16 +4569,8 @@ out:
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- ret = -EAGAIN;
- goto abort;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8497880135ee..6a7a32f7fb91 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7936,11 +7936,7 @@ static int raid5_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto abort;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
/* Ok, everything is just fine now */
@@ -8506,29 +8502,8 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- mddev->recovery = 0;
- spin_lock_irq(&conf->device_lock);
- write_seqcount_begin(&conf->gen_lock);
- mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
- mddev->new_chunk_sectors =
- conf->chunk_sectors = conf->prev_chunk_sectors;
- mddev->new_layout = conf->algorithm = conf->prev_algo;
- rdev_for_each(rdev, mddev)
- rdev->new_data_offset = rdev->data_offset;
- smp_wmb();
- conf->generation --;
- conf->reshape_progress = MaxSector;
- mddev->reshape_position = MaxSector;
- write_seqcount_end(&conf->gen_lock);
- spin_unlock_irq(&conf->device_lock);
- return -EAGAIN;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
}
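
The raid10/raid5 hunks above stop registering the reshape thread inline; the array only sets MD_RECOVERY_NEEDED and the md daemon starts, and names, the worker from one place (md_start_sync()). A userspace sketch of that hand-off; all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool recovery_needed, reshaping;

/* The single place that names and starts the sync worker. */
static void start_sync(void)
{
	printf("starting %s thread\n", reshaping ? "reshape" : "resync");
}

/* Daemon-side check, standing in for md_check_recovery(). */
static void check_recovery(void)
{
	if (recovery_needed) {
		recovery_needed = false;
		start_sync();
	}
}

int main(void)
{
	reshaping = true;
	recovery_needed = true;	/* what raid5_start_reshape() now does */
	check_recovery();
	return 0;
}
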
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 41a832dd1426..b6bf8f232f48 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -989,7 +989,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
bool no_previous_buffers = !q_num_bufs;
int ret = 0;
- if (q->num_buffers == q->max_num_buffers) {
+ if (q_num_bufs == q->max_num_buffers) {
dprintk(q, 1, "maximum number of buffers already allocated\n");
return -ENOBUFS;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 54d572c3b515..c575198e8354 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -671,8 +671,20 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
}
EXPORT_SYMBOL(vb2_querybuf);
-static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
+ u32 *flags, u32 *caps, u32 *max_num_bufs)
{
+ if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+ /*
+ * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+ * but in order to avoid bugs we zero out all bits.
+ */
+ *flags = 0;
+ } else {
+ /* Clear all unknown flags. */
+ *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ }
+
*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
if (q->io_modes & VB2_MMAP)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
@@ -686,21 +698,9 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
-}
-
-static void validate_memory_flags(struct vb2_queue *q,
- int memory,
- u32 *flags)
-{
- if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
- /*
- * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
- * but in order to avoid bugs we zero out all bits.
- */
- *flags = 0;
- } else {
- /* Clear all unknown flags. */
- *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ if (max_num_bufs) {
+ *max_num_bufs = q->max_num_buffers;
+ *caps |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
}
}
@@ -709,8 +709,8 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
int ret = vb2_verify_memory_type(q, req->memory, req->type);
u32 flags = req->flags;
- fill_buf_caps(q, &req->capabilities);
- validate_memory_flags(q, req->memory, &flags);
+ vb2_set_flags_and_caps(q, req->memory, &flags,
+ &req->capabilities, NULL);
req->flags = flags;
return ret ? ret : vb2_core_reqbufs(q, req->memory,
req->flags, &req->count);
@@ -751,11 +751,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
int ret = vb2_verify_memory_type(q, create->memory, f->type);
unsigned i;
- fill_buf_caps(q, &create->capabilities);
- validate_memory_flags(q, create->memory, &create->flags);
create->index = vb2_get_num_buffers(q);
- create->max_num_buffers = q->max_num_buffers;
- create->capabilities |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
+ vb2_set_flags_and_caps(q, create->memory, &create->flags,
+ &create->capabilities, &create->max_num_buffers);
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -1006,8 +1004,8 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
u32 flags = p->flags;
- fill_buf_caps(vdev->queue, &p->capabilities);
- validate_memory_flags(vdev->queue, p->memory, &flags);
+ vb2_set_flags_and_caps(vdev->queue, p->memory, &flags,
+ &p->capabilities, NULL);
p->flags = flags;
if (res)
return res;
@@ -1026,12 +1024,11 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *p)
{
struct video_device *vdev = video_devdata(file);
- int res = vb2_verify_memory_type(vdev->queue, p->memory,
- p->format.type);
+ int res = vb2_verify_memory_type(vdev->queue, p->memory, p->format.type);
- p->index = vdev->queue->num_buffers;
- fill_buf_caps(vdev->queue, &p->capabilities);
- validate_memory_flags(vdev->queue, p->memory, &p->flags);
+ p->index = vb2_get_num_buffers(vdev->queue);
+ vb2_set_flags_and_caps(vdev->queue, p->memory, &p->flags,
+ &p->capabilities, &p->max_num_buffers);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
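
The videobuf2 change above folds fill_buf_caps() and validate_memory_flags() into a single vb2_set_flags_and_caps() so every ioctl path reports flags, capabilities and max_num_buffers consistently. A userspace sketch of the shape of such a helper; the flag and cap values are illustrative, not the V4L2 ABI:

#include <stdint.h>
#include <stdio.h>

#define CAP_MMAP		0x1u
#define CAP_MAX_NUM_BUFFERS	0x2u
#define FLAG_NON_COHERENT	0x1u

static void set_flags_and_caps(int allow_hints, uint32_t *flags,
			       uint32_t *caps, uint32_t *max_bufs)
{
	/* Validate flags and fill caps in one place. */
	*flags = allow_hints ? (*flags & FLAG_NON_COHERENT) : 0;
	*caps = CAP_MMAP;
	if (max_bufs) {		/* only ioctls that have the field */
		*max_bufs = 64;
		*caps |= CAP_MAX_NUM_BUFFERS;
	}
}

int main(void)
{
	uint32_t flags = 0xff, caps, maxb;

	set_flags_and_caps(1, &flags, &caps, &maxb);
	printf("flags=%#x caps=%#x max=%u\n", flags, caps, maxb);
	return 0;
}
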
diff --git a/drivers/media/pci/solo6x10/solo6x10-offsets.h b/drivers/media/pci/solo6x10/solo6x10-offsets.h
index f414ee1316f2..fdbb817e6360 100644
--- a/drivers/media/pci/solo6x10/solo6x10-offsets.h
+++ b/drivers/media/pci/solo6x10/solo6x10-offsets.h
@@ -57,16 +57,16 @@
#define SOLO_MP4E_EXT_ADDR(__solo) \
(SOLO_EREF_EXT_ADDR(__solo) + SOLO_EREF_EXT_AREA(__solo))
#define SOLO_MP4E_EXT_SIZE(__solo) \
- max((__solo->nr_chans * 0x00080000), \
- min(((__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo)) - \
- __SOLO_JPEG_MIN_SIZE(__solo)), 0x00ff0000))
+ clamp(__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo) - \
+ __SOLO_JPEG_MIN_SIZE(__solo), \
+ __solo->nr_chans * 0x00080000, 0x00ff0000)
#define __SOLO_JPEG_MIN_SIZE(__solo) (__solo->nr_chans * 0x00080000)
#define SOLO_JPEG_EXT_ADDR(__solo) \
(SOLO_MP4E_EXT_ADDR(__solo) + SOLO_MP4E_EXT_SIZE(__solo))
#define SOLO_JPEG_EXT_SIZE(__solo) \
- max(__SOLO_JPEG_MIN_SIZE(__solo), \
- min((__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo)), 0x00ff0000))
+ clamp(__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo), \
+ __SOLO_JPEG_MIN_SIZE(__solo), 0x00ff0000)
#define SOLO_SDRAM_END(__solo) \
(SOLO_JPEG_EXT_ADDR(__solo) + SOLO_JPEG_EXT_SIZE(__solo))
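
The solo6x10 hunk above is a pure rewrite: clamp(v, lo, hi) is exactly max(lo, min(v, hi)), so behaviour is unchanged. A quick userspace check of that identity:

#include <stdio.h>

#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define MAX(a, b)		((a) > (b) ? (a) : (b))
#define CLAMP(v, lo, hi)	MAX(lo, MIN(v, hi))

int main(void)
{
	/* below, inside and above the [10, 20] window */
	printf("%d %d %d\n", CLAMP(5, 10, 20), CLAMP(15, 10, 20),
	       CLAMP(25, 10, 20));	/* prints: 10 15 20 */
	return 0;
}
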
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index bfe4caa79cc9..0d90b5820bef 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -272,7 +272,7 @@ static const struct wave5_match_data ti_wave521c_data = {
};
static const struct of_device_id wave5_dt_ids[] = {
- { .compatible = "ti,k3-j721s2-wave521c", .data = &ti_wave521c_data },
+ { .compatible = "ti,j721s2-wave521c", .data = &ti_wave521c_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, wave5_dt_ids);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index aebd3c12020b..c381c22135a2 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -725,6 +725,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
unsigned int i;
u32 status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 4b6b28c05b89..b757f75edecf 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -450,6 +450,7 @@ struct rkisp1_debug {
* @debug: debug params to be exposed on debugfs
* @info: version-specific ISP information
* @irqs: IRQ line numbers
+ * @irqs_enabled: the hardware is enabled and can cause interrupts
*/
struct rkisp1_device {
void __iomem *base_addr;
@@ -471,6 +472,7 @@ struct rkisp1_device {
struct rkisp1_debug debug;
const struct rkisp1_info *info;
int irqs[RKISP1_NUM_IRQS];
+ bool irqs_enabled;
};
/*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index b6e47e2f1b94..4202642e0523 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -196,6 +196,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index f96f821a7b50..73cf08a74011 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
{
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ rkisp1->irqs_enabled = false;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
+ /*
+ * Wait until any running IRQ handler has returned. The IRQ handler
+ * may get called even after this (as it's a shared interrupt line)
+ * but the 'irqs_enabled' flag will make the handler return immediately.
+ */
+ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
+ if (rkisp1->irqs[il] == -1)
+ continue;
+
+ /* Skip if the irq line is the same as previous */
+ if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il])
+ synchronize_irq(rkisp1->irqs[il]);
+ }
+
clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
return pinctrl_pm_select_sleep_state(dev);
}
@@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
if (ret)
return ret;
+ rkisp1->irqs_enabled = true;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
return 0;
}
@@ -559,7 +581,7 @@ static int rkisp1_probe(struct platform_device *pdev)
rkisp1->irqs[il] = irq;
}
- ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0,
+ ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
dev_driver_string(dev), dev);
if (ret) {
dev_err(dev, "request irq failed: %d\n", ret);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index f00873d31c42..78a1f7a1499b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -976,6 +976,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
return IRQ_NONE;
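Taken together, the rkisp1 hunks implement a standard quiesce pattern for shared interrupt lines: clear a flag, force it visible with a memory barrier, then synchronize_irq() on each distinct line so no handler can still be touching the hardware once clocks are gated. Requesting the lines with IRQF_SHARED is what makes the early IRQ_NONE return in each ISR necessary. A userspace sketch of the flag ordering only (C11 atomics; synchronize_irq() has no userspace analogue):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Models rkisp1's irqs_enabled gate: once suspended, the "handler"
 * bails out immediately even if the shared line still fires.
 */
static atomic_bool irqs_enabled = true;

static int isr(void)
{
	if (!atomic_load(&irqs_enabled))
		return 0;	/* IRQ_NONE in the driver */
	/* ... safe to touch device registers here ... */
	return 1;		/* IRQ_HANDLED */
}

static void runtime_suspend(void)
{
	/* Sequentially consistent store: plays the role of the flag
	 * write plus mb() in rkisp1_runtime_suspend().
	 */
	atomic_store(&irqs_enabled, false);
	/* The driver then calls synchronize_irq() once per distinct
	 * line so any in-flight handler has returned before clocks
	 * are gated; that step is kernel-only.
	 */
}

int main(void)
{
	printf("before: %d\n", isr());	/* 1 */
	runtime_suspend();
	printf("after:  %d\n", isr());	/* 0 */
	return 0;
}
```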
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 2afe67ffa285..74d69ce22a33 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -319,6 +319,7 @@ config IR_PWM_TX
tristate "PWM IR transmitter"
depends on LIRC
depends on PWM
+ depends on HIGH_RES_TIMERS
depends on OF
help
Say Y if you want to use a PWM based IR transmitter. This is
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index fe17c7f98e81..2f7564f26445 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -110,7 +110,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_trace_printk:
- if (perfmon_capable())
+ if (bpf_token_capable(prog->aux->token, CAP_PERFMON))
return bpf_get_trace_printk_proto();
fallthrough;
default:
@@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
if (attr->attach_flags)
return -EINVAL;
- rcdev = rc_dev_get_from_fd(attr->target_fd);
+ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
@@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
- rcdev = rc_dev_get_from_fd(attr->target_fd);
+ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
if (IS_ERR(rcdev)) {
bpf_prog_put(prog);
return PTR_ERR(rcdev);
@@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
if (attr->query.query_flags)
return -EINVAL;
- rcdev = rc_dev_get_from_fd(attr->query.target_fd);
+ rcdev = rc_dev_get_from_fd(attr->query.target_fd, false);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 196806709259..69e630d85262 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
if (err) {
dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+ kfree(buf);
return err;
}
@@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
if (err) {
dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+ kfree(buf);
return err;
}
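Both irtoy_tx() hunks plug the same leak class: an early return on a command failure that skips the kfree() at the end of the function. An equivalent structure uses a single unwind label so every failure path frees the buffer once; a sketch with hypothetical helpers:

```c
#include <stdlib.h>

/* Hypothetical stand-in for irtoy_command(); fails when arg is odd. */
static int do_command(int arg)
{
	return (arg & 1) ? -5 : 0;	/* -EIO on failure */
}

static int tx(int a, int b)
{
	char *buf = malloc(64);
	int err;

	if (!buf)
		return -12;	/* -ENOMEM */

	err = do_command(a);
	if (err)
		goto out;	/* single exit: buf freed on every path */

	err = do_command(b);
out:
	free(buf);
	return err;
}

int main(void)
{
	return tx(2, 4);
}
```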
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index a537734832c5..caad59f76793 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -814,7 +814,7 @@ void __exit lirc_dev_exit(void)
unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
}
-struct rc_dev *rc_dev_get_from_fd(int fd)
+struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
{
struct fd f = fdget(fd);
struct lirc_fh *fh;
@@ -828,6 +828,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd)
return ERR_PTR(-EINVAL);
}
+ if (write && !(f.file->f_mode & FMODE_WRITE))
+ return ERR_PTR(-EPERM);
+
fh = f.file->private_data;
dev = fh->rc;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index ef1e95e1af7f..7df949fc65e2 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
int lirc_register(struct rc_dev *dev);
void lirc_unregister(struct rc_dev *dev);
-struct rc_dev *rc_dev_get_from_fd(int fd);
+struct rc_dev *rc_dev_get_from_fd(int fd, bool write);
#else
static inline int lirc_dev_init(void) { return 0; }
static inline void lirc_dev_exit(void) {}
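With the new bool argument, mutating operations (attach/detach pass write = true) demand a descriptor opened for writing, while query (write = false) does not. A userspace analogue of the f_mode check, using fcntl() access flags as a stand-in for FMODE_WRITE:

```c
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Returns 0 if fd may be used for a mutating op, -EPERM otherwise.
 * Mirrors the new "write" argument of rc_dev_get_from_fd().
 */
static int check_fd(int fd, int write)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -errno;
	if (write && (flags & O_ACCMODE) == O_RDONLY)
		return -EPERM;
	return 0;
}

int main(void)
{
	int fd = open("/dev/null", O_RDONLY);

	printf("query: %d, attach: %d\n",
	       check_fd(fd, 0), check_fd(fd, 1));	/* 0, -EPERM */
	close(fd);
	return 0;
}
```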
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 1c6c62a7f7f5..03319a1fa97f 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2191,7 +2191,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
int i;
spin_lock_irqsave(&cctx->lock, flags);
- for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
+ for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
if (cctx->session[i].sid == sess->sid) {
cctx->session[i].valid = false;
cctx->sesscount--;
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
index 8aea2d070a40..d279a4f195e2 100644
--- a/drivers/misc/open-dice.c
+++ b/drivers/misc/open-dice.c
@@ -140,7 +140,6 @@ static int __init open_dice_probe(struct platform_device *pdev)
return -ENOMEM;
*drvdata = (struct open_dice_drvdata){
- .lock = __MUTEX_INITIALIZER(drvdata->lock),
.rmem = rmem,
.misc = (struct miscdevice){
.parent = dev,
@@ -150,6 +149,7 @@ static int __init open_dice_probe(struct platform_device *pdev)
.mode = 0600,
},
};
+ mutex_init(&drvdata->lock);
/* Index overflow check not needed, misc_register() will fail. */
snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
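The open-dice change matters because __MUTEX_INITIALIZER() inside a compound literal applies a static initializer to dynamically allocated storage, which can confuse lock debugging (lockdep) that expects per-object setup; mutex_init() performs that setup at runtime. A pthreads sketch of the shape of the fix; the lockdep motivation itself is kernel-only:

```c
#include <pthread.h>
#include <stdlib.h>

struct drvdata {
	pthread_mutex_t lock;
	int value;
};

int main(void)
{
	struct drvdata *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	/* Compound literal zero-fills the lock field, then the lock is
	 * initialized at runtime -- the analogue of calling mutex_init()
	 * after the struct assignment.
	 */
	*d = (struct drvdata){ .value = 42 };
	pthread_mutex_init(&d->lock, NULL);

	pthread_mutex_lock(&d->lock);
	pthread_mutex_unlock(&d->lock);
	pthread_mutex_destroy(&d->lock);
	free(d);
	return 0;
}
```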
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f410bee50132..58ed7193a3ca 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1015,10 +1015,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a2d949a9344..39f45c2b6de8 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -75,11 +75,15 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_irq);
int mmc_gpio_get_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
+ int cansleep;
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- return gpiod_get_value_cansleep(ctx->ro_gpio);
+ cansleep = gpiod_cansleep(ctx->ro_gpio);
+ return cansleep ?
+ gpiod_get_value_cansleep(ctx->ro_gpio) :
+ gpiod_get_value(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
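mmc_gpio_get_ro() now dispatches on gpiod_cansleep(): lines behind sleeping buses (e.g. an I2C GPIO expander) take the _cansleep accessor, while memory-mapped lines take the one that is safe in atomic context. The dispatch shape, reduced to a sketch with hypothetical accessors:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for gpiod_get_value{,_cansleep}(). */
static int get_value_fast(void)     { return 1; }
static int get_value_cansleep(void) { return 1; }

/* Mirrors mmc_gpio_get_ro(): pick the accessor at runtime based on
 * whether reading this line may sleep.
 */
static int read_ro(bool line_can_sleep)
{
	return line_can_sleep ? get_value_cansleep()
			      : get_value_fast();
}

int main(void)
{
	printf("ro=%d ro=%d\n", read_ro(true), read_ro(false));
	return 0;
}
```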
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 35067e1e6cd8..f5da7f9baa52 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
+ host->dma_in_progress = true;
+
if (!host->variant->dma_lli || data->sg_len == 1 ||
idma->use_bounce_buffer) {
u32 dma_addr;
@@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
+static void sdmmc_idma_error(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
+ data->host_cookie = 0;
+
+ if (!idma->use_bounce_buffer)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+}
+
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
+ if (!dma_inprogress(host))
+ return;
+
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
+ .dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,
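Setting dma_in_progress in dma_start and guarding both the new error hook and finalize with dma_inprogress() makes teardown idempotent: whichever of the error and completion paths runs first does the unmap, and the other becomes a no-op, so the scatterlist is never unmapped twice. A sketch of that guard:

```c
#include <stdbool.h>
#include <stdio.h>

struct host {
	bool dma_in_progress;
	int  unmap_count;
};

static void dma_teardown(struct host *h, const char *who)
{
	if (!h->dma_in_progress)	/* guard: second caller is a no-op */
		return;
	h->dma_in_progress = false;
	h->unmap_count++;		/* stands in for dma_unmap_sg() */
	printf("%s unmapped\n", who);
}

int main(void)
{
	struct host h = { .dma_in_progress = true };

	dma_teardown(&h, "dma_error");	  /* runs the unmap once */
	dma_teardown(&h, "dma_finalize"); /* skipped: no double unmap */
	return h.unmap_count == 1 ? 0 : 1;
}
```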
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 7bfee28116af..d4a02184784a 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -693,6 +693,35 @@ static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *io
return 0;
}
+static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ u32 scratch_32 = 0;
+ u8 scratch_8 = 0;
+
+ chip = slot->chip;
+
+ if (mode == MMC_POWER_OFF) {
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+ /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= ~(O2_SD_SEL_DLL);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ }
+
+ sdhci_set_power(host, mode, vdd);
+}
+
static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
@@ -1051,6 +1080,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_pci_o2_set_power,
};
const struct sdhci_pci_fixes sdhci_o2 = {
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 8cf3a375de65..cc9d28b75eb9 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
+
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+ u32 reg;
+ int err;
+
+ err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+ 1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+ if (err)
+ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+
+ return err;
+}
+
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ int ret = xenon_check_stability_internal_clk(host);
+
+ if (ret)
+ return ret;
+
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
- /* wait for host eMMC PHY init completes */
- udelay(wait);
- reg = sdhci_readl(host, phy_regs->timing_adj);
- reg &= XENON_PHY_INITIALIZAION;
- if (reg) {
+ /*
+	 * The AC5X spec says this bit must be polled until it reads zero.
+	 * On AC5X the wait can take longer than the standard calculation,
+	 * which the spec wording above allows for, so we must keep
+	 * waiting for as long as the bit needs to clear.
+	 * Cap that at 100 delay periods so we won't get stuck here
+	 * forever:
+ */
+
+ ret = read_poll_timeout(sdhci_readl, reg,
+ !(reg & XENON_PHY_INITIALIZAION),
+ wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+ false, host, phy_regs->timing_adj);
+ if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
- wait);
- return -ETIMEDOUT;
- }
+ wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
- return 0;
+ return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1
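read_poll_timeout() turns the old single udelay-then-check into a bounded poll: re-read the register every `wait` microseconds and give up with -ETIMEDOUT after XENON_MAX_PHY_TIMEOUT_LOOPS * wait. A userspace model of the same loop (the kernel macro also handles ktime bookkeeping and an optional pre-sleep, omitted here):

```c
#include <stdio.h>
#include <time.h>

/* Hypothetical register read: clears the "init" bit after a few polls. */
static unsigned int read_reg(void)
{
	static int polls;
	return ++polls < 4 ? 0x1 : 0x0;	/* bit0 models XENON_PHY_INITIALIZAION */
}

/* Minimal analogue of read_poll_timeout(): poll every sleep_us
 * microseconds, fail with -ETIMEDOUT once timeout_us has elapsed.
 */
static int poll_clear(unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int elapsed = 0;

	for (;;) {
		if (!(read_reg() & 0x1))
			return 0;
		if (elapsed >= timeout_us)
			return -62;	/* -ETIMEDOUT */
		nanosleep(&(struct timespec){ .tv_nsec = sleep_us * 1000L },
			  NULL);
		elapsed += sleep_us;
	}
}

int main(void)
{
	unsigned int wait = 10;	/* computed from the clock in the driver */

	printf("ret=%d\n", poll_clear(wait, 100 * wait));
	return 0;
}
```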
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e451b28840d5..5887feb347a4 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -621,6 +621,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
mtd_set_of_node(mtd, mtd_dn);
+ of_node_put(mtd_dn);
break;
}
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index a46698744850..5b0f5a9cef81 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30),
- MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30),
- MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 2048, 512, 16, 4, 4, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
- MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
- MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30),
- MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 4096, 512, 8, 4, 4, 1024, 0, 30, 0, 64, 30),
+ MARVELL_LAYOUT( 4096, 512, 16, 8, 8, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
- MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
- MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30),
- MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 8192, 512, 8, 8, 8, 1024, 0, 30, 0, 160, 30),
+ MARVELL_LAYOUT( 8192, 512, 16, 16, 16, 512, 0, 30, 0, 32, 30),
};
/**
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 987710e09441..6023cba748bb 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
* report the maximum of 4 in this case
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
* 1 ... 4 bits are flipped (and corrected)
*/
/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
case STATUS_ECC_UNCOR_ERROR:
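The gigadevice fix swaps a stack variable for spinand->scratchbuf because buffers handed to spi-mem may be DMA-mapped, and on-stack memory is not DMA-safe; the value is copied out of the scratch buffer only after the transfer completes. A sketch of the pattern with a pretend transport:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical transport: pretends the controller DMAs into buf. */
static void spi_read_reg(uint8_t *dma_safe_buf)
{
	*dma_safe_buf = 0x30;	/* fake STATUS2 contents */
}

int main(void)
{
	/* scratchbuf stands in for spinand->scratchbuf: driver-owned heap
	 * memory that is safe to hand to a DMA-capable transport, unlike
	 * the address of a local like &status2.
	 */
	uint8_t *scratchbuf = malloc(1);
	uint8_t status2;

	if (!scratchbuf)
		return 1;
	spi_read_reg(scratchbuf);
	status2 = *scratchbuf;	/* copy out after the transfer completes */
	free(scratchbuf);
	printf("status2=0x%02x\n", status2);
	return 0;
}
```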
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 8c651fdee039..57f1729066f2 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -186,4 +186,5 @@ static void __exit arcnet_raw_exit(void)
module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);
+MODULE_DESCRIPTION("ARCnet raw mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 8c3ccc7c83cd..53d10a04d1bd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -312,6 +312,7 @@ module_param(node, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d9e052c49ba1..166bfc3c8e6c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -108,6 +108,7 @@ static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
module_param(debug, int, 0);
+MODULE_DESCRIPTION("ARCnet core driver");
MODULE_LICENSE("GPL");
static int __init arcnet_init(void)
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index c09b567845e1..7a0a79973769 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -265,4 +265,5 @@ static void __exit capmode_module_exit(void)
module_init(capmode_module_init);
module_exit(capmode_module_exit);
+MODULE_DESCRIPTION("ARCnet CAP mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 7b5c8bb02f11..c5e571ec94c9 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -61,6 +61,7 @@ module_param(timeout, int, 0);
module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver");
MODULE_LICENSE("GPL");
static void led_tx_set(struct led_classdev *led_cdev,
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 06e1651b594b..a0053e3992a3 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -399,6 +399,7 @@ EXPORT_SYMBOL(com20020_found);
EXPORT_SYMBOL(com20020_netdev_ops);
#endif
+MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver");
MODULE_LICENSE("GPL");
#ifdef MODULE
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index dc3253b318da..75f08aa7528b 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -97,6 +97,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver");
MODULE_LICENSE("GPL");
/*====================================================================*/
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 37b47749fc8b..3b463fbc6402 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -350,6 +350,7 @@ static char device[9]; /* use eg. device=arc1 to change name */
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver");
MODULE_LICENSE("GPL");
#ifndef MODULE
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index f49dae194284..b3b287c16561 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -645,6 +645,7 @@ static void com90xx_copy_from_card(struct net_device *dev, int bufnum,
TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
}
+MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver");
MODULE_LICENSE("GPL");
static int __init com90xx_init(void)
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index a7752a5b647f..46519ca63a0a 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -78,6 +78,7 @@ static void __exit arcnet_rfc1051_exit(void)
module_init(arcnet_rfc1051_init);
module_exit(arcnet_rfc1051_exit);
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module");
MODULE_LICENSE("GPL");
/* Determine a packet's protocol ID.
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index a4c856282674..0edf35d971c5 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -35,6 +35,7 @@
#include "arcdevice.h"
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module");
MODULE_LICENSE("GPL");
static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 31377bb1cc97..339db6e4a1d5 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -194,15 +194,10 @@ static int bareudp_init(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&bareudp->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
+
return 0;
}
@@ -211,7 +206,6 @@ static void bareudp_uninit(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
gro_cells_destroy(&bareudp->gro_cells);
- free_percpu(dev->tstats);
}
static struct socket *bareudp_create_sock(struct net *net, __be16 port)
@@ -529,7 +523,6 @@ static const struct net_device_ops bareudp_netdev_ops = {
.ndo_open = bareudp_open,
.ndo_stop = bareudp_stop,
.ndo_start_xmit = bareudp_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};
@@ -567,6 +560,7 @@ static void bareudp_setup(struct net_device *dev)
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -760,23 +754,18 @@ static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
unregister_netdevice_queue(bareudp->dev, head);
}
-static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
+static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ bareudp_destroy_tunnels(net, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch = bareudp_exit_batch_net,
+ .exit_batch_rtnl = bareudp_exit_batch_rtnl,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
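bareudp is converted to the new exit_batch_rtnl pernet hook: the core acquires rtnl_lock() once for the whole batch, each namespace callback merely queues its devices onto dev_kill_list, and the core unregisters everything together, which is why the per-driver lock/unregister_netdevice_many()/unlock boilerplate could be deleted above. A toy model of the batching (assumed shape; the real hook signature is as in the hunk):

```c
#include <stdio.h>

#define MAX_DEVS 8

struct kill_list { int devs[MAX_DEVS]; int n; };

/* Per-namespace callback: only queues, takes no lock of its own --
 * the core holds the single big lock around the whole batch.
 */
static void exit_batch_rtnl(int ns, struct kill_list *list)
{
	list->devs[list->n++] = ns * 10;	/* fake device id */
}

int main(void)
{
	struct kill_list list = { .n = 0 };

	/* core: lock once ... */
	for (int ns = 1; ns <= 3; ns++)
		exit_batch_rtnl(ns, &list);
	/* ... unregister everything in one pass, then unlock once. */
	for (int i = 0; i < list.n; i++)
		printf("unregister dev %d\n", list.devs[i]);
	return 0;
}
```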
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c99ffe6c683a..c6807e473ab7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -82,10 +82,6 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
- 0, 0, 0, 0, 0, 0
-};
-
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
@@ -106,6 +102,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator,
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_enable_collecting(struct port *port);
+static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr);
static void ad_disable_collecting_distributing(struct port *port,
@@ -172,8 +171,37 @@ static inline int __agg_has_partner(struct aggregator *agg)
}
/**
+ * __disable_distributing_port - disable the port's slave for distributing.
+ * Port will still be able to collect.
+ * @port: the port we're looking at
+ *
+ * This will disable only distributing on the port's slave.
+ */
+static void __disable_distributing_port(struct port *port)
+{
+ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
+ * __enable_collecting_port - enable the port's slave for collecting,
+ * if it's up
+ * @port: the port we're looking at
+ *
+ * This will enable only collecting on the port's slave.
+ */
+static void __enable_collecting_port(struct port *port)
+{
+ struct slave *slave = port->slave;
+
+ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave))
+ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
+ *
+ * This will disable both collecting and distributing on the port's slave.
*/
static inline void __disable_port(struct port *port)
{
@@ -183,6 +211,8 @@ static inline void __disable_port(struct port *port)
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
+ *
+ * This will enable both collecting and distributing on the port's slave.
*/
static inline void __enable_port(struct port *port)
{
@@ -193,10 +223,27 @@ static inline void __enable_port(struct port *port)
}
/**
- * __port_is_enabled - check if the port's slave is in active state
+ * __port_move_to_attached_state - check if port should transition back to attached
+ * state.
+ * @port: the port we're looking at
+ */
+static bool __port_move_to_attached_state(struct port *port)
+{
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION))
+ port->sm_mux_state = AD_MUX_ATTACHED;
+
+ return port->sm_mux_state == AD_MUX_ATTACHED;
+}
+
+/**
+ * __port_is_collecting_distributing - check if the port's slave is in the
+ * combined collecting/distributing state
* @port: the port we're looking at
*/
-static inline int __port_is_enabled(struct port *port)
+static int __port_is_collecting_distributing(struct port *port)
{
return bond_is_active_slave(port->slave);
}
@@ -942,6 +989,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
*/
static void ad_mux_machine(struct port *port, bool *update_slave_arr)
{
+ struct bonding *bond = __get_bond_by_port(port);
mux_states_t last_state;
/* keep current State Machine state to compare later if it was
@@ -999,9 +1047,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
- if (port->aggregator->is_active)
- port->sm_mux_state =
- AD_MUX_COLLECTING_DISTRIBUTING;
+ if (port->aggregator->is_active) {
+ int state = AD_MUX_COLLECTING_DISTRIBUTING;
+
+ if (!bond->params.coupled_control)
+ state = AD_MUX_COLLECTING;
+ port->sm_mux_state = state;
+ }
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
@@ -1019,11 +1071,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
+ if (!__port_move_to_attached_state(port)) {
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+ * port in an active aggregator is enabled
+ */
+ if (port->aggregator->is_active &&
+ !__port_is_collecting_distributing(port)) {
+ __enable_port(port);
+ *update_slave_arr = true;
+ }
+ }
+ break;
+ case AD_MUX_COLLECTING:
+ if (!__port_move_to_attached_state(port)) {
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) {
+ port->sm_mux_state = AD_MUX_DISTRIBUTING;
+ } else {
+ /* If port state hasn't changed, make sure that a collecting
+ * port is enabled for an active aggregator.
+ */
+ struct slave *slave = port->slave;
+
+ if (port->aggregator->is_active &&
+ bond_is_slave_rx_disabled(slave)) {
+ ad_enable_collecting(port);
+ *update_slave_arr = true;
+ }
+ }
+ }
+ break;
+ case AD_MUX_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) ||
!(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
!(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
- port->sm_mux_state = AD_MUX_ATTACHED;
+ port->sm_mux_state = AD_MUX_COLLECTING;
} else {
/* if port state hasn't changed make
* sure that a collecting distributing
@@ -1031,7 +1117,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
*/
if (port->aggregator &&
port->aggregator->is_active &&
- !__port_is_enabled(port)) {
+ !__port_is_collecting_distributing(port)) {
__enable_port(port);
*update_slave_arr = true;
}
@@ -1082,6 +1168,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
update_slave_arr);
port->ntt = true;
break;
+ case AD_MUX_COLLECTING:
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting(port);
+ ad_disable_distributing(port, update_slave_arr);
+ port->ntt = true;
+ break;
+ case AD_MUX_DISTRIBUTING:
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting_distributing(port,
+ update_slave_arr);
+ break;
default:
break;
}
@@ -1484,7 +1584,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ ((__agg_has_partner(aggregator) && /* partner answers */
!aggregator->is_individual) /* but is not individual OR */
)
) {
@@ -1907,6 +2007,43 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
}
/**
+ * ad_enable_collecting - enable a port's receive
+ * @port: the port we're looking at
+ *
+ * Enable @port if it's in an active aggregator
+ */
+static void ad_enable_collecting(struct port *port)
+{
+ if (port->aggregator->is_active) {
+ struct slave *slave = port->slave;
+
+ slave_dbg(slave->bond->dev, slave->dev,
+ "Enabling collecting on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __enable_collecting_port(port);
+ }
+}
+
+/**
+ * ad_disable_distributing - disable a port's transmit
+ * @port: the port we're looking at
+ * @update_slave_arr: Does slave array need update?
+ */
+static void ad_disable_distributing(struct port *port, bool *update_slave_arr)
+{
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Disabling distributing on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __disable_distributing_port(port);
+ /* Slave array needs an update */
+ *update_slave_arr = true;
+ }
+}
+
+/**
* ad_enable_collecting_distributing - enable a port's transmit/receive
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
@@ -1935,9 +2072,7 @@ static void ad_enable_collecting_distributing(struct port *port,
static void ad_disable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
- if (port->aggregator &&
- !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
- &(null_mac_addr))) {
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Disabling port %d (LAG %d)\n",
port->actor_port_number,
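With coupled_control off, the mux machine gains the two IEEE 802.1AX "independent control" states: COLLECTING (receive enabled, transmit still off) sits between ATTACHED and the combined state, and DISTRIBUTING is entered only once the partner reports LACP_STATE_COLLECTING; losing selection or synchronization drops back via __port_move_to_attached_state(). A heavily abridged transition sketch (conditions reduced to the ones visible in the hunks above):

```c
#include <stdbool.h>
#include <stdio.h>

enum mux { ATTACHED, COLLECTING, COLL_DIST, DISTRIBUTING };

/* Abridged: aggregator-active and standby checks omitted. */
static enum mux step(enum mux s, bool selected, bool partner_sync,
		     bool partner_collecting, bool coupled)
{
	if (!selected || !partner_sync)
		return ATTACHED;	/* __port_move_to_attached_state() */

	switch (s) {
	case ATTACHED:
		return coupled ? COLL_DIST : COLLECTING;
	case COLLECTING:	/* rx on; tx waits for the partner */
	case DISTRIBUTING:	/* falls back if partner stops collecting */
		return partner_collecting ? DISTRIBUTING : COLLECTING;
	default:
		return s;
	}
}

int main(void)
{
	enum mux s = ATTACHED;

	s = step(s, true, true, false, false);	/* -> COLLECTING: rx only */
	s = step(s, true, true, true, false);	/* -> DISTRIBUTING: tx too */
	printf("final state=%d\n", s);
	return 0;
}
```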
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4e0600c7b050..2c5ed0a7cb18 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1811,7 +1811,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) {
+ if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
@@ -1819,6 +1819,8 @@ void bond_xdp_set_features(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter)
val &= slave->dev->xdp_features;
+ val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
xdp_set_features_flag(bond_dev, val);
}
@@ -2609,7 +2611,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
slave->delay = bond->params.downdelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
(BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -2623,9 +2625,10 @@ static int bond_miimon_inspect(struct bonding *bond)
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
- slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2647,7 +2650,7 @@ static int bond_miimon_inspect(struct bonding *bond)
commit++;
slave->delay = bond->params.updelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
ignore_updelay ? 0 :
bond->params.updelay *
@@ -2657,9 +2660,10 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
- slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
- (bond->params.updelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -5909,9 +5913,6 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
-
- if (bond_xdp_check(bond))
- bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
}
/* Destroy a bonding device.
@@ -6306,6 +6307,7 @@ static int __init bond_check_params(struct bond_params *params)
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
+ params->coupled_control = 1;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -6415,28 +6417,41 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
+ * race condition in bond unloading") we need to remove sysfs files
+ * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ */
+static void __net_exit bond_net_pre_exit(struct net *net)
+{
+ struct bond_net *bn = net_generic(net, bond_net_id);
+
+ bond_destroy_sysfs(bn);
+}
+
+static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct bond_net *bn;
struct net *net;
- LIST_HEAD(list);
-
- list_for_each_entry(net, net_list, exit_list) {
- bn = net_generic(net, bond_net_id);
- bond_destroy_sysfs(bn);
- }
/* Kill off any bonds created after unregistering bond rtnl ops */
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct bonding *bond, *tmp_bond;
bn = net_generic(net, bond_net_id);
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
+}
+
+/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
+ * only after all bonds are gone") bond_destroy_proc_dir() is called
+ * after bond_net_exit_batch_rtnl() has completed.
+ */
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+{
+ struct bond_net *bn;
+ struct net *net;
list_for_each_entry(net, net_list, exit_list) {
bn = net_generic(net, bond_net_id);
@@ -6446,6 +6461,8 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
+ .pre_exit = bond_net_pre_exit,
+ .exit_batch_rtnl = bond_net_exit_batch_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
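bonding's teardown is now spread across three pernet hooks, relying on the core to call them in order: pre_exit removes sysfs before any device goes away (the 69b0216ac255 note), exit_batch_rtnl unregisters devices under a single rtnl_lock(), and exit_batch removes the proc directory only after all bonds are gone (the 23fa5c2caae0 note). The ordering, as a trivially small sketch (order assumed from the comments above):

```c
#include <stdio.h>

/* The three hooks bonding now implements, in the order the pernet
 * core invokes them during namespace teardown.
 */
static void pre_exit(void)        { puts("1: remove sysfs"); }
static void exit_batch_rtnl(void) { puts("2: unregister devices (rtnl held)"); }
static void exit_batch(void)      { puts("3: remove proc dir"); }

int main(void)
{
	pre_exit();
	exit_batch_rtnl();
	exit_batch();
	return 0;
}
```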
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index cfa74cf8bb1a..29b4c3d1b9b6 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_COUPLED_CONTROL]) {
+ int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]);
+
+ bond_opt_initval(&newval, coupled_control);
+ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval,
+ data[IFLA_BOND_COUPLED_CONTROL], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
/* IFLA_BOND_NS_IP6_TARGET */
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
0;
}
@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.missed_max))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL,
+ bond->params.coupled_control))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index f3f27f0bd2a6..4cdbc7e084f4 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -84,7 +84,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
-
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -232,6 +233,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_coupled_control_tbl[] = {
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { "off", 0, 0},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -496,6 +503,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
+ },
+ [BOND_OPT_COUPLED_CONTROL] = {
+ .id = BOND_OPT_COUPLED_CONTROL,
+ .name = "coupled_control",
+ .desc = "Opt into using coupled control MUX for LACP states",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_coupled_control_tbl,
+ .set = bond_option_coupled_control_set,
}
};
@@ -1692,3 +1708,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
bond->params.ad_user_port_key = newval->value;
return 0;
}
+
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n",
+ newval->string, newval->value);
+
+ bond->params.coupled_control = newval->value;
+ return 0;
+}
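coupled_control is wired up like every other bond option: a string-to-value table with a flagged default ("on", preserving the old coupled behaviour), a changelink/fill_info pair, and a setter that stores into bond->params. A sketch of the table-lookup shape (simplified; the real code goes through __bond_opt_set() and rejects changes while the bond is up):

```c
#include <stdio.h>
#include <string.h>

struct opt_value { const char *name; int value; int is_default; };

/* Mirrors bond_coupled_control_tbl: "on" carries the default flag. */
static const struct opt_value tbl[] = {
	{ "on",  1, 1 },
	{ "off", 0, 0 },
	{ NULL, -1, 0 },
};

static int lookup(const char *s)
{
	for (const struct opt_value *v = tbl; v->name; v++)
		if (!strcmp(v->name, s))
			return v->value;
	return -1;	/* invalid input rejected */
}

int main(void)
{
	printf("on=%d off=%d bogus=%d\n",
	       lookup("on"), lookup("off"), lookup("bogus"));
	return 0;
}
```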
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index eb410714afc2..2e31db55d927 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -168,6 +168,8 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express 2xHS v2
Kvaser Mini PCI Express 1xCAN v3
Kvaser Mini PCI Express 2xCAN v3
+ Kvaser M.2 PCIe 4xCAN
+ Kvaser PCIe 8xCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
@@ -218,6 +220,7 @@ config CAN_XILINXCAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ctucanfd/Kconfig"
+source "drivers/net/can/esd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index ff8f76295d13..4669cd51e7bf 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
+obj-y += esd/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f44ba2600415..e2ec69aa46e5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -30,9 +30,9 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -259,22 +259,13 @@ static int c_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
- const struct of_device_id *match;
struct resource *mem;
int irq;
struct clk *clk;
const struct c_can_driver_data *drvdata;
struct device_node *np = pdev->dev.of_node;
- match = of_match_device(c_can_of_table, &pdev->dev);
- if (match) {
- drvdata = match->data;
- } else if (pdev->id_entry->driver_data) {
- drvdata = (struct c_can_driver_data *)
- platform_get_device_id(pdev)->driver_data;
- } else {
- return -ENODEV;
- }
+ drvdata = device_get_match_data(&pdev->dev);
/* get the appropriate clk */
clk = devm_clk_get(&pdev->dev, NULL);
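device_get_match_data() replaces the of_match_device()/platform_get_device_id() two-step with one firmware-agnostic call that returns the data attached to whichever table matched. A sketch of the consumer side (hypothetical lookup and field names; the sketch keeps a NULL check before dereferencing):

```c
#include <stdio.h>

struct driver_data { int raminit_needed; };	/* illustrative field */

/* Per-compatible data, as attached to the OF/platform match tables. */
static const struct driver_data dcan_data = { .raminit_needed = 1 };

/* Hypothetical stand-in for device_get_match_data(): returns the data
 * of whichever table entry matched the device, or NULL.
 */
static const void *get_match_data(int matched)
{
	return matched ? &dcan_data : NULL;
}

int main(void)
{
	const struct driver_data *drvdata = get_match_data(1);

	if (!drvdata)	/* the old code returned -ENODEV here */
		return 1;
	printf("raminit_needed=%d\n", drvdata->raminit_needed);
	return 0;
}
```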
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 036d85ef07f5..dfdc039d92a6 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -346,7 +346,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
- can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
+ can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
&priv->ctrlmode, priv->ctrlmode_supported);
} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
diff --git a/drivers/net/can/esd/Kconfig b/drivers/net/can/esd/Kconfig
new file mode 100644
index 000000000000..54bfc366634c
--- /dev/null
+++ b/drivers/net/can/esd/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CAN_ESD_402_PCI
+ tristate "esd electronics gmbh CAN-PCI(e)/402 family"
+ depends on PCI && HAS_DMA
+ help
+ Support for C402 card family from esd electronics gmbh.
+ This card family is based on the ESDACC CAN controller and
+ available in several form factors: PCI, PCIe, PCIe Mini,
+ M.2 PCIe, CPCIserial, PMC, XMC (see https://esd.eu/en)
+
+ This driver can also be built as a module. In this case the
+ module will be called esd_402_pci.
diff --git a/drivers/net/can/esd/Makefile b/drivers/net/can/esd/Makefile
new file mode 100644
index 000000000000..5dd2d470c286
--- /dev/null
+++ b/drivers/net/can/esd/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for esd gmbh ESDACC controller driver
+#
+esd_402_pci-objs := esdacc.o esd_402_pci-core.o
+
+obj-$(CONFIG_CAN_ESD_402_PCI) += esd_402_pci.o
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
new file mode 100644
index 000000000000..b7cdcffd0e45
--- /dev/null
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "esdacc.h"
+
+#define ESD_PCI_DEVICE_ID_PCIE402 0x0402
+
+#define PCI402_FPGA_VER_MIN 0x003d
+#define PCI402_MAX_CORES 6
+#define PCI402_BAR 0
+#define PCI402_IO_OV_OFFS 0
+#define PCI402_IO_PCIEP_OFFS 0x10000
+#define PCI402_IO_LEN_TOTAL 0x20000
+#define PCI402_IO_LEN_CORE 0x2000
+#define PCI402_PCICFG_MSICAP 0x50
+
+#define PCI402_DMA_MASK DMA_BIT_MASK(32)
+#define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE)
+
+#define PCI402_PCIEP_OF_INT_ENABLE 0x0050
+#define PCI402_PCIEP_OF_BM_ADDR_LO 0x1000
+#define PCI402_PCIEP_OF_BM_ADDR_HI 0x1004
+#define PCI402_PCIEP_OF_MSI_ADDR_LO 0x1008
+#define PCI402_PCIEP_OF_MSI_ADDR_HI 0x100c
+
+struct pci402_card {
+ /* Actually mapped io space, all other iomem derived from this */
+ void __iomem *addr;
+ void __iomem *addr_pciep;
+
+ void *dma_buf;
+ dma_addr_t dma_hnd;
+
+ struct acc_ov ov;
+ struct acc_core *cores;
+
+ bool msi_enabled;
+};
+
+/* The BTR register capabilities described by the can_bittiming_const structures
+ * below are valid since esdACC version 0x0032.
+ */
+
+/* Used if the esdACC FPGA is built as CAN-Classic version. */
+static const struct can_bittiming_const pci402_bittiming_const = {
+ .name = "esd_402",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+/* Used if the esdACC FPGA is built as CAN-FD version. */
+static const struct can_bittiming_const pci402_bittiming_const_canfd = {
+ .name = "esd_402fd",
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct net_device_ops pci402_acc_netdev_ops = {
+ .ndo_open = acc_open,
+ .ndo_stop = acc_close,
+ .ndo_start_xmit = acc_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops pci402_acc_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+static irqreturn_t pci402_interrupt(int irq, void *dev_id)
+{
+ struct pci_dev *pdev = dev_id;
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ irqreturn_t irq_status;
+
+ irq_status = acc_card_interrupt(&card->ov, card->cores);
+
+ return irq_status;
+}
+
+static int pci402_set_msiconfig(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ u32 addr_lo_offs = 0;
+ u32 addr_lo = 0;
+ u32 addr_hi = 0;
+ u32 data = 0;
+ u16 csr = 0;
+ int err;
+
+	/* The FPGA hard IP PCIe core implements the 64-bit MSI capability
+	 * register format.
+ */
+ err = pci_read_config_word(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_FLAGS, &csr);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO,
+ &addr_lo);
+ if (err)
+ goto failed;
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI,
+ &addr_hi);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_DATA_64,
+ &data);
+ if (err)
+ goto failed;
+
+ addr_lo_offs = addr_lo & 0x0000ffff;
+ addr_lo &= 0xffff0000;
+
+ if (addr_hi)
+ addr_lo |= 1; /* To enable 64-Bit addressing in PCIe endpoint */
+
+ if (!(csr & PCI_MSI_FLAGS_ENABLE)) {
+ err = -EINVAL;
+ goto failed;
+ }
+
+ iowrite32(addr_lo, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_LO);
+ iowrite32(addr_hi, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_HI);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_ADDRESSOFFSET, addr_lo_offs);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_DATA, data);
+
+ return 0;
+
+failed:
+ pci_warn(pdev, "Error while setting MSI configuration:\n"
+ "CSR: 0x%.4x, addr: 0x%.8x%.8x, offs: 0x%.4x, data: 0x%.8x\n",
+ csr, addr_hi, addr_lo, addr_lo_offs, data);
+
+ return err;
+}
+
+static int pci402_init_card(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ card->ov.addr = card->addr + PCI402_IO_OV_OFFS;
+ card->addr_pciep = card->addr + PCI402_IO_PCIEP_OFFS;
+
+ acc_reset_fpga(&card->ov);
+ acc_init_ov(&card->ov, &pdev->dev);
+
+ if (card->ov.version < PCI402_FPGA_VER_MIN) {
+ pci_err(pdev,
+ "esdACC version (0x%.4x) outdated, please update\n",
+ card->ov.version);
+ return -EINVAL;
+ }
+
+ if (card->ov.timestamp_frequency != ACC_TS_FREQ_80MHZ) {
+ pci_err(pdev,
+ "esdACC timestamp frequency of %uHz not supported by driver. Aborted.\n",
+ card->ov.timestamp_frequency);
+ return -EINVAL;
+ }
+
+ if (card->ov.active_cores > PCI402_MAX_CORES) {
+ pci_err(pdev,
+ "Card with %u active cores not supported by driver. Aborted.\n",
+ card->ov.active_cores);
+ return -EINVAL;
+ }
+ card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores,
+ sizeof(struct acc_core), GFP_KERNEL);
+ if (!card->cores)
+ return -ENOMEM;
+
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ pci_warn(pdev,
+ "esdACC with CAN-FD feature detected. This driver doesn't support CAN-FD yet.\n");
+ }
+
+#ifdef __LITTLE_ENDIAN
+ /* So card converts all busmastered data to LE for us: */
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE);
+#endif
+
+ return 0;
+}
+
+static int pci402_init_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = pci_enable_msi(pdev);
+ if (!err) {
+ err = pci402_set_msiconfig(pdev);
+ if (!err) {
+ card->msi_enabled = true;
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_dbg(pdev, "MSI preparation done\n");
+ }
+ }
+
+ err = devm_request_irq(&pdev->dev, pdev->irq, pci402_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), pdev);
+ if (err)
+ goto failure_msidis;
+
+ iowrite32(1, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+
+ return 0;
+
+failure_msidis:
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+
+ return err;
+}
+
+static void pci402_finish_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+ devm_free_irq(&pdev->dev, pdev->irq, pdev);
+
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+}
+
+static int pci402_init_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = dma_set_coherent_mask(&pdev->dev, PCI402_DMA_MASK);
+ if (err) {
+ pci_err(pdev, "DMA set mask failed!\n");
+ return err;
+ }
+
+ /* The esdACC DMA engine needs the DMA buffer aligned to a 64k
+ * boundary. The DMA API guarantees to align the returned buffer to the
+ * smallest PAGE_SIZE order which is greater than or equal to the
+ * requested size. With PCI402_DMA_SIZE == 64kB this suffices here.
+ */
+ card->dma_buf = dma_alloc_coherent(&pdev->dev, PCI402_DMA_SIZE,
+ &card->dma_hnd, GFP_KERNEL);
+ if (!card->dma_buf)
+ return -ENOMEM;
+
+ acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf);
+
+ iowrite32(card->dma_hnd,
+ card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ pci_set_master(pdev);
+
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ return 0;
+}
+
+static void pci402_finish_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ pci_clear_master(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ card->ov.bmfifo.messages = NULL;
+ card->ov.bmfifo.irq_cnt = NULL;
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+
+ core->bmfifo.messages = NULL;
+ core->bmfifo.irq_cnt = NULL;
+ }
+
+ dma_free_coherent(&pdev->dev, PCI402_DMA_SIZE, card->dma_buf,
+ card->dma_hnd);
+ card->dma_buf = NULL;
+}
+
+static void pci402_unregister_core(struct acc_core *core)
+{
+ netdev_info(core->netdev, "unregister\n");
+ unregister_candev(core->netdev);
+
+ free_candev(core->netdev);
+ core->netdev = NULL;
+}
+
+static int pci402_init_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+ struct acc_net_priv *priv;
+ struct net_device *netdev;
+ u32 fifo_config;
+
+ core->addr = card->ov.addr + (i + 1) * PCI402_IO_LEN_CORE;
+
+ fifo_config = acc_read32(core, ACC_CORE_OF_TXFIFO_CONFIG);
+ core->tx_fifo_size = (fifo_config >> 24);
+ if (core->tx_fifo_size <= 1) {
+ pci_err(pdev, "Invalid tx_fifo_size!\n");
+ err = -EINVAL;
+ goto failure;
+ }
+
+ netdev = alloc_candev(sizeof(*priv), core->tx_fifo_size);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto failure;
+ }
+ core->netdev = netdev;
+
+ netdev->flags |= IFF_ECHO;
+ netdev->dev_port = i;
+ netdev->netdev_ops = &pci402_acc_netdev_ops;
+ netdev->ethtool_ops = &pci402_acc_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ priv = netdev_priv(netdev);
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_CC_LEN8_DLC;
+
+ priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
+ priv->can.bittiming_const = &pci402_bittiming_const_canfd;
+ else
+ priv->can.bittiming_const = &pci402_bittiming_const;
+ priv->can.do_set_bittiming = acc_set_bittiming;
+ priv->can.do_set_mode = acc_set_mode;
+ priv->can.do_get_berr_counter = acc_get_berr_counter;
+
+ priv->core = core;
+ priv->ov = &card->ov;
+
+ err = register_candev(netdev);
+ if (err) {
+ free_candev(core->netdev);
+ core->netdev = NULL;
+ goto failure;
+ }
+
+ netdev_info(netdev, "registered\n");
+ }
+
+ return 0;
+
+failure:
+ for (i--; i >= 0; i--)
+ pci402_unregister_core(&card->cores[i]);
+
+ return err;
+}
+
+static void pci402_finish_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++)
+ pci402_unregister_core(&card->cores[i]);
+}
+
+static int pci402_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pci402_card *card = NULL;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto failure_disable_pci;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ err = pci_request_regions(pdev, pci_name(pdev));
+ if (err)
+ goto failure_disable_pci;
+
+ card->addr = pci_iomap(pdev, PCI402_BAR, PCI402_IO_LEN_TOTAL);
+ if (!card->addr) {
+ err = -ENOMEM;
+ goto failure_release_regions;
+ }
+
+ err = pci402_init_card(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_dma(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_interrupt(pdev);
+ if (err)
+ goto failure_finish_dma;
+
+ err = pci402_init_cores(pdev);
+ if (err)
+ goto failure_finish_interrupt;
+
+ return 0;
+
+failure_finish_interrupt:
+ pci402_finish_interrupt(pdev);
+
+failure_finish_dma:
+ pci402_finish_dma(pdev);
+
+failure_unmap:
+ pci_iounmap(pdev, card->addr);
+
+failure_release_regions:
+ pci_release_regions(pdev);
+
+failure_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci402_remove(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ pci402_finish_interrupt(pdev);
+ pci402_finish_cores(pdev);
+ pci402_finish_dma(pdev);
+ pci_iounmap(pdev, card->addr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci402_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_ESDGMBH,
+ .device = ESD_PCI_DEVICE_ID_PCIE402,
+ .subvendor = PCI_VENDOR_ID_ESDGMBH,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci402_tbl);
+
+static struct pci_driver pci402_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci402_tbl,
+ .probe = pci402_probe,
+ .remove = pci402_remove,
+};
+module_pci_driver(pci402_driver);
+
+MODULE_DESCRIPTION("Socket-CAN driver for esd CAN 402 card family with esdACC core on PCIe");
+MODULE_AUTHOR("Thomas Körper <socketcan@esd.eu>");
+MODULE_AUTHOR("Stefan Mätje <stefan.maetje@esd.eu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
new file mode 100644
index 000000000000..121cbbf81458
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include "esdacc.h"
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+
+/* esdACC ID register layout */
+#define ACC_ID_ID_MASK GENMASK(28, 0)
+#define ACC_ID_EFF_FLAG BIT(29)
+
+/* esdACC DLC register layout */
+#define ACC_DLC_DLC_MASK GENMASK(3, 0)
+#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_TXD_FLAG BIT(5)
+
+/* ecc value of esdACC equals SJA1000's ECC register */
+#define ACC_ECC_SEG 0x1f
+#define ACC_ECC_DIR 0x20
+#define ACC_ECC_BIT 0x00
+#define ACC_ECC_FORM 0x40
+#define ACC_ECC_STUFF 0x80
+#define ACC_ECC_MASK 0xc0
+
+/* esdACC Status Register bits. Unused bits not documented. */
+#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
+#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
+#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)
+
+/* esdACC Overview Module BM_IRQ_Mask register related defines */
+/* Two bit wide command masks to mask or unmask a single core IRQ */
+#define ACC_BM_IRQ_UNMASK BIT(0)
+#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)
+/* Command to unmask all IRQ sources. Created by shifting
+ * and OR-ing the two-bit wide ACC_BM_IRQ_UNMASK 16 times.
+ */
+#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
+
+static void acc_resetmode_enter(struct acc_core *core)
+{
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_resetmode_leave(struct acc_core *core)
+{
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+ const void *data)
+{
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
+ *((const u32 *)(data + 4)));
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
+ *((const u32 *)data));
+ acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
+ /* CAN id must be written last. This write starts the TX. */
+ acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
+}
+
+static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
+{
+ ++tx_fifo_idx;
+ if (tx_fifo_idx >= core->tx_fifo_size)
+ tx_fifo_idx = 0U;
+ return tx_fifo_idx;
+}
+
+/* Convert timestamp from esdACC time stamp ticks to ns
+ *
+ * The conversion factor ts2ns from time stamp counts to ns is basically
+ * ts2ns = NSEC_PER_SEC / timestamp_frequency
+ *
+ * Here we handle only the fixed timestamp frequency of 80 MHz. The
+ * resulting ts2ns factor is 12.5.
+ *
+ * Therefore we multiply by 12 and add half of the HW timestamp to
+ * get a multiplication by 12.5. This way any overflow is
+ * avoided until ktime_t itself overflows.
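+ *
+ * Worked example (illustrative): ts = 2 ticks at 80 MHz corresponds
+ * to 25 ns, and indeed 2 * 12 + (2 >> 1) = 24 + 1 = 25.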
+ */
+#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
+#define ACC_TS_80MHZ_SHIFT 1
+
+static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
+{
+ u64 ns;
+
+ ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);
+
+ return ns_to_ktime(ns);
+}
+
+#undef ACC_TS_FACTOR
+#undef ACC_TS_80MHZ_SHIFT
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev)
+{
+ u32 temp;
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
+ ov->version = temp;
+ ov->features = (temp >> 16);
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
+ ov->total_cores = temp;
+ ov->active_cores = (temp >> 8);
+
+ ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
+ ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);
+
+ /* If the esdACC has the NEW_PSC feature, enable the new prescaler;
+ * otherwise adjust core_frequency for the implicit division by 2.
+ */
+ if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
+ } else {
+ ov->core_frequency /= 2;
+ }
+
+ dev_dbg(dev,
+ "esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
+ ov->version, ov->core_frequency, ov->timestamp_frequency,
+ ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
+ ov->active_cores, ov->total_cores);
+}
+
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
+{
+ unsigned int u;
+
+ /* DMA buffer layout as follows where N is the number of CAN cores
+ * implemented in the FPGA, i.e. N = ov->total_cores
+ *
+ * Section Layout Section size
+ * ----------------------------------------------
+ * FIFO Card/Overview ACC_CORE_DMABUF_SIZE
+ * FIFO Core0 ACC_CORE_DMABUF_SIZE
+ * ... ...
+ * FIFO Core(N-1) ACC_CORE_DMABUF_SIZE
+ * irq_cnt Card/Overview sizeof(u32)
+ * irq_cnt Core0 sizeof(u32)
+ * ... ...
+ * irq_cnt Core(N-1) sizeof(u32)
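+ *
+ * As an illustration: with N = 4 the irq_cnt block starts at offset
+ * 5 * ACC_CORE_DMABUF_SIZE and core 0's irq_cnt is the u32 directly
+ * following the card/overview one, matching the pointer arithmetic
+ * below.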
+ */
+ ov->bmfifo.messages = mem;
+ ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;
+
+ for (u = 0U; u < ov->active_cores; u++) {
+ struct acc_core *core = &cores[u];
+
+ core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
+ core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
+ }
+}
+
+int acc_open(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ u32 tx_fifo_status;
+ u32 ctrl_mode;
+ int err;
+
+ /* Retry to enter RESET mode if out of sync. */
+ if (priv->can.state != CAN_STATE_STOPPED) {
+ netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
+ __func__, can_get_state_str(priv->can.state));
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+
+ acc_resetmode_leave(core);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Resync TX FIFO indices to HW state after (re-)start. */
+ tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
+ core->tx_fifo_head = tx_fifo_status & 0xff;
+ core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+int acc_close(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS |
+ ACC_REG_CONTROL_MASK_IE_BUSERR);
+
+ netif_stop_queue(netdev);
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Mark pending TX requests to be aborted after controller restart. */
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+
+ /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_LOM);
+
+ close_candev(netdev);
+ return 0;
+}
+
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 tx_fifo_head = core->tx_fifo_head;
+ int fifo_usage;
+ u32 acc_id;
+ u8 acc_dlc;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* Access core->tx_fifo_tail only once because it may be changed
+ * from the interrupt level.
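+ *
+ * Ring usage is head - tail modulo tx_fifo_size; e.g. (illustrative)
+ * head 1, tail 6 and size 8 give -5, corrected below to 3 frames in
+ * flight.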
+ */
+ fifo_usage = tx_fifo_head - core->tx_fifo_tail;
+ if (fifo_usage < 0)
+ fifo_usage += core->tx_fifo_size;
+
+ if (fifo_usage >= core->tx_fifo_size - 1) {
+ netdev_err(core->netdev,
+ "BUG: TX ring full when queue awake!\n");
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
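+ /* Stop the queue when this frame fills the last usable slot.
+ * One slot is kept unused (presumably) so that head == tail
+ * unambiguously means an empty ring, never a full one.
+ */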
+ if (fifo_usage == core->tx_fifo_size - 2)
+ netif_stop_queue(netdev);
+
+ acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ if (cf->can_id & CAN_RTR_FLAG)
+ acc_dlc |= ACC_DLC_RTR_FLAG;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ acc_id = cf->can_id & CAN_EFF_MASK;
+ acc_id |= ACC_ID_EFF_FLAG;
+ } else {
+ acc_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);
+
+ core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);
+
+ acc_txq_put(core, acc_id, acc_dlc, cf->data);
+
+ return NETDEV_TX_OK;
+}
+
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);
+
+ bec->txerr = (core_status >> 8) & 0xff;
+ bec->rxerr = core_status & 0xff;
+
+ return 0;
+}
+
+int acc_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* Paranoid FIFO index check. */
+ {
+ const u32 tx_fifo_status =
+ acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
+ const u8 hw_fifo_head = tx_fifo_status;
+
+ if (hw_fifo_head != priv->core->tx_fifo_head ||
+ hw_fifo_head != priv->core->tx_fifo_tail) {
+ netdev_warn(netdev,
+ "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
+ priv->core->tx_fifo_tail,
+ priv->core->tx_fifo_head,
+ tx_fifo_status);
+ }
+ }
+ acc_resetmode_leave(priv->core);
+ /* To leave the bus-off state, the esdACC controller here begins
+ * a grace period during which it counts 128 "idle conditions"
+ * (each of 11 consecutive recessive bits) on the bus, as
+ * required by the CAN spec.
+ *
+ * During this time the TX FIFO may still contain already
+ * aborted "zombie" frames that are only drained from the FIFO
+ * at the end of the grace period.
+ *
+ * To avoid interfering with this drain process we don't
+ * call netif_wake_queue() here. When the controller reaches
+ * the error-active state again, it informs us about that
+ * with an acc_bmmsg_errstatechange message. Then
+ * netif_wake_queue() is called from
+ * handle_core_msg_errstatechange() instead.
+ */
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int acc_set_bittiming(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 brp;
+ u32 btr;
+
+ if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ u32 fbtr = 0;
+
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
+ brp, btr, fbtr);
+ } else {
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
+ }
+
+ return 0;
+}
+
+static void handle_core_msg_rxtxdone(struct acc_core *core,
+ const struct acc_bmmsg_rxtxdone *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct sk_buff *skb;
+
+ if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+
+ if (core->tx_fifo_head == tx_fifo_tail) {
+ netdev_warn(core->netdev,
+ "TX interrupt, but queue is empty!?\n");
+ return;
+ }
+
+ /* Access the echo skb directly to attach the HW timestamp. */
+ skb = priv->can.echo_skb[tx_fifo_tail];
+ if (skb) {
+ skb_hwtstamps(skb)->hwtstamp =
+ acc_ts2ktime(priv->ov, msg->ts);
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
+ NULL);
+
+ core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+
+ netif_wake_queue(core->netdev);
+
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(core->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = msg->id & ACC_ID_ID_MASK;
+ if (msg->id & ACC_ID_EFF_FLAG)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
+ priv->can.ctrlmode);
+
+ if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+}
+
+static void handle_core_msg_txabort(struct acc_core *core,
+ const struct acc_bmmsg_txabort *msg)
+{
+ struct net_device_stats *stats = &core->netdev->stats;
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+ u32 abort_mask = msg->abort_mask; /* widened to u32 to avoid shift warnings below */
+
+ /* The abort_mask shows which frames were aborted in esdACC's FIFO. */
+ while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) {
+ const u32 tail_mask = (1U << tx_fifo_tail);
+
+ if (!(abort_mask & tail_mask))
+ break;
+ abort_mask &= ~tail_mask;
+
+ can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+
+ tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+ }
+ core->tx_fifo_tail = tx_fifo_tail;
+ if (abort_mask)
+ netdev_warn(core->netdev, "Unhandled aborted messages\n");
+
+ if (!acc_resetmode_entered(core))
+ netif_wake_queue(core->netdev);
+}
+
+static void handle_core_msg_overrun(struct acc_core *core,
+ const struct acc_bmmsg_overrun *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* lost_cnt may be 0 if not supported by esdACC version */
+ if (msg->lost_cnt) {
+ stats->rx_errors += msg->lost_cnt;
+ stats->rx_over_errors += msg->lost_cnt;
+ } else {
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void handle_core_msg_buserr(struct acc_core *core,
+ const struct acc_bmmsg_buserr *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ u8 can_err_prot_type = 0U;
+
+ priv->can.can_stats.bus_error++;
+
+ /* ECC_DIR set means the error occurred during reception */
+ if (msg->ecc & ACC_ECC_DIR) {
+ stats->rx_errors++;
+ } else {
+ can_err_prot_type |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ }
+ /* Determine error type */
+ switch (msg->ecc & ACC_ECC_MASK) {
+ case ACC_ECC_BIT:
+ can_err_prot_type |= CAN_ERR_PROT_BIT;
+ break;
+ case ACC_ECC_FORM:
+ can_err_prot_type |= CAN_ERR_PROT_FORM;
+ break;
+ case ACC_ECC_STUFF:
+ can_err_prot_type |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
+ break;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+
+ /* Set protocol error type */
+ cf->data[2] = can_err_prot_type;
+ /* Set error location */
+ cf->data[3] = msg->ecc & ACC_ECC_SEG;
+
+ /* Insert CAN TX and RX error counters. */
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void
+handle_core_msg_errstatechange(struct acc_core *core,
+ const struct acc_bmmsg_errstatechange *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ enum can_state new_state;
+
+ if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ /* See comment in acc_set_mode() for CAN_MODE_START */
+ netif_wake_queue(core->netdev);
+ }
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+
+ if (new_state != priv->can.state) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (txerr >= rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (rxerr >= txerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ /* Always call can_change_state() to update the state
+ * even if alloc_can_err_skb() may have failed.
+ * can_change_state() can cope with a NULL cf pointer.
+ */
+ can_change_state(core->netdev, cf, tx_state, rx_state);
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+ can_bus_off(core->netdev);
+ }
+}
+
+static void handle_core_interrupt(struct acc_core *core)
+{
+ u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;
+
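+ /* Only bits 7..0 of the IRQ counter form a valid FIFO index,
+ * matching the 256-entry bus master message ring
+ * (ACC_CORE_DMABUF_SIZE / ACC_CORE_DMAMSG_SIZE).
+ */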
+ while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
+ const union acc_bmmsg *msg =
+ &core->bmfifo.messages[core->bmfifo.msg_fifo_tail];
+
+ switch (msg->msg_id) {
+ case BM_MSG_ID_RXTXDONE:
+ handle_core_msg_rxtxdone(core, &msg->rxtxdone);
+ break;
+
+ case BM_MSG_ID_TXABORT:
+ handle_core_msg_txabort(core, &msg->txabort);
+ break;
+
+ case BM_MSG_ID_OVERRUN:
+ handle_core_msg_overrun(core, &msg->overrun);
+ break;
+
+ case BM_MSG_ID_BUSERR:
+ handle_core_msg_buserr(core, &msg->buserr);
+ break;
+
+ case BM_MSG_ID_ERRPASSIVE:
+ case BM_MSG_ID_ERRWARN:
+ handle_core_msg_errstatechange(core,
+ &msg->errstatechange);
+ break;
+
+ default:
+ /* Ignore all other BM messages (like the CAN-FD messages) */
+ break;
+ }
+
+ core->bmfifo.msg_fifo_tail =
+ (core->bmfifo.msg_fifo_tail + 1) & 0xff;
+ }
+}
+
+/**
+ * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
+ *
+ * @ov: overview module structure
+ * @cores: array of core structures
+ *
+ * This function handles all interrupts pending for the overview module and the
+ * CAN cores of the esdACC FPGA.
+ *
+ * For each core (the overview module and the CAN cores) it compares
+ * bmfifo.irq_cnt with the previously saved bmfifo.local_irq_cnt. An
+ * IRQ is pending if they differ. The esdACC FPGA updates the
+ * bmfifo.irq_cnt values by DMA.
+ *
+ * The pending interrupts are masked by writing to the IRQ mask register at
+ * ACC_OV_OF_BM_IRQ_MASK. For each core this register has a two-bit command
+ * field evaluated as follows:
+ *
+ * Define, bit pattern: meaning
+ * 00: no action
+ * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
+ * ACC_BM_IRQ_MASK, 10: mask interrupt
+ * 11: no action
+ *
+ * For each CAN core with a pending IRQ, handle_core_interrupt() handles
+ * all bus master messages from the message FIFO. The last handled message
+ * (FIFO index) is written back to the CAN core to acknowledge its handling.
+ *
+ * The last step is to unmask all interrupts in the FPGA using
+ * ACC_BM_IRQ_UNMASK_ALL.
+ *
+ * Return:
+ * IRQ_HANDLED, if card generated an interrupt that was handled
+ * IRQ_NONE, if the interrupt is not ours
+ */
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
+{
+ u32 irqmask;
+ int i;
+
+ /* First check for whom interrupts are pending: the card/overview
+ * module or any of the cores. Two bits in irqmask are used for
+ * each; a two-bit field is set to ACC_BM_IRQ_MASK if an IRQ is
+ * pending there.
+ */
+ irqmask = 0U;
+ if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
+ irqmask |= ACC_BM_IRQ_MASK;
+ ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
+ irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
+ core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
+ }
+ }
+
+ if (!irqmask)
+ return IRQ_NONE;
+
+ /* Second, tell the card which interrupts we are working on by
+ * writing irqmask, call handle_{ov|core}_interrupt() and then
+ * acknowledge the interrupts by writing back irq_cnt:
+ */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
+
+ if (irqmask & ACC_BM_IRQ_MASK) {
+ /* handle_ov_interrupt(); - no use yet. */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
+ ov->bmfifo.local_irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
+ handle_core_interrupt(core);
+ acc_write32(core, ACC_CORE_OF_BM_IRQ_COUNTER,
+ core->bmfifo.local_irq_cnt);
+ }
+ }
+
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
new file mode 100644
index 000000000000..a70488b25d39
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/bits.h>
+#include <linux/can/dev.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/units.h>
+
+#define ACC_TS_FREQ_80MHZ (80 * HZ_PER_MHZ)
+#define ACC_I2C_ADDON_DETECT_DELAY_MS 10
+
+/* esdACC Overview Module */
+#define ACC_OV_OF_PROBE 0x0000
+#define ACC_OV_OF_VERSION 0x0004
+#define ACC_OV_OF_INFO 0x0008
+#define ACC_OV_OF_CANCORE_FREQ 0x000c
+#define ACC_OV_OF_TS_FREQ_LO 0x0010
+#define ACC_OV_OF_TS_FREQ_HI 0x0014
+#define ACC_OV_OF_IRQ_STATUS_CORES 0x0018
+#define ACC_OV_OF_TS_CURR_LO 0x001c
+#define ACC_OV_OF_TS_CURR_HI 0x0020
+#define ACC_OV_OF_IRQ_STATUS 0x0028
+#define ACC_OV_OF_MODE 0x002c
+#define ACC_OV_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_OV_OF_BM_IRQ_MASK 0x0074
+#define ACC_OV_OF_MSI_DATA 0x0080
+#define ACC_OV_OF_MSI_ADDRESSOFFSET 0x0084
+
+/* Feature flags are contained in the upper 16 bits of the version
+ * register at ACC_OV_OF_VERSION but are only used with these masks
+ * after extraction into a separate variable => (xx - 16).
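+ * For example, feature bit 27 (CANFD) of the version register is
+ * tested as BIT(27 - 16) = BIT(11) of ov->features.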
+ */
+#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
+#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+
+#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
+#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
+#define ACC_OV_REG_MODE_MASK_MODE_LED BIT(2)
+#define ACC_OV_REG_MODE_MASK_TIMER_ENABLE BIT(4)
+#define ACC_OV_REG_MODE_MASK_TIMER_ONE_SHOT BIT(5)
+#define ACC_OV_REG_MODE_MASK_TIMER_ABSOLUTE BIT(6)
+#define ACC_OV_REG_MODE_MASK_TIMER GENMASK(6, 4)
+#define ACC_OV_REG_MODE_MASK_TS_SRC GENMASK(8, 7)
+#define ACC_OV_REG_MODE_MASK_I2C_ENABLE BIT(11)
+#define ACC_OV_REG_MODE_MASK_MSI_ENABLE BIT(14)
+#define ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE BIT(15)
+#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
+
+/* esdACC CAN Core Module */
+#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_STATUS_IRQ 0x0008
+#define ACC_CORE_OF_BRP 0x000c
+#define ACC_CORE_OF_BTR 0x0010
+#define ACC_CORE_OF_FBTR 0x0014
+#define ACC_CORE_OF_STATUS 0x0030
+#define ACC_CORE_OF_TXFIFO_CONFIG 0x0048
+#define ACC_CORE_OF_TXFIFO_STATUS 0x004c
+#define ACC_CORE_OF_TX_STATUS_IRQ 0x0050
+#define ACC_CORE_OF_TX_ABORT_MASK 0x0054
+#define ACC_CORE_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_CORE_OF_TXFIFO_ID 0x00c0
+#define ACC_CORE_OF_TXFIFO_DLC 0x00c4
+#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
+#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
+
+#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
+#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
+#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
+#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
+#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
+#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
+
+#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+
+/* BRP and BTR register layout for CAN-Classic version */
+#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG1 GENMASK(3, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG2 GENMASK(18, 16)
+#define ACC_REG_BTR_CL_MASK_SJW GENMASK(25, 24)
+
+/* BRP and BTR register layout for CAN-FD version */
+#define ACC_REG_BRP_FD_MASK_BRP GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG1 GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG2 GENMASK(22, 16)
+#define ACC_REG_BTR_FD_MASK_SJW GENMASK(30, 24)
+
+/* 256 BM_MSGs of 32 bytes each */
+#define ACC_CORE_DMAMSG_SIZE 32U
+#define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE)
+
+enum acc_bmmsg_id {
+ BM_MSG_ID_RXTXDONE = 0x01,
+ BM_MSG_ID_TXABORT = 0x02,
+ BM_MSG_ID_OVERRUN = 0x03,
+ BM_MSG_ID_BUSERR = 0x04,
+ BM_MSG_ID_ERRPASSIVE = 0x05,
+ BM_MSG_ID_ERRWARN = 0x06,
+ BM_MSG_ID_TIMESLICE = 0x07,
+ BM_MSG_ID_HWTIMER = 0x08,
+ BM_MSG_ID_HOTPLUG = 0x09,
+};
+
+/* The struct acc_bmmsg_* structure declarations that follow here provide
+ * access to the ring buffer of bus master messages maintained by the FPGA
+ * bus master engine. All bus master messages have the same size of
+ * ACC_CORE_DMAMSG_SIZE and a minimum alignment of ACC_CORE_DMAMSG_SIZE in
+ * memory.
+ *
+ * All structure members are naturally aligned, so we should not need
+ * a __packed attribute. All struct acc_bmmsg_* declarations have at least
+ * reserved* members to fill the structure to the full ACC_CORE_DMAMSG_SIZE.
+ *
+ * A failure of this property due to padding is detected at compile time
+ * by static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE).
+ */
+
+struct acc_bmmsg_rxtxdone {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u32 id;
+ struct {
+ u8 len;
+ u8 txdfifo_idx;
+ u8 zeroes8;
+ u8 reserved;
+ } acc_dlc;
+ u8 data[CAN_MAX_DLEN];
+ /* Time stamps in struct acc_ov::timestamp_frequency ticks. */
+ u64 ts;
+};
+
+struct acc_bmmsg_txabort {
+ u8 msg_id;
+ u8 txfifo_level;
+ u16 abort_mask;
+ u8 txtsfifo_level;
+ u8 reserved2[1];
+ u16 abort_mask_txts;
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_overrun {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 lost_cnt;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_buserr {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 ecc;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reg_btr;
+ u32 reserved3[2];
+};
+
+struct acc_bmmsg_errstatechange {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reserved3[3];
+};
+
+struct acc_bmmsg_timeslice {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hwtimer {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[1];
+ u64 timer;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hotplug {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[7];
+};
+
+union acc_bmmsg {
+ u8 msg_id;
+ struct acc_bmmsg_rxtxdone rxtxdone;
+ struct acc_bmmsg_txabort txabort;
+ struct acc_bmmsg_overrun overrun;
+ struct acc_bmmsg_buserr buserr;
+ struct acc_bmmsg_errstatechange errstatechange;
+ struct acc_bmmsg_timeslice timeslice;
+ struct acc_bmmsg_hwtimer hwtimer;
+};
+
+/* Check size of union acc_bmmsg to be of expected size. */
+static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE);
+
+struct acc_bmfifo {
+ const union acc_bmmsg *messages;
+ /* irq_cnt points to a u32 value where the esdACC FPGA deposits
+ * the bm_fifo head index in coherent DMA memory. Only bits 7..0
+ * are valid. Use READ_ONCE() to access this memory location.
+ */
+ const u32 *irq_cnt;
+ u32 local_irq_cnt;
+ u32 msg_fifo_tail;
+};
+
+struct acc_core {
+ void __iomem *addr;
+ struct net_device *netdev;
+ struct acc_bmfifo bmfifo;
+ u8 tx_fifo_size;
+ u8 tx_fifo_head;
+ u8 tx_fifo_tail;
+};
+
+struct acc_ov {
+ void __iomem *addr;
+ struct acc_bmfifo bmfifo;
+ u32 timestamp_frequency;
+ u32 core_frequency;
+ u16 version;
+ u16 features;
+ u8 total_cores;
+ u8 active_cores;
+};
+
+struct acc_net_priv {
+ struct can_priv can; /* must be the first member! */
+ struct acc_core *core;
+ struct acc_ov *ov;
+};
+
+static inline u32 acc_read32(struct acc_core *core, unsigned short offs)
+{
+ return ioread32be(core->addr + offs);
+}
+
+static inline void acc_write32(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, core->addr + offs);
+}
+
+static inline void acc_write32_noswap(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32(v, core->addr + offs);
+}
+
+static inline void acc_set_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v |= mask;
+ acc_write32(core, offs, v);
+}
+
+static inline void acc_clear_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v &= ~mask;
+ acc_write32(core, offs, v);
+}
+
+static inline int acc_resetmode_entered(struct acc_core *core)
+{
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+
+ return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+}
+
+static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
+{
+ return ioread32be(ov->addr + offs);
+}
+
+static inline void acc_ov_write32(struct acc_ov *ov,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, ov->addr + offs);
+}
+
+static inline void acc_ov_set_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v |= b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_ov_clear_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v &= ~b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_reset_fpga(struct acc_ov *ov)
+{
+ acc_ov_write32(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_FPGA_RESET);
+
+ /* (Re-)start and wait for completion of addon detection on the I^2C bus */
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_I2C_ENABLE);
+ mdelay(ACC_I2C_ADDON_DETECT_DELAY_MS);
+}
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev);
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores,
+ const void *mem);
+int acc_open(struct net_device *netdev);
+int acc_close(struct net_device *netdev);
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+int acc_set_mode(struct net_device *netdev, enum can_mode mode);
+int acc_set_bittiming(struct net_device *netdev);
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores);
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index d15f85a40c1e..8ea7f2795551 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -23,11 +23,11 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -2034,7 +2034,6 @@ MODULE_DEVICE_TABLE(platform, flexcan_id_table);
static int flexcan_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
@@ -2090,14 +2089,7 @@ static int flexcan_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- of_id = of_match_device(flexcan_of_match, &pdev->dev);
- if (of_id)
- devtype_data = of_id->data;
- else if (platform_get_device_id(pdev)->driver_data)
- devtype_data = (struct flexcan_devtype_data *)
- platform_get_device_id(pdev)->driver_data;
- else
- return -ENODEV;
+ devtype_data = device_get_match_data(&pdev->dev);
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!((devtype_data->quirks &
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index a57005faa04f..f81b598147b3 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
@@ -47,12 +47,19 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
+/* Xilinx based devices */
+#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
+#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019
+
/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
+/* Xilinx SerDes LSB address translation mask */
+#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
+
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
@@ -281,6 +288,8 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
struct kvaser_pciefd_address_offset {
u32 serdes;
@@ -335,6 +344,18 @@ static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offse
.kcan_ch1 = 0x142000,
};
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
+ .serdes = 0x00208,
+ .pci_ien = 0x102004,
+ .pci_irq = 0x102008,
+ .sysid = 0x100000,
+ .loopback = 0x103000,
+ .kcan_srb_fifo = 0x120000,
+ .kcan_srb = 0x121000,
+ .kcan_ch0 = 0x140000,
+ .kcan_ch1 = 0x142000,
+};
+
static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
.kcan_rx0 = BIT(4),
.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
@@ -347,6 +368,12 @@ static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
.all = GENMASK(19, 16) | BIT(4),
};
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
+ .all = GENMASK(19, 16) | BIT(4),
+};
+
static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};
@@ -355,6 +382,10 @@ static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
};
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
+};
+
static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
.address_offset = &kvaser_pciefd_altera_address_offset,
.irq_mask = &kvaser_pciefd_altera_irq_mask,
@@ -367,6 +398,12 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
.ops = &kvaser_pciefd_sf2_dev_ops,
};
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
+ .address_offset = &kvaser_pciefd_xilinx_address_offset,
+ .irq_mask = &kvaser_pciefd_xilinx_irq_mask,
+ .ops = &kvaser_pciefd_xilinx_dev_ops,
+};
+
struct kvaser_pciefd_can {
struct can_priv can;
struct kvaser_pciefd *kv_pcie;
@@ -457,6 +494,14 @@ static struct pci_device_id kvaser_pciefd_id_table[] = {
.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
},
{
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
0,
},
};
@@ -1035,6 +1080,21 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
iowrite32(msb, serdes_base + 0x4);
}
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
+{
+ void __iomem *serdes_base;
+ u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
+ u32 msb = 0x0;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ msb = addr >> 32;
+#endif
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+ iowrite32(msb, serdes_base);
+ iowrite32(lsb, serdes_base + 0x4);
+}
+
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 16ecc11c7f62..14b231c4d7ec 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -255,6 +255,7 @@ enum m_can_reg {
#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFWM_MASK GENMASK(29, 24)
#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
@@ -320,6 +321,12 @@ struct id_and_dlc {
u32 dlc;
};
+struct m_can_fifo_element {
+ u32 id;
+ u32 dlc;
+ u8 data[CANFD_MAX_DLEN];
+};
+
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
@@ -372,16 +379,6 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static inline bool _m_can_tx_fifo_full(u32 txfqs)
-{
- return !!(txfqs & TXFQS_TFQF);
-}
-
-static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
-{
- return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
-}
-
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
u32 cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -416,15 +413,48 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
}
}
+static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
+{
+ if (cdev->active_interrupts == interrupts)
+ return;
+ cdev->ops->write_reg(cdev, M_CAN_IE, interrupts);
+ cdev->active_interrupts = interrupts;
+}
+
+static void m_can_coalescing_disable(struct m_can_classdev *cdev)
+{
+ u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN;
+
+ if (!cdev->net->irq)
+ return;
+
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_interrupt_enable(cdev, new_interrupts);
+}
+
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer,
+ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
+ m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
+ cdev->active_interrupts = 0x0;
+
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+ hrtimer_cancel(&cdev->hrtimer);
+ }
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
@@ -444,18 +474,26 @@ static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
+ unsigned long irqflags;
- if (cdev->tx_skb) {
- int putidx = 0;
+ if (cdev->tx_ops) {
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ if (!cdev->tx_ops[i].skb)
+ continue;
- net->stats.tx_errors++;
- if (cdev->version > 30)
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
-
- can_free_echo_skb(cdev->net, putidx, NULL);
- cdev->tx_skb = NULL;
+ net->stats.tx_errors++;
+ cdev->tx_ops[i].skb = NULL;
+ }
}
+
+ for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+ can_free_echo_skb(cdev->net, i, NULL);
+
+ netdev_reset_queue(cdev->net);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight = 0;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
/* For peripherals, pass skb to rx-offload, which will push skb from
@@ -1007,23 +1045,60 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
*/
-static void m_can_tx_update_stats(struct m_can_classdev *cdev,
- unsigned int msg_mark,
- u32 timestamp)
+static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark, u32 timestamp)
{
struct net_device *dev = cdev->net;
struct net_device_stats *stats = &dev->stats;
+ unsigned int frame_len;
if (cdev->is_peripheral)
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
msg_mark,
timestamp,
- NULL);
+ &frame_len);
else
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
stats->tx_packets++;
+
+ return frame_len;
+}
+
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
+ unsigned int transmitted_frame_len)
+{
+ unsigned long irqflags;
+
+ netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
+ netif_wake_queue(cdev->net);
+ cdev->tx_fifo_in_flight -= transmitted;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
+static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev)
+{
+ unsigned long irqflags;
+ int tx_fifo_in_flight;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1;
+ if (tx_fifo_in_flight >= cdev->tx_fifo_size) {
+ netif_stop_queue(cdev->net);
+ if (tx_fifo_in_flight > cdev->tx_fifo_size) {
+ netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n");
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ cdev->tx_fifo_in_flight = tx_fifo_in_flight;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+
+ return NETDEV_TX_OK;
}
static int m_can_echo_tx_event(struct net_device *dev)
@@ -1035,6 +1110,8 @@ static int m_can_echo_tx_event(struct net_device *dev)
int i = 0;
int err = 0;
unsigned int msg_mark;
+ int processed = 0;
+ unsigned int processed_frame_len = 0;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1063,25 +1140,62 @@ static int m_can_echo_tx_event(struct net_device *dev)
fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
- m_can_tx_update_stats(cdev, msg_mark, timestamp);
+ processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
+ timestamp);
+
+ ++processed;
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
+ m_can_finish_tx(cdev, processed, processed_frame_len);
+
return err;
}
+static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
+{
+ u32 new_interrupts = cdev->active_interrupts;
+ bool enable_rx_timer = false;
+ bool enable_tx_timer = false;
+
+ if (!cdev->net->irq)
+ return;
+
+ if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
+ enable_rx_timer = true;
+ new_interrupts &= ~IR_RF0N;
+ }
+ if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) {
+ enable_tx_timer = true;
+ new_interrupts &= ~IR_TEFN;
+ }
+ if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_RF0N;
+ if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_TEFN;
+
+ m_can_interrupt_enable(cdev, new_interrupts);
+ if (enable_rx_timer | enable_tx_timer)
+ hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait,
+ HRTIMER_MODE_REL);
+}
+
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct m_can_classdev *cdev = netdev_priv(dev);
u32 ir;
- if (pm_runtime_suspended(cdev->dev))
+ if (pm_runtime_suspended(cdev->dev)) {
+ m_can_coalescing_disable(cdev);
return IRQ_NONE;
+ }
+
ir = m_can_read(cdev, M_CAN_IR);
+ m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
@@ -1096,13 +1210,17 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
- if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
+ if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
if (!cdev->is_peripheral) {
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
- } else if (m_can_rx_peripheral(dev, ir) < 0) {
- goto out_fail;
+ } else {
+ int pkts;
+
+ pkts = m_can_rx_peripheral(dev, ir);
+ if (pkts < 0)
+ goto out_fail;
}
}
@@ -1110,21 +1228,18 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
u32 timestamp = 0;
+ unsigned int frame_len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
- m_can_tx_update_stats(cdev, 0, timestamp);
- netif_wake_queue(dev);
+ frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
+ m_can_finish_tx(cdev, 1, frame_len);
}
} else {
- if (ir & IR_TEFN) {
+ if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX FIFO Element arrived */
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
-
- if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(cdev))
- netif_wake_queue(dev);
}
}
@@ -1138,6 +1253,15 @@ out_fail:
return IRQ_HANDLED;
}
+static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+
+ irq_wake_thread(cdev->net->irq, cdev->net);
+
+ return HRTIMER_NORESTART;
+}
+
static const struct can_bittiming_const m_can_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
@@ -1276,9 +1400,8 @@ static int m_can_chip_config(struct net_device *dev)
}
/* Disable unused interrupts */
- interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
- IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
- IR_RF0F | IR_RF0W);
+ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
m_can_config_endisable(cdev, true);
@@ -1315,6 +1438,8 @@ static int m_can_chip_config(struct net_device *dev)
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFWM_MASK,
+ cdev->tx_max_coalesced_frames_irq) |
FIELD_PREP(TXEFC_EFS_MASK,
cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
@@ -1322,6 +1447,7 @@ static int m_can_chip_config(struct net_device *dev)
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
+ FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) |
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
@@ -1380,7 +1506,7 @@ static int m_can_chip_config(struct net_device *dev)
else
interrupts &= ~(IR_ERR_LEC_31X);
}
- m_can_write(cdev, M_CAN_IE, interrupts);
+ m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
@@ -1413,15 +1539,16 @@ static int m_can_start(struct net_device *dev)
if (ret)
return ret;
+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ cdev->tx_max_coalesced_frames);
+
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
m_can_enable_all_interrupts(cdev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Start hrtimer\n");
- hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
- HRTIMER_MODE_REL_PINNED);
- }
+ if (cdev->version > 30)
+ cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
return 0;
}
@@ -1577,11 +1704,6 @@ static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Stop hrtimer\n");
- hrtimer_cancel(&cdev->hrtimer);
- }
-
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
@@ -1605,8 +1727,9 @@ static int m_can_close(struct net_device *dev)
m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
+ m_can_clean(dev);
+
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
@@ -1619,57 +1742,42 @@ static int m_can_close(struct net_device *dev)
return 0;
}
-static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- /*get wrap around for loopback skb index */
- unsigned int wrap = cdev->can.echo_skb_max;
- int next_idx;
-
- /* calculate next index */
- next_idx = (++putidx >= wrap ? 0 : putidx);
-
- /* check if occupied */
- return !!cdev->can.echo_skb[next_idx];
-}
-
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
{
- struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len_padded = DIV_ROUND_UP(cf->len, 4);
+ struct m_can_fifo_element fifo_element;
struct net_device *dev = cdev->net;
- struct sk_buff *skb = cdev->tx_skb;
- struct id_and_dlc fifo_header;
u32 cccr, fdflags;
- u32 txfqs;
int err;
- int putidx;
-
- cdev->tx_skb = NULL;
+ u32 putidx;
+ unsigned int frame_len = can_skb_get_frame_len(skb);
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
- fifo_header.id = cf->can_id & CAN_EFF_MASK;
- fifo_header.id |= TX_BUF_XTD;
+ fifo_element.id = cf->can_id & CAN_EFF_MASK;
+ fifo_element.id |= TX_BUF_XTD;
} else {
- fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
+ fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
- fifo_header.id |= TX_BUF_RTR;
+ fifo_element.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
- fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
+ fifo_element.dlc = can_fd_len2dlc(cf->len) << 16;
/* Write the frame ID, DLC, and payload to the FIFO element. */
- err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ cf->data, len_padded);
if (err)
goto out_fail;
@@ -1690,33 +1798,15 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
- can_put_echo_skb(skb, dev, 0, 0);
+ can_put_echo_skb(skb, dev, 0, frame_len);
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
- txfqs = m_can_read(cdev, M_CAN_TXFQS);
-
- /* Check if FIFO full */
- if (_m_can_tx_fifo_full(txfqs)) {
- /* This shouldn't happen */
- netif_stop_queue(dev);
- netdev_warn(dev,
- "TX queue active although FIFO is full.");
-
- if (cdev->is_peripheral) {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
- }
-
/* get put index for frame */
- putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
+ putidx = cdev->tx_fifo_putidx;
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
@@ -1731,30 +1821,32 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
fdflags |= TX_BUF_BRS;
}
- fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
- if (err)
- goto out_fail;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data,
+ cf->len, 0);
+
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID,
+ &fifo_element, 2 + len_padded);
if (err)
goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx, 0);
+ can_put_echo_skb(skb, dev, putidx, frame_len);
- /* Enable TX FIFO element to start transfer */
- m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
-
- /* stop network queue if fifo full */
- if (m_can_tx_fifo_full(cdev) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (cdev->is_peripheral) {
+ /* Delay enabling TX FIFO element */
+ cdev->tx_peripheral_submit |= BIT(putidx);
+ } else {
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
+ }
+ cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
+ 0 : cdev->tx_fifo_putidx);
}
return NETDEV_TX_OK;
@@ -1765,46 +1857,91 @@ out_fail:
return NETDEV_TX_BUSY;
}
+static void m_can_tx_submit(struct m_can_classdev *cdev)
+{
+ if (cdev->version == 30)
+ return;
+ if (!cdev->is_peripheral)
+ return;
+
+ m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
+ cdev->tx_peripheral_submit = 0;
+}
+
static void m_can_tx_work_queue(struct work_struct *ws)
{
- struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
- tx_work);
+ struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+ struct m_can_classdev *cdev = op->cdev;
+ struct sk_buff *skb = op->skb;
+
+ op->skb = NULL;
+ m_can_tx_handler(cdev, skb);
+ if (op->submit)
+ m_can_tx_submit(cdev);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
+ bool submit)
+{
+ cdev->tx_ops[cdev->next_tx_op].skb = skb;
+ cdev->tx_ops[cdev->next_tx_op].submit = submit;
+ queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+ ++cdev->next_tx_op;
+ if (cdev->next_tx_op >= cdev->tx_fifo_size)
+ cdev->next_tx_op = 0;
+}
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
+{
+ bool submit;
+
+ ++cdev->nr_txs_without_submit;
+ if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
+ !netdev_xmit_more()) {
+ cdev->nr_txs_without_submit = 0;
+ submit = true;
+ } else {
+ submit = false;
+ }
+ m_can_tx_queue_skb(cdev, skb, submit);
- m_can_tx_handler(cdev);
+ return NETDEV_TX_OK;
}
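Worked example of the flush rule above with tx-frames set to 4: a burst of ten skbs that the stack marks with xmit_more reaches the hardware in three TXBAR writes, after frames 4 and 8 via the frame-count threshold and after frame 10 because netdev_xmit_more() is false on the last skb of a burst.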
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ unsigned int frame_len;
+ netdev_tx_t ret;
if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
- if (cdev->is_peripheral) {
- if (cdev->tx_skb) {
- netdev_err(dev, "hard_xmit called while tx busy\n");
- return NETDEV_TX_BUSY;
- }
+ frame_len = can_skb_get_frame_len(skb);
- if (cdev->can.state == CAN_STATE_BUS_OFF) {
- m_can_clean(dev);
- } else {
- /* Need to stop the queue to avoid numerous requests
- * from being sent. Suggested improvement is to create
- * a queueing mechanism that will queue the skbs and
- * process them in order.
- */
- cdev->tx_skb = skb;
- netif_stop_queue(cdev->net);
- queue_work(cdev->tx_wq, &cdev->tx_work);
- }
- } else {
- cdev->tx_skb = skb;
- return m_can_tx_handler(cdev);
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(cdev->net);
+ return NETDEV_TX_OK;
}
- return NETDEV_TX_OK;
+ ret = m_can_start_tx(cdev);
+ if (ret != NETDEV_TX_OK)
+ return ret;
+
+ netdev_sent_queue(dev, frame_len);
+
+ if (cdev->is_peripheral)
+ ret = m_can_start_peripheral_xmit(cdev, skb);
+ else
+ ret = m_can_tx_handler(cdev, skb);
+
+ if (ret != NETDEV_TX_OK)
+ netdev_completed_queue(dev, 1, frame_len);
+
+ return ret;
}
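netdev_sent_queue() and netdev_completed_queue() form the byte-queue-limits (BQL) contract: every frame_len reported on the xmit path must eventually be reported back on completion (here that happens in the TX event FIFO handling, outside this excerpt). A sketch of the completion side, with a hypothetical helper name:

/* Return completed frames to BQL so the stack can size the queue */
static void m_can_bql_complete_sketch(struct net_device *dev,
				      unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
}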
static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
@@ -1844,15 +1981,17 @@ static int m_can_open(struct net_device *dev)
/* register interrupt handler */
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
- cdev->tx_wq = alloc_workqueue("mcan_wq",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM);
if (!cdev->tx_wq) {
err = -ENOMEM;
goto out_wq_fail;
}
- INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ cdev->tx_ops[i].cdev = cdev;
+ INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+ }
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
@@ -1900,7 +2039,108 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static int m_can_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
+ ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
+ ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
+ ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
+ ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int m_can_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (cdev->can.state != CAN_STATE_STOPPED) {
+ netdev_err(dev, "Device is in use, please shut it down first\n");
+ return -EBUSY;
+ }
+
+ if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) {
+ netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n",
+ ec->rx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_RXF0].num);
+ return -EINVAL;
+ }
+ if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
+ ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
+ netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
+ ec->rx_coalesce_usecs_irq,
+ ec->tx_coalesce_usecs_irq);
+ return -EINVAL;
+ }
+
+ cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ if (cdev->rx_coalesce_usecs_irq)
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ else
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+
+ return 0;
+}
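The repeated both-or-neither checks reduce to a single predicate; a sketch that could express them (hypothetical helper, not part of the patch):

/* A frames/usecs pair is valid only if both are zero or both non-zero */
static bool m_can_coalesce_pair_valid(u32 frames, u32 usecs)
{
	return (frames == 0) == (usecs == 0);
}

These fields correspond to the standard `ethtool -C` keywords rx-usecs-irq, rx-frames-irq, tx-usecs-irq, tx-frames-irq and tx-frames, so the limits are tunable from userspace while the interface is down.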
+
static const struct ethtool_ops m_can_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = m_can_get_coalesce,
+ .set_coalesce = m_can_set_coalesce,
+};
+
+static const struct ethtool_ops m_can_ethtool_ops_polling = {
.get_ts_info = ethtool_op_get_ts_info,
};
@@ -1908,7 +2148,10 @@ static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
- dev->ethtool_ops = &m_can_ethtool_ops;
+ if (dev->irq)
+ dev->ethtool_ops = &m_can_ethtool_ops;
+ else
+ dev->ethtool_ops = &m_can_ethtool_ops_polling;
return register_candev(dev);
}
@@ -2056,12 +2299,23 @@ int m_can_class_register(struct m_can_classdev *cdev)
{
int ret;
- if (cdev->pm_clock_support) {
- ret = m_can_clk_start(cdev);
- if (ret)
- return ret;
+ cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+ cdev->mcfg[MRAM_TXE].num));
+ if (cdev->is_peripheral) {
+ cdev->tx_ops =
+ devm_kzalloc(cdev->dev,
+ cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+ GFP_KERNEL);
+ if (!cdev->tx_ops) {
+ dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+ return -ENOMEM;
+ }
}
+ ret = m_can_clk_start(cdev);
+ if (ret)
+ return ret;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
@@ -2069,8 +2323,15 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto clk_disable;
}
- if (!cdev->net->irq)
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
cdev->hrtimer.function = &hrtimer_callback;
+ } else {
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cdev->hrtimer.function = m_can_coalescing_timer;
+ }
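With an IRQ available the hrtimer doubles as the interrupt-coalescing timeout; it is presumably armed from the interrupt path using the irq_timer_wait computed in m_can_set_coalesce() (the arming site is outside this excerpt). A sketch of such arming:

/* Illustrative only: start the coalescing timeout after deferring work */
hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait, HRTIMER_MODE_REL);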
ret = m_can_dev_setup(cdev);
if (ret)
@@ -2121,7 +2382,15 @@ int m_can_class_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
- m_can_stop(ndev);
+
+ /* leave the chip running with rx interrupt enabled if it is
+ * used as a wake-up source.
+ */
+ if (cdev->pm_wake_source)
+ m_can_write(cdev, M_CAN_IE, IR_RF0N);
+ else
+ m_can_stop(ndev);
+
m_can_clk_stop(cdev);
}
@@ -2148,11 +2417,15 @@ int m_can_class_resume(struct device *dev)
ret = m_can_clk_start(cdev);
if (ret)
return ret;
- ret = m_can_start(ndev);
- if (ret) {
- m_can_clk_stop(cdev);
- return ret;
+ if (cdev->pm_wake_source) {
+ m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ } else {
+ ret = m_can_start(ndev);
+ if (ret) {
+ m_can_clk_stop(cdev);
+ return ret;
+ }
}
netif_device_attach(ndev);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 520e14277dff..3a9edc292593 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -70,6 +70,13 @@ struct m_can_ops {
int (*init)(struct m_can_classdev *cdev);
};
+struct m_can_tx_op {
+ struct m_can_classdev *cdev;
+ struct work_struct work;
+ struct sk_buff *skb;
+ bool submit;
+};
+
struct m_can_classdev {
struct can_priv can;
struct can_rx_offload offload;
@@ -80,18 +87,42 @@ struct m_can_classdev {
struct clk *cclk;
struct workqueue_struct *tx_wq;
- struct work_struct tx_work;
- struct sk_buff *tx_skb;
struct phy *transceiver;
+ ktime_t irq_timer_wait;
+
struct m_can_ops *ops;
int version;
u32 irqstatus;
int pm_clock_support;
+ int pm_wake_source;
int is_peripheral;
+ // Cached M_CAN_IE register content
+ u32 active_interrupts;
+ u32 rx_max_coalesced_frames_irq;
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames_irq;
+ u32 tx_coalesce_usecs_irq;
+
+ // Store this internally to avoid fetch delays on peripheral chips
+ u32 tx_fifo_putidx;
+
+ /* Protects shared state between start_xmit and m_can_isr */
+ spinlock_t tx_handling_spinlock;
+ int tx_fifo_in_flight;
+
+ struct m_can_tx_op *tx_ops;
+ int tx_fifo_size;
+ int next_tx_op;
+
+ int nr_txs_without_submit;
+ /* bitfield of fifo elements that will be submitted together */
+ u32 tx_peripheral_submit;
+
struct mram_cfg mcfg[MRAM_CFG_NUM];
struct hrtimer hrtimer;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index f2219aa2824b..45400de4163d 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -125,6 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index cdb28d6a092c..df0367124b4c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -109,10 +109,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
ret = irq;
goto probe_fail;
}
- } else {
- dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
}
/* message ram could be shared */
@@ -143,6 +139,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->net->irq = irq;
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
mcan_class->transceiver = transceiver;
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index ae8c42f5debd..a42600dac70d 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -411,6 +411,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->spi = spi;
mcan_class->pm_clock_support = 0;
+ mcan_class->pm_wake_source = device_property_read_bool(&spi->dev, "wakeup-source");
mcan_class->can.clock.freq = freq;
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
@@ -459,6 +460,9 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
+ if (mcan_class->pm_wake_source)
+ device_init_wakeup(&spi->dev, true);
+
ret = m_can_class_register(mcan_class);
if (ret) {
dev_err(&spi->dev, "Failed registering m_can device %pe\n",
@@ -487,6 +491,29 @@ static void tcan4x5x_can_remove(struct spi_device *spi)
m_can_class_free_dev(priv->cdev.net);
}
+static int __maybe_unused tcan4x5x_suspend(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (cdev->pm_wake_source)
+ enable_irq_wake(spi->irq);
+
+ return m_can_class_suspend(dev);
+}
+
+static int __maybe_unused tcan4x5x_resume(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret = m_can_class_resume(dev);
+
+ if (cdev->pm_wake_source)
+ disable_irq_wake(spi->irq);
+
+ return ret;
+}
+
static const struct of_device_id tcan4x5x_of_match[] = {
{
.compatible = "ti,tcan4x5x",
@@ -505,11 +532,15 @@ static const struct spi_device_id tcan4x5x_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
+static const struct dev_pm_ops tcan4x5x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tcan4x5x_suspend, tcan4x5x_resume)
+};
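The __maybe_unused annotations pair with SET_SYSTEM_SLEEP_PM_OPS(), which expands to nothing when CONFIG_PM_SLEEP is disabled; with it enabled, the macro wires both callbacks into all system-sleep hooks, roughly:

/* Simplified expansion under CONFIG_PM_SLEEP */
.suspend  = tcan4x5x_suspend, .resume  = tcan4x5x_resume,
.freeze   = tcan4x5x_suspend, .thaw    = tcan4x5x_resume,
.poweroff = tcan4x5x_suspend, .restore = tcan4x5x_resume,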
+
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
- .pm = NULL,
+ .pm = &tcan4x5x_pm_ops,
},
.id_table = tcan4x5x_id_table,
.probe = tcan4x5x_can_probe,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 4837df6efa92..5b3d69c3b6b6 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -290,7 +292,7 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
int irq, mscan_clksrc = 0;
int err = -ENOMEM;
- data = of_device_get_match_data(&ofdev->dev);
+ data = device_get_match_data(&ofdev->dev);
if (!data)
return -EINVAL;
@@ -351,13 +353,11 @@ exit_unmap_mem:
static void mpc5xxx_can_remove(struct platform_device *ofdev)
{
- const struct of_device_id *match;
const struct mpc5xxx_can_data *data;
struct net_device *dev = platform_get_drvdata(ofdev);
struct mscan_priv *priv = netdev_priv(dev);
- match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
- data = match ? match->data : NULL;
+ data = device_get_match_data(&ofdev->dev);
unregister_mscandev(dev);
if (data && data->put_clock)
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 32286f861a19..721df91cdbfb 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -436,7 +436,7 @@ int softing_startstop(struct net_device *dev, int up)
return ret;
bus_bitmask_start = 0;
- if (dev && up)
+ if (up)
/* prepare to start this bus as well */
bus_bitmask_start |= (1 << priv->index);
/* bring netdevs down */
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index eebf967f4711..1d9057dc44f2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -837,7 +837,7 @@ static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
return err;
if (trec & MCP251XFD_REG_TREC_TXBO)
- bec->txerr = 256;
+ bec->txerr = CAN_BUS_OFF_THRESHOLD;
else
bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index d1450722cb3c..bd58c636d465 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -100,6 +100,7 @@ config CAN_KVASER_USB
- Scania VCI2 (if you have the Kvaser logo on top)
- Kvaser BlackBird v2
- Kvaser Leaf Pro HS v2
+ - Kvaser Leaf v3
- Kvaser Hybrid CAN/LIN
- Kvaser Hybrid 2xCAN/LIN
- Kvaser Hybrid Pro CAN/LIN
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 95b0fdb602c8..65c962f76898 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -385,7 +385,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
static int gs_cmd_reset(struct gs_can *dev)
{
struct gs_device_mode dm = {
- .mode = GS_CAN_MODE_RESET,
+ .mode = cpu_to_le32(GS_CAN_MODE_RESET),
};
return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 71ef4db5c09f..8faf8a462c05 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -88,6 +88,7 @@
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114
#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
+#define USB_LEAF_V3_PRODUCT_ID 0x0117
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
.quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
@@ -235,6 +236,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 98c669ad5141..f7fabba707ea 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -119,7 +119,7 @@ static int vxcan_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index abe58f103043..fae0120473f8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -20,8 +20,8 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -31,6 +31,7 @@
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/u64_stats_sync.h>
#define DRIVER_NAME "xilinx_can"
@@ -58,6 +59,13 @@ enum xcan_reg {
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+
+ /* only on AXI CAN cores */
+ XCAN_ECC_CFG_OFFSET = 0xC8, /* ECC Configuration */
+ XCAN_TXTLFIFO_ECC_OFFSET = 0xCC, /* TXTL FIFO ECC error counter */
+ XCAN_TXOLFIFO_ECC_OFFSET = 0xD0, /* TXOL FIFO ECC error counter */
+ XCAN_RXFIFO_ECC_OFFSET = 0xD4, /* RX FIFO ECC error counter */
+
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
@@ -124,6 +132,18 @@ enum xcan_reg {
#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
+#define XCAN_IXR_E2BERX_MASK BIT(23) /* RX FIFO two bit ECC error */
+#define XCAN_IXR_E1BERX_MASK BIT(22) /* RX FIFO one bit ECC error */
+#define XCAN_IXR_E2BETXOL_MASK BIT(21) /* TXOL FIFO two bit ECC error */
+#define XCAN_IXR_E1BETXOL_MASK BIT(20) /* TXOL FIFO One bit ECC error */
+#define XCAN_IXR_E2BETXTL_MASK BIT(19) /* TXTL FIFO Two bit ECC error */
+#define XCAN_IXR_E1BETXTL_MASK BIT(18) /* TXTL FIFO One bit ECC error */
+#define XCAN_IXR_ECC_MASK (XCAN_IXR_E2BERX_MASK | \
+ XCAN_IXR_E1BERX_MASK | \
+ XCAN_IXR_E2BETXOL_MASK | \
+ XCAN_IXR_E1BETXOL_MASK | \
+ XCAN_IXR_E2BETXTL_MASK | \
+ XCAN_IXR_E1BETXTL_MASK)
#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
@@ -137,6 +157,11 @@ enum xcan_reg {
#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
+#define XCAN_ECC_CFG_REECRX_MASK BIT(2) /* Reset RX FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXOL_MASK BIT(1) /* Reset TXOL FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXTL_MASK BIT(0) /* Reset TXTL FIFO ECC error counters */
+#define XCAN_ECC_1BIT_CNT_MASK GENMASK(15, 0) /* FIFO ECC 1bit count mask */
+#define XCAN_ECC_2BIT_CNT_MASK GENMASK(31, 16) /* FIFO ECC 2bit count mask */
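Each per-FIFO ECC register packs both counters into one 32-bit word, so a single read yields both values. A fragment showing the split with the masks above (read_reg as used elsewhere in this driver):

u32 reg = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);
u32 one_bit_errors = FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg);	/* bits 15:0 */
u32 two_bit_errors = FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg);	/* bits 31:16 */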
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
@@ -202,6 +227,14 @@ struct xcan_devtype_data {
* @devtype: Device type specific constants
* @transceiver: Optional pointer to associated CAN transceiver
* @rstc: Pointer to reset control
+ * @ecc_enable: ECC enable flag
+ * @syncp: synchronization for ECC error stats
+ * @ecc_rx_2_bit_errors: RXFIFO 2bit ECC count
+ * @ecc_rx_1_bit_errors: RXFIFO 1bit ECC count
+ * @ecc_txol_2_bit_errors: TXOLFIFO 2bit ECC count
+ * @ecc_txol_1_bit_errors: TXOLFIFO 1bit ECC count
+ * @ecc_txtl_2_bit_errors: TXTLFIFO 2bit ECC count
+ * @ecc_txtl_1_bit_errors: TXTLFIFO 1bit ECC count
*/
struct xcan_priv {
struct can_priv can;
@@ -221,6 +254,14 @@ struct xcan_priv {
struct xcan_devtype_data devtype;
struct phy *transceiver;
struct reset_control *rstc;
+ bool ecc_enable;
+ struct u64_stats_sync syncp;
+ u64_stats_t ecc_rx_2_bit_errors;
+ u64_stats_t ecc_rx_1_bit_errors;
+ u64_stats_t ecc_txol_2_bit_errors;
+ u64_stats_t ecc_txol_1_bit_errors;
+ u64_stats_t ecc_txtl_2_bit_errors;
+ u64_stats_t ecc_txtl_1_bit_errors;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -308,6 +349,24 @@ static const struct can_tdc_const xcan_tdc_const_canfd2 = {
.tdcf_max = 0,
};
+enum xcan_stats_type {
+ XCAN_ECC_RX_2_BIT_ERRORS,
+ XCAN_ECC_RX_1_BIT_ERRORS,
+ XCAN_ECC_TXOL_2_BIT_ERRORS,
+ XCAN_ECC_TXOL_1_BIT_ERRORS,
+ XCAN_ECC_TXTL_2_BIT_ERRORS,
+ XCAN_ECC_TXTL_1_BIT_ERRORS,
+};
+
+static const char xcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [XCAN_ECC_RX_2_BIT_ERRORS] = "ecc_rx_2_bit_errors",
+ [XCAN_ECC_RX_1_BIT_ERRORS] = "ecc_rx_1_bit_errors",
+ [XCAN_ECC_TXOL_2_BIT_ERRORS] = "ecc_txol_2_bit_errors",
+ [XCAN_ECC_TXOL_1_BIT_ERRORS] = "ecc_txol_1_bit_errors",
+ [XCAN_ECC_TXTL_2_BIT_ERRORS] = "ecc_txtl_2_bit_errors",
+ [XCAN_ECC_TXTL_1_BIT_ERRORS] = "ecc_txtl_1_bit_errors",
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -523,6 +582,9 @@ static int xcan_chip_start(struct net_device *ndev)
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+ if (priv->ecc_enable)
+ ier |= XCAN_IXR_ECC_MASK;
+
if (priv->devtype.flags & XCAN_FLAG_RXMNF)
ier |= XCAN_IXR_RXMNF_MASK;
@@ -1127,6 +1189,54 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
priv->can.can_stats.bus_error++;
}
+ if (priv->ecc_enable && isr & XCAN_IXR_ECC_MASK) {
+ u32 reg_rx_ecc, reg_txol_ecc, reg_txtl_ecc;
+
+ reg_rx_ecc = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);
+ reg_txol_ecc = priv->read_reg(priv, XCAN_TXOLFIFO_ECC_OFFSET);
+ reg_txtl_ecc = priv->read_reg(priv, XCAN_TXTLFIFO_ECC_OFFSET);
+
+ /* The counter reaches its maximum at 0xffff and does not overflow.
+ * Accept the small race window between reading and resetting ECC counters.
+ */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+
+ u64_stats_update_begin(&priv->syncp);
+
+ if (isr & XCAN_IXR_E2BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ u64_stats_update_end(&priv->syncp);
+ }
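+	/* This writer pairs with the u64_stats reader in
+	 * xcan_get_ethtool_stats() below; on 32-bit hosts the seqcount in
+	 * priv->syncp is what keeps the 64-bit counter reads from tearing.
+	 */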
+
if (cf.can_id) {
struct can_frame *skb_cf;
struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
@@ -1354,8 +1464,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr_errors, mask;
u32 isr, ier;
- u32 isr_errors;
u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
@@ -1374,10 +1484,15 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
if (isr & XCAN_IXR_TXOK_MASK)
xcan_tx_interrupt(ndev, isr);
+ mask = XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK;
+
+ if (priv->ecc_enable)
+ mask |= XCAN_IXR_ECC_MASK;
+
/* Check the type of error interrupt and process it */
- isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
- XCAN_IXR_RXMNF_MASK);
+ isr_errors = isr & mask;
if (isr_errors) {
priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
@@ -1546,6 +1661,43 @@ static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
return 0;
}
+static void xcan_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &xcan_priv_flags_strings,
+ sizeof(xcan_priv_flags_strings));
+ }
+}
+
+static int xcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void xcan_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ data[XCAN_ECC_RX_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_2_bit_errors);
+ data[XCAN_ECC_RX_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_1_bit_errors);
+ data[XCAN_ECC_TXOL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_2_bit_errors);
+ data[XCAN_ECC_TXOL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_1_bit_errors);
+ data[XCAN_ECC_TXTL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_2_bit_errors);
+ data[XCAN_ECC_TXTL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_1_bit_errors);
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1555,6 +1707,9 @@ static const struct net_device_ops xcan_netdev_ops = {
static const struct ethtool_ops xcan_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
+ .get_strings = xcan_get_strings,
+ .get_sset_count = xcan_get_sset_count,
+ .get_ethtool_stats = xcan_get_ethtool_stats,
};
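With get_strings, get_sset_count and get_ethtool_stats wired up, the six counters appear in the standard statistics output (`ethtool -S <iface>`), each under its name from xcan_priv_flags_strings[].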
/**
@@ -1726,8 +1881,7 @@ static int xcan_probe(struct platform_device *pdev)
struct net_device *ndev;
struct xcan_priv *priv;
struct phy *transceiver;
- const struct of_device_id *of_id;
- const struct xcan_devtype_data *devtype = &xcan_axi_data;
+ const struct xcan_devtype_data *devtype;
void __iomem *addr;
int ret;
int rx_max, tx_max;
@@ -1741,9 +1895,7 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- of_id = of_match_device(xcan_of_match, &pdev->dev);
- if (of_id && of_id->data)
- devtype = of_id->data;
+ devtype = device_get_match_data(&pdev->dev);
hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
"tx-mailbox-count" : "tx-fifo-depth";
@@ -1796,6 +1948,7 @@ static int xcan_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->ecc_enable = of_property_read_bool(pdev->dev.of_node, "xlnx,has-ecc");
priv->dev = &pdev->dev;
priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
@@ -1912,6 +2065,11 @@ static int xcan_probe(struct platform_device *pdev)
priv->reg_base, ndev->irq, priv->can.clock.freq,
hw_tx_max, priv->tx_max);
+ if (priv->ecc_enable) {
+ /* Reset FIFO ECC counters */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+ }
return 0;
err_disableclks:
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f8c1d73b251d..3092b391031a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -48,7 +48,7 @@ config NET_DSA_MT7530
config NET_DSA_MT7530_MDIO
tristate "MediaTek MT7530 MDIO interface driver"
depends on NET_DSA_MT7530
- imply MEDIATEK_GE_PHY
+ select MEDIATEK_GE_PHY
select PCS_MTK_LYNXI
help
This enables support for the MediaTek MT7530 and MT7531 switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0d628b35fd5c..b2eeff04f4c8 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -559,6 +559,19 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
+static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
+ if (enable)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+}
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -1257,7 +1270,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
u8 rgmii_ctrl = 0, reg = 0, off;
bool tx_pause = false;
bool rx_pause = false;
@@ -2193,21 +2206,6 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mirror_del);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
-{
- struct b53_device *dev = ds->priv;
- u16 reg;
-
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
- if (enable)
- reg |= BIT(port);
- else
- reg &= ~BIT(port);
- b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
-}
-EXPORT_SYMBOL(b53_eee_enable_set);
-
-
/* Returns 0 if EEE was not enabled, or 1 otherwise
*/
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
@@ -2224,27 +2222,21 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
- u16 reg;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
- e->eee_enabled = p->eee_enabled;
- e->eee_active = !!(reg & BIT(port));
-
return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index fdcfd5081c28..c13a907947f1 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -95,7 +95,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
struct b53_vlan {
@@ -395,9 +395,8 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4a52ccbe393f..bc77ee9e6d0a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -835,7 +835,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ struct ethtool_keee *p = &priv->dev->ports[port].eee;
u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
index 237066d30704..14ca42491512 100644
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ b/drivers/net/dsa/dsa_loop_bdinfo.c
@@ -32,4 +32,5 @@ static int __init dsa_loop_bdinfo_init(void)
}
arch_initcall(dsa_loop_bdinfo_init)
+MODULE_DESCRIPTION("DSA mock-up switch driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 61b71bcfe396..14923535ca7e 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
mutex_lock(&dev->alu_mutex);
ctrl_addr = IND_ACC_TABLE(table) | addr;
- ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
if (!ret)
- ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
mutex_unlock(&dev->alu_mutex);
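The swap is the fix here: the 16-bit control write selects the indirect table and address, and the data byte is presumably latched relative to that selection, so it must follow rather than precede the control write.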
@@ -633,6 +633,57 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
}
/**
+ * ksz879x_get_loopback - KSZ879x specific function to get loopback
+ * configuration status for a specific port
+ * @dev: Pointer to the device structure
+ * @port: Port number to query
+ * @val: Pointer to store the result
+ *
+ * This function reads the SMI registers to determine whether loopback mode
+ * is enabled for a specific port.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_get_loopback(struct ksz_device *dev, u16 port,
+ u16 *val)
+{
+ u8 stat3;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_3, &stat3);
+ if (ret)
+ return ret;
+
+ if (stat3 & PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ return 0;
+}
+
+/**
+ * ksz879x_set_loopback - KSZ879x specific function to set loopback mode for
+ * a specific port
+ * @dev: Pointer to the device structure.
+ * @port: Port number to modify.
+ * @val: Value indicating whether to enable or disable loopback mode.
+ *
+ * This function translates the loopback bit of the BMCR register into the
+ * corresponding hardware register bit value and writes it via the SMI
+ * interface.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_set_loopback(struct ksz_device *dev, u16 port, u16 val)
+{
+ u8 stat3 = 0;
+
+ if (val & BMCR_LOOPBACK)
+ stat3 |= PORT_PHY_LOOPBACK;
+
+ return ksz_prmw8(dev, port, REG_PORT_STATUS_3, PORT_PHY_LOOPBACK,
+ stat3);
+}
+
+/**
* ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY
* Control register (Reg. 31).
* @dev: The KSZ device instance.
@@ -676,59 +727,122 @@ static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val)
return 0;
}
+/**
+ * ksz8_r_phy_bmcr - Translates and reads from the SMI interface to a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be read.
+ * @val: The value read from the SMI interface.
+ *
+ * This function reads the SMI interface and translates the hardware register
+ * bit values into their corresponding control settings for a MIIM PHY Basic
+ * mode control register.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_r_phy_bmcr(struct ksz_device *dev, u16 port, u16 *val)
+{
+ const u16 *regs = dev->info->regs;
+ u8 restart, speed, ctrl;
+ int ret;
+
+ *val = 0;
+
+ ret = ksz_pread8(dev, port, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ if (ctrl & PORT_FORCE_100_MBIT)
+ *val |= BMCR_SPEED100;
+
+ if (ksz_is_ksz88x3(dev)) {
+ if (restart & KSZ8873_PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ if ((ctrl & PORT_AUTO_NEG_ENABLE))
+ *val |= BMCR_ANENABLE;
+ } else {
+ ret = ksz879x_get_loopback(dev, port, val);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ *val |= BMCR_ANENABLE;
+ }
+
+ if (restart & PORT_POWER_DOWN)
+ *val |= BMCR_PDOWN;
+
+ if (restart & PORT_AUTO_NEG_RESTART)
+ *val |= BMCR_ANRESTART;
+
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ *val |= BMCR_FULLDPLX;
+
+ if (speed & PORT_HP_MDIX)
+ *val |= KSZ886X_BMCR_HP_MDIX;
+
+ if (restart & PORT_FORCE_MDIX)
+ *val |= KSZ886X_BMCR_FORCE_MDI;
+
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+
+ if (restart & PORT_TX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_TRANSMIT;
+
+ if (restart & PORT_LED_OFF)
+ *val |= KSZ886X_BMCR_DISABLE_LED;
+
+ return 0;
+}
+
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- u8 restart, speed, ctrl, link;
+ u8 ctrl, link, val1, val2;
int processed = true;
const u16 *regs;
- u8 val1, val2;
u16 data = 0;
- u8 p = phy;
+ u16 p = phy;
int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- if (ret)
- return ret;
-
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- if (ret)
- return ret;
-
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz8_r_phy_bmcr(dev, p, &data);
if (ret)
return ret;
-
- if (restart & PORT_PHY_LOOPBACK)
- data |= BMCR_LOOPBACK;
- if (ctrl & PORT_FORCE_100_MBIT)
- data |= BMCR_SPEED100;
- if (ksz_is_ksz88x3(dev)) {
- if ((ctrl & PORT_AUTO_NEG_ENABLE))
- data |= BMCR_ANENABLE;
- } else {
- if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= BMCR_ANENABLE;
- }
- if (restart & PORT_POWER_DOWN)
- data |= BMCR_PDOWN;
- if (restart & PORT_AUTO_NEG_RESTART)
- data |= BMCR_ANRESTART;
- if (ctrl & PORT_FORCE_FULL_DUPLEX)
- data |= BMCR_FULLDPLX;
- if (speed & PORT_HP_MDIX)
- data |= KSZ886X_BMCR_HP_MDIX;
- if (restart & PORT_FORCE_MDIX)
- data |= KSZ886X_BMCR_FORCE_MDI;
- if (restart & PORT_AUTO_MDIX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
- if (restart & PORT_TX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
- if (restart & PORT_LED_OFF)
- data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
@@ -860,113 +974,137 @@ static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val)
return ret;
}
-int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+/**
+ * ksz8_w_phy_bmcr - Translates and writes to the SMI interface from a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be configured.
+ * @val: The register value to be written.
+ *
+ * This function translates control settings from a MIIM PHY Basic mode control
+ * register into their corresponding hardware register bit values for the SMI
+ * interface.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_w_phy_bmcr(struct ksz_device *dev, u16 port, u16 val)
{
- u8 restart, speed, ctrl, data;
- const u16 *regs;
- u8 p = phy;
+ u8 restart, speed, ctrl, restart_mask;
+ const u16 *regs = dev->info->regs;
int ret;
- regs = dev->info->regs;
+ /* Do not support PHY reset function. */
+ if (val & BMCR_RESET)
+ return 0;
- switch (reg) {
- case MII_BMCR:
+ speed = 0;
+ if (val & KSZ886X_BMCR_HP_MDIX)
+ speed |= PORT_HP_MDIX;
- /* Do not support PHY reset function. */
- if (val & BMCR_RESET)
- break;
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- if (ret)
- return ret;
+ ret = ksz_prmw8(dev, port, regs[P_SPEED_STATUS], PORT_HP_MDIX, speed);
+ if (ret)
+ return ret;
- data = speed;
- if (val & KSZ886X_BMCR_HP_MDIX)
- data |= PORT_HP_MDIX;
- else
- data &= ~PORT_HP_MDIX;
+ ctrl = 0;
+ if (ksz_is_ksz88x3(dev)) {
+ if ((val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_ENABLE;
+ } else {
+ if (!(val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_DISABLE;
- if (data != speed) {
- ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- if (ret)
- return ret;
- }
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[port].fiber)
+ ctrl |= PORT_AUTO_NEG_DISABLE;
+ }
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
- if (ret)
- return ret;
+ if (val & BMCR_SPEED100)
+ ctrl |= PORT_FORCE_100_MBIT;
- data = ctrl;
- if (ksz_is_ksz88x3(dev)) {
- if ((val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_ENABLE;
- else
- data &= ~PORT_AUTO_NEG_ENABLE;
- } else {
- if (!(val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_DISABLE;
- else
- data &= ~PORT_AUTO_NEG_DISABLE;
-
- /* Fiber port does not support auto-negotiation. */
- if (dev->ports[p].fiber)
- data |= PORT_AUTO_NEG_DISABLE;
- }
+ if (val & BMCR_FULLDPLX)
+ ctrl |= PORT_FORCE_FULL_DUPLEX;
- if (val & BMCR_SPEED100)
- data |= PORT_FORCE_100_MBIT;
- else
- data &= ~PORT_FORCE_100_MBIT;
- if (val & BMCR_FULLDPLX)
- data |= PORT_FORCE_FULL_DUPLEX;
- else
- data &= ~PORT_FORCE_FULL_DUPLEX;
+ ret = ksz_prmw8(dev, port, regs[P_FORCE_CTRL], PORT_FORCE_100_MBIT |
+ /* PORT_AUTO_NEG_ENABLE and PORT_AUTO_NEG_DISABLE are the same
+ * bits
+ */
+ PORT_FORCE_FULL_DUPLEX | PORT_AUTO_NEG_ENABLE, ctrl);
+ if (ret)
+ return ret;
- if (data != ctrl) {
- ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- if (ret)
- return ret;
- }
+ restart = 0;
+ restart_mask = PORT_LED_OFF | PORT_TX_DISABLE | PORT_AUTO_NEG_RESTART |
+ PORT_POWER_DOWN | PORT_AUTO_MDIX_DISABLE | PORT_FORCE_MDIX;
+
+ if (val & KSZ886X_BMCR_DISABLE_LED)
+ restart |= PORT_LED_OFF;
+
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
+ restart |= PORT_TX_DISABLE;
+
+ if (val & BMCR_ANRESTART)
+ restart |= PORT_AUTO_NEG_RESTART;
+
+ if (val & BMCR_PDOWN)
+ restart |= PORT_POWER_DOWN;
+
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
+ restart |= PORT_AUTO_MDIX_DISABLE;
+
+ if (val & KSZ886X_BMCR_FORCE_MDI)
+ restart |= PORT_FORCE_MDIX;
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ksz_is_ksz88x3(dev)) {
+ restart_mask |= KSZ8873_PORT_PHY_LOOPBACK;
+
+ if (val & BMCR_LOOPBACK)
+ restart |= KSZ8873_PORT_PHY_LOOPBACK;
+ } else {
+ ret = ksz879x_set_loopback(dev, port, val);
if (ret)
return ret;
+ }
- data = restart;
- if (val & KSZ886X_BMCR_DISABLE_LED)
- data |= PORT_LED_OFF;
- else
- data &= ~PORT_LED_OFF;
- if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
- data |= PORT_TX_DISABLE;
- else
- data &= ~PORT_TX_DISABLE;
- if (val & BMCR_ANRESTART)
- data |= PORT_AUTO_NEG_RESTART;
- else
- data &= ~(PORT_AUTO_NEG_RESTART);
- if (val & BMCR_PDOWN)
- data |= PORT_POWER_DOWN;
- else
- data &= ~PORT_POWER_DOWN;
- if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
- data |= PORT_AUTO_MDIX_DISABLE;
- else
- data &= ~PORT_AUTO_MDIX_DISABLE;
- if (val & KSZ886X_BMCR_FORCE_MDI)
- data |= PORT_FORCE_MDIX;
- else
- data &= ~PORT_FORCE_MDIX;
- if (val & BMCR_LOOPBACK)
- data |= PORT_PHY_LOOPBACK;
- else
- data &= ~PORT_PHY_LOOPBACK;
+ return ksz_prmw8(dev, port, regs[P_NEG_RESTART_CTRL], restart_mask,
+ restart);
+}
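The conversion leans on ksz_prmw8(), a masked port read-modify-write; assuming the usual semantics of that helper, it behaves like this sketch (name hypothetical):

static int ksz_prmw8_sketch(struct ksz_device *dev, u16 port, int offset,
			    u8 mask, u8 val)
{
	u8 data;
	int ret = ksz_pread8(dev, port, offset, &data);

	if (ret)
		return ret;
	data = (data & ~mask) | (val & mask);	/* touch only masked bits */
	return ksz_pwrite8(dev, port, offset, data);
}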
- if (data != restart) {
- ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
- data);
- if (ret)
- return ret;
- }
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ const u16 *regs;
+ u8 ctrl, data;
+ u16 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ ret = ksz8_w_phy_bmcr(dev, p, val);
+ if (ret)
+ return ret;
break;
case MII_ADVERTISE:
ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index beca974e0171..7c9341ef73b0 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -265,6 +265,7 @@
#define PORT_AUTO_MDIX_DISABLE BIT(2)
#define PORT_FORCE_MDIX BIT(1)
#define PORT_MAC_LOOPBACK BIT(0)
+#define KSZ8873_PORT_PHY_LOOPBACK BIT(0)
#define REG_PORT_1_STATUS_2 0x1E
#define REG_PORT_2_STATUS_2 0x2E
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index cac4a607e54a..82bebee4615c 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -104,6 +104,10 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 245dfb7a7a31..f799be868274 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1476,6 +1476,39 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.gbit_capable = {true, true, true},
},
+ [KSZ8567] = {
+ .chip_id = KSZ8567_CHIP_ID,
+ .dev_name = "KSZ8567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {false, false, false, false, false,
+ true, true},
+ },
+
[KSZ9567] = {
.chip_id = KSZ9567_CHIP_ID,
.dev_name = "KSZ9567",
@@ -1864,6 +1897,29 @@ static void ksz_get_strings(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_update_port_member - Adjust port forwarding rules based on STP state and
+ * isolation settings.
+ * @dev: A pointer to the struct ksz_device representing the device.
+ * @port: The port number to adjust.
+ *
+ * This function dynamically adjusts the port membership configuration for a
+ * specified port and other device ports, based on Spanning Tree Protocol (STP)
+ * states and port isolation settings. Each port, including the CPU port, has a
+ * membership register, represented as a bitfield, where each bit corresponds
+ * to a port number. A set bit indicates permission to forward frames to that
+ * port. This function iterates over all ports, updating the membership register
+ * to reflect current forwarding permissions:
+ *
+ * 1. Forwards frames only to ports that are part of the same bridge group and
+ * in the BR_STATE_FORWARDING state.
+ * 2. Takes into account the isolation status of ports; ports in the
+ * BR_STATE_FORWARDING state with BR_ISOLATED configuration will not forward
+ * frames to each other, even if they are in the same bridge group.
+ * 3. Ensures that the CPU port is included in the membership based on its
+ * upstream port configuration, allowing for management and control traffic
+ * to flow as required.
+ */
static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -1892,7 +1948,14 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
if (other_p->stp_state != BR_STATE_FORWARDING)
continue;
- if (p->stp_state == BR_STATE_FORWARDING) {
+ /* At this point we know that "port" and "other" port [i] are in
+ * the same bridge group and that "other" port [i] is in the
+ * forwarding STP state. If "port" is also forwarding, allow
+ * forwarding from port [port] to port [i], except when both
+ * ports are isolated.
+ */
+ if (p->stp_state == BR_STATE_FORWARDING &&
+ !(p->isolated && other_p->isolated)) {
val |= BIT(port);
port_member |= BIT(i);
}
@@ -1911,8 +1974,19 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
third_p = &dev->ports[j];
if (third_p->stp_state != BR_STATE_FORWARDING)
continue;
+
third_dp = dsa_to_port(ds, j);
- if (dsa_port_bridge_same(other_dp, third_dp))
+
+ /* Now update the relation of the "other" port [i] to the
+ * "third" port [j]. Both are already known to be in the
+ * forwarding STP state; additionally require that they are in
+ * the same bridge group and not both isolated before allowing
+ * forwarding from port [i] to port [j].
+ */
+ if (dsa_port_bridge_same(other_dp, third_dp) &&
+ !(other_p->isolated && third_p->isolated))
val |= BIT(j);
}
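The rule the two loops implement reduces to one predicate per port pair; a simplified model (hypothetical helper, leaving out the CPU-port handling described in the kernel-doc above):

/* Ports a and b may forward to each other iff both are forwarding
 * members of the same bridge and are not both isolated.
 */
static bool ksz_may_forward_sketch(const struct ksz_port *a,
				   const struct ksz_port *b, bool same_bridge)
{
	return same_bridge &&
	       a->stp_state == BR_STATE_FORWARDING &&
	       b->stp_state == BR_STATE_FORWARDING &&
	       !(a->isolated && b->isolated);
}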
@@ -2649,6 +2723,7 @@ static void ksz_port_teardown(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2664,7 +2739,7 @@ static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~BR_LEARNING)
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -2677,8 +2752,12 @@ static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
- if (flags.mask & BR_LEARNING) {
- p->learning = !!(flags.val & BR_LEARNING);
+ if (flags.mask & (BR_LEARNING | BR_ISOLATED)) {
+ if (flags.mask & BR_LEARNING)
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ if (flags.mask & BR_ISOLATED)
+ p->isolated = !!(flags.val & BR_ISOLATED);
/* Make the change take effect immediately */
ksz_port_stp_state_set(ds, port, p->stp_state);
@@ -2705,7 +2784,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
dev->chip_id == KSZ9563_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
- if (dev->chip_id == KSZ9477_CHIP_ID ||
+ if (dev->chip_id == KSZ8567_CHIP_ID ||
+ dev->chip_id == KSZ9477_CHIP_ID ||
dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
@@ -2813,6 +2893,7 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8830_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2839,6 +2920,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2852,7 +2934,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
}
static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
int ret;
@@ -2872,7 +2954,7 @@ static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
int ret;
@@ -3183,6 +3265,7 @@ static int ksz_switch_detect(struct ksz_device *dev)
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case KSZ9567_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
case LAN9372_CHIP_ID:
@@ -3220,6 +3303,7 @@ static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -3239,6 +3323,7 @@ static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -4142,6 +4227,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 15612101a155..40c11b0d6b62 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -110,6 +110,7 @@ struct ksz_switch_macaddr {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
bool learning;
+ bool isolated;
int stp_state;
struct phy_device phydev;
@@ -187,6 +188,7 @@ struct ksz_device {
/* List of supported models */
enum ksz_model {
KSZ8563,
+ KSZ8567,
KSZ8795,
KSZ8794,
KSZ8765,
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 6f6d878e742c..c8166fb440ab 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -165,6 +165,10 @@ static const struct of_device_id ksz_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
@@ -204,6 +208,7 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz9893" },
{ "ksz9563" },
{ "ksz8563" },
+ { "ksz8567" },
{ "ksz9567" },
{ "lan9370" },
{ "lan9371" },
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 088533663b83..fa3ee85a99c1 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -81,17 +81,14 @@ static const struct regmap_bus mt7530_regmap_bus = {
};
static int
-mt7531_create_sgmii(struct mt7530_priv *priv, bool dual_sgmii)
+mt7531_create_sgmii(struct mt7530_priv *priv)
{
struct regmap_config *mt7531_pcs_config[2] = {};
struct phylink_pcs *pcs;
struct regmap *regmap;
int i, ret = 0;
- /* MT7531AE has two SGMII units for port 5 and port 6
- * MT7531BE has only one SGMII unit for port 6
- */
- for (i = dual_sgmii ? 0 : 1; i < 2; i++) {
+ for (i = priv->p5_sgmii ? 0 : 1; i < 2; i++) {
mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
sizeof(struct regmap_config),
GFP_KERNEL);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 391c4dbdff42..1ae11b180d54 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -414,92 +414,57 @@ mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
}
/* Setup port 6 interface mode and TRGMII TX circuit */
-static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, xtal;
-
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ u32 ncpo1, ssc_delta, xtal;
- if (xtal == HWTRAP_XTAL_20MHZ) {
- dev_err(priv->dev,
- "%s: MT7530 with a 20MHz XTAL is not supported!\n",
- __func__);
- return -EINVAL;
- }
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- trgint = 0;
- break;
- case PHY_INTERFACE_MODE_TRGMII:
- trgint = 1;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
- if (priv->id == ID_MT7621) {
- /* PLL frequency: 125MHz: 1.0GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0640;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x0a00;
- } else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x1400;
- }
- break;
- default:
- dev_err(priv->dev, "xMII interface %d not supported\n",
- interface);
- return -EINVAL;
+ if (interface == PHY_INTERFACE_MODE_RGMII) {
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(0));
+ return;
}
- mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
- P6_INTF_MODE(trgint));
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
- if (trgint) {
- /* Disable the MT7530 TRGMII clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
- /* Enable the MT7530 TRGMII clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ if (priv->id == ID_MT7621) {
+ /* PLL frequency: 125MHz: 1.0GBit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0640;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0c80;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x1400;
}
- return 0;
-}
-
-static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
-{
- u32 val;
-
- val = mt7530_read(priv, MT7531_TOP_SIG_SR);
-
- return (val & PAD_DUAL_SGMII_EN) != 0;
-}
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
+ RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL |
+ RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG |
+ RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-static int
-mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
-{
- return 0;
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
}
static void
@@ -510,9 +475,6 @@ mt7531_pll_setup(struct mt7530_priv *priv)
u32 xtal;
u32 val;
- if (mt7531_dual_sgmii_supported(priv))
- return;
-
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
hwstrap = mt7530_read(priv, MT7531_HWTRAP);
@@ -920,8 +882,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
- case P5_INTF_SEL_GMAC5_SGMII:
- return "GMAC5_SGMII";
default:
return "unknown";
}
@@ -956,13 +916,8 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
/* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
val &= ~MHWTRAP_P5_DIS;
break;
- case P5_DISABLED:
- interface = PHY_INTERFACE_MODE_NA;
- break;
default:
- dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
- priv->p5_intf_sel);
- goto unlock_exit;
+ break;
}
/* Setup RGMII settings */
@@ -992,9 +947,6 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
- priv->p5_interface = interface;
-
-unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
@@ -1014,18 +966,10 @@ mt753x_trap_frames(struct mt7530_priv *priv)
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}
-static int
+static void
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- int ret;
-
- /* Setup max capability of CPU port at first */
- if (priv->info->cpu_port_config) {
- ret = priv->info->cpu_port_config(ds, port);
- if (ret)
- return ret;
- }
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
@@ -1035,10 +979,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
- /* Set CPU port number */
- if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
- mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
-
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
@@ -1055,8 +995,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
-
- return 0;
}
static int
@@ -1080,7 +1018,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
@@ -1100,7 +1037,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
}
@@ -2107,7 +2043,7 @@ mt7530_setup_irq(struct mt7530_priv *priv)
}
/* This register must be set for MT7530 to properly fire interrupts */
- if (priv->id != ID_MT7531)
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
@@ -2146,24 +2082,40 @@ mt7530_free_irq_common(struct mt7530_priv *priv)
static void
mt7530_free_irq(struct mt7530_priv *priv)
{
- mt7530_free_mdio_irq(priv);
+ struct device_node *mnp, *np = priv->dev->of_node;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ mt7530_free_mdio_irq(priv);
+ of_node_put(mnp);
+
mt7530_free_irq_common(priv);
}
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
+ struct device_node *mnp, *np = priv->dev->of_node;
struct dsa_switch *ds = priv->ds;
struct device *dev = priv->dev;
struct mii_bus *bus;
static int idx;
- int ret;
+ int ret = 0;
+
+ mnp = of_get_child_by_name(np, "mdio");
+
+ if (mnp && !of_device_is_available(mnp))
+ goto out;
bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!mnp)
+ ds->user_mii_bus = bus;
- ds->user_mii_bus = bus;
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
@@ -2174,16 +2126,18 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_setup_mdio_irq(priv);
- ret = devm_mdiobus_register(dev, bus);
+ ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_free_mdio_irq(priv);
}
+out:
+ of_node_put(mnp);
return ret;
}
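
The optional-"mdio"-child handling above follows a common kernel idiom: look up the child node, fall back to the legacy ds->user_mii_bus only when the DT does not describe the bus, and hand the node (which may be NULL) to devm_of_mdiobus_register(), which degrades to a plain devm_mdiobus_register() in that case. A minimal sketch of the pattern, with bus setup details elided and names illustrative:

	static int example_setup_mdio(struct device *dev, struct dsa_switch *ds)
	{
		struct device_node *mnp;
		struct mii_bus *bus;
		int ret;

		/* The "mdio" child is optional; mnp may be NULL. */
		mnp = of_get_child_by_name(dev->of_node, "mdio");

		bus = devm_mdiobus_alloc(dev);
		if (!bus) {
			of_node_put(mnp);
			return -ENOMEM;
		}

		/* bus->name, bus->id, bus->read and bus->write would be
		 * filled in here.
		 */

		/* Only fall back to the legacy DSA user bus when the DT
		 * does not describe the bus itself.
		 */
		if (!mnp)
			ds->user_mii_bus = bus;

		ret = devm_of_mdiobus_register(dev, bus, mnp);
		of_node_put(mnp);
		return ret;
	}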
@@ -2267,6 +2221,12 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) {
+ dev_err(priv->dev,
+ "MT7530 with a 20MHz XTAL is not supported!\n");
+ return -EINVAL;
+ }
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
@@ -2289,14 +2249,18 @@ mt7530_setup(struct dsa_switch *ds)
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ PMCR_FORCE_MODE, PMCR_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2305,9 +2269,7 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2326,16 +2288,13 @@ mt7530_setup(struct dsa_switch *ds)
return ret;
/* Setup port 5 */
- priv->p5_intf_sel = P5_DISABLED;
- interface = PHY_INTERFACE_MODE_NA;
-
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
- if (ret && ret != -ENODEV)
- return ret;
} else {
- /* Scan the ethernet nodes. look for GMAC1, lookup used phy */
+ /* Scan the ethernet nodes. Look for GMAC1 and look up the PHY in use.
+ * Set priv->p5_intf_sel to the appropriate value if PHY muxing
+ * is detected.
+ */
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
@@ -2366,6 +2325,10 @@ mt7530_setup(struct dsa_switch *ds)
of_node_put(phy_node);
break;
}
+
+ if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 ||
+ priv->p5_intf_sel == P5_INTF_SEL_PHY_P4)
+ mt7530_setup_port5(ds, interface);
}
#ifdef CONFIG_GPIOLIB
@@ -2376,8 +2339,6 @@ mt7530_setup(struct dsa_switch *ds)
}
#endif /* CONFIG_GPIOLIB */
- mt7530_setup_port5(ds, interface);
-
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2402,6 +2363,12 @@ mt7531_setup_common(struct dsa_switch *ds)
UNU_FFP_MASK);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7531_FORCE_MODE, MT7531_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2412,9 +2379,7 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2474,38 +2439,35 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
- /* all MACs must be forced link-down before sw reset */
+ /* MT7531AE has two SGMII units, one for port 5 and one for port 6.
+ * MT7531BE has only one SGMII unit, which is for port 6.
+ */
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
+
+ /* Force link down on all ports before internal reset */
for (i = 0; i < MT7530_NUM_PORTS; i++)
mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
/* Reset the switch through internal reset */
- mt7530_write(priv, MT7530_SYS_CTRL,
- SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
- SYS_CTRL_REG_RST);
-
- mt7531_pll_setup(priv);
-
- if (mt7531_dual_sgmii_supported(priv)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
+ if (!priv->p5_sgmii) {
+ mt7531_pll_setup(priv);
+ } else {
/* Let ds->user_mii_bus be able to access external phy. */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
- } else {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
}
- dev_dbg(ds->dev, "P5 support %s interface\n",
- p5_intf_modes(priv->p5_intf_sel));
+
+ if (!dsa_is_unused_port(ds, 5))
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Let phylink decide the interface later. */
- priv->p5_interface = PHY_INTERFACE_MODE_NA;
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
/* Enable the PHY core PLL. Since no phy_device has been created yet
* when phy_[read,write]_mmd_indirect is called, we provide our own
* mt7531_ind_mmd_phy_[read,write] to complete this
@@ -2535,12 +2497,14 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ /* Port 5 supports rgmii with delays, mii, and gmii. */
+ case 5:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
@@ -2548,7 +2512,8 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
break;
- case 6: /* 1st cpu port */
+ /* Port 6 supports rgmii and trgmii. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_RGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_TRGMII,
@@ -2557,30 +2522,30 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
-{
- return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
-}
-
static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port)) {
+ /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on
+ * MT7531AE.
+ */
+ case 5:
+ if (!priv->p5_sgmii) {
phy_interface_set_rgmii(config->supported_interfaces);
break;
}
fallthrough;
- case 6: /* 1st cpu port supports sgmii/8023z only */
+ /* Port 6 supports sgmii/802.3z. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
@@ -2596,14 +2561,14 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
- phy_interface_zero(config->supported_interfaces);
-
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 3:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
@@ -2612,41 +2577,24 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static int
-mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->pad_setup(ds, state->interface);
-}
-
-static int
+static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- /* Only need to setup port5. */
- if (port != 5)
- return 0;
-
- mt7530_setup_port5(priv->ds, interface);
-
- return 0;
+ if (port == 5)
+ mt7530_setup_port5(priv->ds, interface);
+ else if (port == 6)
+ mt7530_setup_port6(priv->ds, interface);
}
-static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
- phy_interface_t interface,
- struct phy_device *phydev)
+static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
{
u32 val;
- if (!mt7531_is_rgmii_port(priv, port)) {
- dev_err(priv->dev, "RGMII mode is not available for port %d\n",
- port);
- return -EINVAL;
- }
-
val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
val |= GP_CLK_EN;
val &= ~GP_MODE_MASK;
@@ -2674,31 +2622,14 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
case PHY_INTERFACE_MODE_RGMII_ID:
break;
default:
- return -EINVAL;
+ break;
}
}
- mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
-
- return 0;
-}
-static bool mt753x_is_mac_port(u32 port)
-{
- return (port == 5 || port == 6);
-}
-
-static int
-mt7988_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
-{
- if (dsa_is_cpu_port(ds, port) &&
- interface == PHY_INTERFACE_MODE_INTERNAL)
- return 0;
-
- return -EINVAL;
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
}
-static int
+static void
mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
@@ -2706,39 +2637,11 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct phy_device *phydev;
struct dsa_port *dp;
- if (!mt753x_is_mac_port(port)) {
- dev_err(priv->dev, "port %d is not a MAC port\n", port);
- return -EINVAL;
- }
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(interface)) {
dp = dsa_to_port(ds, port);
phydev = dp->user->phydev;
- return mt7531_rgmii_setup(priv, port, interface, phydev);
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* handled in SGMII PCS driver */
- return 0;
- default:
- return -EINVAL;
+ mt7531_rgmii_setup(priv, port, interface, phydev);
}
-
- return -EINVAL;
-}
-
-static int
-mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->mac_port_config(ds, port, mode, state->interface);
}
static struct phylink_pcs *
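
phy_interface_mode_is_rgmii() is true for exactly the four RGMII variants the removed switch statement enumerated, so the collapsed test is equivalent. The helper in include/linux/phy.h boils down to a range check over the consecutive enum values:

	/* Equivalent open-coded form of phy_interface_mode_is_rgmii() */
	static bool is_rgmii(phy_interface_t mode)
	{
		return mode >= PHY_INTERFACE_MODE_RGMII &&
		       mode <= PHY_INTERFACE_MODE_RGMII_TXID;
	}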
@@ -2764,54 +2667,13 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
-
- switch (port) {
- case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
-
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- if (priv->p5_intf_sel != P5_DISABLED)
- priv->p5_interface = state->interface;
- break;
- case 6: /* 1st cpu port */
- if (priv->p6_interface == state->interface)
- break;
-
- mt753x_pad_setup(ds, state);
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- priv->p6_interface = state->interface;
- break;
- default:
-unsupported:
- dev_err(ds->dev, "%s: unsupported %s port: %i\n",
- __func__, phy_modes(state->interface), port);
- return;
- }
-
- mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
- mcr_new = mcr_cur;
- mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
- mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
+ if ((port == 5 || port == 6) && priv->info->mac_port_config)
+ priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
- mcr_new |= PMCR_EXT_PHY;
-
- if (mcr_new != mcr_cur)
- mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+ mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY);
}
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -2835,18 +2697,10 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
- /* MT753x MAC works in 1G full duplex mode for all up-clocked
- * variants.
- */
- if (interface == PHY_INTERFACE_MODE_INTERNAL ||
- interface == PHY_INTERFACE_MODE_TRGMII ||
- (phy_interface_mode_is_8023z(interface))) {
- speed = SPEED_1000;
- duplex = DUPLEX_FULL;
- }
-
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
mcr |= PMCR_FORCE_SPEED_1000;
break;
case SPEED_100:
@@ -2864,6 +2718,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
mcr |= PMCR_FORCE_EEE1G;
break;
case SPEED_100:
@@ -2875,63 +2730,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
-static int
-mt7531_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
- phy_interface_t interface;
- int speed;
- int ret;
-
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- interface = PHY_INTERFACE_MODE_RGMII;
- else
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p5_interface = interface;
- break;
- case 6:
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p6_interface = interface;
- break;
- default:
- return -EINVAL;
- }
-
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- speed = SPEED_2500;
- else
- speed = SPEED_1000;
-
- ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
- if (ret)
- return ret;
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
- speed, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
-static int
-mt7988_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
-
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED,
- PHY_INTERFACE_MODE_INTERNAL, NULL,
- SPEED_10000, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -3014,17 +2812,9 @@ static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int i, ret;
-
- /* Initialise the PCS devices */
- for (i = 0; i < priv->ds->num_ports; i++) {
- priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
- priv->pcs[i].priv = priv;
- priv->pcs[i].port = i;
- }
+ int ret = priv->info->sw_setup(ds);
+ int i;
- ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3036,8 +2826,16 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].pcs.neg_mode = true;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+
if (priv->create_sgmii) {
- ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv));
+ ret = priv->create_sgmii(priv);
if (ret && priv->irq)
mt7530_free_irq(priv);
}
@@ -3046,7 +2844,7 @@ mt753x_setup(struct dsa_switch *ds)
}
static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
@@ -3058,7 +2856,7 @@ static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
}
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
@@ -3075,9 +2873,34 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
-static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt753x_conduit_state_change(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational)
{
- return 0;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
+ struct mt7530_priv *priv = ds->priv;
+ int val = 0;
+ u8 mask;
+
+ /* Set the CPU port that frames are trapped to on MT7530. Trapped frames
+ * will be forwarded to the numerically smallest CPU port whose conduit
+ * interface is up.
+ */
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ mask = BIT(cpu_dp->index);
+
+ if (operational)
+ priv->active_cpu_ports |= mask;
+ else
+ priv->active_cpu_ports &= ~mask;
+
+ if (priv->active_cpu_ports)
+ val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
+
+ mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
}
static int mt7988_setup(struct dsa_switch *ds)
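
The bookkeeping above reduces to maintaining a bitmask of operational CPU ports and pointing the trap register at the lowest set bit. A standalone sketch of just that selection logic (plain C; the driver uses the kernel's __ffs() where this uses __builtin_ctz()):

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t active_cpu_ports;

	/* Returns the CPU port frames should be trapped to, or -1 when no
	 * conduit interface is operational (CPU_EN stays cleared).
	 */
	static int trap_port_update(int port, int operational)
	{
		if (operational)
			active_cpu_ports |= 1u << port;
		else
			active_cpu_ports &= ~(1u << port);

		if (!active_cpu_ports)
			return -1;

		return __builtin_ctz(active_cpu_ports);
	}

	int main(void)
	{
		printf("%d\n", trap_port_update(6, 1)); /* 6 */
		printf("%d\n", trap_port_update(5, 1)); /* 5, the smaller port wins */
		printf("%d\n", trap_port_update(5, 0)); /* back to 6 */
		return 0;
	}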
@@ -3130,6 +2953,7 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
+ .conduit_state_change = mt753x_conduit_state_change,
};
EXPORT_SYMBOL_GPL(mt7530_switch_ops);
@@ -3142,7 +2966,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3154,7 +2977,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3166,8 +2988,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7531_pad_setup,
- .cpu_port_config = mt7531_cpu_port_config,
.mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
},
@@ -3179,10 +2999,7 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7988_pad_setup,
- .cpu_port_config = mt7988_cpu_port_config,
.mac_port_get_caps = mt7988_mac_port_get_caps,
- .mac_port_config = mt7988_mac_config,
},
};
EXPORT_SYMBOL_GPL(mt753x_table);
@@ -3209,10 +3026,8 @@ mt7530_probe_common(struct mt7530_priv *priv)
/* Sanity check if these required device operations are filled
* properly.
*/
- if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
- !priv->info->mac_port_get_caps ||
- !priv->info->mac_port_config)
+ if (!priv->info->sw_setup || !priv->info->phy_read_c22 ||
+ !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps)
return -EINVAL;
priv->id = priv->info->id;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 17e42d30fff4..a71166e0a7fc 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -41,8 +41,8 @@ enum mt753x_id {
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
#define CPU_EN BIT(7)
-#define CPU_PORT(x) ((x) << 4)
-#define CPU_MASK (0xf << 4)
+#define CPU_PORT_MASK GENMASK(6, 4)
+#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x)
#define MIRROR_EN BIT(3)
#define MIRROR_PORT(x) ((x) & 0x7)
#define MIRROR_MASK 0x7
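
The move from an open-coded shift to GENMASK()/FIELD_PREP() above is behavior-preserving for in-range values; both encode x into bits 6:4. A userspace sketch of the equivalent arithmetic, using simplified stand-ins for the kernel macros (the real ones in linux/bits.h and linux/bitfield.h add compile-time sanity checks):

	#include <assert.h>
	#include <stdint.h>

	/* Simplified stand-ins for the kernel macros. */
	#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
	#define FIELD_PREP(mask, val) \
		(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

	#define CPU_PORT_MASK	GENMASK(6, 4)
	#define CPU_PORT(x)	FIELD_PREP(CPU_PORT_MASK, x)

	int main(void)
	{
		/* Same encoding as the old ((x) << 4) for ports 0..7. */
		for (uint32_t x = 0; x < 8; x++)
			assert(CPU_PORT(x) == (x << 4));
		return 0;
	}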
@@ -304,20 +304,11 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_DPX | \
MT7531_FORCE_RX_FC | \
MT7531_FORCE_TX_FC)
-#define PMCR_FORCE_MODE_ID(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_FORCE_MODE : PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
- PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
- PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
- PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_SPEED_1000 | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
@@ -683,11 +674,10 @@ struct mt7530_port {
/* Port 5 interface select definitions */
enum p5_interface_select {
- P5_DISABLED = 0,
+ P5_DISABLED,
P5_INTF_SEL_PHY_P0,
P5_INTF_SEL_PHY_P4,
P5_INTF_SEL_GMAC5,
- P5_INTF_SEL_GMAC5_SGMII,
};
struct mt7530_priv;
@@ -705,8 +695,6 @@ struct mt753x_pcs {
* @phy_write_c22: Holding the way writing PHY port using C22
* @phy_read_c45: Holding the way reading PHY port using C45
* @phy_write_c45: Holding the way writing PHY port using C45
- * @pad_setup: Holding the way setting up the bus pad for a certain
- * MAC port
* @phy_mode_supported: Check if the PHY type is being supported on a certain
* port
* @mac_port_validate: Holding the way to set addition validate type for a
@@ -727,16 +715,14 @@ struct mt753x_info {
int regnum);
int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
int regnum, u16 val);
- int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
- int (*cpu_port_config)(struct dsa_switch *ds, int port);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
+ void (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -754,12 +740,14 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p6_interface Holding the current port 6 interface
* @p5_intf_sel: Holding the current port 5 interface select
+ * @p5_sgmii: Flag indicating whether port 5 of the MT7531 switch
+ * has SGMII
* @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
+ * @active_cpu_ports: Holding the active CPU ports
*/
struct mt7530_priv {
struct device *dev;
@@ -773,9 +761,8 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- phy_interface_t p6_interface;
- phy_interface_t p5_interface;
- unsigned int p5_intf_sel;
+ enum p5_interface_select p5_intf_sel;
+ bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
@@ -785,7 +772,8 @@ struct mt7530_priv {
int irq;
struct irq_domain *irq_domain;
u32 irq_enable;
- int (*create_sgmii)(struct mt7530_priv *priv, bool dual_sgmii);
+ int (*create_sgmii)(struct mt7530_priv *priv);
+ u8 active_cpu_ports;
};
struct mt7530_hw_vlan_entry {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 383b3c4d6f59..9ed1821184ec 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1451,14 +1451,14 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
@@ -3659,7 +3659,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
int err;
if (!chip->info->ops->phy_read_c45)
- return -EOPNOTSUPP;
+ return -ENODEV;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
@@ -3712,7 +3712,10 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (external) {
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
+ if (chip->info->family == MV88E6XXX_FAMILY_6393)
+ err = mv88e6393x_g2_scratch_gpio_set_smi(chip, true);
+ else
+ err = mv88e6390_g2_scratch_gpio_set_smi(chip, true);
mv88e6xxx_reg_unlock(chip);
if (err)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index d9434f7cae53..82f9b410de0b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -378,8 +378,10 @@ extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external);
int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index a9d6e40321a2..61ab6cc4fbfc 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
};
/**
- * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * mv88e6390_g2_scratch_gpio_set_smi - set gpio muxing for external smi
* @chip: chip private data
* @external: set mux for external smi, or free for gpio usage
*
@@ -248,7 +248,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
* an external SMI interface, or they may be made free for other
* GPIO uses.
*/
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external)
{
int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
@@ -291,6 +291,37 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
}
/**
+ * mv88e6393x_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * MV88E6191X/6193X/6393X GPIO pins 9 and 10 can be configured as an
+ * external SMI interface or as regular GPIOs.
+ *
+ * They do, however, have a different register layout than the chips
+ * handled by the existing function.
+ */
+
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+ if (err)
+ return err;
+
+ if (external)
+ val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+ else
+ val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+ return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
+
+/**
* mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
* @chip: chip private data
* @port: port number to check for serdes
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 4d677f836807..5a27d047a38e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -95,7 +95,7 @@ static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
}
}
-static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
@@ -137,6 +137,7 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
+ mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index c51f40960961..dab66c0c6f64 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -950,15 +950,15 @@ qca8k_mdio_register(struct qca8k_priv *priv)
struct device *dev = ds->dev;
struct device_node *mdio;
struct mii_bus *bus;
- int err = 0;
+ int ret = 0;
mdio = of_get_child_by_name(dev->of_node, "mdio");
if (mdio && !of_device_is_available(mdio))
- goto out;
+ goto out_put_node;
bus = devm_mdiobus_alloc(dev);
if (!bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out_put_node;
}
@@ -984,12 +984,11 @@ qca8k_mdio_register(struct qca8k_priv *priv)
bus->write = qca8k_legacy_mdio_write;
}
- err = devm_of_mdiobus_register(dev, bus, mdio);
+ ret = devm_of_mdiobus_register(dev, bus, mdio);
out_put_node:
of_node_put(mdio);
-out:
- return err;
+ return ret;
}
static int
@@ -998,7 +997,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
phy_interface_t mode;
- int err;
+ int ret;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
@@ -1008,11 +1007,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
- err = of_property_read_u32(port, "reg", &reg);
- if (err) {
+ ret = of_property_read_u32(port, "reg", &reg);
+ if (ret) {
of_node_put(port);
of_node_put(ports);
- return err;
+ return ret;
}
if (!dsa_is_user_port(priv->ds, reg))
@@ -2051,12 +2050,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
- GPIOD_ASIS);
+ GPIOD_OUT_HIGH);
if (IS_ERR(priv->reset_gpio))
return PTR_ERR(priv->reset_gpio);
if (priv->reset_gpio) {
- gpiod_set_value_cansleep(priv->reset_gpio, 1);
/* The active low duration must be greater than 10 ms
* and checkpatch.pl wants 20 ms.
*/
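
Requesting the line with GPIOD_OUT_HIGH makes gpiolib drive it asserted at request time (logical 1 maps to whatever electrical level the DT's active-low flag dictates), which is what the dropped gpiod_set_value_cansleep(..., 1) did by hand. A minimal sketch of the resulting reset sequence, assuming a similar probe context:

	struct gpio_desc *reset;

	/* Request the reset line already asserted. */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (reset) {
		/* Hold reset past the 10 ms datasheet minimum. */
		msleep(20);
		gpiod_set_value_cansleep(reset, 0); /* deassert */
	}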
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 2358cd399c7e..7f80035c5441 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -534,7 +534,7 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
struct qca8k_priv *priv = ds->priv;
@@ -558,7 +558,7 @@ exit:
}
int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index c8785c36c54e..2184d8d2d5a9 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -518,8 +518,8 @@ void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
-int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 060165a85fb7..6989972eebc3 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -16,37 +16,29 @@ menuconfig NET_DSA_REALTEK
if NET_DSA_REALTEK
config NET_DSA_REALTEK_MDIO
- tristate "Realtek MDIO interface driver"
+ bool "Realtek MDIO interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches configured
through MDIO.
config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI interface driver"
+ bool "Realtek SMI interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches connected
through SMI.
config NET_DSA_REALTEK_RTL8365MB
- tristate "Realtek RTL8365MB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8365MB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL8_4
help
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
config NET_DSA_REALTEK_RTL8366RB
- tristate "Realtek RTL8366RB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8366RB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL4_A
help
Select to enable support for Realtek RTL8366RB.
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 0aab57252a7c..35491dc20d6d 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -1,6 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK) += realtek_dsa.o
+realtek_dsa-objs := rtl83xx.o
+
+ifdef CONFIG_NET_DSA_REALTEK_MDIO
+realtek_dsa-objs += realtek-mdio.o
+endif
+
+ifdef CONFIG_NET_DSA_REALTEK_SMI
+realtek_dsa-objs += realtek-smi.o
+endif
+
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 292e6d087e8b..04b758e5a680 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -25,6 +25,8 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Read/write via mdiobus */
#define REALTEK_MDIO_CTRL0_REG 31
@@ -99,192 +101,87 @@ out_unlock:
return ret;
}
-static void realtek_mdio_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_mdio_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_mdio_info = {
.reg_read = realtek_mdio_read,
.reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_mdio_lock,
- .unlock = realtek_mdio_unlock,
};
-static const struct regmap_config realtek_mdio_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_mdio_read,
- .reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_mdio_probe(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_probe() - Probe an MDIO device for an MDIO-connected switch
+ * @mdiodev: mdio_device to probe on.
+ *
+ * This function should be used as the .probe in an mdio_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific for MDIO-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_mdio_probe(struct mdio_device *mdiodev)
{
- struct realtek_priv *priv;
struct device *dev = &mdiodev->dev;
- const struct realtek_variant *var;
- struct regmap_config rc;
- struct device_node *np;
+ struct realtek_priv *priv;
int ret;
- var = of_device_get_match_data(dev);
- if (!var)
- return -EINVAL;
-
- priv = devm_kzalloc(&mdiodev->dev,
- size_add(sizeof(*priv), var->chip_data_sz),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- mutex_init(&priv->map_lock);
+ priv = rtl83xx_probe(dev, &realtek_mdio_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- rc = realtek_mdio_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_mdio_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- priv->mdio_addr = mdiodev->addr;
priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
+ priv->mdio_addr = mdiodev->addr;
priv->write_reg_noack = realtek_mdio_write;
- np = dev->of_node;
-
- dev_set_drvdata(dev, priv);
-
- /* TODO: if power is software controlled, set up any regulators here */
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
-
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
- priv->ds->ops = var->ds_ops_mdio;
-
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ rtl83xx_remove(priv);
return ret;
}
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, REALTEK_DSA);
-static void realtek_mdio_remove(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_remove() - Remove the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device to be removed.
+ *
+ * This function should be used as the .remove in an mdio_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_remove(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, REALTEK_DSA);
-static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_shutdown() - Shut down the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device shutting down.
+ *
+ * This function should be used as the .shutdown in an mdio_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_shutdown(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_mdio_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
-
-static struct mdio_driver realtek_mdio_driver = {
- .mdiodrv.driver = {
- .name = "realtek-mdio",
- .of_match_table = realtek_mdio_of_match,
- },
- .probe = realtek_mdio_probe,
- .remove = realtek_mdio_remove,
- .shutdown = realtek_mdio_shutdown,
-};
-
-mdio_module_driver(realtek_mdio_driver);
-
-MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, REALTEK_DSA);
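
Because these entry points are exported into the REALTEK_DSA symbol namespace, any module that calls them must import that namespace next to its other MODULE_* declarations, e.g.:

	MODULE_IMPORT_NS(REALTEK_DSA);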
diff --git a/drivers/net/dsa/realtek/realtek-mdio.h b/drivers/net/dsa/realtek/realtek-mdio.h
new file mode 100644
index 000000000000..ee70f6a5b8ff
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_MDIO_H
+#define _REALTEK_MDIO_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO)
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return mdio_driver_register(drv);
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+ mdio_driver_unregister(drv);
+}
+
+int realtek_mdio_probe(struct mdio_device *mdiodev);
+void realtek_mdio_remove(struct mdio_device *mdiodev);
+void realtek_mdio_shutdown(struct mdio_device *mdiodev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+}
+
+static inline int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+}
+
+static inline void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+#endif /* _REALTEK_MDIO_H */
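
The static inline stubs above let a single combined module register whichever interface drivers were actually built, with no further #ifdefs at the call site. A hypothetical sketch of a common entry point using them (the rtl83xx_module_init name and the driver structs are illustrative, and an SMI counterpart wrapper in realtek-smi.h is assumed):

	static int __init rtl83xx_module_init(void)
	{
		int ret;

		/* A no-op returning 0 when MDIO support is not built in. */
		ret = realtek_mdio_driver_register(&realtek_mdio_driver);
		if (ret)
			return ret;

		ret = realtek_smi_driver_register(&realtek_smi_driver);
		if (ret)
			realtek_mdio_driver_unregister(&realtek_mdio_driver);

		return ret;
	}
	module_init(rtl83xx_module_init);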
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 755546ed8db6..88590ae95a75 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -31,7 +31,6 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
@@ -40,12 +39,14 @@
#include <linux/if_bridge.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "rtl83xx.h"
#define REALTEK_SMI_ACK_RETRY_COUNT 5
static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
{
- ndelay(priv->clk_delay);
+ ndelay(priv->variant->clk_delay);
}
static void realtek_smi_start(struct realtek_priv *priv)
@@ -208,7 +209,7 @@ static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
realtek_smi_start(priv);
/* Send READ command */
- ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_read);
if (ret)
goto out;
@@ -249,7 +250,7 @@ static int realtek_smi_write_reg(struct realtek_priv *priv,
realtek_smi_start(priv);
/* Send WRITE command */
- ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_write);
if (ret)
goto out;
@@ -310,258 +311,98 @@ static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
return realtek_smi_read_reg(priv, reg, val);
}
-static void realtek_smi_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_smi_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_smi_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_smi_info = {
.reg_read = realtek_smi_read,
.reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_smi_lock,
- .unlock = realtek_smi_unlock,
};
-static const struct regmap_config realtek_smi_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_read(priv, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_write(priv, addr, regnum, val);
-}
-
-static int realtek_smi_setup_mdio(struct dsa_switch *ds)
-{
- struct realtek_priv *priv = ds->priv;
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(priv->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- priv->user_mii_bus = devm_mdiobus_alloc(priv->dev);
- if (!priv->user_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- priv->user_mii_bus->priv = priv;
- priv->user_mii_bus->name = "SMI user MII";
- priv->user_mii_bus->read = realtek_smi_mdio_read;
- priv->user_mii_bus->write = realtek_smi_mdio_write;
- snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- ds->index);
- priv->user_mii_bus->dev.of_node = mdio_np;
- priv->user_mii_bus->parent = priv->dev;
- ds->user_mii_bus = priv->user_mii_bus;
-
- ret = devm_of_mdiobus_register(priv->dev, priv->user_mii_bus, mdio_np);
- if (ret) {
- dev_err(priv->dev, "unable to register MDIO bus %s\n",
- priv->user_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
+/**
+ * realtek_smi_probe() - Probe a platform device for an SMI-connected switch
+ * @pdev: platform_device to probe on.
+ *
+ * This function should be used as the .probe in a platform_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to SMI-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_smi_probe(struct platform_device *pdev)
{
- const struct realtek_variant *var;
struct device *dev = &pdev->dev;
struct realtek_priv *priv;
- struct regmap_config rc;
- struct device_node *np;
int ret;
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- mutex_init(&priv->map_lock);
-
- rc = realtek_smi_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_smi_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- priv->dev = dev;
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
- priv->setup_interface = realtek_smi_setup_mdio;
- priv->write_reg_noack = realtek_smi_write_reg_noack;
-
- dev_set_drvdata(dev, priv);
- spin_lock_init(&priv->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
+ priv = rtl83xx_probe(dev, &realtek_smi_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
/* Fetch MDIO pins */
priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdc))
+ if (IS_ERR(priv->mdc)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdc);
+ }
+
priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdio))
+ if (IS_ERR(priv->mdio)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdio);
-
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
}
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
- priv->ds->ops = var->ds_ops_smi;
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err_probe(dev, ret, "unable to register switch\n");
+ rtl83xx_remove(priv);
return ret;
}
+
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, REALTEK_DSA);
-static void realtek_smi_remove(struct platform_device *pdev)
+/**
+ * realtek_smi_remove() - Remove the driver of an SMI-connected switch
+ * @pdev: platform_device to be removed.
+ *
+ * This function should be used as the .remove_new in a platform_driver. First,
+ * it unregisters the DSA switch and then calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_remove(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
- if (priv->user_mii_bus)
- of_node_put(priv->user_mii_bus->dev.of_node);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, REALTEK_DSA);
-static void realtek_smi_shutdown(struct platform_device *pdev)
+/**
+ * realtek_smi_shutdown() - Shut down the driver of an SMI-connected switch
+ * @pdev: platform_device shutting down.
+ *
+ * This function should be used as the .shutdown in a platform_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_shutdown(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- platform_set_drvdata(pdev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_smi_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = realtek_smi_of_match,
- },
- .probe = realtek_smi_probe,
- .remove_new = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/realtek-smi.h b/drivers/net/dsa/realtek/realtek-smi.h
new file mode 100644
index 000000000000..ea49a2edd3c8
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
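+/* These wrappers let switch variant drivers register their SMI
+ * platform_driver unconditionally; when SMI support is compiled out they
+ * collapse into no-ops and the probe stub reports -ENOENT.
+ */
+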
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI)
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return platform_driver_register(drv);
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+ platform_driver_unregister(drv);
+}
+
+int realtek_smi_probe(struct platform_device *pdev);
+void realtek_smi_remove(struct platform_device *pdev);
+void realtek_smi_shutdown(struct platform_device *pdev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+}
+
+static inline int realtek_smi_probe(struct platform_device *pdev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_smi_remove(struct platform_device *pdev)
+{
+}
+
+static inline void realtek_smi_shutdown(struct platform_device *pdev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index 790488e9c667..e0b1aa01337b 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
+#include <linux/reset.h>
#define REALTEK_HW_STOP_DELAY 25 /* msecs */
#define REALTEK_HW_START_DELAY 100 /* msecs */
@@ -48,6 +49,7 @@ struct rtl8366_vlan_4k {
struct realtek_priv {
struct device *dev;
+ struct reset_control *reset_ctl;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
@@ -58,11 +60,10 @@ struct realtek_priv {
struct mii_bus *bus;
int mdio_addr;
- unsigned int clk_delay;
- u8 cmd_read;
- u8 cmd_write;
+ const struct realtek_variant *variant;
+
spinlock_t lock; /* Locks around command writes */
- struct dsa_switch *ds;
+ struct dsa_switch ds;
struct irq_domain *irqdomain;
bool leds_disabled;
@@ -73,7 +74,6 @@ struct realtek_priv {
struct rtl8366_mib_counter *mib_counters;
const struct realtek_ops *ops;
- int (*setup_interface)(struct dsa_switch *ds);
int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
@@ -91,7 +91,6 @@ struct realtek_ops {
int (*detect)(struct realtek_priv *priv);
int (*reset_chip)(struct realtek_priv *priv);
int (*setup)(struct realtek_priv *priv);
- void (*cleanup)(struct realtek_priv *priv);
int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
@@ -116,8 +115,7 @@ struct realtek_ops {
};
struct realtek_variant {
- const struct dsa_switch_ops *ds_ops_smi;
- const struct dsa_switch_ops *ds_ops_mdio;
+ const struct dsa_switch_ops *ds_ops;
const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index b072045eb154..12665a8a3412 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -101,6 +101,9 @@
#include <linux/if_vlan.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Family-specific data and limits */
#define RTL8365MB_PHYADDRMAX 7
@@ -206,10 +209,10 @@
#define RTL8365MB_EXT_PORT_MODE_100FX 13
/* External interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT0,EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
- ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ ((_extint) <= 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
(_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
0x0)
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
@@ -689,7 +692,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -722,7 +725,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
*data = val & 0xFFFF;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -733,7 +736,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -764,7 +767,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return 0;
}
@@ -825,17 +828,6 @@ static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
return 0;
}
-static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8365mb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
-}
-
static const struct rtl8365mb_extint *
rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
{
@@ -878,6 +870,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
{
const struct rtl8365mb_extint *extint =
rtl8365mb_get_port_extint(priv, port);
+ struct dsa_switch *ds = &priv->ds;
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
@@ -888,7 +881,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
if (!extint)
return -ENODEV;
- dp = dsa_to_port(priv->ds, port);
+ dp = dsa_to_port(ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -1541,6 +1534,7 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1551,7 +1545,7 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1567,12 +1561,13 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
@@ -1971,7 +1966,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
cpu->mask |= BIT(cpu_dp->index);
if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
@@ -1986,7 +1981,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Forward only to the CPU */
@@ -2003,7 +1998,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Set up per-port private data */
p->priv = priv;
@@ -2014,12 +2009,10 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
if (ret)
goto out_teardown_irq;
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
}
/* Start statistics counter polling */
@@ -2113,7 +2106,7 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
@@ -2134,29 +2127,6 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
.port_max_mtu = rtl8365mb_port_max_mtu,
};
-static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
- .get_tag_protocol = rtl8365mb_get_tag_protocol,
- .change_tag_protocol = rtl8365mb_change_tag_protocol,
- .setup = rtl8365mb_setup,
- .teardown = rtl8365mb_teardown,
- .phylink_get_caps = rtl8365mb_phylink_get_caps,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
- .phy_read = rtl8365mb_dsa_phy_read,
- .phy_write = rtl8365mb_dsa_phy_write,
- .port_stp_state_set = rtl8365mb_port_stp_state_set,
- .get_strings = rtl8365mb_get_strings,
- .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
- .get_sset_count = rtl8365mb_get_sset_count,
- .get_eth_phy_stats = rtl8365mb_get_phy_stats,
- .get_eth_mac_stats = rtl8365mb_get_mac_stats,
- .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
- .get_stats64 = rtl8365mb_get_stats64,
- .port_change_mtu = rtl8365mb_port_change_mtu,
- .port_max_mtu = rtl8365mb_port_max_mtu,
-};
-
static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
@@ -2164,16 +2134,66 @@ static const struct realtek_ops rtl8365mb_ops = {
};
const struct realtek_variant rtl8365mb_variant = {
- .ds_ops_smi = &rtl8365mb_switch_ops_smi,
- .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ds_ops = &rtl8365mb_switch_ops,
.ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
-EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+static const struct of_device_id rtl8365mb_of_match[] = {
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8365mb_of_match);
+
+static struct platform_driver rtl8365mb_smi_driver = {
+ .driver = {
+ .name = "rtl8365mb-smi",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8365mb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8365mb-mdio",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
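+/* Register the MDIO interface driver first, then the SMI one, unwinding the
+ * former if the latter fails. Either call is a no-op stub when the
+ * corresponding interface support is compiled out.
+ */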
+static int rtl8365mb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8365mb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8365mb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8365mb_init);
+
+static void __exit rtl8365mb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8365mb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+}
+module_exit(rtl8365mb_exit);
MODULE_AUTHOR("Alvin Šipraga <alsi@bang-olufsen.dk>");
MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
index 59f98d2c8769..7c6520ba3a26 100644
--- a/drivers/net/dsa/realtek/rtl8366-core.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -34,7 +34,7 @@ int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, REALTEK_DSA);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
@@ -187,7 +187,7 @@ int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, REALTEK_DSA);
int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
@@ -217,7 +217,7 @@ int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, REALTEK_DSA);
int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
@@ -243,7 +243,7 @@ int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
priv->vlan4k_enabled = enable;
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, REALTEK_DSA);
int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
@@ -265,7 +265,7 @@ int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, REALTEK_DSA);
int rtl8366_reset_vlan(struct realtek_priv *priv)
{
@@ -290,7 +290,7 @@ int rtl8366_reset_vlan(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, REALTEK_DSA);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
@@ -345,7 +345,7 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, REALTEK_DSA);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
@@ -389,7 +389,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, REALTEK_DSA);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -403,7 +403,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
for (i = 0; i < priv->num_mib_counters; i++)
ethtool_puts(&data, priv->mib_counters[i].name);
}
-EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, REALTEK_DSA);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
@@ -417,7 +417,7 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
return priv->num_mib_counters;
}
-EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, REALTEK_DSA);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
@@ -441,4 +441,4 @@ void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
data[i] = mibvalue;
}
}
-EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e3b6a470ca67..e10ae94cf771 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -23,6 +23,9 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -1030,12 +1033,10 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
dev_info(priv->dev, "no interrupt support\n");
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- return -ENODEV;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
}
return 0;
@@ -1650,6 +1651,7 @@ static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
+ struct dsa_switch *ds = &priv->ds;
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
@@ -1674,7 +1676,7 @@ static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
@@ -1718,7 +1720,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
@@ -1746,7 +1748,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
phy, regnum, reg, val);
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -1760,7 +1762,7 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
@@ -1777,22 +1779,11 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
-static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8366rb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
-}
-
static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
@@ -1858,7 +1849,7 @@ static int rtl8366rb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
@@ -1882,32 +1873,6 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
- .get_tag_protocol = rtl8366_get_tag_protocol,
- .setup = rtl8366rb_setup,
- .phy_read = rtl8366rb_dsa_phy_read,
- .phy_write = rtl8366rb_dsa_phy_write,
- .phylink_get_caps = rtl8366rb_phylink_get_caps,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
- .get_strings = rtl8366_get_strings,
- .get_ethtool_stats = rtl8366_get_ethtool_stats,
- .get_sset_count = rtl8366_get_sset_count,
- .port_bridge_join = rtl8366rb_port_bridge_join,
- .port_bridge_leave = rtl8366rb_port_bridge_leave,
- .port_vlan_filtering = rtl8366rb_vlan_filtering,
- .port_vlan_add = rtl8366_vlan_add,
- .port_vlan_del = rtl8366_vlan_del,
- .port_enable = rtl8366rb_port_enable,
- .port_disable = rtl8366rb_port_disable,
- .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
- .port_bridge_flags = rtl8366rb_port_bridge_flags,
- .port_stp_state_set = rtl8366rb_port_stp_state_set,
- .port_fast_age = rtl8366rb_port_fast_age,
- .port_change_mtu = rtl8366rb_change_mtu,
- .port_max_mtu = rtl8366rb_max_mtu,
-};
-
static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
@@ -1925,16 +1890,66 @@ static const struct realtek_ops rtl8366rb_ops = {
};
const struct realtek_variant rtl8366rb_variant = {
- .ds_ops_smi = &rtl8366rb_switch_ops_smi,
- .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ds_ops = &rtl8366rb_switch_ops,
.ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
-EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+static const struct of_device_id rtl8366rb_of_match[] = {
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8366rb_of_match);
+
+static struct platform_driver rtl8366rb_smi_driver = {
+ .driver = {
+ .name = "rtl8366rb-smi",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8366rb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8366rb-mdio",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8366rb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8366rb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8366rb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8366rb_init);
+
+static void __exit rtl8366rb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8366rb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+}
+module_exit(rtl8366rb_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
new file mode 100644
index 000000000000..d2e876805393
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+
+#include "realtek.h"
+#include "rtl83xx.h"
+
+/**
+ * rtl83xx_lock() - Locks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function is passed to regmap to be used as the lock function.
+ * It is also used externally to lock the regmap before executing multiple
+ * operations that must happen in sequence; those operations then use
+ * realtek_priv.map_nolock instead.
+ *
+ * Context: Can sleep. Acquires priv->map_lock.
+ * Return: nothing
+ */
+void rtl83xx_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, REALTEK_DSA);
+
+/**
+ * rtl83xx_unlock() - Unlocks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function unlocks the lock acquired by rtl83xx_lock.
+ *
+ * Context: Releases priv->map_lock.
+ * Return: nothing
+ */
+void rtl83xx_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, REALTEK_DSA);
+
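+/* Usage sketch for the pairing above (REG_CTRL and REG_DATA are illustrative
+ * register offsets, not real ones): hold the lock across a multi-register
+ * operation and use the non-locking regmap inside the critical section:
+ *
+ *	rtl83xx_lock(priv);
+ *	ret = regmap_write(priv->map_nolock, REG_CTRL, cmd);
+ *	if (!ret)
+ *		ret = regmap_read(priv->map_nolock, REG_DATA, &val);
+ *	rtl83xx_unlock(priv);
+ */
+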
+static int rtl83xx_user_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int rtl83xx_user_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+/**
+ * rtl83xx_setup_user_mdio() - register the user MII bus
+ * @ds: DSA switch associated with this user_mii_bus
+ *
+ * Registers the MDIO bus for the built-in Ethernet PHYs and associates it
+ * with the mandatory 'mdio' child OF node of the switch.
+ *
+ * Context: Can sleep.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret = 0;
+
+ mdio_np = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ bus->priv = priv;
+ bus->name = "Realtek user MII";
+ bus->read = rtl83xx_user_mdio_read;
+ bus->write = rtl83xx_user_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s:user_mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ ret = devm_of_mdiobus_register(priv->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ goto err_put_node;
+ }
+
+ priv->user_mii_bus = bus;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, REALTEK_DSA);
+
+/**
+ * rtl83xx_probe() - probe a Realtek switch
+ * @dev: the device being probed
+ * @interface_info: specific management interface info.
+ *
+ * This function initializes realtek_priv and reads data from the device tree
+ * node. The switch is hard reset if a reset method (reset controller or
+ * RESET GPIO) is provided.
+ *
+ * Context: Can sleep.
+ * Return: Pointer to the realtek_priv or ERR_PTR() in case of failure.
+ *
+ * The realtek_priv pointer does not need to be freed as it is controlled by
+ * devres.
+ */
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info)
+{
+ const struct realtek_variant *var;
+ struct realtek_priv *priv;
+ struct regmap_config rc = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = interface_info->reg_read,
+ .reg_write = interface_info->reg_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = rtl83xx_lock,
+ .unlock = rtl83xx_unlock,
+ };
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return ERR_PTR(-EINVAL);
+
+ priv = devm_kzalloc(dev, size_add(sizeof(*priv), var->chip_data_sz),
+ GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->map_lock);
+
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ rc.disable_locking = true;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->variant = var;
+ priv->ops = var->ops;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ spin_lock_init(&priv->lock);
+
+ priv->leds_disabled = of_property_read_bool(dev->of_node,
+ "realtek,disable-leds");
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(priv->reset_ctl)) {
+ ret = PTR_ERR(priv->reset_ctl);
+ dev_err_probe(dev, ret, "failed to get reset control\n");
+ return ERR_CAST(priv->reset_ctl);
+ }
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return ERR_CAST(priv->reset);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ if (priv->reset_ctl || priv->reset) {
+ rtl83xx_reset_assert(priv);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ rtl83xx_reset_deassert(priv);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ return priv;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, REALTEK_DSA);
+
+/**
+ * rtl83xx_register_switch() - detect and register a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function first detects the switch chip ID and then registers a DSA
+ * switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_register_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ int ret;
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to detect switch\n");
+ return ret;
+ }
+
+ ds->priv = priv;
+ ds->dev = priv->dev;
+ ds->ops = priv->variant->ds_ops;
+ ds->num_ports = priv->num_ports;
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to register switch\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_unregister_switch() - unregister a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function unregisters the DSA switch.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_unregister_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_unregister_switch(ds);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_shutdown() - shut down a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function shuts down the DSA switch and clears the platform driver data,
+ * to prevent realtek_{smi,mdio}_remove() from running afterwards, which is
+ * possible if the parent bus implements its own .shutdown() as .remove().
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_shutdown(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(priv->dev, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
+
+/**
+ * rtl83xx_remove() - Clean up a Realtek switch driver
+ * @priv: realtek_priv pointer
+ *
+ * If a reset method is available, this function asserts the hard reset of the
+ * switch in order to avoid leaking traffic when the driver is gone.
+ *
+ * Context: Might sleep if the reset controller or the RESET GPIO chip can
+ * sleep.
+ * Return: nothing
+ */
+void rtl83xx_remove(struct realtek_priv *priv)
+{
+ /* leave the device reset asserted */
+ rtl83xx_reset_assert(priv);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
+
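+/* Assert the hardware reset through whichever method was found at probe
+ * time; both reset_control_assert() and gpiod_set_value() tolerate a NULL
+ * handle, so a missing method is silently skipped.
+ */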
+void rtl83xx_reset_assert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to assert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, true);
+}
+
+void rtl83xx_reset_deassert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_deassert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to deassert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, false);
+}
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Realtek DSA switches common module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/rtl83xx.h b/drivers/net/dsa/realtek/rtl83xx.h
new file mode 100644
index 000000000000..c8a0ff8fd75e
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL83XX_H
+#define _RTL83XX_H
+
+struct realtek_interface_info {
+ int (*reg_read)(void *ctx, u32 reg, u32 *val);
+ int (*reg_write)(void *ctx, u32 reg, u32 val);
+};
+
+void rtl83xx_lock(void *ctx);
+void rtl83xx_unlock(void *ctx);
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds);
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info);
+int rtl83xx_register_switch(struct realtek_priv *priv);
+void rtl83xx_unregister_switch(struct realtek_priv *priv);
+void rtl83xx_shutdown(struct realtek_priv *priv);
+void rtl83xx_remove(struct realtek_priv *priv);
+void rtl83xx_reset_assert(struct realtek_priv *priv);
+void rtl83xx_reset_deassert(struct realtek_priv *priv);
+
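+/* Typical call flow for an interface module (a sketch based on the SMI
+ * driver in this series, not a binding contract; my_interface_info is a
+ * placeholder name):
+ *
+ *	priv = rtl83xx_probe(dev, &my_interface_info);
+ *	...interface-specific initialization...
+ *	ret = rtl83xx_register_switch(priv);
+ *	(on any failure: rtl83xx_remove(priv))
+ *
+ * .shutdown maps to rtl83xx_shutdown(); .remove maps to
+ * rtl83xx_unregister_switch() followed by rtl83xx_remove().
+ */
+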
+#endif /* _RTL83XX_H */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 768454aa36d6..d29b5d7af0d7 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -67,18 +67,12 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
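+	/* let the core allocate and free the per-CPU lstats */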
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
+ netdev_lockdep_set_classes(dev);
return 0;
}
-static void dummy_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
{
if (new_carrier)
@@ -90,7 +84,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
- .ndo_uninit = dummy_dev_uninit,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 0e0aa4016858..c5636245f1ca 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6834742057b3..6d429b11e9c6 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
module_init(NS8390p_init_module);
module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index a09f383dd249..828edca8d30c 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -610,4 +610,5 @@ static int init_pcmcia(void)
return 1;
}
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 24f49a8ff903..fd9dcdc356e6 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 265976e3b64a..6cc0e190aa79 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
module_init(stnic_probe);
module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index d70390e9d03d..c24dd4fe7a10 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 5a274b99f299..6a19b5393ed1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -15,9 +15,6 @@ if ETHERNET
config MDIO
tristate
-config SUNGEM_PHY
- tristate
-
source "drivers/net/ethernet/3com/Kconfig"
source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index da3bdd302502..760a9a60bc15 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -21,6 +21,7 @@ config ADIN1110
tristate "Analog Devices ADIN1110 MAC-PHY"
depends on SPI && NET_SWITCHDEV
select CRC8
+ select PHYLIB
help
Say yes here to build support for Analog Devices ADIN1110
Low Power 10BASE-T1L Ethernet MAC-PHY.
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index d7c274af6d4d..8b4ef5121308 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -464,8 +464,9 @@ static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* bitfield of ADIN1110_MDIOACC register will contain
* the requested register value.
*/
- ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ ret = readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
if (ret < 0)
return ret;
@@ -495,8 +496,9 @@ static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
if (ret < 0)
return ret;
- return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ return readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
}
/* ADIN1110 MAC-PHY contains an ADIN1100 PHY.
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 633b321d7fdd..9e9e4a03f1a8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &sq->dma_addr, GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &cq->dma_addr, GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
- &aenq->dma_addr, GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
aenq_caps = 0;
aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ aenq_caps |=
+ (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- netdev_err(ena_dev->net_device,
- "AENQ handlers pointer is NULL\n");
+ netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
}
if (unlikely(!admin_queue->comp_ctx)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is NULL\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is occupied\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
return NULL;
}
@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- netdev_dbg(admin_queue->ena_dev->net_device,
- "Admin queue is full.\n");
+ netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
struct ena_comp_ctx *comp_ctx;
u16 i;
- admin_queue->comp_ctx =
- devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!admin_queue->comp_ctx)) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
@@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ &io_sq->desc_addr.phys_addr, GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
- netdev_err(ena_dev->net_device,
- "Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->bounce_buf_ctrl.base_buffer =
- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- netdev_err(ena_dev->net_device,
- "Bounce buffer memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
GFP_KERNEL);
}
@@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
u8 comp_status)
{
if (unlikely(comp_status != 0))
- netdev_err(admin_queue->ena_dev->net_device,
- "Admin command failed[%u]\n", comp_status);
+ netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
+ comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Command was aborted\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
- comp_ctx->status);
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
@@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set LLQ configurations: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_default_cfg->llq_header_location;
} else {
netdev_err(ena_dev->net_device,
- "Invalid header location control, supported: 0x%x\n",
- supported_feat);
+ "Invalid header location control, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl,
- supported_feat, llq_info->desc_stride_ctrl);
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
}
} else {
llq_info->desc_stride_ctrl = 0;
@@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size = 256;
} else {
netdev_err(ena_dev->net_device,
- "Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
+ "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
+ llq_default_cfg->llq_num_decs_before_header, supported_feat,
+ llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
- netdev_err(ena_dev->net_device,
- "Cannot set LLQ configuration: %d\n", rc);
+ netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(
- admin_queue->completion_timeout));
+ usecs_to_jiffies(admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
+ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
@@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
if (unlikely(i == timeout)) {
netdev_err(ena_dev->net_device,
"Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
goto err;
}
if (read_resp->reg_off != offset) {
- netdev_err(ena_dev->net_device,
- "Read failure: wrong offset provided\n");
+ netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy io sq error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_cq->cdesc_addr.virt_addr) {
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_cq->cdesc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr);
io_cq->cdesc_addr.virt_addr = NULL;
@@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
@@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- netdev_err(ena_dev->net_device,
- "Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
@@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- feature_id);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
return -EOPNOTSUPP;
}
@@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (unlikely(ret))
netdev_err(ena_dev->net_device,
- "Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
+ "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
return ret;
}
@@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION))
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- rss->hash_key =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
@@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_key)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- rss->hash_key, rss->hash_key_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
+ rss->hash_key_dma_addr);
rss->hash_key = NULL;
}
@@ -1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- rss->hash_ctrl =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
@@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_ctrl)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr);
rss->hash_ctrl = NULL;
}
@@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- rss->rss_ind_tbl =
- dma_alloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
+ GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
tbl_size = (1ULL << log_size) * sizeof(u16);
- rss->host_rss_ind_tbl =
- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
if (unlikely(!rss->host_rss_ind_tbl))
goto mem_err2;
@@ -1197,8 +1161,7 @@ mem_err2:
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
rss->rss_ind_tbl = NULL;
mem_err1:
rss->tbl_log_size = 0;
@@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
}
@@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO SQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1284,16 +1245,12 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(uintptr_t)cmd_completion.sq_doorbell_offset);
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
- + cmd_completion.llq_headers_offset);
-
io_sq->desc_addr.pbuf_dev_addr =
(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
cmd_completion.llq_descriptors_offset);
}
- netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
- io_sq->idx, io_sq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1420,8 +1377,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1430,18 +1386,12 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.cq_interrupt_unmask_register_offset);
- if (cmd_completion.cq_head_db_register_offset)
- io_cq->cq_head_db_reg =
- (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.cq_head_db_register_offset);
-
if (cmd_completion.numa_node_register_offset)
io_cq->numa_node_cfg_reg =
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
- io_cq->idx, io_cq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1451,8 +1401,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Invalid queue number %d but the max is %d\n", qid,
+ netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1492,8 +1441,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- ena_delay_exponential_backoff_us(exp++,
- ena_dev->ena_min_poll_delay_us);
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
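/* A minimal standalone sketch of the exponential-backoff delay used by
 * the poll loop above; the helper name and bounds here are illustrative,
 * not the driver's own constants:
 *
 *	#include <unistd.h>
 *
 *	static void backoff_delay_us(unsigned int exp, unsigned int min_us)
 *	{
 *		unsigned long long us = min_us ? min_us : 100;
 *
 *		if (exp > 16)
 *			exp = 16;		// cap the shift to avoid overflow
 *		us <<= exp;
 *		if (us > 5000000ULL)
 *			us = 5000000ULL;	// clamp each wait to 5 seconds
 *		usleep((unsigned int)us);	// callers pass exp++, so waits grow
 *	}
 */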
@@ -1519,8 +1467,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1588,8 +1535,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to config AENQ ret: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1610,8 +1556,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
- width);
+ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
return -EINVAL;
}
@@ -1633,19 +1578,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ctrl_ver = ena_com_reg_bar_read32(ena_dev,
ENA_REGS_CONTROLLER_VERSION_OFF);
- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- dev_info(ena_dev->dmadev,
- "ENA controller version: %d.%d.%d implementation version %d\n",
+ dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1694,20 +1636,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
- sq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
sq->entries = NULL;
size = ADMIN_CQ_SIZE(admin_queue->q_depth);
if (cq->entries)
- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
- cq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
cq->entries = NULL;
size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
- aenq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
aenq->entries = NULL;
}
@@ -1733,10 +1672,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
spin_lock_init(&mmio_read->lock);
- mmio_read->read_resp =
- dma_alloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
@@ -1767,8 +1704,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr);
mmio_read->read_resp = NULL;
}
@@ -1800,8 +1737,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, abort com init\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
return -ENODEV;
}
@@ -1878,8 +1814,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1905,8 +1840,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
/* header length is limited to 8 bits */
- io_sq->tx_max_header_size =
- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
if (ret)
@@ -1938,8 +1872,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -1983,8 +1916,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- if (get_resp.u.max_queue_ext.version !=
- ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
return -EINVAL;
memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
@@ -2025,18 +1957,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
if (!rc)
- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
- sizeof(get_resp.u.hw_hints));
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
else if (rc == -EOPNOTSUPP)
- memset(&get_feat_ctx->hw_hints, 0x0,
- sizeof(get_feat_ctx->hw_hints));
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
else
return rc;
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
if (!rc)
- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
- sizeof(get_resp.u.llq));
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
else if (rc == -EOPNOTSUPP)
memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
else
@@ -2084,8 +2013,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
- while ((READ_ONCE(aenq_common->flags) &
- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the phase bit (ownership) is as expected before
* reading the rest of the descriptor.
*/
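/* The phase-bit handshake in miniature (illustrative layout, not the
 * driver's structures): the consumer checks the phase bit first, then
 * issues a read barrier before touching the rest of the descriptor:
 *
 *	if ((READ_ONCE(desc->flags) & PHASE_MASK) == expected_phase) {
 *		dma_rmb();	// flags check must complete before payload reads
 *		process(desc);
 *	}
 */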
@@ -2094,8 +2022,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- netdev_dbg(ena_dev->net_device,
- "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event */
@@ -2124,8 +2051,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2137,15 +2063,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
- (cap == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
return -ETIME;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, can't reset device\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
return -EINVAL;
}
@@ -2168,8 +2092,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn on\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
return rc;
}
@@ -2177,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn off\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
return rc;
}
@@ -2215,8 +2137,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to get stats. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
return ret;
}
@@ -2228,8 +2149,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
- netdev_err(ena_dev->net_device,
- "Capability %d isn't supported\n",
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
ENA_ADMIN_ENI_STATS);
return -EOPNOTSUPP;
}
@@ -2266,8 +2186,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- ENA_ADMIN_MTU);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
return -EOPNOTSUPP;
}
@@ -2286,8 +2205,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set mtu %d. error: %d\n", mtu, ret);
+ netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@@ -2301,8 +2219,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to get offload capabilities %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
return ret;
}
@@ -2320,8 +2237,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
struct ena_admin_get_feat_resp get_resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
return -EOPNOTSUPP;
@@ -2334,8 +2250,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- netdev_err(ena_dev->net_device,
- "Func hash %d isn't supported by device, abort\n",
+ netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EOPNOTSUPP;
}
@@ -2365,8 +2280,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to set hash function %d. error: %d\n",
+ netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
rss->hash_func, ret);
return -EINVAL;
}
@@ -2398,16 +2312,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- netdev_err(ena_dev->net_device,
- "Flow hash function %d isn't supported\n", func);
+ netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
if ((func == ENA_ADMIN_TOEPLITZ) && key) {
if (key_len != sizeof(hash_key->key)) {
netdev_err(ena_dev->net_device,
- "key len (%u) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
+ "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
+ sizeof(hash_key->key));
return -EINVAL;
}
memcpy(hash_key->key, key, key_len);
@@ -2495,8 +2408,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_INPUT)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
return -EOPNOTSUPP;
@@ -2527,8 +2439,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set hash input. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2605,8 +2516,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
- proto);
+ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
return -EINVAL;
}
@@ -2658,8 +2568,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
@@ -2699,8 +2608,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set indirect table. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2779,9 +2687,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- host_attr->host_info =
- dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
@@ -2827,8 +2734,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
if (host_attr->debug_area_virt_addr) {
dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
host_attr->debug_area_virt_addr = NULL;
}
}
@@ -2877,8 +2783,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set host attributes: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2896,8 +2801,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- netdev_err(ena_dev->net_device,
- "Illegal interrupt delay granularity value\n");
+ netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
return -EFAULT;
}
@@ -2935,14 +2839,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EOPNOTSUPP) {
- netdev_dbg(ena_dev->net_device,
- "Feature %d isn't supported\n",
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
netdev_err(ena_dev->net_device,
- "Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
}
/* no moderation supported, disable adaptive support */
@@ -2990,8 +2892,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- netdev_err(ena_dev->net_device,
- "The size of the LLQ entry is smaller than needed\n");
+ netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 3c5081d9d25d..fea57eb8e58b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -109,16 +109,13 @@ struct ena_com_io_cq {
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
- /* The completion queue head doorbell register */
- u32 __iomem *cq_head_db_reg;
-
/* numa configuration register (for TPH) */
u32 __iomem *numa_node_cfg_reg;
/* The value to write to the above register to unmask
* the interrupt of this queue
*/
- u32 msix_vector;
+ u32 msix_vector ____cacheline_aligned;
enum queue_direction direction;
@@ -134,7 +131,6 @@ struct ena_com_io_cq {
/* Device queue index */
u16 idx;
u16 head;
- u16 last_head_update;
u8 phase;
u8 cdesc_entry_size_in_bytes;
@@ -158,7 +154,6 @@ struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
u32 __iomem *db_addr;
- u8 __iomem *header_addr;
enum queue_direction direction;
enum ena_admin_placement_policy_type mem_queue_type;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f9f886289b97..933e619b3a31 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
io_sq->entries_in_tx_burst_left--;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
- io_sq->qid, io_sq->entries_in_tx_burst_left);
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
+ io_sq->entries_in_tx_burst_left);
}
/* Make sure everything was written into the bounce buffer before
@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
wmb();
/* The line is completed. Copy it to dev */
- __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
- bounce_buffer, (llq_info->desc_list_entry_size) / 8);
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
+ (llq_info->desc_list_entry_size) / 8);
io_sq->tail++;
@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
header_offset =
llq_info->descs_num_before_header * io_sq->desc_entry_size;
- if (unlikely((header_offset + header_len) >
- llq_info->desc_list_entry_size)) {
+ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return NULL;
}
@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
"l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
/*****************************************************************************/
@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (unlikely(header_len > io_sq->tx_max_header_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Header size is too large %d max header: %d\n",
- header_len, io_sq->tx_max_header_size);
+ "Header size is too large %d max header: %d\n", header_len,
+ io_sq->tx_max_header_size);
return -EINVAL;
}
- if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
- !buffer_to_push)) {
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Push header wasn't provided in LLQ mode\n");
return -EINVAL;
@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
}
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
- nb_hw_desc);
+ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
- ena_rx_ctx->max_bufs);
+ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
return -ENOSPC;
}
@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
io_sq->next_to_comp += nb_hw_desc;
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
- io_sq->qid, io_sq->next_to_comp);
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+ io_sq->next_to_comp);
/* Get rx flags from the last pkt */
ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->req_id = req_id;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
- __func__, io_sq->qid, req_id);
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
+ req_id);
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 372b259279ec..72b019758caa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -8,8 +8,6 @@
#include "ena_com.h"
-/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
-#define ENA_COMP_HEAD_THRESH 4
/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
@@ -145,8 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
}
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Queue: %d num_descs: %d num_entries_needed: %d\n",
- io_sq->qid, num_descs, num_entries_needed);
+ "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
+ num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
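/* Worked example for the check above (illustrative numbers): with
 * entries_in_tx_burst_left == 2 and a packet needing 3 LLQ entries,
 * num_entries_needed > entries_in_tx_burst_left holds, so the doorbell
 * must be written before the packet can be queued.
 */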
@@ -157,43 +155,20 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 tail = io_sq->tail;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Write submission queue doorbell for queue: %d tail: %d\n",
- io_sq->qid, tail);
+ "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
+ max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
return 0;
}
-static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
-{
- u16 unreported_comp, head;
- bool need_update;
-
- if (unlikely(io_cq->cq_head_db_reg)) {
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (unlikely(need_update)) {
- netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
- }
- }
-
- return 0;
-}
-
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
u8 numa_node)
{
@@ -248,8 +223,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Invalid req id %d\n", cdesc->req_id);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
+ cdesc->req_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 1c0a7828d397..09e7da1a69c9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
- NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static struct ena_aenq_handlers aenq_handlers;
@@ -47,19 +47,44 @@ static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
struct ena_adapter *adapter = netdev_priv(dev);
+ unsigned int time_since_last_napi, threshold;
+ struct ena_ring *tx_ring;
+ int napi_scheduled;
+
+ if (txqueue >= adapter->num_io_queues) {
+ netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
+ goto schedule_reset;
+ }
+
+ threshold = jiffies_to_usecs(dev->watchdog_timeo);
+ tx_ring = &adapter->tx_ring[txqueue];
+ time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
+
+ netdev_err(dev,
+ "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
+ txqueue,
+ threshold,
+ time_since_last_napi,
+ napi_scheduled);
+
+ if (threshold < time_since_last_napi && napi_scheduled) {
+ netdev_err(dev,
+ "napi handler hasn't been called for a long time but is scheduled\n");
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ }
+schedule_reset:
/* Change the state of the device to trigger reset
* Check that we are not already in the middle of a reset trigger
*/
-
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
+ ena_reset_device(adapter, reset_reason);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
-
- netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -116,11 +141,9 @@ int ena_xmit_common(struct ena_adapter *adapter,
if (unlikely(rc)) {
netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
- ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
- &ring->syncp);
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
if (rc != -ENOMEM)
- ena_reset_device(adapter,
- ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -485,8 +508,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
*/
page = dev_alloc_page();
if (!page) {
- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
return ERR_PTR(-ENOSPC);
}
@@ -523,7 +545,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
/* We handle DMA here */
page = ena_alloc_map_page(rx_ring, &dma);
- if (unlikely(IS_ERR(page)))
+ if (IS_ERR(page))
return PTR_ERR(page);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
@@ -545,8 +567,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info,
unsigned long attrs)
{
- dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL, attrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
@@ -819,8 +841,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
&req_id);
if (rc) {
if (unlikely(rc == -EINVAL))
- handle_invalid_req_id(tx_ring, req_id, NULL,
- false);
+ handle_invalid_req_id(tx_ring, req_id, NULL, false);
break;
}
@@ -856,7 +877,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
@@ -1046,8 +1066,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
DMA_FROM_DEVICE);
if (!reuse_rx_buf_page)
- ena_unmap_rx_buff_attrs(rx_ring, rx_info,
- DMA_ATTR_SKIP_CPU_SYNC);
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
page_offset + buf_offset, len, buf_len);
@@ -1303,10 +1322,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
- if (refill_required > refill_threshold) {
- ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ if (refill_required > refill_threshold)
ena_refill_rx_bufs(rx_ring, refill_required);
- }
if (xdp_flags & ENA_XDP_REDIRECT)
xdp_do_flush();
@@ -1320,8 +1337,7 @@ error:
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
- ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
@@ -1811,8 +1827,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
- netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -2134,6 +2149,12 @@ int ena_up(struct ena_adapter *adapter)
*/
ena_init_napi_in_range(adapter, 0, io_queue_count);
+ /* Enabling DIM needs to happen before enabling IRQs since DIM
+ * is run from the napi routine
+ */
+ if (ena_com_interrupt_moderation_supported(adapter->ena_dev))
+ ena_com_enable_adaptive_moderation(adapter->ena_dev);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -2184,7 +2205,7 @@ void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2197,8 +2218,6 @@ void ena_down(struct ena_adapter *adapter)
/* After this point the napi handler won't enable the tx queue */
ena_napi_disable_in_range(adapter, 0, io_queue_count);
- /* After destroy the queue there won't be any new interrupts */
-
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc;
@@ -2588,8 +2607,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(rc))
goto error_drop_packet;
- skb_tx_timestamp(skb);
-
next_to_use = tx_ring->next_to_use;
req_id = tx_ring->free_ids[next_to_use];
tx_info = &tx_ring->tx_buffer_info[req_id];
@@ -2653,6 +2670,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ skb_tx_timestamp(skb);
+
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
/* trigger the dma engine. ena_ring_tx_doorbell()
* calls a memory barrier inside it.
@@ -2670,22 +2689,6 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- u16 qid;
- /* we suspect that this is good for in-kernel network services that
- * want to loop incoming skb rx to tx in normal user generated traffic,
- * most probably we will not get to this
- */
- if (skb_rx_queue_recorded(skb))
- qid = skb_get_rx_queue(skb);
- else
- qid = netdev_pick_tx(dev, skb, NULL);
-
- return qid;
-}
-
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -2764,8 +2767,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- netif_warn(adapter, drv, adapter->netdev,
- "Cannot set host attributes\n");
+ netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
else
netif_err(adapter, drv, adapter->netdev,
"Cannot set host attributes\n");
@@ -2863,18 +2865,16 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_open = ena_open,
.ndo_stop = ena_close,
.ndo_start_xmit = ena_start_xmit,
- .ndo_select_queue = ena_select_queue,
.ndo_get_stats64 = ena_get_stats64,
.ndo_tx_timeout = ena_tx_timeout,
.ndo_change_mtu = ena_change_mtu,
- .ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp,
.ndo_xdp_xmit = ena_xdp_xmit,
};
-static void ena_calc_io_queue_size(struct ena_adapter *adapter,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -2933,6 +2933,18 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+ if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
+ max_tx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
+ max_rx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
/* When forcing large headers, we multiply the entry size by 2, and therefore divide
* the queue size by 2, leaving the amount of memory used by the queues unchanged.
*/
@@ -2963,6 +2975,8 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
adapter->max_rx_ring_size = max_rx_queue_size;
adapter->requested_tx_ring_size = tx_queue_size;
adapter->requested_rx_ring_size = rx_queue_size;
+
+ return 0;
}
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -3070,6 +3084,7 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
bool *wd_state)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
struct ena_llq_configurations llq_config;
struct device *dev = &pdev->dev;
bool readless_supported;
@@ -3159,15 +3174,19 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(dev, "ENA device init failed\n");
+ netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc);
goto err_admin_init;
}
- ena_calc_io_queue_size(adapter, get_feat_ctx);
+ rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
+ if (unlikely(rc))
+ goto err_admin_init;
return 0;
err_admin_init:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
@@ -3226,7 +3245,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
- if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ if (dev_up)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
@@ -3372,14 +3391,18 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
unsigned int time_since_last_napi;
unsigned int missing_tx_comp_to;
bool is_tx_comp_time_expired;
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
+ int napi_scheduled;
u32 missed_tx = 0;
int i, rc = 0;
+ missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
+
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
@@ -3406,25 +3429,45 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
adapter->missing_tx_completion_to);
if (unlikely(is_tx_comp_time_expired)) {
- if (!tx_buf->print_once) {
- time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
- missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
- netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
- tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
+ time_since_last_napi =
+ jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED);
+
+ if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) {
+ /* We suspect napi isn't called because the
+ * bottom half is not being run. Require a bigger
+ * timeout in these cases
+ */
+ if (!time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))
+ continue;
+
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
- tx_buf->print_once = 1;
missed_tx++;
+
+ if (tx_buf->print_once)
+ continue;
+
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n",
+ tx_ring->qid, i, time_since_last_napi, napi_scheduled);
+
+ tx_buf->print_once = 1;
}
}
if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev,
- "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n",
missed_tx,
- adapter->missing_tx_completion_threshold);
- ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
+ adapter->missing_tx_completion_threshold,
+ missing_tx_comp_to);
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Resetting the device\n");
+
+ ena_reset_device(adapter, reset_reason);
rc = -EIO;
}
@@ -3762,8 +3805,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
- ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
+ 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir;
@@ -4040,8 +4083,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
free_irq_cpu_rmap(netdev->rx_cpu_rmap);
netdev->rx_cpu_rmap = NULL;
}
-#endif /* CONFIG_RFS_ACCEL */
+#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 1e007a41a525..2c3d6a77ea79 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -21,6 +21,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index fc1c4ef73ba3..337c435d3ce9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -412,7 +412,6 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d done. total pkts: %d\n",
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index 5beadabc2136..c83a0a80d533 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
return nq_work;
}
+static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
+{
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
+ pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
+ return false;
+
+ return refcount_inc_not_zero(&pdsc->adminq_refcnt);
+}
+
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
union pds_core_adminq_comp *comp;
@@ -73,11 +82,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
unsigned long irqflags;
int nq_work = 0;
int aq_work = 0;
- int credits;
- /* Don't process AdminQ when shutting down */
- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
__func__);
return;
}
@@ -119,11 +127,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
credits:
/* Return the interrupt credits, one for each completion */
- credits = nq_work + aq_work;
- if (credits)
- pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
- credits,
- PDS_CORE_INTR_CRED_REARM);
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ nq_work + aq_work,
+ PDS_CORE_INTR_CRED_REARM);
+ refcount_dec(&pdsc->adminq_refcnt);
}
void pdsc_work_thread(struct work_struct *work)
@@ -135,18 +142,19 @@ void pdsc_work_thread(struct work_struct *work)
irqreturn_t pdsc_adminq_isr(int irq, void *data)
{
- struct pdsc_qcq *qcq = data;
- struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc *pdsc = data;
+ struct pdsc_qcq *qcq;
- /* Don't process AdminQ when shutting down */
- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
__func__);
return IRQ_HANDLED;
}
+ qcq = &pdsc->adminqcq;
queue_work(pdsc->wq, &qcq->work);
- pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
+ refcount_dec(&pdsc->adminq_refcnt);
return IRQ_HANDLED;
}
@@ -179,10 +187,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Check that the FW is running */
if (!pdsc_is_fw_running(pdsc)) {
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
- dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
- __func__, fw_status);
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
+ __func__);
+ }
ret = -ENXIO;
goto err_out_unlock;
@@ -230,6 +244,12 @@ int pdsc_adminq_post(struct pdsc *pdsc,
int err = 0;
int index;
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
+ __func__, cmd->opcode);
+ return -ENXIO;
+ }
+
wc.qcq = &pdsc->adminqcq;
index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
if (index < 0) {
@@ -248,10 +268,16 @@ int pdsc_adminq_post(struct pdsc *pdsc,
break;
if (!pdsc_is_fw_running(pdsc)) {
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
- dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
- __func__, fw_status);
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
+ __func__);
+ }
err = -ENXIO;
break;
}
@@ -285,6 +311,8 @@ err_out:
queue_work(pdsc->wq, &pdsc->health_work);
}
+ refcount_dec(&pdsc->adminq_refcnt);
+
return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 11c23a7f3172..2babea110991 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
if (err < 0) {
dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out;
+ kfree(padev);
+ return ERR_PTR(err);
}
err = auxiliary_device_add(aux_dev);
if (err) {
dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out_uninit;
+ auxiliary_device_uninit(aux_dev);
+ return ERR_PTR(err);
}
return padev;
-
-err_out_uninit:
- auxiliary_device_uninit(aux_dev);
-err_out:
- kfree(padev);
- return ERR_PTR(err);
}
int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
@@ -184,6 +180,9 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
struct pds_auxiliary_dev *padev;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
padev = pf->vfs[cf->vf_id].padev;
@@ -202,14 +201,27 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
{
struct pds_auxiliary_dev *padev;
- enum pds_core_vif_types vt;
char devname[PDS_DEVNAME_LEN];
+ enum pds_core_vif_types vt;
+ unsigned long mask;
u16 vt_support;
int client_id;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
+ mask = BIT_ULL(PDSC_S_FW_DEAD) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (cf->state & mask) {
+ dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n",
+ __func__, cf->state);
+ err = -ENXIO;
+ goto out_unlock;
+ }
+
/* We only support vDPA so far, so it is the only type we verify
* is available in the Core device and enabled in the devlink
* param. In the future this might
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 0d2091e9eb28..9662ee72814c 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -125,10 +125,11 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
snprintf(name, sizeof(name), "%s-%d-%s",
PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
- index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
+ index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
if (index < 0)
return index;
qcq->intx = index;
+ qcq->cq.bound_intr = &pdsc->intr_info[index];
return 0;
}
@@ -222,7 +223,6 @@ int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
goto err_out_free_irq;
}
- qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
qcq->cq.num_descs = num_descs;
qcq->cq.desc_size = cq_desc_size;
qcq->cq.tail_idx = 0;
@@ -300,6 +300,17 @@ err_out:
return err;
}
+static void pdsc_core_uninit(struct pdsc *pdsc)
+{
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ if (pdsc->kern_dbpage) {
+ iounmap(pdsc->kern_dbpage);
+ pdsc->kern_dbpage = NULL;
+ }
+}
+
static int pdsc_core_init(struct pdsc *pdsc)
{
union pds_core_dev_comp comp = {};
@@ -310,9 +321,32 @@ static int pdsc_core_init(struct pdsc *pdsc)
struct pds_core_dev_init_data_in cidi;
u32 dbid_count;
u32 dbpage_num;
+ int numdescs;
size_t sz;
int err;
+ /* Scale the descriptor ring length based on number of CPUs and VFs */
+ numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+ numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+ numdescs = roundup_pow_of_two(numdescs);
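+	/* e.g., assuming a minimum length of 16: with 12 online CPUs and
+	 * 8 total VFs this is roundup_pow_of_two(max(16, 12) + 2 * 8) = 32
+	 * descriptors (illustrative numbers only)
+	 */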
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+ sizeof(union pds_core_adminq_cmd),
+ sizeof(union pds_core_adminq_comp),
+ 0, &pdsc->adminqcq);
+ if (err)
+ return err;
+
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
+ PDS_CORE_QCQ_F_NOTIFYQ,
+ PDSC_NOTIFYQ_LENGTH,
+ sizeof(struct pds_core_notifyq_cmd),
+ sizeof(union pds_core_notifyq_comp),
+ 0, &pdsc->notifyqcq);
+ if (err)
+ goto err_out_uninit;
+
cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
@@ -336,7 +370,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
if (err) {
dev_err(pdsc->dev, "Device init command failed: %pe\n",
ERR_PTR(err));
- return err;
+ goto err_out_uninit;
}
pdsc->hw_index = le32_to_cpu(cido.core_hw_index);
@@ -346,7 +380,8 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
if (!pdsc->kern_dbpage) {
dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_uninit;
}
pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
@@ -359,6 +394,10 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->last_eid = 0;
+ return 0;
+
+err_out_uninit:
+ pdsc_core_uninit(pdsc);
return err;
}
@@ -401,41 +440,12 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
int pdsc_setup(struct pdsc *pdsc, bool init)
{
- int numdescs;
int err;
- if (init)
- err = pdsc_dev_init(pdsc);
- else
- err = pdsc_dev_reinit(pdsc);
+ err = pdsc_dev_init(pdsc);
if (err)
return err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
- PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
- numdescs,
- sizeof(union pds_core_adminq_cmd),
- sizeof(union pds_core_adminq_comp),
- 0, &pdsc->adminqcq);
- if (err)
- goto err_out_teardown;
-
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
- PDS_CORE_QCQ_F_NOTIFYQ,
- PDSC_NOTIFYQ_LENGTH,
- sizeof(struct pds_core_notifyq_cmd),
- sizeof(union pds_core_notifyq_comp),
- 0, &pdsc->notifyqcq);
- if (err)
- goto err_out_teardown;
-
- /* NotifyQ rides on the AdminQ interrupt */
- pdsc->notifyqcq.intx = pdsc->adminqcq.intx;
-
/* Set up the Core with the AdminQ and NotifyQ info */
err = pdsc_core_init(pdsc);
if (err)
@@ -450,6 +460,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
pdsc_debugfs_add_viftype(pdsc);
}
+ refcount_set(&pdsc->adminq_refcnt, 1);
clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
return 0;
@@ -460,32 +471,19 @@ err_out_teardown:
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
- int i;
-
if (!pdsc->pdev->is_virtfn)
pdsc_devcmd_reset(pdsc);
- pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
- pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+ if (pdsc->adminqcq.work.func)
+ cancel_work_sync(&pdsc->adminqcq.work);
+
+ pdsc_core_uninit(pdsc);
if (removing) {
kfree(pdsc->viftype_status);
pdsc->viftype_status = NULL;
}
- if (pdsc->intr_info) {
- for (i = 0; i < pdsc->nintrs; i++)
- pdsc_intr_free(pdsc, i);
-
- if (removing) {
- kfree(pdsc->intr_info);
- pdsc->intr_info = NULL;
- }
- }
-
- if (pdsc->kern_dbpage) {
- iounmap(pdsc->kern_dbpage);
- pdsc->kern_dbpage = NULL;
- }
+ pdsc_dev_uninit(pdsc);
set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
@@ -512,6 +510,24 @@ void pdsc_stop(struct pdsc *pdsc)
PDS_CORE_INTR_MASK_SET);
}
+static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
+{
+ /* The driver initializes adminq_refcnt to 1 when the adminq is
+ * allocated and ready for use. Other users/requesters increment the
+ * refcnt while it is in use. Once the refcnt drops to 1 the adminq is
+ * no longer in use, so the refcnt can be cleared and the adminq freed.
+ * Before calling this function the driver sets PDSC_S_FW_DEAD, which
+ * causes any subsequent attempt to use the adminq and increment the
+ * refcnt to fail. This guarantees that this function will eventually
+ * exit.
+ */
+ while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
+ dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
+ __func__);
+ cpu_relax();
+ }
+}
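The other half of this protocol is the requester side, which is not in this
hunk: each adminq user must take a reference before posting a command and
drop it when the completion arrives. A minimal sketch of that path, using a
hypothetical helper name (the patch itself open-codes this in the adminq
request code):

    static int pdsc_adminq_get(struct pdsc *pdsc)
    {
            /* PDSC_S_FW_DEAD is set before teardown begins, so new
             * work is refused up front
             */
            if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
                    return -ENXIO;

            /* Succeeds only while the refcnt is nonzero, i.e. while
             * the adminq is alive; pair with refcount_dec() when done
             */
            if (!refcount_inc_not_zero(&pdsc->adminq_refcnt))
                    return -ENXIO;

            return 0;
    }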
+
void pdsc_fw_down(struct pdsc *pdsc)
{
union pds_core_notifyq_comp reset_event = {
@@ -527,6 +543,8 @@ void pdsc_fw_down(struct pdsc *pdsc)
if (pdsc->pdev->is_virtfn)
return;
+ pdsc_adminq_wait_and_dec_once_unused(pdsc);
+
/* Notify clients of fw_down */
if (pdsc->fw_reporter)
devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
@@ -577,14 +595,19 @@ err_out:
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+ u8 fw_status;
+
+ /* Some sort of teardown is already in progress */
+ if (!pdsc->info_regs)
+ return;
+
+ fw_status = ioread8(&pdsc->info_regs->fw_status);
/* is PCI broken? */
if (fw_status != PDS_RC_BAD_PCI)
return;
- pdsc_reset_prepare(pdsc->pdev);
- pdsc_reset_done(pdsc->pdev);
+ pci_reset_function(pdsc->pdev);
}
void pdsc_health_thread(struct work_struct *work)
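A note on the pdsc_check_pci_health() change above: rather than calling the
two reset halves directly, the driver now lets the PCI core run a full
function-level reset, which invokes the same callbacks in order. Roughly
(simplified; the callback names are from this patch, the sequencing is the
PCI core's):

    pci_reset_function(pdev);
            /* -> .reset_prepare = pdsc_reset_prepare()  (quiesce, fw_down)  */
            /* -> PCI core performs the function reset, e.g. an FLR          */
            /* -> .reset_done    = pdsc_reset_done()     (remap BARs, fw_up) */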
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index e35d3e7006bf..92d7657dd614 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -184,6 +184,7 @@ struct pdsc {
struct mutex devcmd_lock; /* lock for dev_cmd operations */
struct mutex config_lock; /* lock for configuration operations */
spinlock_t adminq_lock; /* lock for adminq operations */
+ refcount_t adminq_refcnt;
struct pds_core_dev_info_regs __iomem *info_regs;
struct pds_core_dev_cmd_regs __iomem *cmd_regs;
struct pds_core_intr __iomem *intr_ctrl;
@@ -280,11 +281,8 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
union pds_core_dev_comp *comp, int max_seconds);
int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
-int pdsc_dev_reinit(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
-
-void pdsc_reset_prepare(struct pci_dev *pdev);
-void pdsc_reset_done(struct pci_dev *pdev);
+void pdsc_dev_uninit(struct pdsc *pdsc);
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 8ec392299b7d..6bdd02b7aa6d 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -32,8 +32,8 @@ void pdsc_debugfs_del_dev(struct pdsc *pdsc)
static int identity_show(struct seq_file *seq, void *v)
{
- struct pdsc *pdsc = seq->private;
struct pds_core_dev_identity *ident;
+ struct pdsc *pdsc = seq->private;
int vt;
ident = &pdsc->dev_ident;
@@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity);
void pdsc_debugfs_add_ident(struct pdsc *pdsc)
{
+ /* This file will already exist in the reset flow */
+ if (debugfs_lookup("identity", pdsc->dentry))
+ return;
+
debugfs_create_file("identity", 0400, pdsc->dentry,
pdsc, &identity_fops);
}
@@ -102,10 +106,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
- struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry;
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry, *intr_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
- struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
struct pdsc_queue *q = &qcq->q;
struct pdsc_cq *cq = &qcq->cq;
@@ -143,6 +145,8 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
if (qcq->flags & PDS_CORE_QCQ_F_INTR) {
+ struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
+
intr_dentry = debugfs_create_dir("intr", qcq->dentry);
if (IS_ERR_OR_NULL(intr_dentry))
return;
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index 31940b857e0e..e494e1298dc9 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -57,6 +57,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
bool pdsc_is_fw_running(struct pdsc *pdsc)
{
+ if (!pdsc->info_regs)
+ return false;
+
pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
pdsc->last_fw_time = jiffies;
pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
@@ -182,13 +185,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
{
int err;
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
pdsc_devcmd_dbell(pdsc);
err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
- memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
queue_work(pdsc->wq, &pdsc->health_work);
+ else
+ memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
return err;
}
@@ -309,11 +316,20 @@ static int pdsc_identify(struct pdsc *pdsc)
return 0;
}
-int pdsc_dev_reinit(struct pdsc *pdsc)
+void pdsc_dev_uninit(struct pdsc *pdsc)
{
- pdsc_init_devinfo(pdsc);
+ if (pdsc->intr_info) {
+ int i;
+
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
- return pdsc_identify(pdsc);
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ pdsc->nintrs = 0;
+ }
+
+ pci_free_irq_vectors(pdsc->pdev);
}
int pdsc_dev_init(struct pdsc *pdsc)
@@ -341,10 +357,8 @@ int pdsc_dev_init(struct pdsc *pdsc)
/* Get intr_info struct array for tracking */
pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL);
- if (!pdsc->intr_info) {
- err = -ENOMEM;
- goto err_out;
- }
+ if (!pdsc->intr_info)
+ return -ENOMEM;
err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX);
if (err != nintrs) {
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index e9948ea5bbcd..54864f27c87a 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -111,7 +111,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
mutex_lock(&pdsc->devcmd_lock);
err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
- memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ if (!err)
+ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
if (err && err != -EIO)
return err;
diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c
index 90a811f3878a..fa626719e68d 100644
--- a/drivers/net/ethernet/amd/pds_core/fw.c
+++ b/drivers/net/ethernet/amd/pds_core/fw.c
@@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
dev_info(pdsc->dev, "Installing firmware\n");
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
dl = priv_to_devlink(pdsc);
devlink_flash_update_status_notify(dl, "Preparing to flash",
NULL, 0, 0);
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 3080898d7b95..ab6133e7db42 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -37,9 +37,15 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
struct pdsc_dev_bar *bars = pdsc->bars;
unsigned int i;
+ pdsc->info_regs = NULL;
+ pdsc->cmd_regs = NULL;
+ pdsc->intr_status = NULL;
+ pdsc->intr_ctrl = NULL;
+
for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
if (bars[i].vaddr)
pci_iounmap(pdsc->pdev, bars[i].vaddr);
+ bars[i].vaddr = NULL;
}
}
@@ -293,7 +299,7 @@ err_out_stop:
err_out_teardown:
pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
err_out_unmap_bars:
- del_timer_sync(&pdsc->wdtimer);
+ timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
destroy_workqueue(pdsc->wq);
mutex_destroy(&pdsc->config_lock);
@@ -420,7 +426,7 @@ static void pdsc_remove(struct pci_dev *pdev)
*/
pdsc_sriov_configure(pdev, 0);
- del_timer_sync(&pdsc->wdtimer);
+ timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
destroy_workqueue(pdsc->wq);
@@ -433,7 +439,6 @@ static void pdsc_remove(struct pci_dev *pdev)
mutex_destroy(&pdsc->config_lock);
mutex_destroy(&pdsc->devcmd_lock);
- pci_free_irq_vectors(pdev);
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
}
@@ -445,19 +450,47 @@ static void pdsc_remove(struct pci_dev *pdev)
devlink_free(dl);
}
-void pdsc_reset_prepare(struct pci_dev *pdev)
+static void pdsc_stop_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->health_work.func)
+ cancel_work_sync(&pdsc->health_work);
+}
+
+static void pdsc_restart_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
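+ /* jiffies + 1 expires almost immediately, so firmware state is
+ * re-checked as soon as the health thread is restarted
+ */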
+ mod_timer(&pdsc->wdtimer, jiffies + 1);
+}
+
+static void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
+ pdsc_stop_health_thread(pdsc);
pdsc_fw_down(pdsc);
- pci_free_irq_vectors(pdev);
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_del(pdsc, pf);
+ }
+
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
}
-void pdsc_reset_done(struct pci_dev *pdev)
+static void pdsc_reset_done(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
struct device *dev = pdsc->dev;
@@ -486,12 +519,44 @@ void pdsc_reset_done(struct pci_dev *pdev)
}
pdsc_fw_up(pdsc);
+ pdsc_restart_health_thread(pdsc);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_add(pdsc, pf);
+ }
+}
+
+static pci_ers_result_t pdsc_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ if (error == pci_channel_io_frozen) {
+ pdsc_reset_prepare(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void pdsc_pci_error_resume(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ pci_reset_function_locked(pdev);
}
static const struct pci_error_handlers pdsc_err_handler = {
/* FLR handling */
.reset_prepare = pdsc_reset_prepare,
.reset_done = pdsc_reset_done,
+
+ /* AER handling */
+ .error_detected = pdsc_pci_error_detected,
+ .resume = pdsc_pci_error_resume,
};
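For the AER half added above, the PCI core drives these handlers roughly as
follows when a fatal (frozen) error is reported; a simplified sketch, not
the core's exact code:

    /* MMIO is blocked: quiesce the device and ask for a reset */
    state = pdsc_pci_error_detected(pdev, pci_channel_io_frozen);
    /* -> tears things down via pdsc_reset_prepare() and returns
     *    PCI_ERS_RESULT_NEED_RESET
     */

    /* after recovery the core calls .resume; if firmware is still
     * marked dead, force a fresh function reset
     */
    pdsc_pci_error_resume(pdev);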
static struct pci_driver pdsc_driver = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 18a6c8d99fa0..a2606ee3b0a5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -15,6 +15,7 @@
#include "aq_macsec.h"
#include "aq_main.h"
+#include <linux/linkmode.h>
#include <linux/ptp_clock_kernel.h>
static void aq_ethtool_get_regs(struct net_device *ndev,
@@ -681,23 +682,19 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev,
return 0;
}
-static u32 eee_mask_to_ethtool_mask(u32 speed)
+static void eee_mask_to_ethtool_mask(unsigned long *mode, u32 speed)
{
- u32 rate = 0;
-
if (speed & AQ_NIC_RATE_EEE_10G)
- rate |= SUPPORTED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_1G)
- rate |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_100M)
- rate |= SUPPORTED_100baseT_Full;
-
- return rate;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
}
-static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
@@ -713,14 +710,14 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
if (err < 0)
return err;
- eee->supported = eee_mask_to_ethtool_mask(supported_rates);
+ eee_mask_to_ethtool_mask(eee->supported, supported_rates);
if (aq_nic->aq_nic_cfg.eee_speeds)
- eee->advertised = eee->supported;
+ linkmode_copy(eee->advertised, eee->supported);
- eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
+ eee_mask_to_ethtool_mask(eee->lp_advertised, rate);
- eee->eee_enabled = !!eee->advertised;
+ eee->eee_enabled = !linkmode_empty(eee->advertised);
eee->tx_lpi_enabled = eee->eee_enabled;
if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK)
@@ -729,7 +726,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
return 0;
}
-static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index abd4832e4ed2..5acb3e16b567 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
return 0;
err_exit_hwts_rx:
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
@@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
aq_ring_free(&aq_ptp->ptp_tx);
aq_ring_free(&aq_ptp->ptp_rx);
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index cda8597b4e14..f7433abd6591 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self)
}
}
+void aq_ring_hwts_rx_free(struct aq_ring_s *self)
+{
+ if (!self)
+ return;
+
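+ /* Mirror of aq_ring_hwts_rx_alloc(), which sizes the coherent
+ * buffer as size * dx_size + AQ_CFG_RXDS_DEF; freeing it through
+ * the generic aq_ring_free() would pass a smaller size to
+ * dma_free_coherent()
+ */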
+ if (self->dx_ring) {
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size + AQ_CFG_RXDS_DEF,
+ self->dx_ring, self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
+}
+
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
unsigned int count;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 52847310740a..d627ace850ff 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self);
int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic, unsigned int idx,
unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_free(struct aq_ring_s *self);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 29b04a274d07..a806dadc4196 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -31,6 +31,20 @@ static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
priv->irq_mask |= mask;
}
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ /* Only supported with internal phys */
+ if (!intf->internal_phy)
+ return;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+}
+
void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
{
struct bcmasp_priv *priv = intf->parent;
@@ -79,6 +93,9 @@ static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
__napi_schedule_irqoff(&intf->tx_napi);
}
}
+
+ if (status & ASP_INTR2_PHY_EVENT(intf->channel))
+ phy_mac_interrupt(intf->ndev->phydev);
}
static irqreturn_t bcmasp_isr(int irq, void *data)
@@ -535,9 +552,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
- if (j == *rule_cnt)
- return -EMSGSIZE;
-
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -547,6 +561,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
priv->net_filters[i - 1].wake_filter)
continue;
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
rule_locs[j++] = priv->net_filters[i].fs.location;
}
@@ -972,7 +989,26 @@ static void bcmasp_core_init(struct bcmasp_priv *priv)
ASP_INTR2_CLEAR);
}
-static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
+static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
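+ /* ASP v2.2 has separate CTRL2 clock selects for the core and the
+ * CPU, so both are switched together here; earlier revisions have
+ * a single select, handled by bcmasp_core_clock_select_one() below
+ */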
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
{
u32 reg;
@@ -1166,6 +1202,24 @@ static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
}
}
+static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
+{
+ u32 reg, phy_lpi_overwrite;
+
+ reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG);
+ phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI :
+ ASP_EDPKT_SPARE_REG_GPHY_LPI;
+
+ if (en)
+ reg |= phy_lpi_overwrite;
+ else
+ reg &= ~phy_lpi_overwrite;
+
+ rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG);
+
+ usleep_range(50, 100);
+}
+
static struct bcmasp_hw_info v20_hw_info = {
.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
.umac2fb = UMAC2FB_OFFSET,
@@ -1178,6 +1232,7 @@ static const struct bcmasp_plat_data v20_plat_data = {
.init_wol = bcmasp_init_wol_per_intf,
.enable_wol = bcmasp_enable_wol_per_intf,
.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
+ .core_clock_select = bcmasp_core_clock_select_one,
.hw_info = &v20_hw_info,
};
@@ -1194,17 +1249,39 @@ static const struct bcmasp_plat_data v21_plat_data = {
.init_wol = bcmasp_init_wol_shared,
.enable_wol = bcmasp_enable_wol_shared,
.destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_one,
+ .hw_info = &v21_hw_info,
+};
+
+static const struct bcmasp_plat_data v22_plat_data = {
+ .init_wol = bcmasp_init_wol_shared,
+ .enable_wol = bcmasp_enable_wol_shared,
+ .destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_many,
.hw_info = &v21_hw_info,
+ .eee_fixup = bcmasp_eee_fixup,
};
+static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
+{
+ priv->init_wol = pdata->init_wol;
+ priv->enable_wol = pdata->enable_wol;
+ priv->destroy_wol = pdata->destroy_wol;
+ priv->core_clock_select = pdata->core_clock_select;
+ priv->eee_fixup = pdata->eee_fixup;
+ priv->hw_info = pdata->hw_info;
+}
+
static const struct of_device_id bcmasp_of_match[] = {
{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ /* sentinel */ },
@@ -1265,16 +1342,13 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!pdata)
return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
- priv->hw_info = pdata->hw_info;
+ bcmasp_set_pdata(priv, pdata);
/* Enable all clocks to ensure successful probing */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
/* Switch to the main clock */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
bcmasp_intr2_mask_set_all(priv);
bcmasp_intr2_clear_all(priv);
@@ -1381,7 +1455,7 @@ static int __maybe_unused bcmasp_suspend(struct device *d)
*/
bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
- bcmasp_core_clock_select(priv, true);
+ priv->core_clock_select(priv, true);
clk_disable_unprepare(priv->clk);
@@ -1399,7 +1473,7 @@ static int __maybe_unused bcmasp_resume(struct device *d)
return ret;
/* Switch to the main clock domain */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
/* Re-enable all clocks for re-initialization */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index ec90add6b03e..f93cb3da44b0 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -19,6 +19,8 @@
#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
#define ASP_INTR2_UMC0_WAKE BIT(22)
#define ASP_INTR2_UMC1_WAKE BIT(28)
+#define ASP_INTR2_PHY_EVENT(intr) ((intr) ? BIT(30) | BIT(31) : \
+ BIT(24) | BIT(25))
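+/* e.g. ASP_INTR2_PHY_EVENT(0) == BIT(24) | BIT(25) (channel 0),
+ *      ASP_INTR2_PHY_EVENT(1) == BIT(30) | BIT(31) (channel 1)
+ */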
#define ASP_WAKEUP_INTR2_OFFSET 0x1200
#define ASP_WAKEUP_INTR2_STATUS 0x0
@@ -33,6 +35,12 @@
#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
#define ASP_WAKEUP_INTR2_FW BIT(4)
+#define ASP_CTRL2_OFFSET 0x2000
+#define ASP_CTRL2_CORE_CLOCK_SELECT 0x0
+#define ASP_CTRL2_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL2_CPU_CLOCK_SELECT 0x4
+#define ASP_CTRL2_CPU_CLOCK_SELECT_MAIN BIT(0)
+
#define ASP_TX_ANALYTICS_OFFSET 0x4c000
#define ASP_TX_ANALYTICS_CTRL 0x0
@@ -134,8 +142,11 @@ enum asp_rx_net_filter_block {
#define ASP_EDPKT_RX_PKT_CNT 0x138
#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
#define ASP_EDPKT_HDR_OUT_CNT 0x140
+#define ASP_EDPKT_SPARE_REG 0x174
+#define ASP_EDPKT_SPARE_REG_EPHY_LPI BIT(4)
+#define ASP_EDPKT_SPARE_REG_GPHY_LPI BIT(3)
-#define ASP_CTRL 0x101000
+#define ASP_CTRL_OFFSET 0x101000
#define ASP_CTRL_ASP_SW_INIT 0x04
#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
@@ -306,6 +317,7 @@ struct bcmasp_intf {
struct bcmasp_desc *rx_edpkt_cpu;
dma_addr_t rx_edpkt_dma_addr;
dma_addr_t rx_edpkt_dma_read;
+ dma_addr_t rx_edpkt_dma_valid;
/* RX buffer prefetcher ring */
void *rx_ring_cpu;
@@ -337,7 +349,7 @@ struct bcmasp_intf {
int wol_irq;
unsigned int wol_irq_enabled:1;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,6 +384,8 @@ struct bcmasp_plat_data {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
struct bcmasp_hw_info *hw_info;
};
@@ -390,6 +404,8 @@ struct bcmasp_priv {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
void __iomem *base;
struct bcmasp_hw_info *hw_info;
@@ -530,7 +546,8 @@ BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
-BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -541,6 +558,8 @@ void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en);
+
void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
extern const struct ethtool_ops bcmasp_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index ce6a3d56fb23..484fc2b5626f 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -360,29 +360,26 @@ void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
umac_wl(intf, reg, UMC_EEE_CTRL);
intf->eee.eee_enabled = enable;
- intf->eee.eee_active = enable;
}
-static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
int ret;
if (!dev->phydev)
@@ -399,7 +396,6 @@ static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
}
umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.eee_active = ret >= 0;
intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
bcmasp_eee_enable_set(intf, true);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 53e542881255..dd06b68b33ed 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -382,6 +382,7 @@ static void bcmasp_netif_start(struct net_device *dev)
bcmasp_enable_rx_irq(intf, 1);
bcmasp_enable_tx_irq(intf, 1);
+ bcmasp_enable_phy_irq(intf, 1);
phy_start(dev->phydev);
}
@@ -607,6 +608,7 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
+ bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -658,8 +660,8 @@ static void bcmasp_adj_link(struct net_device *dev)
reg |= cmd_bits;
umac_wl(intf, reg, UMC_CMD);
- intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, intf->eee.eee_active);
+ active = phy_init_eee(phydev, 0) >= 0;
+ bcmasp_eee_enable_set(intf, active);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -673,38 +675,78 @@ static void bcmasp_adj_link(struct net_device *dev)
phy_print_status(phydev);
}
-static int bcmasp_init_rx(struct bcmasp_intf *intf)
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct page *buffer_pg;
- dma_addr_t dma;
- void *p;
- u32 reg;
- int ret;
+ /* Alloc RX */
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
-
- dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(kdev, dma)) {
- __free_pages(buffer_pg, intf->rx_buf_order);
+ if (!buffer_pg)
return -ENOMEM;
- }
+
intf->rx_ring_cpu = page_to_virt(buffer_pg);
- intf->rx_ring_dma = dma;
- intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, intf->rx_ring_dma))
+ goto free_rx_buffer;
+
+ intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->rx_edpkt_dma_addr, GFP_KERNEL);
+ if (!intf->rx_edpkt_cpu)
+ goto free_rx_buffer_dma;
+
+ /* Alloc TX */
+ intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->tx_spb_dma_addr, GFP_KERNEL);
+ if (!intf->tx_spb_cpu)
+ goto free_rx_edpkt_dma;
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
GFP_KERNEL);
- if (!p) {
- ret = -ENOMEM;
- goto free_rx_ring;
- }
- intf->rx_edpkt_cpu = p;
+ if (!intf->tx_cbs)
+ goto free_tx_spb_dma;
- netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+ return 0;
+
+free_tx_spb_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+free_rx_buffer:
+ __free_pages(buffer_pg, intf->rx_buf_order);
+
+ return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* RX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+ /* TX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+ kfree(intf->tx_cbs);
+}
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ /* Restart from index 0 */
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
intf->rx_edpkt_index = 0;
@@ -730,64 +772,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_END);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_VALID);
-
- reg = UMAC2FB_CFG_DEFAULT_EN |
- ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
- reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
- umac2fb_wl(intf, reg, UMAC2FB_CFG);
-
- return 0;
-
-free_rx_ring:
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
- return ret;
+ umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+ UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+ UMAC2FB_CFG);
}
-static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
- intf->rx_edpkt_dma_addr);
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-}
-static int bcmasp_init_tx(struct bcmasp_intf *intf)
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
- struct device *kdev = &intf->parent->pdev->dev;
- void *p;
- int ret;
-
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
- GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- intf->tx_spb_cpu = p;
+ /* Restart from index 0 */
intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
-
- intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
- GFP_KERNEL);
- if (!intf->tx_cbs) {
- ret = -ENOMEM;
- goto free_tx_spb;
- }
-
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
- netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
-
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
@@ -803,26 +804,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
-
- return 0;
-
-free_tx_spb:
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- return ret;
-}
-
-static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- /* Free descriptors */
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- /* Free cbs */
- kfree(intf->tx_cbs);
}
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
@@ -910,12 +891,10 @@ static void bcmasp_netif_deinit(struct net_device *dev)
/* Disable interrupts */
bcmasp_enable_tx_irq(intf, 0);
bcmasp_enable_rx_irq(intf, 0);
+ bcmasp_enable_phy_irq(intf, 0);
netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-
netif_napi_del(&intf->rx_napi);
- bcmasp_reclaim_free_all_rx(intf);
}
static int bcmasp_stop(struct net_device *dev)
@@ -929,6 +908,8 @@ static int bcmasp_stop(struct net_device *dev)
bcmasp_netif_deinit(dev);
+ bcmasp_reclaim_free_buffers(intf);
+
phy_disconnect(dev->phydev);
/* Disable internal EPHY or external PHY */
@@ -1048,6 +1029,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netdev_err(dev, "could not attach to PHY\n");
goto err_phy_disable;
}
+
+ if (intf->internal_phy)
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
+
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
} else if (!intf->wolopts) {
ret = phy_resume(dev->phydev);
if (ret)
@@ -1067,17 +1054,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
intf->old_link = -1;
intf->old_pause = -1;
- ret = bcmasp_init_tx(intf);
- if (ret)
- goto err_phy_disconnect;
-
- /* Turn on asp */
+ bcmasp_init_tx(intf);
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
bcmasp_enable_tx(intf, 1);
- ret = bcmasp_init_rx(intf);
- if (ret)
- goto err_reclaim_tx;
-
+ bcmasp_init_rx(intf);
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
/* Turn on UniMAC TX/RX */
@@ -1091,11 +1073,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
-err_reclaim_tx:
- bcmasp_reclaim_free_all_tx(intf);
-err_phy_disconnect:
- if (phydev)
- phy_disconnect(phydev);
err_phy_disable:
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
@@ -1111,13 +1088,24 @@ static int bcmasp_open(struct net_device *dev)
netif_dbg(intf, ifup, dev, "bcmasp open\n");
- ret = clk_prepare_enable(intf->parent->clk);
+ ret = bcmasp_alloc_buffers(intf);
if (ret)
return ret;
- ret = bcmasp_netif_init(dev, true);
+ ret = clk_prepare_enable(intf->parent->clk);
if (ret)
+ goto err_free_mem;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret) {
clk_disable_unprepare(intf->parent->clk);
+ goto err_free_mem;
+ }
+
+ return ret;
+
+err_free_mem:
+ bcmasp_reclaim_free_buffers(intf);
return ret;
}
@@ -1326,6 +1314,9 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, true);
+
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}
@@ -1374,6 +1365,9 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, false);
+
reg = umac_rl(intf, UMC_MPD_CTRL);
reg &= ~UMC_MPD_CTRL_MPD_EN;
umac_wl(intf, reg, UMC_MPD_CTRL);
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 3e7c8671cd11..72df1bb10172 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 9b83d5361699..50b8e97a811d 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6e4f36aaf5db..36f9bad28e6a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -362,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 0b21fd5bd457..77425c7a32db 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 448a1b90de5e..6ffdc4229407 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e9c1e1bb5580..c9b6acd8c892 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
@@ -3537,7 +3538,7 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_inner_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3569,7 +3570,7 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3612,7 +3613,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
- u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+ u8 hlen = skb_network_offset(skb) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
@@ -3620,8 +3621,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
- pbd->ip_hlen_w = (skb_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ pbd->ip_hlen_w = skb_network_header_len(skb) >> 1;
hlen += pbd->ip_hlen_w;
@@ -3666,8 +3666,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
u8 outerip_off, outerip_len = 0;
/* from outer IP to transport */
- hlen_w = (skb_inner_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ hlen_w = skb_inner_transport_offset(skb) >> 1;
/* transport len */
hlen_w += inner_tcp_hdrlen(skb) >> 1;
@@ -3713,7 +3712,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
0, IPPROTO_TCP, 0));
}
- outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+ outerip_off = (skb_network_offset(skb)) >> 1;
*global_data |=
outerip_off |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 81d232e6d05f..58956ed8f531 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
@@ -2081,34 +2081,31 @@ static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Storage only interface"
};
-static u32 bnx2x_eee_to_adv(u32 eee_adv)
+static void bnx2x_eee_to_linkmode(unsigned long *mode, u32 eee_adv)
{
- u32 modes = 0;
-
if (eee_adv & SHMEM_EEE_100M_ADV)
- modes |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_1G_ADV)
- modes |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_10G_ADV)
- modes |= ADVERTISED_10000baseT_Full;
-
- return modes;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
}
-static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+static u32 bnx2x_linkmode_to_eee(const unsigned long *mode, u32 shift)
{
u32 eee_adv = 0;
- if (modes & ADVERTISED_100baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_100M_ADV;
- if (modes & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_1G_ADV;
- if (modes & ADVERTISED_10000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_10G_ADV;
return eee_adv << shift;
}
-static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2120,16 +2117,17 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
eee_cfg = bp->link_vars.eee_status;
- edata->supported =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
- SHMEM_EEE_SUPPORTED_SHIFT);
+ bnx2x_eee_to_linkmode(edata->supported,
+ (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ bnx2x_eee_to_linkmode(edata->advertised,
+ (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
- SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->lp_advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ bnx2x_eee_to_linkmode(edata->lp_advertised,
+ (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
/* SHMEM value is in 16u units --> Convert to 1u units. */
edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
@@ -2141,7 +2139,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2162,8 +2160,8 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- advertised = bnx2x_adv_to_eee(edata->advertised,
- SHMEM_EEE_ADV_STATUS_SHIFT);
+ advertised = bnx2x_linkmode_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
"Direct manipulation of EEE advertisement is not supported\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513ffe4..ea310057fe3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0aacd3c6ed5c..493b724848c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -246,6 +246,49 @@ static const u16 bnxt_async_events_arr[] = {
static struct workqueue_struct *bnxt_pf_wq;
+#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
+#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
+
+const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
+ .ports = {
+ .src = 0,
+ .dst = 0,
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_NONE,
+ .dst = BNXT_IPV6_MASK_NONE,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_ALL,
+ .dst = BNXT_IPV6_MASK_ALL,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v4addrs = {
+ .src = cpu_to_be32(0xffffffff),
+ .dst = cpu_to_be32(0xffffffff),
+ },
+ },
+};
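These constant masks replace the old per-field BNXT_NTUPLE_MATCH_* flag
checks: a wildcarded field now simply carries an all-zero mask, and key and
mask are copied to the firmware request unconditionally (see the
bnxt_hwrm_cfa_ntuple_filter_alloc() changes below). As a rough illustration
of how a partially wildcarded rule would be built, not code from this patch:

    struct bnxt_flow_masks masks = BNXT_FLOW_MASK_NONE;

    /* exact-match only the destination port; the addresses and the
     * source port keep their all-zero (don't-care) masks
     */
    masks.ports.dst = cpu_to_be16(0xffff);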
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
@@ -3817,7 +3860,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, j, rc, ulp_base_vec, ulp_msix;
- int tcs = netdev_get_num_tc(bp->dev);
+ int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@@ -4168,8 +4211,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
- num_vnics += bp->rx_nr_rings;
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ num_vnics++;
+ else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ num_vnics += bp->rx_nr_rings;
+ }
#endif
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
@@ -4186,6 +4233,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
static void bnxt_init_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int i;
for (i = 0; i < bp->nr_vnics; i++) {
@@ -4199,20 +4247,33 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (!i) {
+ if (i == BNXT_VNIC_DEFAULT) {
u8 *key = (void *)vnic->rss_hash_key;
int k;
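+ /* Generate a hash key once, then keep it stable across
+ * resets unless userspace has installed a new one
+ * (rss_hash_key_updated)
+ */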
+ if (!bp->rss_hash_key_valid &&
+ !bp->rss_hash_key_updated) {
+ get_random_bytes(bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bp->rss_hash_key_updated)
+ continue;
+
+ bp->rss_hash_key_updated = false;
+ bp->rss_hash_key_valid = true;
+
bp->toeplitz_prefix = 0;
- get_random_bytes(vnic->rss_hash_key,
- HW_HASH_KEY_SIZE);
for (k = 0; k < 8; k++) {
bp->toeplitz_prefix <<= 8;
bp->toeplitz_prefix |= key[k];
}
} else {
- memcpy(vnic->rss_hash_key,
- bp->vnic_info[0].rss_hash_key,
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
HW_HASH_KEY_SIZE);
}
}
@@ -4798,6 +4859,44 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
}
}
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ u8 type = fltr->type, flags = fltr->flags;
+
+ INIT_LIST_HEAD(&fltr->list);
+ if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
+ (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
+ list_add_tail(&fltr->list, &bp->usr_fltr_list);
+}
+
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ if (!list_empty(&fltr->list))
+ list_del_init(&fltr->list);
+}
+
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
+ continue;
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+}
+
+static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ hlist_del(&fltr->hash);
+ bnxt_del_one_usr_fltr(bp, fltr);
+ if (fltr->flags) {
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+}
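Taken together, these helpers track user-created rules on bp->usr_fltr_list
so they can be managed as a group. A lifecycle sketch (all names are from
this patch):

    bnxt_insert_usr_fltr(bp, &fltr->base);  /* on create: ring-dst L2 or
                                             * no-aging ntuple rules
                                             */
    bnxt_del_one_usr_fltr(bp, &fltr->base); /* on single-rule teardown */
    bnxt_clear_usr_fltrs(bp, false);        /* on reset: drop ntuple
                                             * entries, keep L2
                                             */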
+
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
@@ -4813,12 +4912,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
head = &bp->ntp_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bnxt_del_l2_filter(bp, fltr->l2_fltr);
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
if (!all)
@@ -4840,7 +4937,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -4859,14 +4956,10 @@ static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
head = &bp->l2_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- if (fltr->base.flags) {
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- }
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
}
@@ -5039,8 +5132,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (rc)
goto alloc_mem_err;
- bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
- BNXT_VNIC_UCAST_FLAG;
+ bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
+ BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
+ bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
+ BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
+
rc = bnxt_alloc_vnic_attributes(bp);
if (rc)
goto alloc_mem_err;
@@ -5342,6 +5440,7 @@ void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
if (fltr->base.flags) {
clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
bp->ntp_fltr_count--;
@@ -5480,13 +5579,15 @@ static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
int bit_id;
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_MAX_FLTR, 0);
+ bp->max_fltr, 0);
if (bit_id < 0)
return -ENOMEM;
fltr->base.sw_id = (u16)bit_id;
+ bp->ntp_fltr_count++;
}
head = &bp->l2_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
atomic_set(&fltr->refcnt, 1);
return 0;
@@ -5519,6 +5620,40 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
return fltr;
}
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr) {
+ fltr = ERR_PTR(-EEXIST);
+ goto l2_filter_exit;
+ }
+ fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
+ if (!fltr) {
+ fltr = ERR_PTR(-ENOMEM);
+ goto l2_filter_exit;
+ }
+ fltr->base.flags = flags;
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ if (rc) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr);
+ return ERR_PTR(rc);
+ }
+
+l2_filter_exit:
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return fltr;
+}
+
static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
{
#ifdef CONFIG_BNXT_SRIOV
@@ -5650,15 +5785,38 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
mask[i] = cpu_to_be32(~0);
}
+static void
+bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
+ struct hwrm_cfa_ntuple_filter_alloc_input *req,
+ u16 rxq)
+{
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ struct bnxt_vnic_info *vnic;
+ u32 enables;
+
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req->enables |= cpu_to_le32(enables);
+ req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
+ } else {
+ u32 flags;
+
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req->flags |= cpu_to_le32(flags);
+ req->dst_id = cpu_to_le16(rxq);
+ }
+}
+
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
+ struct bnxt_flow_masks *masks = &fltr->fmasks;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
- u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
@@ -5668,16 +5826,16 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
l2_fltr = fltr->l2_fltr;
req->l2_filter_id = l2_fltr->base.filter_id;
-
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->base.rxq);
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ req->flags =
+ cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
+ } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
@@ -5687,25 +5845,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
- }
+ *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
} else {
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5713,14 +5861,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
- }
+ req->src_port = keys->ports.src;
+ req->src_port_mask = masks->ports.src;
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = masks->ports.dst;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
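The hunks above drop the conditional BNXT_NTUPLE_MATCH_* handling: every address and port field is now written unconditionally as a value/mask pair taken from fltr->fmasks, so a zero mask wildcards the field instead of a flag bit. A self-contained sketch of value/mask matching under that scheme; the flow_key layout here is illustrative, not the driver's structs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flow_key { uint32_t saddr, daddr; uint16_t sport, dport; };

static bool flow_match(const struct flow_key *pkt,
		       const struct flow_key *key,
		       const struct flow_key *mask)
{
	/* a zero mask makes the field a don't-care */
	return (pkt->saddr & mask->saddr) == (key->saddr & mask->saddr) &&
	       (pkt->daddr & mask->daddr) == (key->daddr & mask->daddr) &&
	       (pkt->sport & mask->sport) == (key->sport & mask->sport) &&
	       (pkt->dport & mask->dport) == (key->dport & mask->dport);
}

int main(void)
{
	struct flow_key key  = { 0x0a000001, 0, 0, 443 };
	struct flow_key mask = { 0xffffffff, 0, 0, 0xffff }; /* src IP + dst port only */
	struct flow_key pkt  = { 0x0a000001, 0xc0a80001, 5555, 443 };

	printf("match: %d\n", flow_match(&pkt, &key, &mask)); /* prints 1 */
	return 0;
}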
@@ -5935,8 +6079,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (!rx_rings)
+ return 0;
+ return bnxt_calc_nr_ring_pages(rx_rings - 1,
+ BNXT_RSS_TABLE_ENTRIES_P5);
+ }
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
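The P5_PLUS branch above now guards rx_rings == 0 before calling bnxt_calc_nr_ring_pages(rx_rings - 1, ...). One reason the guard is required: the helper is handed the highest ring index, and on an unsigned value 0 - 1 wraps around. A sketch of the guard plus plain ceiling division; whether the real helper rounds further (e.g. to a power of 2) is not visible in this hunk, so treat the arithmetic as an assumption:

#include <stdio.h>

static unsigned int nr_rss_ctxs(unsigned int rx_rings, unsigned int per_ctx)
{
	if (!rx_rings)
		return 0;	/* avoid (0 - 1) wrapping to UINT_MAX */
	return (rx_rings - 1) / per_ctx + 1;	/* == ceil(rx_rings / per_ctx) */
}

int main(void)
{
	/* 64 entries per context: 64 rings -> 1 ctx, 65 -> 2, 0 -> 0 */
	printf("%u %u %u\n", nr_rss_ctxs(64, 64), nr_rss_ctxs(65, 64),
	       nr_rss_ctxs(0, 64));
	return 0;
}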
@@ -5967,7 +6115,10 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
for (i = 0; i < tbl_size; i++) {
u16 ring_id, j;
- j = bp->rss_indir_tbl[i];
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else
+ j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
ring_id = rxr->rx_ring_struct.fw_ring_id;
@@ -5981,10 +6132,13 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
- else
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+ } else {
bnxt_fill_hw_rss_tbl(bp, vnic);
+ }
if (bp->rss_hash_delta) {
req->hash_type = cpu_to_le32(bp->rss_hash_delta);
@@ -6057,7 +6211,7 @@ exit:
static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct hwrm_vnic_rss_qcfg_output *resp;
struct hwrm_vnic_rss_qcfg_input *req;
@@ -6161,6 +6315,7 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
@@ -6190,8 +6345,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
- req->rss_rule =
- cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
@@ -6288,7 +6442,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == 0)
+ if (vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -6347,6 +6501,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
}
if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6914,6 +7076,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_hw_ring_grps =
le32_to_cpu(resp->alloc_hw_ring_grps);
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
@@ -6926,7 +7089,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
if (cp < (rx + tx)) {
rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
if (rc)
- return rc;
+ goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@@ -6938,8 +7101,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
+get_rings_exit:
hwrm_req_drop(bp, req);
- return 0;
+ return rc;
}
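This hunk converts an early return in bnxt_hwrm_get_rings() into a goto so that hwrm_req_drop() runs on every path, including the __bnxt_trim_rings() failure case. The generic shape of the single-exit cleanup idiom, with malloc/free standing in for the HWRM request hold/drop pair:

#include <stdio.h>
#include <stdlib.h>

static int do_work(int fail)
{
	char *buf = malloc(64);		/* stands in for hwrm_req_init() */
	int rc = 0;

	if (!buf)
		return -1;
	if (fail) {
		rc = -1;
		goto exit;		/* don't leak buf on the error path */
	}
	puts("request sent");
exit:
	free(buf);			/* stands in for hwrm_req_drop() */
	return rc;
}

int main(void)
{
	do_work(0);
	do_work(1);
	return 0;
}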
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -6968,8 +7132,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
static bool bnxt_rfs_supported(struct bnxt *bp);
static struct hwrm_func_cfg_input *
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 enables = 0;
@@ -6978,51 +7141,42 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return NULL;
req->fid = cpu_to_le16(0xffff);
- enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req->num_tx_rings = cpu_to_le16(tx_rings);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
if (BNXT_NEW_RM(bp)) {
- enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cp_p5 ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= rx_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
- enables |= cp_rings ?
+ enables |= hwr->cp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- }
- enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ enables |= hwr->grp ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
+ 0;
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs =
- cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
+ req->num_msix = cpu_to_le16(hwr->cp);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
- bnxt_rfs_supported(bp))
- req->num_rsscos_ctxs =
- cpu_to_le16(ring_grps + 1);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
}
req->enables = cpu_to_le32(enables);
return req;
}
static struct hwrm_func_vf_cfg_input *
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
@@ -7030,49 +7184,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
return NULL;
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp_p5 ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
- enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req->num_tx_rings = cpu_to_le16(tx_rings);
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
req->enables = cpu_to_le32(enables);
return req;
}
static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
int rc;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7086,25 +7237,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return rc;
if (bp->hwrm_spec_code < 0x10601)
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return bnxt_hwrm_get_rings(bp);
}
static int
-bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return 0;
}
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7115,15 +7264,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return bnxt_hwrm_get_rings(bp);
}
-static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int stat, int vnic)
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, hwr);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
@@ -7166,6 +7312,24 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
return cp + ulp_stat;
}
+static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ if (!hwr->grp)
+ return 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
+
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ rss_ctx *= hwr->vnic;
+ return rss_ctx;
+ }
+ if (BNXT_VF(bp))
+ return BNXT_VF_MAX_RSS_CTX;
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
+ return hwr->grp + 1;
+ return 1;
+}
+
/* Check if a default RSS map needs to be setup. This function is only
* used on older firmware that does not require reserving RX rings.
*/
@@ -7181,13 +7345,24 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
}
}
+static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return 2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return rx_rings + 1;
+ }
+ return 1;
+}
+
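bnxt_get_total_vnics() above centralizes the VNIC accounting: firmware with the ntuple-VNIC capability steers all ntuple filters through one dedicated VNIC (2 total), while legacy RFS on older chips needs one VNIC per RX ring plus the default. A user-space restatement of the same logic with worked values:

#include <stdbool.h>
#include <stdio.h>

static int total_vnics(bool rfs, bool ntuple_vnic, bool p5_plus, int rx)
{
	if (rfs) {
		if (ntuple_vnic)
			return 2;	/* default + dedicated ntuple VNIC */
		if (!p5_plus)
			return rx + 1;	/* default + one VNIC per RX ring */
	}
	return 1;			/* default VNIC only */
}

int main(void)
{
	printf("%d %d %d\n",
	       total_vnics(true, true, true, 16),	/* 2 */
	       total_vnics(true, false, false, 16),	/* 17 */
	       total_vnics(false, false, true, 16));	/* 1 */
	return 0;
}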
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
int nq = bnxt_nq_rings_in_use(bp);
int rx = bp->rx_nr_rings, stat;
- int vnic = 1, grp = rx;
+ int vnic, grp = rx;
if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
bp->hwrm_spec_code >= 0x10601)
@@ -7202,9 +7377,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp);
return false;
}
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+
+ vnic = bnxt_get_total_vnics(bp, rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
@@ -7219,47 +7394,65 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
return false;
}
-static int __bnxt_reserve_rings(struct bnxt *bp)
+static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_nq_rings_in_use(bp);
- int tx = bp->tx_nr_rings;
- int rx = bp->rx_nr_rings;
- int grp, rx_rings, rc;
- int vnic = 1, stat;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ if (BNXT_NEW_RM(bp)) {
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->cp = hw_resc->resv_irqs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+ }
+}
+
+static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
+ hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
+}
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_rings hwr = {0};
+ int rx_rings, rc;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ hwr.tx = bp->tx_nr_rings;
+ hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- grp = bp->rx_nr_rings;
- stat = bnxt_get_func_stat_ctxs(bp);
+ hwr.rx <<= 1;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
return rc;
- tx = hw_resc->resv_tx_rings;
- if (BNXT_NEW_RM(bp)) {
- rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_irqs;
- grp = hw_resc->resv_hw_ring_grps;
- vnic = hw_resc->resv_vnics;
- stat = hw_resc->resv_stat_ctxs;
- }
+ bnxt_copy_reserved_rings(bp, &hwr);
- rx_rings = rx;
+ rx_rings = hwr.rx;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (rx >= 2) {
- rx_rings = rx >> 1;
+ if (hwr.rx >= 2) {
+ rx_rings = hwr.rx >> 1;
} else {
if (netif_running(bp->dev))
return -ENOMEM;
@@ -7271,17 +7464,17 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bnxt_set_ring_params(bp);
}
}
- rx_rings = min_t(int, rx_rings, grp);
- cp = min_t(int, cp, bp->cp_nr_rings);
- if (stat > bnxt_get_ulp_stat_ctxs(bp))
- stat -= bnxt_get_ulp_stat_ctxs(bp);
- cp = min_t(int, cp, stat);
- rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ rx_rings = min_t(int, rx_rings, hwr.grp);
+ hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+ if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx = rx_rings << 1;
- tx_cp = bnxt_num_tx_to_cp(bp, tx);
- cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
- bp->tx_nr_rings = tx;
+ hwr.rx = rx_rings << 1;
+ tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
* if absolutely necessary
@@ -7298,9 +7491,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
bp->rx_nr_rings = rx_rings;
- bp->cp_nr_rings = cp;
+ bp->cp_nr_rings = hwr.cp;
- if (!tx || !rx || !cp || !grp || !vnic || !stat)
+ if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev))
@@ -7309,9 +7502,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 flags;
@@ -7319,8 +7510,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!BNXT_NEW_RM(bp))
return 0;
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -7334,15 +7524,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 flags;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -7360,20 +7547,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
- return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, stats,
- vnics);
+ return bnxt_hwrm_check_pf_rings(bp, hwr);
- return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ return bnxt_hwrm_check_vf_rings(bp, hwr);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -8701,6 +8883,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -8709,12 +8898,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
- pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
- pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
- pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
- pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
- pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
- pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
@@ -8817,6 +9000,14 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
+
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
+
hwrm_cfa_adv_qcaps_exit:
hwrm_req_drop(bp, req);
return rc;
@@ -9681,10 +9872,28 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
return __bnxt_setup_vnic(bp, vnic_id);
}
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
+ u16 start_rx_ring_idx, int rx_rings)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ return rc;
+ }
+ return bnxt_setup_vnic(bp, vnic_id);
+}
+
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
int i, rc = 0;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
+ bp->rx_nr_rings);
+
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
@@ -9700,14 +9909,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
- break;
- }
- rc = bnxt_setup_vnic(bp, vnic_id);
- if (rc)
+ if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
break;
}
return rc;
@@ -9748,7 +9950,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int rc = 0;
unsigned int rx_nr_rings = bp->rx_nr_rings;
@@ -9777,7 +9979,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9786,7 +9988,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, 0);
+ rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -9938,7 +10140,7 @@ static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
{
- int tcs = netdev_get_num_tc(bp->dev);
+ int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@@ -9947,7 +10149,7 @@ int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
{
- int tcs = netdev_get_num_tc(bp->dev);
+ int tcs = bp->num_tc;
return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
bp->tx_nr_rings_xdp;
@@ -9977,7 +10179,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
struct net_device *dev = bp->dev;
int tcs, i;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
if (tcs) {
int i, off, count;
@@ -10009,8 +10211,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
- if (netdev_get_num_tc(bp->dev))
+ if (bp->num_tc) {
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
+ }
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
@@ -10236,8 +10440,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
- int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
+ int tcs = bp->num_tc;
int rc;
if (!bnxt_need_reserve_rings(bp))
@@ -10263,6 +10467,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
@@ -10610,10 +10815,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
@@ -10755,7 +10960,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->module_status = resp->module_status;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds;
eee->eee_active = 0;
@@ -10764,8 +10969,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_active = 1;
fw_speeds = le16_to_cpu(
resp->link_partner_adv_eee_link_speed_mask);
- eee->lp_advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
}
/* Pull initial EEE config */
@@ -10775,8 +10979,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_enabled = 1;
fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
- eee->advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
if (resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
@@ -10946,7 +11149,7 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
static void bnxt_hwrm_set_eee(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
if (eee->eee_enabled) {
u16 eee_speeds;
@@ -11076,6 +11279,7 @@ static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
+ hw_resc->resv_rsscos_ctxs = 0;
if (!fw_reset) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
@@ -11311,22 +11515,25 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
eee->eee_enabled = 0;
return false;
}
- if (eee->advertised & ~advertising) {
- eee->advertised = advertising & eee->supported;
+ if (linkmode_andnot(tmp, eee->advertised, advertising)) {
+ linkmode_and(eee->advertised, advertising,
+ eee->supported);
return false;
}
}
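The ethtool_keee conversion above replaces u32 advertisement words with multi-word link-mode bitmaps, so the "advertising something unsupported" check becomes linkmode_andnot()/linkmode_and(). A one-word sketch of the same clamping logic; real kernel link-mode masks span multiple longs:

#include <stdio.h>

int main(void)
{
	unsigned long supported   = 0x07;	/* modes 0..2 */
	unsigned long advertising = 0x05;	/* modes 0 and 2 */
	unsigned long eee_adv     = 0x0d;	/* mode 3 is not advertised */
	unsigned long excess = eee_adv & ~advertising; /* linkmode_andnot() */

	if (excess) {
		/* clamp like linkmode_and(eee->advertised, adv, supported) */
		eee_adv = advertising & supported;
		printf("clamped EEE advertisement to 0x%lx\n", eee_adv);
	}
	return 0;
}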
@@ -11431,6 +11638,42 @@ static int bnxt_reinit_after_abort(struct bnxt *bp)
return rc;
}
+static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ struct bnxt_ntuple_filter *ntp_fltr;
+ struct bnxt_l2_filter *l2_fltr;
+
+ if (list_empty(&fltr->list))
+ return;
+
+ if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
+ ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ ntp_fltr->l2_fltr = l2_fltr;
+ if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
+ fltr->sw_id);
+ }
+ } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
+ l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
+ if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
+ fltr->sw_id);
+ }
+ }
+}
+
+static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
+ bnxt_cfg_one_usr_fltr(bp, usr_fltr);
+}
+
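bnxt_cfg_usr_fltrs() uses list_for_each_entry_safe() because bnxt_cfg_one_usr_fltr() can delete the current entry when a restore fails, so the iterator must cache the next node before the body runs. The same idiom in a stand-alone sketch:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

int main(void)
{
	struct node *head = NULL, *n, *next;
	int i;

	for (i = 3; i > 0; i--) {	/* build list 1 -> 2 -> 3 */
		n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}
	for (n = head; n; n = next) {
		next = n->next;		/* cache before n may be freed */
		printf("%s filter %d\n",
		       n->id == 2 ? "dropping" : "restored", n->id);
		free(n);		/* the body may destroy the node */
	}
	return 0;
}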
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -11517,6 +11760,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ bnxt_cfg_usr_fltrs(bp);
return 0;
open_err_irq:
@@ -11564,10 +11808,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
+ bnxt_init_napi(bp);
set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
rc = bnxt_init_nic(bp, true);
if (rc) {
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ bnxt_del_napi(bp);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -11586,6 +11832,7 @@ half_open_err:
void bnxt_half_close_nic(struct bnxt *bp)
{
bnxt_hwrm_resource_free(bp, false, true);
+ bnxt_del_napi(bp);
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@@ -11955,8 +12202,8 @@ void bnxt_get_ring_err_stats(struct bnxt *bp,
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct netdev_hw_addr *ha;
u8 *haddr;
int mc_count = 0;
@@ -11990,7 +12237,7 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
static bool bnxt_uc_list_updated(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int off = 0;
@@ -12017,7 +12264,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (!test_bit(BNXT_STATE_OPEN, &bp->state))
return;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
mask = vnic->rx_mask;
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
@@ -12048,7 +12295,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -12160,21 +12407,32 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
- int vnics, max_vnics, max_rss_ctxs;
+ struct bnxt_hw_rings hwr = {0};
+ int max_vnics, max_rss_ctxs;
+ hwr.rss_ctx = 1;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ /* 2 VNICS: default + Ntuple */
+ hwr.vnic = 2;
+ hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ hwr.vnic;
+ goto check_reserve_vnic;
+ }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- vnics = 1 + bp->rx_nr_rings;
+ hwr.vnic = 1 + bp->rx_nr_rings;
+check_reserve_vnic:
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- /* RSS contexts not a limiting factor */
- if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
- max_rss_ctxs = max_vnics;
- if (vnics > max_vnics || vnics > max_rss_ctxs) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
+ hwr.rss_ctx = hwr.vnic;
+
+ if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
@@ -12185,15 +12443,19 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return true;
- if (vnics == bp->hw_resc.resv_vnics)
+ if (hwr.vnic == bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
- if (vnics <= bp->hw_resc.resv_vnics)
+ bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
+ hwr.vnic = 1;
+ hwr.rss_ctx = 0;
+ bnxt_hwrm_reserve_rings(bp, &hwr);
return false;
}
@@ -12232,14 +12494,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
return features;
}
+static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init, u32 flags, bool update_tpa)
+{
+ bnxt_close_nic(bp, irq_re_init, link_re_init);
+ bp->flags = flags;
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return bnxt_open_nic(bp, irq_re_init, link_re_init);
+}
+
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
+ bool update_tpa = false, update_ntuple = false;
struct bnxt *bp = netdev_priv(dev);
u32 flags = bp->flags;
u32 changes;
int rc = 0;
bool re_init = false;
- bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
if (features & NETIF_F_GRO_HW)
@@ -12255,6 +12527,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_NTUPLE)
flags |= BNXT_FLAG_RFS;
+ else
+ bnxt_clear_usr_fltrs(bp, true);
changes = flags ^ bp->flags;
if (changes & BNXT_FLAG_TPA) {
@@ -12268,6 +12542,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & ~BNXT_FLAG_TPA)
re_init = true;
+ if (changes & BNXT_FLAG_RFS)
+ update_ntuple = true;
+
if (flags != bp->flags) {
u32 old_flags = bp->flags;
@@ -12278,14 +12555,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
- if (re_init) {
- bnxt_close_nic(bp, false, false);
- bp->flags = flags;
- if (update_tpa)
- bnxt_set_ring_params(bp);
+ if (update_ntuple)
+ return bnxt_reinit_features(bp, true, false, flags, update_tpa);
+
+ if (re_init)
+ return bnxt_reinit_features(bp, false, false, flags, update_tpa);
- return bnxt_open_nic(bp, false, false);
- }
if (update_tpa) {
bp->flags = flags;
rc = bnxt_set_tpa(bp,
@@ -13115,9 +13390,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
- int tx_rings_needed, stats;
+ struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
- int cp, vnics;
if (tcs)
tx_sets = tcs;
@@ -13130,26 +13404,27 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
- tx_rings_needed = tx * tx_sets + tx_xdp;
- if (max_tx < tx_rings_needed)
+ hwr.rx = rx_rings;
+ hwr.tx = tx * tx_sets + tx_xdp;
+ if (max_tx < hwr.tx)
return -ENOMEM;
- vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
- BNXT_FLAG_RFS)
- vnics += rx;
+ hwr.vnic = bnxt_get_total_vnics(bp, rx);
- tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
- cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
- if (max_cp < cp)
+ tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
+ hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < hwr.cp)
return -ENOMEM;
- stats = cp;
+ hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- cp += bnxt_get_ulp_msix_num(bp);
- stats += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.grp = rx;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- stats, vnics);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.tx + rx;
+ return bnxt_hwrm_check_rings(bp, &hwr);
}
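The completion-ring math in bnxt_check_rings() above: tx_cp is the completion rings needed for TX, and with shared rings TX and RX reuse the same completion rings (max of the two), otherwise they add. A worked example:

#include <stdio.h>

static int calc_cp(int tx_cp, int rx, int shared)
{
	return shared ? (tx_cp > rx ? tx_cp : rx) : tx_cp + rx;
}

int main(void)
{
	/* 8 TX completion rings, 8 RX rings */
	printf("shared: %d, separate: %d\n",
	       calc_cp(8, 8, 1), calc_cp(8, 8, 0)); /* 8 and 16 */
	return 0;
}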
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13232,6 +13507,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
+ /* FW may be unresponsive after FLR. FLR must complete within 100 msec
+ * so wait before continuing with recovery.
+ */
+ if (rc)
+ msleep(100);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
rc = bnxt_try_recover_fw(bp);
@@ -13747,6 +14027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
eth_hw_addr_set(dev, addr->sa_data);
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13784,7 +14065,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
- if (netdev_get_num_tc(dev) == tc)
+ if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -13802,9 +14083,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
+ bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
+ bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
@@ -13867,7 +14150,7 @@ u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
if (skb)
return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
}
@@ -13878,7 +14161,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
int bit_id;
spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
if (bit_id < 0) {
spin_unlock_bh(&bp->ntp_fltr_lock);
return -ENOMEM;
@@ -13890,6 +14173,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
head = &bp->ntp_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
return 0;
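The sw_id allocation above now draws from a bitmap sized by bp->max_fltr instead of the fixed BNXT_MAX_FLTR; an order-0 region from bitmap_find_free_region() is a single bit, and a full table maps to -ENOMEM. A user-space stand-in for the find-and-set step; the 64-entry table size is made up for the demo:

#include <stdio.h>

#define MAX_FLTR 64
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long bmap[MAX_FLTR / BITS_PER_WORD + 1];

static int find_free_id(void)
{
	int i;

	for (i = 0; i < MAX_FLTR; i++) {
		unsigned long *w = &bmap[i / BITS_PER_WORD];
		unsigned long bit = 1UL << (i % BITS_PER_WORD);

		if (!(*w & bit)) {
			*w |= bit;	/* claim the ID */
			return i;
		}
	}
	return -1;	/* table full: maps to -ENOMEM above */
}

int main(void)
{
	printf("%d %d %d\n", find_free_id(), find_free_id(), find_free_id());
	return 0;	/* prints 0 1 2 */
}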
@@ -13898,45 +14182,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
+ struct bnxt_flow_masks *masks1 = &f1->fmasks;
+ struct bnxt_flow_masks *masks2 = &f2->fmasks;
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (f1->ntuple_flags != f2->ntuple_flags)
- return false;
-
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
+ masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
return false;
} else {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- memcmp(&keys1->addrs.v6addrs.src,
- &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src))) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- memcmp(&keys1->addrs.v6addrs.dst,
- &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst))))
+ if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
+ &masks2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
+ &masks2->addrs.v6addrs.dst))
return false;
}
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
- keys1->ports.src != keys2->ports.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
- keys1->ports.dst != keys2->ports.dst))
- return false;
-
- if (keys1->control.flags == keys2->control.flags &&
- f1->l2_fltr == f2->l2_fltr)
- return true;
-
- return false;
+ return keys1->ports.src == keys2->ports.src &&
+ masks1->ports.src == masks2->ports.src &&
+ keys1->ports.dst == keys2->ports.dst &&
+ masks1->ports.dst == masks2->ports.dst &&
+ keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr;
}
struct bnxt_ntuple_filter *
@@ -13967,7 +14245,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u32 flags;
if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
} else {
struct bnxt_l2_key key;
@@ -14001,10 +14279,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
- bp->hwrm_spec_code < 0x10601) {
- rc = -EPROTONOSUPPORT;
- goto err_free;
+ new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
}
flags = fkeys->control.flags;
if (((flags & FLOW_DIS_ENCAPSULATION) &&
@@ -14012,9 +14293,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
-
new_fltr->l2_fltr = l2_fltr;
- new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
@@ -14049,6 +14328,7 @@ void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count--;
spin_unlock_bh(&bp->ntp_fltr_lock);
bnxt_del_l2_filter(bp, fltr->l2_fltr);
@@ -14243,6 +14523,70 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bridge_setlink = bnxt_bridge_setlink,
};
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_cp_ring_info *cpr;
+ u64 *sw;
+
+ cpr = &bp->bnapi[i]->cp_ring;
+ sw = cpr->stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+ stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_napi *bnapi;
+ u64 *sw;
+
+ bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+ sw = bnapi->cp_ring.stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ rx->packets = bp->net_stats_prev.rx_packets;
+ rx->bytes = bp->net_stats_prev.rx_bytes;
+ rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+ tx->packets = bp->net_stats_prev.tx_packets;
+ tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+ .get_queue_stats_rx = bnxt_get_queue_stats_rx,
+ .get_queue_stats_tx = bnxt_get_queue_stats_tx,
+ .get_base_stats = bnxt_get_base_stats,
+};
+
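The new netdev_stat_ops callbacks above report per-queue totals by summing the unicast, multicast and broadcast ring counters, while bnxt_get_base_stats() carries counters accumulated before the current queue set. A stand-alone sketch of the RX/TX aggregation; the struct layout is illustrative:

#include <stdint.h>
#include <stdio.h>

struct ring_sw_stats {
	uint64_t ucast_pkts, mcast_pkts, bcast_pkts;
	uint64_t ucast_bytes, mcast_bytes, bcast_bytes;
};

static void fill_queue_stats(const struct ring_sw_stats *sw,
			     uint64_t *packets, uint64_t *bytes)
{
	/* per-queue total = unicast + multicast + broadcast */
	*packets = sw->ucast_pkts + sw->mcast_pkts + sw->bcast_pkts;
	*bytes = sw->ucast_bytes + sw->mcast_bytes + sw->bcast_bytes;
}

int main(void)
{
	struct ring_sw_stats sw = { 100, 10, 1, 64000, 6400, 640 };
	uint64_t pkts, bytes;

	fill_queue_stats(&sw, &pkts, &bytes);
	printf("%llu pkts, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}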
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -14648,6 +14992,7 @@ void bnxt_print_device_info(struct bnxt *bp)
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bnxt_hw_resc *hw_resc;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -14689,6 +15034,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_free;
dev->netdev_ops = &bnxt_netdev_ops;
+ dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
@@ -14806,6 +15152,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ hw_resc = &bp->hw_resc;
+ bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNXT_L2_FLTR_MAX_FLTR;
+ /* Older firmware may not report these filters properly */
+ if (bp->max_fltr < BNXT_MAX_FLTR)
+ bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
@@ -14858,6 +15210,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_dl;
+ INIT_LIST_HEAD(&bp->usr_fltr_list);
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index b8ef1717cb65..dd849e715c9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1213,6 +1213,9 @@ struct bnxt_ring_grp_info {
u16 cp_fw_ring_id;
};
+#define BNXT_VNIC_DEFAULT 0
+#define BNXT_VNIC_NTUPLE 1
+
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
#define BNXT_MAX_CTX_PER_VNIC 8
@@ -1252,11 +1255,24 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
+#define BNXT_VNIC_NTUPLE_FLAG 0x20
+};
+
+struct bnxt_hw_rings {
+ int tx;
+ int rx;
+ int grp;
+ int cp;
+ int cp_p5;
+ int stat;
+ int vnic;
+ int rss_ctx;
};
struct bnxt_hw_resc {
u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
u16 min_cp_rings;
u16 max_cp_rings;
u16 resv_cp_rings;
@@ -1281,6 +1297,12 @@ struct bnxt_hw_resc {
u16 max_nqs;
u16 max_irqs;
u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -1315,12 +1337,6 @@ struct bnxt_pf_info {
u16 active_vfs;
u16 registered_vfs;
u16 max_vfs;
- u32 max_encap_records;
- u32 max_decap_records;
- u32 max_tx_em_flows;
- u32 max_tx_wm_flows;
- u32 max_rx_em_flows;
- u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
u8 vf_resv_strategy;
@@ -1334,6 +1350,7 @@ struct bnxt_pf_info {
struct bnxt_filter_base {
struct hlist_node hash;
+ struct list_head list;
__le64 filter_id;
u8 type;
#define BNXT_FLTR_TYPE_NTUPLE 1
@@ -1355,19 +1372,21 @@ struct bnxt_filter_base {
struct rcu_head rcu;
};
+struct bnxt_flow_masks {
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL;
+
struct bnxt_ntuple_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct flow_keys fkeys;
+ struct bnxt_flow_masks fmasks;
struct bnxt_l2_filter *l2_fltr;
- u32 ntuple_flags;
-#define BNXT_NTUPLE_MATCH_SRC_IP 1
-#define BNXT_NTUPLE_MATCH_DST_IP 2
-#define BNXT_NTUPLE_MATCH_SRC_PORT 4
-#define BNXT_NTUPLE_MATCH_DST_PORT 8
-#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
- BNXT_NTUPLE_MATCH_DST_IP | \
- BNXT_NTUPLE_MATCH_SRC_PORT | \
- BNXT_NTUPLE_MATCH_DST_PORT)
u32 flow_id;
};
@@ -1394,6 +1413,7 @@ struct bnxt_ipv6_tuple {
#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
struct bnxt_l2_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct bnxt_l2_key l2_key;
atomic_t refcnt;
@@ -2217,6 +2237,14 @@ struct bnxt {
#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
+#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(4)
+#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
+#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
+#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
u16 max_mtu;
u8 max_tc;
@@ -2225,6 +2253,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 num_tc;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2300,12 +2329,17 @@ struct bnxt {
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
#define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
+ #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
+#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
+ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2427,6 +2461,7 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ int max_fltr;
#define BNXT_L2_FLTR_MAX_FLTR 1024
#define BNXT_L2_FLTR_HASH_SIZE 32
@@ -2436,12 +2471,14 @@ struct bnxt {
u32 hash_seed;
u64 toeplitz_prefix;
+ struct list_head usr_fltr_list;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
struct mutex link_lock;
struct bnxt_link_info link_info;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
@@ -2640,10 +2677,16 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags);
int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 63e067038385..0dbb880a7aa0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
- int tc = netdev_get_num_tc(bp->dev);
+ int tc = bp->num_tc;
if (!tc)
tc = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 27b983c0a8a9..1d240a27455a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -884,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@@ -944,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -968,6 +968,7 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1058,11 +1059,17 @@ static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
+ u32 count;
+
cmd->data = bp->ntp_fltr_count;
rcu_read_lock();
+ count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
+ cmd->rule_cnt);
cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
- rule_locs, 0, cmd->rule_cnt);
+ rule_locs, count,
+ cmd->rule_cnt);
rcu_read_unlock();
return 0;
@@ -1074,13 +1081,44 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
int rc = -EINVAL;
- if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ if (fs->location >= bp->max_fltr)
return rc;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE,
+ fs->location);
+ if (fltr_base) {
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_l2_key *l2_key;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ l2_key = &l2_fltr->l2_key;
+ fs->flow_type = ETHER_FLOW;
+ ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
+ eth_broadcast_addr(m_ether->h_dest);
+ if (l2_key->vlan) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ fs->flow_type |= FLOW_EXT;
+ m_ext->vlan_tci = htons(0xfff);
+ h_ext->vlan_tci = htons(l2_key->vlan);
+ }
+ if (fltr_base->flags & BNXT_ACT_RING_DST)
+ fs->ring_cookie = fltr_base->rxq;
+ if (fltr_base->flags & BNXT_ACT_FUNC_DST)
+ fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ rcu_read_unlock();
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
fs->location);
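
When an L2 rule targets a VF, the reported ring_cookie packs the VF number into the upper bytes as defined by the ethtool uapi; a zero VF field means "no VF", which is why the driver stores vf_idx + 1. A sketch of the decode side, using the standard <linux/ethtool.h> masks that the ethtool_get_flow_spec_ring*() helpers wrap:

	/* Cookie layout: bits 0..31 select a ring, bits 32..39 a VF. */
	static void decode_ring_cookie(u64 cookie, u32 *ring, u8 *vf)
	{
		*ring = cookie & ETHTOOL_RX_FLOW_SPEC_RING;
		*vf = (cookie & ETHTOOL_RX_FLOW_SPEC_RING_VF) >>
		      ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;	/* 0 = none */
	}
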
@@ -1091,59 +1129,74 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
fkeys = &fltr->fkeys;
+ fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP)
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
+ else
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
+ fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V4_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
+ if (fs->flow_type == TCP_V4_FLOW ||
+ fs->flow_type == UDP_V4_FLOW) {
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IPV6_USER_FLOW;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
+ else
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
+ fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V6_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
+ fmasks->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
+ fmasks->addrs.v6addrs.dst;
+ if (fs->flow_type == TCP_V6_FLOW ||
+ fs->flow_type == UDP_V6_FLOW) {
fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
}
}
- fs->ring_cookie = fltr->base.rxq;
+ if (fltr->base.flags & BNXT_ACT_DROP)
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fs->ring_cookie = fltr->base.rxq;
rc = 0;
fltr_err:
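
The larger rework in this hunk replaces the per-field BNXT_NTUPLE_MATCH_* booleans with masks stored verbatim in the new bnxt_flow_masks, so a field is no longer limited to "fully masked or ignored"; arbitrary partial masks (a /24 source subnet, for instance) become representable, and the query path above simply echoes the stored masks back. The matching semantics this implies, as a one-line sketch:

	/* A field matches when packet and key agree on every masked bit;
	 * an all-zero mask is a wildcard, an all-ones mask an exact match.
	 */
	static bool field_matches(__be32 pkt, __be32 key, __be32 mask)
	{
		return !((pkt ^ key) & mask);
	}
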
@@ -1152,17 +1205,78 @@ fltr_err:
return rc;
}
-#define IPV4_ALL_MASK ((__force __be32)~0)
-#define L4_PORT_ALL_MASK ((__force __be16)~0)
+static int bnxt_add_l2_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
+ u16 vnic_id;
+ u8 flags;
+ int rc;
+
+ if (BNXT_CHIP_P5_PLUS(bp))
+ return -EOPNOTSUPP;
-static bool ipv6_mask_is_full(__be32 mask[4])
+ if (!is_broadcast_ether_addr(m_ether->h_dest))
+ return -EINVAL;
+ ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
+ key.vlan = 0;
+ if (fs->flow_type & FLOW_EXT) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
+ return -EINVAL;
+ key.vlan = ntohs(h_ext->vlan_tci);
+ }
+
+ if (vf) {
+ flags = BNXT_ACT_FUNC_DST;
+ vnic_id = 0xffff;
+ vf--;
+ } else {
+ flags = BNXT_ACT_RING_DST;
+ vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
+ }
+ fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = vnic_id;
+ fltr->base.rxq = ring;
+ fltr->base.vf_idx = vf;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ fs->location = fltr->base.sw_id;
+ return rc;
+}
+
+static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
+ struct ethtool_usrip4_spec *ip_mask)
{
- return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+ if (ip_mask->l4_4_bytes || ip_mask->tos ||
+ ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
+ ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
+ return false;
+ return true;
}
-static bool ipv6_mask_is_zero(__be32 mask[4])
+static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
+ struct ethtool_usrip6_spec *ip_mask)
{
- return !(mask[0] | mask[1] | mask[2] | mask[3]);
+ if (ip_mask->l4_4_bytes || ip_mask->tclass ||
+ ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->l4_proto != IPPROTO_RAW &&
+ ip_spec->l4_proto != IPPROTO_ICMPV6))
+ return false;
+ return true;
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
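
The two verify helpers above gate the new user-defined IP flow support: a rule is accepted only if the IP protocol field is fully masked (BNXT_IP_PROTO_FULL_MASK), names ICMP/ICMPv6 or IPPROTO_RAW as that protocol, and leaves tos/tclass and the l4_4_bytes area unmasked. A sketch of how userspace would shape a matching IPv4 ICMP drop rule through the standard uapi (ioctl plumbing omitted):

	struct ethtool_rx_flow_spec fs = {
		.flow_type = IP_USER_FLOW,
		.h_u = { .usr_ip4_spec = {
			.ip_ver	= ETH_RX_NFC_IP4,
			.proto	= IPPROTO_ICMP,
		} },
		.m_u = { .usr_ip4_spec = {
			.proto	= 0xff,	/* protocol fully masked */
		} },
		.ring_cookie = RX_CLS_FLOW_DISC,	/* drop action */
		.location = RX_CLS_LOC_ANY,
	};
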
@@ -1172,6 +1286,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
struct bnxt_ntuple_filter *new_fltr, *fltr;
struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_flow_masks *fmasks;
u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
u32 idx;
@@ -1183,17 +1298,42 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
+ if (flow_type == IP_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec))
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_type == IPV6_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec))
+ return -EOPNOTSUPP;
+ }
+
new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
if (!new_fltr)
return -ENOMEM;
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
new_fltr->l2_fltr = l2_fltr;
+ fmasks = &new_fltr->fmasks;
fkeys = &new_fltr->fkeys;
rc = -EOPNOTSUPP;
switch (flow_type) {
+ case IP_USER_FLOW: {
+ struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
+
+ fkeys->basic.ip_proto = ip_spec->proto;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ break;
+ }
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
@@ -1203,32 +1343,26 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (flow_type == UDP_V4_FLOW)
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
+ break;
+ }
+ case IPV6_USER_FLOW: {
+ struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- if (ip_mask->ip4src == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.src = ip_spec->ip4src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (ip_mask->ip4src) {
- goto ntuple_err;
- }
- if (ip_mask->ip4dst == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (ip_mask->ip4dst) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->basic.ip_proto = ip_spec->l4_proto;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
break;
}
case TCP_V6_FLOW:
@@ -1241,40 +1375,21 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
- if (ipv6_mask_is_full(ip_mask->ip6src)) {
- fkeys->addrs.v6addrs.src =
- *(struct in6_addr *)&ip_spec->ip6src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
- goto ntuple_err;
- }
- if (ipv6_mask_is_full(ip_mask->ip6dst)) {
- fkeys->addrs.v6addrs.dst =
- *(struct in6_addr *)&ip_spec->ip6dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
break;
}
default:
rc = -EOPNOTSUPP;
goto ntuple_err;
}
- if (!new_fltr->ntuple_flags)
+ if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
goto ntuple_err;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
@@ -1287,8 +1402,11 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
}
rcu_read_unlock();
- new_fltr->base.rxq = ring;
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ new_fltr->base.flags |= BNXT_ACT_DROP;
+ else
+ new_fltr->base.rxq = ring;
__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
@@ -1321,6 +1439,18 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (fs->location != RX_CLS_LOC_ANY)
return -EINVAL;
+ flow_type = fs->flow_type;
+ if ((flow_type == IP_USER_FLOW ||
+ flow_type == IPV6_USER_FLOW) &&
+ !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
+ return -EOPNOTSUPP;
+ if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
+ return bnxt_add_ntuple_cls_rule(bp, fs);
+
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
if (BNXT_VF(bp) && vf)
@@ -1330,12 +1460,8 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (!vf && ring >= bp->rx_nr_rings)
return -EINVAL;
- flow_type = fs->flow_type;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
- return -EINVAL;
- flow_type &= ~FLOW_EXT;
if (flow_type == ETHER_FLOW)
- rc = -EOPNOTSUPP;
+ rc = bnxt_add_l2_cls_rule(bp, fs);
else
rc = bnxt_add_ntuple_cls_rule(bp, fs);
return rc;
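
With this hunk the insertion path is fully dispatched: user flow types are rejected early when the firmware lacks BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO, drop rules (ring_cookie == RX_CLS_FLOW_DISC) go down the ntuple path unless they are pure L2 rules, and ETHER_FLOW finally gains a real handler instead of -EOPNOTSUPP. The drop action round-trips through the new BNXT_ACT_DROP flag; paraphrased here, not the literal driver code:

	/* insert: map the uapi discard cookie to a drop flag */
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		fltr->base.flags |= BNXT_ACT_DROP;
	else
		fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie);

	/* query: map the flag back so ethtool shows the rule as a drop */
	fs->ring_cookie = (fltr->base.flags & BNXT_ACT_DROP) ?
			  RX_CLS_FLOW_DISC : fltr->base.rxq;
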
@@ -1346,11 +1472,22 @@ static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ u32 id = fs->location;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, id);
+ if (fltr_base) {
+ struct bnxt_l2_filter *l2_fltr;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ rcu_read_unlock();
+ bnxt_hwrm_l2_filter_free(bp, l2_fltr);
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
- BNXT_NTP_FLTR_HASH_SIZE,
- fs->location);
+ BNXT_NTP_FLTR_HASH_SIZE, id);
if (!fltr_base) {
rcu_read_unlock();
return -ENOENT;
@@ -1396,8 +1533,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
@@ -1415,8 +1558,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -1463,6 +1612,24 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ } else if (cmd->flow_type == AH_ESP_V4_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
+ } else if (cmd->flow_type == AH_ESP_V6_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
} else if (tuple == 4) {
return -EINVAL;
}
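
For AH_ESP flow types, "4-tuple" hashing means source/destination IP plus the 32-bit SPI, which ethtool reports through the two RXH_L4 byte flags; because one ethtool flow type covers both AH and ESP, enabling it requires both firmware capabilities. The gate above, restated without the double negation:

	/* tuple == 4 is only allowed when both caps are present */
	static bool can_hash_spi_v4(struct bnxt *bp)
	{
		return (bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) &&
		       (bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP);
	}
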
@@ -1521,7 +1688,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
+ cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1574,7 +1741,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}
@@ -1595,7 +1763,7 @@ static int bnxt_get_rxfh(struct net_device *dev,
if (!bp->vnic_info)
return 0;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
@@ -1618,8 +1786,10 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key)
- return -EOPNOTSUPP;
+ if (rxfh->key) {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
@@ -1630,7 +1800,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
}
-
+ bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -1750,31 +1920,21 @@ static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+/* TODO: support 25GB, 40GB, 50GB with different cable type */
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
- u32 speed_mask = 0;
+ linkmode_zero(mode);
- /* TODO: support 25GB, 40GB, 50GB with different cable type */
- /* set the advertised speeds */
if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
- speed_mask |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
- speed_mask |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
- speed_mask |= ADVERTISED_2500baseX_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
- speed_mask |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= ADVERTISED_40000baseCR4_Full;
-
- if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
- speed_mask |= ADVERTISED_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_TX)
- speed_mask |= ADVERTISED_Asym_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_RX)
- speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- return speed_mask;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}
enum bnxt_media_type {
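
_bnxt_fw_to_linkmode() converts firmware speed bits straight into an ethtool link-mode bitmap instead of the legacy u32 ADVERTISED_* mask, which is what the keee conversion later in this file consumes; pause handling is dropped from the speed translation rather than moved, since linkmode callers manage pause bits separately. Minimal usage of the standard linkmode helpers, as a sketch:

	static bool sketch_wants_10g(void)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mode);

		linkmode_zero(mode);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
		return linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
					 mode);
	}
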
@@ -2642,23 +2802,22 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
return 0;
}
-u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
{
u16 fw_speed_mask = 0;
- /* only support autoneg at speed 100, 1000, and 10000 */
- if (advertising & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
- }
- if (advertising & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
- }
- if (advertising & ADVERTISED_10000baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
- if (advertising & ADVERTISED_40000baseCR4_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
return fw_speed_mask;
@@ -3883,12 +4042,13 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
-static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
struct bnxt *bp = netdev_priv(dev);
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -3898,7 +4058,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
- advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!edata->eee_enabled)
goto eee_ok;
@@ -3918,16 +4078,15 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
}
- if (!edata->advertised) {
- edata->advertised = advertising & eee->supported;
- } else if (edata->advertised & ~advertising) {
- netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
- edata->advertised, advertising);
+ if (linkmode_empty(edata->advertised)) {
+ linkmode_and(edata->advertised, advertising, eee->supported);
+ } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
+ netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
rc = -EINVAL;
goto eee_exit;
}
- eee->advertised = edata->advertised;
+ linkmode_copy(eee->advertised, edata->advertised);
eee->tx_lpi_enabled = edata->tx_lpi_enabled;
eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
@@ -3941,7 +4100,7 @@ eee_exit:
return rc;
}
-static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3953,12 +4112,12 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Preserve tx_lpi_timer so that the last value will be used
* by default when it is re-enabled.
*/
- edata->advertised = 0;
+ linkmode_zero(edata->advertised);
edata->tx_lpi_enabled = 0;
}
if (!bp->eee.eee_active)
- edata->lp_advertised = 0;
+ linkmode_zero(edata->lp_advertised);
return 0;
}
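
The common thread across the bnxt, bcmgenet, tg3, tsnep, enetc, fec and gianfar hunks in this patch is the ethtool_eee to ethtool_keee migration: the legacy structure carried supported/advertised/lp_advertised as u32 masks, while the keee variant carries full link-mode bitmaps, so assignments and comparisons become linkmode_copy()/linkmode_equal() instead of = and !=. An abridged sketch of the new shape (field list trimmed; include/linux/ethtool.h is authoritative):

	/* Sketch, not the full definition. */
	struct ethtool_keee_sketch {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
		__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
		u32	tx_lpi_timer;
		bool	eee_enabled;
		bool	eee_active;
		bool	tx_lpi_enabled;
	};
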
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a8ecef8ab82c..e2ee030237d4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -43,12 +43,14 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
+#define BNXT_IP_PROTO_FULL_MASK 0xFF
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
-u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds);
u32 bnxt_fw_to_ethtool_speed(u16);
-u16 bnxt_get_fw_auto_link_speeds(u32);
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index adad188e38b8..cc07660330f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -684,7 +684,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
} else {
- netdev_WARN_ONCE(bp->dev,
+ netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c2b25fc623ec..4079538bc310 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
- tc = netdev_get_num_tc(dev);
+ tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d7ae71287b1..7396e2823e32 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1313,14 +1313,13 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
}
priv->eee.eee_enabled = enable;
- priv->eee.eee_active = enable;
priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
-static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1328,18 +1327,17 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
+ bool active;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1352,9 +1350,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false, false);
} else {
- p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
+ active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
+ bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1985c0ec4da2..7523b60b3c1c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -645,7 +645,7 @@ struct bcmgenet_priv {
struct bcmgenet_mib_counters mib;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define GENET_IO_MACRO(name, offset) \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 97ea76d443ab..9ada89355747 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -30,6 +30,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
+ bool active;
/* speed */
if (phydev->speed == SPEED_1000)
@@ -88,9 +89,9 @@ static void bcmgenet_mac_config(struct net_device *dev)
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
- priv->eee.eee_enabled && priv->eee.eee_active,
+ priv->eee.eee_enabled && active,
priv->eee.tx_lpi_enabled);
}
@@ -475,6 +476,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppd.wait_func = bcmgenet_mii_wait;
ppd.wait_func_data = priv;
ppd.bus_name = "bcmgenet MII bus";
+ /* Pass a reference to our "main" clock which is used for MDIO
+ * transfers
+ */
+ ppd.clk = priv->clk;
/* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD
* and is 2 * 32-bits word long, 8 bytes total.
@@ -673,7 +678,5 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
- clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
- clk_disable_unprepare(priv->clk);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 04964bbe08cf..eee759054aad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2338,10 +2338,10 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
{
u32 val;
- struct ethtool_eee *dest = &tp->eee;
+ struct ethtool_keee *dest = &tp->eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return;
@@ -2362,13 +2362,13 @@ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
/* Pull lp advertised settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
return;
- dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
/* Pull advertised and eee_enabled settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
return;
dest->eee_enabled = !!val;
- dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
/* Pull tx_lpi_enabled */
val = tr32(TG3_CPMU_EEE_MODE);
@@ -4354,23 +4354,12 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!err) {
u32 err2;
- val = 0;
- /* Advertise 100-BaseTX EEE ability */
- if (advertise & ADVERTISED_100baseT_Full)
- val |= MDIO_AN_EEE_ADV_100TX;
- /* Advertise 1000-BaseT EEE ability */
- if (advertise & ADVERTISED_1000baseT_Full)
- val |= MDIO_AN_EEE_ADV_1000T;
-
- if (!tp->eee.eee_enabled) {
+ if (!tp->eee.eee_enabled)
val = 0;
- tp->eee.advertised = 0;
- } else {
- tp->eee.advertised = advertise &
- (ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full);
- }
+ else
+ val = ethtool_adv_to_mmd_eee_adv_t(advertise);
+ mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
if (err)
val = 0;
@@ -4618,7 +4607,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee = {};
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return true;
@@ -4626,13 +4615,13 @@ static bool tg3_phy_eee_config_ok(struct tg3 *tp)
tg3_eee_pull_config(tp, &eee);
if (tp->eee.eee_enabled) {
- if (tp->eee.advertised != eee.advertised ||
+ if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
return false;
} else {
/* EEE is disabled but we're advertising */
- if (eee.advertised)
+ if (!linkmode_empty(eee.advertised))
return false;
}
@@ -14180,7 +14169,7 @@ static int tg3_set_coalesce(struct net_device *dev,
return 0;
}
-static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14189,7 +14178,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (edata->advertised != tp->eee.advertised) {
+ if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
netdev_warn(tp->dev,
"Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
@@ -14202,7 +14191,9 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EINVAL;
}
- tp->eee = *edata;
+ tp->eee.eee_enabled = edata->eee_enabled;
+ tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
+ tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
tg3_warn_mgmt_link_flap(tp);
@@ -14217,7 +14208,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -15655,10 +15646,13 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
- tp->eee.supported = SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full;
- tp->eee.advertised = ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full;
+ linkmode_zero(tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_copy(tp->eee.advertised, tp->eee.supported);
+
tp->eee.eee_enabled = 1;
tp->eee.tx_lpi_enabled = 1;
tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5016475e5005..cf1b2b123c7e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3419,7 +3419,7 @@ struct tg3 {
unsigned int irq_cnt;
struct ethtool_coalesce coal;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
/* firmware info */
const char *fw_needed;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 31191b520b58..c32174484a96 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
*/
static void
-bnad_tx_cleanup(struct delayed_work *work)
+bnad_tx_cleanup(struct work_struct *work)
{
struct bnad_tx_info *tx_info =
- container_of(work, struct bnad_tx_info, tx_cleanup_work);
+ container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
struct bnad *bnad = NULL;
struct bna_tcb *tcb;
unsigned long flags;
@@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
* Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
*/
static void
-bnad_rx_cleanup(void *work)
+bnad_rx_cleanup(struct work_struct *work)
{
struct bnad_rx_info *rx_info =
container_of(work, struct bnad_rx_info, rx_cleanup_work);
@@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
}
tx_info->tx = tx;
- INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
- (work_func_t)bnad_tx_cleanup);
+ INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
/* Register ISR for the Tx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
rx_info->rx = rx;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- INIT_WORK(&rx_info->rx_cleanup_work,
- (work_func_t)(bnad_rx_cleanup));
+ INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
/*
* Init NAPI, so that state is set to NAPI_STATE_SCHED,
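
The bnad change fixes a miscast work handler: INIT_DELAYED_WORK() hands the callback the embedded struct work_struct, not the struct delayed_work, so casting a delayed_work-typed function to work_func_t produces a mismatched indirect call (which control-flow-integrity checking rejects). The handler instead takes work_struct * and walks back out through the .work member; generic sketch:

	struct my_info {
		struct delayed_work cleanup_work;
	};

	static void my_cleanup(struct work_struct *work)
	{
		/* equivalently: to_delayed_work(work), then container_of */
		struct my_info *info =
			container_of(work, struct my_info, cleanup_work.work);

		/* ... use info ... */
	}

	/* registered as: INIT_DELAYED_WORK(&info->cleanup_work, my_cleanup); */
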
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 9cc6303c82ff..f38d31bfab1b 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -27,6 +27,7 @@
#include "octeon_network.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");
/* OOM task polling interval */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b5ff2e1a9975..49d5808b7d11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -804,20 +804,6 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
}
/**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- * @chip_ver: chip version
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
- unsigned int chip_ver)
-{
- return flits_to_desc(calc_tx_flits(skb, chip_ver));
-}
-
-/**
* cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1c2a540db13d..1f495cfd7959 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
module_platform_driver(ep93xx_eth_driver);
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 37bd38d772e8..d266a87297a5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -872,7 +872,7 @@ error:
return NETDEV_TX_OK;
}
-/* dev_base_lock rwlock held, nominally process context */
+/* rcu_read_lock potentially held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *net_stats)
{
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 20fcb20b42ed..66b577835338 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
tlv->type = htons(type);
tlv->length = htons(length);
- memcpy(tlv->value, value, length);
+ unsafe_memcpy(tlv->value, value, length,
+ /* Flexible array of flexible arrays */);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
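
The vnic_vic change swaps memcpy() for the kernel's unsafe_memcpy() annotation: each TLV ends in a flexible value[] array and the TLVs themselves sit back-to-back inside a larger variable-length buffer, so FORTIFY_SOURCE cannot derive a compile-time bound for the destination; the annotation records that the length is validated by the surrounding code instead. The layout that defeats the checker, sketched with invented names:

	/* TLVs packed consecutively inside a bigger flexible buffer. */
	struct tlv_sketch {
		__be16 type;
		__be16 length;
		u8 value[];	/* 'length' bytes follow immediately */
	};
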
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index c2c5c589a5e3..44af1d13d931 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -590,5 +590,6 @@ module_pci_driver(pci_driver);
module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+MODULE_DESCRIPTION("Beckhoff CX5020 EtherCAT Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index df40c720e7b2..4b15af6b7122 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -229,8 +229,10 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
* would delay a working loopback anyway, let's ensure that loopback
* is working immediately by setting link mode directly
*/
- if (!retval && enable)
+ if (!retval && enable) {
+ netif_carrier_on(adapter->netdev);
tsnep_set_link_mode(adapter);
+ }
return retval;
}
@@ -238,7 +240,7 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
- struct ethtool_eee ethtool_eee;
+ struct ethtool_keee ethtool_keee;
int retval;
retval = phy_connect_direct(adapter->netdev, adapter->phydev,
@@ -257,8 +259,8 @@ static int tsnep_phy_open(struct tsnep_adapter *adapter)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* disable EEE autoneg, EEE not supported by TSNEP */
- memset(&ethtool_eee, 0, sizeof(ethtool_eee));
- phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+ memset(&ethtool_keee, 0, sizeof(ethtool_keee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);
adapter->phydev->irq = PHY_MAC_INTERRUPT;
phy_start(adapter->phydev);
@@ -719,17 +721,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
struct xdp_buff *xdp,
- struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+ bool zc)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
bool xmit;
+ u32 type;
if (unlikely(!xdpf))
return false;
+ /* no page pool for zero copy */
+ if (zc)
+ type = TSNEP_TX_TYPE_XDP_NDO;
+ else
+ type = TSNEP_TX_TYPE_XDP_TX;
+
__netif_tx_lock(tx_nq, smp_processor_id());
- xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
/* Avoid transmit queue timeout since we share it with the slow path */
if (xmit)
@@ -1258,6 +1268,14 @@ static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
return desc_refilled;
}
+static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available)
+{
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+}
+
static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
struct xdp_buff *xdp, int *status,
struct netdev_queue *tx_nq, struct tsnep_tx *tx)
@@ -1273,7 +1291,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1323,7 +1341,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1485,7 +1503,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
xdp_prepare_buff(&xdp, page_address(entry->page),
XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
- length, false);
+ length - ETH_FCS_LEN, false);
consume = tsnep_xdp_run_prog(rx, prog, &xdp,
&xdp_status, tx_nq, tx);
@@ -1568,7 +1586,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
prefetch(entry->xdp->data);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
- xsk_buff_set_size(entry->xdp, length);
+ xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
/* RX metadata with timestamps is in front of actual data,
@@ -1619,10 +1637,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- if (desc_available)
- xsk_set_rx_need_wakeup(rx->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ tsnep_xsk_rx_need_wakeup(rx, desc_available);
return done;
}
@@ -1762,6 +1777,13 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
allocated--;
}
}
+
+ /* Set the need wakeup flag immediately if the ring is not filled
+ * completely; the first poll would be too late, as the need wakeup
+ * signal would be delayed for an indefinite time.
+ */
+ if (xsk_uses_need_wakeup(rx->xsk_pool))
+ tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx));
}
static bool tsnep_pending(struct tsnep_queue *queue)
@@ -2549,8 +2571,7 @@ static int tsnep_probe(struct platform_device *pdev)
mutex_init(&adapter->rxnfc_lock);
INIT_LIST_HEAD(&adapter->rxnfc_rules);
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
netdev->mem_start = io->start;
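
tsnep_probe() adopts devm_platform_get_and_ioremap_resource(), which combines platform_get_resource(pdev, IORESOURCE_MEM, index) with devm_ioremap_resource() and still returns the struct resource for callers that need the physical range (used just below for netdev->mem_start). Typical shape, as a sketch:

	static int sketch_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* res->start and resource_size(res) stay available here */
		return 0;
	}
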
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 07c2b701b5fa..9ebe751c1df0 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -661,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
module_platform_driver(nps_enet_driver);
MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index cffbf27c4656..9f07f4947b63 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2402,7 +2402,7 @@ static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
static int enetc_phylink_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
int err;
if (!priv->phylink) {
@@ -2418,7 +2418,7 @@ static int enetc_phylink_connect(struct net_device *ndev)
}
/* disable EEE autoneg, until ENETC driver supports it */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phylink_ethtool_set_eee(priv->phylink, &edata);
phylink_start(priv->phylink);
@@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a8fbcada6b01..a19cb2a786fd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -672,7 +672,7 @@ struct fec_enet_private {
unsigned int itr_clk_rate;
/* tx lpi eee mode */
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d42594f32275..d7693fdf640d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -85,8 +85,6 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
-/* Pause frame feild and FIFO threshold */
-#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
@@ -240,8 +238,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
/* FEC receive acceleration */
-#define FEC_RACC_IPDIS (1 << 1)
-#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_IPDIS BIT(1)
+#define FEC_RACC_PRODIS BIT(2)
#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
@@ -273,8 +271,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_BYTESWP BIT(8)
+/* FEC RCR bits definition */
+#define FEC_RCR_LOOP BIT(0)
+#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_MII BIT(2)
+#define FEC_RCR_PROMISC BIT(3)
+#define FEC_RCR_BC_REJ BIT(4)
+#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RMII BIT(8)
+#define FEC_RCR_10BASET BIT(9)
+/* TX WMARK bits */
+#define FEC_TXWMRK_STRFWD BIT(8)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -1062,7 +1075,7 @@ fec_restart(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
- u32 ecntl = 0x2; /* ETHEREN */
+ u32 ecntl = FEC_ECR_ETHEREN;
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1137,18 +1150,18 @@ fec_restart(struct net_device *ndev)
fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
rcntl |= (1 << 6);
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- rcntl |= (1 << 8);
+ rcntl |= FEC_RCR_RMII;
else
- rcntl &= ~(1 << 8);
+ rcntl &= ~FEC_RCR_RMII;
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
ecntl |= (1 << 5);
else if (ndev->phydev->speed == SPEED_100)
- rcntl &= ~(1 << 9);
+ rcntl &= ~FEC_RCR_10BASET;
else
- rcntl |= (1 << 9);
+ rcntl |= FEC_RCR_10BASET;
}
} else {
#ifdef FEC_MIIGSK_ENR
@@ -1181,7 +1194,7 @@ fec_restart(struct net_device *ndev)
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
ndev->phydev && ndev->phydev->pause)) {
- rcntl |= FEC_ENET_FCE;
+ rcntl |= FEC_RCR_FLOWCTL;
/* set FIFO threshold parameter to reduce overrun */
writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
@@ -1192,7 +1205,7 @@ fec_restart(struct net_device *ndev)
/* OPD */
writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
} else {
- rcntl &= ~FEC_ENET_FCE;
+ rcntl &= ~FEC_RCR_FLOWCTL;
}
#endif /* !defined(CONFIG_M5272) */
@@ -1207,13 +1220,13 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
- ecntl |= (1 << 8);
+ ecntl |= FEC_ECR_BYTESWP;
/* enable ENET store and forward mode */
- writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1312,7 +1325,7 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
u32 val;
/* We cannot expect a graceful transmit stop without link !!! */
@@ -1331,7 +1344,7 @@ fec_stop(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
- writel(1, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
udelay(10);
}
} else {
@@ -1345,12 +1358,11 @@ fec_stop(struct net_device *ndev)
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
}
-
static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
@@ -2005,6 +2017,37 @@ static int fec_get_mac(struct net_device *ndev)
/*
* Phy section
*/
+
+/* The LPI sleep Ts count is based on the tx clk (clk_ref).
+ * The LPI sleep cnt value = X us / cycle_ns.
+ */
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_keee *p = &fep->eee;
+ unsigned int sleep_cycle, wake_cycle;
+
+ if (enable) {
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
+
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
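
fec_enet_us_to_tx_cycle() turns a microsecond LPI timer into clk_ref cycles, cycles = us * rate / 10^6, written as us * (rate / 1000) / 1000 so the intermediate product fits comfortably in 32 bits. A quick worked check:

	/* clk_ref_rate = 125000000 (125 MHz), us = 1000:
	 * 1000 * (125000000 / 1000) / 1000 = 125000 cycles,
	 * i.e. 1 ms at 8 ns per cycle.
	 */
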
@@ -2036,6 +2079,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
/* if any of the above changed restart the FEC */
if (status_change) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
@@ -2043,8 +2087,11 @@ static void fec_enet_adjust_link(struct net_device *ndev)
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_stop(ndev);
@@ -2401,6 +2448,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
else
phy_set_max_speed(phy_dev, 100);
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ phy_support_eee(phy_dev);
+
fep->link = 0;
fep->full_duplex = 0;
@@ -3107,50 +3157,11 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-/* LPI Sleep Ts count base on tx clk (clk_ref).
- * The lpi sleep cnt value = X us / (cycle_ns).
- */
-static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- return us * (fep->clk_ref_rate / 1000) / 1000;
-}
-
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- unsigned int sleep_cycle, wake_cycle;
- int ret = 0;
-
- if (enable) {
- ret = phy_init_eee(ndev->phydev, false);
- if (ret)
- return ret;
-
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
- wake_cycle = sleep_cycle;
- } else {
- sleep_cycle = 0;
- wake_cycle = 0;
- }
-
- p->tx_lpi_enabled = enable;
- p->eee_enabled = enable;
- p->eee_active = enable;
-
- writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
- writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
-
- return 0;
-}
-
static int
-fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3158,20 +3169,16 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->eee_enabled = p->eee_enabled;
- edata->eee_active = p->eee_active;
edata->tx_lpi_timer = p->tx_lpi_timer;
- edata->tx_lpi_enabled = p->tx_lpi_enabled;
return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int
-fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- int ret = 0;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3181,15 +3188,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
p->tx_lpi_timer = edata->tx_lpi_timer;
- if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
- !edata->tx_lpi_timer)
- ret = fec_enet_eee_mode_set(ndev, false);
- else
- ret = fec_enet_eee_mode_set(ndev, true);
-
- if (ret)
- return ret;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
@@ -4769,4 +4767,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9ba15d3183d7..758535adc9ff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev,
unsigned long capabilities;
unsigned long *supported;
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
mac_dev->phylink_ops = &memac_mac_ops;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
@@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev,
* (and therefore that xfi_pcs cannot be set). If we are defaulting to
* XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
*/
- if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
memac->xfi_pcs = pcs;
else
memac->sgmii_pcs = pcs;
@@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
- /* The internal connection to the serdes is XGMII, but this isn't
- * really correct for the phy mode (which is the external connection).
- * However, this is how all older device trees say that they want
- * 10GBASE-R (aka XFI), so just convert it for them.
- */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
-
/* TODO: The following interface modes are supported by (some) hardware
* but not by this driver:
* - 1000BASE-KX
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 70dd982a5edc..026f7270a54d 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
module_platform_driver(fsl_pq_mdio_driver);
+MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e3dfbd7a4236..a811238c018d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1649,7 +1649,7 @@ static int init_phy(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
linkmode_set_bit_array(phy_10_100_features_array,
ARRAY_SIZE(phy_10_100_features_array),
@@ -1681,7 +1681,7 @@ static int init_phy(struct net_device *dev)
phy_support_asym_pause(phydev);
/* disable EEE autoneg, EEE not supported by eTSEC */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phy_ethtool_set_eee(phydev, &edata);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index fd290f3ad6ec..4814c96d5fe7 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
@@ -51,12 +52,16 @@
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+#define GVE_MAX_RX_BUFFER_SIZE 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
+
#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048
@@ -150,6 +155,11 @@ struct gve_rx_compl_queue_dqo {
u32 mask; /* Mask for indices to the size of the ring */
};
+struct gve_header_buf {
+ u8 *data;
+ dma_addr_t addr;
+};
+
/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
@@ -252,19 +262,26 @@ struct gve_rx_ring {
/* track number of used buffers */
u16 used_buf_states_cnt;
+
+ /* Address info of the buffers for header-split */
+ struct gve_header_buf hdr_bufs;
} dqo;
};
u64 rbytes; /* free-running bytes received */
+ u64 rx_hsplit_bytes; /* free-running header bytes received */
u64 rpackets; /* free-running packets received */
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u64 rx_hsplit_pkt; /* free-running packets with headers split */
u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
u64 rx_copied_pkt; /* free-running total number of copied packets */
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+ /* free-running count of unsplit packets due to header buffer overflow or zero hdr_len */
+ u64 rx_hsplit_unsplit_pkt;
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
@@ -664,6 +681,7 @@ struct gve_rx_alloc_rings_cfg {
struct gve_qpl_config *qpl_cfg;
u16 ring_size;
+ u16 packet_buffer_size;
bool raw_addressing;
bool enable_header_split;
@@ -778,13 +796,17 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- int data_buffer_size_dqo;
+ u16 data_buffer_size_dqo;
+ u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
/* Interrupt coalescing settings */
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+
+ u16 header_buf_size; /* device configured, header-split supported if non-zero */
+ bool header_split_enabled; /* true if header split is enabled by the user */
};
enum gve_service_task_flags_bit {
@@ -1122,6 +1144,9 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit);
+bool gve_header_split_supported(const struct gve_priv *priv);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 12fbd723ecc6..ae12ac38e18b 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -40,7 +40,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -147,6 +148,23 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_jumbo_frames = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_BUFFER_SIZES:
+ if (option_length < sizeof(**dev_op_buffer_sizes) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Buffer Sizes",
+ (int)sizeof(**dev_op_buffer_sizes),
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_buffer_sizes))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Buffer Sizes");
+ *dev_op_buffer_sizes = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -164,7 +182,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -185,7 +204,7 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl);
+ dev_op_dqo_qpl, dev_op_buffer_sizes);
dev_opt = next_opt;
}
@@ -640,6 +659,9 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be16(rx_buff_ring_entries);
cmd.create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
+ if (priv->header_split_enabled)
+ cmd.create_rx_queue.header_buffer_size =
+ cpu_to_be16(priv->header_buf_size);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -755,7 +777,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_jumbo_frames
*dev_op_jumbo_frames,
const struct gve_device_option_dqo_qpl
- *dev_op_dqo_qpl)
+ *dev_op_dqo_qpl,
+ const struct gve_device_option_buffer_sizes
+ *dev_op_buffer_sizes)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -779,10 +803,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (priv->rx_pages_per_qpl == 0)
priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
+
+ if (dev_op_buffer_sizes &&
+ (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
+ priv->max_rx_buffer_size =
+ be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
+ priv->header_buf_size =
+ be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
+ dev_info(&priv->pdev->dev,
+ "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
+ priv->max_rx_buffer_size, priv->header_buf_size);
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
@@ -816,7 +852,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
&dev_op_jumbo_frames,
- &dev_op_dqo_qpl);
+ &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes);
if (err)
goto free_device_descriptor;
@@ -885,7 +922,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
- dev_op_jumbo_frames, dev_op_dqo_qpl);
+ dev_op_jumbo_frames, dev_op_dqo_qpl,
+ dev_op_buffer_sizes);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5865ccdccbd0..5ac972e45ff8 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -125,6 +125,15 @@ struct gve_device_option_jumbo_frames {
static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+struct gve_device_option_buffer_sizes {
+ /* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
+ __be32 supported_features_mask;
+ __be16 packet_buffer_size;
+ __be16 header_buffer_size;
+};
+
+static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -140,6 +149,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
@@ -149,10 +159,12 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -165,6 +177,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
+ gve_driver_capability_flexible_buffer_size = 5,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -176,7 +189,8 @@ enum gve_driver_capbility {
(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
- GVE_CAP1(gve_driver_capability_alt_miss_compl))
+ GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -260,7 +274,9 @@ struct gve_adminq_create_rx_queue {
__be16 packet_buffer_size;
__be16 rx_buff_ring_size;
u8 enable_rsc;
- u8 padding[5];
+ u8 padding1;
+ __be16 header_buffer_size;
+ u8 padding2[2];
};
static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
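
Each gve device option is a small TLV: a fixed header carrying an option id, a payload length and a required-features mask, with the payload (such as the new gve_device_option_buffer_sizes) immediately following the header. A minimal sketch of the per-option validation the parser performs — the names here are illustrative, only the layout mirrors the driver's struct gve_device_option:

    /* Illustrative TLV walk: a recognized option's payload must be at least
     * the size the driver compiled against, and its required-features mask
     * must match exactly; otherwise the option is skipped with a warning.
     */
    struct gve_tlv_hdr {
        __be16 option_id;
        __be16 option_length;
        __be32 required_features_mask;
    };

    static const void *parse_one_option(const struct gve_tlv_hdr *opt,
                                        u16 expected_len, u32 expected_feats)
    {
        if (be16_to_cpu(opt->option_length) < expected_len ||
            be32_to_cpu(opt->required_features_mask) != expected_feats)
                return NULL;                    /* malformed: caller skips it */
        return (const void *)(opt + 1);         /* payload follows the header */
    }
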
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index e5397aa1e48f..9aebfb843d9d 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -4,7 +4,6 @@
* Copyright (C) 2015-2021 Google, Inc.
*/
-#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
@@ -40,17 +39,18 @@ static u32 gve_get_msglevel(struct net_device *netdev)
* as declared in enum xdp_action inside file uapi/linux/bpf.h .
*/
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
- "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
- "rx_dropped", "tx_dropped", "tx_timeouts",
+ "rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
+ "tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+ "rx_hsplit_unsplit_pkt",
"interface_up_cnt", "interface_down_cnt", "reset_cnt",
"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
- "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
- "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
- "rx_frag_alloc_cnt[%u]",
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
+ "rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
+ "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -154,11 +154,13 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
- tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+ u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
+ tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+ tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
tmp_tx_pkts, tmp_tx_bytes;
- u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
- rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
+ rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
+ tx_dropped;
int stats_idx, base_stats_idx, max_stats_idx;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
@@ -185,8 +187,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
kfree(rx_qid_to_stats_idx);
return;
}
- for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
- rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
+ for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+ rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+ rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+ ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
do {
@@ -195,18 +199,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
+ tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
+ tmp_rx_hsplit_unsplit_pkt =
+ rx->rx_hsplit_unsplit_pkt;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
+ rx_hsplit_pkt += tmp_rx_hsplit_pkt;
rx_bytes += tmp_rx_bytes;
rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
+ rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
}
}
for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
@@ -227,6 +236,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
i = 0;
data[i++] = rx_pkts;
+ data[i++] = rx_hsplit_pkt;
data[i++] = tx_pkts;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
@@ -238,6 +248,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx_skb_alloc_fail;
data[i++] = rx_buf_alloc_fail;
data[i++] = rx_desc_err_dropped_pkt;
+ data[i++] = rx_hsplit_unsplit_pkt;
data[i++] = priv->interface_up_cnt;
data[i++] = priv->interface_down_cnt;
data[i++] = priv->reset_cnt;
@@ -277,6 +288,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
+ tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
@@ -284,6 +296,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
+ data[i++] = tmp_rx_hsplit_bytes;
data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt;
@@ -480,6 +493,29 @@ static void gve_get_ringparam(struct net_device *netdev,
cmd->tx_max_pending = priv->tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+
+ if (!gve_header_split_supported(priv))
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+ else if (priv->header_split_enabled)
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ else
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+static int gve_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (priv->tx_desc_cnt != cmd->tx_pending ||
+ priv->rx_desc_cnt != cmd->rx_pending) {
+ dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -655,6 +691,7 @@ static int gve_set_coalesce(struct net_device *netdev,
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -667,6 +704,7 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
.get_ringparam = gve_get_ringparam,
+ .set_ringparam = gve_set_ringparam,
.reset = gve_user_reset,
.get_tunable = gve_get_tunable,
.set_tunable = gve_set_tunable,
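
The new rx_hsplit_pkt/rx_hsplit_bytes/rx_hsplit_unsplit_pkt counters are read with the same u64_stats snapshot loop as the existing per-ring stats: on 32-bit hosts a 64-bit counter can tear against a concurrent writer, so the reader retries while the ring's seqcount moves. A minimal sketch of that pattern (a hypothetical helper, not driver code):

    /* Sketch of the lockless stats snapshot used above: re-read if the
     * ring's writer bumped the seqcount while we were copying.
     */
    static u64 snapshot_hsplit_pkts(struct gve_rx_ring *rx)
    {
        unsigned int start;
        u64 val;

        do {
                start = u64_stats_fetch_begin(&rx->statss);
                val = rx->rx_hsplit_pkt;
        } while (u64_stats_fetch_retry(&rx->statss, start));

        return val;
    }
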
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index db6d9ae7cd78..166bd827a6d7 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1276,17 +1276,10 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
static void gve_drain_page_cache(struct gve_priv *priv)
{
- struct page_frag_cache *nc;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- nc = &priv->rx[i].page_cache;
- if (nc->va) {
- __page_frag_cache_drain(virt_to_page(nc->va),
- nc->pagecnt_bias);
- nc->va = NULL;
- }
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ page_frag_cache_drain(&priv->rx[i].page_cache);
}
static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
@@ -1307,9 +1300,13 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->qcfg = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->enable_header_split = priv->header_split_enabled;
cfg->qpls = priv->qpls;
cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->rx_desc_cnt;
+ cfg->packet_buffer_size = gve_is_gqi(priv) ?
+ GVE_DEFAULT_RX_BUFFER_SIZE :
+ priv->data_buffer_size_dqo;
cfg->rx = priv->rx;
}
@@ -1448,12 +1445,9 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto reset;
- if (!gve_is_gqi(priv)) {
- /* Hard code this for now. This may be tuned in the future for
- * performance.
- */
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
- }
+ priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
+ priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+
err = gve_create_rings(priv);
if (err)
goto reset;
@@ -2065,6 +2059,56 @@ out:
priv->tx_timeo_cnt++;
}
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
+{
+ if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
+ return GVE_MAX_RX_BUFFER_SIZE;
+ else
+ return GVE_DEFAULT_RX_BUFFER_SIZE;
+}
+
+/* Header split is not yet supported on non-DQO_RDA queue formats, even if the device advertises it */
+bool gve_header_split_supported(const struct gve_priv *priv)
+{
+ return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+}
+
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ bool enable_hdr_split;
+ int err = 0;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ return 0;
+
+ if (!gve_header_split_supported(priv)) {
+ dev_err(&priv->pdev->dev, "Header-split not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ enable_hdr_split = true;
+ else
+ enable_hdr_split = false;
+
+ if (enable_hdr_split == priv->header_split_enabled)
+ return 0;
+
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ rx_alloc_cfg.enable_header_split = enable_hdr_split;
+ rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+
+ if (netif_running(priv->dev))
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
+}
+
static int gve_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2511,6 +2555,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
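
With .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT plus the new set_ringparam hook, header split becomes user-togglable; with a sufficiently recent ethtool userspace this should correspond to something like "ethtool -G <dev> tcp-data-split on", with "ethtool -g <dev>" reporting the current state. Note that gve_set_hsplit_config() treats ETHTOOL_TCP_DATA_SPLIT_UNKNOWN as "leave unchanged", so a -G invocation that only touches ring sizes does not disturb the setting.
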
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index c294a1595b6a..20f5a9e7fae9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -105,7 +105,8 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
struct gve_rx_slot_page_info *page_info,
- union gve_rx_data_slot *data_slot)
+ union gve_rx_data_slot *data_slot,
+ struct gve_rx_ring *rx)
{
struct page *page;
dma_addr_t dma;
@@ -113,8 +114,12 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
GFP_ATOMIC);
- if (err)
+ if (err) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return err;
+ }
gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
return 0;
@@ -156,8 +161,9 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
&rx->data.data_ring[i].qpl_offset);
continue;
}
- err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
- &rx->data.data_ring[i]);
+ err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
+ &rx->data.page_info[i],
+ &rx->data.data_ring[i], rx);
if (err)
goto alloc_err_rda;
}
@@ -396,7 +402,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
- u16 packet_buffer_size, u16 len,
+ unsigned int truesize, u16 len,
struct gve_rx_ctx *ctx)
{
u32 offset = page_info->page_offset + page_info->pad;
@@ -429,10 +435,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
if (skb != ctx->skb_head) {
ctx->skb_head->len += len;
ctx->skb_head->data_len += len;
- ctx->skb_head->truesize += packet_buffer_size;
+ ctx->skb_head->truesize += truesize;
}
skb_add_rx_frag(skb, num_frags, page_info->page,
- offset, len, packet_buffer_size);
+ offset, len, truesize);
return ctx->skb_head;
}
@@ -526,7 +532,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
memcpy(alloc_page_info.page_address, src, page_info->pad + len);
skb = gve_rx_add_frags(napi, &alloc_page_info,
- rx->packet_buffer_size,
+ PAGE_SIZE,
len, ctx);
u64_stats_update_begin(&rx->statss);
@@ -936,10 +942,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
if (gve_rx_alloc_buffer(priv, dev, page_info,
- data_slot)) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_buf_alloc_fail++;
- u64_stats_update_end(&rx->statss);
+ data_slot, rx)) {
break;
}
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8e6aeb5b3ed4..8e8071308aeb 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -199,6 +199,18 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return 0;
}
+static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ if (rx->dqo.hdr_bufs.data) {
+ dma_free_coherent(hdev, priv->header_buf_size * buf_count,
+ rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
+ rx->dqo.hdr_bufs.data = NULL;
+ }
+}
+
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -258,9 +270,24 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
kvfree(rx->dqo.buf_states);
rx->dqo.buf_states = NULL;
+ gve_rx_free_hdr_bufs(priv, rx);
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
+ &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
+ if (!rx->dqo.hdr_bufs.data)
+ return -ENOMEM;
+
+ return 0;
+}
+
void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -302,6 +329,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->dqo.buf_states)
return -ENOMEM;
+ /* Allocate header buffers for header-split */
+ if (cfg->enable_header_split)
+ if (gve_rx_alloc_hdr_bufs(priv, rx))
+ goto err;
+
/* Set up linked list of buffer IDs */
for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
rx->dqo.buf_states[i].next = i + 1;
@@ -443,6 +475,10 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
buf_state->page_info.page_offset);
+ if (rx->dqo.hdr_bufs.data)
+ desc->header_buf_addr =
+ cpu_to_le64(rx->dqo.hdr_bufs.addr +
+ priv->header_buf_size * bufq->tail);
bufq->tail = (bufq->tail + 1) & bufq->mask;
complq->num_free_slots--;
@@ -458,7 +494,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const int data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = priv->data_buffer_size_dqo;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -645,13 +681,16 @@ static int gve_rx_append_frags(struct napi_struct *napi,
*/
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const struct gve_rx_compl_desc_dqo *compl_desc,
- int queue_idx)
+ u32 desc_idx, int queue_idx)
{
const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+ const bool hbo = compl_desc->header_buffer_overflow;
const bool eop = compl_desc->end_of_packet != 0;
+ const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
u16 buf_len;
+ u16 hdr_len;
if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
@@ -672,12 +711,35 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
buf_len = compl_desc->packet_len;
+ hdr_len = compl_desc->header_len;
/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
prefetch(buf_state->page_info.page);
+ /* Copy the header into the skb in the case of header split */
+ if (hsplit) {
+ int unsplit = 0;
+
+ if (hdr_len && !hbo) {
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ rx->dqo.hdr_bufs.data +
+ desc_idx * priv->header_buf_size,
+ hdr_len);
+ if (unlikely(!rx->ctx.skb_head))
+ goto error;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+ } else {
+ unsplit = 1;
+ }
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_hsplit_pkt++;
+ rx->rx_hsplit_unsplit_pkt += unsplit;
+ rx->rx_hsplit_bytes += hdr_len;
+ u64_stats_update_end(&rx->statss);
+ }
+
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
buf_state->page_info.page_offset,
@@ -820,7 +882,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* Do not read data until we own the descriptor */
dma_rmb();
- err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+ err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
gve_rx_free_skb(rx);
u64_stats_update_begin(&rx->statss);
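
All header buffers for a ring come from a single dma_alloc_coherent() block of header_buf_size * (bufq->mask + 1) bytes, so any slot's buffer is found by fixed offset: the posting path indexes with bufq->tail, the completion path with the descriptor index. A sketch of that addressing (hypothetical helpers mirroring the arithmetic above):

    /* CPU and DMA addresses of the header buffer for ring slot i; mirrors
     * the hdr_bufs.addr + header_buf_size * tail arithmetic in the driver.
     */
    static inline u8 *hdr_buf_cpu(const struct gve_rx_ring *rx,
                                  u16 header_buf_size, u32 i)
    {
        return rx->dqo.hdr_bufs.data + (size_t)header_buf_size * i;
    }

    static inline dma_addr_t hdr_buf_dma(const struct gve_rx_ring *rx,
                                         u16 header_buf_size, u32 i)
    {
        return rx->dqo.hdr_bufs.addr + (dma_addr_t)header_buf_size * i;
    }
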
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 535b1796b91d..2349750075a5 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -64,11 +64,9 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
rx->ntfy_id = ntfy_idx;
}
-struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len)
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len)
{
- void *va = page_info->page_address + page_info->page_offset +
- page_info->pad;
struct sk_buff *skb;
skb = napi_alloc_skb(napi, len);
@@ -76,12 +74,21 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
return NULL;
__skb_put(skb, len);
- skb_copy_to_linear_data_offset(skb, 0, va, len);
+ skb_copy_to_linear_data_offset(skb, 0, data, len);
skb->protocol = eth_type_trans(skb, dev);
return skb;
}
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info, u16 len)
+{
+ void *va = page_info->page_address + page_info->page_offset +
+ page_info->pad;
+
+ return gve_rx_copy_data(dev, napi, va, len);
+}
+
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
page_info->pagecnt_bias--;
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 277921a629f7..bf2e9a0adb36 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -19,6 +19,9 @@ bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len);
+
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 8a1027ad340d..d4293f76d69d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -12,7 +12,9 @@
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
-static struct class *hnae_class;
+static const struct class hnae_class = {
+ .name = "hnae",
+};
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
@@ -111,7 +113,7 @@ static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
WARN_ON(!fwnode);
- dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
+ dev = class_find_device(&hnae_class, NULL, fwnode, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
@@ -415,7 +417,7 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->owner = owner;
hdev->id = (int)atomic_inc_return(&id);
hdev->cls_dev.parent = hdev->dev;
- hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.class = &hnae_class;
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
@@ -448,13 +450,12 @@ EXPORT_SYMBOL(hnae_ae_unregister);
static int __init hnae_init(void)
{
- hnae_class = class_create("hnae");
- return PTR_ERR_OR_ZERO(hnae_class);
+ return class_register(&hnae_class);
}
static void __exit hnae_exit(void)
{
- class_destroy(hnae_class);
+ class_unregister(&hnae_class);
}
subsys_initcall(hnae_init);
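
This hnae change follows the kernel-wide migration away from class_create() toward statically defined struct class objects registered with class_register()/class_unregister(): the class can be const and live in rodata, there is no allocation left to fail, and the PTR_ERR_OR_ZERO dance in the init path disappears.
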
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index f1695c889d3a..19668a8d22f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2473,9 +2473,9 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
if (skb->encapsulation)
- len = skb_inner_transport_header(skb) - skb->data;
+ len = skb_inner_transport_offset(skb);
else
- len = skb_transport_header(skb) - skb->data;
+ len = skb_transport_offset(skb);
/* Assume L4 is 60 bytes, as TCP is the only protocol with
* a flexible value, and its max len is 60 bytes.
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 5e27470c6b1e..f2d4669c81cf 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -987,7 +987,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
{
#ifdef DEBUG
printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
- printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
+ printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status));
printk("%s: check, whether you set the right interrupt number!\n",dev->name);
#endif
sun3_82586_close(dev);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index d55638ad8704..639fbb12bd35 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -368,6 +368,15 @@ config IGC
To compile this driver as a module, choose M here. The module
will be called igc.
+
+config IGC_LEDS
+ def_bool LEDS_TRIGGER_NETDEV
+ depends on IGC && LEDS_CLASS
+ depends on LEDS_CLASS=y || IGC=m
+ help
+ Optional support for controlling the NIC LEDs with the netdev
+ LED trigger.
+
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 01f0f12035ca..3fcb8daaa243 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -171,8 +171,8 @@ static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
-module_param(eeprom_bad_csum_allow, int, 0);
-module_param(use_io, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0444);
+module_param(use_io, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
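
The perm change from 0 to 0444 is user-visible: with mode 0 no sysfs node is created for a module parameter, while 0444 exposes it read-only, so after this patch the current values can be inspected at /sys/module/e100/parameters/use_io and /sys/module/e100/parameters/eeprom_bad_csum_allow.
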
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index a187582d2299..ba9c19e6994c 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
* INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
* bits to count nanoseconds, leaving the rest for fractional nanoseconds.
+ *
+ * Any given INCVALUE also has an associated maximum adjustment value. This
+ * maximum adjustment value is the largest increase (or decrease) which can be
+ * safely applied without overflowing the INCVALUE. Since INCVALUE has
+ * a maximum range of 24 bits, its largest value is 0xFFFFFF.
+ *
+ * To understand where the maximum value comes from, consider the following
+ * equation:
+ *
+ * new_incval = base_incval + (base_incval * adjustment) / 1billion
+ *
+ * To avoid overflow that means:
+ * max_incval = base_incval + (base_incval * max_adj) / 1billion
+ *
+ * Re-arranging:
+ * max_adj = floor(((max_incval - base_incval) * 1billion) / base_incval)
*/
#define INCVALUE_96MHZ 125
#define INCVALUE_SHIFT_96MHZ 17
#define INCPERIOD_SHIFT_96MHZ 2
#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ)
+#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */
#define INCVALUE_25MHZ 40
#define INCVALUE_SHIFT_25MHZ 18
#define INCPERIOD_25MHZ 1
+#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */
#define INCVALUE_24MHZ 125
#define INCVALUE_SHIFT_24MHZ 14
#define INCPERIOD_24MHZ 3
+#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */
#define INCVALUE_38400KHZ 26
#define INCVALUE_SHIFT_38400KHZ 19
#define INCPERIOD_38400KHZ 1
+#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
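
As a sanity check of that derivation with the 96 MHz numbers: base_incval = INCVALUE_96MHZ << INCVALUE_SHIFT_96MHZ = 125 << 17 = 16,384,000 and max_incval = 0xFFFFFF = 16,777,215, giving max_adj = floor(393,215 * 10^9 / 16,384,000) = 23,999,938 ppb; MAX_PPB_96MHZ = 23,999,900 appears to be that bound rounded down for a small safety margin.
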
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fc0f98ea6133..dc553c51d79a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2186,7 +2186,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
-static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2223,16 +2223,16 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
if (ret_val)
goto release;
- edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->supported, phy_data);
/* EEE Advertised */
- edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert);
/* EEE Link Partner Advertised */
ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
if (ret_val)
goto release;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
@@ -2262,11 +2262,13 @@ release:
return ret_val;
}
-static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
ret_val = e1000e_get_eee(netdev, &eee_curr);
@@ -2283,12 +2285,17 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
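
The old u32 test "advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)" becomes bitmap algebra: linkmode_andnot() computes advertised AND NOT supported and returns true if any bit survives, i.e. if the user advertised a mode outside the supported set. The same check pulled out as a standalone sketch:

    /* Reject any advertised EEE link mode outside 100baseT/1000baseT
     * full duplex, mirroring the validation above.
     */
    static bool eee_adv_is_supported(const unsigned long *advertised)
    {
        __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
        __ETHTOOL_DECLARE_LINK_MODE_MASK(unsupported);

        linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);

        /* true iff advertised has no bit that supported lacks */
        return !linkmode_andnot(unsupported, advertised, supported);
    }
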
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a2788fd5f8bb..19e450a5bd31 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
(u16)(mac_reg & 0xFFFF));
hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
- FIELD_GET(E1000_RAH_AV, mac_reg));
+ (u16)((mac_reg & E1000_RAH_AV) >> 16));
}
e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index af5d9d97a0d6..cc8c531ec3df 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6688,14 +6688,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) {
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
-
- if (retval)
- return retval;
+ if (retval)
+ return retval;
+ }
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 02d871bc112a..bbcfd529399b 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch2lan:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ break;
case e1000_pch_lpt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
+ break;
case e1000_pch_spt:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ break;
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
@@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lnp:
case e1000_pch_ptp:
case e1000_pch_nvp:
- if ((hw->mac.type < e1000_pch_lpt) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
- adapter->ptp_clock_info.max_adj = 24000000 - 1;
- break;
- }
- fallthrough;
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
- adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9b701615c7c6..ba24f3fa92c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -687,6 +687,54 @@ struct i40e_pf {
};
/**
+ * __i40e_pf_next_vsi - get next valid VSI
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return the next non-NULL VSI pointer in the pf->vsi array and
+ * update the idx position. Returns NULL if no VSI is found.
+ **/
+static __always_inline struct i40e_vsi *
+__i40e_pf_next_vsi(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < pf->num_alloc_vsi) {
+ if (pf->vsi[*idx])
+ return pf->vsi[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_vsi(_pf, _i, _vsi) \
+ for (_i = 0, _vsi = __i40e_pf_next_vsi(_pf, &_i); \
+ _vsi; \
+ _i++, _vsi = __i40e_pf_next_vsi(_pf, &_i))
+
+/**
+ * __i40e_pf_next_veb - get next valid VEB
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return the next non-NULL VEB pointer in the pf->veb array and
+ * update the idx position. Returns NULL if no VEB is found.
+ **/
+static __always_inline struct i40e_veb *
+__i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < I40E_MAX_VEB) {
+ if (pf->veb[*idx])
+ return pf->veb[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_veb(_pf, _i, _veb) \
+ for (_i = 0, _veb = __i40e_pf_next_veb(_pf, &_i); \
+ _veb; \
+ _i++, _veb = __i40e_pf_next_veb(_pf, &_i))
+
+/**
* i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
@@ -735,7 +783,6 @@ struct i40e_new_mac_filter {
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
u16 stats_idx; /* index of VEB parent */
@@ -1120,14 +1167,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- struct i40e_vsi *vsi = pf->vsi[i];
-
- if (vsi && vsi->type == type)
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->type == type)
return vsi;
- }
return NULL;
}
@@ -1309,4 +1354,40 @@ static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw)
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
+/**
+ * i40e_pf_get_vsi_by_seid - find VSI by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VSI
+ **/
+static inline struct i40e_vsi *
+i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_vsi *vsi;
+ int i;
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->seid == seid)
+ return vsi;
+
+ return NULL;
+}
+
+/**
+ * i40e_pf_get_veb_by_seid - find VEB by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VEB
+ **/
+static inline struct i40e_veb *
+i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == seid)
+ return veb;
+
+ return NULL;
+}
+
#endif /* _I40E_H_ */
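
The pair of iterators centralizes the NULL-slot skipping that every open-coded pf->vsi/pf->veb loop used to repeat: both the loop initializer and the increment go through __i40e_pf_next_vsi()/__i40e_pf_next_veb(), so the body only ever sees non-NULL pointers — which is what lets i40e_find_vsi_by_type() above drop its NULL check. A small usage sketch (a hypothetical helper in the same style):

    /* The iterator yields only non-NULL entries, so no NULL check is
     * needed in the body.
     */
    static int count_vsis_of_type(struct i40e_pf *pf, u16 type)
    {
        struct i40e_vsi *vsi;
        int i, n = 0;

        i40e_pf_for_each_vsi(pf, i, vsi)
                if (vsi->type == type)
                        n++;

        return n;
    }
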
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9d88ed6105fd..8db1eb0c1768 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1523,7 +1523,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
- I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK);
reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK,
bw_share[i]);
reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 6b60dc9b7736..d76497566e40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -43,7 +43,7 @@
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
-#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT)
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
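
Both fixes here are mask arithmetic. In i40e_dcb.c the clear term used ..._ETSTC_SHIFT, a bit position, where ..._ETSTC_MASK, a bit pattern, was intended, so the wrong bits were cleared; in i40e_dcb.h, 0xFFFFFF is a signed int and shifting it left by 8 shifts into the sign bit, hence the U suffix. In miniature, with a hypothetical field occupying bits 8..31:

    #define DEMO_SHIFT 8
    #define DEMO_MASK  (0xFFFFFFU << DEMO_SHIFT) /* U: no signed shift into bit 31 */

    static inline u32 demo_clear_field(u32 reg)
    {
        /* Correct: clears bits 8..31. "reg & ~DEMO_SHIFT" would instead
         * clear only bit 3, since ~8 is ...11110111.
         */
        return reg & ~DEMO_MASK;
    }
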
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index b96a92187ab3..8aa43aefe84c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -947,16 +947,16 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
+ struct i40e_vsi *vsi;
int v, err;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] && pf->vsi[v]->netdev) {
- err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ if (vsi->netdev) {
+ err = i40e_dcbnl_vsi_del_app(vsi, app);
dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
- pf->vsi[v]->seid, err, app->selector,
+ vsi->seid, err, app->selector,
app->protocolid, app->priority);
}
- }
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef70ddbe9c2f..f9ba45f596c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -24,31 +24,13 @@ enum ring_type {
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
- int i;
-
- if (seid < 0)
+ if (seid < 0) {
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
- else
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
- return pf->vsi[i];
-
- return NULL;
-}
-/**
- * i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf: the PF structure to search for the veb
- * @seid: seid of the veb it is searching for
- **/
-static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
-{
- int i;
+ return NULL;
+ }
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == seid)
- return pf->veb[i];
- return NULL;
+ return i40e_pf_get_vsi_by_seid(pf, seid);
}
/**************************************************************
@@ -653,12 +635,11 @@ out:
**/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
- i, pf->vsi[i]->seid);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
}
/**
@@ -696,15 +677,14 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
struct i40e_veb *veb;
- veb = i40e_dbg_find_veb(pf, seid);
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
if (!veb) {
dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
- "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
- veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
- veb->uplink_seid,
+ "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
+ veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
i40e_dbg_dump_eth_stats(pf, &veb->stats);
}
@@ -718,11 +698,8 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
struct i40e_veb *veb;
int i;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- veb = pf->veb[i];
- if (veb)
- i40e_dbg_dump_veb_seid(pf, veb->seid);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
}
/**
@@ -851,10 +828,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
struct i40e_veb *veb;
- int uplink_seid, i;
+ u8 enabled_tc = 0x1;
+ int uplink_seid;
cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
- if (cnt != 2) {
+ if (cnt == 0) {
+ uplink_seid = 0;
+ vsi_seid = 0;
+ } else if (cnt != 2) {
dev_info(&pf->pdev->dev,
"add relay: bad command string, cnt=%d\n",
cnt);
@@ -866,33 +847,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add relay: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
- break;
- if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
- uplink_seid != pf->mac_seid) {
+ if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
dev_info(&pf->pdev->dev,
"add relay: relay uplink %d not found\n",
uplink_seid);
goto command_write_done;
+ } else if (uplink_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ enabled_tc = vsi->tc_config.enabled_tc;
+ } else if (vsi_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI must be 0 for floating relay\n");
+ goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
- vsi->tc_config.enabled_tc);
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
dev_info(&pf->pdev->dev, "add relay failed\n");
} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ struct i40e_veb *veb;
int i;
+
cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev,
@@ -906,9 +890,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* find the veb */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == veb_seid)
break;
+
if (i >= I40E_MAX_VEB) {
dev_info(&pf->pdev->dev,
"del relay: relay %d not found\n", veb_seid);
@@ -916,7 +901,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
- i40e_veb_release(pf->veb[i]);
+ i40e_veb_release(veb);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
unsigned int v;
int ret;
@@ -1251,8 +1236,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (cnt == 0) {
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- i40e_vsi_reset_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_reset_stats(vsi);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c841779713f6..42e7e6cdaa6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5644,7 +5644,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
@@ -5664,16 +5664,12 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
- edata->supported = SUPPORTED_Autoneg;
- edata->lp_advertised = edata->supported;
-
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
- edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
@@ -5682,7 +5678,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static int i40e_is_eee_param_supported(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5691,7 +5687,6 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
u32 value;
const char *name;
} param[] = {
- {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
{edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
@@ -5709,7 +5704,7 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
return 0;
}
-static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ae8f9f135725..f86578857e8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -310,11 +310,12 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
**/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->id == id))
- return pf->vsi[i];
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->id == id)
+ return vsi;
return NULL;
}
@@ -552,24 +553,19 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
+ struct i40e_veb *veb;
int i;
memset(&pf->stats, 0, sizeof(pf->stats));
memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
pf->stat_offsets_loaded = false;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i]) {
- memset(&pf->veb[i]->stats, 0,
- sizeof(pf->veb[i]->stats));
- memset(&pf->veb[i]->stats_offsets, 0,
- sizeof(pf->veb[i]->stats_offsets));
- memset(&pf->veb[i]->tc_stats, 0,
- sizeof(pf->veb[i]->tc_stats));
- memset(&pf->veb[i]->tc_stats_offsets, 0,
- sizeof(pf->veb[i]->tc_stats_offsets));
- pf->veb[i]->stat_offsets_loaded = false;
- }
+ i40e_pf_for_each_veb(pf, i, veb) {
+ memset(&veb->stats, 0, sizeof(veb->stats));
+ memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets));
+ memset(&veb->tc_stats, 0, sizeof(veb->tc_stats));
+ memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets));
+ veb->stat_offsets_loaded = false;
}
pf->hw_csum_rx_error = 0;
}
@@ -2879,6 +2875,7 @@ err_no_memory_locked:
**/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
if (!pf)
@@ -2890,11 +2887,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
}
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] &&
- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
- !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
- int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+ !test_bit(__I40E_VSI_RELEASING, vsi->state)) {
+ int ret = i40e_sync_vsi_filters(vsi);
if (ret) {
/* come back and try again later */
@@ -3588,40 +3584,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
int err = 0;
bool ok;
- int ret;
bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
- if (ring->vsi->type == I40E_VSI_MAIN)
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ring->rx_buf_len =
- xsk_pool_get_rx_frame_size(ring->xsk_pool);
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
- if (ret)
- return ret;
+ if (err)
+ return err;
dev_info(&vsi->back->pdev->dev,
"Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
- ring->rx_buf_len = vsi->rx_buf_len;
- if (ring->vsi->type == I40E_VSI_MAIN) {
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (ret)
- return ret;
- }
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
}
+skip:
xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
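The reworked Rx-ring setup registers the XDP rxq info first and only then attaches a memory model, re-registering when an XSK pool dictates a different frag size. A condensed sketch of the resulting flow (the xdp_rxq_info_* calls are the real net/xdp API; error handling and surrounding driver code are trimmed):

/* Condensed flow of the hunk above; error handling trimmed */
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
	__xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
			   ring->queue_index,
			   ring->q_vector->napi.napi_id,
			   ring->rx_buf_len);		/* frag size */

if (ring->xsk_pool) {
	/* the XSK pool dictates a different frag size, so the rxq
	 * info must be unregistered and registered again
	 */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
	ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	__xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
			   ring->queue_index,
			   ring->q_vector->napi.napi_id,
			   ring->rx_buf_len);
	xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
				   MEM_TYPE_XSK_BUFF_POOL, NULL);
} else {
	xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
				   MEM_TYPE_PAGE_SHARED, NULL);
}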
@@ -4911,27 +4922,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int pf_q, err, q_end;
+ u32 pf_q, tx_q_end, rx_q_end;
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- q_end = vsi->base_queue + vsi->num_queue_pairs;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
- i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+ tx_q_end = vsi->base_queue +
+ vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false);
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
- err = i40e_control_wait_rx_q(pf, pf_q, false);
- if (err)
- dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d disable timeout\n",
- vsi->seid, pf_q);
- }
+ rx_q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++)
+ i40e_control_rx_q(pf, pf_q, false);
msleep(I40E_DISABLE_TX_GAP_MSEC);
- pf_q = vsi->base_queue;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
i40e_vsi_wait_queues_disabled(vsi);
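The split into tx_q_end and rx_q_end matters because an XDP-enabled VSI owns twice as many Tx queues as Rx queues. A worked example with assumed values, and assuming the XDP Tx rings sit directly after the LAN ones:

/* Assume base_queue = 16, alloc_queue_pairs = 8, num_queue_pairs = 8,
 * XDP enabled:
 *
 *   tx_q_end = 16 + 8 * 2 = 32   -> Tx queues 16..31 (LAN + XDP rings)
 *   rx_q_end = 16 + 8     = 24   -> Rx queues 16..23
 *
 * The old single q_end = 24 left the XDP Tx queues 24..31 untouched
 * by i40e_pre_tx_queue_cfg() and the QTX_ENA writes.
 */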
@@ -5155,6 +5162,7 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
**/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
@@ -5164,9 +5172,10 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
I40E_IWARP_IRQ_PILE_ID);
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- i40e_vsi_free_q_vectors(pf->vsi[i]);
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_free_q_vectors(vsi);
+
i40e_reset_interrupt_capability(pf);
}
@@ -5263,12 +5272,11 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_quiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_quiesce_vsi(vsi);
}
/**
@@ -5277,12 +5285,11 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
**/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_unquiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_unquiesce_vsi(vsi);
}
/**
@@ -5343,14 +5350,13 @@ wait_rx:
**/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v, ret = 0;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
- if (pf->vsi[v]) {
- ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
- if (ret)
- break;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ ret = i40e_vsi_wait_queues_disabled(vsi);
+ if (ret)
+ break;
}
return ret;
@@ -6767,32 +6773,29 @@ out:
**/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
u8 tc_map = 0;
int ret;
- u8 v;
+ int v;
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
return;
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
- ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_veb_config_tc(veb, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VEB seid=%d\n",
- pf->veb[v]->seid);
+ veb->seid);
/* Will try to configure as many components */
}
}
/* Update each VSI */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v])
- continue;
-
+ i40e_pf_for_each_vsi(pf, v, vsi) {
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
@@ -6801,17 +6804,17 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
- ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ ret = i40e_vsi_config_tc(vsi, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VSI seid=%d\n",
- pf->vsi[v]->seid);
+ vsi->seid);
/* Will try to configure as many components */
} else {
/* Re-configure VSI vectors based on updated TC map */
- i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
- if (pf->vsi[v]->netdev)
- i40e_dcbnl_set_all(pf->vsi[v]);
+ i40e_vsi_map_rings_to_vectors(vsi);
+ if (vsi->netdev)
+ i40e_dcbnl_set_all(vsi);
}
}
}
@@ -9246,7 +9249,9 @@ int i40e_close(struct net_device *netdev)
**/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
+ struct i40e_vsi *vsi;
u32 val;
+ int i;
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9302,29 +9307,20 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is enabled\n");
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that requested a re-init */
- dev_info(&pf->pdev->dev,
- "VSI reinit requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
+ dev_info(&pf->pdev->dev, "VSI reinit requested\n");
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
vsi->state))
- i40e_vsi_reinit_locked(pf->vsi[v]);
+ i40e_vsi_reinit_locked(vsi);
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that needs to be brought down */
dev_info(&pf->pdev->dev, "VSI down requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
vsi->state)) {
set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
@@ -9877,6 +9873,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
**/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
int i;
@@ -9884,15 +9881,10 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
return;
pf = veb->pf;
- /* depth first... */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
- i40e_veb_link_event(pf->veb[i], link_up);
-
- /* ... now the local VSIs */
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
- i40e_vsi_link_event(pf->vsi[i], link_up);
+ /* Send link event to contained VSIs */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == veb->seid)
+ i40e_vsi_link_event(vsi, link_up);
}
/**
@@ -9984,6 +9976,8 @@ static void i40e_link_event(struct i40e_pf *pf)
**/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* if interface is down do nothing */
@@ -10004,15 +9998,14 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- i40e_update_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->netdev)
+ i40e_update_stats(vsi);
if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
/* Update the stats for the active switching components */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i])
- i40e_update_veb_stats(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_update_veb_stats(veb);
}
i40e_ptp_rx_hang(pf);
@@ -10357,89 +10350,84 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
}
/**
- * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
* @veb: pointer to the VEB instance
*
- * This is a recursive function that first builds the attached VSIs then
- * recurses in to build the next layer of VEB. We track the connections
- * through our own index numbers because the seid's from the HW could
- * change across the reset.
+ * This function builds the attached VSIs. We track the connections through
+ * our own index numbers because the SEIDs from the HW could change across
+ * the reset.
**/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
struct i40e_vsi *ctl_vsi = NULL;
struct i40e_pf *pf = veb->pf;
- int v, veb_idx;
- int ret;
+ struct i40e_vsi *vsi;
+ int v, ret;
- /* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
- if (pf->vsi[v] &&
- pf->vsi[v]->veb_idx == veb->idx &&
- pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
- ctl_vsi = pf->vsi[v];
- break;
- }
- }
- if (!ctl_vsi) {
- dev_info(&pf->pdev->dev,
- "missing owner VSI for veb_idx %d\n", veb->idx);
- ret = -ENOENT;
- goto end_reconstitute;
+ /* As we do not maintain a PV (port virtualizer) switch element,
+ * there can be only one non-floating VEB that has an uplink to the
+ * MAC SEID, and its control VSI is the main one.
+ */
+ if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid uplink SEID for VEB %d\n", veb->idx);
+ return -ENOENT;
}
- if (ctl_vsi != pf->vsi[pf->lan_vsi])
- ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- ret = i40e_add_vsi(ctl_vsi);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "rebuild of veb_idx %d owner VSI failed: %d\n",
- veb->idx, ret);
- goto end_reconstitute;
+
+ if (veb->uplink_seid == pf->mac_seid) {
+ /* Check that the LAN VSI has VEB owning flag set */
+ ctl_vsi = pf->vsi[pf->lan_vsi];
+
+ if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
+ !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
+ dev_err(&pf->pdev->dev,
+ "Invalid control VSI for VEB %d\n", veb->idx);
+ return -ENOENT;
+ }
+
+ /* Add the control VSI to switch */
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Rebuild of owner VSI for VEB %d failed: %d\n",
+ veb->idx, ret);
+ return ret;
+ }
+
+ i40e_vsi_reset_stats(ctl_vsi);
}
- i40e_vsi_reset_stats(ctl_vsi);
/* create the VEB in the switch and move the VSI onto the VEB */
ret = i40e_add_veb(veb, ctl_vsi);
if (ret)
- goto end_reconstitute;
+ return ret;
- if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
- veb->bridge_mode = BRIDGE_MODE_VEB;
- else
- veb->bridge_mode = BRIDGE_MODE_VEPA;
- i40e_config_bridge_mode(veb);
+ if (veb->uplink_seid) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ i40e_config_bridge_mode(veb);
+ }
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if (vsi == ctl_vsi)
continue;
- if (pf->vsi[v]->veb_idx == veb->idx) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
+ if (vsi->veb_idx == veb->idx) {
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of vsi_idx %d failed: %d\n",
v, ret);
- goto end_reconstitute;
+ return ret;
}
i40e_vsi_reset_stats(vsi);
}
}
- /* create any VEBs attached to this VEB - RECURSION */
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
- pf->veb[veb_idx]->uplink_seid = veb->seid;
- ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
- if (ret)
- break;
- }
- }
-
-end_reconstitute:
return ret;
}
@@ -10707,6 +10695,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
int ret = 0;
u32 v;
@@ -10721,11 +10710,9 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v]) {
- i40e_clean_xps_state(pf->vsi[v]);
- pf->vsi[v]->seid = 0;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ i40e_clean_xps_state(vsi);
+ vsi->seid = 0;
}
i40e_shutdown_adminq(&pf->hw);
@@ -10839,6 +10826,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
+ struct i40e_veb *veb;
int ret;
u32 val;
int v;
@@ -10980,35 +10968,29 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
*/
if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
- /* find the one VEB connected to the MAC, and find orphans */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
-
- if (pf->veb[v]->uplink_seid == pf->mac_seid ||
- pf->veb[v]->uplink_seid == 0) {
- ret = i40e_reconstitute_veb(pf->veb[v]);
- if (!ret)
- continue;
+ /* Rebuild VEBs */
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_reconstitute_veb(veb);
+ if (!ret)
+ continue;
- /* If Main VEB failed, we're in deep doodoo,
- * so give up rebuilding the switch and set up
- * for minimal rebuild of PF VSI.
- * If orphan failed, we'll report the error
- * but try to keep going.
- */
- if (pf->veb[v]->uplink_seid == pf->mac_seid) {
- dev_info(&pf->pdev->dev,
- "rebuild of switch failed: %d, will try to set up simple PF connection\n",
- ret);
- vsi->uplink_seid = pf->mac_seid;
- break;
- } else if (pf->veb[v]->uplink_seid == 0) {
- dev_info(&pf->pdev->dev,
- "rebuild of orphan VEB failed: %d\n",
- ret);
- }
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (veb->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ vsi->uplink_seid = pf->mac_seid;
+ break;
+ } else if (veb->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
}
}
}
@@ -12087,6 +12069,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
*/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int err, i;
/* We cleared the MSI and MSI-X flags when disabling the old interrupt
@@ -12103,13 +12086,12 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
/* Now that we've re-acquired IRQs, we need to remap the vectors and
* rings together again.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
- if (err)
- goto err_unwind;
- i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ err = i40e_vsi_alloc_q_vectors(vsi);
+ if (err)
+ goto err_unwind;
+
+ i40e_vsi_map_rings_to_vectors(vsi);
}
err = i40e_setup_misc_vector(pf);
@@ -13111,19 +13093,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
struct nlattr *attr, *br_spec;
- int i, rem;
+ struct i40e_veb *veb;
+ int rem;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
@@ -13188,19 +13167,14 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
- int i;
+ struct i40e_veb *veb;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb)
return 0;
@@ -13234,12 +13208,12 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
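The skb_network_offset()/skb_network_header_len() conversion (repeated for iavf later in this series) does not change the length checks themselves. What the masks verify, with a couple of worked values:

/* Values from the hunk above:
 *
 *   63 * 2  = 126 = 0b0111_1110  -> MACLEN must be even and < 128
 *   127 * 4 = 508 = 0b1_1111_1100 -> IPLEN must be a multiple of 4
 *                                    and < 512
 *
 * e.g. len = 14 (plain Ethernet header): 14 & ~126 == 0, accepted;
 *      len = 15:  15 & ~126 == 1,   rejected (odd);
 *      len = 130: 130 & ~126 == 128, rejected (too large).
 */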
@@ -13549,9 +13523,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
return err;
i40e_queue_pair_disable_irq(vsi, queue_pair);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
- i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -14134,7 +14108,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
- struct i40e_veb *veb = NULL;
+ struct i40e_veb *veb;
struct i40e_pf *pf;
u16 uplink_seid;
int i, n, bkt;
@@ -14198,29 +14172,28 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
/* If this was the last thing on the VEB, except for the
* controlling VSI, remove the VEB, which puts the controlling
- * VSI onto the next level down in the switch.
+ * VSI onto the uplink port.
*
* Well, okay, there's one more exception here: don't remove
- * the orphan VEBs yet. We'll wait for an explicit remove request
+ * the floating VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] &&
- pf->vsi[i]->uplink_seid == uplink_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- n++; /* count the VSIs */
- }
- }
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == uplink_seid)
- n++; /* count the VEBs */
- if (pf->veb[i]->seid == uplink_seid)
- veb = pf->veb[i];
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
+ if (veb && veb->uplink_seid) {
+ n = 0;
+
+ /* Count non-controlling VSIs present on the VEB */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == uplink_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ n++;
+
+ /* If there is no VSI except the control one then release
+ * the VEB and put the control VSI onto the VEB uplink.
+ */
+ if (!n)
+ i40e_veb_release(veb);
}
- if (n == 0 && veb && veb->uplink_seid != 0)
- i40e_veb_release(veb);
return 0;
}
@@ -14378,8 +14351,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
u16 alloc_queue_pairs;
- int ret, i;
int v_idx;
+ int ret;
/* The requested uplink_seid must be either
* - the PF's port seid
@@ -14394,21 +14367,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
*
* Find which uplink_seid we were given and create a new VEB if needed
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
- veb = pf->veb[i];
- break;
- }
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
if (!veb && uplink_seid != pf->mac_seid) {
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
uplink_seid);
@@ -14437,10 +14398,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
i40e_config_bridge_mode(veb);
}
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb) {
dev_info(&pf->pdev->dev, "couldn't add VEB\n");
return NULL;
@@ -14670,29 +14628,24 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
struct i40e_pf *pf = branch->pf;
u16 branch_seid = branch->seid;
u16 veb_idx = branch->idx;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* release any VEBs on this VEB - RECURSION */
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == branch->seid)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == branch->seid)
+ i40e_switch_branch_release(veb);
/* Release the VSIs on this VEB, but not the owner VSI.
*
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!pf->vsi[i])
- continue;
- if (pf->vsi[i]->uplink_seid == branch_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- i40e_vsi_release(pf->vsi[i]);
- }
- }
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == branch_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ i40e_vsi_release(vsi);
/* There's one corner case where the VEB might not have been
* removed, so double check it here and remove it if needed.
@@ -14730,38 +14683,35 @@ static void i40e_veb_clear(struct i40e_veb *veb)
**/
void i40e_veb_release(struct i40e_veb *veb)
{
- struct i40e_vsi *vsi = NULL;
+ struct i40e_vsi *vsi, *vsi_it;
struct i40e_pf *pf;
int i, n = 0;
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ i40e_pf_for_each_vsi(pf, i, vsi_it)
+ if (vsi_it->uplink_seid == veb->seid) {
+ if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER)
+ vsi = vsi_it;
n++;
- vsi = pf->vsi[i];
}
- }
- if (n != 1) {
+
+ /* A floating VEB has to be empty and a regular one must have a
+ * single owner VSI.
+ */
+ if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) {
dev_info(&pf->pdev->dev,
"can't remove VEB %d with %d VSIs left\n",
veb->seid, n);
return;
}
- /* move the remaining VSI to uplink veb */
- vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ /* For a regular VEB, move the owner VSI to the uplink port */
if (veb->uplink_seid) {
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
vsi->uplink_seid = veb->uplink_seid;
- if (veb->uplink_seid == pf->mac_seid)
- vsi->veb_idx = I40E_NO_VEB;
- else
- vsi->veb_idx = veb->veb_idx;
- } else {
- /* floating VEB */
- vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ vsi->veb_idx = I40E_NO_VEB;
}
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
@@ -14779,8 +14729,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
int ret;
- ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, false,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0,
+ veb->enabled_tc, vsi ? false : true,
&veb->seid, enable_stats, NULL);
/* get a VEB from the hardware */
@@ -14812,9 +14762,11 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
return -ENOENT;
}
- vsi->uplink_seid = veb->seid;
- vsi->veb_idx = veb->idx;
- vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ if (vsi) {
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ }
return 0;
}
@@ -14839,8 +14791,9 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
u16 uplink_seid, u16 vsi_seid,
u8 enabled_tc)
{
- struct i40e_veb *veb, *uplink_veb = NULL;
- int vsi_idx, veb_idx;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb;
+ int veb_idx;
int ret;
/* if one seid is 0, the other must be 0 to create a floating relay */
@@ -14853,26 +14806,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
- if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
- break;
- if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
- dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
- vsi_seid);
- return NULL;
- }
-
- if (uplink_seid && uplink_seid != pf->mac_seid) {
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] &&
- pf->veb[veb_idx]->seid == uplink_seid) {
- uplink_veb = pf->veb[veb_idx];
- break;
- }
- }
- if (!uplink_veb) {
- dev_info(&pf->pdev->dev,
- "uplink seid %d not found\n", uplink_seid);
+ if (vsi_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
return NULL;
}
}
@@ -14884,14 +14822,14 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
veb = pf->veb[veb_idx];
veb->flags = flags;
veb->uplink_seid = uplink_seid;
- veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
/* create the VEB in the switch */
- ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ ret = i40e_add_veb(veb, vsi);
if (ret)
goto err_veb;
- if (vsi_idx == pf->lan_vsi)
+
+ if (vsi && vsi->idx == pf->lan_vsi)
pf->lan_veb = veb->idx;
return veb;
@@ -14919,6 +14857,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
u8 element_type = ele->element_type;
u16 seid = le16_to_cpu(ele->seid);
+ struct i40e_veb *veb;
if (printconfig)
dev_info(&pf->pdev->dev,
@@ -14937,13 +14876,10 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
int v;
/* find existing or else empty VEB */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
- pf->lan_veb = v;
- break;
- }
- }
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
+ if (veb) {
+ pf->lan_veb = veb->idx;
+ } else {
v = i40e_veb_mem_alloc(pf);
if (v < 0)
break;
@@ -14956,7 +14892,6 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
pf->veb[pf->lan_veb]->seid = seid;
pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
pf->veb[pf->lan_veb]->pf = pf;
- pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
@@ -15619,6 +15554,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
#endif /* CONFIG_I40E_DCB */
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
struct i40e_hw *hw;
u16 wol_nvm_bits;
@@ -15629,7 +15565,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
- u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -15979,12 +15914,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
/* if FDIR VSI was set up, start it now */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_open(pf->vsi[i]);
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_open(vsi);
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -16230,6 +16162,8 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int ret_code;
int i;
@@ -16287,24 +16221,19 @@ static void i40e_remove(struct pci_dev *pdev)
/* If there is a switch structure or any orphans, remove them.
* This will leave only the PF's VSI remaining.
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
-
- if (pf->veb[i]->uplink_seid == pf->mac_seid ||
- pf->veb[i]->uplink_seid == 0)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == pf->mac_seid ||
+ veb->uplink_seid == 0)
+ i40e_switch_branch_release(veb);
/* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
- for (i = pf->num_alloc_vsi; i--;)
- if (pf->vsi[i]) {
- i40e_vsi_close(pf->vsi[i]);
- i40e_vsi_release(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ i40e_vsi_close(vsi);
+ i40e_vsi_release(vsi);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
@@ -16341,18 +16270,17 @@ unmap:
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
rtnl_lock();
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
- i40e_vsi_clear_rings(pf->vsi[i]);
- i40e_vsi_clear(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_clear(vsi);
+ pf->vsi[i] = NULL;
}
rtnl_unlock();
- for (i = 0; i < I40E_MAX_VEB; i++) {
- kfree(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb) {
+ kfree(veb);
pf->veb[i] = NULL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index af4269330581..ce1f11b8ad65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
**/
static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
{
- return (hw->aq.fw_maj_ver > maj ||
- (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min));
+ return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
}
#endif /* _I40E_PROTOTYPE_H_ */
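The old i40e_is_fw_ver_eq() body was evidently copied from a greater-or-equal helper and reported "equal" for any newer major version. A small table makes the fix concrete:

/* For maj = 6, min = 0:
 *
 *   fw 7.2 : old -> true  (fw_maj_ver > maj)   new -> false
 *   fw 6.0 : old -> true                       new -> true
 *   fw 6.1 : old -> false                      new -> false
 *
 * i.e. only an exact major.minor match now reports "equal".
 */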
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 971ba3322038..0d7177083708 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1548,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int err;
u64_stats_init(&rx_ring->syncp);
@@ -1569,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
- /* XDP RX-queue info only needed for RX rings exposed to XDP */
- if (rx_ring->vsi->type == I40E_VSI_MAIN) {
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
- if (err < 0)
- return err;
- }
-
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
rx_ring->rx_bi =
@@ -2087,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
struct xdp_buff *xdp)
{
- u32 next = rx_ring->next_to_clean;
+ u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 next = rx_ring->next_to_clean, i = 0;
struct i40e_rx_buffer *rx_buffer;
xdp->flags = 0;
@@ -2100,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
if (!rx_buffer->page)
continue;
- if (xdp_res == I40E_XDP_CONSUMED)
- rx_buffer->pagecnt_bias++;
- else
+ if (xdp_res != I40E_XDP_CONSUMED)
i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+ else if (i++ <= nr_frags)
+ rx_buffer->pagecnt_bias++;
/* EOP buffer will be put in i40e_clean_rx_irq() */
if (next == rx_ring->next_to_process)
@@ -2117,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
struct i40e_rx_buffer *rx_buffer;
+ struct skb_shared_info *sinfo;
unsigned int headlen;
struct sk_buff *skb;
+ u32 nr_frags = 0;
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
@@ -2168,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
@@ -2187,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
- sinfo = xdp_get_shared_info_from_buff(xdp);
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
@@ -2212,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo;
struct sk_buff *skb;
+ u32 nr_frags;
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
@@ -2231,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
*/
net_prefetch(xdp->data_meta);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
@@ -2243,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
xdp_update_skb_shared_info(skb, nr_frags,
sinfo->xdp_frags_size,
nr_frags * xdp->frame_sz,
@@ -2589,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
total_rx_bytes += size;
} else {
if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, xdp, nfrags);
+ skb = i40e_build_skb(rx_ring, xdp);
else
- skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+ skb = i40e_construct_skb(rx_ring, xdp);
/* drop if we failed to retrieve a buffer */
if (!skb) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 908cdbd3ec5d..b34c71770887 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2848,6 +2848,24 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/**
+ * i40e_can_vf_change_mac - check if a VF may change its MAC filters
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !vf->trusted)
+ return false;
+
+ return true;
+}
+
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
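The new helper reduces the MAC-change permission rule to a two-flag decision. As a truth table, derived from the function body above:

/*   vf->pf_set_mac   vf->trusted   i40e_can_vf_change_mac()
 *   false            any           true   (VF manages its own MACs)
 *   true             false         false  (admin-pinned address)
 *   true             true          true   (trust overrides the pin)
 */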
@@ -2907,8 +2925,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
+ if (!i40e_can_vf_change_mac(vf) &&
+ !is_multicast_ether_addr(addr) &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
@@ -3114,19 +3132,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = -EINVAL;
goto error_param;
}
- if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
- was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
- for (i = 0; i < al->num_elements; i++)
+ for (i = 0; i < al->num_elements; i++) {
+ const u8 *addr = al->list[i].addr;
+
+ /* Allow deleting the VF primary MAC only if it was not set
+ * administratively by the PF or if the VF is trusted.
+ */
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
+ i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ }
spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index af7d5fa6cdc1..11500003af0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
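The one-line change above fixes the page offset for XSK multi-buffer frags: packet data does not start at offset 0 of the buffer page but after the XDP headroom. Roughly, under the default layout:

/* Per-frame layout of an XSK rx buffer (simplified):
 *
 *   page: | XDP_PACKET_HEADROOM | packet data ...             |
 *         ^ page start          ^ xdp->data
 *
 * __skb_fill_page_desc_noacc() takes (page, offset, size), so the
 * frag must start at XDP_PACKET_HEADROOM; with offset 0 the skb
 * frag pointed into the headroom and the payload appeared shifted.
 */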
@@ -498,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
&rx_bytes, xdp_res, &failure);
- first->flags = 0;
next_to_clean = next_to_process;
if (failure)
break;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 335fd13e86f7..aefec6bd3b67 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4423,12 +4423,12 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 367b613d92c0..365c03d1c462 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -493,7 +493,6 @@ enum ice_pf_flags {
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
- ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -606,6 +605,7 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 hw_rx_eipe_error;
u32 oicr_err_reg;
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
@@ -896,6 +896,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
}
void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
@@ -983,6 +984,8 @@ void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+int ice_init_dev(struct ice_pf *pf);
+void ice_deinit_dev(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..7cee365cc7d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2018-2020, Intel Corporation. */
#include "ice.h"
+#include <net/rps.h>
/**
* ice_is_arfs_active - helper to check is aRFS is active
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 533b923cae2d..d2fd315556a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector = vsi->q_vectors[v_idx];
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, NULL);
+ ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
tx_ring->q_vector = NULL;
}
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, NULL);
+ ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
rx_ring->q_vector = NULL;
}
@@ -538,7 +536,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -547,19 +545,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
if (ring->vsi->type == ICE_VSI_PF) {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
@@ -571,13 +577,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
@@ -624,6 +631,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - set up max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+ /* set up individual rings */
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -819,7 +882,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
*/
-int
+static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
@@ -890,6 +953,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
return 0;
}
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf->num_txqs = 1;
+
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ int err = 0;
+ u16 q_idx;
+
+ qg_buf->num_txqs = 1;
+
+ for (q_idx = 0; q_idx < count; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx queues dedicated for XDP in given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+ int ret;
+ int i;
+
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ if (ret)
+ return ret;
+
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
+
+ return 0;
+}
+
/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
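Taken together, the ice_base.c hunks above move per-ring Tx/Rx setup behind VSI-level entry points, making ice_vsi_cfg_txq()/ice_vsi_cfg_rxq() static. A hypothetical caller during VSI bring-up might look like this (a sketch only; error handling is trimmed and ice_is_xdp_ena_vsi() is assumed from elsewhere in the driver):

static int example_vsi_cfg(struct ice_vsi *vsi)
{
	int err;

	err = ice_vsi_cfg_lan_txqs(vsi);	/* all LAN Tx rings */
	if (err)
		return err;

	if (ice_is_xdp_ena_vsi(vsi)) {
		err = ice_vsi_cfg_xdp_txqs(vsi); /* XDP Tx + XSK pools */
		if (err)
			return err;
	}

	return ice_vsi_cfg_rxqs(vsi);		/* frame size + Rx rings */
}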
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 17321ba75602..b711bc921928 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,8 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -14,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
- struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 10c32cd80fff..4d8111aeb0ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -154,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ hw->mac_type = ICE_MAC_GENERIC_3K_E825;
+ break;
case ICE_DEV_ID_E830_BACKPLANE:
case ICE_DEV_ID_E830_QSFP56:
case ICE_DEV_ID_E830_SFP:
@@ -170,6 +176,18 @@ static int ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_generic_mac - check if device's mac_type is generic
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if mac_type is generic (with SBQ support), false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K_E825);
+}
+
+/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
@@ -241,6 +259,25 @@ bool ice_is_e823(struct ice_hw *hw)
}
/**
+ * ice_is_e825c - Check if a device is E825C family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E825-C based, false if not.
+ */
+bool ice_is_e825c(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -965,9 +1002,9 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ void *mac_buf __free(kfree);
u16 mac_buf_len;
- void *mac_buf;
int status;
/* Set MAC type based on DeviceID */
@@ -1045,7 +1082,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
status = -ENOMEM;
goto err_unroll_sched;
@@ -1055,7 +1092,6 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
NULL);
- devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
status);
@@ -1082,18 +1118,15 @@ int ice_init_hw(struct ice_hw *hw)
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
- mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
- sizeof(struct ice_aqc_manage_mac_read_resp),
- GFP_KERNEL);
- mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
-
+ mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
+ GFP_KERNEL);
if (!mac_buf) {
status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
- devm_kfree(ice_hw_to_dev(hw), mac_buf);
if (status)
goto err_unroll_fltr_mgmt_struct;
@@ -1362,9 +1395,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index)
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
@@ -3240,19 +3272,14 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_hw *hw;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
- hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
- GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
-
- devm_kfree(ice_hw_to_dev(hw), pcaps);
}
return status;
@@ -3393,8 +3420,8 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
- struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_hw *hw;
int status;
@@ -3404,7 +3431,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
*aq_failures = 0;
hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
@@ -3456,7 +3483,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
}
out:
- devm_kfree(ice_hw_to_dev(hw), pcaps);
return status;
}
@@ -3535,7 +3561,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree) = NULL;
struct ice_hw *hw;
int status;
@@ -3604,8 +3630,6 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
}
out:
- kfree(pcaps);
-
return status;
}
@@ -4325,13 +4349,13 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_write_byte - write a byte to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_byte - write a byte to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -4342,14 +4366,11 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = (u8)(BIT(ce_info->width) - 1);
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
src_byte = *from;
- src_byte &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_byte <<= shift_width;
+ src_byte &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4364,13 +4385,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_word - write a word to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_word - write a word to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -4382,17 +4403,14 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = BIT(ce_info->width) - 1;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_word = *(u16 *)from;
- src_word &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_word <<= shift_width;
+ src_word &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4407,13 +4425,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_dword - write a dword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_dword - write a dword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -4425,25 +4443,14 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 32 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 5 bits so the shift will do nothing
- */
- if (ce_info->width < 32)
- mask = BIT(ce_info->width) - 1;
- else
- mask = (u32)~0;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_dword = *(u32 *)from;
- src_dword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_dword <<= shift_width;
+ src_dword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4458,13 +4465,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_qword - write a qword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_qword - write a qword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -4476,25 +4483,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 64 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 6 bits so the shift will do nothing
- */
- if (ce_info->width < 64)
- mask = BIT_ULL(ce_info->width) - 1;
- else
- mask = (u64)~0;
+ mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_qword = *(u64 *)from;
- src_qword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_qword <<= shift_width;
+ src_qword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
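The byte/word/dword/qword rewrites above all make the same two changes: the mask is built pre-shifted with GENMASK(), and the source value is shifted before it is masked. That removes the width < 32/64 special casing: the old BIT(width) - 1 expression shifts by the full type width when the field fills the type, which is undefined in C (on x86 the shift count is truncated, so the mask would come out as 0 rather than all-ones, as the deleted comments note). A small self-contained demonstration, with a local stand-in for the kernel's GENMASK():

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's GENMASK(h, l) */
#define GENMASK_U32(h, l) \
	(((~(uint32_t)0) >> (31 - (h))) & ((~(uint32_t)0) << (l)))

int main(void)
{
	unsigned int width = 32, shift_width = 0;

	/* old style: BIT(width) - 1 is undefined for width == 32 */
	/* uint32_t bad = ((uint32_t)1 << width) - 1; */

	/* new style: the high/low bounds never reach the type width */
	uint32_t mask = GENMASK_U32(width - 1 + shift_width, shift_width);

	printf("mask = 0x%08x\n", mask); /* prints 0xffffffff */
	return 0;
}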
@@ -4513,11 +4509,10 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
- * @ce_info: a description of the structure to be transformed
+ * @ce_info: list of context element descriptions
*/
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -4533,16 +4528,16 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
}
switch (ce_info[f].size_of) {
case sizeof(u8):
- ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u16):
- ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u32):
- ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u64):
- ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
return -EINVAL;
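ice_set_ctx() walks a ce_info descriptor table and dispatches on the storage size of the unpacked field, so each table entry fully describes one field. A hedged sketch of one entry, using the driver's ICE_CTX_STORE() helper (modeled on the ice_rlan_ctx_info table this hunk's context refers to; treat the terminator as illustrative):

/* one descriptor: the u16 'head' member of struct ice_rlan_ctx is a
 * 13-bit field starting at bit 0 of the packed context buffer; since
 * size_of == sizeof(u16), ice_set_ctx() packs it via ice_pack_ctx_word()
 */
static const struct ice_ctx_ele example_ctx_info[] = {
	ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
	{ 0 }
};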
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 3e933f75e948..ffb22c7ce28b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -53,9 +53,8 @@ int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index);
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -72,9 +71,8 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -112,6 +110,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
+bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
@@ -251,6 +250,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
+bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e7d2474c431c..ffe660f34992 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -666,7 +666,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw)
/* The device sideband queue is only supported on devices with the
* generic MAC type.
*/
- return hw->mac_type == ICE_MAC_GENERIC;
+ return ice_is_generic_mac(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 8b7504a9df31..7532d11ad7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1825,6 +1825,7 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
seg_id = SEGMENT_TYPE_ICE_E830;
break;
case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
default:
seg_id = SEGMENT_TYPE_ICE_E810;
break;
@@ -1845,6 +1846,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
case ICE_MAC_E830:
sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
+ break;
case ICE_MAC_GENERIC:
default:
sign_type = SEGMENT_SIGN_TYPE_RSA2K;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index c2bfba6b9ead..d252d98218d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -64,9 +64,6 @@ static const char * const ice_fwlog_level_string[] = {
"verbose",
};
-/* the order in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
static const char * const ice_fwlog_log_size[] = {
"128K",
"256K",
@@ -648,6 +645,16 @@ err_create_module_files:
}
/**
+ * ice_debugfs_pf_deinit - cleanup PF's debugfs
+ * @pf: pointer to the PF struct
+ */
+void ice_debugfs_pf_deinit(struct ice_pf *pf)
+{
+ debugfs_remove_recursive(pf->ice_debugfs_pf);
+ pf->ice_debugfs_pf = NULL;
+}
+
+/**
* ice_debugfs_init - create root directory for debugfs entries
*/
void ice_debugfs_init(void)
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index a2d384dbfc76..9dfae9bce758 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -71,5 +71,13 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Connection E825-C for backplane */
+#define ICE_DEV_ID_E825C_BACKPLANE 0x579c
+/* Intel(R) Ethernet Connection E825-C for QSFP */
+#define ICE_DEV_ID_E825C_QSFP 0x579d
+/* Intel(R) Ethernet Connection E825-C for SFP */
+#define ICE_DEV_ID_E825C_SFP 0x579e
+/* Intel(R) Ethernet Connection E825-C 1GbE */
+#define ICE_DEV_ID_E825C_SGMII 0x579f
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 65be56f2af9e..b516e42b41f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -445,6 +445,20 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
}
/**
+ * ice_devlink_reinit_down - unload the given PF
+ * @pf: pointer to the PF struct
+ */
+static void ice_devlink_reinit_down(struct ice_pf *pf)
+{
+ /* No need to take devl_lock, it's already taken by devlink API */
+ ice_unload(pf);
+ rtnl_lock();
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ rtnl_unlock();
+ ice_deinit_dev(pf);
+}
+
+/**
* ice_devlink_reload_down - prepare for reload
* @devlink: pointer to the devlink instance to reload
* @netns_change: if true, the network namespace is changing
@@ -477,7 +491,7 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
"Remove all VFs before doing reinit\n");
return -EOPNOTSUPP;
}
- ice_unload(pf);
+ ice_devlink_reinit_down(pf);
return 0;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
return ice_devlink_reload_empr_start(pf, extack);
@@ -1270,6 +1284,45 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
}
/**
+ * ice_devlink_reinit_up - reinitialize the given PF
+ * @pf: pointer to the PF struct
+ */
+static int ice_devlink_reinit_up(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_cfg_params params;
+ int err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ rtnl_lock();
+ err = ice_vsi_cfg(vsi, &params);
+ rtnl_unlock();
+ if (err)
+ goto err_vsi_cfg;
+
+ /* No need to take devl_lock, it's already taken by devlink API */
+ err = ice_load(pf);
+ if (err)
+ goto err_load;
+
+ return 0;
+
+err_load:
+ rtnl_lock();
+ ice_vsi_decfg(vsi);
+ rtnl_unlock();
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
* ice_devlink_reload_up - do reload up after reinit
* @devlink: pointer to the devlink instance reloading
* @action: the action requested
@@ -1289,7 +1342,7 @@ ice_devlink_reload_up(struct devlink *devlink,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return ice_load(pf);
+ return ice_devlink_reinit_up(pf);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
return ice_devlink_reload_empr_finish(pf, extack);
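With these two helpers the driver-reinit reload action becomes symmetric, which is what allows devlink to fully tear the PF down and bring it back. A comment-form sketch of the pairing:

/*
 * down: ice_unload()   -> ice_vsi_decfg(main VSI) -> ice_deinit_dev()
 * up:   ice_init_dev() -> ice_vsi_cfg(main VSI)   -> ice_load()
 *
 * Triggered from userspace with, e.g. (PCI address illustrative):
 *   devlink dev reload pci/0000:16:00.0 action driver_reinit
 */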
@@ -1569,6 +1622,7 @@ static const struct devlink_port_ops ice_devlink_port_ops = {
* @pf: the PF to create a devlink port for
*
* Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
*
* Return: zero on success or an error code on failure.
*/
@@ -1581,6 +1635,8 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
struct device *dev;
int err;
+ devlink = priv_to_devlink(pf);
+
dev = ice_pf_to_dev(pf);
devlink_port = &pf->devlink_port;
@@ -1601,10 +1657,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
ice_devlink_set_switch_id(pf, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
- err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
if (err) {
dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
pf->hw.pf_id, err);
@@ -1619,10 +1674,11 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
* @pf: the PF to cleanup
*
* Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
*/
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- devlink_port_unregister(&pf->devlink_port);
+ devl_port_unregister(&pf->devlink_port);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index b9c5eced6326..e92be6f130a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -31,6 +31,26 @@ static const char * const pin_type_name[] = {
};
/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+ if (ice_is_reset_in_progress(pf->state)) {
+ NL_SET_ERR_MSG(extack, "PF reset in progress");
+ return true;
+ }
+ return false;
+}
+
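Each dpll callback below adopts the same guard: check for a PF reset before taking pf->dplls.lock or touching the hardware. A sketch of the pattern with a hypothetical callback body:

static int example_dpll_op(struct ice_pf *pf, struct netlink_ext_ack *extack)
{
	/* bail out with -EBUSY (and a populated extack) during PF reset */
	if (ice_dpll_is_reset(pf, extack))
		return -EBUSY;

	mutex_lock(&pf->dplls.lock);
	/* ... hardware access ... */
	mutex_unlock(&pf->dplls.lock);
	return 0;
}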
+/**
* ice_dpll_pin_freq_set - set pin's frequency
* @pf: private board structure
* @pin: pointer to a pin
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
+ * @dpll_idx: index of the dpll to connect the output pin to
* @pin_type: type of pin being enabled
* @extack: error reporting
*
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
*/
static int
ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
- enum ice_dpll_pin_type pin_type,
+ u8 dpll_idx, enum ice_dpll_pin_type pin_type,
struct netlink_ext_ack *extack)
{
u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
- ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+ 0, 0);
break;
default:
return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
case ICE_DPLL_PIN_TYPE_INPUT:
ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
NULL, &pin->flags[0],
- &pin->freq, NULL);
+ &pin->freq, &pin->phase_adjust);
if (ret)
goto err;
if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
- &pin->flags[0], NULL,
+ &pin->flags[0], &parent,
&pin->freq, NULL);
if (ret)
goto err;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
- pin->state[0] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+ parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ parent == pf->dplls.eec.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ parent == pf->dplls.pps.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -488,6 +527,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
* @status: on success holds dpll's lock status
+ * @status_error: status error value
* @extack: error reporting
*
* Dpll subsystem callback, provides dpll's lock status.
@@ -500,6 +540,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
static int
ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
struct netlink_ext_ack *extack)
{
struct ice_dpll *d = dpll_priv;
@@ -568,9 +609,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
if (enable)
- ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+ extack);
else
ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
if (!ret)
@@ -603,6 +648,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct netlink_ext_ack *extack)
{
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+
+ if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+ return 0;
return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -665,14 +715,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
if (ret)
goto unlock;
- if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+ pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
*state = p->state[d->dpll_idx];
- else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
- *state = p->state[0];
ret = 0;
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -790,6 +842,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
mutex_unlock(&pf->dplls.lock);
@@ -910,6 +965,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
u8 flag, flags_en = 0;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
switch (type) {
case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1069,6 +1127,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1123,6 +1184,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1305,8 +1369,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
- int ret;
+ int ret = 0;
+ if (ice_is_reset_in_progress(pf->state))
+ goto resched;
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
@@ -1326,6 +1392,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+resched:
/* Run twice a second or reschedule if update failed */
kthread_queue_delayed_work(d->kworker, &d->work,
ret ? msecs_to_jiffies(10) :
@@ -1532,7 +1599,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
- netdev_dpll_pin_clear(vsi->netdev);
+ dpll_netdev_pin_clear(vsi->netdev);
dpll_pin_put(rclk->pin);
}
@@ -1576,7 +1643,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (WARN_ON((!vsi || !vsi->netdev)))
return -EINVAL;
- netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
@@ -2055,6 +2122,7 @@ void ice_dpll_init(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
int err = 0;
+ mutex_init(&d->lock);
err = ice_dpll_init_info(pf, cgu);
if (err)
goto err_exit;
@@ -2067,7 +2135,6 @@ void ice_dpll_init(struct ice_pf *pf)
err = ice_dpll_init_pins(pf, cgu);
if (err)
goto deinit_pps;
- mutex_init(&d->lock);
if (cgu) {
err = ice_dpll_init_worker(pf);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a19b06f18e40..bb912155676e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -129,6 +129,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+ ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
@@ -801,7 +802,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
if (!pf)
return -EINVAL;
- data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
+ data = kzalloc(size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -944,11 +945,9 @@ static u64 ice_loopback_test(struct net_device *netdev)
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- struct device *dev;
- u8 *tx_frame;
+ u8 *tx_frame __free(kfree) = NULL;
int i;
- dev = ice_pf_to_dev(pf);
netdev_info(netdev, "loopback test\n");
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -993,7 +992,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
for (i = 0; i < num_frames; i++) {
if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 8;
- goto lbtest_free_frame;
+ goto remove_mac_filters;
}
}
@@ -1003,8 +1002,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
else if (valid_frames != num_frames)
ret = 10;
-lbtest_free_frame:
- devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
@@ -3360,7 +3357,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
struct ice_pf *pf = ice_netdev_to_pf(dev);
/* only report timestamping if PTP is enabled */
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
index 92b5dac481cd..4fd15387a7e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -188,6 +188,8 @@ void ice_fwlog_deinit(struct ice_hw *hw)
if (hw->bus.func)
return;
+ ice_debugfs_pf_deinit(hw->back);
+
/* make sure FW logging is disabled to not put the FW in a weird state
* for the next driver load
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 2a25323105e5..467372d541d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
}
/**
+ * ice_pkg_has_lport_extract - check if lport extraction is supported
+ * @hw: HW struct
+ */
+static bool ice_pkg_has_lport_extract(struct ice_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) {
+ u16 offset;
+ u8 fv_prot;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i,
+ &fv_prot, &offset);
+ if (fv_prot == ICE_FV_PROT_MDID &&
+ offset == ICE_LP_EXT_BUF_OFFSET)
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_lag_find_primary - returns pointer to primary interfaces lag struct
* @lag: local interfaces lag struct
*/
@@ -1206,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
}
/**
- * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG
* @pf: PF struct
*/
static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
@@ -1219,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
else
ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
- if (caps->sriov_lag)
+ if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw))
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index ede833dfa658..183b38792ef2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -17,6 +17,9 @@ enum ice_lag_role {
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_RESET_RETRIES 5
+#define ICE_SW_DEFAULT_PROFILE 0
+#define ICE_FV_PROT_MDID 255
+#define ICE_LP_EXT_BUF_OFFSET 32
struct ice_pf;
struct ice_vf;
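These constants feed ice_pkg_has_lport_extract() above. A brief note on their presumed meaning, inferred from the check rather than spelled out in the patch:

/*
 * ICE_SW_DEFAULT_PROFILE - index of the default switch profile whose
 *                          field vectors are scanned
 * ICE_FV_PROT_MDID       - protocol ID 255 marks a metadata (MDID)
 *                          extraction rather than a packet header field
 * ICE_LP_EXT_BUF_OFFSET  - buffer offset 32, where the logical-port
 *                          metadata is expected; finding such an entry
 *                          means the DDP package can extract the lport,
 *                          a prerequisite for SRIOV LAG
 */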
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 9be724291ef8..71f061667980 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1672,27 +1672,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
- }
-}
-
-/**
* ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to PF to check
*
@@ -1795,114 +1774,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
- if (q_idx >= vsi->num_rxq)
- return -EINVAL;
-
- return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
- if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- u16 i;
-
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
- /* set up individual rings */
- ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- int err = 0;
- u16 q_idx;
-
- qg_buf->num_txqs = 1;
-
- for (q_idx = 0; q_idx < count; q_idx++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
- int ret;
- int i;
-
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
- if (ret)
- return ret;
-
- ice_for_each_rxq(vsi, i)
- ice_tx_xsk_pool(vsi, i);
-
- return 0;
-}
-
/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
@@ -2426,7 +2297,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_map_rings_to_vectors(vsi);
/* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi, true);
+ ice_vsi_set_napi_queues(vsi);
vsi->stat_offsets_loaded = false;
@@ -2849,74 +2720,19 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
- int i;
-
- /* disable interrupt causation from each queue */
- if (vsi->tx_rings) {
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i]) {
- u16 reg;
-
- reg = vsi->tx_rings[i]->reg_idx;
- val = rd32(hw, QINT_TQCTL(reg));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(reg), val);
- }
- }
- }
-
- if (vsi->rx_rings) {
- ice_for_each_rxq(vsi, i) {
- if (vsi->rx_rings[i]) {
- u16 reg;
-
- reg = vsi->rx_rings[i]->reg_idx;
- val = rd32(hw, QINT_RQCTL(reg));
- val &= ~QINT_RQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_RQCTL(reg), val);
- }
- }
- }
-
- /* disable each interrupt */
- ice_for_each_q_vector(vsi, i) {
- if (!vsi->q_vectors[i])
- continue;
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- }
-
- ice_flush(hw);
-
- /* don't call synchronize_irq() for VF's from the host */
- if (vsi->type == ICE_VSI_VF)
- return;
-
- ice_for_each_q_vector(vsi, i)
- synchronize_irq(vsi->q_vectors[i]->irq.virq);
-}
-
-/**
- * ice_queue_set_napi - Set the napi instance for the queue
+ * __ice_queue_set_napi - Set the napi instance for the queue
* @dev: device to which NAPI and queue belong
* @queue_index: Index of queue
* @type: queue type as RX or TX
* @napi: NAPI context
* @locked: is the rtnl_lock already held
*
- * Set the napi instance for the queue
+ * Set the napi instance for the queue. Caller indicates the lock status.
*/
static void
-ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi,
- bool locked)
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
{
if (!locked)
rtnl_lock();
@@ -2926,26 +2742,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
}
/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (!vsi->netdev)
+ return;
+
+ if (current_work() == &pf->serv_task ||
+ test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+ test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_SUSPENDED, pf->state))
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ false);
+ else
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ true);
+}
+
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
* @q_vector: q_vector pointer
* @locked: is the rtnl_lock already held
*
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
* Associate the q_vector napi with all the queue[s] on the vector
*/
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
{
struct ice_rx_ring *rx_ring;
struct ice_tx_ring *tx_ring;
ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
/* Also set the interrupt number for the NAPI */
netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}
@@ -2953,11 +2822,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
/**
* ice_vsi_set_napi_queues
* @vsi: VSI pointer
- * @locked: is the rtnl_lock already held
*
* Associate queue[s] with napi for all vectors
*/
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
int i;
@@ -2965,7 +2833,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
return;
ice_for_each_q_vector(vsi, i)
- ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
}
/**
@@ -3140,7 +3008,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
}
}
- tx_ring_stats = vsi_stat->rx_ring_stats;
+ tx_ring_stats = vsi_stat->tx_ring_stats;
vsi_stat->tx_ring_stats =
krealloc_array(vsi_stat->tx_ring_stats, req_txq,
sizeof(*vsi_stat->tx_ring_stats),
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 71bd27244941..9cd23afe5f15 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -54,14 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
-
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
@@ -72,8 +64,6 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -91,9 +81,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
int ice_vsi_release(struct ice_vsi *vsi);
@@ -114,8 +110,6 @@ void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
bool ena_ts);
-void ice_vsi_dis_irq(struct ice_vsi *vsi);
-
void ice_vsi_free_irq(struct ice_vsi *vsi);
void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index dd4a9bc0dfdc..5d7660bc8846 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -613,7 +613,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_prepare_for_reset(pf);
+ ice_ptp_prepare_for_reset(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_exit(pf);
@@ -1649,8 +1649,10 @@ static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- /* Nothing to do here if sideband queue is not supported */
- if (!ice_is_sbq_supported(hw)) {
+ /* if mac_type is not generic, sideband is not supported
+ * and there's nothing to do here
+ */
+ if (!ice_is_generic_mac(hw)) {
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
return;
}
@@ -3495,7 +3497,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
- ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
}
}
@@ -4572,90 +4574,6 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-static int ice_start_eth(struct ice_vsi *vsi)
-{
- int err;
-
- err = ice_init_mac_fltr(vsi->back);
- if (err)
- return err;
-
- err = ice_vsi_open(vsi);
- if (err)
- ice_fltr_remove_all(vsi);
-
- return err;
-}
-
-static void ice_stop_eth(struct ice_vsi *vsi)
-{
- ice_fltr_remove_all(vsi);
- ice_vsi_close(vsi);
-}
-
-static int ice_init_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- int err;
-
- if (!vsi)
- return -EINVAL;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- err = ice_cfg_netdev(vsi);
- if (err)
- return err;
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- err = ice_init_mac_fltr(pf);
- if (err)
- goto err_init_mac_fltr;
-
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create_pf_port;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
-
- err = ice_register_netdev(vsi);
- if (err)
- goto err_register_netdev;
-
- err = ice_tc_indir_block_register(vsi);
- if (err)
- goto err_tc_indir_block_register;
-
- ice_napi_add(vsi);
-
- return 0;
-
-err_tc_indir_block_register:
- ice_unregister_netdev(vsi);
-err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create_pf_port:
-err_init_mac_fltr:
- ice_decfg_netdev(vsi);
- return err;
-}
-
-static void ice_deinit_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
-
- if (!vsi)
- return;
-
- ice_vsi_close(vsi);
- ice_unregister_netdev(vsi);
- ice_devlink_destroy_pf_port(pf);
- ice_tc_indir_block_unregister(vsi);
- ice_decfg_netdev(vsi);
-}
-
/**
* ice_wait_for_fw - wait for full FW readiness
* @hw: pointer to the hardware structure
@@ -4681,7 +4599,7 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
-static int ice_init_dev(struct ice_pf *pf)
+int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -4774,7 +4692,7 @@ err_init_pf:
return err;
}
-static void ice_deinit_dev(struct ice_pf *pf)
+void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
ice_deinit_pf(pf);
@@ -5079,31 +4997,47 @@ static void ice_deinit(struct ice_pf *pf)
/**
* ice_load - load pf by init hw and starting VSI
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
int ice_load(struct ice_pf *pf)
{
- struct ice_vsi_cfg_params params = {};
struct ice_vsi *vsi;
int err;
- err = ice_init_dev(pf);
+ devl_assert_locked(priv_to_devlink(pf));
+
+ vsi = ice_get_main_vsi(pf);
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
if (err)
return err;
- vsi = ice_get_main_vsi(pf);
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
+
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create_pf_port;
- rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
+ err = ice_register_netdev(vsi);
if (err)
- goto err_vsi_cfg;
+ goto err_register_netdev;
- err = ice_start_eth(ice_get_main_vsi(pf));
+ err = ice_tc_indir_block_register(vsi);
if (err)
- goto err_start_eth;
- rtnl_unlock();
+ goto err_tc_indir_block_register;
+
+ ice_napi_add(vsi);
err = ice_init_rdma(pf);
if (err)
@@ -5117,29 +5051,35 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
- ice_vsi_close(ice_get_main_vsi(pf));
- rtnl_lock();
-err_start_eth:
- ice_vsi_decfg(ice_get_main_vsi(pf));
-err_vsi_cfg:
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
return err;
}
/**
* ice_unload - unload pf by stopping VSI and deinit hw
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
void ice_unload(struct ice_pf *pf)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ devl_assert_locked(priv_to_devlink(pf));
+
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- rtnl_lock();
- ice_stop_eth(ice_get_main_vsi(pf));
- ice_vsi_decfg(ice_get_main_vsi(pf));
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_decfg_netdev(vsi);
}
/**
@@ -5237,27 +5177,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err)
goto err_init;
- err = ice_init_eth(pf);
+ devl_lock(priv_to_devlink(pf));
+ err = ice_load(pf);
+ devl_unlock(priv_to_devlink(pf));
if (err)
- goto err_init_eth;
-
- err = ice_init_rdma(pf);
- if (err)
- goto err_init_rdma;
+ goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
- ice_init_features(pf);
-
return 0;
err_init_devlink:
- ice_deinit_rdma(pf);
-err_init_rdma:
- ice_deinit_eth(pf);
-err_init_eth:
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+err_load:
ice_deinit(pf);
err_init:
pci_disable_device(pdev);
@@ -5340,8 +5276,6 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
- ice_debugfs_exit();
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5355,12 +5289,14 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_deinit_features(pf);
+
ice_deinit_devlink(pf);
- ice_deinit_rdma(pf);
- ice_deinit_eth(pf);
- ice_deinit(pf);
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+
+ ice_deinit(pf);
ice_vsi_release_all(pf);
ice_setup_mc_magic_wake(pf);
@@ -5447,6 +5383,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ ice_vsi_set_napi_queues(pf->vsi[v]);
}
ret = ice_req_irq_msix_misc(pf);
@@ -5752,6 +5689,10 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
@@ -5841,6 +5782,7 @@ module_init(ice_module_init);
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
+ ice_debugfs_exit();
destroy_workqueue(ice_wq);
destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
@@ -7060,6 +7002,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each Rx queue; Tx queues are
+ * handled in ice_vsi_stop_tx_ring()
+ */
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ ice_for_each_q_vector(vsi, i) {
+ if (!vsi->q_vectors[i])
+ continue;
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ }
+
+ ice_flush(hw);
+
+ /* don't call synchronize_irq() for VF's from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(vsi->q_vectors[i]->irq.virq);
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*
@@ -7548,7 +7534,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_reset(pf);
+ ice_ptp_rebuild(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
@@ -8012,6 +7998,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index 82bc54fec7f3..a2562f04267f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -24,7 +24,7 @@
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
-#define ICE_M(m, s) ((m) << (s))
+#define ICE_M(m, s) ((m ## U) << (s))
struct ice_dma_mem {
void *va;
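Pasting the U suffix onto the mask makes the literal unsigned before the shift, so building a mask that lands in bit 31 no longer overflows a signed int (undefined behavior). The paste also constrains callers: m must now be a plain integer literal, not an expression or a named constant. A two-line illustration:

#define ICE_M(m, s)	((m ## U) << (s))

/* 0x3ff << 22 as signed int would set the sign bit (UB); 0x3ffU is safe */
unsigned int example_mask = ICE_M(0x3ff, 22);	/* 0xffc00000 */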
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 3b6605c8585e..c11eba07283c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -601,17 +601,13 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
/* Read the low 32 bit value */
raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
- /* For PHYs which don't implement a proper timestamp ready bitmap,
- * verify that the timestamp value is different from the last cached
- * timestamp. If it is not, skip this for now assuming it hasn't yet
- * been captured by hardware.
+ /* Devices using this interface always verify that the timestamp differs
+ * from the last cached timestamp value.
*/
- if (!drop_ts && tx->verify_cached &&
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
return;
- if (tx->verify_cached && raw_tstamp)
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
@@ -701,9 +697,11 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
hw = &pf->hw;
/* Read the Tx ready status first */
- err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
- if (err)
- return;
+ if (tx->has_ready_bitmap) {
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err)
+ return;
+ }
/* Drop packets if the link went down */
link_up = ptp_port->link_up;
@@ -731,7 +729,8 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* If we do not, the hardware logic for generating a new
* interrupt can get stuck on some devices.
*/
- if (!(tstamp_ready & BIT_ULL(phy_idx))) {
+ if (tx->has_ready_bitmap &&
+ !(tstamp_ready & BIT_ULL(phy_idx))) {
if (drop_ts)
goto skip_ts_read;
@@ -751,7 +750,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* from the last cached timestamp. If it is not, skip this for
* now assuming it hasn't yet been captured by hardware.
*/
- if (!drop_ts && tx->verify_cached &&
+ if (!drop_ts && !tx->has_ready_bitmap &&
raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
@@ -761,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
skip_ts_read:
spin_lock_irqsave(&tx->lock, flags);
- if (tx->verify_cached && raw_tstamp)
+ if (!tx->has_ready_bitmap && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
@@ -965,6 +964,22 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
+ * @pf: Board private structure
+ *
+ * Called by the clock owner to flush all the Tx timestamp trackers associated
+ * with the clock.
+ */
+static void
+ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
+ ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
+}
+
+/**
* ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
* @pf: Board private structure
* @tx: Tx tracking structure to release
@@ -1014,7 +1029,7 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
tx->block = port / ICE_PORTS_PER_QUAD;
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
- tx->verify_cached = 0;
+ tx->has_ready_bitmap = 1;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1037,7 +1052,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->verify_cached = 1;
+ tx->has_ready_bitmap = 0;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1430,7 +1445,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
@@ -1456,14 +1471,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
}
/**
- * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
* @pf: PF private structure
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
* Utility function to enable or disable Tx timestamp interrupt and threshold
*/
-static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
struct ice_hw *hw = &pf->hw;
int err = 0;
@@ -2162,7 +2177,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
config = &pf->ptp.tstamp_config;
@@ -2232,7 +2247,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -2616,7 +2631,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
err = ice_ptp_update_cached_phctime(pf);
@@ -2629,36 +2644,72 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
- * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ if (ptp->state != ICE_PTP_READY)
+ return;
+
+ ptp->state = ICE_PTP_RESETTING;
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_disable_timestamp_mode(pf);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+
+ if (reset_type == ICE_RESET_PFR)
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Save the current system time so the clock can be restored after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
+ * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
* @pf: Board private structure
+ *
+ * Companion function for ice_ptp_rebuild() which handles tasks that only the
+ * PTP clock owner instance should perform.
*/
-void ice_ptp_reset(struct ice_pf *pf)
+static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
u64 time_diff;
-
- if (test_bit(ICE_PFR_REQ, pf->state) ||
- !ice_pf_src_tmr_owned(pf))
- goto pfr;
+ int err;
err = ice_ptp_init_phc(hw);
if (err)
- goto err;
+ return err;
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
- goto err;
+ return err;
}
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Write the initial Time value to PHY and LAN using the cached PHC
@@ -2674,38 +2725,54 @@ void ice_ptp_reset(struct ice_pf *pf)
err = ice_ptp_write_init(pf, &ts);
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ /* Flush software tracking of any outstanding timestamps since we're
+ * about to flush the PHY timestamp block.
+ */
+ ice_ptp_flush_all_tx_tracker(pf);
+
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
- goto err;
- }
+ return err;
-pfr:
- /* Init Tx structures */
- if (ice_is_e810(&pf->hw)) {
- err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
- } else {
- kthread_init_delayed_work(&ptp->port.ov_work,
- ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx,
- ptp->port.port_num);
+ ice_ptp_restart_all_phy(pf);
}
- if (err)
+
+ return 0;
+}
+
+/**
+ * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ int err;
+
+ if (ptp->state == ICE_PTP_READY) {
+ ice_ptp_prepare_for_reset(pf, reset_type);
+ } else if (ptp->state != ICE_PTP_RESETTING) {
+ err = -EINVAL;
+ dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
goto err;
+ }
- set_bit(ICE_FLAG_PTP, pf->flags);
+ if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
+ err = ice_ptp_rebuild_owner(pf);
+ if (err)
+ goto err;
+ }
- /* Restart the PHY timestamping block */
- if (!test_bit(ICE_PFR_REQ, pf->state) &&
- ice_pf_src_tmr_owned(pf))
- ice_ptp_restart_all_phy(pf);
+ ptp->state = ICE_PTP_READY;
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2714,6 +2781,7 @@ pfr:
return;
err:
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
@@ -2923,39 +2991,6 @@ int ice_ptp_clock_index(struct ice_pf *pf)
}
/**
- * ice_ptp_prepare_for_reset - Prepare PTP for reset
- * @pf: Board private structure
- */
-void ice_ptp_prepare_for_reset(struct ice_pf *pf)
-{
- struct ice_ptp *ptp = &pf->ptp;
- u8 src_tmr;
-
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_disable_timestamp_mode(pf);
-
- kthread_cancel_delayed_work_sync(&ptp->work);
-
- if (test_bit(ICE_PFR_REQ, pf->state))
- return;
-
- ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
-
- /* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
-
- src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
-
- /* Disable source clock */
- wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
-
- /* Acquire PHC and system timer to restore after reset */
- ptp->reset_time = ktime_get_real_ns();
-}
-
-/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -2967,7 +3002,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
+ int err;
err = ice_ptp_init_phc(hw);
if (err) {
@@ -3002,7 +3037,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
goto err_exit;
}
@@ -3195,6 +3230,8 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ptp->state = ICE_PTP_INITIALIZING;
+
ice_ptp_init_phy_model(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3219,12 +3256,13 @@ void ice_ptp_init(struct ice_pf *pf)
/* Configure initial Tx interrupt settings */
ice_ptp_cfg_tx_interrupt(pf);
- set_bit(ICE_FLAG_PTP, pf->flags);
- err = ice_ptp_init_work(pf, ptp);
+ err = ice_ptp_create_auxbus_device(pf);
if (err)
goto err;
- err = ice_ptp_create_auxbus_device(pf);
+ ptp->state = ICE_PTP_READY;
+
+ err = ice_ptp_init_work(pf, ptp);
if (err)
goto err;
@@ -3237,7 +3275,7 @@ err:
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- clear_bit(ICE_FLAG_PTP, pf->flags);
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3250,9 +3288,11 @@ err:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
+ pf->ptp.state = ICE_PTP_UNINIT;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_disable_timestamp_mode(pf);
@@ -3260,8 +3300,6 @@ void ice_ptp_release(struct ice_pf *pf)
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
@@ -3271,6 +3309,9 @@ void ice_ptp_release(struct ice_pf *pf)
pf->ptp.kworker = NULL;
}
+ if (ice_pf_src_tmr_owned(pf))
+ ice_ptp_unregister_auxbus_driver(pf);
+
if (!pf->ptp.clock)
return;
@@ -3280,7 +3321,5 @@ void ice_ptp_release(struct ice_pf *pf)
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
- ice_ptp_unregister_auxbus_driver(pf);
-
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 087dd32d8762..3af20025043a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -100,7 +100,7 @@ struct ice_perout_channel {
* the last timestamp we read for a given index. If the current timestamp
* value is the same as the cached value, we assume a new timestamp hasn't
* been captured. This avoids reporting stale timestamps to the stack. This is
- * only done if the verify_cached flag is set in ice_ptp_tx structure.
+ * only done if the has_ready_bitmap flag is not set in the ice_ptp_tx structure.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
@@ -130,7 +130,9 @@ enum ice_tx_tstamp_work {
* @init: if true, the tracker is initialized;
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
- * @verify_cached: if true, verify new timestamp differs from last read value
+ * @has_ready_bitmap: if true, the hardware has a valid Tx timestamp ready
+ * bitmap register. If false, fall back to verifying new
+ * timestamp values against the previously cached copy.
* @last_ll_ts_idx_read: index of the last LL TS read by the FW
*/
struct ice_ptp_tx {
@@ -143,7 +145,7 @@ struct ice_ptp_tx {
u8 len;
u8 init : 1;
u8 calibrating : 1;
- u8 verify_cached : 1;
+ u8 has_ready_bitmap : 1;
s8 last_ll_ts_idx_read;
};
@@ -203,8 +205,17 @@ struct ice_ptp_port_owner {
#define GLTSYN_TGT_H_IDX_MAX 4
+enum ice_ptp_state {
+ ICE_PTP_UNINIT = 0,
+ ICE_PTP_INITIALIZING,
+ ICE_PTP_READY,
+ ICE_PTP_RESETTING,
+ ICE_PTP_ERROR,
+};
+
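
The transitions implied by the hunks in ice_ptp.c above, collected in one place (a sketch inferred from this patch, not text from it):

	/*
	 * ICE_PTP_UNINIT --ice_ptp_init()--> ICE_PTP_INITIALIZING
	 * ICE_PTP_INITIALIZING --success--> ICE_PTP_READY
	 * ICE_PTP_READY --ice_ptp_prepare_for_reset()--> ICE_PTP_RESETTING
	 * ICE_PTP_RESETTING --ice_ptp_rebuild()--> ICE_PTP_READY
	 * init/rebuild failure --> ICE_PTP_ERROR
	 * ICE_PTP_READY --ice_ptp_release()--> ICE_PTP_UNINIT
	 */
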
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @state: current state of PTP state machine
* @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
* @ports_owner: data for the auxiliary driver owner
@@ -227,6 +238,7 @@ struct ice_ptp_port_owner {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_state state;
enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
struct ice_ptp_port_owner ports_owner;
@@ -304,8 +316,9 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
-void ice_ptp_reset(struct ice_pf *pf);
-void ice_ptp_prepare_for_reset(struct ice_pf *pf);
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
@@ -345,8 +358,15 @@ ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
return 0;
}
-static inline void ice_ptp_reset(struct ice_pf *pf) { }
-static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_rebuild(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
+
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index a94a1c48c3de..a958fcf3e6be 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -240,7 +240,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
}
vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
return vsi;
}
@@ -1068,6 +1067,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
struct ice_pf *pf = pci_get_drvdata(pdev);
u16 prev_msix, prev_queues, queues;
bool needs_rebuild = false;
+ struct ice_vsi *vsi;
struct ice_vf *vf;
int id;
@@ -1102,6 +1102,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (!vf)
return -ENOENT;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -ENOENT;
+
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1122,7 +1126,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (vf->first_vector_idx < 0)
goto unroll;
- if (ice_vf_reconfig_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1148,8 +1152,10 @@ unroll:
if (vf->first_vector_idx < 0)
return -EINVAL;
- if (needs_rebuild)
+ if (needs_rebuild) {
ice_vf_reconfig_vsi(vf);
+ ice_vf_init_host_cfg(vf, vsi);
+ }
ice_ena_vf_mappings(vf);
ice_put_vf(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 74d13cc5a3a7..97d41d6ebf1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
- if (rx_ring->vsi->type == ICE_VSI_PF &&
- !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
- goto err;
return 0;
err:
@@ -603,9 +598,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- rx_buf->act = ret;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
}
/**
@@ -893,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
return -ENOMEM;
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
+ /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+ * can pop off frags, but the driver has to handle that on its own
+ */
+ rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@@ -1251,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1266,10 +1263,12 @@ construct_skb:
ICE_XDP_CONSUMED);
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
break;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b3379ff73674..af955b0e5dc5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -358,6 +358,7 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
+ u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 839e5da24ad5..f8f1d2bdc1be 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -143,8 +143,12 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
- if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
+ ring->vsi->back->hw_rx_eipe_error++;
+ return;
+ }
+
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
goto checksum_fail;
if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 762047508619..afcead4baef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -12,26 +12,39 @@
* act: action to store onto Rx buffers related to XDP buffer parts
*
* Set action that should be taken before putting Rx buffer from first frag
- * to one before last. Last one is handled by caller of this function as it
- * is the EOP frag that is currently being processed. This function is
- * supposed to be called only when XDP buffer contains frags.
+ * to the last one.
*/
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
const unsigned int act)
{
- const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- u32 first = rx_ring->first_desc;
- u32 nr_frags = sinfo->nr_frags;
+ u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
struct ice_rx_buf *buf;
for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[first];
+ buf = &rx_ring->rx_buf[idx];
buf->act = act;
- if (++first == cnt)
- first = 0;
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ /* adjust pagecnt_bias on frags freed by XDP prog */
+ if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+ u32 delta = rx_ring->nr_frags - sinfo_frags;
+
+ while (delta) {
+ if (idx == 0)
+ idx = cnt - 1;
+ else
+ idx--;
+ buf = &rx_ring->rx_buf[idx];
+ buf->pagecnt_bias--;
+ delta--;
+ }
}
}
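
The backward walk in the new tail of ice_set_rx_bufs_act() relies on simple ring wrap-around: with rx_ring->nr_frags captured before the XDP program ran, rx_ring->nr_frags - sinfo_frags is exactly the number of frags bpf_xdp_adjust_tail() popped, and stepping backwards from idx visits exactly those buffers. A sketch of the wrap arithmetic (hypothetical helper, not in the patch):

	/* Step one buffer back in the Rx ring, wrapping from 0 to cnt - 1. */
	static inline u32 ice_rx_ring_idx_prev(u32 idx, u32 cnt)
	{
		return idx ? idx - 1 : cnt - 1;
	}
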
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 41ab6d7bbd9e..9ff92dba5823 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -132,6 +132,7 @@ enum ice_mac_type {
ICE_MAC_E810,
ICE_MAC_E830,
ICE_MAC_GENERIC,
+ ICE_MAC_GENERIC_3K_E825,
};
/* Media Types */
@@ -1072,7 +1073,7 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_OROM_VER_BUILD_SHIFT 8
#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
#define ICE_OROM_VER_SHIFT 24
-#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
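
The U suffix on ICE_OROM_VER_MASK is presumably there to keep the shift in unsigned arithmetic; with a plain int constant the shift moves a bit into the sign position, and the negative result sign-extends when widened:

	int bad = 0xff << 24;      /* 0xff000000 does not fit in a 32-bit signed
	                            * int: undefined behavior by the letter of
	                            * the C standard, negative in practice
	                            */
	u64 wide = (u64)(0xff << 24);  /* sign-extends: 0xffffffffff000000 */
	u64 ok   = (u64)(0xffU << 24); /* stays 0x00000000ff000000 */
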
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 2ffdae9a82df..21d26e19338a 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -280,12 +280,6 @@ int ice_vf_reconfig_vsi(struct ice_vf *vf)
return err;
}
- /* Update the lan_vsi_num field since it might have been changed. The
- * PF lan_vsi_idx number remains the same so we don't need to change
- * that.
- */
- vf->lan_vsi_num = vsi->vsi_num;
-
return 0;
}
@@ -315,7 +309,6 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
* vf->lan_vsi_idx
*/
vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
return 0;
}
@@ -1315,13 +1308,12 @@ int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
}
/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
* @vf: VF to remove access to VSI for
*/
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 0cc9034065c5..fec16919ec19 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -109,11 +109,6 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index c925813ec9ca..1ff9818b4c84 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vf->driver_caps = *(u32 *)msg;
else
vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
vf->driver_caps);
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
@@ -506,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -552,27 +545,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
*/
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi(pf, vsi_id);
-
- return (vsi && (vsi->vf == vf));
+ return vsi_id == ICE_VF_VSI_ID;
}
/**
* ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
+ * @vsi: VSI to check queue ID against
* @qid: VSI relative queue ID
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
{
- struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
/* allocated Tx and Rx queues should be always equal for VF VSI */
- return (vsi && (qid < vsi->alloc_txq));
+ return qid < vsi->alloc_txq;
}
/**
@@ -1330,7 +1316,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
*/
q_map = vqs->rx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1352,7 +1338,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1457,7 +1443,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1483,7 +1469,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
} else if (q_map) {
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1539,7 +1525,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_rx++;
@@ -1553,7 +1539,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_tx++;
@@ -1710,7 +1696,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->txq.headwb_enabled ||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 60dfbe05980a..3a4115869153 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -19,6 +19,15 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID 1
+
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 5e19d48a05b4..d796dbd2a440 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -13,8 +13,6 @@
* - opcodes needed by VF when caps are activated
*
* Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
* - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
* - VIRTCHNL_VF_OFFLOAD_CRC
* - VIRTCHNL_VF_OFFLOAD_RX_POLLING
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index f001553e1a1a..8e4ff3af86c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -94,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
return -EINVAL;
- if (vsi_id != vf->lan_vsi_num)
- return -EINVAL;
-
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5d1ae8e4058a..1857220d27fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
return -EBUSY;
usleep_range(1000, 2000);
}
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
@@ -217,53 +218,39 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
int err;
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, size);
- qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
if (err)
return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
- err = ice_vsi_cfg_rxq(rx_ring);
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
if (err)
return err;
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
return err;
- clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return 0;
}
@@ -825,7 +812,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
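
The hunk above changes the frag's page offset from 0 to XDP_PACKET_HEADROOM: in zero-copy mode each xdp_buff's payload presumably starts that many bytes past data_hard_start, so an offset of 0 would have pointed the skb frag at the headroom rather than at the data. A sketch of the assumed layout:

	/* XSK buffer page layout (sketch):
	 *
	 * data_hard_start
	 * |<-- XDP_PACKET_HEADROOM -->|<-- payload (size) -->|
	 *                             ^
	 *                             page offset passed to
	 *                             __skb_fill_page_desc_noacc()
	 */
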
@@ -895,7 +883,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (!first) {
first = xdp;
- xdp_buff_clear_frags_flag(first);
} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
break;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 0acc125decb3..e7a036538246 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -37,8 +37,6 @@ struct idpf_vport_max_q;
#define IDPF_MB_MAX_ERR 20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
-#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
-#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
#define IDPF_MAX_WAIT 500
@@ -66,14 +64,12 @@ struct idpf_mac_filter {
/**
* enum idpf_state - State machine to handle bring up
- * @__IDPF_STARTUP: Start the state machine
* @__IDPF_VER_CHECK: Negotiate virtchnl version
* @__IDPF_GET_CAPS: Negotiate capabilities
* @__IDPF_INIT_SW: Init based on given capabilities
* @__IDPF_STATE_LAST: Must be last, used to determine size
*/
enum idpf_state {
- __IDPF_STARTUP,
__IDPF_VER_CHECK,
__IDPF_GET_CAPS,
__IDPF_INIT_SW,
@@ -87,6 +83,7 @@ enum idpf_state {
* @IDPF_HR_RESET_IN_PROG: Reset in progress
* @IDPF_REMOVE_IN_PROG: Driver remove in progress
* @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_VC_CORE_INIT: virtchnl core has been initialized
* @IDPF_FLAGS_NBITS: Must be last
*/
enum idpf_flags {
@@ -95,6 +92,7 @@ enum idpf_flags {
IDPF_HR_RESET_IN_PROG,
IDPF_REMOVE_IN_PROG,
IDPF_MB_INTR_MODE,
+ IDPF_VC_CORE_INIT,
IDPF_FLAGS_NBITS,
};
@@ -209,71 +207,6 @@ struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
};
-/* These macros allow us to generate an enum and a matching char * array of
- * stringified enums that are always in sync. Checkpatch issues a bogus warning
- * about this being a complex macro; but it's wrong, these are never used as a
- * statement and instead only used to define the enum and array.
- */
-#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
- STATE(IDPF_VC_CREATE_VPORT) \
- STATE(IDPF_VC_CREATE_VPORT_ERR) \
- STATE(IDPF_VC_ENA_VPORT) \
- STATE(IDPF_VC_ENA_VPORT_ERR) \
- STATE(IDPF_VC_DIS_VPORT) \
- STATE(IDPF_VC_DIS_VPORT_ERR) \
- STATE(IDPF_VC_DESTROY_VPORT) \
- STATE(IDPF_VC_DESTROY_VPORT_ERR) \
- STATE(IDPF_VC_CONFIG_TXQ) \
- STATE(IDPF_VC_CONFIG_TXQ_ERR) \
- STATE(IDPF_VC_CONFIG_RXQ) \
- STATE(IDPF_VC_CONFIG_RXQ_ERR) \
- STATE(IDPF_VC_ENA_QUEUES) \
- STATE(IDPF_VC_ENA_QUEUES_ERR) \
- STATE(IDPF_VC_DIS_QUEUES) \
- STATE(IDPF_VC_DIS_QUEUES_ERR) \
- STATE(IDPF_VC_MAP_IRQ) \
- STATE(IDPF_VC_MAP_IRQ_ERR) \
- STATE(IDPF_VC_UNMAP_IRQ) \
- STATE(IDPF_VC_UNMAP_IRQ_ERR) \
- STATE(IDPF_VC_ADD_QUEUES) \
- STATE(IDPF_VC_ADD_QUEUES_ERR) \
- STATE(IDPF_VC_DEL_QUEUES) \
- STATE(IDPF_VC_DEL_QUEUES_ERR) \
- STATE(IDPF_VC_ALLOC_VECTORS) \
- STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_DEALLOC_VECTORS) \
- STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_SET_SRIOV_VFS) \
- STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
- STATE(IDPF_VC_GET_RSS_LUT) \
- STATE(IDPF_VC_GET_RSS_LUT_ERR) \
- STATE(IDPF_VC_SET_RSS_LUT) \
- STATE(IDPF_VC_SET_RSS_LUT_ERR) \
- STATE(IDPF_VC_GET_RSS_KEY) \
- STATE(IDPF_VC_GET_RSS_KEY_ERR) \
- STATE(IDPF_VC_SET_RSS_KEY) \
- STATE(IDPF_VC_SET_RSS_KEY_ERR) \
- STATE(IDPF_VC_GET_STATS) \
- STATE(IDPF_VC_GET_STATS_ERR) \
- STATE(IDPF_VC_ADD_MAC_ADDR) \
- STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
- STATE(IDPF_VC_DEL_MAC_ADDR) \
- STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
- STATE(IDPF_VC_GET_PTYPE_INFO) \
- STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
- STATE(IDPF_VC_LOOPBACK_STATE) \
- STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
- STATE(IDPF_VC_NBITS)
-
-#define IDPF_GEN_ENUM(ENUM) ENUM,
-#define IDPF_GEN_STRING(STRING) #STRING,
-
-enum idpf_vport_vc_state {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
-};
-
-extern const char * const idpf_vport_vc_state_str[];
-
/**
* enum idpf_vport_reset_cause - Vport soft reset causes
* @IDPF_SR_Q_CHANGE: Soft reset queue change
@@ -358,11 +291,7 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @link_speed_mbps: Link speed in mbps
- * @vc_msg: Virtchnl message buffer
- * @vc_state: Virtchnl message state
- * @vchnl_wq: Wait queue for virtchnl messages
* @sw_marker_wq: wait queue for marker packets
- * @vc_buf_lock: Lock to protect virtchnl buffer
*/
struct idpf_vport {
u16 num_txq;
@@ -408,12 +337,7 @@ struct idpf_vport {
bool link_up;
u32 link_speed_mbps;
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-
- wait_queue_head_t vchnl_wq;
wait_queue_head_t sw_marker_wq;
- struct mutex vc_buf_lock;
};
/**
@@ -476,15 +400,11 @@ struct idpf_vport_user_config_data {
* enum idpf_vport_config_flags - Vport config flags
* @IDPF_VPORT_REG_NETDEV: Register netdev
* @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
- * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
- * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
* @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
*/
enum idpf_vport_config_flags {
IDPF_VPORT_REG_NETDEV,
IDPF_VPORT_UP_REQUESTED,
- IDPF_VPORT_ADD_MAC_REQ,
- IDPF_VPORT_DEL_MAC_REQ,
IDPF_VPORT_CONFIG_FLAGS_NBITS,
};
@@ -555,11 +475,13 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- void *req_qs_chunks;
+ struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
+struct idpf_vc_xn_manager;
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -601,9 +523,7 @@ struct idpf_vport_config {
* @stats_task: Periodic statistics retrieval task
* @stats_wq: Workqueue for statistics task
* @caps: Negotiated capabilities with device
- * @vchnl_wq: Wait queue for virtchnl messages
- * @vc_state: Virtchnl message state
- * @vc_msg: Virtchnl message buffer
+ * @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
@@ -659,10 +579,8 @@ struct idpf_adapter {
struct delayed_work stats_task;
struct workqueue_struct *stats_wq;
struct virtchnl2_get_capabilities caps;
+ struct idpf_vc_xn_manager *vcxn_mngr;
- wait_queue_head_t vchnl_wq;
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
struct idpf_dev_ops dev_ops;
int num_vfs;
bool crc_enable;
@@ -903,68 +821,18 @@ void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
-int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
-void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
-int idpf_vc_core_init(struct idpf_adapter *adapter);
-void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
- struct idpf_vec_regs *reg_vals);
u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
-int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
u16 *q_vector_idxs,
struct idpf_vector_info *vec_info);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
-int idpf_get_vec_ids(struct idpf_adapter *adapter,
- u16 *vecids, int num_vecids,
- struct virtchnl2_vector_chunks *chunks);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg);
void idpf_set_ethtool_ops(struct net_device *netdev);
-int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
- bool add, bool async);
-int idpf_set_promiscuous(struct idpf_adapter *adapter,
- struct idpf_vport_user_config_data *config_data,
- u32 vport_id);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
-u32 idpf_get_vport_id(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_check_supported_desc_ids(struct idpf_vport *vport);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
u16 itr, bool tx);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index c7f43d2fcd13..4849590a5591 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -516,6 +516,8 @@ post_buffs_out:
/* Wrap to the end of the ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
+ dma_wmb();
+
wr32(hw, cq->reg.tail, cq->next_to_post);
}
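
The new dma_wmb() follows the usual descriptor-ring pattern: writes that fill the descriptors must be visible to the device before the tail bump tells it to fetch them. Schematically (a sketch, not the literal code around it):

	desc->flags = cpu_to_le16(flags);         /* 1) publish descriptor fields  */
	dma_wmb();                                /* 2) order them before the bump */
	wr32(hw, cq->reg.tail, cq->next_to_post); /* 3) device may now fetch them  */
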
@@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
int err = 0;
u16 i;
- if (*num_q_msg == 0)
- return 0;
- else if (*num_q_msg > cq->ring_size)
- return -EBADR;
-
/* take the lock before we start messing with the ring */
mutex_lock(&cq->cq_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index 8dee098bbfb0..e8e046ef2f0d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -69,6 +69,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+ struct {
+ u32 rsvd;
+ u16 data;
+ u16 flags;
+ } sw_cookie;
} ctx;
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 34ad1ac46b78..3df9935685e9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 5fea2fd957eb..5d3532c27d57 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2,14 +2,11 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
-const char * const idpf_vport_vc_state_str[] = {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
-};
-
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
* @adapter: private data struct
@@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
*/
void idpf_intr_rel(struct idpf_adapter *adapter)
{
- int err;
-
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
-
- err = idpf_send_dealloc_vectors_msg(adapter);
- if (err)
- dev_err(&adapter->pdev->dev,
- "Failed to deallocate vectors: %d\n", err);
-
+ idpf_send_dealloc_vectors_msg(adapter);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -783,6 +773,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
+ netdev->dev_port = idx;
+
/* configure default MTU size */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = vport->max_mtu;
@@ -973,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
- int i;
vport_config = adapter->vport_config[vport->idx];
idpf_deinit_rss(vport);
@@ -983,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
idpf_send_destroy_vport_msg(vport);
- /* Set all bits as we dont know on which vc_state the vport vhnl_wq
- * is waiting on and wakeup the virtchnl workqueue even if it is
- * waiting for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, vport->vc_state);
- wake_up(&vport->vchnl_wq);
-
- mutex_destroy(&vport->vc_buf_lock);
-
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, vport->vc_state);
-
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
max_q.max_txq = vport_config->max_q.max_txq;
@@ -1251,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
msecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+ idpf_recv_mb_msg(adapter);
}
/**
@@ -1541,9 +1518,7 @@ void idpf_init_task(struct work_struct *work)
vport_config = adapter->vport_config[index];
init_waitqueue_head(&vport->sw_marker_wq);
- init_waitqueue_head(&vport->vchnl_wq);
- mutex_init(&vport->vc_buf_lock);
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1821,6 +1796,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
goto unlock_mutex;
}
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
/* Initialize the state machine, also allocate memory and request
* resources
*/
@@ -1900,7 +1877,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1949,7 +1926,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Since idpf_vport_queues_alloc was called with new_port, the queue
* back pointers are currently pointing to the local new_vport. Reset
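
The memcpy() bound above relies on struct layout: everything from link_speed_mbps onward (including sw_marker_wq) is deliberately left uncopied, so the wait queue must stay after that field. A compile-time guard along these lines would document the assumption (hypothetical, not part of the patch):

	static_assert(offsetof(struct idpf_vport, sw_marker_wq) >
		      offsetof(struct idpf_vport, link_speed_mbps),
		      "memcpy() bound must exclude the wait queue");
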
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index e1febc74cefd..f784eea044bd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
@@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev)
idpf_sriov_configure(pdev, 0);
idpf_vc_core_deinit(adapter);
+
/* Be a good citizen and leave the device clean on exit */
adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
@@ -66,6 +68,8 @@ destroy_wqs:
adapter->vport_config = NULL;
kfree(adapter->netdevs);
adapter->netdevs = NULL;
+ kfree(adapter->vcxn_mngr);
+ adapter->vcxn_mngr = NULL;
mutex_destroy(&adapter->vport_ctrl_lock);
mutex_destroy(&adapter->vector_lock);
@@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&adapter->queue_lock);
mutex_init(&adapter->vc_buf_lock);
- init_waitqueue_head(&adapter->vchnl_wq);
-
INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2f8ad79ae3f0..6dd7a66bb897 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 8ade4e3a9fe1..629cb5cb7c9f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_vf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_VF_ITR_IDX_SPACING 0x40
@@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index d0cdd63b3d5b..a5f9b7a5effe 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2,46 +2,192 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
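
The masks above carve the 16-bit software cookie into a ring index and a per-message salt. A sketch of pack/unpack helpers using these masks (hypothetical names; the patch's own helpers are outside the quoted hunks):

	#include <linux/bitfield.h>

	static u16 idpf_vc_xn_make_cookie(u8 idx, u8 salt)
	{
		return FIELD_PREP(IDPF_VC_XN_SALT_M, salt) |
		       FIELD_PREP(IDPF_VC_XN_IDX_M, idx);
	}

	static u8 idpf_vc_xn_cookie_idx(u16 cookie)
	{
		return FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
	}

	static u8 idpf_vc_xn_cookie_salt(u16 cookie)
	{
		return FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
	}
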
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
+ * buffer updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle the
+ * reply when it arrives
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: used by the virtchnl event loop to signal that a reply is
+ * available; based on the kernel completion API
+ * @state: current state of the transaction; along with the fields below it
+ * is protected by the completion's lock
+ * @reply_sz: original size of the reply, may be > reply.iov_len; the data is
+ * truncated on its way to the receiver thread according to
+ * reply.iov_len
+ * @reply: reference to the buffer(s) the reply data should be written to;
+ * may be 0-length (NULL address then permitted) if the reply data
+ * should be ignored
+ * @async_handler: if sent asynchronously, a callback can be provided to
+ * handle the reply when it is received
+ * @vc_op: the opcode sent with this transaction
+ * @idx: index used to look the transaction up when the reply arrives;
+ * encoded into the cookie
+ * @salt: changed on every message to keep the cookie unique; encoded into
+ * the cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on the stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
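
How these parameters are meant to be filled in by callers; a hypothetical usage sketch (idpf_vc_xn_exec() and the exact message shape are assumptions here, since the exec helper lies outside the quoted hunks):

	struct virtchnl2_vport_stats stats_msg = {
		.vport_id = cpu_to_le32(vport->vport_id),
	};
	struct idpf_vc_xn_params params = {
		.vc_op      = VIRTCHNL2_OP_GET_STATS,
		.send_buf   = { .iov_base = &stats_msg,
				.iov_len = sizeof(stats_msg) },
		.recv_buf   = { .iov_base = &stats_msg,
				.iov_len = sizeof(stats_msg) },
		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
	};
	ssize_t reply_sz = idpf_vc_xn_exec(vport->adapter, &params);
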
+/**
+ * struct idpf_vc_xn_manager - Manager for tracking transactions
+ * @ring: backing and lookup for transactions
+ * @free_xn_bm: bitmap for free transactions
+ * @xn_bm_lock: serializes access to the free-transaction bitmap
+ * @salt: used to make the cookie unique for every message
+ */
+struct idpf_vc_xn_manager {
+ struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
+ DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spinlock_t xn_bm_lock;
+ u8 salt;
+};
+
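
A sketch of how a free transaction would be claimed from this manager under the bitmap lock (assumed shape; the patch's own allocation helper is outside the quoted hunks):

	static struct idpf_vc_xn *
	idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *mngr)
	{
		struct idpf_vc_xn *xn = NULL;
		unsigned long free_idx;

		spin_lock_bh(&mngr->xn_bm_lock);
		free_idx = find_first_bit(mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
		if (free_idx == IDPF_VC_XN_RING_LEN)
			goto do_unlock;

		clear_bit(free_idx, mngr->free_xn_bm);
		xn = &mngr->ring[free_idx];
		xn->salt = mngr->salt++;

	do_unlock:
		spin_unlock_bh(&mngr->xn_bm_lock);
		return xn;
	}
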
+/**
+ * idpf_vid_to_vport - Translate vport id to vport pointer
+ * @adapter: private data struct
+ * @v_id: vport id to translate
+ *
+ * Returns vport matching v_id, NULL if not found.
+ */
+static
+struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+ int i;
+
+ for (i = 0; i < num_max_vports; i++)
+ if (adapter->vport_ids[i] == v_id)
+ return adapter->vports[i];
+
+ return NULL;
+}
+
+/**
+ * idpf_handle_event_link - Handle link event message
+ * @adapter: private data struct
+ * @v2e: virtchnl event message
+ */
+static void idpf_handle_event_link(struct idpf_adapter *adapter,
+ const struct virtchnl2_event *v2e)
+{
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
+ if (!vport) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
+ v2e->vport_id);
+ return;
+ }
+ np = netdev_priv(vport->netdev);
+
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+
+ if (vport->link_up == v2e->link_status)
+ return;
+
+ vport->link_up = v2e->link_status;
+
+ if (np->state != __IDPF_VPORT_UP)
+ return;
+
+ if (vport->link_up) {
+ netif_tx_start_all_queues(vport->netdev);
+ netif_carrier_on(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+}
/**
* idpf_recv_event_msg - Receive virtchnl event message
- * @vport: virtual port structure
+ * @adapter: Driver specific private structure
* @ctlq_msg: message to copy from
*
* Receive virtchnl event message
*/
-static void idpf_recv_event_msg(struct idpf_vport *vport,
+static void idpf_recv_event_msg(struct idpf_adapter *adapter,
struct idpf_ctlq_msg *ctlq_msg)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ int payload_size = ctlq_msg->ctx.indirect.payload->size;
struct virtchnl2_event *v2e;
- bool link_status;
u32 event;
+ if (payload_size < sizeof(*v2e)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode,
+ payload_size);
+ return;
+ }
+
v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
event = le32_to_cpu(v2e->event);
switch (event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
- link_status = v2e->link_status;
-
- if (vport->link_up == link_status)
- break;
-
- vport->link_up = link_status;
- if (np->state == __IDPF_VPORT_UP) {
- if (vport->link_up) {
- netif_carrier_on(vport->netdev);
- netif_tx_start_all_queues(vport->netdev);
- } else {
- netif_tx_stop_all_queues(vport->netdev);
- netif_carrier_off(vport->netdev);
- }
- }
- break;
+ idpf_handle_event_link(adapter, v2e);
+ return;
default:
- dev_err(&vport->adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Unknown event %d from PF\n", event);
break;
}
@@ -93,13 +239,14 @@ err_kfree:
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
+ * @cookie: unique SW generated cookie per message
*
* Will prepare the control queue message and initiate the send API
*
* Returns 0 on success, negative on failure
*/
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg)
+ u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
err = -ENOMEM;
goto dma_alloc_error;
}
- memcpy(dma_mem->va, msg, msg_size);
+
+ /* It's possible we're just sending an opcode but no buffer */
+ if (msg && msg_size)
+ memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->ctx.indirect.payload = dma_mem;
+ ctlq_msg->ctx.sw_cookie.data = cookie;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
if (err)
@@ -159,592 +310,432 @@ dma_mem_error:
return err;
}
-/**
- * idpf_find_vport - Find vport pointer from control queue message
- * @adapter: driver specific private structure
- * @vport: address of vport pointer to copy the vport from adapters vport list
- * @ctlq_msg: control queue message
+/* API for virtchnl "transaction" support ("xn" for short).
*
- * Return 0 on success, error value on failure. Also this function does check
- * for the opcodes which expect to receive payload and return error value if
- * it is not the case.
+ * We are reusing the completion lock to serialize the accesses to the
+ * transaction state for simplicity, but it could be its own separate
+ * synchronization primitive as well. For now, this API is only used from
+ * within a workqueue context;
+ * raw_spin_lock() is enough.
*/
-static int idpf_find_vport(struct idpf_adapter *adapter,
- struct idpf_vport **vport,
- struct idpf_ctlq_msg *ctlq_msg)
-{
- bool no_op = false, vid_found = false;
- int i, err = 0;
- char *vc_msg;
- u32 v_id;
+/**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_lock(xn) \
+ raw_spin_lock(&(xn)->completed.wait.lock)
- vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
- if (!vc_msg)
- return -ENOMEM;
+/**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_unlock(xn) \
+ raw_spin_unlock(&(xn)->completed.wait.lock)
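
Typical use of the pair above; all reads and writes of xn->state happen under this lock (sketch):

	idpf_vc_xn_lock(xn);
	if (xn->state == IDPF_VC_XN_WAITING)
		xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
	idpf_vc_xn_unlock(xn);
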
- if (ctlq_msg->data_len) {
- size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+/**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+ * reset the transaction state.
+ * @xn: struct idpf_vc_xn to update
+ */
+static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
+{
+ xn->reply.iov_base = NULL;
+ xn->reply.iov_len = 0;
- if (!payload_size) {
- dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
- kfree(vc_msg);
+ if (xn->state != IDPF_VC_XN_SHUTDOWN)
+ xn->state = IDPF_VC_XN_IDLE;
+}
- return -EINVAL;
- }
+/**
+ * idpf_vc_xn_init - Initialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
+ */
+static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+{
+ int i;
- memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
- }
-
- switch (ctlq_msg->cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- case VIRTCHNL2_OP_CREATE_VPORT:
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- goto free_vc_msg;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- case VIRTCHNL2_OP_DESTROY_VPORT:
- v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_DEL_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_EVENT:
- v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
- break;
- default:
- no_op = true;
- break;
- }
+ spin_lock_init(&vcxn_mngr->xn_bm_lock);
- if (no_op)
- goto free_vc_msg;
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- for (i = 0; i < idpf_get_max_vports(adapter); i++) {
- if (adapter->vport_ids[i] == v_id) {
- vid_found = true;
- break;
- }
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
+ init_completion(&xn->completed);
}
- if (vid_found)
- *vport = adapter->vports[i];
- else
- err = -EINVAL;
-
-free_vc_msg:
- kfree(vc_msg);
-
- return err;
+ bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}
/**
- * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @err_enum: err bit to set on error
+ * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
*
- * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
- * negative on failure.
+ * All waiting threads will be woken up and their transactions aborted. Further
+ * operations on the object will fail.
*/
-static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state err_enum)
+static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
- if (ctlq_msg->cookie.mbx.chnl_retval) {
- if (vport)
- set_bit(err_enum, vport->vc_state);
- else
- set_bit(err_enum, adapter->vc_state);
+ int i;
- return -EINVAL;
- }
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
- if (vport)
- memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
- else
- memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- return 0;
+ idpf_vc_xn_lock(xn);
+ xn->state = IDPF_VC_XN_SHUTDOWN;
+ idpf_vc_xn_release_bufs(xn);
+ idpf_vc_xn_unlock(xn);
+ complete_all(&xn->completed);
+ }
}
/**
- * idpf_recv_vchnl_op - helper function with common logic when handling the
- * reception of VIRTCHNL OPs.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @state: state bit used on timeout check
- * @err_state: err bit to set on error
+ * idpf_vc_xn_pop_free - Pop a free transaction from free list
+ * @vcxn_mngr: transaction manager to pop from
+ *
+ * Returns NULL if no free transactions are available.
*/
-static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_state)
+static
+struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
- wait_queue_head_t *vchnl_wq;
- int err;
+ struct idpf_vc_xn *xn = NULL;
+ unsigned long free_idx;
- if (vport)
- vchnl_wq = &vport->vchnl_wq;
- else
- vchnl_wq = &adapter->vchnl_wq;
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ if (free_idx == IDPF_VC_XN_RING_LEN)
+ goto do_unlock;
- err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
- if (wq_has_sleeper(vchnl_wq)) {
- if (vport)
- set_bit(state, vport->vc_state);
- else
- set_bit(state, adapter->vc_state);
+ clear_bit(free_idx, vcxn_mngr->free_xn_bm);
+ xn = &vcxn_mngr->ring[free_idx];
+ xn->salt = vcxn_mngr->salt++;
- wake_up(vchnl_wq);
- } else {
- if (!err) {
- dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
- ctlq_msg->cookie.mbx.chnl_opcode);
- } else {
- /* Clear the errors since there is no sleeper to pass
- * them on
- */
- if (vport)
- clear_bit(err_state, vport->vc_state);
- else
- clear_bit(err_state, adapter->vc_state);
- }
- }
+do_unlock:
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
+
+ return xn;
}
/**
- * idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
- * @op: virtchannel operation code
- * @msg: Received message holding buffer
- * @msg_size: message size
- *
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * idpf_vc_xn_push_free - Push a free transaction to free list
+ * @vcxn_mngr: transaction manager to push to
+ * @xn: transaction to push
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size)
+static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
+ struct idpf_vc_xn *xn)
{
- struct idpf_vport *vport = NULL;
- struct idpf_ctlq_msg ctlq_msg;
- struct idpf_dma_mem *dma_mem;
- bool work_done = false;
- int num_retry = 2000;
- u16 num_q_msg;
- int err;
-
- while (1) {
- struct idpf_vport_config *vport_config;
- int payload_size = 0;
-
- /* Try to get one message */
- num_q_msg = 1;
- dma_mem = NULL;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
- /* If no message then decide if we have to retry based on
- * opcode
- */
- if (err || !num_q_msg) {
- /* Increasing num_retry to consider the delayed
- * responses because of large number of VF's mailbox
- * messages. If the mailbox message is received from
- * the other side, we come out of the sleep cycle
- * immediately else we wait for more time.
- */
- if (!op || !num_retry--)
- break;
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
- err = -EIO;
- break;
- }
- msleep(20);
- continue;
- }
+ idpf_vc_xn_release_bufs(xn);
+ set_bit(xn->idx, vcxn_mngr->free_xn_bm);
+}
- /* If we are here a message is received. Check if we are looking
- * for a specific message based on opcode. If it is different
- * ignore and post buffers
+/**
+ * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @params: parameters for this particular transaction including
+ * -vc_op: virtchannel operation to send
+ * -send_buf: kvec iov for send buf and len
+ * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
+ * -timeout_ms: timeout waiting for a reply (milliseconds)
+ * -async: don't wait for message reply, will lose caller context
+ * -async_handler: callback to handle async replies
+ *
+ * @returns >= 0 for success, the size of the initial reply (may or may not be
+ * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
+ * error.
+ */
+static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
+{
+ const struct kvec *send_buf = &params->send_buf;
+ struct idpf_vc_xn *xn;
+ ssize_t retval;
+ u16 cookie;
+
+ xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
+ /* no free transactions available */
+ if (!xn)
+ return -ENOSPC;
+
+ idpf_vc_xn_lock(xn);
+ if (xn->state == IDPF_VC_XN_SHUTDOWN) {
+ retval = -ENXIO;
+ goto only_unlock;
+ } else if (xn->state != IDPF_VC_XN_IDLE) {
+ /* We're just going to clobber this transaction even though
+ * it's not IDLE. If we don't reuse it we could eventually
+ * leak all the free transactions and not be able to send
+ * any messages. At least this way we make an attempt to
+ * remain functional even though something really bad is
+ * happening that's corrupting what was supposed to be free
+ * transactions.
*/
- if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
- goto post_buffs;
+ WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
+ xn->idx, xn->vc_op);
+ }
- err = idpf_find_vport(adapter, &vport, &ctlq_msg);
- if (err)
- goto post_buffs;
+ xn->reply = params->recv_buf;
+ xn->reply_sz = 0;
+ xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
+ xn->vc_op = params->vc_op;
+ xn->async_handler = params->async_handler;
+ idpf_vc_xn_unlock(xn);
- if (ctlq_msg.data_len)
- payload_size = ctlq_msg.ctx.indirect.payload->size;
+ if (!params->async)
+ reinit_completion(&xn->completed);
+ cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
+ FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- /* All conditions are met. Either a message requested is
- * received or we received a message to be processed
- */
- switch (ctlq_msg.cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
- ctlq_msg.cookie.mbx.chnl_opcode,
- ctlq_msg.cookie.mbx.chnl_retval);
- err = -EBADMSG;
- } else if (msg) {
- memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
- min_t(int, payload_size, msg_size));
- }
- work_done = true;
- break;
- case VIRTCHNL2_OP_CREATE_VPORT:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DESTROY_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_QUEUES,
- IDPF_VC_ADD_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DEL_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
- break;
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- /* This message can only be sent asynchronously. As
- * such we'll have lost the context in which it was
- * called and thus can only really report if it looks
- * like an error occurred. Don't bother setting ERR bit
- * or waking chnl_wq since no work queue will be waiting
- * to read the message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- }
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously. We don't
- * normally print errors here, instead
- * prefer to handle errors in the function
- * calling wait_for_event. However, if
- * asynchronous, the context in which the
- * message was sent is lost. We can't really do
- * anything about at it this point, but we
- * should at a minimum indicate that it looks
- * like something went wrong. Also don't bother
- * setting ERR bit or waking vchnl_wq since no
- * one will be waiting to read the async
- * message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_MAC_ADDR,
- IDPF_VC_ADD_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously like the
- * VIRTCHNL2_OP_ADD_MAC_ADDR
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_MAC_ADDR,
- IDPF_VC_DEL_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_EVENT:
- idpf_recv_event_msg(vport, &ctlq_msg);
- break;
- default:
- dev_warn(&adapter->pdev->dev,
- "Unhandled virtchnl response %d\n",
- ctlq_msg.cookie.mbx.chnl_opcode);
- break;
- }
+ retval = idpf_send_mb_msg(adapter, params->vc_op,
+ send_buf->iov_len, send_buf->iov_base,
+ cookie);
+ if (retval) {
+ idpf_vc_xn_lock(xn);
+ goto release_and_unlock;
+ }
-post_buffs:
- if (ctlq_msg.data_len)
- dma_mem = ctlq_msg.ctx.indirect.payload;
- else
- num_q_msg = 0;
+ if (params->async)
+ return 0;
- err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
- &num_q_msg, &dma_mem);
- /* If post failed clear the only buffer we supplied */
- if (err && dma_mem)
- dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
- dma_mem->va, dma_mem->pa);
+ wait_for_completion_timeout(&xn->completed,
+ msecs_to_jiffies(params->timeout_ms));
- /* Applies only if we are looking for a specific opcode */
- if (work_done)
- break;
+ /* No need to check the return value; we check the final state of the
+ * transaction below. It's possible the transaction effectively gets a
+ * longer timeout than specified if we get preempted here but after
+ * wait_for_completion_timeout returns. This should be a non-issue,
+ * however.
+ */
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_SHUTDOWN:
+ retval = -ENXIO;
+ goto only_unlock;
+ case IDPF_VC_XN_WAITING:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed out (op %d, %dms)\n",
+ params->vc_op, params->timeout_ms);
+ retval = -ETIME;
+ break;
+ case IDPF_VC_XN_COMPLETED_SUCCESS:
+ retval = xn->reply_sz;
+ break;
+ case IDPF_VC_XN_COMPLETED_FAILED:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
+ params->vc_op);
+ retval = -EIO;
+ break;
+ default:
+ /* Invalid state. */
+ WARN_ON_ONCE(1);
+ retval = -EIO;
+ break;
}
- return err;
+release_and_unlock:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+ /* If we receive a VC reply after here, it will be dropped. */
+only_unlock:
+ idpf_vc_xn_unlock(xn);
+
+ return retval;
}
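The 16-bit software cookie is how idpf_vc_xn_exec() and the receive path find
each other again: it round-trips the ring index and salt through the message's
sw_cookie field. A standalone sketch of that encode/decode, assuming the masks
split the cookie into an 8-bit index and an 8-bit salt (the real GENMASK
definitions live in the driver header):

	#include <linux/bitfield.h>

	/* Assumed layout: low byte = ring index, high byte = salt. */
	#define IDPF_VC_XN_IDX_M	GENMASK(7, 0)
	#define IDPF_VC_XN_SALT_M	GENMASK(15, 8)

	static u16 xn_cookie_pack(u16 idx, u16 salt)
	{
		return FIELD_PREP(IDPF_VC_XN_SALT_M, salt) |
		       FIELD_PREP(IDPF_VC_XN_IDX_M, idx);
	}

	static void xn_cookie_unpack(u16 cookie, u16 *idx, u16 *salt)
	{
		*idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
		*salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
	}

A stale or corrupted reply then fails either the index bounds check or the
salt comparison in idpf_vc_xn_forward_reply() and is dropped.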
/**
- * __idpf_wait_for_event - wrapper function for wait on virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- * @timeout: Max time to wait
+ * idpf_vc_xn_forward_async - Handle async reply receives
+ * @adapter: private data struct
+ * @xn: transaction to handle
+ * @ctlq_msg: corresponding ctlq_msg
*
- * Checks if state is set upon expiry of timeout. Returns 0 on success,
- * negative on failure.
+ * For async sends we're going to lose the caller's context, so if an
+ * async_handler was provided, it can deal with the reply; otherwise we'll just
+ * check and report if there is an error.
*/
-static int __idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check,
- int timeout)
+static int
+idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
{
- int time_to_wait, num_waits;
- wait_queue_head_t *vchnl_wq;
- unsigned long *vc_state;
+ int err = 0;
- time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
- num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ err = -EINVAL;
+ goto release_bufs;
+ }
- if (vport) {
- vchnl_wq = &vport->vchnl_wq;
- vc_state = vport->vc_state;
- } else {
- vchnl_wq = &adapter->vchnl_wq;
- vc_state = adapter->vc_state;
+ if (xn->async_handler) {
+ err = xn->async_handler(adapter, xn, ctlq_msg);
+ goto release_bufs;
+ }
+
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
}
- while (num_waits) {
- int event;
+release_bufs:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+
+ return err;
+}
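Callers that set params->async can supply an async_handler to reclaim the
reply once the sending context is gone. A minimal hypothetical handler
matching the invocation above (the exact function-pointer type is declared in
the driver header):

	/* Hypothetical async handler: log a failed reply, consume it otherwise. */
	static int idpf_example_async_handler(struct idpf_adapter *adapter,
					      struct idpf_vc_xn *xn,
					      const struct idpf_ctlq_msg *ctlq_msg)
	{
		if (ctlq_msg->cookie.mbx.chnl_retval)
			dev_warn(&adapter->pdev->dev, "async op %d failed: %d\n",
				 xn->vc_op, ctlq_msg->cookie.mbx.chnl_retval);

		return 0;
	}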
+
+/**
+ * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @ctlq_msg: controlq message to send back to receiving thread
+ */
+static int
+idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ const void *payload = NULL;
+ size_t payload_size = 0;
+ struct idpf_vc_xn *xn;
+ u16 msg_info;
+ int err = 0;
+ u16 xn_idx;
+ u16 salt;
+
+ msg_info = ctlq_msg->ctx.sw_cookie.data;
+ xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
+ if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
+ xn_idx);
+ return -EINVAL;
+ }
+ xn = &adapter->vcxn_mngr->ring[xn_idx];
+ salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ if (xn->salt != salt) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ xn->salt, salt);
+ return -EINVAL;
+ }
- /* If we are here and a reset is detected do not wait but
- * return. Reset timing is out of drivers control. So
- * while we are cleaning resources as part of reset if the
- * underlying HW mailbox is gone, wait on mailbox messages
- * is not meaningful
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_WAITING:
+ /* success */
+ break;
+ case IDPF_VC_XN_IDLE:
+ dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ case IDPF_VC_XN_SHUTDOWN:
+ /* ENXIO is a bit special here as the recv msg loop uses that to
+ * know if it should stop trying to clean the ring if we lost
+ * the virtchnl. We need to stop playing with registers and
+ * yield.
*/
- if (idpf_is_reset_detected(adapter))
- return 0;
+ err = -ENXIO;
+ goto out_unlock;
+ case IDPF_VC_XN_ASYNC:
+ err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
+ idpf_vc_xn_unlock(xn);
+ return err;
+ default:
+ dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EBUSY;
+ goto out_unlock;
+ }
- event = wait_event_timeout(*vchnl_wq,
- test_and_clear_bit(state, vc_state),
- msecs_to_jiffies(time_to_wait));
- if (event) {
- if (test_and_clear_bit(err_check, vc_state)) {
- dev_err(&adapter->pdev->dev, "VC response error %s\n",
- idpf_vport_vc_state_str[err_check]);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return -EINVAL;
- }
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return 0;
- }
- num_waits--;
+ if (ctlq_msg->data_len) {
+ payload = ctlq_msg->ctx.indirect.payload->va;
+ payload_size = ctlq_msg->ctx.indirect.payload->size;
}
- /* Timeout occurred */
- dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
- idpf_vport_vc_state_str[state]);
+ xn->reply_sz = payload_size;
+ xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
- return -ETIMEDOUT;
+ if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
+ memcpy(xn->reply.iov_base, payload,
+ min_t(size_t, xn->reply.iov_len, payload_size));
+
+out_unlock:
+ idpf_vc_xn_unlock(xn);
+ /* we _cannot_ hold the lock while calling complete */
+ complete(&xn->completed);
+
+ return err;
}
/**
- * idpf_min_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
*
- * Returns 0 on success, negative on failure.
+ * Will receive control queue messages and post receive buffers. Returns 0
+ * on success and negative on failure.
*/
-static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
-}
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int post_err, err;
+ u16 num_recv;
-/**
- * idpf_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout after 500ms
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
-{
- /* Increasing the timeout in __IDPF_INIT_SW flow to consider large
- * number of VF's mailbox message responses. When a message is received
- * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
- * the timeout expires. Only in the error case i.e. if no message is
- * received on mailbox, we wait for the complete timeout which is
- * less likely to happen.
- */
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO);
+ while (1) {
+ /* This will get <= num_recv messages and output how many
+ * were actually received in num_recv.
+ */
+ num_recv = 1;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ if (err || !num_recv)
+ break;
+
+ if (ctlq_msg.data_len) {
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ } else {
+ dma_mem = NULL;
+ num_recv = 0;
+ }
+
+ if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
+ idpf_recv_event_msg(adapter, &ctlq_msg);
+ else
+ err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
+
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
+ adapter->hw.arq,
+ &num_recv, &dma_mem);
+
+ /* If post failed clear the only buffer we supplied */
+ if (post_err) {
+ if (dma_mem)
+ dmam_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+ break;
+ }
+
+ /* virtchnl trying to shut down, stop cleaning */
+ if (err == -ENXIO)
+ break;
+ }
+
+ return err;
}
/**
@@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
*/
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_version_info vvi;
+ ssize_t reply_sz;
+ u32 major, minor;
+ int err = 0;
if (adapter->virt_ver_maj) {
vvi.major = cpu_to_le32(adapter->virt_ver_maj);
@@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)
vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
}
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
- (u8 *)&vvi);
-}
-
-/**
- * idpf_recv_ver_msg - Receive virtchnl version message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
- * to send version message again, otherwise negative on failure.
- */
-static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
-{
- struct virtchnl2_version_info vvi;
- u32 major, minor;
- int err;
+ xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+ xn_params.send_buf.iov_base = &vvi;
+ xn_params.send_buf.iov_len = sizeof(vvi);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
- sizeof(vvi));
- if (err)
- return err;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(vvi))
+ return -EIO;
major = le32_to_cpu(vvi.major);
minor = le32_to_cpu(vvi.minor);
if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
- dev_warn(&adapter->pdev->dev,
- "Virtchnl major version (%d) greater than supported\n",
- major);
-
+ dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
return -EINVAL;
}
if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
minor > IDPF_VIRTCHNL_VERSION_MINOR)
- dev_warn(&adapter->pdev->dev,
- "Virtchnl minor version (%d) didn't match\n", minor);
+ dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what
* version we will use.
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
*/
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
- struct virtchnl2_get_capabilities caps = { };
+ struct virtchnl2_get_capabilities caps = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
caps.csum_caps =
cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_PROMISC |
VIRTCHNL2_CAP_LOOPBACK);
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
- (u8 *)&caps);
-}
+ xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
+ xn_params.send_buf.iov_base = &caps;
+ xn_params.send_buf.iov_len = sizeof(caps);
+ xn_params.recv_buf.iov_base = &adapter->caps;
+ xn_params.recv_buf.iov_len = sizeof(adapter->caps);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
-/**
- * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl get capabilities message. Returns 0 on success, negative on
- * failure.
- */
-static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
-{
- return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
- sizeof(struct virtchnl2_get_capabilities));
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(adapter->caps))
+ return -EIO;
+
+ return 0;
}
/**
@@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q)
{
struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vc_xn_params xn_params = {};
u16 idx = adapter->next_vport;
int err, buf_size;
+ ssize_t reply_sz;
buf_size = sizeof(struct virtchnl2_create_vport);
if (!adapter->vport_params_reqd[idx]) {
@@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
return err;
}
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
- (u8 *)vport_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
-
- goto rel_lock;
- }
-
if (!adapter->vport_params_recvd[idx]) {
adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
GFP_KERNEL);
if (!adapter->vport_params_recvd[idx]) {
err = -ENOMEM;
- goto rel_lock;
+ goto free_vport_params;
}
}
- vport_msg = adapter->vport_params_recvd[idx];
- memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
+ xn_params.send_buf.iov_base = vport_msg;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto free_vport_params;
+ }
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EIO;
+ goto free_vport_params;
+ }
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
+ return 0;
+
+free_vport_params:
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
return err;
}
@@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
*/
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1397,26 +1377,19 @@ rel_lock:
*/
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1428,26 +1401,19 @@ rel_lock:
*/
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1459,11 +1425,13 @@ rel_lock:
*/
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_tx_queues *ctq;
+ struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+ struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_txq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_txq + vport->num_complq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!ctq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(ctq, 0, buf_sz);
@@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_TX_QUEUES,
- buf_sz, (u8 *)ctq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = ctq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ctq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
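The loop above chunks the qinfo array so that each mailbox payload stays
within IDPF_CTLQ_MAX_BUF_LEN. A sketch of the capacity arithmetic under that
constraint; the helper name is hypothetical, and the real driver derives the
same quantity from the message header size and per-entry size:

	/* Hypothetical helper: how many queue-info entries fit per message. */
	static u32 chunks_per_msg(size_t hdr_sz, size_t entry_sz)
	{
		return (IDPF_CTLQ_MAX_BUF_LEN - hdr_sz) / entry_sz;
	}

For the TX case the header is sizeof(struct virtchnl2_config_tx_queues) and
the entry is sizeof(struct virtchnl2_txq_info); the sender then needs roughly
DIV_ROUND_UP(totqs, chunks_per_msg) messages, shrinking the final chunk to
whatever remains.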
/**
@@ -1591,11 +1544,13 @@ error:
*/
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_rx_queues *crq;
+ struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+ struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_rxq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_rxq + vport->num_bufq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -1676,10 +1631,8 @@ common_qi_fields:
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1693,12 +1646,11 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!crq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(crq, 0, buf_sz);
@@ -1706,17 +1658,11 @@ common_qi_fields:
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_RX_QUEUES,
- buf_sz, (u8 *)crq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = crq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1725,42 +1671,28 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(crq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
* idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
* queues message
* @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: if true enable, false disable
*
* Send enable or disable queues virtchnl message. Returns 0 on success,
* negative on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_queue_chunks *qcs;
- struct virtchnl2_queue_chunk *qc;
u32 config_sz, chunk_sz, buf_sz;
- int i, j, k = 0, err = 0;
-
- /* validate virtchnl op */
- switch (vc_op) {
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- break;
- default:
- return -EINVAL;
- }
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_txq = vport->num_txq + vport->num_complq;
num_rxq = vport->num_rxq + vport->num_bufq;
@@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->txq_model))
goto setup_rx;
@@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
- if (vport->num_complq != (k - vport->num_txq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_complq != (k - vport->num_txq))
+ return -EINVAL;
setup_rx:
for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -1823,10 +1751,8 @@ setup_rx:
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->rxq_model))
goto send_msg;
@@ -1845,10 +1771,8 @@ setup_rx:
}
if (vport->num_bufq != k - (vport->num_txq +
vport->num_complq +
- vport->num_rxq)) {
- err = -EINVAL;
- goto error;
- }
+ vport->num_rxq))
+ return -EINVAL;
send_msg:
/* Chunk up the queue info into multiple messages */
@@ -1861,12 +1785,16 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (ena) {
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(eq, 0, buf_sz);
@@ -1875,20 +1803,11 @@ send_msg:
qcs = &eq->chunks;
memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
- if (err)
- goto mbx_error;
-
- if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- else
- err = idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -1897,13 +1816,7 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
-error:
- kfree(qc);
-
- return err;
+ return 0;
}
/**
@@ -1917,12 +1830,13 @@ error:
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_queue_vector_maps *vqvm;
- struct virtchnl2_queue_vector *vqv;
+ struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
u32 num_msgs, num_chunks, num_q;
- int i, j, k = 0, err = 0;
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_q = vport->num_txq + vport->num_rxq;
@@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_complq)
+ return -EINVAL;
} else {
- if (vport->num_rxq != k - vport->num_txq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_txq)
+ return -EINVAL;
}
/* Chunk up the vector info into multiple messages */
@@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm) {
- err = -ENOMEM;
- goto error;
- }
+ if (!vqvm)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (map) {
+ xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(vqvm, 0, buf_sz);
+ xn_params.send_buf.iov_base = vqvm;
+ xn_params.send_buf.iov_len = buf_sz;
vqvm->vport_id = cpu_to_le32(vport->vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
- if (map) {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- } else {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err =
- idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- }
- if (err)
- goto mbx_error;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(vqvm);
-error:
- kfree(vqv);
-
- return err;
+ return 0;
}
/**
@@ -2062,7 +1953,7 @@ error:
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+ return idpf_send_ena_dis_queues_msg(vport, true);
}
/**
@@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err, i;
- err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
@@ -2087,8 +1978,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
/* schedule the napi to receive all the marker packets */
+ local_bh_disable();
for (i = 0; i < vport->num_q_vectors; i++)
napi_schedule(&vport->q_vectors[i].napi);
+ local_bh_enable();
return idpf_wait_for_marker_event(vport);
}
@@ -2122,22 +2015,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
*/
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
struct virtchnl2_create_vport *vport_params;
struct virtchnl2_queue_reg_chunks *chunks;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx;
- int buf_size, err;
+ ssize_t reply_sz;
u16 num_chunks;
+ int buf_size;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
+ chunks = &vport_config->req_qs_chunks->chunks;
} else {
- vport_params = adapter->vport_params_recvd[vport_idx];
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
}
@@ -2154,21 +2046,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
num_chunks);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
- buf_size, (u8 *)eq);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
+ xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2203,14 +2087,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- struct virtchnl2_add_queues aq = { };
- struct virtchnl2_add_queues *vc_msg;
+ struct virtchnl2_add_queues aq = {};
u16 vport_idx = vport->idx;
- int size, err;
+ ssize_t reply_sz;
+ int size;
+
+ vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2218,47 +2109,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
aq.num_rx_q = cpu_to_le16(num_rx_q);
aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
- mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
- sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
- if (err)
- goto rel_lock;
-
- /* We want vport to be const to prevent incidental code changes making
- * changes to the vport config. We're making a special exception here
- * to discard const to use the virtchnl.
- */
- err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
- IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
- if (err)
- goto rel_lock;
-
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
+ xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &aq;
+ xn_params.send_buf.iov_len = sizeof(aq);
+ xn_params.recv_buf.iov_base = vc_msg;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
/* compare vc_msg num queues with vport num queues */
if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
- err = -EINVAL;
- goto rel_lock;
- }
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
le16_to_cpu(vc_msg->chunks.num_chunks));
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
-rel_lock:
- mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks)
+ return -ENOMEM;
- return err;
+ return 0;
}
/**
@@ -2270,53 +2147,49 @@ rel_lock:
*/
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
- struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
- struct virtchnl2_alloc_vectors ac = { };
+ struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
+ struct virtchnl2_alloc_vectors ac = {};
+ ssize_t reply_sz;
u16 num_vchunks;
- int size, err;
+ int size;
ac.num_vectors = cpu_to_le16(num_vectors);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
- sizeof(ac), (u8 *)&ac);
- if (err)
- goto rel_lock;
+ rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_vec)
+ return -ENOMEM;
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
+ xn_params.send_buf.iov_base = &ac;
+ xn_params.send_buf.iov_len = sizeof(ac);
+ xn_params.recv_buf.iov_base = rcvd_vec;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
-
size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
- if (size > sizeof(adapter->vc_msg)) {
- err = -EINVAL;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
kfree(adapter->req_vec_chunks);
- adapter->req_vec_chunks = NULL;
- adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
- if (!adapter->req_vec_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks)
+ return -ENOMEM;
- alloc_vec = adapter->req_vec_chunks;
- if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
- err = -EINVAL;
+ return -EINVAL;
}
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2329,29 +2202,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
- int buf_size, err;
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+ int buf_size;
buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
- (u8 *)vcs);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
+ xn_params.send_buf.iov_base = vcs;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2374,25 +2242,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)
*/
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
- struct virtchnl2_sriov_vfs_info svi = { };
- int err;
+ struct virtchnl2_sriov_vfs_info svi = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
svi.num_vfs = cpu_to_le16(num_vfs);
+ xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &svi;
+ xn_params.send_buf.iov_len = sizeof(svi);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
- sizeof(svi), (u8 *)&svi);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
-
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2405,10 +2266,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_vport_stats stats_msg = { };
- struct virtchnl2_vport_stats *stats;
- int err;
+ struct virtchnl2_vport_stats stats_msg = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
/* Don't send get_stats message if the link is down */
if (np->state <= __IDPF_VPORT_DOWN)
@@ -2416,46 +2277,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
+ xn_params.send_buf.iov_base = &stats_msg;
+ xn_params.send_buf.iov_len = sizeof(stats_msg);
+ xn_params.recv_buf = xn_params.send_buf;
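+ /* request and reply are both a virtchnl2_vport_stats, so reuse the buffer */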
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
- sizeof(struct virtchnl2_vport_stats),
- (u8 *)&stats_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- if (err)
- goto rel_lock;
-
- stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(stats_msg))
+ return -EIO;
spin_lock_bh(&np->stats_lock);
- netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
- le64_to_cpu(stats->rx_multicast) +
- le64_to_cpu(stats->rx_broadcast);
- netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
- netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
- netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
- netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
-
- netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
- le64_to_cpu(stats->tx_multicast) +
- le64_to_cpu(stats->tx_broadcast);
- netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
- netstats->tx_errors = le64_to_cpu(stats->tx_errors);
- netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
-
- vport->port_stats.vport_stats = *stats;
+ netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
+ le64_to_cpu(stats_msg.rx_multicast) +
+ le64_to_cpu(stats_msg.rx_broadcast);
+ netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
+ le64_to_cpu(stats_msg.tx_multicast) +
+ le64_to_cpu(stats_msg.tx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
+ netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
+ netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
+ netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
+ netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
+ netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
+
+ vport->port_stats.vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2467,70 +2320,70 @@ rel_lock:
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_lut *recv_rl;
+ struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
+ struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
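+ /* __free(kfree) frees both buffers automatically on every return path */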
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_lut *rl;
int buf_size, lut_buf_size;
- int i, err;
+ ssize_t reply_sz;
+ int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
rl->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
- if (!get) {
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = rl;
+ xn_params.send_buf.iov_len = buf_size;
+
+ if (get) {
+ recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rl)
+ return -ENOMEM;
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
+ xn_params.recv_buf.iov_base = recv_rl;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ } else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
-
- goto free_mem;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_lut))
+ return -EIO;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
+ lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
+ if (reply_sz < lut_buf_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- if (err)
- goto free_mem;
-
- recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+ /* size didn't change, we can reuse existing lut buf */
if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
goto do_memcpy;
rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
kfree(rss_data->rss_lut);
- lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
if (!rss_data->rss_lut) {
rss_data->rss_lut_size = 0;
- err = -ENOMEM;
- goto free_mem;
+ return -ENOMEM;
}
do_memcpy:
- memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
-free_mem:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rl);
+ memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
- return err;
+ return 0;
}
/**
@@ -2542,68 +2395,70 @@ free_mem:
*/
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_key *recv_rk;
+ struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
+ struct virtchnl2_rss_key *rk __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_key *rk;
- int i, buf_size, err;
+ ssize_t reply_sz;
+ int i, buf_size;
+ u16 key_size;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
rk->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
+ xn_params.send_buf.iov_base = rk;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
if (get) {
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- if (err)
- goto error;
-
- recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
- if (rss_data->rss_key_size !=
- le16_to_cpu(recv_rk->key_len)) {
- rss_data->rss_key_size =
- min_t(u16, NETDEV_RSS_KEY_LEN,
- le16_to_cpu(recv_rk->key_len));
- kfree(rss_data->rss_key);
- rss_data->rss_key = kzalloc(rss_data->rss_key_size,
- GFP_KERNEL);
- if (!rss_data->rss_key) {
- rss_data->rss_key_size = 0;
- err = -ENOMEM;
- goto error;
- }
- }
- memcpy(rss_data->rss_key, recv_rk->key_flex,
- rss_data->rss_key_size);
+ recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rk)
+ return -ENOMEM;
+
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
+ xn_params.recv_buf.iov_base = recv_rk;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
} else {
rk->key_len = cpu_to_le16(rss_data->rss_key_size);
for (i = 0; i < rss_data->rss_key_size; i++)
rk->key_flex[i] = rss_data->rss_key[i];
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
+ }
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_key))
+ return -EIO;
+
+ key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ if (reply_sz < key_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
+ /* key len didn't change, reuse existing buf */
+ if (rss_data->rss_key_size == key_size)
+ goto do_memcpy;
+
+ rss_data->rss_key_size = key_size;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ return -ENOMEM;
}
-error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rk);
+do_memcpy:
+ memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
- return err;
+ return 0;
}
/**
@@ -2655,13 +2510,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
*/
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
+ struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
+ struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
- struct virtchnl2_get_ptype_info get_ptype_info;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_get_ptype_info *ptype_info;
+ struct idpf_vc_xn_params xn_params = {};
u16 next_ptype_id = 0;
- int err = 0, i, j, k;
+ ssize_t reply_sz;
+ int i, j, k;
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
@@ -2670,43 +2527,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
+ if (!get_ptype_info)
+ return -ENOMEM;
+
ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
if (!ptype_info)
return -ENOMEM;
- mutex_lock(&adapter->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ xn_params.send_buf.iov_base = get_ptype_info;
+ xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
+ xn_params.recv_buf.iov_base = ptype_info;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
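+ /* fetch ptypes in windows of IDPF_RX_MAX_PTYPES_PER_BUF until all
+ * max_ptype entries (or the 0xFFFF end marker) have been walked
+ */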
while (next_ptype_id < max_ptype) {
- get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+ get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(max_ptype - next_ptype_id);
else
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
- sizeof(struct virtchnl2_get_ptype_info),
- (u8 *)&get_ptype_info);
- if (err)
- goto vc_buf_unlock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- if (err)
- goto vc_buf_unlock;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
+ return -EIO;
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
- if (ptypes_recvd > max_ptype) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptypes_recvd > max_ptype)
+ return -EINVAL;
- next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
- le16_to_cpu(get_ptype_info.num_ptypes);
+ next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
+ le16_to_cpu(get_ptype_info->num_ptypes);
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
@@ -2719,17 +2577,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
((u8 *)ptype_info + ptype_offset);
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
- if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID) {
- err = 0;
- goto vc_buf_unlock;
- }
+ IDPF_INVALID_PTYPE_ID)
+ return 0;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
@@ -2857,11 +2711,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
}
}
-vc_buf_unlock:
- mutex_unlock(&adapter->vc_buf_lock);
- kfree(ptype_info);
-
- return err;
+ return 0;
}
/**
@@ -2873,27 +2723,20 @@ vc_buf_unlock:
*/
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
- int err;
+ ssize_t reply_sz;
loopback.vport_id = cpu_to_le32(vport->vport_id);
loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
- sizeof(loopback), (u8 *)&loopback);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &loopback;
+ xn_params.send_buf.iov_len = sizeof(loopback);
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2958,7 +2801,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
return -ENOENT;
}
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
return 0;
}
@@ -3055,35 +2898,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
u16 num_max_vports;
int err = 0;
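+ /* allocate the transaction manager only once; subsequent inits
+ * just reinitialize it
+ */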
+ if (!adapter->vcxn_mngr) {
+ adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
+ if (!adapter->vcxn_mngr) {
+ err = -ENOMEM;
+ goto init_failed;
+ }
+ }
+ idpf_vc_xn_init(adapter->vcxn_mngr);
+
while (adapter->state != __IDPF_INIT_SW) {
switch (adapter->state) {
- case __IDPF_STARTUP:
- if (idpf_send_ver_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_VER_CHECK;
- goto restart;
case __IDPF_VER_CHECK:
- err = idpf_recv_ver_msg(adapter);
- if (err == -EIO) {
- return err;
- } else if (err == -EAGAIN) {
- adapter->state = __IDPF_STARTUP;
+ err = idpf_send_ver_msg(adapter);
+ switch (err) {
+ case 0:
+ /* success, move state machine forward */
+ adapter->state = __IDPF_GET_CAPS;
+ fallthrough;
+ case -EAGAIN:
goto restart;
- } else if (err) {
+ default:
+ /* Something bad happened, try again but only a
+ * few times.
+ */
goto init_failed;
}
- if (idpf_send_get_caps_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_GET_CAPS;
- goto restart;
case __IDPF_GET_CAPS:
- if (idpf_recv_get_caps_msg(adapter))
+ err = idpf_send_get_caps_msg(adapter);
+ if (err)
goto init_failed;
adapter->state = __IDPF_INIT_SW;
break;
default:
dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
adapter->state);
+ err = -EINVAL;
goto init_failed;
}
break;
@@ -3142,7 +2992,9 @@ restart:
queue_delayed_work(adapter->init_wq, &adapter->init_task,
msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
- goto no_err;
+ set_bit(IDPF_VC_CORE_INIT, adapter->flags);
+
+ return 0;
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
@@ -3151,7 +3003,6 @@ err_intr_req:
err_netdev_alloc:
kfree(adapter->vports);
adapter->vports = NULL;
-no_err:
return err;
init_failed:
@@ -3168,7 +3019,9 @@ init_failed:
* register writes might not have taken effect. Retry to initialize
* the mailbox again
*/
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
+ if (adapter->vcxn_mngr)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
@@ -3184,29 +3037,22 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
- int i;
+ if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ return;
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- /* Set all bits as we dont know on which vc_state the vhnl_wq is
- * waiting on and wakeup the virtchnl workqueue even if it is waiting
- * for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, adapter->vc_state);
- wake_up(&adapter->vchnl_wq);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
idpf_vport_params_buf_rel(adapter);
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, adapter->vc_state);
-
kfree(adapter->vports);
adapter->vports = NULL;
+
+ clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
/**
@@ -3622,6 +3468,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
}
/**
+ * idpf_mac_filter_async_handler - Async callback for mac filters
+ * @adapter: private data struct
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * In some scenarios the driver can't sleep and wait for a reply (e.g. the
+ * stack is holding rtnl_lock) when adding a new MAC filter, which makes it
+ * difficult to deal with errors returned on the reply. The best we can
+ * ultimately do is remove the filter from our list of MAC filters and
+ * report the error.
+ *
+ * Return: 0 on success, -EINVAL on an invalid or truncated payload.
+ */
+static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_mac_addr_list *ma_list;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ struct list_head *ma_list_head;
+ struct idpf_vport *vport;
+ u16 num_entries;
+ int i;
+
+ /* if success we're done, we're only here if something bad happened */
+ if (!ctlq_msg->cookie.mbx.chnl_retval)
+ return 0;
+
+ /* make sure at least struct is there */
+ if (xn->reply_sz < sizeof(*ma_list))
+ goto invalid_payload;
+
+ ma_list = ctlq_msg->ctx.indirect.payload->va;
+ mac_addr = ma_list->mac_addr_list;
+ num_entries = le16_to_cpu(ma_list->num_mac_addr);
+ /* we should have received a buffer at least this big */
+ if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
+ goto invalid_payload;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
+ if (!vport)
+ goto invalid_payload;
+
+ vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
+ ma_list_head = &vport_config->user_config.mac_filter_list;
+
+ /* We can't do much to reconcile bad filters at this point; however, we
+ * should at least remove them from our list one way or the other so we
+ * have some idea which good filters remain.
+ */
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ list_for_each_entry_safe(f, tmp, ma_list_head, list)
+ for (i = 0; i < num_entries; i++)
+ if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
+ list_del(&f->list);
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
+ xn->vc_op);
+
+ return 0;
+
+invalid_payload:
+ dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
+ xn->vc_op, xn->reply_sz);
+
+ return -EINVAL;
+}
+
+/**
* idpf_add_del_mac_filters - Add/del mac filters
* @vport: Virtual port data structure
* @np: Netdev private structure
@@ -3634,17 +3549,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
bool add, bool async)
{
- struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
+ struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- enum idpf_vport_config_flags mac_flag;
- struct pci_dev *pdev = adapter->pdev;
- enum idpf_vport_vc_state vc, vc_err;
- struct virtchnl2_mac_addr *mac_addr;
- struct idpf_mac_filter *f, *tmp;
u32 num_msgs, total_filters = 0;
- int i = 0, k, err = 0;
- u32 vop;
+ struct idpf_mac_filter *f;
+ ssize_t reply_sz;
+ int i = 0, k;
+
+ xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
+ VIRTCHNL2_OP_DEL_MAC_ADDR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = async;
+ xn_params.async_handler = idpf_mac_filter_async_handler;
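+ /* on the async path, errors are reconciled later by the handler above */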
vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
@@ -3668,13 +3587,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
GFP_ATOMIC);
if (!mac_addr) {
- err = -ENOMEM;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- goto error;
+
+ return -ENOMEM;
}
- list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
- list) {
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
i++;
@@ -3693,26 +3612,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (add) {
- vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
- vc = IDPF_VC_ADD_MAC_ADDR;
- vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_ADD_MAC_REQ;
- } else {
- vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
- vc = IDPF_VC_DEL_MAC_ADDR;
- vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_DEL_MAC_REQ;
- }
-
/* Chunk up the filters into multiple messages to avoid
* sending a control queue message buffer that is too large
*/
num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
- if (!async)
- mutex_lock(&vport->vc_buf_lock);
-
for (i = 0, k = 0; i < num_msgs; i++) {
u32 entries_size, buf_size, num_entries;
@@ -3724,10 +3628,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
kfree(ma_list);
ma_list = kzalloc(buf_size, GFP_ATOMIC);
- if (!ma_list) {
- err = -ENOMEM;
- goto list_prep_error;
- }
+ if (!ma_list)
+ return -ENOMEM;
} else {
memset(ma_list, 0, buf_size);
}
@@ -3736,34 +3638,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
- if (async)
- set_bit(mac_flag, vport_config->flags);
-
- err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
- if (err)
- goto mbx_error;
-
- if (!async) {
- err = idpf_wait_for_event(adapter, vport, vc, vc_err);
- if (err)
- goto mbx_error;
- }
+ xn_params.send_buf.iov_base = ma_list;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_entries;
total_filters -= num_entries;
}
-mbx_error:
- if (!async)
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ma_list);
-list_prep_error:
- kfree(mac_addr);
-error:
- if (err)
- dev_err(&pdev->dev, "Failed to add or del mac filters %d", err);
-
- return err;
+ return 0;
}
/**
@@ -3780,9 +3665,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_promisc_info vpi;
+ ssize_t reply_sz;
u16 flags = 0;
- int err;
if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
flags |= VIRTCHNL2_UNICAST_PROMISC;
@@ -3792,9 +3678,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
vpi.vport_id = cpu_to_le32(vport_id);
vpi.flags = cpu_to_le16(flags);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
- sizeof(struct virtchnl2_promisc_info),
- (u8 *)&vpi);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &vpi;
+ xn_params.send_buf.iov_len = sizeof(vpi);
+ /* setting promiscuous is only ever done asynchronously */
+ xn_params.async = true;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
new file mode 100644
index 000000000000..83da5d8da56b
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_VIRTCHNL_H_
+#define _IDPF_VIRTCHNL_H_
+
+struct idpf_adapter;
+struct idpf_netdev_priv;
+struct idpf_vec_regs;
+struct idpf_vport;
+struct idpf_vport_max_q;
+struct idpf_vport_user_config_data;
+
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+
+int idpf_recv_mb_msg(struct idpf_adapter *adapter);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg, u16 cookie);
+
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+
+#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 8dc837889723..4a3c4454d25a 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -978,7 +978,7 @@ struct virtchnl2_ptype {
u8 proto_id_count;
__le16 pad;
__le16 proto_id[];
-};
+} __packed __aligned(2);
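+/* ptype entries are laid out back-to-back in the message buffer, so this
+ * struct must not carry compiler padding and may only assume 2-byte alignment
+ */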
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
/**
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index a2b759531cb7..3c2dc7bdebb5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -637,7 +637,7 @@ struct igb_adapter {
struct timespec64 period;
} perout[IGB_N_PEROUT];
- char fw_version[32];
+ char fw_version[48];
#ifdef CONFIG_IGB_HWMON
struct hwmon_buff *igb_hwmon_buff;
bool ets;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b66199c9bb3a..99977a22b843 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3027,7 +3027,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int igb_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3038,11 +3038,13 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- edata->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
if (!hw->dev_spec._82575.eee_disable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
/* The IPCNFG and EEER registers are not supported on I354. */
if (hw->mac.type == e1000_i354) {
@@ -3068,7 +3070,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
case e1000_i354:
case e1000_i210:
@@ -3079,7 +3081,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
default:
@@ -3099,18 +3101,20 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igb_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
bool adv1g_eee = true, adv100m_eee = true;
s32 ret_val;
@@ -3118,7 +3122,7 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
@@ -3138,14 +3142,21 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (!edata->advertised || (edata->advertised &
- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
dev_err(&adapter->pdev->dev,
"EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
return -EINVAL;
}
- adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
- adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
+ adv100m_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+ adv1g_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
@@ -3153,7 +3164,7 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
adapter->flags |= IGB_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4df8d4153aa5..518298bbdadc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -202,7 +202,7 @@ static struct notifier_block dca_notifier = {
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
-module_param(max_vfs, uint, 0);
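+/* 0444: expose max_vfs read-only under /sys/module/igb/parameters */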
+module_param(max_vfs, uint, 0444);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
@@ -2538,7 +2538,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
- char *lbuf;
igb_get_fw_version(hw, &fw);
@@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter)
case e1000_i210:
case e1000_i211:
if (!(igb_get_flash_presence_i210(hw))) {
- lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor,
- fw.invm_img_type);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
break;
}
fallthrough;
default:
/* if option rom is valid, display its version too */
if (fw.or_valid) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id, fw.or_major, fw.or_build,
- fw.or_patch);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else if (fw.etrack_id != 0X0000) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
} else {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major,
- fw.eep_minor, fw.eep_build);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
-
- /* the truncate happens here if it doesn't fit */
- strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
- kfree(lbuf);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 319c544b9f04..f94570556120 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
/* adjust timestamp for the TX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_TX_LATENCY_10;
@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index a4d4f00e6a87..b0cf310e6f7b 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2655,7 +2655,7 @@ igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 95d1e8c490a4..ebffd3054285 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,6 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 45430e246e9c..90316dc58630 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -168,7 +168,7 @@ struct igc_ring {
struct igc_adapter {
struct net_device *netdev;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u16 eee_advert;
unsigned long state;
@@ -295,6 +295,9 @@ struct igc_adapter {
struct timespec64 start;
struct timespec64 period;
} perout[IGC_N_PEROUT];
+
+ /* LEDs */
+ struct mutex led_mutex;
};
void igc_up(struct igc_adapter *adapter);
@@ -567,7 +570,6 @@ struct igc_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
- struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
@@ -585,7 +587,7 @@ enum igc_filter_match_flags {
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
- __be16 vlan_etype;
+ u16 vlan_etype;
u16 vlan_tci;
u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
@@ -720,6 +722,8 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
+int igc_led_setup(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index b95d2c86e803..1a64f1ca6ca8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -981,7 +981,7 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
fsp->flow_type |= FLOW_EXT;
- fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
+ fsp->h_ext.vlan_etype = htons(rule->filter.vlan_etype);
fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
}
@@ -1249,7 +1249,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
/* VLAN etype matching */
if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
- rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.vlan_etype = ntohs(fsp->h_ext.vlan_etype);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
}
@@ -1623,18 +1623,17 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static int igc_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
u32 eeer;
if (hw->dev_spec._base.eee_enable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
*edata = adapter->eee;
- edata->supported = SUPPORTED_Autoneg;
eeer = rd32(IGC_EEER);
@@ -1647,9 +1646,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = hw->dev_spec._base.eee_enable;
- edata->advertised = SUPPORTED_Autoneg;
- edata->lp_advertised = SUPPORTED_Autoneg;
-
/* Report correct negotiated EEE status for devices that
* wrongly report EEE at half-duplex
*/
@@ -1657,21 +1653,21 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igc_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
if (ret_val) {
@@ -1699,7 +1695,8 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
+
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
new file mode 100644
index 000000000000..bf240c5daf86
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Linutronix GmbH */
+
+#include <linux/bits.h>
+#include <linux/leds.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <uapi/linux/uleds.h>
+
+#include "igc.h"
+
+#define IGC_NUM_LEDS 3
+
+#define IGC_LEDCTL_LED0_MODE_SHIFT 0
+#define IGC_LEDCTL_LED0_MODE_MASK GENMASK(3, 0)
+#define IGC_LEDCTL_LED0_BLINK BIT(7)
+#define IGC_LEDCTL_LED1_MODE_SHIFT 8
+#define IGC_LEDCTL_LED1_MODE_MASK GENMASK(11, 8)
+#define IGC_LEDCTL_LED1_BLINK BIT(15)
+#define IGC_LEDCTL_LED2_MODE_SHIFT 16
+#define IGC_LEDCTL_LED2_MODE_MASK GENMASK(19, 16)
+#define IGC_LEDCTL_LED2_BLINK BIT(23)
+
+#define IGC_LEDCTL_MODE_ON 0x00
+#define IGC_LEDCTL_MODE_OFF 0x01
+#define IGC_LEDCTL_MODE_LINK_10 0x05
+#define IGC_LEDCTL_MODE_LINK_100 0x06
+#define IGC_LEDCTL_MODE_LINK_1000 0x07
+#define IGC_LEDCTL_MODE_LINK_2500 0x08
+#define IGC_LEDCTL_MODE_ACTIVITY 0x0b
+
+#define IGC_SUPPORTED_MODES \
+ (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK_1000) | \
+ BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_10) | \
+ BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+#define IGC_ACTIVITY_MODES \
+ (BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+struct igc_led_classdev {
+ struct net_device *netdev;
+ struct led_classdev led;
+ int index;
+};
+
+#define lcdev_to_igc_ldev(lcdev) \
+ container_of(lcdev, struct igc_led_classdev, led)
+
+static void igc_led_select(struct igc_adapter *adapter, int led,
+ u32 *mask, u32 *shift, u32 *blink)
+{
+ switch (led) {
+ case 0:
+ *mask = IGC_LEDCTL_LED0_MODE_MASK;
+ *shift = IGC_LEDCTL_LED0_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED0_BLINK;
+ break;
+ case 1:
+ *mask = IGC_LEDCTL_LED1_MODE_MASK;
+ *shift = IGC_LEDCTL_LED1_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED1_BLINK;
+ break;
+ case 2:
+ *mask = IGC_LEDCTL_LED2_MODE_MASK;
+ *shift = IGC_LEDCTL_LED2_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED2_BLINK;
+ break;
+ default:
+ *mask = *shift = *blink = 0;
+ netdev_err(adapter->netdev, "Unknown LED %d selected!\n", led);
+ }
+}
+
+static void igc_led_set(struct igc_adapter *adapter, int led, u32 mode,
+ bool blink)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+
+ /* Set mode */
+ ledctl = rd32(IGC_LEDCTL);
+ ledctl &= ~mask;
+ ledctl |= mode << shift;
+
+ /* Configure blinking */
+ if (blink)
+ ledctl |= blink_bit;
+ else
+ ledctl &= ~blink_bit;
+ wr32(IGC_LEDCTL, ledctl);
+
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
+static u32 igc_led_get(struct igc_adapter *adapter, int led)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+ ledctl = rd32(IGC_LEDCTL);
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+
+ return (ledctl & mask) >> shift;
+}
+
+static int igc_led_brightness_set_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ if (brightness)
+ mode = IGC_LEDCTL_MODE_ON;
+ else
+ mode = IGC_LEDCTL_MODE_OFF;
+
+ netdev_dbg(adapter->netdev, "Set brightness for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ igc_led_set(adapter, ldev->index, mode, false);
+
+ return 0;
+}
+
+static int igc_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ if (flags & ~IGC_SUPPORTED_MODES)
+ return -EOPNOTSUPP;
+
+ /* If both Tx and Rx are selected, activity can be offloaded, but only
+ * if no other mode is selected as well.
+ */
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)) &&
+ !(flags & ~IGC_ACTIVITY_MODES))
+ return 0;
+
+ /* Single Rx or Tx activity is not supported. */
+ if (flags & IGC_ACTIVITY_MODES)
+ return -EOPNOTSUPP;
+
+ /* Only one mode can be active at a given time. */
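+ /* flags & (flags - 1) is nonzero iff more than one bit is set */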
+ if (flags & (flags - 1))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int igc_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode = IGC_LEDCTL_MODE_OFF;
+ bool blink = false;
+
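+ /* hw_control_is_supported() ensures at most one trigger (or the
+ * Tx+Rx pair) is set, so at most one assignment below applies
+ */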
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode = IGC_LEDCTL_MODE_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = IGC_LEDCTL_MODE_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = IGC_LEDCTL_MODE_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode = IGC_LEDCTL_MODE_LINK_2500;
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)))
+ mode = IGC_LEDCTL_MODE_ACTIVITY;
+
+ netdev_dbg(adapter->netdev, "Set HW control for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ /* blink is recommended for activity */
+ if (mode == IGC_LEDCTL_MODE_ACTIVITY)
+ blink = true;
+
+ igc_led_set(adapter, ldev->index, mode, blink);
+
+ return 0;
+}
+
+static int igc_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ mode = igc_led_get(adapter, ldev->index);
+
+ switch (mode) {
+ case IGC_LEDCTL_MODE_ACTIVITY:
+ *flags = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case IGC_LEDCTL_MODE_LINK_10:
+ *flags = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case IGC_LEDCTL_MODE_LINK_100:
+ *flags = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case IGC_LEDCTL_MODE_LINK_1000:
+ *flags = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case IGC_LEDCTL_MODE_LINK_2500:
+ *flags = BIT(TRIGGER_NETDEV_LINK_2500);
+ break;
+ }
+
+ return 0;
+}
+
+static struct device *igc_led_hw_control_get_device(struct led_classdev *led_cdev)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+
+ return &ldev->netdev->dev;
+}
+
+static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
+ size_t buf_len)
+{
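+ /* e.g. "igc-0200-led0" for PCI domain 0, bus 2, devfn 0 */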
+ snprintf(buf, buf_len, "igc-%x%x-led%d",
+ pci_domain_nr(adapter->pdev->bus),
+ pci_dev_id(adapter->pdev), index);
+}
+
+static void igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->netdev = netdev;
+ ldev->index = index;
+
+ igc_led_get_name(adapter, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->max_brightness = 1;
+ led_cdev->brightness_set_blocking = igc_led_brightness_set_blocking;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->hw_control_is_supported = igc_led_hw_control_is_supported;
+ led_cdev->hw_control_set = igc_led_hw_control_set;
+ led_cdev->hw_control_get = igc_led_hw_control_get;
+ led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
+
+ devm_led_classdev_register(&netdev->dev, led_cdev);
+}
+
+int igc_led_setup(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &netdev->dev;
+ struct igc_led_classdev *leds;
+ int i;
+
+ mutex_init(&adapter->led_mutex);
+
+ leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ igc_setup_ldev(leds + i, netdev, i);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ba8d3fe186ae..14924dd8a4b9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3385,7 +3385,7 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
u32 fhftsl;
if (input->index >= MAX_FLEX_FILTER) {
- dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
return -EINVAL;
}
@@ -3420,7 +3420,6 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
struct igc_flex_filter *input)
{
- struct device *dev = &adapter->pdev->dev;
struct igc_hw *hw = &adapter->hw;
u8 *data = input->data;
u8 *mask = input->mask;
@@ -3434,7 +3433,7 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
* out early to avoid surprises later.
*/
if (input->length % 8 != 0) {
- dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
return -EINVAL;
}
@@ -3504,8 +3503,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
}
wr32(IGC_WUFC, wufc);
- dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
- input->index);
+ netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
+ input->index);
return 0;
}
@@ -3577,9 +3576,9 @@ static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
static int igc_add_flex_filter(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
- struct igc_flex_filter flex = { };
struct igc_nfc_filter *filter = &rule->filter;
unsigned int eth_offset, user_offset;
+ struct igc_flex_filter flex = { };
int ret, index;
bool vlan;
@@ -3615,10 +3614,12 @@ static int igc_add_flex_filter(struct igc_adapter *adapter,
ETH_ALEN, NULL);
/* Add VLAN etype */
- if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
- igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
- sizeof(filter->vlan_etype),
- NULL);
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ __be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
+
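+ /* the rule stores vlan_etype in host order; convert to big endian
+ * only for the on-wire match
+ */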
+ igc_flex_filter_add_field(&flex, &vlan_etype, 12,
+ sizeof(vlan_etype), NULL);
+ }
/* Add VLAN TCI */
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
@@ -5276,7 +5277,7 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -6487,7 +6488,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
- int i, drops;
+ int i, nxmit;
if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
@@ -6503,16 +6504,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
/* Avoid transmit queue timeout since we share it with the slow path */
txq_trans_cond_update(nq);
- drops = 0;
+ nxmit = 0;
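+ /* stop at the first failed frame and return the count actually
+ * queued; per ndo_xdp_xmit, the caller frees any unsent frames
+ */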
for (i = 0; i < num_frames; i++) {
int err;
struct xdp_frame *xdpf = frames[i];
err = igc_xdp_init_tx_descriptor(ring, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err)
+ break;
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH)
@@ -6520,7 +6520,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_unlock(nq);
- return num_frames - drops;
+ return nxmit;
}
static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
@@ -6977,6 +6977,12 @@ static int igc_probe(struct pci_dev *pdev,
pm_runtime_put_noidle(&pdev->dev);
+ if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ err = igc_led_setup(adapter);
+ if (err)
+ goto err_register;
+ }
+
return 0;
err_register:
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 7cd8716d2ffa..861f37076861 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* Temporary workaround - should be removed when PHY will implement
- * IEEE registers as properly
- */
- /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
usleep_range(1000, 2000);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index d38c87d7e5e8..e5b893fc5b66 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
+#define IGC_LEDCTL 0x00E00 /* LED Control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6f0376e42f4..559b443c409f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -949,19 +949,19 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
@@ -1059,7 +1059,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 6835d5f18753..283a23150a4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -15,10 +15,10 @@
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE 512
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
/**
@@ -66,7 +66,7 @@ out:
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -93,12 +93,12 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
u16 list_offset, data_offset;
+ int ret_val;
/* Identify the PHY */
phy->ops.identify(hw);
@@ -148,9 +148,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* Then set pcie completion timeout
*
**/
-static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -170,7 +170,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -271,7 +271,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+static int ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
u32 fctrl_reg;
u32 rmcs_reg;
@@ -411,13 +411,13 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -457,7 +457,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
* Function indicates success when phy link is available. If phy is not ready
* within 5 seconds of MAC indicating link, the function returns error.
**/
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+static int ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
u32 timeout;
u16 an_reg;
@@ -493,7 +493,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *link_up,
bool link_up_wait_to_complete)
{
@@ -579,7 +579,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
*
* Sets the link speed in the AUTOC register and restarts the link.
**/
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -624,11 +624,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -647,15 +647,15 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
* clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
- s32 status;
- s32 phy_status = 0;
- u32 ctrl;
+ int phy_status = 0;
+ u8 analog_val;
u32 gheccr;
- u32 i;
+ int status;
u32 autoc;
- u8 analog_val;
+ u32 ctrl;
+ u32 i;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -781,7 +781,7 @@ mac_reset_top:
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq set index
**/
-static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -805,7 +805,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq clear index (not used in 82598, but elsewhere)
**/
-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -836,7 +836,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+static int ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regindex;
@@ -881,7 +881,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+static int ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
u32 offset;
u32 vlanbyte;
@@ -905,7 +905,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
*
* Performs a read operation from the specified Atlas analog register.
**/
-static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 atlas_ctl;
@@ -927,7 +927,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Atlas analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 atlas_ctl;
@@ -948,13 +948,13 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
*
* Performs an 8-bit read of the SFP module's data over the I2C interface.
**/
-static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+static int ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u8 byte_offset, u8 *eeprom_data)
{
- s32 status = 0;
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ int status = 0;
u16 gssr;
u32 i;
@@ -1019,7 +1019,7 @@ out:
*
* Performs an 8-bit read of the SFP module's EEPROM over the I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
@@ -1034,8 +1034,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs an 8-bit read of the SFP module's SFF-8472 data over I2C
**/
-static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *sff8472_data)
+static int ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
byte_offset, sff8472_data);
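Both EEPROM/SFF-8472 wrappers above delegate to ixgbe_read_i2c_phy_82598() with a fixed I2C device address; since the wrappers are static, outside code reaches them through the phy ops table. A hedged usage sketch (offset 0 as the SFF identifier byte is assumed for illustration):

	u8 id = 0;
	int err;

	/* one byte from the module's A0 page via the wired-up op */
	err = hw->phy.ops.read_i2c_eeprom(hw, 0, &id);
	if (err)
		return err;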
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 339e106a5732..cdaf087b4e85 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -21,24 +21,24 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void
ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
@@ -98,10 +98,10 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
}
}
-static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
u16 list_offset, data_offset, data_value;
+ int ret_val;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -173,10 +173,10 @@ setup_sfp_err:
* prot_autoc_write_82599(). Note, that locked can only be true in cases
* where this function doesn't return an error.
**/
-static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
u32 *reg_val)
{
- s32 ret_val;
+ int ret_val;
*locked = false;
/* If LESM is on then we need to hold the SW/FW semaphore. */
@@ -203,9 +203,9 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
* This part (82599) may need to hold the SW/FW lock around all writes to
* AUTOC. Likewise after a write we need to do a pipeline reset.
**/
-static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+static int prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
- s32 ret_val = 0;
+ int ret_val = 0;
/* Blocked by MNG FW so bail */
if (ixgbe_check_reset_blocked(hw))
@@ -237,7 +237,7 @@ out:
return ret_val;
}
-static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -263,11 +263,11 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
u32 esdp;
if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -322,7 +322,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -334,7 +334,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
return 0;
@@ -500,14 +502,14 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
{
+ bool got_lock = false;
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
- bool got_lock = false;
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
@@ -657,15 +659,15 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
*
* Implements the Intel SmartSpeed algorithm.
**/
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 i, j;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ bool link_up = false;
+ int status = 0;
+ s32 i, j;
/* Set autoneg_advertised value based on input link speed */
hw->phy.autoneg_advertised = 0;
@@ -767,16 +769,15 @@ out:
*
* Sets the link speed in the AUTOC register and restarts the link.
**/
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- bool autoneg = false;
- s32 status;
- u32 pma_pmd_1g, link_mode, links_reg, i;
- u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 pma_pmd_10g_serial, pma_pmd_1g, link_mode, links_reg, i;
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ bool autoneg = false;
+ int status;
/* holds the value of AUTOC register at this current point in time */
u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -785,6 +786,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* temporary variable used for comparison purposes */
u32 autoc = current_autoc;
+ pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
&autoneg);
@@ -882,11 +885,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
*
* Restarts link on PHY and MAC based on settings passed in.
**/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -905,13 +908,13 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
- s32 status;
u32 ctrl, i, autoc, autoc2;
- u32 curr_lms;
bool link_up = false;
+ u32 curr_lms;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -1081,7 +1084,7 @@ mac_reset_top:
* @hw: pointer to hardware structure
* @fdircmd: current value of FDIRCMD register
*/
-static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+static int ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
int i;
@@ -1099,12 +1102,12 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
- int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
u32 fdircmd;
- s32 err;
+ int err;
+ int i;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
@@ -1212,7 +1215,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1236,7 +1239,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1359,7 +1362,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* Note that the tunnel bit in input must not be set when the hardware
* tunneling support does not exist.
**/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
@@ -1515,7 +1518,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask)
{
/* mask IPv6 since it is currently not supported */
@@ -1627,12 +1630,12 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
- s32 err;
+ int err;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1690,13 +1693,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id)
{
u32 fdirhash;
u32 fdircmd;
- s32 err;
+ int err;
/* configure FDIRHASH register */
fdirhash = (__force u32)input->formatted.bkt_hash;
@@ -1734,7 +1737,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
*
* Performs a read operation from the specified Omer analog register.
**/
-static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 core_ctl;
@@ -1756,7 +1759,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Omer analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 core_ctl;
@@ -1776,9 +1779,9 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
* and the generation-specific start_hw function.
* Then performs revision-specific operations, if any.
**/
-static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
+ int ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -1802,9 +1805,9 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
* If PHY already detected, maintains current PHY type in hw struct,
* otherwise executes the PHY detection routine.
**/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
@@ -1835,7 +1838,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit for 82599
**/
-static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+static int ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
/*
* Workaround for 82599 silicon errata when enabling the Rx datapath.
@@ -1865,12 +1868,12 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Return: -EACCES if the FW is not present or if the FW version is
* not supported.
**/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_ptp_cfg_offset;
- s32 status = -EACCES;
- u16 offset;
+ int status = -EACCES;
u16 fw_version = 0;
+ u16 offset;
/* firmware check is only necessary for SFI devices */
if (hw->phy.media_type != ixgbe_media_type_fiber)
@@ -1917,7 +1920,7 @@ fw_version_err:
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
- s32 status;
+ int status;
/* get the offset to the Firmware Module block */
status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
@@ -1956,7 +1959,7 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
*
* Retrieves 16 bit word(s) read from EEPROM
**/
-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -1982,7 +1985,7 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word from the EEPROM
**/
-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+static int ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -2006,11 +2009,11 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 anlp1_reg = 0;
u32 i, autoc_reg, autoc2_reg;
+ u32 anlp1_reg = 0;
+ int ret_val;
/* Enable link if disabled in NVM */
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
@@ -2061,12 +2064,12 @@ reset_pipeline_out:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
@@ -2115,12 +2118,12 @@ release_i2c_access:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
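The qsfp_shared_i2c_bus branches above bracket the raw transfer with bus-ownership handshaking; the general shape, sketched under the assumption that IXGBE_GSSR_I2C_MASK is the SW/FW semaphore mask for the shared bus:

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_I2C_MASK))
		return -EBUSY;		/* bus currently owned elsewhere */

	/* ... byte-level I2C transfer against the SFP/QSFP module ... */

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_I2C_MASK);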
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2e6e0365154a..3be1bfb16498 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -10,10 +10,10 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u16 count);
@@ -22,15 +22,15 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -111,12 +111,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
bool locked = false;
+ int ret_val = 0;
+ u16 reg_cu = 0;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -267,11 +267,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 ctrl_ext;
u16 device_caps;
+ u32 ctrl_ext;
+ int ret_val;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -330,7 +330,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* 82599
* X540
**/
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
u32 i;
@@ -354,9 +354,9 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
* up link and flow control settings, and leaves transmit and receive units
* disabled and uninitialized
**/
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
@@ -380,7 +380,7 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
* Clears all hardware statistics counters by reading them from the hardware
* Statistics counters are clear on read.
**/
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
u16 i = 0;
@@ -489,14 +489,14 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
*
* Reads the part number string from the EEPROM.
**/
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size)
{
- s32 ret_val;
- u16 data;
+ int ret_val;
u16 pba_ptr;
u16 offset;
u16 length;
+ u16 data;
if (pba_num == NULL) {
hw_dbg(hw, "PBA string buffer was null\n");
@@ -599,7 +599,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
* A reset of the adapter must be performed prior to calling this function
* in order for the MAC address to have been loaded from the EEPROM into RAR0
**/
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
u32 rar_high;
u32 rar_low;
@@ -653,7 +653,7 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
*
* Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
**/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
u16 link_status;
@@ -709,7 +709,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
u32 reg_val;
u16 i;
@@ -759,7 +759,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Store the index for the link active LED. This will be used to support
* blinking the LED.
**/
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 led_reg, led_mode;
@@ -800,7 +800,7 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @index: led number to turn on
**/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -821,7 +821,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to turn off
**/
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -844,7 +844,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -895,11 +895,11 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*
* Writes 16 bit word(s) to EEPROM through bit-bang method
**/
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -942,14 +942,14 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
u16 page_size;
+ int status;
+ u16 word;
u16 i;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
@@ -1019,7 +1019,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
hw->eeprom.ops.init_params(hw);
@@ -1038,11 +1038,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1077,12 +1077,12 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word_in;
u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 word_in;
+ int status;
u16 i;
/* Prepare the EEPROM for reading */
@@ -1129,7 +1129,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit value from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data)
{
hw->eeprom.ops.init_params(hw);
@@ -1149,11 +1149,11 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit word(s) from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eerd;
- s32 status;
u32 i;
hw->eeprom.ops.init_params(hw);
@@ -1189,11 +1189,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
* This function is called only when we are writing a new large buffer
* at given offset so the data would be overwritten anyway.
**/
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset)
{
u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
- s32 status;
+ int status;
u16 i;
for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1229,7 +1229,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
@@ -1243,11 +1243,11 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Writes 16 bit word(s) to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eewr;
- s32 status;
u16 i;
hw->eeprom.ops.init_params(hw);
@@ -1286,7 +1286,7 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
@@ -1299,7 +1299,7 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -1325,7 +1325,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
* Prepares EEPROM for access using bit-bang method. This function should
* be called before issuing a command to the EEPROM.
**/
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
u32 i;
@@ -1371,7 +1371,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
*
* Sets the hardware semaphores so EEPROM access can occur for bit-bang method
**/
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -1462,7 +1462,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
* ixgbe_ready_eeprom - Polls for EEPROM ready
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
u16 i;
u8 spi_stat_reg;
@@ -1680,7 +1680,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1728,7 +1728,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
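The cast is safe because the computed checksum is a 16-bit quantity carried in a non-negative int. The underlying convention, sketched with IXGBE_EEPROM_SUM and IXGBE_EEPROM_CHECKSUM assumed from ixgbe_type.h, is that the stored checksum word makes the image sum to IXGBE_EEPROM_SUM:

	u16 stored = 0;
	int computed = hw->eeprom.ops.calc_checksum(hw);	/* >= 0 on success */

	if (computed < 0)
		return computed;

	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &stored);
	if ((u16)computed != stored)
		return -EIO;		/* NVM image looks corrupt */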
/**
@@ -1739,12 +1739,12 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1786,10 +1786,10 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1823,7 +1823,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
*
* Puts an ethernet address into a receive address register.
**/
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr)
{
u32 rar_low, rar_high;
@@ -1876,7 +1876,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
*
* Clears an ethernet address from a receive address register.
**/
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1917,7 +1917,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
**/
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
u32 i;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1980,7 +1980,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
@@ -2049,7 +2049,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
@@ -2091,7 +2091,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
*
* Enables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2108,7 +2108,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
*
* Disables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2124,7 +2124,7 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
u32 mflcn_reg, fccfg_reg;
u32 reg;
@@ -2252,7 +2252,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
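For reference, the intersection computed here follows the standard IEEE pause resolution; a sketch of the outcome as a truth table (sym/asm being the symmetric and asymmetric pause ability bits):

	/*
	 * local sym/asm	partner sym/asm		resolved fc.current_mode
	 *   1    -		  1    -		ixgbe_fc_full (rx_pause if
	 *						only rx_pause was requested)
	 *   0    1		  1    1		ixgbe_fc_tx_pause
	 *   1    1		  0    1		ixgbe_fc_rx_pause
	 *   anything else				ixgbe_fc_none
	 */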
@@ -2294,10 +2294,10 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
*
* Enable flow control according to IEEE clause 37 on 1 gig fiber.
**/
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- s32 ret_val;
+ int ret_val;
/*
* On multispeed fiber at 1g, bail out if
@@ -2328,10 +2328,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
- s32 ret_val;
+ int ret_val;
/*
* On backplane, bail out if
@@ -2367,7 +2367,7 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
u16 technology_ability_reg = 0;
u16 lp_technology_ability_reg = 0;
@@ -2395,7 +2395,7 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
ixgbe_link_speed speed;
- s32 ret_val = -EIO;
+ int ret_val = -EIO;
bool link_up;
/*
@@ -2501,7 +2501,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
* bit hasn't caused the primary requests to be disabled, else 0
* is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2573,7 +2573,7 @@ gio_disable_fail:
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -2641,7 +2641,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
*
* The default case requires no protection so just do the register read.
**/
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
*locked = false;
*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -2655,7 +2655,7 @@ s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous read.
**/
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
return 0;
@@ -2668,7 +2668,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
* Stops the receive data path and waits for the HW to internally
* empty the Rx security block.
**/
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40
int i;
@@ -2700,7 +2700,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the receive data path
**/
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
u32 secrxreg;
@@ -2719,7 +2719,7 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit
**/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
if (regval & IXGBE_RXCTRL_RXEN)
hw->mac.ops.enable_rx(hw);
@@ -2734,14 +2734,14 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
* @hw: pointer to hardware structure
* @index: led number to blink
**/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
- ixgbe_link_speed speed = 0;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ixgbe_link_speed speed = 0;
+ bool link_up = false;
bool locked = false;
- s32 ret_val;
+ int ret_val;
if (index > 3)
return -EINVAL;
@@ -2782,12 +2782,12 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to stop blinking
**/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
bool locked = false;
- s32 ret_val;
+ u32 autoc_reg = 0;
+ int ret_val;
if (index > 3)
return -EINVAL;
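These blink helpers are normally reached from ethtool's identify operation (ethtool -p); a hedged sketch of that flow, assuming the LED index cached by ixgbe_init_led_link_act_generic():

	/* blink the link-activity LED for ~2s to identify the port */
	hw->mac.ops.blink_led_start(hw, hw->mac.led_link_act);
	msleep(2000);
	hw->mac.ops.blink_led_stop(hw, hw->mac.led_link_act);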
@@ -2821,10 +2821,10 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
* pointer, and returns the value at that location. This is used in both
* get and set mac_addr routines.
**/
-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
{
- s32 ret_val;
+ int ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2849,11 +2849,11 @@ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
* set_lan_id() is called by identify_sfp(), but this cannot be relied
* upon for non-SFP connections, so we must call it here.
**/
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
+ int ret_val;
u8 i;
- s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2942,7 +2942,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
* @rar: receive address register index to disassociate
* @vmdq: VMDq pool index to remove from the rar
**/
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar_lo, mpsar_hi;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2993,7 +2993,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq pool index
**/
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -3026,7 +3026,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* VFs advertised and not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
**/
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
u32 rar = hw->mac.san_mac_rar_index;
@@ -3045,7 +3045,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
int i;
@@ -3065,9 +3065,9 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
- s32 regindex, first_empty_slot;
+ int regindex, first_empty_slot;
u32 bits;
/* short cut the special case */
@@ -3115,11 +3115,11 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regidx, vfta_delta, vfta, bits;
- s32 vlvf_index;
+ int vlvf_index;
if ((vlan > 4095) || (vind > 63))
return -EINVAL;
@@ -3226,7 +3226,7 @@ vfta_update:
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
u32 offset;
@@ -3276,7 +3276,7 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
@@ -3396,8 +3396,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* This function will read the EEPROM from the alternative SAN MAC address
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
@@ -3494,7 +3494,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* This function will read the EEPROM location for the device capabilities,
* and return the word through device_caps.
**/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
@@ -3604,7 +3604,7 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
* by the caller.
**/
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
u32 timeout)
{
u32 hicr, i, fwsts;
@@ -3676,15 +3676,15 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
* Communicates with the manageability block. On success return 0
* else return -EIO or -EINVAL.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
struct ixgbe_hic_hdr *hdr = buffer;
- u32 *u32arr = buffer;
u16 buf_len, dword_len;
- s32 status;
+ u32 *u32arr = buffer;
+ int status;
u32 bi;
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
@@ -3753,13 +3753,13 @@ rel_out:
* else returns -EBUSY when encountering an error acquiring
* semaphore or -EIO when command fails.
**/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, __always_unused u16 len,
__always_unused const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
+ int ret_val;
int i;
- s32 ret_val;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3875,10 +3875,10 @@ static const u8 ixgbe_emc_therm_limit[4] = {
*
* Returns error code.
**/
-static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
u16 *ets_offset)
{
- s32 status;
+ int status;
status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
if (status)
@@ -3903,13 +3903,13 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
*
* Returns the thermal sensor data structure
**/
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 ets_offset;
- u16 ets_cfg;
u16 ets_sensor;
u8 num_sensors;
+ u16 ets_cfg;
+ int status;
u8 i;
struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
@@ -3959,17 +3959,17 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
* Inits the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
- s32 status;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
u8 low_thresh_delta;
u8 num_sensors;
u8 therm_limit;
+ u16 ets_sensor;
+ u16 ets_offset;
+ u16 ets_cfg;
+ int status;
u8 i;
- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
@@ -4192,16 +4192,16 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
*
* Sets the link speed in the MAC and/or PHY register and restarts the link.
*/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ bool autoneg, link_up = false;
u32 speedcnt = 0;
+ int status = 0;
u32 i = 0;
- bool autoneg, link_up = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
@@ -4340,8 +4340,8 @@ out:
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u8 rs, eeprom_data;
+ int status;
switch (speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 34761e691d52..6493abf189de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -8,89 +8,89 @@
#include "ixgbe.h"
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on, bool vlvf_bypass);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
u32 timeout, bool return_data);
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
@@ -111,8 +111,8 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
#define IXGBE_EMC_DIODE3_DATA 0x2A
#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
@@ -121,7 +121,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
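
Several of the .c hunks above also reshuffle local declarations into netdev's "reverse Christmas tree" order, longest line first and shortest last, with no functional effect. A compilable sketch of the ordering, using illustrative names rather than ixgbe's:

#include <stdint.h>
#include <stdio.h>

struct thermal_state { int sensor_count; };

static int init_thresholds(struct thermal_state *hw_state)
{
	struct thermal_state *data = hw_state;	/* longest line first */
	uint16_t ets_offset = 0;
	uint16_t ets_cfg = 0;
	int status = 0;
	uint8_t i = 0;				/* shortest line last */

	(void)data; (void)ets_offset; (void)ets_cfg; (void)i;
	return status;
}

int main(void)
{
	struct thermal_state hw = { .sensor_count = 3 };

	printf("status = %d\n", init_thresholds(&hw));
	return 0;
}
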
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index d26cea5b43bd..502666f28124 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -18,7 +18,7 @@
* @max: max credits by traffic class
* @max_frame: maximum frame size
*/
-static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+static int ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
{
int min_percent = 100;
@@ -59,7 +59,7 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
@@ -247,7 +247,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u8 pfc_en;
@@ -283,7 +283,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -300,7 +300,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
return -EINVAL;
}
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
{
__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
@@ -333,7 +333,7 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
bwg_id, prio_type, ets->prio_tc);
}
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
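
For orientation, ixgbe_ieee_credits() and ixgbe_dcb_calculate_tc_credits() above scale per-traffic-class refill credits from bandwidth percentages so that even the smallest non-zero class can still carry a maximum-size frame. The model below illustrates that idea only; the bandwidth split is assumed and the scaling is not the driver's exact formula:

#include <stdio.h>

#define NUM_TCS 8

int main(void)
{
	const unsigned int bw[NUM_TCS] = { 25, 25, 12, 12, 10, 8, 4, 4 };
	const unsigned int max_frame = 1518;
	/* one max-size frame, rounded up, in 64-byte credit quanta */
	const unsigned int min_credit = (max_frame / 2 + 63) / 64;
	unsigned int min_percent = 100;
	unsigned int i;

	for (i = 0; i < NUM_TCS; i++)
		if (bw[i] && bw[i] < min_percent)
			min_percent = bw[i];

	/* smallest class gets min_credit; others scale linearly */
	for (i = 0; i < NUM_TCS; i++)
		printf("tc%u: refill = %u\n", i,
		       bw[i] * min_credit / min_percent);
	return 0;
}
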
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 60cd5863bf5e..91788e4c4e19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -124,15 +124,15 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
+int ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 379ae747cdce..185c3e5f9837 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -15,10 +15,8 @@
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type)
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
@@ -75,11 +73,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
@@ -124,11 +119,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg;
u8 i;
@@ -171,7 +163,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
@@ -224,7 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -260,7 +252,7 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
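
The pfc_en argument threaded through ixgbe_dcb_config_pfc_82598() and its 82599 counterpart is a per-traffic-class enable bitmask. A standalone illustration of consuming such a mask; the real functions program per-class flow-control thresholds (FCRTL/FCRTH) rather than printing:

#include <stdio.h>

int main(void)
{
	unsigned char pfc_en = 0x05;	/* PFC on for TCs 0 and 2 */
	unsigned int tc;

	for (tc = 0; tc < 8; tc++)
		printf("tc%u: priority flow control %s\n", tc,
		       (pfc_en >> tc) & 1 ? "enabled" : "disabled");
	return 0;
}
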
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index fdca41abb44c..5bf3f13c6953 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -46,27 +46,19 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 7948849840a5..c61bd9059541 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -17,7 +17,7 @@
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -76,7 +76,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -128,7 +128,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -187,7 +187,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
u32 i, j, fcrtl, reg;
u8 max_tc = 0;
@@ -272,7 +272,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -330,7 +330,7 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index c6f084883cab..f6e5a87c03e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -70,30 +70,21 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
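
The ixgbe_dcb_config_tc_stats_*() helpers retyped above share one idea: queue statistics registers are grouped so that all queues of a traffic class feed a single counter set. An illustrative model of that queue-to-class mapping; the 16-queue/8-class split is an assumption for the example:

#include <stdio.h>

int main(void)
{
	const unsigned int num_queues = 16, num_tcs = 8;
	unsigned int q;

	for (q = 0; q < num_queues; q++)
		printf("queue %2u -> tc%u counter set\n",
		       q, q / (num_queues / num_tcs));
	return 0;
}
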
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9a63457712c7..6e6e6f1847b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -349,6 +349,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
+ case ixgbe_sfp_type_1g_bx_core0:
+ case ixgbe_sfp_type_1g_bx_core1:
ethtool_link_ksettings_add_link_mode(cmd, supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
@@ -459,7 +461,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
- s32 err = 0;
+ int err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -3326,9 +3328,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
+ int status;
if (hw->phy.type == ixgbe_phy_fw)
return -ENXIO;
@@ -3372,7 +3374,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status = -EFAULT;
+ int status = -EFAULT;
u8 databyte = 0xFF;
int i = 0;
@@ -3403,66 +3405,68 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
static const struct {
ixgbe_link_speed mac_speed;
- u32 supported;
+ u32 link_mode;
} ixgbe_ls_map[] = {
- { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
- { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
- { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
- { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
- { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+ { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
+ { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
};
static const struct {
u32 lp_advertised;
- u32 mac_speed;
+ u32 link_mode;
} ixgbe_lp_map[] = {
- { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
- { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
- { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
- { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
- { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
- { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
+ { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
};
static int
-ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
- s32 rc;
+ int rc;
u16 i;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
if (rc)
return rc;
- edata->lp_advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
if (info[0] & ixgbe_lp_map[i].lp_advertised)
- edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->lp_advertised);
}
- edata->supported = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- edata->supported |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ edata->supported);
}
- edata->advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- edata->advertised |= ixgbe_ls_map[i].supported;
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ edata->advertised);
}
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !linkmode_empty(edata->advertised);
edata->tx_lpi_enabled = edata->eee_enabled;
- if (edata->advertised & edata->lp_advertised)
- edata->eee_active = true;
+
+ linkmode_and(common, edata->advertised, edata->lp_advertised);
+ edata->eee_active = !linkmode_empty(common);
return 0;
}
-static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3476,17 +3480,17 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- struct ethtool_eee eee_data;
- s32 ret_val;
+ struct ethtool_keee eee_data;
+ int ret_val;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
return -EOPNOTSUPP;
- memset(&eee_data, 0, sizeof(struct ethtool_eee));
+ memset(&eee_data, 0, sizeof(struct ethtool_keee));
ret_val = ixgbe_get_eee(netdev, &eee_data);
if (ret_val)
@@ -3504,7 +3508,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (eee_data.advertised != edata->advertised) {
+ if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
e_err(drv,
"Setting EEE advertised speeds is not supported\n");
return -EINVAL;
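
The ethtool_eee to ethtool_keee conversion above turns u32 speed masks into full link-mode bitmaps, so "EEE active" becomes "the intersection of advertised and lp_advertised is non-empty" (linkmode_and() plus linkmode_empty()) rather than a u32 AND. Below is a standalone model of that bitmap logic; the kernel's real helpers live in <linux/linkmode.h> and the bit numbers here are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBITS 128			/* kernel: __ETHTOOL_LINK_MODE_MASK_NBITS */
#define WORDS (NBITS / 64)

typedef uint64_t linkmode[WORDS];

static void lm_set(linkmode m, unsigned int bit)
{
	m[bit / 64] |= 1ULL << (bit % 64);
}

static void lm_and(linkmode dst, const linkmode a, const linkmode b)
{
	for (unsigned int i = 0; i < WORDS; i++)
		dst[i] = a[i] & b[i];
}

static bool lm_empty(const linkmode m)
{
	for (unsigned int i = 0; i < WORDS; i++)
		if (m[i])
			return false;
	return true;
}

int main(void)
{
	enum { MODE_1000T = 5, MODE_10000T = 12 };	/* illustrative bits */
	linkmode advertised = {0}, lp_advertised = {0}, common;

	lm_set(advertised, MODE_1000T);
	lm_set(advertised, MODE_10000T);
	lm_set(lp_advertised, MODE_10000T);

	lm_and(common, advertised, lp_advertised);
	printf("eee_active = %d\n", !lm_empty(common));
	return 0;
}
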
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bd541527c8c7..8fe8107eb854 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -153,7 +153,7 @@ MODULE_PARM_DESC(max_vfs,
#endif /* CONFIG_PCI_IOV */
static bool allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, bool, 0);
+module_param(allow_unsupported_sfp, bool, 0444);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -205,7 +205,7 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return 0;
}
-static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
+static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 link_status = 0;
@@ -2939,8 +2939,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -7809,7 +7809,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- s32 err;
+ int err;
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
@@ -10205,7 +10205,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -10525,6 +10525,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
}
/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 qmask = BIT_ULL(ring);
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ synchronize_irq(adapter->msix_entries[ring].vector);
+ else
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
* @ring: ring index
@@ -10540,6 +10578,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
+ ixgbe_irq_disable_single(adapter, ring);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@@ -10548,9 +10591,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_disable(&rx_ring->q_vector->napi);
-
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@@ -10578,9 +10618,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_enable(&rx_ring->q_vector->napi);
-
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10589,6 +10626,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
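
The ixgbe_txrx_ring_disable()/ixgbe_txrx_ring_enable() changes above re-sequence ring teardown and bring-up: the queue's vector is masked and NAPI stopped before the rings are drained, and on re-enable the rings come back before NAPI and the vector. A stubbed sketch of that ordering; every function below stands in for the corresponding driver call:

#include <stdio.h>

static void irq_mask(int ring)    { printf("mask irq %d\n", ring); }
static void irq_unmask(int ring)  { printf("unmask irq %d\n", ring); }
static void napi_off(int ring)    { printf("napi_disable %d\n", ring); }
static void napi_on(int ring)     { printf("napi_enable %d\n", ring); }
static void rings_stop(int ring)  { printf("stop+clean rings %d\n", ring); }
static void rings_start(int ring) { printf("configure+start rings %d\n", ring); }

int main(void)
{
	int ring = 0;

	/* disable: quiesce interrupt sources before touching the rings */
	irq_mask(ring);
	napi_off(ring);
	rings_stop(ring);

	/* enable: rings ready first, then polling, then interrupts */
	rings_start(ring);
	napi_on(ring);
	irq_unmask(ring);
	return 0;
}
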
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index fe7ef5773369..d67d77e5dacc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -15,7 +15,7 @@
*
* returns SUCCESS if it successfully read message from buffer
**/
-s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -38,7 +38,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -58,7 +58,7 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -75,7 +75,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -92,7 +92,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -109,7 +109,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message notification
**/
-static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -134,7 +134,7 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
-static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -162,11 +162,11 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
if (!mbx->ops)
return -EIO;
@@ -189,11 +189,11 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static int ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops || !mbx->timeout)
@@ -208,7 +208,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ixgbe_poll_for_ack(hw, mbx_id);
}
-static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+static int ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
@@ -227,9 +227,9 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
@@ -248,9 +248,9 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
@@ -305,7 +305,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 p2v_mailbox;
@@ -329,10 +329,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
@@ -368,10 +368,10 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
-static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 6434c190e7a4..bd205306934b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -96,11 +96,11 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+int ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
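
ixgbe_poll_for_msg() and ixgbe_poll_for_ack() above share a countdown-poll pattern: re-check a mailbox condition up to mbx->timeout times, delaying between attempts, and give up with -EIO when the budget runs out. A standalone sketch of the pattern, with message_pending() as a stand-in for the hardware check:

#include <errno.h>
#include <stdio.h>

static int attempts;

static int message_pending(void)
{
	return ++attempts >= 3;		/* pretend it takes three polls */
}

static int poll_for_event(int timeout)
{
	int countdown = timeout;

	while (countdown && !message_pending())
		countdown--;		/* the driver also delays here */

	return countdown ? 0 : -EIO;
}

int main(void)
{
	printf("poll result: %d\n", poll_for_event(2000));
	return 0;
}
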
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index f28140a05f09..07eaa3c3f4d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -11,19 +11,19 @@
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_out_i2c_byte_ack - Send I2C byte with ack
@@ -32,9 +32,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
*
* Returns an error code on error.
**/
-static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+static int ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_out_i2c_byte(hw, byte);
if (status)
@@ -49,9 +49,9 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
*
* Returns an error code on error.
**/
-static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+static int ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_in_i2c_byte(hw, byte);
if (status)
@@ -85,7 +85,7 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
*
* Returns an error code on error.
*/
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -163,7 +163,7 @@ fail:
*
* Returns an error code on error.
*/
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -260,7 +260,7 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
*
* Determines the physical layer module found on the current adapter.
**/
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
u32 status = -EFAULT;
u32 phy_addr;
@@ -332,11 +332,11 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
**/
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
@@ -394,11 +394,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
* ixgbe_reset_phy_generic - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
u32 i;
u16 ctrl = 0;
- s32 status = 0;
+ int status = 0;
if (hw->phy.type == ixgbe_phy_unknown)
status = ixgbe_identify_phy_generic(hw);
@@ -470,8 +470,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*
* Reads a value from a specified PHY register without the SWFW lock
**/
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
{
u32 i, data, command;
@@ -546,11 +546,11 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -571,8 +571,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
{
u32 i, command;
@@ -644,11 +644,11 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -668,7 +668,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @hw: pointer to hardware structure
* @cmd: command register value to write
**/
-static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
+static int ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
@@ -684,11 +684,11 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -718,11 +718,11 @@ mii_bus_read_done:
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -756,11 +756,11 @@ mii_bus_read_done:
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -787,12 +787,12 @@ static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u16 val,
u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -821,7 +821,7 @@ mii_bus_write_done:
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
+static int ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
@@ -837,7 +837,7 @@ static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+static int ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -854,7 +854,7 @@ static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+static int ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -872,7 +872,7 @@ static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+static int ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -889,7 +889,7 @@ static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -907,7 +907,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
* @devad: device address to read
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
int devad, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -925,7 +925,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -944,7 +944,7 @@ static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
int devad, int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -1023,13 +1023,13 @@ out:
*
* ixgbe_mii_bus_init initializes a mii_bus structure in adapter
**/
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
- s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ int (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ int (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ int (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
u16 val);
- s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
+ int (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
@@ -1095,12 +1095,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*
* Restart autonegotiation and PHY and waits for completion.
**/
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
- s32 status = 0;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
- bool autoneg = false;
ixgbe_link_speed speed;
+ bool autoneg = false;
+ int status = 0;
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
@@ -1173,7 +1173,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: unused
**/
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -1214,10 +1214,10 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the supported link capabilities by reading the PHY auto
* negotiation register.
*/
-static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
u16 speed_ability;
- s32 status;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
@@ -1253,11 +1253,11 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: boolean auto-negotiation value
*/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
- s32 status = 0;
+ int status = 0;
*autoneg = true;
if (!hw->phy.speeds_supported)
@@ -1276,15 +1276,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
- s32 status;
- u32 time_out;
u32 max_time_out = 10;
- u16 phy_link = 0;
u16 phy_speed = 0;
+ u16 phy_link = 0;
u16 phy_data = 0;
+ u32 time_out;
+ int status;
/* Initialize speed and link to default case */
*link_up = false;
@@ -1326,7 +1326,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* it is called via a function pointer that could call other
* functions that could return an error.
**/
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
@@ -1399,13 +1399,13 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
u16 phy_offset, control, eword, edata, block_crc;
- bool end_data = false;
u16 list_offset, data_offset;
+ bool end_data = false;
u16 phy_data = 0;
- s32 ret_val;
+ int ret_val;
u32 i;
/* Blocked by MNG FW so bail */
@@ -1506,7 +1506,7 @@ err_eeprom:
*
* Determines HW type and calls appropriate function.
**/
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw)
{
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
@@ -1527,19 +1527,20 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
*
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 bitrate_nominal = 0;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+ u16 enforce_sfp = 0;
u32 vendor_oui = 0;
- enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
- u8 comp_codes_1g = 0;
- u8 comp_codes_10g = 0;
- u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
u8 cable_spec = 0;
- u16 enforce_sfp = 0;
+ int status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1576,7 +1577,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
+ if (status)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_BITRATE_NOMINAL,
+ &bitrate_nominal);
if (status)
goto err_read_i2c_eeprom;
@@ -1659,6 +1665,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
+ /* Support only Ethernet 1000BASE-BX10, checking the Bit Rate
+ * Nominal Value as per SFF-8472 by convention 1.25 Gb/s should
+ * be rounded up to 0Dh (13 in units of 100 MBd) for 1000BASE-BX
+ */
+ } else if ((comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) &&
+ (bitrate_nominal == 0xD)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1747,7 +1765,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return -EOPNOTSUPP;
}
@@ -1763,7 +1783,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel)
return 0;
@@ -1792,10 +1814,10 @@ err_read_i2c_eeprom:
*
* Searches for and identifies the QSFP module and assigns appropriate PHY type
**/
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ int status;
u32 vendor_oui = 0;
enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
@@ -1975,7 +1997,7 @@ err_read_i2c_eeprom:
* Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
* so it returns the offsets to the phy init sequence block.
**/
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset)
{
@@ -1999,12 +2021,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
@@ -2065,7 +2089,7 @@ err_phy:
*
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2081,7 +2105,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2097,7 +2121,7 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
@@ -2131,14 +2155,14 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data, bool lock)
{
- s32 status;
- u32 max_retry = 10;
- u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 max_retry = 10;
bool nack = true;
+ u32 retry = 0;
+ int status;
if (hw->mac.type >= ixgbe_mac_X550)
max_retry = 3;
@@ -2221,7 +2245,7 @@ fail:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2238,7 +2262,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2256,13 +2280,13 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
- s32 status;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
u32 max_retry = 1;
u32 retry = 0;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int status;
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return -EBUSY;
@@ -2324,7 +2348,7 @@ fail:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2341,7 +2365,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2422,10 +2446,10 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
*
* Clocks in one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
- s32 i;
bool bit = false;
+ int i;
*data = 0;
for (i = 7; i >= 0; i--) {
@@ -2443,12 +2467,12 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
*
* Clocks out one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
{
- s32 status;
- s32 i;
- u32 i2cctl;
bool bit = false;
+ int status;
+ u32 i2cctl;
+ int i;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
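The bit-bang helpers retyped here serialize most-significant bit first, as I2C requires: the loop above walks i from 7 down to 0 and shifts each bit onto the wire. A self-contained model of that ordering, with clock_out_bit() as a stand-in for the SDA/SCL register manipulation:

#include <stdint.h>

static void clock_out_bit(int bit) { (void)bit; /* stand-in for SDA/SCL toggling */ }

/* MSB-first serialization, as in ixgbe_clock_out_i2c_byte() above. */
static void clock_out_byte(uint8_t data)
{
	for (int i = 7; i >= 0; i--)
		clock_out_bit((data >> i) & 0x1);
}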
@@ -2474,14 +2498,14 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
*
* Clocks in/out one bit via I2C data/clock
**/
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
- u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
- s32 status = 0;
- u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
u32 timeout = 10;
bool ack = true;
+ int status = 0;
+ u32 i = 0;
if (data_oe_bit) {
i2cctl |= IXGBE_I2C_DATA_OUT(hw);
@@ -2525,7 +2549,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
*
* Clocks in one bit via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2559,10 +2583,10 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
*
* Clocks out one bit via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
- s32 status;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ int status;
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2647,7 +2671,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* Sets the I2C data bit
* Asserts the I2C data output enable on X550 hardware.
**/
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2769,7 +2793,7 @@ bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @on: true for on, false for off
**/
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
{
u32 status;
u16 reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index ef72729d7c93..14aa2ca51f70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -17,6 +17,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_BITRATE_NOMINAL 0xC
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
@@ -39,6 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
@@ -121,57 +123,57 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw);
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val, bool lock);
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val, bool lock);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7299a830f6e4..fcfd0a075eee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -492,7 +492,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
struct net_device *dev = adapter->netdev;
int pf_max_frame = dev->mtu + ETH_HLEN;
u32 reg_offset, vf_shift, vfre;
- s32 err = 0;
+ int err = 0;
#ifdef CONFIG_FCOE
if (dev->features & NETIF_F_FCOE_MTU)
@@ -775,7 +775,7 @@ static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- s32 retval;
+ int retval;
ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
@@ -1254,7 +1254,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
- s32 retval;
+ int retval;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- s32 retval;
+ int retval;
if (vf >= adapter->num_vfs)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 61b9774b3d31..ed440dd0c4f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3210,6 +3210,9 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_1g_bx_core0 = 15,
+ ixgbe_sfp_type_1g_bx_core1 = 16,
+
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
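As with the existing sx/lx entries, the new bx values form a core0/core1 pair, and the identification code earlier in this patch picks between them with hw->bus.lan_id. A hypothetical, standalone model (not part of the driver) of why keeping the pair adjacent is convenient, assuming the LAN ID is 0 or 1 on these two-port parts:

/* Hypothetical: core0/core1 values are adjacent (15 and 16 above),
 * so the port's LAN ID selects the variant by simple addition.
 */
enum sfp_type { bx_core0 = 15, bx_core1 = 16 };

static enum sfp_type sfp_1g_bx_type(unsigned int lan_id)
{
	return (enum sfp_type)(bx_core0 + lan_id);
}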
@@ -3393,50 +3396,50 @@ struct ixgbe_hw;
/* Function pointer table */
struct ixgbe_eeprom_operations {
- s32 (*init_params)(struct ixgbe_hw *);
- s32 (*read)(struct ixgbe_hw *, u16, u16 *);
- s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*write)(struct ixgbe_hw *, u16, u16);
- s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
- s32 (*update_checksum)(struct ixgbe_hw *);
- s32 (*calc_checksum)(struct ixgbe_hw *);
+ int (*init_params)(struct ixgbe_hw *);
+ int (*read)(struct ixgbe_hw *, u16, u16 *);
+ int (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*write)(struct ixgbe_hw *, u16, u16);
+ int (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ int (*update_checksum)(struct ixgbe_hw *);
+ int (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
- s32 (*init_hw)(struct ixgbe_hw *);
- s32 (*reset_hw)(struct ixgbe_hw *);
- s32 (*start_hw)(struct ixgbe_hw *);
- s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ int (*init_hw)(struct ixgbe_hw *);
+ int (*reset_hw)(struct ixgbe_hw *);
+ int (*start_hw)(struct ixgbe_hw *);
+ int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
- s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
- s32 (*stop_adapter)(struct ixgbe_hw *);
- s32 (*get_bus_info)(struct ixgbe_hw *);
+ int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ int (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ int (*stop_adapter)(struct ixgbe_hw *);
+ int (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
- s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
- s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
- s32 (*setup_sfp)(struct ixgbe_hw *);
- s32 (*disable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ int (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ int (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ int (*setup_sfp)(struct ixgbe_hw *);
+ int (*disable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ int (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
void (*init_swfw_sync)(struct ixgbe_hw *);
- s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
- s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+ int (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ int (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
void (*stop_link_on_d3)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
- s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ int (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ int (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
@@ -3444,38 +3447,38 @@ struct ixgbe_mac_operations {
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
/* LED */
- s32 (*led_on)(struct ixgbe_hw *, u32);
- s32 (*led_off)(struct ixgbe_hw *, u32);
- s32 (*blink_led_start)(struct ixgbe_hw *, u32);
- s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
- s32 (*init_led_link_act)(struct ixgbe_hw *);
+ int (*led_on)(struct ixgbe_hw *, u32);
+ int (*led_off)(struct ixgbe_hw *, u32);
+ int (*blink_led_start)(struct ixgbe_hw *, u32);
+ int (*blink_led_stop)(struct ixgbe_hw *, u32);
+ int (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
- s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
- s32 (*clear_rar)(struct ixgbe_hw *, u32);
- s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
- s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*enable_mc)(struct ixgbe_hw *);
- s32 (*disable_mc)(struct ixgbe_hw *);
- s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
- s32 (*init_uta_tables)(struct ixgbe_hw *);
+ int (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ int (*clear_rar)(struct ixgbe_hw *, u32);
+ int (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ int (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*init_rx_addrs)(struct ixgbe_hw *);
+ int (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ int (*enable_mc)(struct ixgbe_hw *);
+ int (*disable_mc)(struct ixgbe_hw *);
+ int (*clear_vfta)(struct ixgbe_hw *);
+ int (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ int (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
- s32 (*fc_enable)(struct ixgbe_hw *);
- s32 (*setup_fc)(struct ixgbe_hw *);
+ int (*fc_enable)(struct ixgbe_hw *);
+ int (*setup_fc)(struct ixgbe_hw *);
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ int (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
const char *);
- s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
- s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ int (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
@@ -3484,47 +3487,47 @@ struct ixgbe_mac_operations {
void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */
- s32 (*dmac_config)(struct ixgbe_hw *hw);
- s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
- s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
- s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
- s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ int (*dmac_config)(struct ixgbe_hw *hw);
+ int (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ int (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
- s32 (*identify)(struct ixgbe_hw *);
- s32 (*identify_sfp)(struct ixgbe_hw *);
- s32 (*init)(struct ixgbe_hw *);
- s32 (*reset)(struct ixgbe_hw *);
- s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_internal_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
- s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
- s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ int (*identify)(struct ixgbe_hw *);
+ int (*identify_sfp)(struct ixgbe_hw *);
+ int (*init)(struct ixgbe_hw *);
+ int (*reset)(struct ixgbe_hw *);
+ int (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ int (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+ int (*setup_link)(struct ixgbe_hw *);
+ int (*setup_internal_link)(struct ixgbe_hw *);
+ int (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ int (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ int (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ int (*read_i2c_sff8472)(struct ixgbe_hw *, u8, u8 *);
+ int (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+ int (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
bool (*check_overtemp)(struct ixgbe_hw *);
- s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
- s32 (*enter_lplu)(struct ixgbe_hw *);
- s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
- s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*set_phy_power)(struct ixgbe_hw *, bool on);
+ int (*enter_lplu)(struct ixgbe_hw *);
+ int (*handle_lasi)(struct ixgbe_hw *hw, bool *);
+ int (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 *value);
- s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 value);
};
struct ixgbe_link_operations {
- s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
- s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ int (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val);
- s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
- s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ int (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val);
};
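The s32-to-int conversion running through these function-pointer tables is behavior-neutral: the kernel defines s32 as plain signed int on every architecture (include/asm-generic/int-ll64.h), so the swap changes no layout or ABI; it only spells the usual negative-errno return convention the way the rest of the networking code does. A compile-time restatement of that equivalence:

/* s32 is a fixed-width alias for signed int in the kernel headers;
 * restated here standalone so the assertion is self-contained.
 */
typedef signed int s32;	/* as in include/asm-generic/int-ll64.h */

_Static_assert(sizeof(s32) == sizeof(int), "s32 and int must match");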
@@ -3602,14 +3605,14 @@ struct ixgbe_phy_info {
#include "ixgbe_mbx.h"
struct ixgbe_mbx_operations {
- s32 (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*check_for_msg)(struct ixgbe_hw *, u16);
+ int (*check_for_ack)(struct ixgbe_hw *, u16);
+ int (*check_for_rst)(struct ixgbe_hw *, u16);
};
struct ixgbe_mbx_stats {
@@ -3656,7 +3659,7 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
- s32 (*get_invariants)(struct ixgbe_hw *);
+ int (*get_invariants)(struct ixgbe_hw *);
const struct ixgbe_mac_operations *mac_ops;
const struct ixgbe_eeprom_operations *eeprom_ops;
const struct ixgbe_phy_operations *phy_ops;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 57a912e4653f..f1ffa398f6df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -16,9 +16,9 @@
#define IXGBE_X540_VFT_TBL_SIZE 128
#define IXGBE_X540_RX_PB_SIZE 384
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
@@ -26,7 +26,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
return ixgbe_media_type_copper;
}
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -51,7 +51,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
@@ -66,11 +66,11 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
- s32 status;
- u32 ctrl, i;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -166,9 +166,9 @@ mac_reset_top:
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -184,7 +184,7 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -215,9 +215,9 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -237,10 +237,10 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -259,9 +259,9 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -281,10 +281,10 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -303,7 +303,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -368,7 +368,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
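Returning the checksum through the int return value works because the two ranges cannot collide: errors are negative errnos, while the u16 checksum widens to a non-negative int, so the (int)checksum cast above is lossless. A hypothetical caller-side sketch of the convention (the helper name is illustrative, not from the driver):

/* Illustrative only: split a calc_checksum() return into errno vs value. */
static int fetch_checksum(struct ixgbe_hw *hw, u16 *checksum)
{
	int ret = hw->eeprom.ops.calc_checksum(hw);

	if (ret < 0)
		return ret;		/* negative errno from the read path */
	*checksum = (u16)ret;		/* 0..0xFFFF: the checksum itself */
	return 0;
}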
/**
@@ -379,12 +379,12 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -439,10 +439,10 @@ out:
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -484,10 +484,10 @@ out:
* Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
* EEPROM from shadow RAM to the flash device.
**/
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
+ int status;
u32 flup;
- s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == -EIO) {
@@ -529,7 +529,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
* Polls the FLUDONE (bit 26) of the EEC Register to determine when the
* flash update is done.
**/
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
@@ -551,7 +551,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
@@ -660,7 +660,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
*/
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -760,7 +760,7 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@@ -798,7 +798,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index e246c0d2a427..b69a680d3ab5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -3,17 +3,17 @@
#include "ixgbe_type.h"
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 6208923e29a2..2decb0710b6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -6,13 +6,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -29,7 +29,7 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -41,7 +41,7 @@ static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -55,7 +55,7 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -91,7 +91,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
*
* Returns status code
*/
-static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+static int ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
{
return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -104,7 +104,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+static int ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
{
return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -117,9 +117,9 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
*
* Returns status code
*/
-static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+static int ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
{
- s32 status;
+ int status;
status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
if (status)
@@ -135,9 +135,9 @@ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+static int ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
{
- s32 status;
+ int status;
status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
value);
@@ -153,9 +153,9 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
* This function assumes that the caller has acquired the proper semaphore.
* Returns error code
*/
-static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+static int ixgbe_reset_cs4227(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u32 retry;
u16 value;
u8 reg;
@@ -225,7 +225,7 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- s32 status;
+ int status;
u16 value;
u8 retry;
@@ -292,7 +292,7 @@ out:
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
@@ -347,13 +347,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
return -EOPNOTSUPP;
}
-static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
return -EOPNOTSUPP;
@@ -368,7 +368,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+static int ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -383,7 +383,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
@@ -399,7 +399,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -414,7 +414,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
@@ -427,7 +427,7 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
* @activity: activity to perform
* @data: Pointer to 4 32-bit words of data
*/
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT])
{
union {
@@ -435,7 +435,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
struct ixgbe_hic_phy_activity_resp rsp;
} hic;
u16 retries = FW_PHY_ACT_RETRIES;
- s32 rc;
+ int rc;
u32 i;
do {
@@ -484,12 +484,12 @@ static const struct {
*
* Returns error code
*/
-static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
u16 phy_speeds;
u16 phy_id_lo;
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.id)
@@ -526,7 +526,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
{
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
@@ -545,7 +545,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+static int ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
@@ -557,10 +557,10 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
* ixgbe_setup_fw_link - Setup firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+static int ixgbe_setup_fw_link(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
@@ -613,7 +613,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
*/
-static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
{
if (hw->fc.requested_mode == ixgbe_fc_default)
hw->fc.requested_mode = ixgbe_fc_full;
@@ -627,7 +627,7 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static int ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -659,7 +659,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
*
* Note: ctrl can be NULL if the IOSF control register value is not needed
*/
-static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
{
u32 i, command;
@@ -690,12 +690,12 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
* @device_type: 3 bit device type
* @phy_data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -716,7 +716,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to read, error %x\n", error);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!ret)
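Note the one semantic fix hiding in this retyping hunk: the old code returned -EIO directly from a section that still holds the GSSR PHY semaphore acquired at the top of the function, leaking it, while "ret = -EIO; goto out;" routes the error through the common release path at the out: label. A compilable userspace model of the pattern, with acquire(), release() and hw_error() as stand-ins for the driver calls:

#include <errno.h>
#include <stdbool.h>

static int acquire(void) { return 0; }		/* stand-in: acquire_swfw_sync */
static void release(void) { }			/* stand-in: release_swfw_sync */
static bool hw_error(void) { return false; }	/* stand-in: RESP_STAT check */

static int read_reg_model(void)
{
	int ret = acquire();

	if (ret)
		return ret;	/* nothing held yet: early return is safe */

	if (hw_error()) {
		ret = -EIO;
		goto out;	/* must drop the semaphore, not return */
	}
	ret = 0;
out:
	release();
	return ret;
}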
@@ -731,10 +732,10 @@ out:
* ixgbe_get_phy_token - Get the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -760,10 +761,10 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
* ixgbe_put_phy_token - Put the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_put_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -789,7 +790,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 data)
{
@@ -815,7 +816,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 3 bit device type
* @data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 *data)
{
@@ -823,7 +824,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
struct ixgbe_hic_internal_phy_req cmd;
struct ixgbe_hic_internal_phy_resp rsp;
} hic;
- s32 status;
+ int status;
memset(&hic, 0, sizeof(hic));
hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
@@ -850,14 +851,14 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
*
* Reads a 16 bit word(s) from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
u16 words_to_read;
- s32 status;
+ int status;
u32 i;
/* Take semaphore for the entire operation. */
@@ -922,14 +923,14 @@ out:
*
* Returns error status for any failure
**/
-static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+static int ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 size, u16 *csum, u16 *buffer,
u32 buffer_size)
{
- u16 buf[256];
- s32 status;
u16 length, bufsz, i, start;
u16 *local_buffer;
+ u16 buf[256];
+ int status;
bufsz = ARRAY_SIZE(buf);
@@ -990,14 +991,14 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
u32 buffer_size)
{
u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 pointer, i, size;
u16 *local_buffer;
- s32 status;
u16 checksum = 0;
- u16 pointer, i, size;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1059,7 +1060,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1067,7 +1068,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
return ixgbe_calc_checksum_X550(hw, NULL, 0);
}
@@ -1079,11 +1080,11 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
- s32 status;
+ int status;
buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1117,12 +1118,12 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1167,11 +1168,11 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data)
{
- s32 status;
struct ixgbe_hic_write_shadow_ram buffer;
+ int status;
buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1195,9 +1196,9 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status = 0;
+ int status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
@@ -1215,10 +1216,10 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
**/
-static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X550(struct ixgbe_hw *hw)
{
- s32 status = 0;
union ixgbe_hic_hdr2 buffer;
+ int status = 0;
buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
buffer.req.buf_lenh = 0;
@@ -1237,7 +1238,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
* Sets bus link width and speed to unknown because X550em is
* not a PCI device.
**/
-static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
{
hw->bus.type = ixgbe_bus_type_internal;
hw->bus.width = ixgbe_bus_width_unknown;
@@ -1268,9 +1269,9 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
- u32 rxctrl, pfdtxgswc;
- s32 status;
struct ixgbe_hic_disable_rxen fw_cmd;
+ u32 rxctrl, pfdtxgswc;
+ int status;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
@@ -1310,10 +1311,10 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum = 0;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1350,11 +1351,11 @@ static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Write a 16 bit word(s) to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words,
u16 *data)
{
- s32 status = 0;
+ int status = 0;
u32 i = 0;
/* Take semaphore for the entire operation. */
@@ -1386,12 +1387,12 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -1429,10 +1430,10 @@ out:
*
* iXFI configuration needed for ixgbe_mac_X550EM_x devices.
**/
-static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+static int ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
{
- s32 status;
u32 reg_val;
+ int status;
/* Disable training protocol FSM. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1501,10 +1502,10 @@ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
* internal PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
{
- s32 status;
u32 link_ctrl;
+ int status;
/* Restart auto-negotiation. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
@@ -1550,11 +1551,11 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
* Configures the integrated KR PHY to use iXFI mode. Used to connect an
* internal and external PHY at a specific speed, without autonegotiation.
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* iXFI is only supported with X552 */
if (mac->type != ixgbe_mac_X550EM_x)
@@ -1607,7 +1608,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @hw: pointer to hardware structure
* @linear: true if SFP module is linear
*/
-static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+static int ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
{
switch (hw->phy.sfp_type) {
case ixgbe_sfp_type_not_present:
@@ -1644,14 +1645,14 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
*
* Configures the external PHY and the integrated KR PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
- s32 status;
- u16 reg_slice, reg_val;
bool setup_linear = false;
+ u16 reg_slice, reg_val;
+ int status;
/* Check if SFP module is supported and linear */
status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1690,11 +1691,11 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* Configures the integrated PHY for native SFI mode. Used to connect the
* internal PHY directly to an SFP cage, without autonegotiation.
**/
-static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* Disable all AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
@@ -1789,13 +1790,13 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
*
* Configure the integrated PHY for native SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
bool setup_linear = false;
u32 reg_phy_int;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1838,14 +1839,14 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Configure the integrated PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
u32 reg_slice, slice_offset;
bool setup_linear = false;
u16 reg_phy_ext;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1917,12 +1918,12 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Returns error status for any failure
**/
-static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait)
{
- s32 status;
ixgbe_link_speed force_speed;
+ int status;
/* Setup internal/external PHY link speed to iXFI (10G), unless
* only 1G is auto advertised then setup KX link.
@@ -1953,7 +1954,7 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
*
* Check that both the MAC and X557 external PHY have link.
**/
-static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool link_up_wait_to_complete)
@@ -1997,13 +1998,13 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
* @speed: unused
* @autoneg_wait_to_complete: unused
*/
-static s32
+static int
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2070,12 +2071,12 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
* @speed: the link speed to force
* @autoneg_wait: true when waiting for completion is needed
*/
-static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+static int ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2147,7 +2148,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2275,10 +2276,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
- s32 status;
bool linear;
+ int status;
/* Check if SFP module is supported */
status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
@@ -2296,7 +2297,7 @@ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
**/
-static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -2374,7 +2375,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
**/
-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
+static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
bool *is_overtemp)
{
u32 status;
@@ -2462,7 +2463,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
*
* Returns PHY access status
**/
-static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
bool lsc, overtemp;
u32 status;
@@ -2554,7 +2555,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
* failure alarm then return error, else if link status change
* then setup internal/external PHY link
**/
-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
bool *is_overtemp)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -2578,11 +2579,11 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
*
* Configures the integrated KR PHY.
**/
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u32 reg_val;
+ int status;
status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2633,7 +2634,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
/* leave link alone for 2.5G */
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
@@ -2651,7 +2652,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
*
* Returns error code if unable to get link status.
**/
-static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
{
u32 ret;
u16 autoneg_status;
@@ -2685,7 +2686,7 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
* A return of a non-zero value indicates an error, and the base driver should
* not report link up.
**/
-static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
ixgbe_link_speed force_speed;
bool link_up;
@@ -2745,9 +2746,9 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
status = ixgbe_reset_phy_generic(hw);
@@ -2763,7 +2764,7 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @led_idx: led number to turn on
**/
-static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2785,7 +2786,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* @hw: pointer to hardware structure
* @led_idx: led number to turn off
**/
-static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2818,12 +2819,12 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* semaphore, -EIO when command fails or -EINVAL when incorrect
* params passed.
**/
-static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, u16 len,
const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
- s32 ret_val;
+ int ret_val;
int i;
if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
@@ -2865,12 +2866,12 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
*
* Determine lowest common link speed with link partner.
**/
-static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed *lcd_speed)
{
- u16 an_lp_status;
- s32 status;
u16 word = hw->eeprom.ctrl_word_3;
+ u16 an_lp_status;
+ int status;
*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -2883,28 +2884,28 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
/* If link partner advertised 1G, return 1G */
if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
*lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
- return status;
+ return 0;
}
/* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
(word & NVM_INIT_CTRL_3_D10GMP_PORT0))
- return status;
+ return 0;
/* Link partner not capable of lower speeds, return 10G */
*lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
- return status;
+ return 0;
}
/**
* ixgbe_setup_fc_x550em - Set up flow control
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
{
bool pause, asm_dir;
u32 reg_val;
- s32 rc = 0;
+ int rc = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -2989,7 +2990,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -3072,13 +3073,13 @@ static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
* (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
* the X557 PHY immediately prior to entering LPLU.
**/
-static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
{
u16 an_10g_cntl_reg, autoneg_reg, speed;
- s32 status;
ixgbe_link_speed lcd_speed;
u32 save_autoneg;
bool link_up;
+ int status;
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
@@ -3129,7 +3130,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
(lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
(lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
- return status;
+ return 0;
/* Clear AN completed indication */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
@@ -3166,10 +3167,10 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
* ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return 0;
@@ -3195,7 +3196,7 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
@@ -3238,10 +3239,10 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
* set during init_shared_code because the PHY/SFP type was
* not known. Perform the SFP init if necessary.
**/
-static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
hw->mac.ops.set_lan_id(hw);
@@ -3366,9 +3367,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
-static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u16 reg;
status = hw->phy.ops.read_reg(hw,
@@ -3440,14 +3441,14 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
** and clears all interrupts, performs a PHY reset, and performs a link (MAC)
** reset.
**/
-static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
ixgbe_link_speed link_speed;
- s32 status;
+ bool link_up = false;
u32 ctrl = 0;
+ int status;
u32 i;
- bool link_up = false;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -3608,10 +3609,10 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = 0;
u32 an_cntl = 0;
+ int status = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -3713,9 +3714,9 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
*
* Acquires the SWFW semaphore and sets the I2C MUX
*/
-static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
{
- s32 status;
+ int status;
status = ixgbe_acquire_swfw_sync_X540(hw, mask);
if (status)
@@ -3749,11 +3750,11 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
*
* Acquires the SWFW semaphore and get the shared PHY token as needed
*/
-static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
int retries = FW_PHY_TOKEN_RETRIES;
- s32 status;
+ int status;
while (--retries) {
status = 0;
@@ -3806,11 +3807,11 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
-static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
@@ -3832,11 +3833,11 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* Writes a value to specified PHY register using the SWFW lock and PHY Token.
* The PHY Token is needed since the MDIO is shared between two MAC instances.
*/
-static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
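The ixgbe hunks above are a mechanical type cleanup: s32 is a driver-local
typedef for a signed 32-bit integer, and these functions already return 0 on
success or a negative errno on failure, so a plain int return matches kernel
convention. A minimal sketch of the pattern (illustrative only; example_op()
and some_hw_read() are hypothetical names):

	static int example_op(struct ixgbe_hw *hw)
	{
		int status;

		status = some_hw_read(hw);	/* hypothetical helper */
		if (status)
			return status;		/* propagate negative errno */

		return 0;
	}

The reshuffled local declarations in several hunks follow the networking
tree's reverse-christmas-tree style (longest declaration first).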
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a44e4bd56142..9c960017a6de 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4413,7 +4413,7 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
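The ixgbevf change swaps open-coded pointer arithmetic for the canonical
helper; the two are equivalent by definition. In essence (a paraphrased
sketch of the helper from include/linux/skbuff.h):

	static inline int skb_network_offset(const struct sk_buff *skb)
	{
		return skb_network_header(skb) - skb->data;
	}

so mac_hdr_len is computed exactly as before, only more readably.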
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index 5182fe737c37..ff54fbe41bcc 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
module_platform_driver(liteeth_driver);
MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 884d64114bff..837295fecd17 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -180,6 +180,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index ceba4aa4f026..a399defe25fd 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,5 +12,6 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeon_ep/
+obj-y += octeon_ep_vf/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a641b3534ca3..40a5f1431e4e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5097,7 +5097,7 @@ static int mvneta_ethtool_set_wol(struct net_device *dev,
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
@@ -5113,7 +5113,7 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
}
static int mvneta_ethtool_set_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 820b1fabe297..23adf53c2aa1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}
+/* Clean up a pool before actual initialization in the OS */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+ u32 val;
+ int i;
+
+ /* Drain the BM from all possible residues left by firmware */
+ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+ mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+ put_cpu();
+
+ /* Stop the BM pool */
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
+}
+
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
int i, err, poolnum = MVPP2_BM_POOLS_NUM;
struct mvpp2_port *port;
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+	/* Clean up each pool in case it contains stale state */
+ for (i = 0; i < poolnum; i++)
+ mvpp2_bm_pool_cleanup(priv, i);
+
if (priv->percpu_pools) {
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
}
}
- poolnum = mvpp2_get_nrxqs(priv) * 2;
for (i = 0; i < poolnum; i++) {
/* the pool in use */
int pn = i / (poolnum / 2);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
new file mode 100644
index 000000000000..e371a3ef0c49
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC VF Driver Configuration
+#
+
+config OCTEON_EP_VF
+ tristate "Marvell Octeon PCI Endpoint NIC VF Driver"
+ depends on 64BIT
+ depends on PCI
+ help
+ This driver supports the networking functionality of Marvell's
+ Octeon PCI Endpoint NIC VF.
+
+	  For the list of devices supported by this driver, refer to the
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>.
+
+ To compile this driver as a module, choose M here.
+ The name of the module will be octeon_ep_vf.
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
new file mode 100644
index 000000000000..4a5f9fcb0b40
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC VF
+#
+
+obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o
+
+octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \
+ octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \
+ octep_vf_ethtool.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
new file mode 100644
index 000000000000..88937fce75f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cn9k.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purposes */
+static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no),
+ val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_vf_reset_iq(oct, q);
+ cn93_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
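+	/* The RPVF field of R[0]_IN_CONTROL reports how many rings the PF
+	 * assigned to this VF.
+	 */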
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) &
+ CN93_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+
+	/* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CN93_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_VF_R_IN_CTL_IS_64B;
+ reg_val |= CN93_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+	/* An INTR_THRESHOLD of max (0xFFFFFFFF) disables the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+
+	/* wait for IDLE to be set to 1 */
+ if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+	/* Clear the ISIZE and BSIZE (22-0) */
+	oq_ctl &= ~GENMASK_ULL(22, 0);
+	/* Populate the BSIZE (15-0) */
+	oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
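+	/* R_OUT_INT_LEVELS packs the time threshold into its upper 32 bits
+	 * and the packet-count threshold into its lower 32 bits.
+	 */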
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cn93_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+	/* Enable PF to VF mbox interrupt by setting 2nd bit */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+	/* Disable PF to VF mbox interrupt by clearing 2nd bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
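+	/* The hardware count is free-running; unsigned 32-bit subtraction
+	 * yields the number of instructions completed since the last read,
+	 * even across counter wraparound.
+	 */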
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
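+	/* Wait (up to ~HZ one-jiffy sleeps) for the doorbell count to drain
+	 * before enabling the ring.
+	 */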
+ while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cn93(oct, q);
+ octep_vf_enable_oq_cn93(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cn93(oct, q);
+ octep_vf_disable_oq_cn93(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cn93_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cn93() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93;
+ octep_vf_init_config_cn93_vf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
new file mode 100644
index 000000000000..1f79dfad42c6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cnxk.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purposes */
+static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_ERR_TYPE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_vf_reset_iq(oct, q);
+ cnxk_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
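+	/* The RPVF field of R[0]_IN_CONTROL reports how many rings the PF
+	 * assigned to this VF.
+	 */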
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
+ CNXK_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+
+	/* wait for IDLE to be set to 1 */
+ if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CNXK_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_VF_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+	/* An INTR_THRESHOLD of max (0xFFFFFFFF) disables the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+
+	/* wait for IDLE to be set to 1 */
+ if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
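+	/* R_OUT_INT_LEVELS packs the time threshold into its upper 32 bits
+	 * and the packet-count threshold into its lower 32 bits.
+	 */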
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~GENMASK_ULL(31, 0);
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cnxk_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+	/* Enable PF to VF mbox interrupt by setting 2nd bit */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+	/* Disable PF to VF mbox interrupt by clearing 2nd bit */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
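+	/* The hardware count is free-running; unsigned 32-bit subtraction
+	 * yields the number of instructions completed since the last read,
+	 * even across counter wraparound.
+	 */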
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
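+	/* Wait (up to ~HZ one-jiffy sleeps) for the doorbell count to drain
+	 * before enabling the ring.
+	 */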
+ while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cnxk(oct, q);
+ octep_vf_enable_oq_cnxk(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cnxk(oct, q);
+ octep_vf_disable_oq_cnxk(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cnxk_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cnxk() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
+ octep_vf_init_config_cnxk_vf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
new file mode 100644
index 000000000000..e03a647b0110
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_CONFIG_H_
+#define _OCTEP_VF_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_VF_32BYTE_INSTR 32
+#define OCTEP_VF_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024
+/* Minimum number of input (Tx) requests enqueued before ringing the doorbell */
+#define OCTEP_VF_DB_MIN 8
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0
+
+/* Minimum watermark for backpressure */
+#define OCTEP_VF_OQ_WMARK_MIN 256
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build skb from allocated page buffer once the packet is received.
+ * When a gathered packet is received, use the head page as the skb head
+ * and the page buffers in consecutive Rx descriptors as fragments.
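+ * SKB_WITH_OVERHEAD(PAGE_SIZE) reserves room at the end of the page for
+ * struct skb_shared_info, so each buffer can back an skb directly.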
+ */
+#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_VF_OQ_PKTS_PER_INTR 128
+#define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold:
+ * wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_VF_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_VF_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
+
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings)
+
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+
+/* Hardware Tx Queue configuration. */
+struct octep_vf_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+	/* Minimum number of pending commands to accumulate before the
+	 * driver rings the Input queue doorbell.
+	 */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_vf_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+
+	/* Watermark for backpressure.
+	 * The Output queue sends a backpressure signal to the source when
+	 * the free buffer count falls below wmark.
+	 */
+ u32 wmark;
+};
+
+/* Tx/Rx configuration */
+struct octep_vf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+};
+
+/* Octeon MSI-x config. */
+struct octep_vf_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_vf_config {
+ /* Input Queue attributes. */
+ struct octep_vf_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_vf_oq_config oq;
+
+ /* MSI-X interrupt config */
+ struct octep_vf_msix_config msix_cfg;
+
+ /* NIC VF ring Configuration */
+ struct octep_vf_ring_config ring_cfg;
+};
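The CFG_GET_* accessors earlier in this header resolve to plain field reads on these structures. A small hedged sketch, with the structures stubbed down to one field, showing why call sites go through the macros (names and values are illustrative):

#include <stdio.h>

struct demo_iq_config { unsigned int num_descs; };
struct demo_config { struct demo_iq_config iq; };

/* Mirrors the shape of CFG_GET_IQ_NUM_DESC() above. */
#define DEMO_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)

int main(void)
{
	struct demo_config cfg = { .iq = { .num_descs = 1024 } };

	/* Call sites read configuration only through the accessors, so the
	 * struct layout can change without touching every caller.
	 */
	printf("IQ descriptors: %u\n", DEMO_GET_IQ_NUM_DESC(&cfg));
	return 0;
}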
+#endif /* _OCTEP_VF_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
new file mode 100644
index 000000000000..a1979b45e355
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_dropped_bytes_fifo_full",
+};
+
+#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
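Each stat name occupies a fixed ETH_GSTRING_LEN slot, so the *_STATS_CNT macros are simply the table size divided by the slot width, and the %u in the per-queue templates is expanded once per ring. A standalone sketch of both tricks (DEMO_GSTRING_LEN stands in for ETH_GSTRING_LEN; names are illustrative):

#include <stdio.h>

#define DEMO_GSTRING_LEN 32	/* stands in for ETH_GSTRING_LEN */

static const char demo_tx_q_stats[][DEMO_GSTRING_LEN] = {
	"tx_packets_posted[Q-%u]",
	"tx_bytes[Q-%u]",
};

/* Same sizeof arithmetic as OCTEP_VF_TX_Q_STATS_CNT above. */
#define DEMO_TX_Q_STATS_CNT (sizeof(demo_tx_q_stats) / DEMO_GSTRING_LEN)

int main(void)
{
	char name[DEMO_GSTRING_LEN];
	unsigned int q, i;

	for (q = 0; q < 2; q++) {
		for (i = 0; i < DEMO_TX_Q_STATS_CNT; i++) {
			/* Expand the %u template per queue, as
			 * octep_vf_get_strings() does below.
			 */
			snprintf(name, sizeof(name),
				 demo_tx_q_stats[i], q);
			printf("%s\n", name);
		}
	}
	return 0;
}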
+
+static void octep_vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_vf_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+		return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues *
+		       (OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_tx_stats *iface_tx_stats;
+ struct octep_vf_iface_rx_stats *iface_rx_stats;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_vf_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_busy_errors += iq->stats.tx_busy;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \
+{ \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_vf_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static const struct ethtool_ops octep_vf_ethtool_ops = {
+ .get_drvinfo = octep_vf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_vf_get_strings,
+ .get_sset_count = octep_vf_get_sset_count,
+ .get_ethtool_stats = octep_vf_get_ethtool_stats,
+ .get_link_ksettings = octep_vf_get_link_ksettings,
+};
+
+void octep_vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
new file mode 100644
index 000000000000..dd49d0b8b494
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+struct workqueue_struct *octep_vf_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_vf_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to interrupt handler, from which napi poll
+ * is scheduled and includes quick access to private data of Tx/Rx queue
+ * corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
+{
+ struct octep_vf_ioq_vector *ioq_vector;
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_vf_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_vf_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+	/* The VF has no non-queue interrupts: one MSI-X vector per Tx/Rx queue pair */
+	num_msix = oct->num_oqs;
+ oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_vf_disable_msix(struct octep_vf_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data contains pointers to Tx/Rx queue private data
+ *        and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts; it dispatches
+ * to the chip-specific handler set up in hw_ops.
+ */
+static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_vf_ioq_vector *ioq_vector = data;
+ struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_vf_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_vf_request_irqs(struct octep_vf_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_vf_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ int ret, i;
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_vf_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+	while (i) {
+		--i;
+		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+		/* dev_id must match the one passed to request_irq() */
+		free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+	}
+ return -1;
+}
+
+/**
+ * octep_vf_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_vf_free_irqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_vf_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_vf_setup_irqs(struct octep_vf_device *oct)
+{
+ if (octep_vf_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_vf_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_vf_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_vf_disable_msix(oct);
+enable_msix_err:
+ octep_vf_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_vf_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+{
+ octep_vf_free_irqs(oct);
+ octep_vf_disable_msix(oct);
+ octep_vf_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+	/* Flush the previous writes before writing to the RESEND bit */
+ smp_wmb();
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_vf_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_vf_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64);
+ rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
+
+	/* Need more polling if Tx completion processing is still pending or
+	 * if at least 'budget' Rx packets were processed.
+	 */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+ return rx_done;
+}
+
+/**
+ * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_add(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_delete(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_enable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_disable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_vf_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_rx_state(oct, up);
+ if (err)
+ netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
+}
+
+static int octep_vf_get_link_status(struct octep_vf_device *oct)
+{
+ int err;
+
+ err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
+ if (err)
+ netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
+ return oct->link_info.oper_up;
+}
+
+static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_link_status(oct, up);
+ if (err) {
+ netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
+ return;
+ }
+ oct->link_info.oper_up = up;
+}
+
+/**
+ * octep_vf_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, then enable hardware operation of
+ * the Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_vf_open(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_vf_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_vf_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_vf_setup_irqs(oct))
+ goto setup_irq_err;
+
+	err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+	if (err)
+		goto set_queues_err;
+	err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+	if (err)
+		goto set_queues_err;
+
+ octep_vf_napi_add(oct);
+ octep_vf_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_vf_set_rx_state(oct, true);
+
+ ret = octep_vf_get_link_status(oct);
+ if (!ret)
+ octep_vf_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_vf_oq_dbell_init(oct);
+
+ ret = octep_vf_get_link_status(oct);
+ if (ret)
+ octep_vf_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+ octep_vf_clean_irqs(oct);
+setup_irq_err:
+ octep_vf_free_oqs(oct);
+setup_oq_err:
+ octep_vf_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_vf_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_vf_stop(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_vf_set_link_status(oct, false);
+ octep_vf_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+
+ octep_vf_clean_irqs(oct);
+ octep_vf_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_vf_free_oqs(oct);
+ octep_vf_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_vf_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
+{
+ int ret;
+
+ ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD,
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+	switch (ret) {
+	case 0: /* Stopped the queue, since the IQ is full */
+		return 1;
+	case -1:
+		/* Pending write-index updates from
+		 * octep_vf_iq_process_completions() on other CPUs
+		 * re-enabled the queue after it was stopped.
+		 */
+		iq->stats.restart_cnt++;
+		fallthrough;
+	case 1: /* Queue left enabled, since the IQ is not yet full */
+		return 0;
+	}
+
+ return 1;
+}
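netif_subqueue_maybe_stop() folds the stop/recheck/restart sequence into the tri-state return decoded above. The following userspace sketch is a simplified model of that contract, not the kernel implementation; thresholds and return values mirror the usage here:

#include <stdio.h>

/* Simplified model of the tri-state contract:
 *  1 - queue left running (enough space),
 *  0 - queue stopped (genuinely full),
 * -1 - queue stopped, but racing completions freed space, so restarted.
 */
static int demo_maybe_stop(unsigned int *space, unsigned int stop_thrs,
			   unsigned int start_thrs, unsigned int completed)
{
	if (*space >= stop_thrs)
		return 1;
	/* Queue looks full: stop it, then recheck after completions that
	 * may have run on another CPU in the meantime.
	 */
	*space += completed;
	if (*space >= start_thrs)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int space = 1;

	/* Completions freed 4 slots during the stop: restart (-1). */
	printf("ret=%d\n", demo_maybe_stop(&space, 2, 2, 4));
	space = 1;
	/* No completions raced in: stay stopped (0). */
	printf("ret=%d\n", demo_maybe_stop(&space, 2, 2, 0));
	return 0;
}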
+
+/**
+ * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
+ struct octep_vf_tx_sglist_desc *sglist;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct octep_vf_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_vf_instr_hdr *ih;
+ struct octep_vf_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ int xmit_more;
+ u16 q_no, wi;
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+	ih->pkind = oct->fw_info.pkind;
+	ih->fsz = oct->fw_info.fsz;
+	ih->tlen = skb->len + ih->fsz;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+ if (oct->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ }
+		/* due to ESR, txm will be byte-swapped by hardware */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+
+ skb_tx_timestamp(skb);
+ iq->fill_cnt++;
+ wi++;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+	/* octep_vf_iq_full_check() stops the queue and returns nonzero if
+	 * inserting the current packet made it full. If the queue is still
+	 * open, more packets are expected (xmit_more) and the batch
+	 * threshold is not yet reached, defer ringing the doorbell.
+	 */
+ if (!octep_vf_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
+ goto ring_dbell;
+
+dma_map_sg_err:
+	if (si > 0) {
+		/* The skb head was stored with its length in len[3] */
+		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+				 sglist[0].len[3], DMA_TO_DEVICE);
+		sglist[0].len[3] = 0;
+	}
+	/* Entry 'si' itself failed to map; unwind entries si-1 .. 1 using
+	 * the mirrored length slots they were stored in.
+	 */
+	while (si > 1) {
+		--si;
+		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+		sglist[si >> 2].len[3 - (si & 3)] = 0;
+	}
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ring_dbell:
+ /* Flush the hw descriptors before writing to doorbell */
+ smp_wmb();
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
+ return NETDEV_TX_OK;
+}
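The scatter/gather indexing in the transmit path above is subtle: each sglist descriptor holds four DMA pointers, while the four 16-bit lengths in the same descriptor are laid out in reverse, hence dma_ptr[si & 3] paired with len[3 - (si & 3)]. A standalone sketch that prints which slots each fragment index lands in (purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int si;

	/* Entry si lives in descriptor si >> 2; its DMA pointer uses
	 * slot (si & 3) while its length uses the mirrored slot
	 * (3 - (si & 3)), matching the layout octep_vf_start_xmit()
	 * assumes when filling and unwinding the sglist.
	 */
	for (si = 0; si < 6; si++)
		printf("entry %u -> desc %u, dma_ptr[%u], len[%u]\n",
		       si, si >> 2, si & 3, 3 - (si & 3));
	return 0;
}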
+
+int octep_vf_get_if_stats(struct octep_vf_device *oct)
+{
+ struct octep_vf_iface_rxtx_stats vf_stats;
+ int ret, size;
+
+ memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats));
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ (u8 *)&vf_stats, &size);
+
+ if (ret)
+ return ret;
+
+ memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats,
+ sizeof(struct octep_vf_iface_rx_stats));
+ memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats,
+ sizeof(struct octep_vf_iface_tx_stats));
+
+ return 0;
+}
+
+int octep_vf_get_link_info(struct octep_vf_device *oct)
+{
+ int ret, size;
+
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ (u8 *)&oct->link_info, &size);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * octep_vf_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_vf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ int q;
+
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ if (!octep_vf_get_if_stats(oct)) {
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
+ oct->iface_rx_stats.err_pkts;
+ stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
+ stats->tx_dropped = oct->iface_tx_stats.dropped;
+ }
+}
+
+/**
+ * octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to the Tx queue timeout work_struct.
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues; this potentially clears a Tx queue timeout
+ * condition.
+ */
+static void octep_vf_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_vf_stop(netdev);
+ octep_vf_open(netdev);
+ }
+ rtnl_unlock();
+ netdev_put(netdev, NULL);
+}
+
+/**
+ * octep_vf_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_hold(netdev, NULL, GFP_ATOMIC);
+ schedule_work(&oct->tx_timeout_task);
+}
+
+static int octep_vf_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ int err;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_vf_mbox_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+ return err;
+}
+
+static int octep_vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 rx_offloads = 0, tx_offloads = 0;
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & netdev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM;
+
+ err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads);
+ if (!err)
+ netdev->features = features;
+
+ return err;
+}
+
+static const struct net_device_ops octep_vf_netdev_ops = {
+ .ndo_open = octep_vf_open,
+ .ndo_stop = octep_vf_stop,
+ .ndo_start_xmit = octep_vf_start_xmit,
+ .ndo_get_stats64 = octep_vf_get_stats64,
+ .ndo_tx_timeout = octep_vf_tx_timeout,
+ .ndo_set_mac_address = octep_vf_set_mac,
+ .ndo_change_mtu = octep_vf_change_mtu,
+ .ndo_set_features = octep_vf_set_features,
+};
+
+static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ return "CN10KB";
+ default:
+ return "Unsupported";
+ }
+}
+
+/**
+ * octep_vf_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Set up the Octeon device hardware operations and configuration.
+ */
+int octep_vf_device_setup(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR region 0 */
+ oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ if (!oct->mmio.hw_addr) {
+ dev_err(&pdev->dev,
+ "Failed to remap BAR0; start=0x%llx len=0x%llx\n",
+ pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ goto ioremap_err;
+ }
+ oct->mmio.mapped = 1;
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ case OCTEP_PCI_DEVICE_ID_CN98_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cn93(oct);
+ break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cnxk(oct);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported device\n");
+ goto unsupported_dev;
+ }
+
+ return 0;
+
+unsupported_dev:
+ iounmap(oct->mmio.hw_addr);
+ioremap_err:
+ kfree(oct->conf);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * octep_vf_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_vf_device_cleanup(struct octep_vf_device *oct)
+{
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ if (oct->mmio.mapped)
+ iounmap(oct->mmio.hw_addr);
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
+{
+ return octep_vf_mbox_get_mac_addr(oct, addr);
+}
+
+/**
+ * octep_vf_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto disable_pci_device;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto disable_pci_device;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
+ OCTEP_VF_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto mem_regions_release;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_vf_dev = netdev_priv(netdev);
+ octep_vf_dev->netdev = netdev;
+ octep_vf_dev->pdev = pdev;
+ octep_vf_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_vf_dev);
+
+ err = octep_vf_device_setup(octep_vf_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto netdevice_free;
+ }
+ INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);
+
+ netdev->netdev_ops = &octep_vf_netdev_ops;
+ octep_vf_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ if (octep_vf_setup_mbox(octep_vf_dev)) {
+ dev_err(&pdev->dev, "VF Mailbox setup failed\n");
+ err = -ENOMEM;
+ goto device_cleanup;
+ }
+
+ if (octep_vf_mbox_version_check(octep_vf_dev)) {
+ dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
+ dev_err(&pdev->dev, "unable to get fw info\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ netdev->hw_features = NETIF_F_SG;
+ if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->min_mtu = OCTEP_VF_MIN_MTU;
+ netdev->max_mtu = OCTEP_VF_MAX_MTU;
+ netdev->mtu = OCTEP_VF_DEFAULT_MTU;
+
+ if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
+ octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto delete_mbox;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+delete_mbox:
+ octep_vf_delete_mbox(octep_vf_dev);
+device_cleanup:
+ octep_vf_device_cleanup(octep_vf_dev);
+netdevice_free:
+ free_netdev(netdev);
+mem_regions_release:
+ pci_release_mem_regions(pdev);
+disable_pci_device:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "Device probe failed\n");
+ return err;
+}
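The probe function uses the staged-goto cleanup idiom: each failure jumps to a label that undoes exactly the steps completed so far, in reverse order. A distilled userspace sketch of the idiom (resource names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int demo_probe(int fail_step)
{
	char *a, *b;

	a = malloc(16);				/* step 1 */
	if (!a)
		goto err_step1;
	b = (fail_step == 2) ? NULL : malloc(16);	/* step 2 */
	if (!b)
		goto err_step2;

	printf("probe ok\n");
	free(b);		/* demo only: a real probe keeps these */
	free(a);
	return 0;

err_step2:
	free(a);		/* undo step 1 only */
err_step1:
	printf("probe failed\n");
	return -1;
}

int main(void)
{
	demo_probe(0);		/* succeeds */
	demo_probe(2);		/* fails at step 2, unwinds step 1 */
	return 0;
}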
+
+/**
+ * octep_vf_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_vf_remove(struct pci_dev *pdev)
+{
+ struct octep_vf_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ octep_vf_mbox_dev_remove(oct);
+ cancel_work_sync(&oct->tx_timeout_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+ octep_vf_delete_mbox(oct);
+ octep_vf_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_vf_driver = {
+ .name = OCTEP_VF_DRV_NAME,
+ .id_table = octep_vf_pci_id_tbl,
+ .probe = octep_vf_probe,
+ .remove = octep_vf_remove,
+};
+
+/**
+ * octep_vf_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_vf_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING);
+
+ ret = pci_register_driver(&octep_vf_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_VF_DRV_NAME, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * octep_vf_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_vf_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME);
+
+ pci_unregister_driver(&octep_vf_driver);
+
+ pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME);
+}
+
+module_init(octep_vf_init_module);
+module_exit(octep_vf_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
new file mode 100644
index 000000000000..5769f62545cd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_MAIN_H_
+#define _OCTEP_VF_MAIN_H_
+
+#include "octep_vf_tx.h"
+#include "octep_vf_rx.h"
+#include "octep_vf_mbox.h"
+
+#define OCTEP_VF_DRV_NAME "octeon_ep_vf"
+#define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver"
+
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 //93xx VF
+#define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 //95N VF
+#define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103
+#define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03
+#define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03
+
+#define OCTEP_VF_MAX_QUEUES 63
+#define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES
+#define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES
+
+#define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ
+
+#define OCTEP_VF_IQ_INTR_RESEND_BIT 59
+#define OCTEP_VF_OQ_INTR_RESEND_BIT 59
+
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
+
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to a different virtual address range in
+ * the kernel.
+ */
+struct octep_vf_mmio {
+	/* Kernel virtual address to which the PCI address space is mapped. */
+	u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_vf_hw_ops {
+ void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ void (*reinit_regs)(struct octep_vf_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_vf_iq *iq);
+
+ void (*enable_interrupts)(struct octep_vf_device *oct);
+ void (*disable_interrupts)(struct octep_vf_device *oct);
+
+ void (*enable_io_queues)(struct octep_vf_device *oct);
+ void (*disable_io_queues)(struct octep_vf_device *oct);
+ void (*enable_iq)(struct octep_vf_device *oct, int q);
+ void (*disable_iq)(struct octep_vf_device *oct, int q);
+ void (*enable_oq)(struct octep_vf_device *oct, int q);
+ void (*disable_oq)(struct octep_vf_device *oct, int q);
+ void (*reset_io_queues)(struct octep_vf_device *oct);
+ void (*dump_registers)(struct octep_vf_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_vf_mbox_data {
+ /* Holds the offset of received data via mailbox. */
+ u32 data_index;
+
+ /* Holds the received data via mailbox. */
+ u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE];
+};
+
+/* wrappers around work structs */
+struct octep_vf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+};
+
+/* Octeon device mailbox */
+struct octep_vf_mbox {
+ /* A mutex to protect access to this q_mbox. */
+ struct mutex lock;
+
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ /* Octeon mailbox data */
+ struct octep_vf_mbox_data mbox_data;
+
+ /* Octeon mailbox work handler to process Mbox messages */
+ struct octep_vf_mbox_wk wk;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_vf_ioq_vector {
+ char name[OCTEP_VF_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_vf_device *octep_vf_dev;
+ struct octep_vf_iq *iq;
+ struct octep_vf_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_VF_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_VF_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_VF_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_vf_link_mode_bit_indices {
+ OCTEP_VF_LINK_MODE_10GBASE_T = 0,
+ OCTEP_VF_LINK_MODE_10GBASE_R,
+ OCTEP_VF_LINK_MODE_10GBASE_CR,
+ OCTEP_VF_LINK_MODE_10GBASE_KR,
+ OCTEP_VF_LINK_MODE_10GBASE_LR,
+ OCTEP_VF_LINK_MODE_10GBASE_SR,
+ OCTEP_VF_LINK_MODE_25GBASE_CR,
+ OCTEP_VF_LINK_MODE_25GBASE_KR,
+ OCTEP_VF_LINK_MODE_25GBASE_SR,
+ OCTEP_VF_LINK_MODE_40GBASE_CR4,
+ OCTEP_VF_LINK_MODE_40GBASE_KR4,
+ OCTEP_VF_LINK_MODE_40GBASE_LR4,
+ OCTEP_VF_LINK_MODE_40GBASE_SR4,
+ OCTEP_VF_LINK_MODE_50GBASE_CR2,
+ OCTEP_VF_LINK_MODE_50GBASE_KR2,
+ OCTEP_VF_LINK_MODE_50GBASE_SR2,
+ OCTEP_VF_LINK_MODE_50GBASE_CR,
+ OCTEP_VF_LINK_MODE_50GBASE_KR,
+ OCTEP_VF_LINK_MODE_50GBASE_LR,
+ OCTEP_VF_LINK_MODE_50GBASE_SR,
+ OCTEP_VF_LINK_MODE_100GBASE_CR4,
+ OCTEP_VF_LINK_MODE_100GBASE_KR4,
+ OCTEP_VF_LINK_MODE_100GBASE_LR4,
+ OCTEP_VF_LINK_MODE_100GBASE_SR4,
+ OCTEP_VF_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_vf_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+	/* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+	/* Operational state of the link: physical link is up/down. */
+ u8 oper_up;
+};
+
+/* Hardware interface stats information. */
+struct octep_vf_iface_rxtx_stats {
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+};
+
+struct octep_vf_fw_info {
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* front size data */
+ u8 fsz;
+ /* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+ /* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_vf_device {
+ struct octep_vf_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_vf_mmio mmio;
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* Pointers to Octeon Tx queues */
+ struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* Hardware operations */
+ struct octep_vf_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+	/* Per-IOQ information for its corresponding MSI-X interrupt. */
+ struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_vf_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_vf_mbox *mbox;
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Negotiated Mbox version */
+ u32 mbox_neg_ver;
+
+ /* firmware info */
+ struct octep_vf_fw_info fw_info;
+};
+
+static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
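As a quick check of the revision decoding above, the following userspace sketch runs a few rev_id values through the same bit arithmetic (the PASSx.y naming follows the dev_info() messages printed at device setup):

#include <stdio.h>

int main(void)
{
	unsigned int rev_ids[] = { 0x0, 0x5, 0xe };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int rev = (rev_ids[i] & 0xc) >> 2;
		unsigned int major = rev ? rev : 1;	/* 0 decodes as 1 */
		unsigned int minor = rev_ids[i] & 0x3;

		/* 0x0 -> PASS1.0, 0x5 -> PASS1.1, 0xe -> PASS3.2 */
		printf("rev_id 0x%x -> PASS%u.%u\n",
		       rev_ids[i], major, minor);
	}
	return 0;
}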
+
+/* Octeon CSR read/write access APIs */
+#define octep_vf_write_csr(octep_vf_dev, reg_off, value) \
+ writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \
+ writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr(octep_vf_dev, reg_off) \
+ readl((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
+ readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+extern struct workqueue_struct *octep_vf_wq;
+
+int octep_vf_device_setup(struct octep_vf_device *oct);
+int octep_vf_setup_iqs(struct octep_vf_device *oct);
+void octep_vf_free_iqs(struct octep_vf_device *oct);
+void octep_vf_clean_iqs(struct octep_vf_device *oct);
+int octep_vf_setup_oqs(struct octep_vf_device *oct);
+void octep_vf_free_oqs(struct octep_vf_device *oct);
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct);
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct);
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct);
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget);
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget);
+void octep_vf_set_ethtool_ops(struct net_device *netdev);
+int octep_vf_get_link_info(struct octep_vf_device *oct);
+int octep_vf_get_if_stats(struct octep_vf_device *oct);
+void octep_vf_mbox_work(struct work_struct *work);
+#endif /* _OCTEP_VF_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
new file mode 100644
index 000000000000..2eab21e43048
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* When a new command is implemented, the table below should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct)
+{
+ int ring = 0;
+
+ oct->mbox = vzalloc(sizeof(*oct->mbox));
+ if (!oct->mbox)
+ return -1;
+
+ mutex_init(&oct->mbox->lock);
+
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work);
+ oct->mbox->wk.ctxptr = oct;
+ oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ dev_info(&oct->pdev->dev, "setup vf mbox successfully\n");
+ return 0;
+}
+
+void octep_vf_delete_mbox(struct octep_vf_device *oct)
+{
+ if (oct->mbox) {
+ if (work_pending(&oct->mbox->wk.work))
+ cancel_work_sync(&oct->mbox->wk.work);
+
+ mutex_destroy(&oct->mbox->lock);
+ vfree(oct->mbox);
+ oct->mbox = NULL;
+ dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n");
+ }
+}
+
+int octep_vf_mbox_version_check(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION;
+ cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) {
+ dev_err(&oct->pdev->dev,
+ "VF Mbox version is incompatible with PF\n");
+ return -EINVAL;
+ }
+ oct->mbox_neg_ver = (u32)rsp.s_version.version;
+ dev_dbg(&oct->pdev->dev,
+ "VF Mbox version:%u Negotiated VF version with PF:%u\n",
+ (u32)cmd.s_version.version,
+ (u32)rsp.s_version.version);
+ return 0;
+}
+
+void octep_vf_mbox_work(struct work_struct *work)
+{
+ struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work);
+ struct octep_vf_iface_link_info *link_info;
+ struct octep_vf_device *oct = NULL;
+ struct octep_vf_mbox *mbox = NULL;
+ union octep_pfvf_mbox_word *notif;
+ u64 pf_vf_data;
+
+ oct = (struct octep_vf_device *)wk->ctxptr;
+ link_info = &oct->link_info;
+ mbox = oct->mbox;
+ pf_vf_data = readq(mbox->mbox_read_reg);
+
+ notif = (union octep_pfvf_mbox_word *)&pf_vf_data;
+
+ switch (notif->s.opcode) {
+ case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS:
+ if (notif->s_link_status.status) {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP;
+ netif_carrier_on(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ } else {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN;
+ netif_carrier_off(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ }
+ break;
+ default:
+ dev_err(&oct->pdev->dev,
+ "Received unsupported notif %d\n", notif->s.opcode);
+ break;
+ }
+}
+
+static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ u64 reg_val = 0ull;
+ int count;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ writeq(cmd.u64, mbox->mbox_write_reg);
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
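+ /* Poll the written register: the PF overwrites the command word with
+ * its response, so a change in the register value indicates that a
+ * response has arrived.
+ */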
+ for (count = 0; count < OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) {
+ usleep_range(1000, 1500);
+ reg_val = readq(mbox->mbox_write_reg);
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) {
+ dev_err(&oct->pdev->dev, "mbox send command timed out\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT;
+ }
+ if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NACK;
+ }
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ int ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+ mutex_lock(&mbox->lock);
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) {
+ dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n",
+ cmd.s.opcode, oct->mbox_neg_ver);
+ mutex_unlock(&mbox->lock);
+ return -EOPNOTSUPP;
+ }
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp);
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int data_len = 0, tmp_len = 0;
+ int read_cnt, i = 0, ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+ }
+ /* The PF sends the data length of the requested command
+ * in the ACK.
+ */
+ data_len = *((int32_t *)rsp.s_data.data);
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
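+ /* The PF returns the data in fragments of up to
+ * OCTEP_PFVF_MBOX_MAX_DATA_SIZE bytes per mailbox word; keep
+ * requesting fragments until data_len bytes have been gathered.
+ */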
+ while (data_len) {
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ return ret;
+ }
+ if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) {
+ data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ mbox->mbox_data.recv_data[mbox->mbox_data.data_index] =
+ rsp.s_data.data[i];
+ mbox->mbox_data.data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, mbox->mbox_data.recv_data, tmp_len);
+ *size = tmp_len;
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu)
+{
+ int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret = 0;
+
+ if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) {
+ dev_err(&oct->pdev->dev,
+ "Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n",
+ mtu, ETH_MIN_MTU, ETH_MAX_MTU);
+ return -EINVAL;
+ }
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr[i];
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "get_mac: received NACK\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rsp.s_set_mac.mac_addr[i];
+ return 0;
+}
+
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_state.opcode = OCTEP_PFVF_MBOX_CMD_SET_RX_STATE;
+ cmd.s_link_state.state = state;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set Rx state received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS;
+ cmd.s_link_status.status = status;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set link status received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s.opcode = OCTEP_PFVF_MBOX_CMD_DEV_REMOVE;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, NULL);
+ return ret;
+}
+
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ oct->fw_info.pkind = rsp.s_fw_info.pkind;
+ oct->fw_info.fsz = rsp.s_fw_info.fsz;
+ oct->fw_info.rx_ol_flags = rsp.s_fw_info.rx_ol_flags;
+ oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags;
+
+ return 0;
+}
+
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads,
+ u16 rx_offloads)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS;
+ cmd.s_offloads.rx_ol_flags = rx_offloads;
+ cmd.s_offloads.tx_ol_flags = tx_offloads;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_offloads.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set offloads received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
new file mode 100644
index 000000000000..9b5efad37eab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_MBOX_H_
+#define _OCTEP_VF_MBOX_H_
+
+/* When a new command is implemented, the VF Mbox version should be bumped. */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4,
+ OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
+#define OCTEP_PFVF_MBOX_VERSION 0
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
+
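+/* Every mailbox exchange is a single 64-bit word. All views of the union
+ * share the opcode (8 bits) and type (2 bits) fields at the same bit
+ * positions; the remaining bits are interpreted per command.
+ */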
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct);
+void octep_vf_delete_mbox(struct octep_vf_device *oct);
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp);
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size);
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu);
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_version_check(struct octep_vf_device *oct);
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state);
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status);
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up);
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct);
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct);
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
new file mode 100644
index 000000000000..25e2a876ebba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CN9K_H_
+#define _OCTEP_VF_REGS_CN9K_H_
+
+/*############################ RST #########################*/
+#define CN93_VF_CONFIG_XPANSION_BAR 0x38
+#define CN93_VF_CONFIG_PCIE_CAP 0x70
+#define CN93_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_VF_RING_OFFSET BIT_ULL(17)
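+
+/* Registers of consecutive rings are spaced CN93_VF_RING_OFFSET (128KB)
+ * apart; the per-ring CSR macros below compute base + ring * offset.
+ */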
+
+/*###################### RING IN REGISTERS #########################*/
+#define CN93_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_VF_SDP_R_IN_CNTS_START 0x10050
+#define CN93_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_VF_SDP_R_IN_CONTROL(ring) \
+ (CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_ENABLE(ring) \
+ (CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_CNTS(ring) \
+ (CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CN93_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request.
+ * Set to the maximum value (4).
+ */
+#define CN93_VF_R_IN_CTL_IDLE BIT_ULL(28)
+#define CN93_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_VF_R_IN_CTL_IS_64B BIT_ULL(24)
+#define CN93_VF_R_IN_CTL_D_NSR BIT_ULL(8)
+#define CN93_VF_R_IN_CTL_D_ESR BIT_ULL(6)
+#define CN93_VF_R_IN_CTL_D_ROR BIT_ULL(5)
+#define CN93_VF_R_IN_CTL_NSR BIT_ULL(3)
+#define CN93_VF_R_IN_CTL_ESR BIT_ULL(1)
+#define CN93_VF_R_IN_CTL_ROR BIT_ULL(0)
+
+#define CN93_VF_R_IN_CTL_MASK (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CN93_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_VF_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_VF_SDP_R_OUT_CONTROL(ring) \
+ (CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_ENABLE(ring) \
+ (CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_CNTS(ring) \
+ (CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CN9K_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
new file mode 100644
index 000000000000..2e156745ef64
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CNXK_H_
+#define _OCTEP_VF_REGS_CNXK_H_
+
+/*############################ RST #########################*/
+#define CNXK_VF_CONFIG_XPANSION_BAR 0x38
+#define CNXK_VF_CONFIG_PCIE_CAP 0x70
+#define CNXK_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_VF_RING_OFFSET (0x1ULL << 17)
+
+/*###################### RING IN REGISTERS #########################*/
+#define CNXK_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_VF_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_VF_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_VF_SDP_R_ERR_TYPE(ring) \
+ (CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CONTROL(ring) \
+ (CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_ENABLE(ring) \
+ (CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CNTS(ring) \
+ (CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CNXK_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request.
+ * Set to the maximum value (4).
+ */
+#define CNXK_VF_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CNXK_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_VF_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CNXK_VF_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CNXK_VF_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CNXK_VF_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CNXK_VF_R_IN_CTL_NSR (0x1ULL << 3)
+#define CNXK_VF_R_IN_CTL_ESR (0x1ULL << 1)
+#define CNXK_VF_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CNXK_VF_R_IN_CTL_MASK (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CNXK_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_VF_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_VF_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_VF_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_WMARK(ring) \
+ (CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_CNTS(ring) \
+ (CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CNXK_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
new file mode 100644
index 000000000000..82821bc28634
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -ENOMEM, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+ put_page(page);
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_vf_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_vf_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an additional header is filled in by Octeon after the length field
+ * of Rx packets. This header carries extra packet information.
+ */
+ if (oct->fw_info.rx_ol_flags)
+ oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);
+
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_vf_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_vf_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_vf_oq_reset_indices(oq);
+}
+
+/**
+ * octep_vf_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_vf_free_oq(struct octep_vf_oq *oq)
+{
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+ int q_no = oq->q_no;
+
+ octep_vf_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_vf_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_oqs(struct octep_vf_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_vf_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_oq(oct->oq[i]);
+ }
+ return retval;
+}
+
+/**
+ * octep_vf_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_vf_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_oqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_vf_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter nears its maximum value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the count returned by the API accounts only for full packets, not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq, u16 pkts_to_process)
+{
+ struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
+ struct octep_vf_rx_buffer *buff_info;
+ struct octep_vf_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ u16 data_offset, rx_ol_flags;
+ struct sk_buff *skb;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Convert the big-endian length field to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->fw_info.rx_ol_flags) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE +
+ OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* The head fragment includes the response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_vf_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_vf_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_vf_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ smp_wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
new file mode 100644
index 000000000000..fe46838b5200
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_RX_H_
+#define _OCTEP_VF_RX_H_
+
+/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the receive buffer
+ * @info_ptr: DMA address of host memory, used by hardware to update the
+ * packet count. This is currently unused, to save PCI writes.
+ */
+struct octep_vf_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16);
+
+#define OCTEP_VF_OQ_DESC_SIZE (sizeof(struct octep_vf_oq_desc_hw))
+
+/* Rx offload flags */
+#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_VF_RX_OFFLOAD_CKSUM (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_VF_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_VF_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_VF_RX_CSUM_L4_VERIFIED | \
+ OCTEP_VF_RX_CSUM_IP_VERIFIED))
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has advertised support for it.
+ * It immediately follows the length header at the start of the Rx buffer.
+ */
+struct octep_vf_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 rsvd:48;
+
+ /* rx offload flags */
+ u16 rx_ol_flags;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_vf_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian and must be converted to CPU byte order.
+ * Octeon writes this at the beginning of the Rx buffer (skb->data).
+ */
+struct octep_vf_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_SIZE (sizeof(struct octep_vf_oq_resp_hw))
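+
+/* Layout of a received buffer as written by hardware: an 8-byte length
+ * header (octep_vf_oq_resp_hw) at the start of the buffer, optionally
+ * followed by an 8-byte extended header (octep_vf_oq_resp_hw_ext) when
+ * rx offloads are supported, followed by the packet data.
+ */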
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_vf_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_VF_OQ_RECVBUF_SIZE (sizeof(struct octep_vf_rx_buffer))
+
+/* Output Queue statistics. Each output queue has three stats fields. */
+struct octep_vf_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_VF_OQ_STATS_SIZE (sizeof(struct octep_vf_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_vf_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon OQ.
+ */
+struct octep_vf_oq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_vf_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_vf_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_vf_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_VF_OQ_SIZE (sizeof(struct octep_vf_oq))
+#endif /* _OCTEP_VF_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
new file mode 100644
index 000000000000..47a5c054fdb6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* Reset various index of Tx queue data structure. */
+static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_vf_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+}
+
+/**
+ * octep_vf_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
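+ /* Completed descriptors lie between flush_index and the hardware read
+ * index; walk that span, unmapping buffers and freeing skbs.
+ */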
+ while (likely(budget && (fi != iq->octep_vf_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
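+ /* Per the sglist layout in octep_vf_tx.h, hardware reads the four
+ * 16-bit lengths of each entry in big-endian order within the 64-bit
+ * word, so on a little-endian host the length for pointer i is at
+ * len[3 - (i & 3)].
+ */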
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
+ compl_bytes, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+
+ return !budget;
+}
+
+/**
+ * octep_vf_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_vf_iq_free_pending(struct octep_vf_iq *iq)
+{
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset queue indices
+ */
+void octep_vf_clean_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_vf_iq_free_pending(oct->iq[i]);
+ octep_vf_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_vf_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_vf_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_vf_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_vf_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_vf_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_vf_free_iq(struct octep_vf_iq *iq)
+{
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_vf_setup_iqs() - Set up resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_vf_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_vf_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
new file mode 100644
index 000000000000..f338b975103c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_TX_H_
+#define _OCTEP_VF_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
+struct octep_vf_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where
+ * the '+1' is for the main skb, which also goes to the Octeon hardware
+ * as a gather buffer. To allocate sufficient SGLIST entries for a
+ * packet with max fragments, align by adding 3 before calculating the
+ * max SGLIST entries per packet.
+ */
+#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
+ (OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))
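+/* For example, with the typical MAX_SKB_FRAGS of 17 (4K pages, default
+ * config), (17 + 1 + 3) / 4 = 5 SG entries per packet, i.e. 200 bytes
+ * of SGLIST per ring descriptor.
+ */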
+
+struct octep_vf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_vf_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))
+
+/* VF Hardware interface Tx statistics */
+struct octep_vf_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets dropped */
+ u64 dropped;
+
+ /* Reserved */
+ u64 reserved[13];
+};
+
+/* VF Input Queue statistics */
+struct octep_vf_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data from the host to the Octeon device. Each input queue (up to 4)
+ * of an Octeon device has one such structure to represent it.
+ */
+struct octep_vf_iq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_vf_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_vf_iq_stats stats;
+
+ /* Virtual base address of the input descriptor ring. */
+ struct octep_vf_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_vf_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_vf_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
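+/* ring_size_mask = max_count - 1 assumes max_count is a power of two,
+ * letting ring indices wrap with a mask instead of a modulo; an
+ * illustrative advance (not a verbatim excerpt from this driver):
+ *
+ *	next = (iq->host_write_index + 1) & iq->ring_size_mask;
+ */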
+
+/* Hardware Tx Instruction Header */
+struct octep_vf_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator: 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+static_assert(sizeof(struct octep_vf_instr_hdr) == 8);
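+/* Field widths sum to 16 + 20 + 6 + 6 + 14 + 1 + 1 = 64 bits, matching
+ * the 8-byte assert above.
+ */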
+
+/* Tx offload flags */
+#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_VF_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_VF_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_VF_TX_OFFLOAD_CKSUM (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_TX_OFFLOAD_TSO (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_VF_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))
+
+#define OCTEP_VF_TX_TSO(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO))
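+/* Illustrative check (not a verbatim driver excerpt):
+ *
+ *	if (OCTEP_VF_TX_TSO(ol_flags))
+ *		... the packet requested TCP/UDP segmentation offload ...
+ */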
+
+struct tx_mdata {
+ /* offload flags */
+ u16 ol_flags;
+
+ /* gso size */
+ u16 gso_size;
+
+ /* number of gso segments */
+ u16 gso_segs;
+
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
+};
+
+static_assert(sizeof(struct tx_mdata) == 16);
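+/* Four u16 fields (8 bytes) plus one u64 (8 bytes) give the 16-byte
+ * size asserted above.
+ */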
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional and filled by the driver based on firmware/hardware
+ * capabilities. These optional headers are collectively called Front
+ * Data, and their size is described by ih->fsz.
+ */
+struct octep_vf_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_vf_instr_hdr ih;
+ u64 ih64;
+ };
+
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);
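+/* dptr (8) + ih (8) + txm (16) + exhdr (32) = 64 bytes, as asserted above. */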
+
+#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))
+#endif /* _OCTEP_VF_TX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 9690ac01f02c..b92264d0a77e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index edeb0f737312..61ab7f66f053 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -837,6 +837,8 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_BPID = -434,
+ NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
@@ -1114,6 +1116,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
#define NIX_FLOW_KEY_TYPE_AH BIT(22)
@@ -1553,6 +1556,7 @@ struct flow_msg {
u32 mpls_lse[4];
u8 icmp_type;
u8 icmp_code;
+ __be16 tcp_flags;
};
struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index b0b4dea548e1..d883157393ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -85,8 +85,7 @@ enum npc_kpu_lc_ltype {
enum npc_kpu_ld_ltype {
NPC_LT_LD_TCP = 1,
NPC_LT_LD_UDP,
- NPC_LT_LD_ICMP,
- NPC_LT_LD_SCTP,
+ NPC_LT_LD_SCTP = 4,
NPC_LT_LD_ICMP6,
NPC_LT_LD_CUSTOM0,
NPC_LT_LD_CUSTOM1,
@@ -97,6 +96,7 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_ICMP,
};
enum npc_kpu_le_ltype {
@@ -140,14 +140,14 @@ enum npc_kpu_lg_ltype {
enum npc_kpu_lh_ltype {
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
- NPC_LT_LH_TU_ICMP,
- NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_SCTP = 4,
NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_CUSTOM0,
+ NPC_LT_LH_CUSTOM1,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
- NPC_LT_LH_CUSTOM0 = 0xE,
- NPC_LT_LH_CUSTOM1 = 0xF,
+ NPC_LT_LH_TU_ICMP = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
@@ -155,10 +155,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CPT_HDR_PTP_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CPT_HDR_PTP_PKIND = 54ULL,
NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
@@ -216,6 +217,7 @@ enum key_fields {
NPC_MPLS4_TTL,
NPC_TYPE_ICMP,
NPC_CODE_ICMP,
+ NPC_TCP_FLAGS,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index a820bad3abb2..41de72c8607f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -35,6 +35,7 @@
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
#define NPC_ETYPE_PPPOE 0x8864
+#define NPC_ETYPE_ERSPA 0x88be
#define NPC_PPP_IP 0x0021
#define NPC_PPP_IP6 0x0057
@@ -59,6 +60,9 @@
#define NPC_IPNH_MPLS 137
#define NPC_IPNH_HOSTID 139
#define NPC_IPNH_SHIM6 140
+#define NPC_IPNH_CUSTOM 253
+
+#define NPC_IP6_ROUTE_TYPE 4
#define NPC_UDP_PORT_PTP_E 319
#define NPC_UDP_PORT_PTP_G 320
@@ -187,6 +191,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
+ NPC_S_KPU2_MT,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -231,6 +236,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
NPC_S_KPU8_AH,
+ NPC_S_KPU8_CUSTOM,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
NPC_S_KPU9_TU_MPLS_IN_IP,
@@ -242,6 +248,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
NPC_S_KPU9_ESP,
+ NPC_S_KPU9_CUSTOM,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -318,10 +325,10 @@ enum npc_kpu_lc_uflag {
NPC_F_LC_U_UNK_PROTO = 0x10,
NPC_F_LC_U_IP_FRAG = 0x20,
NPC_F_LC_U_IP6_FRAG = 0x40,
+ NPC_F_LC_L_6TO4 = 0x80,
};
enum npc_kpu_lc_lflag {
NPC_F_LC_L_IP_IN_IP = 1,
- NPC_F_LC_L_6TO4,
NPC_F_LC_L_MPLS_IN_IP,
NPC_F_LC_L_IP6_TUN_IP6,
NPC_F_LC_L_IP6_MPLS_IN_IP,
@@ -334,6 +341,8 @@ enum npc_kpu_lc_lflag {
NPC_F_LC_L_EXT_MOBILITY,
NPC_F_LC_L_EXT_HOSTID,
NPC_F_LC_L_EXT_SHIM6,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
};
enum npc_kpu_ld_lflag {
@@ -970,10 +979,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 48, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
@@ -2786,6 +2795,24 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_MT, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_MT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4501,6 +4528,24 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0xff00,
NPC_IP_VER_6,
NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 1,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 2,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
0x0000,
0x0000,
},
@@ -4776,6 +4821,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
@@ -4884,6 +4938,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4,
@@ -5064,6 +5127,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
0x0000,
0x0000,
NPC_IP_VER_6,
@@ -5208,6 +5280,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5325,6 +5406,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5433,6 +5523,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5532,6 +5631,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5649,6 +5757,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5757,6 +5874,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5883,6 +6009,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5982,6 +6117,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6081,6 +6225,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6310,6 +6463,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0xffff,
0x0000,
0x0000,
+ 0x0009,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -6756,6 +6918,78 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
0x0000,
0xffff,
NPC_GRE_F_ROUTE,
@@ -6836,6 +7070,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU8_CUSTOM, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7304,6 +7547,24 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x4000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x6000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8384,7 +8645,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -8536,7 +8797,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
NPC_F_LA_U_HAS_IH_NIX,
@@ -8693,7 +8954,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
NPC_F_LA_U_HAS_HIGIG2,
@@ -8818,7 +9079,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
@@ -8947,7 +9208,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -9124,7 +9385,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
0,
@@ -9204,7 +9465,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -9213,7 +9474,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9228,7 +9489,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9324,7 +9585,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9428,7 +9689,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9532,7 +9793,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
0,
@@ -9628,7 +9889,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9684,7 +9945,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9757,7 +10018,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9772,7 +10033,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -9836,7 +10097,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_EXDSA,
NPC_F_LB_L_EXDSA,
@@ -9923,6 +10184,22 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9949,7 +10226,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10029,7 +10306,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10101,7 +10378,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10165,7 +10442,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10237,7 +10514,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10310,80 +10587,80 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ 6, 0, 42, 1, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_NSH, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
@@ -10397,7 +10674,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10469,7 +10746,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10533,7 +10810,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10605,7 +10882,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10685,7 +10962,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_DSA,
NPC_F_LB_L_DSA,
@@ -10733,7 +11010,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
NPC_F_LB_L_DSA_VLAN,
@@ -10894,7 +11171,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10942,7 +11219,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10990,7 +11267,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -11014,7 +11291,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 2, 0,
NPC_LID_LC, NPC_LT_NA,
0,
@@ -11063,15 +11340,15 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
- NPC_S_KPU5_IP, 10, 0,
+ NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
- NPC_S_KPU5_IP6, 10, 0,
+ 6, 0, 42, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
@@ -11119,7 +11396,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
+ 2, 0, 4, 2, 0,
NPC_S_KPU8_UDP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
@@ -11223,7 +11500,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 2, 0,
+ 2, 8, 4, 2, 0,
NPC_S_KPU8_UDP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
@@ -11450,6 +11727,22 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0, 0,
NPC_S_KPU6_IP6_ROUT, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
NPC_F_LC_L_EXT_ROUT,
0, 0, 0, 0,
},
@@ -11695,6 +11988,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP,
@@ -11791,6 +12092,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
@@ -11951,6 +12260,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP6,
@@ -12080,6 +12397,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12184,6 +12509,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12280,6 +12613,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12368,6 +12709,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12472,6 +12821,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12568,6 +12925,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12681,6 +13046,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12769,6 +13142,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12857,6 +13238,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -13058,6 +13447,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
NPC_S_KPU9_ESP, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -13458,6 +13855,70 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 24, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_GRE,
@@ -13529,6 +13990,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LD, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_CUSTOM, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_CUSTOM0,
+ 0,
+ 0, 0xff, 0, 0,
+ },
+ {
NPC_ERRLEV_LD, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -13946,6 +14415,22 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -15105,7 +15590,9 @@ static struct npc_lt_def_cfg npc_lt_defaults = {
},
.rx_et = {
{
- .lid = NPC_LID_LB,
+ .offset = -2,
+ .valid = 1,
+ .lid = NPC_LID_LC,
.ltype_match = NPC_LT_NA,
.ltype_mask = 0x0,
},
@@ -15139,6 +15626,12 @@ static struct npc_mcam_kex npc_mkex_default = {
/* Ethertype: 2 bytes, KW0[55:40] */
KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
},
+ [NPC_LT_LA_CPT_HDR] = {
+ /* DMAC: 6 bytes, KW1[55:8] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
/* Layer A: HiGig2: */
[NPC_LT_LA_HIGIG2_ETHER] = {
/* Classification: 2 bytes, KW1[23:8] */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c1d04a3c559..07d4859de53a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -817,6 +817,8 @@ static int rvu_fwdata_init(struct rvu *rvu)
err = cgx_get_fwdata_base(&fwdbase);
if (err)
goto fail;
+
+ BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
if (!rvu->fwdata)
goto fail;
@@ -1484,7 +1486,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
blkaddr = pf->nix_blkaddr;
- } else if (is_afvf(pcifunc)) {
+ } else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
/* Assign NIX based on VF number. All even numbered VFs get
* NIX0 and odd numbered gets NIX1
@@ -2034,7 +2036,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
u16 target;
/* Only PF can add VF permissions */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
return -EOPNOTSUPP;
target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
@@ -2618,6 +2620,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
+ /* Free allocated BPIDs */
+ rvu_nix_flr_free_bpids(rvu, pcifunc);
+
/* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
@@ -3151,6 +3156,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
int err, chans, vfs;
+ int pos = 0;
if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
dev_warn(&pdev->dev,
@@ -3158,6 +3164,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
+ /* Get the RVU VF device ID */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
+
chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 43be37dd1f32..f390525a6217 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -288,6 +288,16 @@ enum rvu_pfvf_flags {
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+struct nix_bp {
+ struct rsrc_bmap bpids; /* free bpids bitmap */
+ u16 cgx_bpid_cnt;
+ u16 sdp_bpid_cnt;
+ u16 free_pool_base;
+ u16 *fn_map; /* pcifunc mapping */
+ u8 *intf_map; /* interface type map */
+ u8 *ref_cnt;
+};
+
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -363,6 +373,7 @@ struct nix_hw {
struct nix_lso lso;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
+ struct nix_bp bp;
u64 *tx_credits;
u8 cc_mcs_cnt;
};
@@ -432,6 +443,13 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct channel_fwdata {
+ struct sdp_node_info info;
+ u8 valid;
+#define RVU_CHANL_INFO_RESERVED 379
+ u8 reserved[RVU_CHANL_INFO_RESERVED];
+};
+
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@@ -450,11 +468,13 @@ struct rvu_fwdata {
u64 msixtr_base;
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
-#define FWDATA_RESERVED_MEM 1022
+ struct channel_fwdata channel_data;
+#define FWDATA_RESERVED_MEM 958
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
#define CGX_LMACS_USX 8
+#define FWDATA_CGX_LMAC_OFFSET 10536
union {
struct cgx_lmac_fwdata_s
cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
@@ -503,6 +523,7 @@ struct rvu {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
+ u16 vf_devid; /* VF device ID */
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -732,9 +753,11 @@ static inline bool is_rvu_supports_nix1(struct rvu *rvu)
/* Function Prototypes
* RVU
*/
-static inline bool is_afvf(u16 pcifunc)
+#define RVU_LBK_VF_DEVID 0xA0F8
+static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc)
{
- return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+ return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) &&
+ (rvu->vf_devid == RVU_LBK_VF_DEVID));
}
static inline bool is_vf(u16 pcifunc)
@@ -794,7 +817,7 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
-bool is_sdp_vf(u16 pcifunc);
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
@@ -873,6 +896,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index e6d7914ce61c..2500f5ba4f5a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2870,6 +2870,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
+ case NPC_TCP_FLAGS:
+ seq_printf(s, "%d ", rule->packet.tcp_flags);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
+ break;
case NPC_IPSEC_SPI:
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 66203a90f052..d39001cdc707 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -499,29 +499,115 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
+#define NIX_BPIDS_PER_LMAC 8
+#define NIX_BPIDS_PER_CPT 1
+static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
+{
+ struct nix_bp *bp = &hw->bp;
+ int err, max_bpids;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
+
+ /* Reserve the BPIDs for CGX and SDP */
+ bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
+ bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
+ bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
+ NIX_BPIDS_PER_CPT;
+ bp->bpids.max = max_bpids - bp->free_pool_base;
+
+ err = rvu_alloc_bitmap(&bp->bpids);
+ if (err)
+ return err;
+
+ bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!bp->fn_map)
+ return -ENOMEM;
+
+ bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->intf_map)
+ return -ENOMEM;
+
+ bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->ref_cnt)
+ return -ENOMEM;
+
+ return 0;
+}
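+/* Resulting BPID space layout (illustrative):
+ *
+ *   [0 .. cgx_bpid_cnt - 1]                 reserved for CGX LMACs
+ *   [cgx_bpid_cnt .. + sdp_bpid_cnt - 1]    reserved for SDP channels
+ *   [.. + NIX_BPIDS_PER_CPT - 1]            reserved for CPT
+ *   [free_pool_base .. max_bpids - 1]       free pool (LBK VFs)
+ */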
+
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, bpid, err;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
+
+ if (!is_lbk_vf(rvu, pcifunc))
+ return;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return;
+
+ bp = &nix_hw->bp;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->ref_cnt[bpid]--;
+ if (bp->ref_cnt[bpid])
+ continue;
+ rvu_free_rsrc(&bp->bpids, bpid);
+ bp->fn_map[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+}
+
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, pf, type, err;
+ u16 chan_base, chan, bpid;
struct rvu_pfvf *pfvf;
- int blkaddr, pf, type;
- u16 chan_base, chan;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+ bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16));
+
+ if (type == NIX_INTF_TYPE_LBK) {
+ bpid = cfg & GENMASK(8, 0);
+ mutex_lock(&rvu->rsrc_lock);
+ rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->fn_map[bpid] = 0;
+ bp->ref_cnt[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ }
}
return 0;
}
@@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
- int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
- u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+ int bpid, blkaddr, sdp_chan_base, err;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
u8 cgx_id, lmac_id;
- u64 cfg;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- lmac_chan_cnt = cfg & 0xFF;
+ struct nix_bp *bp;
- cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
- lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
- sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
- pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ bp = &nix_hw->bp;
/* Backpressure IDs range division
* CGX channles are mapped to (0 - 191) BPIDs
@@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 16)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
- bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
- (lmac_id * lmac_chan_cnt) + req->chan_base;
+ bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
+ (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > cgx_bpid_cnt)
- return -EINVAL;
+ if (bpid > bp->cgx_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID;
break;
case NIX_INTF_TYPE_LBK:
- if ((req->chan_base + req->chan_cnt) > 63)
- return -EINVAL;
- bpid = cgx_bpid_cnt + req->chan_base;
- if (req->bpid_per_chan)
- bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
- return -EINVAL;
+ /* Alloc bpid from the free pool */
+ mutex_lock(&rvu->rsrc_lock);
+ bpid = rvu_alloc_rsrc(&bp->bpids);
+ if (bpid < 0) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return NIX_AF_ERR_INVALID_BPID;
+ }
+ bp->fn_map[bpid] = req->hdr.pcifunc;
+ bp->ref_cnt[bpid]++;
+ bpid += bp->free_pool_base;
+ mutex_unlock(&rvu->rsrc_lock);
break;
case NIX_INTF_TYPE_SDP:
- if ((req->chan_base + req->chan_cnt) > 255)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
- bpid = sdp_bpid_cnt + req->chan_base;
+ /* Handle the use case of 2 SDP blocks */
+ if (!hw->cap.programmable_chans)
+ sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
+ else
+ sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
+
+ bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
- return -EINVAL;
+ if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
+ return NIX_AF_ERR_INVALID_BPID;
break;
default:
return -EINVAL;
@@ -612,7 +703,7 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
@@ -1523,7 +1614,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
@@ -1899,7 +1990,7 @@ static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- if (is_afvf(pcifunc)) {/* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
return hw->cgx_links;
} else if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -1916,7 +2007,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
- if (is_afvf(pcifunc)) { /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -3356,7 +3447,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
@@ -3703,7 +3794,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
@@ -4039,6 +4130,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_CUSTOM0:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 6;
+ field->bytesm1 = 1; /* 2 Bytes*/
+ field->ltype_match = NPC_LT_LC_CUSTOM0;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_VLAN:
field->lid = NPC_LID_LB;
field->hdr_offset = 2; /* Skip TPID (2-bytes) */
@@ -4420,7 +4518,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4784,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_bpids(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
/* Configure segmentation offload formats */
nix_setup_lso(rvu, nix_hw, blkaddr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 167145bdcb75..e350242bbafb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
return 0;
}
-static int npc_mcam_verify_pf_func(struct rvu *rvu,
- struct mcam_entry *entry_data, u8 intf,
- u16 pcifunc)
-{
- u16 pf_func, pf_func_mask;
-
- if (is_npc_intf_rx(intf))
- return 0;
-
- pf_func_mask = (entry_data->kw_mask[0] >> 32) &
- NPC_KEX_PF_FUNC_MASK;
- pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
-
- pf_func = be16_to_cpu((__force __be16)pf_func);
- if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
- ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
- (pcifunc & ~RVU_PFVF_FUNC_MASK)))
- return -EINVAL;
-
- return 0;
-}
-
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -417,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
- if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
(owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@@ -437,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
return;
}
+ /* AF modifies the given action only if the PF/VF has requested it */
+ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+ return;
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -626,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
- if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -791,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
return;
/* Skip LBK VFs */
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
return;
/* If pkt replication is not supported,
@@ -871,7 +853,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
- if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1850,8 +1832,8 @@ void npc_mcam_rsrcs_deinit(struct rvu *rvu)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- kfree(mcam->bmap);
- kfree(mcam->bmap_reverse);
+ bitmap_free(mcam->bmap);
+ bitmap_free(mcam->bmap_reverse);
kfree(mcam->entry2pfvf_map);
kfree(mcam->cntr2pfvf_map);
kfree(mcam->entry2cntr_map);
@@ -1904,21 +1886,20 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
/* Allocate bitmaps for managing MCAM entries */
- mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap)
return -ENOMEM;
- mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap_reverse)
goto free_bmap;
mcam->bmap_fcnt = mcam->bmap_entries;
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
- mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
+
if (!mcam->entry2pfvf_map)
goto free_bmap_reverse;
@@ -1941,21 +1922,21 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (err)
goto free_entry_map;
- mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr2pfvf_map)
goto free_cntr_bmap;
/* Alloc memory for MCAM entry to counter mapping and for tracking
* counter's reference count.
*/
- mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->entry2cntr_map)
goto free_cntr_map;
- mcam->cntr_refcnt = kmalloc_array(mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr_refcnt)
goto free_entry_cntr_map;
@@ -1988,9 +1969,9 @@ free_cntr_bmap:
free_entry_map:
kfree(mcam->entry2pfvf_map);
free_bmap_reverse:
- kfree(mcam->bmap_reverse);
+ bitmap_free(mcam->bmap_reverse);
free_bmap:
- kfree(mcam->bmap);
+ bitmap_free(mcam->bmap);
return -ENOMEM;
}
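The hunks above convert the MCAM bitmaps from open-coded kmalloc_array(BITS_TO_LONGS(nbits), sizeof(long)) allocations to the dedicated bitmap helpers, which also zero the map. A minimal sketch of the pairing, using a hypothetical nbits instead of the driver's mcam->bmap_entries:

#include <linux/bitmap.h>
#include <linux/gfp.h>

static unsigned long *alloc_entry_map(unsigned int nbits)
{
	/* Sizes the allocation from nbits and zeroes it, replacing
	 * kmalloc_array(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL).
	 */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void free_entry_map(unsigned long *map)
{
	/* Must pair with bitmap_zalloc(); plain kfree() happens to work
	 * today but breaks if the bitmap allocator ever changes.
	 */
	bitmap_free(map);
}

The companion kmalloc_array()-to-kcalloc() changes serve the same goal: the maps start zeroed instead of holding stale heap contents.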
@@ -2852,12 +2833,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (!is_pffunc_af(pcifunc) &&
- npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
- rc = NPC_MCAM_INVALID_REQ;
- goto exit;
- }
-
/* For AF installed rules, the nix_intf should be set to target NIX */
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
@@ -3209,10 +3184,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- req->hdr.pcifunc))
- return NPC_MCAM_INVALID_REQ;
-
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c75669c8fde7..c181e7aa9eb6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -53,6 +53,7 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS4_TTL] = "lse depth 4",
[NPC_TYPE_ICMP] = "icmp type",
[NPC_CODE_ICMP] = "icmp code",
+ [NPC_TCP_FLAGS] = "tcp flags",
[NPC_UNKNOWN] = "unknown",
};
@@ -530,6 +531,7 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
+ NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
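The new NPC_SCAN_HDR line pulls two bytes at offset 12 of the TCP header, which hold the 4-bit data offset, the reserved bits, and the flag bits, so matching a 16-bit field with a mask is enough to isolate the flags. A standalone illustration of that layout (the helper is hypothetical, not driver code):

#include <stdint.h>

/* Bytes 12-13 of a TCP header: data offset (4 bits), reserved (3 bits),
 * then NS/CWR/ECE/URG/ACK/PSH/RST/SYN/FIN in the low 9 bits.
 */
static uint16_t tcp_flags_from_hdr(const uint8_t *tcp_hdr)
{
	uint16_t off_and_flags = ((uint16_t)tcp_hdr[12] << 8) | tcp_hdr[13];

	return off_and_flags & 0x01ff;	/* mask away offset + reserved */
}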
@@ -574,7 +576,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
- BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
+ BIT_ULL(NPC_TCP_FLAGS);
/* for tcp/udp/sctp corresponding layer type should be in the key */
if (*features & proto_flags) {
@@ -982,7 +985,8 @@ do { \
mask->icmp_type, 0);
NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
mask->icmp_code, 0);
-
+ NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
+ ntohs(mask->tcp_flags), 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 6f73ad9807f0..086f05c0376f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -439,6 +439,9 @@
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
+#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
+#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index ae50d56258ec..38cfe148f4b7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc)
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
-bool is_sdp_vf(u16 pcifunc)
+#define RVU_SDP_VF_DEVID 0xA0F7
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
{
+ if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
+ return (rvu->vf_devid == RVU_SDP_VF_DEVID);
+
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
+ if (rvu->fwdata->channel_data.valid) {
+ sdp_pf_num[0] = 0;
+ pfvf = &rvu->pf[sdp_pf_num[0]];
+ pfvf->sdp_info = &rvu->fwdata->channel_data.info;
+
+ return 0;
+ }
+
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 7ca6941ea0b9..02d0b707aea5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
- if (err)
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
return err;
+ }
}
sq->head = 0;
@@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
+ return err;
+ }
+
+ return 0;
}
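Both error paths added here free sq->sg and clear the pointer, so a failed otx2_sq_init() leaves no dangling reference for a later retry or teardown to trip over. The shape of the fix, sketched with hypothetical names:

#include <linux/slab.h>

struct sq_ctx {
	void *sg;
	int cnt;
};

static int later_step(struct sq_ctx *c)
{
	return 0;	/* stand-in for qmem_alloc()/sq_aq_init() */
}

static int sq_init_sketch(struct sq_ctx *c)
{
	int err;

	c->sg = kcalloc(c->cnt, sizeof(long), GFP_KERNEL);
	if (!c->sg)
		return -ENOMEM;

	err = later_step(c);
	if (err) {
		/* Undo the earlier allocation and NULL the pointer so a
		 * retried init cannot double-free or reuse stale memory.
		 */
		kfree(c->sg);
		c->sg = NULL;
		return err;
	}
	return 0;
}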
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2928898c7f8d..7f786de61014 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev,
pfvf->hw.tx_queues = channel->tx_count;
if (pfvf->xdp_prog)
pfvf->hw.xdp_queues = channel->rx_count;
- pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
if (if_up)
err = dev->netdev_ops->ndo_open(dev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index a57455aebff6..e5fe67e73865 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev)
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
+ pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
pf->hw.tc_tx_queues);
@@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
xdp_features_clear_redirect_target(dev);
}
- pf->hw.non_qos_queues += pf->hw.xdp_queues;
-
if (if_up)
otx2_open(pf->netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 4fd44b6eecea..87bdb93cb066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -638,6 +638,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -857,6 +858,16 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+
+ flow_spec->tcp_flags = match.key->flags;
+ flow_mask->tcp_flags = match.mask->flags;
+ req->features |= BIT_ULL(NPC_TCP_FLAGS);
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
u8 bit;
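The added FLOW_DISSECTOR_KEY_TCP block follows the usual flow_offload pattern: test for the key, fetch key/mask, copy both into the hardware request. A condensed sketch of that pattern; the output parameters stand in for the driver's flow_spec/flow_mask fields:

#include <net/flow_offload.h>

static bool copy_tcp_flags(const struct flow_rule *rule,
			   __be16 *flags_key, __be16 *flags_mask)
{
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return false;	/* filter does not match on TCP flags */

	flow_rule_match_tcp(rule, &match);
	*flags_key = match.key->flags;		/* big-endian on the wire */
	*flags_mask = match.mask->flags;
	return true;	/* caller then sets BIT_ULL(NPC_TCP_FLAGS) */
}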
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 4d519ea833b2..f828d32737af 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
bool *need_xdp_flush)
{
- unsigned char *hard_start, *data;
+ unsigned char *hard_start;
int qidx = cq->cq_idx;
struct xdp_buff xdp;
struct page *page;
@@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
- data = (unsigned char *)phys_to_virt(pa);
- hard_start = page_address(page);
- xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ hard_start = (unsigned char *)phys_to_virt(pa);
+ xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
cqe->sg.seg_size, false);
act = bpf_prog_run_xdp(prog, &xdp);
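The fix takes hard_start straight from the buffer's physical address and passes the fixed OTX2_HEAD_ROOM, rather than deriving headroom from page_address(), which miscomputed it whenever the payload did not begin at a page boundary. A hedged reminder of the xdp_prepare_buff() contract (wrapper and names are illustrative):

#include <net/xdp.h>

static void prepare_rx_xdp(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
			   unsigned char *hard_start, u32 frame_sz,
			   u32 headroom, u32 len)
{
	xdp_init_buff(xdp, frame_sz, rxq);
	/* Sets data_hard_start = hard_start, data = hard_start + headroom,
	 * data_end = data + len; headroom must be measured from the
	 * buffer's true start, not from the start of its backing page.
	 */
	xdp_prepare_buff(xdp, hard_start, headroom, len, false);
}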
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a6e91573f8da..de123350bd46 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4761,7 +4761,10 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
+ if (!err)
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
if (err) {
dev_err(&pdev->dev, "Wrong DMA config\n");
return -EINVAL;
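Splitting the combined call lets the streaming mask stay at 36 bits while the coherent mask drops to 32, presumably because descriptor rings must remain below 4 GiB even when data buffers may not. The pattern in isolation:

#include <linux/dma-mapping.h>

static int set_asymmetric_dma_masks(struct device *dev)
{
	int err;

	/* Streaming (dma_map_*) buffers may use 36-bit addresses... */
	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err)
		return err;

	/* ...but coherent allocations (e.g. rings) stay 32-bit. */
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}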
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index d58b07e7e123..7063c78bd35f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
int i;
for (i = 0; i < q->n_desc; i++) {
@@ -301,19 +300,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
entry->buf = NULL;
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
-
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
@@ -323,12 +315,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(buf);
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
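Both the tx_clean and rx_clean hunks replace the same open-coded drain with page_frag_cache_drain(). A sketch of the equivalence, assuming the helper's semantics as introduced alongside this conversion:

#include <linux/gfp.h>
#include <linux/mm.h>

static void drain_cache_open_coded(struct page_frag_cache *nc)
{
	struct page *page;

	if (!nc->va)
		return;

	/* Drop the pagecnt_bias references still held on the cached
	 * page, then forget the cache -- what page_frag_cache_drain(nc)
	 * now does in a single call.
	 */
	page = virt_to_page(nc->va);
	__page_frag_cache_drain(page, nc->pagecnt_bias);
	nc->va = NULL;
}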
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f5b1f8c7834f..7f20813456e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2199,8 +2199,9 @@ reset_slave:
if (cmd != MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
slave, cmd);
- /* Turn on internal error letting slave reset itself immeditaly,
- * otherwise it might take till timeout on command is passed
+ /* Turn on internal error letting slave reset itself
+ * immediately, otherwise it might take till timeout on
+ * command is passed
*/
reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
}
@@ -2954,7 +2955,7 @@ static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
dummy_admin.default_vlan = vlan;
/* VF wants to move to other VST state which is valid with current
- * rate limit. Either differnt default vlan in VST or other
+ * rate limit. Either different default vlan in VST or other
* supported QoS priority. Otherwise we don't allow this change when
* the TX rate is still configured.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 4d4f9cf9facb..e130e7259275 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -115,7 +115,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;
@@ -137,7 +137,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 9e3b76182088..cd754cd76bde 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -96,8 +96,8 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
#define MLX4_EN_WRAP_AROUND_SEC 10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
- * good chance we wont miss a wrap around.
- * TOTO: Use a timer instead of a work queue to increase the guarantee.
+ * good chance we won't miss a wrap around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
*/
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 33bbcced8105..5d3fde63b273 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -42,6 +42,7 @@
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>
+#include <net/rps.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -1072,7 +1073,8 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1, MLX4_MCAST_CONFIG);
/* Update multicast list - we cache all addresses so they won't
- * change while HW is updated holding the command semaphor */
+ * change while HW is updated holding the command semaphore
+ */
netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
@@ -1817,7 +1819,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_set_rss_steer_rules(priv))
mlx4_warn(mdev, "Failed setting steering rules\n");
- /* Attach rx QP to bradcast address */
+ /* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a09b6e05337d..eac49657bd07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -762,7 +762,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+ en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 65cb63f6c465..1ddb11cb25f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -992,7 +992,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->ts_requested = 1;
}
- /* Prepare ctrl segement apart opcode+ownership, which depends on
+ /* Prepare ctrl segment apart opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 6598b10a9ff4..9572a45f6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -210,7 +210,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
- /* ensure all information is written before setting the ownersip bit */
+ /* ensure all information is written before setting the ownership bit */
dma_wmb();
s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
index 954b86faac29..40ca29bb928c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -44,7 +44,7 @@
/* Default supported priorities for VPP allocation */
#define MLX4_DEFAULT_QOS_PRIO (0)
-/* Derived from FW feature definition, 0 is the default vport fo all QPs */
+/* Derived from FW feature definition, 0 is the default vport for all QPs */
#define MLX4_VPP_DEFAULT_VPORT (0)
struct mlx4_vport_qos_param {
@@ -98,7 +98,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
u16 *available_vpp, u8 *vpp_p_up);
/**
- * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities.
* The total number of VPPs assigned to all for a port must not exceed
* the value reported by available_vpp in mlx4_ALLOCATE_VPP_get.
* VPP allocation is allowed only after the port type has been set,
@@ -113,7 +113,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
/**
- * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport.
* Each priority allowed for the Vport is assigned with a share of the BW,
* and a BW limitation. This commands query the current QoS values.
*
@@ -128,7 +128,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
struct mlx4_vport_qos_param *out_param);
/**
- * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport.
* QoS parameters can be modified at any time, but must be initialized
* before any QP is associated with the VPort.
*
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2581226836b5..7b02ff61126d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -129,7 +129,7 @@ static const struct mlx4_profile default_profile = {
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 19,
- .num_mtt = 1 << 20, /* It is really num mtt segements */
+ .num_mtt = 1 << 20, /* It is really num mtt segments */
};
static const struct mlx4_profile low_mem_profile = {
@@ -1508,7 +1508,7 @@ static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
priv->v2p.port1 = port1;
priv->v2p.port2 = port2;
} else {
- mlx4_err(dev, "Failed to change port mape: %d\n", err);
+ mlx4_err(dev, "Failed to change port map: %d\n", err);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index e9cd4bb6f83d..d3d9ec042d2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -112,7 +112,7 @@ struct mlx4_en_stat_out_flow_control_mbox {
__be64 tx_pause_duration;
/* Number of transmitter transitions from XOFF state to XON state */
__be64 tx_pause_transition;
- /* Reserverd */
+ /* Reserved */
__be64 reserved[2];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 256a06b3c096..4e43f4a7d246 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -2118,7 +2118,7 @@ static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
* @data: output buffer to put the requested data into.
*
* Reads cable module eeprom data, puts the outcome data into
- * data pointer paramer.
+ * data pointer parameter.
* Returns num of read bytes on success or a negative error
* code.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a7b1f9686c09..4957412ff1f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
{
const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
+ unsigned long flags;
if (!err || !(strcmp(namep, "unknown command opcode")))
return;
@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats = xa_load(&dev->cmd.stats, opcode);
if (!stats)
return;
- spin_lock_irq(&stats->lock);
+ spin_lock_irqsave(&stats->lock, flags);
stats->failed++;
if (err < 0)
stats->last_failed_errno = -err;
@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats->last_failed_mbox_status = status;
stats->last_failed_syndrome = syndrome;
}
- spin_unlock_irq(&stats->lock);
+ spin_unlock_irqrestore(&stats->lock, flags);
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
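cmd_status_log() can be reached from contexts where interrupts may already be disabled, so the _irqsave variant preserves the caller's interrupt state instead of unconditionally re-enabling IRQs at unlock. The pattern, reduced to a minimal sketch:

#include <linux/spinlock.h>

static void bump_stat_any_context(spinlock_t *lock, u64 *counter)
{
	unsigned long flags;

	/* spin_unlock_irq() would force interrupts back on, corrupting
	 * state if the caller had them off; irqsave/irqrestore returns
	 * to exactly the state the caller was in.
	 */
	spin_lock_irqsave(lock, flags);
	(*counter)++;
	spin_unlock_irqrestore(lock, flags);
}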
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index cf0477f53dc4..47e7c2639774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev)
return false;
if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
- mlx5_core_warn(dev, "Missing SyncE capability\n");
+ mlx5_core_dbg(dev, "Missing SyncE capability\n");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6fe..98d4306929f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+ !dev->priv.fw_reset) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+ return -EOPNOTSUPP;
+ }
+
if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 18fed2b34fb1..904e08de852e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -41,6 +41,7 @@ struct mlx5_dpll_synce_status {
enum mlx5_msees_oper_status oper_status;
bool ho_acq;
bool oper_freq_measure;
+ enum mlx5_msees_failure_reason failure_reason;
s32 frequency_diff;
};
@@ -60,6 +61,7 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
+ synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason);
synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
return 0;
}
@@ -99,6 +101,26 @@ mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
}
}
+static enum dpll_lock_status_error
+mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status)
+{
+ switch (synce_status->oper_status) {
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING:
+ switch (synce_status->failure_reason) {
+ case MLX5_MSEES_FAILURE_REASON_PORT_DOWN:
+ return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN;
+ case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF:
+ return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH;
+ default:
+ return DPLL_LOCK_STATUS_ERROR_UNDEFINED;
+ }
+ default:
+ return DPLL_LOCK_STATUS_ERROR_NONE;
+ }
+}
+
static enum dpll_pin_state
mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
@@ -118,10 +140,11 @@ mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
return 0;
}
-static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = priv;
@@ -131,6 +154,7 @@ static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
if (err)
return err;
*status = mlx5_dpll_lock_status_get(&synce_status);
+ *status_error = mlx5_dpll_lock_status_error_get(&synce_status);
return 0;
}
@@ -261,7 +285,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
{
if (mdpll->tracking_netdev)
return;
- netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
mdpll->tracking_netdev = netdev;
}
@@ -269,7 +293,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
if (!mdpll->tracking_netdev)
return;
- netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ dpll_netdev_pin_clear(mdpll->tracking_netdev);
mdpll->tracking_netdev = NULL;
}
@@ -389,7 +413,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev)
struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
struct mlx5_core_dev *mdev = mdpll->mdev;
- cancel_delayed_work(&mdpll->work);
+ cancel_delayed_work_sync(&mdpll->work);
mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
destroy_workqueue(mdpll->wq);
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0bfe1ca8a364..55c6ace0acd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index e1283531e0b8..671adbad0a40 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
+ ft->g = NULL;
kvfree(in);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 284253b79266..5757f4f10c12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
- /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
- u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 headroom;
+
+ if (no_head_tail_room)
+ return SKB_DATA_ALIGN(hw_mtu);
+ headroom = mlx5e_get_linear_rq_headroom(params, NULL);
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
+ bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+ no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+
+ /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
+ * no_head_tail_room should be set in the case of XDP with Striding RQ
+ * when SKB is not linear. This is because another page is allocated for the linear part.
+ */
+ sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
/* XDP in mlx5e doesn't support multiple packets per page.
* Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
- /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
+ * to exclude headroom and tailroom from calculations.
+ * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
+ * since packet data buffers don't have headroom and tailroom reserved for the SKB.
+ * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
@@ -1064,8 +1078,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp =
- mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
+ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index c206cc0a8483..a848a7fda66b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
WARN_ON_ONCE(tracker->inuse);
tracker->inuse = true;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_add_tail(&tracker->entry, &list->tracker_list_head);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
WARN_ON_ONCE(!tracker->inuse);
tracker->inuse = false;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_del(&tracker->entry);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
- spin_lock(&cqe_list->tracker_list_lock);
+ spin_lock_bh(&cqe_list->tracker_list_lock);
list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
struct sk_buff *skb =
mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
pos->inuse = false;
list_del(&pos->entry);
}
- spin_unlock(&cqe_list->tracker_list_lock);
+ spin_unlock_bh(&cqe_list->tracker_list_lock);
}
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
- md_buff[*md_buff_sz++] = metadata_id;
+ md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
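The md_buff change is a one-character precedence fix: postfix ++ binds tighter than unary *, so *md_buff_sz++ advanced the pointer itself instead of incrementing the count it points to. A standalone demonstration of the difference:

#include <assert.h>

int main(void)
{
	unsigned char buf[4] = { 0 };
	unsigned char n = 0;
	unsigned char *sz = &n;

	buf[(*sz)++] = 7;	/* writes buf[0], then n becomes 1 */
	assert(n == 1 && buf[0] == 7);

	/* buf[*sz++] would instead index with *sz and then advance the
	 * pointer sz -- the bug the hunk above removes.
	 */
	return 0;
}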
@@ -935,6 +935,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
mlx5e_trigger_napi_sched(&c->napi);
}
@@ -943,8 +944,10 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
mlx5e_deactivate_rq(&c->rq);
+ }
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 86bf007fd05b..b500cc2c9689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 161c5190c236..c54fd01ea635 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
+ attrs->dir = x->xso.dir;
+
/* esn */
if (x->props.flags & XFRM_STATE_ESN) {
attrs->replay_esn.trigger = true;
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ goto skip_replay_window;
+
switch (x->replay_esn->replay_window) {
case 32:
attrs->replay_esn.replay_window =
@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
}
}
- attrs->dir = x->xso.dir;
+skip_replay_window:
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+ if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
+ x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {
@@ -978,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &work->work);
}
-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct net *net = dev_net(x->xso.dev);
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+ x->stats.integrity_failed += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return;
+
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
x->curlft.packets += packets;
x->curlft.bytes += bytes;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+ x->stats.replay += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ }
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
@@ -1150,7 +1176,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
- .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index adaea3493193..7d943e93cf6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
- atomic64_t ipsec_rx_drop_syndrome;
atomic64_t ipsec_tx_drop_bundle;
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 51a144246ea6..727fa7c18523 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -304,12 +304,6 @@ drop:
return false;
}
-enum {
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
-};
-
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data)
@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
-
- switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
- xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
- break;
- default:
- atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
- }
+ xo->status = CRYPTO_SUCCESS;
}
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada
return err;
}
- *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
-
+ *metadata = ipsec_obj_id;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2ed99772f168..82064614846f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,7 +43,6 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
-#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index e0e36a09721c..dd36b04e30a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index d4ebd8743114..b2cabd6ab86c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *sa,
- bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
fs_id);
- mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
+{
+ mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa, bool encrypt,
+ bool is_tx, u32 *fs_id)
+{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+ if (!macsec_rule)
+ return -ENOMEM;
+
+ sa->macsec_rule = macsec_rule;
+
+ return 0;
+}
+
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
- struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
if (err)
return err;
- rule_attrs.macsec_obj_id = sa->macsec_obj_id;
- rule_attrs.sci = sa->sci;
- rule_attrs.assoc_num = sa->assoc_num;
- rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
- macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
- if (!macsec_rule) {
- err = -ENOMEM;
- goto destroy_macsec_object;
+ if (sa->active) {
+ err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+ if (err)
+ goto destroy_macsec_object;
}
- sa->macsec_rule = macsec_rule;
-
return 0;
destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
goto destroy_sa;
macsec_device->tx_sa[assoc_num] = tx_sa;
- if (!secy->operational ||
- assoc_num != tx_sc->encoding_sa ||
- !tx_sa->active)
+ if (!secy->operational)
goto out;
err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ if (rx_sa->active)
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
- &rx_sc->sc_xarray_element->fs_id);
+ err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index bb7f86c993e5..c7f542d0b8f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/rps.h>
#include "en.h"
#define ARFS_HASH_SHIFT BITS_PER_BYTE
@@ -254,11 +255,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in || !ft->g) {
- kfree(ft->g);
- kvfree(in);
+ if (!ft->g)
return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free_g;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -278,7 +281,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
switch (type) {
@@ -300,7 +303,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -309,7 +312,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
memset(in, 0, inlen);
@@ -318,18 +321,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
kvfree(in);
return 0;
-err:
+err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
kvfree(in);
-
+err_free_g:
+ kfree(ft->g);
+ ft->g = NULL;
return err;
}
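The rework gives each allocation in arfs_create_groups() its own unwind label, so nothing is freed twice and ft->g is NULLed for callers that test it. The ladder shape, sketched with hypothetical names:

#include <linux/slab.h>

static int build_groups(void *scratch)
{
	return 0;	/* stand-in for the group-creation body */
}

static int create_groups_sketch(void **groups, void **scratch,
				size_t ngroups, size_t inlen)
{
	int err;

	*groups = kcalloc(ngroups, sizeof(void *), GFP_KERNEL);
	if (!*groups)
		return -ENOMEM;

	*scratch = kvzalloc(inlen, GFP_KERNEL);
	if (!*scratch) {
		err = -ENOMEM;
		goto err_free_groups;
	}

	err = build_groups(*scratch);
	if (err)
		goto err_free_scratch;

	kvfree(*scratch);
	return 0;

err_free_scratch:
	kvfree(*scratch);
err_free_groups:
	kfree(*groups);
	*groups = NULL;	/* leave no dangling pointer behind */
	return err;
}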
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 67f546683e85..6ed3a32b7e22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
{
int tc, i;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
mlx5e_destroy_tis(mdev, tisn[i][tc]);
}
@@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
int tc, i;
int err;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
@@ -140,7 +140,7 @@ err_close_tises:
return err;
}
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
@@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
- err = mlx5e_create_tises(mdev, res->tisn);
- if (err) {
- mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
- goto err_destroy_bfreg;
+ if (create_tises) {
+ err = mlx5e_create_tises(mdev, res->tisn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+ goto err_destroy_bfreg;
+ }
+ res->tisn_valid = true;
}
+
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
@@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
- mlx5e_destroy_tises(mdev, res->tisn);
+ if (res->tisn_valid)
+ mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
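Since TIS creation is now optional, teardown can no longer assume the TISes exist; the new tisn_valid flag records whether they were created. The create/destroy pairing, with hypothetical helpers standing in for mlx5e_create_tises()/mlx5e_destroy_tises():

#include <linux/types.h>

struct res_sketch {
	bool tisn_valid;
};

static int create_tises_hw(struct res_sketch *r) { return 0; }	/* stand-in */
static void destroy_tises_hw(struct res_sketch *r) { }		/* stand-in */

static int create_resources(struct res_sketch *r, bool create_tises)
{
	if (create_tises) {
		int err = create_tises_hw(r);

		if (err)
			return err;
		r->tisn_valid = true;	/* remember for teardown */
	}
	return 0;
}

static void destroy_resources(struct res_sketch *r)
{
	/* Only undo what create_resources() actually did. */
	if (r->tisn_valid)
		destroy_tises_hw(r);
}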
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b5f1c4ca38ba..be809556b2e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1806,6 +1806,7 @@ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
}
void mlx5e_tx_disable_queue(struct netdev_queue *txq)
@@ -1819,6 +1820,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL);
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
@@ -2560,6 +2562,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
+ netif_napi_set_irq(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2602,12 +2605,16 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
+
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
int tc;
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, NULL);
+
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
else
@@ -5992,7 +5999,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, true);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 30932c9c9a8f..9fb2c057bd78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, priv->max_nch));
+ mlx5e_rqt_size(mdev, hp->num_channels));
if (err)
return err;
@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue;
+
+ list_del(&peer_flow->peer_flows);
if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
- list_del(&peer_flow->peer_flows);
kfree(peer_flow);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5c166d9d2dca..2fa076b23fbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
+ /* ensure skb is put on metadata_map before tracking the index */
+ wmb();
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
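The added wmb() orders the skb's publication into metadata_map before the metadata index is tracked, so any consumer that observes the index is guaranteed to find the skb behind it. A hedged sketch of the publish side; the matching read barrier in the CQE-handling consumer is assumed:

#include <asm/barrier.h>

static void publish_entry(void **map, unsigned int slot, void *payload,
			  void (*track)(unsigned int slot))
{
	map[slot] = payload;	/* 1. store the payload */
	wmb();			/* 2. payload strictly before the index */
	track(slot);		/* 3. consumer keys on the tracked index */
}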
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index a7ed87e9d842..22dd30cf8033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
i++;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
ether_addr_copy(dmac_v, entry->key.addr);
@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 190f10aba170..5a0047bdcb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
+ if (!rpriv || !rpriv->netdev)
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b0455134c98e..baaae628b0a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
}
static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
- bool vf_dest = false, pf_dest = false;
+ bool internal_dest = false, external_dest = false;
int i;
for (i = 0; i < max_dest; i++) {
- if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
continue;
- if (dests[i].vport.num == MLX5_VPORT_UPLINK)
- pf_dest = true;
+ /* Uplink dest is external, but considered as internal
+ * if there is reformat because firmware uses LB+hairpin to support it.
+ */
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+ !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+ external_dest = true;
else
- vf_dest = true;
+ internal_dest = true;
- if (vf_dest && pf_dest)
+ if (internal_dest && external_dest)
return true;
}
@@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
/* Header rewrite with combined wire+loopback in FDB is not allowed */
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
- esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_dests_to_int_external(dest, i)) {
esw_warn(esw->dev,
- "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
rule = ERR_PTR(-EINVAL);
goto err_esw_get;
}
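/*
 * Sketch of the destination classification above: header rewrite may not
 * be combined with both an "internal" destination (a vport, or the uplink
 * reached via reformat, which firmware implements as LB+hairpin) and an
 * "external" one (the plain uplink). Portable illustration with
 * hypothetical demo_* types.
 */
#include <stdbool.h>

enum demo_dest_type { DEMO_DEST_VPORT, DEMO_DEST_UPLINK, DEMO_DEST_OTHER };

struct demo_dest {
	enum demo_dest_type type;
	bool reformat;	/* uplink behind reformat counts as internal */
};

static bool demo_mixes_int_external(const struct demo_dest *d, int n)
{
	bool internal = false, external = false;

	for (int i = 0; i < n; i++) {
		if (d[i].type == DEMO_DEST_OTHER)
			continue;
		if (d[i].type == DEMO_DEST_UPLINK && !d[i].reformat)
			external = true;
		else
			internal = true;
		if (internal && external)
			return true;
	}
	return false;
}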
@@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct net *devl_net, *netdev_net;
- bool ret = false;
-
- mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
- if (dev->mlx5e_res.uplink_netdev) {
- netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
- ret = net_eq(devl_net, netdev_net);
- }
- mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
- return ret;
-}
-
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
- return -EPERM;
- }
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 1616a6144f7b..9b8599c200e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
+ MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
+ !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 58f4c0d0fafa..e7faf7e73ca4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
return -EIO;
}
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED);
/* Loop until device state turns to disable */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
cond_resched();
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index f27eab6e4929..2911aa34a5be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -703,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
mlx5_eq_notifier_register(dev, &fw_reset->nb);
}
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
{
- mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!fw_reset)
+ return;
+
+ mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
}
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
@@ -733,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
- struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ struct mlx5_fw_reset *fw_reset;
int err;
+ if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+ return 0;
+
+ fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
if (!fw_reset)
return -ENOMEM;
fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -771,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
devl_params_unregister(priv_to_devlink(dev),
mlx5_fw_reset_devlink_params,
ARRAY_SIZE(mlx5_fw_reset_devlink_params));
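/*
 * Sketch of the optional-subsystem shape the fw_reset hunks above adopt:
 * once init may legitimately return success without allocating (the MFRL
 * capability is absent), every other entry point must tolerate a NULL
 * handle. Hypothetical demo_* names in portable C.
 */
#include <stdbool.h>
#include <stdlib.h>

struct demo_reset { int pending; };

struct demo_dev {
	bool has_reset_cap;
	struct demo_reset *reset;	/* NULL when capability is absent */
};

static int demo_reset_init(struct demo_dev *dev)
{
	if (!dev->has_reset_cap)
		return 0;		/* valid configuration, no state */
	dev->reset = calloc(1, sizeof(*dev->reset));
	return dev->reset ? 0 : -1;
}

static void demo_reset_events_start(struct demo_dev *dev)
{
	if (!dev->reset)		/* guard mirrors the driver hunks */
		return;
	dev->reset->pending = 1;
}

static void demo_reset_cleanup(struct demo_dev *dev)
{
	if (!dev->reset)
		return;
	free(dev->reset);
	dev->reset = NULL;
}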
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8ff6dc9bc803..ad38e31822df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
return MLX5_SENSOR_PCI_COMM_ERR;
if (pci_channel_offline(dev->pdev))
return MLX5_SENSOR_PCI_ERR;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
return MLX5_SENSOR_NIC_DISABLED;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET)
return MLX5_SENSOR_NIC_SW_RESET;
if (sensor_fw_synd_rfr(dev))
return MLX5_SENSOR_FW_SYND_RFR;
@@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET);
return true;
}
@@ -246,13 +246,13 @@ recover_from_sw_reset:
/* Recover from SW reset */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
msleep(20);
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
}
@@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
- case MLX5_NIC_IFC_FULL:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
break;
- case MLX5_NIC_IFC_DISABLED:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED:
mlx5_core_warn(dev, "starting teardown\n");
break;
- case MLX5_NIC_IFC_NO_DRAM_NIC:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
break;
- case MLX5_NIC_IFC_SW_RESET:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET:
/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
* 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
* and this is a VF), this is not recoverable by SW reset.
* Logging of this is handled elsewhere.
* 2. FW reset has been issued by another function, driver can
* be reloaded to recover after the mode switches to
- * MLX5_NIC_IFC_DISABLED.
+ * MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED.
*/
if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
mlx5_core_warn(dev, "NIC SW reset in progress\n");
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct health_buffer __iomem *h = health->health;
u8 synd = ioread8(&h->synd);
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
if (!synd)
return 0;
- devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
return 0;
@@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
&fw_reporter_ctx);
}
-static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = {
.name = "fw",
.diagnose = mlx5_fw_reporter_diagnose,
.dump = mlx5_fw_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = mlx5_fw_reporter_diagnose,
+};
+
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
@@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
-static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+};
+
#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
@@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
+ const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
+ const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
u64 grace_period;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
+ fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
} else if (mlx5_core_is_pf(dev)) {
@@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
} else {
/* VF or SF */
grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
+ fw_ops = &mlx5_fw_reporter_ops;
}
health->fw_reporter =
- devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, dev);
+ devl_health_reporter_create(devlink, fw_ops, 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_fatal_reporter =
devl_health_reporter_create(devlink,
- &mlx5_fw_fatal_reporter_ops,
+ fw_fatal_ops,
grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
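/*
 * Sketch of the per-function ops selection above: PFs get reporter ops
 * with dump/recover callbacks, VFs and SFs get a reduced table, and one
 * create path consumes whichever table was picked. Hypothetical demo_*
 * names; portable illustration of the function-pointer-table idiom.
 */
#include <stdio.h>

struct demo_reporter_ops {
	const char *name;
	void (*diagnose)(void);
	void (*dump)(void);	/* NULL for VF/SF: no dump support */
};

static void demo_diagnose(void) { puts("diagnose"); }
static void demo_dump(void)     { puts("dump"); }

static const struct demo_reporter_ops demo_pf_ops = {
	.name = "fw", .diagnose = demo_diagnose, .dump = demo_dump,
};

static const struct demo_reporter_ops demo_vf_ops = {
	.name = "fw", .diagnose = demo_diagnose, /* .dump stays NULL */
};

static const struct demo_reporter_ops *demo_pick_ops(int is_pf)
{
	return is_pf ? &demo_pf_ops : &demo_vf_ops;
}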
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 58845121954c..d77be1b4dd9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
}
/* This should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, false);
if (err)
goto destroy_ht;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 40c7be124041..58bd749b5e4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bccf6e53556c..c2593625c09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = {
};
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
- u32 warn_time_mili)
+ u32 warn_time_mili, const char *init_state)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
u32 fw_initializing;
- int err = 0;
do {
fw_initializing = ioread32be(&dev->iseg->initializing);
if (!(fw_initializing >> 31))
break;
- if (time_after(jiffies, end) ||
- test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
- err = -EBUSY;
- break;
+ if (time_after(jiffies, end)) {
+ mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
+ max_wait_mili, init_state);
+ return -ETIMEDOUT;
+ }
+ if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+ mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
+ init_state);
+ return -ENODEV;
}
if (warn_time_mili && time_after(jiffies, warn)) {
- mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
- jiffies_to_msecs(end - warn) / 1000, fw_initializing);
+ mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
+ init_state, jiffies_to_msecs(end - warn) / 1000,
+ fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
} while (true);
- return err;
+ return 0;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
@@ -1151,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
- mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
- timeout);
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
+ "pre-initializing");
+ if (err)
return err;
- }
err = mlx5_cmd_enable(dev);
if (err) {
@@ -1166,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
mlx5_tout_query_iseg(dev);
- err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
- mlx5_tout_ms(dev, FW_INIT));
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
+ if (err)
goto err_cmd_cleanup;
- }
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
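/*
 * Sketch of the reworked wait loop above: the poll now distinguishes a
 * firmware timeout (-ETIMEDOUT) from a teardown request (-ENODEV, the
 * MLX5_BREAK_FW_WAIT case also set by the SF remove/shutdown hunks below)
 * and carries the state name so both call sites share one message format.
 * Portable approximation with hypothetical demo_* flags.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static volatile int demo_init_bit = 1;	/* cleared by "firmware" */
static volatile int demo_remove_flag;	/* MLX5_BREAK_FW_WAIT analogue */

static bool demo_fw_ready(void) { return !demo_init_bit; }

static int demo_wait_fw(unsigned int max_ms, const char *state)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };
	unsigned int waited = 0;

	while (!demo_fw_ready()) {
		if (waited >= max_ms) {
			fprintf(stderr, "FW over %u ms in %s state\n",
				max_ms, state);
			return -ETIMEDOUT;
		}
		if (demo_remove_flag)
			return -ENODEV;	/* device going away, stop waiting */
		nanosleep(&ts, NULL);
		waited += 2;
	}
	return 0;
}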
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a79b7959361b..58732f44940f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -312,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
return ret;
}
-enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_SW_RESET = 7
-};
-
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index c93492b67788..99219ea52c4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
-static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev,
+ struct mlx5_sf_dev *sf_dev)
{
int id;
@@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
return;
xa_err:
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
@@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
}
static int
@@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
- mlx5_sf_dev_remove(table->dev, sf_dev);
+ mlx5_sf_dev_remove_aux(table->dev, sf_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 169c2c68ed5c..bc863e1f062e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -95,24 +95,29 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
+ struct devlink *devlink;
- mlx5_drain_health_wq(sf_dev->mdev);
+ devlink = priv_to_devlink(mdev);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
devlink_unregister(devlink);
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
+ if (mlx5_dev_is_lightweight(mdev))
+ mlx5_uninit_one_light(mdev);
else
- mlx5_uninit_one(sf_dev->mdev);
- iounmap(sf_dev->mdev->iseg);
- mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_uninit_one(mdev);
+ iounmap(mdev->iseg);
+ mlx5_mdev_uninit(mdev);
mlx5_devlink_free(devlink);
}
static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
- mlx5_unload_one(sf_dev->mdev, false);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_unload_one(mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 6f9790e97fed..2ebb61ef3ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) {
case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break;
case DR_ACTION_TYP_FT:
dest_action = action;
@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->sampler->tx_icm_addr;
break;
case DR_ACTION_TYP_VPORT:
- attr.hit_gvmi = action->vport->caps->vhca_gvmi;
- dest_action = action;
- attr.final_icm_addr = rx_rule ?
- action->vport->caps->icm_address_rx :
- action->vport->caps->icm_address_tx;
+ if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+ /* can't go to uplink on RX rule - dropping instead */
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+ } else {
+ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+ dest_action = action;
+ attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx :
+ action->vport->caps->icm_address_tx;
+ }
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &
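/*
 * Sketch of the drop fallback above: an RX rule that would steer to the
 * uplink is pointed at the drop ICM address instead, with the GVMI taken
 * from the top 16 bits of that 64-bit address (matching the >> 48 used
 * above). The 48-bit offset split below is an assumption for
 * illustration; demo_* names are hypothetical.
 */
#include <stdint.h>

static inline uint16_t demo_icm_gvmi(uint64_t icm_addr)
{
	return (uint16_t)(icm_addr >> 48);	/* owner in bits 63:48 */
}

static inline uint64_t demo_icm_offset(uint64_t icm_addr)
{
	return icm_addr & ((1ULL << 48) - 1);	/* address in bits 47:0 */
}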
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 7e36e1062139..64f4cc284aea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -54,6 +54,107 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
+static struct mlx5dr_dbg_dump_buff *
+mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *new_buff;
+
+ new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL);
+ if (!new_buff)
+ return NULL;
+
+ new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
+ if (!new_buff->buff) {
+ kfree(new_buff);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&new_buff->node);
+ list_add_tail(&new_buff->node, &dump_data->buff_list);
+
+ return new_buff;
+}
+
+static struct mlx5dr_dbg_dump_data *
+mlx5dr_dbg_create_dump_data(void)
+{
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL);
+ if (!dump_data)
+ return NULL;
+
+ INIT_LIST_HEAD(&dump_data->buff_list);
+
+ if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) {
+ kfree(dump_data);
+ return NULL;
+ }
+
+ return dump_data;
+}
+
+static void
+mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff;
+
+ if (!dump_data)
+ return;
+
+ list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
+ kvfree(dump_buff->buff);
+ list_del(&dump_buff->node);
+ kfree(dump_buff);
+ }
+
+ kfree(dump_data);
+}
+
+static int
+mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ struct mlx5dr_dbg_dump_buff *buff;
+ u32 buff_capacity, write_size;
+ int remain_size, ret;
+
+ if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
+ return -EINVAL;
+
+ dump_data = dmn->dump_info.dump_data;
+ buff = list_last_entry(&dump_data->buff_list,
+ struct mlx5dr_dbg_dump_buff, node);
+
+ buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
+ remain_size = buff_capacity - size;
+ write_size = (remain_size > 0) ? size : buff_capacity;
+
+ if (likely(write_size)) {
+ ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
+ if (ret < 0)
+ return ret;
+
+ buff->index += write_size;
+ }
+
+ if (remain_size < 0) {
+ remain_size *= -1;
+ buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
+ if (!buff)
+ return -ENOMEM;
+
+ ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
+ if (ret < 0)
+ return ret;
+
+ buff->index += remain_size;
+ }
+
+ return 0;
+}
+
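+/*
+ * Sketch of the chunked append implemented by
+ * mlx5dr_dbg_dump_data_print() above: each record lands in the last
+ * fixed-size buffer on a list and any overflow spills into a freshly
+ * allocated one. Portable version with a tiny chunk size so the split
+ * path is easy to exercise; demo_* names are illustrative only.
+ */
+#include <stdlib.h>
+#include <string.h>
+
+#define DEMO_CHUNK 16	/* stands in for MLX5DR_DEBUG_DUMP_BUFF_SIZE */
+
+struct demo_buff {
+	struct demo_buff *next;
+	size_t index;		/* bytes already used */
+	char data[DEMO_CHUNK];
+};
+
+static int demo_print(struct demo_buff **head, const char *str)
+{
+	size_t size = strlen(str);
+	struct demo_buff *b = *head;
+
+	if (size >= DEMO_CHUNK)
+		return -1;	/* one record must fit in a chunk */
+
+	while (b && b->next)	/* find the last buffer on the list */
+		b = b->next;
+	if (!b) {
+		b = calloc(1, sizeof(*b));
+		if (!b)
+			return -1;
+		*head = b;
+	}
+
+	size_t cap = (DEMO_CHUNK - 1) - b->index;	/* keep a NUL */
+	size_t write = size <= cap ? size : cap;
+
+	memcpy(b->data + b->index, str, write);
+	b->index += write;
+
+	if (write < size) {	/* spill the remainder into a new chunk */
+		struct demo_buff *nb = calloc(1, sizeof(*nb));
+
+		if (!nb)
+			return -1;
+		memcpy(nb->data, str + write, size - write);
+		nb->index = size - write;
+		b->next = nb;
+	}
+	return 0;
+}
+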
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
@@ -109,36 +210,68 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
+ int ret;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id,
- -1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id,
- DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_CTR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
- action->ctr->ctr_id + action->ctr->offset);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+ action->ctr->ctr_id + action->ctr->offset);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TAG:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
- action->flow_tag->flow_tag);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+ action->flow_tag->flow_tag);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_MODIFY_HDR:
{
@@ -150,83 +283,171 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
- DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
- rule_id, action->rewrite->index,
- action->rewrite->single_action_opt,
- ptrn_arg ? action->rewrite->num_of_actions : 0,
- ptrn_arg ? ptrn->index : 0,
- ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+ rule_id, action->rewrite->index,
+ action->rewrite->single_action_opt,
+ ptrn_arg ? action->rewrite->num_of_actions : 0,
+ ptrn_arg ? ptrn->index : 0,
+ ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (ptrn_arg) {
for (i = 0; i < action->rewrite->num_of_actions; i++) {
- seq_printf(file, ",0x%016llx",
- be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ ",0x%016llx",
+ be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
}
- seq_puts(file, "\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
}
case DR_ACTION_TYP_VPORT:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
- action->vport->caps->num);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+ action->vport->caps->num);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
- rule_id,
- (action->rewrite->ptrn && action->rewrite->arg) ?
- mlx5dr_arg_get_obj_id(action->rewrite->arg) :
- action->rewrite->index);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+ rule_id,
+ (action->rewrite->ptrn && action->rewrite->arg) ?
+ mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+ action->rewrite->index);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L3:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_POP_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
- rule_id, action->push_vlan->vlan_hdr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+ rule_id, action->push_vlan->vlan_hdr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_INSERT_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_REMOVE_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_SAMPLER:
- seq_printf(file,
- "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
- 0, 0, action->sampler->sampler_id,
- action->sampler->rx_icm_addr,
- action->sampler->tx_icm_addr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
+ rule_id, 0, 0, action->sampler->sampler_id,
+ action->sampler->rx_icm_addr,
+ action->sampler->tx_icm_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_RANGE:
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
@@ -247,10 +468,17 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
}
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
- hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
- action->range->definer_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
+ rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
+ miss_tbl_ptr, action->range->definer_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
default:
return 0;
@@ -263,8 +491,10 @@ static int
dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
+ int ret;
if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
@@ -277,9 +507,16 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
DR_STE_SIZE_REDUCED);
- seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
- dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
- hw_ste_dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+ dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
+ rule_id, hw_ste_dump);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -309,6 +546,7 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_rule_rx_tx *rx = &rule->rx;
struct mlx5dr_rule_rx_tx *tx = &rule->tx;
u8 format_ver;
@@ -316,8 +554,15 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
- seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
- DR_DBG_PTR_TO_ID(rule->matcher));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
+ rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_matcher) {
ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
@@ -344,46 +589,94 @@ static int
dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
+ int ret;
- seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
- matcher_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
+ DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (criteria & DR_MATCHER_CRITERIA_OUTER) {
dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_INNER) {
dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC) {
dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC2) {
dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC3) {
dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
- seq_printf(file, "%s\n", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s\n", dump);
} else {
- seq_puts(file, ",\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -391,9 +684,19 @@ static int
dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
- DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
- builder->lu_type);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,%d,0x%x\n",
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
+ is_rx, builder->lu_type);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -403,6 +706,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -412,11 +716,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
- rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
- matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(s_icm_addr),
- dr_dump_icm_to_idx(e_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+ rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+ matcher_id, matcher_rx_tx->num_of_builders,
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
+
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -434,13 +746,22 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 matcher_id;
int ret;
matcher_id = DR_DBG_PTR_TO_ID(matcher);
- seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
- matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+ matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
+ matcher->prio);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_matcher_mask(file, &matcher->mask,
matcher->match_criteria, matcher_id);
@@ -486,15 +807,24 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
+ int ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(s_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", rec_type, table_id,
+ dr_dump_icm_to_idx(s_icm_addr));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -503,11 +833,19 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
- seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
- DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
- table->table_type, table->level);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+ DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+ table->table_type, table->level);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_dmn) {
ret = dr_dump_table_rx_tx(file, true, rx,
@@ -546,46 +884,86 @@ static int
dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
- domain_id, ring->cq->mcq.cqn, ring->qp->qpn);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
+ DR_DBG_PTR_TO_ID(ring), domain_id,
+ ring->cq->mcq.cqn, ring->qp->qpn);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_flex_parser(struct seq_file *file,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,%s,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
- flex_parser_name, flex_parser_value);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%s,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+ flex_parser_name, flex_parser_value);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
+ int ret;
xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
; /* count the number of vports in xarray */
- seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
- caps->nic_rx_drop_address, caps->nic_tx_drop_address,
- caps->flex_protocols, vports_num, caps->eswitch_manager);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+ caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+ caps->flex_protocols, vports_num, caps->eswitch_manager);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
- seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
- vport_caps->vport_gvmi, vport_caps->icm_address_rx,
- vport_caps->icm_address_tx);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
+ domain_id, i, vport_caps->vport_gvmi,
+ vport_caps->icm_address_rx,
+ vport_caps->icm_address_tx);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -627,24 +1005,32 @@ dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
int ret;
- seq_printf(file, "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
- DR_DUMP_REC_TYPE_DOMAIN,
- domain_id, dmn->type, dmn->info.caps.gvmi,
- dmn->info.supp_sw_steering,
- /* package version */
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL,
- pci_name(dmn->mdev->pdev),
- 0, /* domain flags */
- dmn->num_buddies[DR_ICM_TYPE_STE],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
+ DR_DUMP_REC_TYPE_DOMAIN,
+ domain_id, dmn->type, dmn->info.caps.gvmi,
+ dmn->info.supp_sw_steering,
+ /* package version */
+ LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL,
+ pci_name(dmn->mdev->pdev),
+ 0, /* domain flags */
+ dmn->num_buddies[DR_ICM_TYPE_STE],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_domain_info(file, &dmn->info, domain_id);
if (ret < 0)
@@ -683,11 +1069,91 @@ unlock_mutex:
return ret;
}
-static int dr_dump_show(struct seq_file *file, void *priv)
+static void *
+dr_dump_start(struct seq_file *file, loff_t *pos)
{
- return dr_dump_domain_all(file, file->private);
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
+ mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
+ dump_data = dmn->dump_info.dump_data;
+
+ if (dump_data) {
+ return seq_list_start(&dump_data->buff_list, *pos);
+ } else if (*pos == 0) {
+ dump_data = mlx5dr_dbg_create_dump_data();
+ if (!dump_data)
+ goto exit;
+
+ dmn->dump_info.dump_data = dump_data;
+ if (dr_dump_domain_all(file, dmn)) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ goto exit;
+ }
+
+ return seq_list_start(&dump_data->buff_list, *pos);
+ }
+
+exit:
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+ return NULL;
}
-DEFINE_SHOW_ATTRIBUTE(dr_dump);
+
+static void *
+dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = dmn->dump_info.dump_data;
+
+ return seq_list_next(v, &dump_data->buff_list, pos);
+}
+
+static void
+dr_dump_stop(struct seq_file *file, void *v)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (v && IS_ERR(v))
+ return;
+
+ if (!v) {
+ dump_data = dmn->dump_info.dump_data;
+ if (dump_data) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ }
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+}
+
+static int
+dr_dump_show(struct seq_file *file, void *v)
+{
+ struct mlx5dr_dbg_dump_buff *entry;
+
+ entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
+ seq_printf(file, "%s", entry->buff);
+
+ return 0;
+}
+
+static const struct seq_operations dr_dump_sops = {
+ .start = dr_dump_start,
+ .next = dr_dump_next,
+ .stop = dr_dump_stop,
+ .show = dr_dump_show,
+};
+DEFINE_SEQ_ATTRIBUTE(dr_dump);
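+/*
+ * Minimal shape of the seq_file iterator adopted above, for reference:
+ * start() resolves *pos to a list node, next() advances, show() emits one
+ * node, stop() releases whatever start() pinned. A sketch against a
+ * hypothetical demo list; the real implementation above additionally
+ * builds/tears down the dump snapshot and serializes concurrent readers.
+ */
+#include <linux/list.h>
+#include <linux/seq_file.h>
+
+struct demo_buff { struct list_head node; char *buff; };
+struct demo_ctx  { struct list_head buff_list; };
+
+static void *demo_start(struct seq_file *file, loff_t *pos)
+{
+	struct demo_ctx *ctx = file->private;
+
+	return seq_list_start(&ctx->buff_list, *pos);
+}
+
+static void *demo_next(struct seq_file *file, void *v, loff_t *pos)
+{
+	struct demo_ctx *ctx = file->private;
+
+	return seq_list_next(v, &ctx->buff_list, pos);
+}
+
+static void demo_stop(struct seq_file *file, void *v) { }
+
+static int demo_show(struct seq_file *file, void *v)
+{
+	struct demo_buff *b = list_entry(v, struct demo_buff, node);
+
+	seq_printf(file, "%s", b->buff);
+	return 0;
+}
+
+static const struct seq_operations demo_sops = {
+	.start = demo_start, .next = demo_next,
+	.stop = demo_stop, .show = demo_show,
+};
+DEFINE_SEQ_ATTRIBUTE(demo);	/* generates demo_open and demo_fops */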
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
index def6cf853eea..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
@@ -1,10 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
+#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
+
+enum {
+ MLX5DR_DEBUG_DUMP_STATE_FREE,
+ MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
+};
+
+struct mlx5dr_dbg_dump_buff {
+ char *buff;
+ u32 index;
+ struct list_head node;
+};
+
+struct mlx5dr_dbg_dump_data {
+ struct list_head buff_list;
+};
+
struct mlx5dr_dbg_dump_info {
struct mutex dbg_mutex; /* protect dbg lists */
struct dentry *steering_debugfs;
struct dentry *fdb_debugfs;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ atomic_t state;
};
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 21753f327868..1005bb6935b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -440,6 +440,27 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 *out;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
+
+ *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.sd_group);
+out:
+ kvfree(out);
+ return err;
+}
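+/*
+ * Sketch of the query-helper shape added above: allocate an output
+ * buffer, run the query, extract one field, unwind through a single exit
+ * label. Portable illustration; demo_query() and the extraction stand in
+ * for the firmware command and MLX5_GET().
+ */
+#include <stdlib.h>
+
+/* stands in for the firmware command; fills the output buffer */
+static int demo_query(unsigned char *out, size_t len)
+{
+	(void)len;
+	out[0] = 1;
+	return 0;
+}
+
+static int demo_query_sd_group(unsigned char *sd_group)
+{
+	size_t outlen = 256;	/* stands in for MLX5_ST_SZ_BYTES(...) */
+	unsigned char *out;
+	int err;
+
+	out = calloc(1, outlen);
+	if (!out)
+		return -1;
+
+	err = demo_query(out, outlen);
+	if (err)
+		goto out;
+
+	*sd_group = out[0];	/* stands in for MLX5_GET(...) */
+out:
+	free(out);
+	return err;
+}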
+
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 253d7ad9b809..8b63968bbee9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -124,6 +124,41 @@ static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
}
+static bool mlxbf_gige_llu_counters_enabled(struct mlxbf_gige *priv)
+{
+ u32 data;
+
+ if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF2_LLU_COUNTERS_EN)
+ return true;
+ } else {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF3_LLU_COUNTERS_EN)
+ return true;
+ }
+
+ return false;
+}
+
+static void mlxbf_gige_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ u64 data_lo, data_hi;
+
+ /* Read LLU counters to provide stats only if counters are enabled */
+ if (mlxbf_gige_llu_counters_enabled(priv)) {
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_HI);
+ pause_stats->tx_pause_frames = (data_hi << 32) | data_lo;
+
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_HI);
+ pause_stats->rx_pause_frames = (data_hi << 32) | data_lo;
+ }
+}
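+/*
+ * Sketch of the 64-bit counter assembly above, where two 32-bit MMIO
+ * words are combined into one value. The classic guard against a carry
+ * between the two reads is to re-read the high word until it is stable,
+ * shown here over plain variables; demo_* readers are hypothetical and
+ * this is not a claim about the driver's own read sequence.
+ */
+#include <stdint.h>
+
+static volatile uint32_t demo_cnt_hi, demo_cnt_lo;	/* "registers" */
+
+static uint64_t demo_read_counter64(void)
+{
+	uint32_t hi, lo;
+
+	do {
+		hi = demo_cnt_hi;
+		lo = demo_cnt_lo;
+	} while (hi != demo_cnt_hi);	/* retry if the low word wrapped */
+
+	return ((uint64_t)hi << 32) | lo;
+}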
+
const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = mlxbf_gige_get_ringparam,
@@ -134,6 +169,7 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_ethtool_stats = mlxbf_gige_get_ethtool_stats,
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
+ .get_pause_stats = mlxbf_gige_get_pause_stats,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index cd0973229c9b..98a8681c21b9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -99,4 +99,34 @@
#define MLXBF_GIGE_100M_IPG_SIZE 119
#define MLXBF_GIGE_10M_IPG_SIZE 1199
+/* Offsets into OOB LLU block for pause frame counters */
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI 0x33d8
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO 0x33dc
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI 0x3210
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO 0x3214
+
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI 0x3a88
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO 0x3a8c
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI 0x38c0
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO 0x38c4
+
+#define MLXBF_GIGE_TX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_TX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO)
+#define MLXBF_GIGE_RX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_RX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO)
+
+#define MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG 0x2110
+#define MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG 0x2030
+
+#define MLXBF_GIGE_BF2_LLU_COUNTERS_EN BIT(0)
+#define MLXBF_GIGE_BF3_LLU_COUNTERS_EN BIT(4)
+
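+/*
+ * The offset macros above expand against a `priv` in the caller's scope.
+ * An equivalent, more explicit shape is a helper taking the hardware
+ * version; a portable sketch with hypothetical demo_* constants reusing
+ * the BF2/BF3 offsets defined above.
+ */
+#include <stdint.h>
+
+enum demo_hw { DEMO_HW_BF2, DEMO_HW_BF3 };
+
+#define DEMO_BF2_TX_PAUSE_CNT_LO 0x33dc
+#define DEMO_BF3_TX_PAUSE_CNT_LO 0x3a8c
+
+static inline uint32_t demo_tx_pause_cnt_lo(enum demo_hw ver)
+{
+	return ver == DEMO_HW_BF2 ? DEMO_BF2_TX_PAUSE_CNT_LO
+				  : DEMO_BF3_TX_PAUSE_CNT_LO;
+}
+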
#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index faa63ea9b83e..1915fa41c622 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -95,7 +95,7 @@ struct mlxsw_afa_set {
*/
has_trap:1,
has_police:1;
- unsigned int ref_count;
+ refcount_t ref_count;
struct mlxsw_afa_set *next; /* Pointer to the next set. */
struct mlxsw_afa_set *prev; /* Pointer to the previous set,
* note that set may have multiple
@@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry {
struct rhash_head ht_node;
struct mlxsw_afa_fwd_entry_ht_key ht_key;
u32 kvdl_index;
- unsigned int ref_count;
+ refcount_t ref_count;
};
static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
@@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
/* Need to initialize the set to pass by default */
mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
set->ht_key.is_first = is_first;
- set->ref_count = 1;
+ refcount_set(&set->ref_count, 1);
return set;
}
@@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_set *set)
{
- if (--set->ref_count)
+ if (!refcount_dec_and_test(&set->ref_count))
return;
if (set->shared)
mlxsw_afa_set_unshare(mlxsw_afa, set);
@@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
mlxsw_afa_set_ht_params);
if (set) {
- set->ref_count++;
+ refcount_inc(&set->ref_count);
mlxsw_afa_set_put(mlxsw_afa, orig_set);
} else {
set = orig_set;
@@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
if (!fwd_entry)
return ERR_PTR(-ENOMEM);
fwd_entry->ht_key.local_port = local_port;
- fwd_entry->ref_count = 1;
+ refcount_set(&fwd_entry->ref_count, 1);
err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
&fwd_entry->ht_node,
@@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
mlxsw_afa_fwd_entry_ht_params);
if (fwd_entry) {
- fwd_entry->ref_count++;
+ refcount_inc(&fwd_entry->ref_count);
return fwd_entry;
}
return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
@@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_fwd_entry *fwd_entry)
{
- if (--fwd_entry->ref_count)
+ if (!refcount_dec_and_test(&fwd_entry->ref_count))
return;
mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}
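The hunks above and in the following files convert open-coded unsigned int reference counts to refcount_t, which saturates instead of wrapping and warns on misuse such as incrementing from zero. A minimal sketch of the recurring before/after pattern; struct foo and foo_destroy() are hypothetical:

#include <linux/refcount.h>

struct foo {
	refcount_t ref_count;
};

static void foo_destroy(struct foo *f)
{
	/* free hardware and memory resources */
}

static void foo_init(struct foo *f)
{
	refcount_set(&f->ref_count, 1);	/* was: f->ref_count = 1; */
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->ref_count);	/* was: f->ref_count++; */
}

static void foo_put(struct foo *f)
{
	/* was: if (--f->ref_count) return; */
	if (!refcount_dec_and_test(&f->ref_count))
		return;
	foo_destroy(f);
}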
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 0d5e6f9b466e..947500f8ed71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/refcount.h>
#include "item.h"
#include "core_acl_flex_keys.h"
@@ -107,7 +108,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy);
struct mlxsw_afk_key_info {
struct list_head list;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int blocks_count;
int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
* is index inside "blocks"
@@ -334,7 +335,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
if (err)
goto err_picker;
list_add(&key_info->list, &mlxsw_afk->key_info_list);
- key_info->ref_count = 1;
+ refcount_set(&key_info->ref_count, 1);
return key_info;
err_picker:
@@ -356,7 +357,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
if (key_info) {
- key_info->ref_count++;
+ refcount_inc(&key_info->ref_count);
return key_info;
}
return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
@@ -365,7 +366,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get);
void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
{
- if (--key_info->ref_count)
+ if (!refcount_dec_and_test(&key_info->ref_count))
return;
mlxsw_afk_key_info_destroy(key_info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 6b98c3287b49..f0ceb196a6ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -708,7 +708,6 @@ static const struct i2c_device_id mlxsw_m_i2c_id[] = {
static struct i2c_driver mlxsw_m_i2c_driver = {
.driver.name = "mlxsw_minimal",
- .class = I2C_CLASS_HWMON,
.id_table = mlxsw_m_i2c_id,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5d3413636a62..ecde2086c703 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2695,23 +2695,18 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
char sgcr_pl[MLXSW_REG_SGCR_LEN];
- u16 max_lag;
int err;
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return 0;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
-
	/* In DDD mode, which we use by default, each LAG entry is 8 PGT
* entries. The LAG table address needs to be 8-aligned, but that ought
* to be the case, since the LAG table is allocated first.
*/
err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
if (err)
return err;
if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
@@ -2728,33 +2723,31 @@ static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
err_mid_alloc_range:
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
return err;
}
static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
- u16 max_lag;
- int err;
-
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return;
-
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
}
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
+struct mlxsw_sp_lag {
+ struct net_device *dev;
+ refcount_t ref_count;
+ u16 lag_id;
+};
+
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
- u16 max_lag;
u32 seed;
int err;
@@ -2773,7 +2766,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
if (err)
return err;
@@ -2784,7 +2777,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
+ mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
GFP_KERNEL);
if (!mlxsw_sp->lags) {
err = -ENOMEM;
@@ -4269,19 +4262,48 @@ mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
+ struct mlxsw_sp_lag *lag;
+ u16 lag_id;
+ int i, err;
+
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ break;
+ }
+
+ if (i == mlxsw_sp->max_lag) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Exceeded number of supported LAG devices");
+ return ERR_PTR(-EBUSY);
+ }
+ lag_id = i;
mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ if (err)
+ return ERR_PTR(err);
+
+ lag = &mlxsw_sp->lags[lag_id];
+ lag->lag_id = lag_id;
+ lag->dev = lag_dev;
+ refcount_set(&lag->ref_count, 1);
+
+ return lag;
}
-static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static int
+mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
- mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
+ lag->dev = NULL;
+
+ mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
@@ -4329,34 +4351,44 @@ static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
-static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *lag_dev,
- u16 *p_lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev)
{
- struct mlxsw_sp_upper *lag;
- int free_lag_id = -1;
- u16 max_lag;
- int err, i;
+ int i;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ continue;
- for (i = 0; i < max_lag; i++) {
- lag = mlxsw_sp_lag_get(mlxsw_sp, i);
- if (lag->ref_count) {
- if (lag->dev == lag_dev) {
- *p_lag_id = i;
- return 0;
- }
- } else if (free_lag_id < 0) {
- free_lag_id = i;
- }
+ if (mlxsw_sp->lags[i].dev == lag_dev)
+ return &mlxsw_sp->lags[i];
}
- if (free_lag_id < 0)
- return -EBUSY;
- *p_lag_id = free_lag_id;
- return 0;
+
+ return NULL;
+}
+
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_lag *lag;
+
+ lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev);
+ if (lag) {
+ refcount_inc(&lag->ref_count);
+ return lag;
+ }
+
+ return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack);
+}
+
+static void
+mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
+{
+ if (!refcount_dec_and_test(&lag->ref_count))
+ return;
+
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag);
}
static bool
@@ -4365,12 +4397,6 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
struct netdev_lag_upper_info *lag_upper_info,
struct netlink_ext_ack *extack)
{
- u16 lag_id;
-
- if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
- NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
- return false;
- }
if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
return false;
@@ -4482,22 +4508,16 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
u16 lag_id;
u8 port_index;
int err;
- err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
- if (err)
- return err;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- if (!lag->ref_count) {
- err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
- if (err)
- return err;
- lag->dev = lag_dev;
- }
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
+ if (IS_ERR(lag))
+ return PTR_ERR(lag);
+ lag_id = lag->lag_id;
err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
if (err)
return err;
@@ -4515,7 +4535,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lag_id = lag_id;
mlxsw_sp_port->lagged = 1;
- lag->ref_count++;
err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
if (err)
@@ -4542,7 +4561,6 @@ err_replay:
err_router_join:
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
err_fid_port_join_lag:
- lag->ref_count--;
mlxsw_sp_port->lagged = 0;
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
@@ -4550,8 +4568,7 @@ err_fid_port_join_lag:
err_col_port_add:
mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
- if (!lag->ref_count)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
return err;
}
@@ -4560,12 +4577,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 lag_id = mlxsw_sp_port->lag_id;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
if (!mlxsw_sp_port->lagged)
return;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- WARN_ON(lag->ref_count == 0);
+ lag = &mlxsw_sp->lags[lag_id];
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
@@ -4579,13 +4595,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
- if (lag->ref_count == 1)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lagged = 0;
- lag->ref_count--;
/* Make sure untagged frames are allowed to ingress */
mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
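In the new LAG model above, an entry with a NULL dev marks a free slot, so allocation is a linear scan bounded by max_lag and destruction only has to clear the pointer. A condensed sketch of that scan; lag_alloc_slot() is a hypothetical helper, and locking and the SLDR register write are omitted:

static int lag_alloc_slot(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->max_lag; i++)
		if (!mlxsw_sp->lags[i].dev)
			return i;	/* first free slot */

	return -EBUSY;			/* all HW LAG entries in use */
}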
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0c9775fa955..898d24232935 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -78,11 +78,6 @@ struct mlxsw_sp_span_entry;
enum mlxsw_sp_l3proto;
union mlxsw_sp_l3addr;
-struct mlxsw_sp_upper {
- struct net_device *dev;
- unsigned int ref_count;
-};
-
enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_SUBPORT,
MLXSW_SP_RIF_TYPE_VLAN,
@@ -136,6 +131,7 @@ struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
struct mlxsw_sp_pgt;
+struct mlxsw_sp_lag;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -164,7 +160,8 @@ struct mlxsw_sp {
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
- struct mlxsw_sp_upper *lags;
+ struct mlxsw_sp_lag *lags;
+ u16 max_lag;
struct mlxsw_sp_port_mapping *port_mapping;
struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
@@ -257,12 +254,6 @@ struct mlxsw_sp_fid_core_ops {
void (*fini)(struct mlxsw_sp *mlxsw_sp);
};
-static inline struct mlxsw_sp_upper *
-mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
-{
- return &mlxsw_sp->lags[lag_id];
-}
-
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 7c59c8a13584..b01b000bc71c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset {
struct rhash_head ht_node; /* Member of acl HT */
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int min_prio;
unsigned int max_prio;
unsigned long priv[];
@@ -99,7 +100,7 @@ static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
/* We hold a reference on ruleset ourselves */
- return ruleset->ref_count == 2;
+ return refcount_read(&ruleset->ref_count) == 2;
}
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
@@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
ruleset = kzalloc(alloc_size, GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
- ruleset->ref_count = 1;
+ refcount_set(&ruleset->ref_count, 1);
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
@@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
- ruleset->ref_count++;
+ refcount_inc(&ruleset->ref_count);
}
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
- if (--ruleset->ref_count)
+ if (!refcount_dec_and_test(&ruleset->ref_count))
return;
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 50ea1eff02b2..f20052776b3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -155,7 +156,7 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
@@ -176,7 +177,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
unsigned int priority; /* Priority within the vregion and group */
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct mlxsw_sp_acl_tcam_vregion *vregion;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_entry {
@@ -769,7 +770,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
- vregion->ref_count = 1;
+ refcount_set(&vregion->ref_count, 1);
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(vregion->key_info)) {
@@ -856,7 +857,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
*/
return ERR_PTR(-EOPNOTSUPP);
}
- vregion->ref_count++;
+ refcount_inc(&vregion->ref_count);
return vregion;
}
@@ -871,7 +872,7 @@ static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- if (--vregion->ref_count)
+ if (!refcount_dec_and_test(&vregion->ref_count))
return;
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
@@ -924,7 +925,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
INIT_LIST_HEAD(&vchunk->ventry_list);
vchunk->priority = priority;
vchunk->vgroup = vgroup;
- vchunk->ref_count = 1;
+ refcount_set(&vchunk->ref_count, 1);
vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
priority, elusage);
@@ -1008,7 +1009,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
elusage)))
return ERR_PTR(-EINVAL);
- vchunk->ref_count++;
+ refcount_inc(&vchunk->ref_count);
return vchunk;
}
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
@@ -1019,7 +1020,7 @@ static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
- if (--vchunk->ref_count)
+ if (!refcount_dec_and_test(&vchunk->ref_count))
return;
mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7164f9e6370f..87617df694ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -501,7 +501,7 @@ struct mlxsw_sp_rt6 {
struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
- unsigned int ref_count;
+ refcount_t ref_count;
enum mlxsw_sp_l3proto proto;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
@@ -578,7 +578,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count == 0)
+ if (refcount_read(&lpm_tree->ref_count) == 0)
return lpm_tree;
}
return NULL;
@@ -654,7 +654,7 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
sizeof(lpm_tree->prefix_usage));
memset(&lpm_tree->prefix_ref_count, 0,
sizeof(lpm_tree->prefix_ref_count));
- lpm_tree->ref_count = 1;
+ refcount_set(&lpm_tree->ref_count, 1);
return lpm_tree;
err_left_struct_set:
@@ -678,7 +678,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count != 0 &&
+ if (refcount_read(&lpm_tree->ref_count) &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage)) {
@@ -691,14 +691,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
- lpm_tree->ref_count++;
+ refcount_inc(&lpm_tree->ref_count);
}
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ if (!refcount_dec_and_test(&lpm_tree->ref_count))
+ return;
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6c749c148148..6397ff0dc951 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port {
struct mlxsw_sp_bridge_device *bridge_device;
struct list_head list;
struct list_head vlans_list;
- unsigned int ref_count;
+ refcount_t ref_count;
u8 stp_state;
unsigned long flags;
bool mrouter;
@@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
- bridge_port->ref_count = 1;
+ refcount_set(&bridge_port->ref_count, 1);
err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
NULL, NULL, NULL, false, extack);
@@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
if (bridge_port) {
- bridge_port->ref_count++;
+ refcount_inc(&bridge_port->ref_count);
return bridge_port;
}
@@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- if (--bridge_port->ref_count != 0)
+ if (!refcount_dec_and_test(&bridge_port->ref_count))
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 5693784eec5b..443128adbcb6 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -464,7 +464,7 @@ static struct regmap_config regcfg = {
.val_bits = 16,
.max_register = 0xee,
.reg_stride = 2,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_regmap_readable,
.writeable_reg = encx24j600_regmap_writeable,
@@ -485,7 +485,7 @@ static struct regmap_config phycfg = {
.reg_bits = 8,
.val_bits = 16,
.max_register = 0x1f,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_phymap_readable,
.writeable_reg = encx24j600_phymap_writeable,
@@ -513,4 +513,5 @@ int devm_regmap_init_encx24j600(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+MODULE_DESCRIPTION("Microchip ENCX24J600 helpers");
MODULE_LICENSE("GPL");
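Switching the register cache backend from the rbtree to the maple tree is a one-field change in the regmap config; callers of regmap_read()/regmap_write() are unaffected. A hypothetical minimal config for illustration, not the driver's:

#include <linux/regmap.h>

static const struct regmap_config example_cfg = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.max_register	= 0x1f,
	.cache_type	= REGCACHE_MAPLE,	/* was REGCACHE_RBTREE */
};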
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index a2b3f4433ca8..8a6ae171e375 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1055,7 +1055,7 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
}
static int lan743x_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
@@ -1092,7 +1092,7 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter;
struct phy_device *phydev;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 45e209a7d083..bd8aa83b47e5 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1196,7 +1196,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
if (ret < 0) {
netif_err(adapter, drv, adapter->netdev,
- "erro %d SGMII get mode failed\n", ret);
+ "error %d SGMII get mode failed\n", ret);
return ret;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 2f04bc77a118..2801f08bf1c9 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1712,13 +1712,13 @@ bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
struct lan743x_ptp *ptp = &adapter->ptp;
bool result = false;
- spin_lock_bh(&ptp->tx_ts_lock);
+ spin_lock(&ptp->tx_ts_lock);
if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
/* request granted */
ptp->pending_tx_timestamps++;
result = true;
}
- spin_unlock_bh(&ptp->tx_ts_lock);
+ spin_unlock(&ptp->tx_ts_lock);
return result;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
index 41fa2523d91d..5f2cd9a8cf8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
- struct net_device *bond = lan966x->ports[lag]->bond;
+ struct lan966x_port *port = lan966x->ports[lag];
int num_active_ports = 0;
+ struct net_device *bond;
unsigned long bond_mask;
u8 aggr_idx[16];
- if (!bond || (visited & BIT(lag)))
+ if (!port || !port->bond || (visited & BIT(lag)))
continue;
+ bond = port->bond;
bond_mask = lan966x_lag_get_mask(lan966x, bond);
for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
struct lan966x_port *port = lan966x->ports[p];
+ if (!port)
+ continue;
+
lan_wr(ANA_PGID_PGID_SET(bond_mask),
lan966x, ANA_PGID(p));
if (port->lag_tx_active)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 92108d354051..2e83bbb9477e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port)
lan966x_taprio_speed_set(port, config->speed);
/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
- * port speed for QSGMII ports.
+ * port speed for QSGMII or SGMII ports.
*/
- if (phy_interface_num_ports(config->portmode) == 4)
+ if (phy_interface_num_ports(config->portmode) == 4 ||
+ config->portmode == PHY_INTERFACE_MODE_SGMII)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
lan_wr(config->duplex | mode,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
index ac525ff1503e..3a01e13bd10b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -25,6 +25,8 @@ static void lan966x_vcap_is1_port_keys(struct lan966x_port *port,
for (int l = 0; l < admin->lookups; ++l) {
out->prf(out->dst, "\n Lookup %d: ", l);
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, l));
+
out->prf(out->dst, "\n other: ");
switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
case VCAP_IS1_PS_OTHER_NORMAL:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 4af285918ea2..75868b3f548e 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
list) {
if ((vid == 0 || mact_entry->vid == vid) &&
ether_addr_equal(addr, mact_entry->mac)) {
+ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+
list_del(&mact_entry->list);
devm_kfree(sparx5->dev, mact_entry);
-
- sparx5_mact_forget(sparx5, addr, mact_entry->vid);
}
}
mutex_unlock(&sparx5->mact_lock);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index d1f7fc8b1b71..3c066b62e689 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sparx5);
sparx5->pdev = pdev;
sparx5->dev = &pdev->dev;
+ spin_lock_init(&sparx5->tx_lock);
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 6f565c0c0c3d..316fed5f2735 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -280,6 +280,7 @@ struct sparx5 {
int xtr_irq;
/* Frame DMA */
int fdma_irq;
+ spinlock_t tx_lock; /* lock for frame transmission */
struct sparx5_rx rx;
struct sparx5_tx tx;
/* PTP */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 6db6ac6a3bbc..ac7e1cffbcec 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
+ spin_unlock(&sparx5->tx_lock);
if (ret == -EBUSY)
goto busy;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index d33b27214539..1332db9a08eb 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1249,15 +1249,47 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
+static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+{
+ const struct cpumask *next, *prev = cpu_none_mask;
+ cpumask_var_t cpus __free(free_cpumask_var);
+ int cpu, weight;
+
+ if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ rcu_read_lock();
+ for_each_numa_hop_mask(next, node) {
+ weight = cpumask_weight_andnot(next, prev);
+ while (weight > 0) {
+ cpumask_andnot(cpus, next, prev);
+ for_each_cpu(cpu, cpus) {
+ if (len-- == 0)
+ goto done;
+ irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+ }
+ }
+ prev = next;
+ }
+done:
+ rcu_read_unlock();
+ return 0;
+}
+
static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
- unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
unsigned int max_irqs, cpu;
- int nvec, irq;
+ int start_irq_index = 1;
+ int nvec, *irqs, irq;
int err, i = 0, j;
+ cpus_read_lock();
+ max_queues_per_port = num_online_cpus();
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1265,8 +1297,18 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
max_irqs = max_queues_per_port + 1;
nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0)
+ if (nvec < 0) {
+ cpus_read_unlock();
return nvec;
+ }
+ if (nvec <= num_online_cpus())
+ start_irq_index = 0;
+
+ irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
+ if (!irqs) {
+ err = -ENOMEM;
+ goto free_irq_vector;
+ }
gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
GFP_KERNEL);
@@ -1294,17 +1336,41 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq;
}
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ if (!i) {
+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_irq;
+
+			/* If the number of IRQs is one more than the number
+			 * of online CPUs, assign IRQ0 (the hwc IRQ) and IRQ1
+			 * to the same CPU; otherwise IRQ0 and IRQ1 get
+			 * different CPUs.
+			 * Use cpumask_local_spread() instead of
+			 * cpumask_first() for the node, because the node can
+			 * be memory-only.
+			 */
+ if (start_irq_index) {
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ } else {
+ irqs[start_irq_index] = irq;
+ }
+ } else {
+ irqs[i - start_irq_index] = irq;
+ err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
+ gic->name, gic);
+ if (err)
+ goto free_irq;
+ }
}
+ err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
+ if (err)
+ goto free_irq;
+
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
-
+ cpus_read_unlock();
return 0;
free_irq:
@@ -1317,8 +1383,10 @@ free_irq:
}
kfree(gc->irq_contexts);
+ kfree(irqs);
gc->irq_contexts = NULL;
free_irq_vector:
+ cpus_read_unlock();
pci_free_irq_vectors(pdev);
return err;
}
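irq_setup() above walks successively more distant NUMA hop masks from the device's node and pins each IRQ to one SMT sibling group, never reusing a group within a hop. The flattened approximation below shows the intent with cpumask_local_spread(), which also enumerates CPUs by increasing NUMA distance but, unlike irq_setup(), does not skip siblings of already-used cores; spread_irqs_near_node() is a hypothetical name:

static void spread_irqs_near_node(unsigned int *irqs, unsigned int len,
				  int node)
{
	unsigned int i, cpu;

	for (i = 0; i < len; i++) {
		/* enumerate CPUs in order of increasing NUMA distance */
		cpu = cpumask_local_spread(i, node);
		irq_set_affinity_and_hint(irqs[i],
					  topology_sibling_cpumask(cpu));
	}
}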
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 56ccbd4c37fe..ed2fb44500b0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -3078,4 +3078,5 @@ void ocelot_deinit_port(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_deinit_port);
+MODULE_DESCRIPTION("Microsemi Ocelot switch family library");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2b383d92d7f5..2c3f62907958 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -460,7 +460,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
set_tun->ttl = ip6_dst_hoplimit(dst);
dst_release(dst);
} else {
- set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit);
}
#endif
} else {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 2967bab72505..15180538b80a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_
mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
return;
+ /* Both struct tcphdr and struct udphdr start with
+ * __be16 source;
+ * __be16 dest;
+ * so we can use the same code for both.
+ */
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
- mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
- mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+ if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) {
+ mangle_action->mangle.val =
+ (__force u32)cpu_to_be32(mangle_action->mangle.val << 16);
+			/* The mangle action mask is an inverse mask, so
+			 * fill the destination port bits with 0xFFFF
+			 * instead of using a rotate-left operation.
+			 */
+ mangle_action->mangle.mask =
+ (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF);
+ }
+ if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) {
+ mangle_action->mangle.offset = 0;
+ mangle_action->mangle.val =
+ (__force u32)cpu_to_be32(mangle_action->mangle.val);
+ mangle_action->mangle.mask =
+ (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ }
return;
default:
@@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct nfp_fl_ct_flow_entry *ct_entry;
+ struct flow_action_entry *ct_goto;
struct nfp_fl_ct_zone_entry *zt;
+ struct flow_action_entry *act;
bool wildcarded = false;
struct flow_match_ct ct;
- struct flow_action_entry *ct_goto;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_REDIRECT_INGRESS:
+ case FLOW_ACTION_MIRRED:
+ case FLOW_ACTION_MIRRED_INGRESS:
+ if (act->dev->rtnl_link_ops &&
+ !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: out port is openvswitch internal port");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+ }
flow_rule_match_ct(rule, &ct);
if (!ct.mask->ct_zone) {
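Because the source and destination ports are the first two __be16 fields of both TCP and UDP headers, a 16-bit port rewrite has to be re-expressed as one 32-bit mangle word at offset 0: the source port lands in the upper half, and the inverse mask keeps the untouched half all-ones. A minimal sketch of that packing for the source-port case; pack_sport_mangle() is a hypothetical helper:

static void pack_sport_mangle(u32 *val, u32 *mask,
			      u16 new_sport, u16 sport_mask)
{
	*val  = (__force u32)cpu_to_be32((u32)new_sport << 16);
	/* inverse mask: set bits are preserved in the packet */
	*mask = (__force u32)cpu_to_be32((u32)sport_mask << 16 | 0xFFFF);
}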
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index e522845c7c21..0d7d138d6e0d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
- if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
if (entry->bridge_count ||
!nfp_flower_is_supported_bridge(netdev)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 3b3210d823e8..f28e769e6fda 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2776,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 33b4c2856316..3f10c5365c80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
const u32 barcfg_msix_general =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
const u32 barcfg_msix_xpb =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
NFP_CPP_TARGET_ISLAND_XPB);
const u32 barcfg_explicit[4] = {
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7a549b834e97..31f896c4aa26 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1761,7 +1761,7 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
- * Called with read_lock(&dev_base_lock) held for read -
+ * Called with rcu_read_lock() held -
* only synchronized against unregister_netdevice.
*/
static void
@@ -3090,7 +3090,7 @@ static void set_bufsize(struct net_device *dev)
/*
* nv_change_mtu: dev->change_mtu function
- * Called with dev_base_lock held for read.
+ * Called with RTNL held for read.
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index c49aa358e424..6ba8d4aca0a0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic)
bars[i].len = 0;
}
}
+ ionic->num_bars = 0;
}
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
@@ -215,15 +216,17 @@ out:
static void ionic_clear_pci(struct ionic *ionic)
{
- ionic->idev.dev_info_regs = NULL;
- ionic->idev.dev_cmd_regs = NULL;
- ionic->idev.intr_status = NULL;
- ionic->idev.intr_ctrl = NULL;
-
- ionic_unmap_bars(ionic);
- pci_release_regions(ionic->pdev);
+ if (ionic->num_bars) {
+ ionic->idev.dev_info_regs = NULL;
+ ionic->idev.dev_cmd_regs = NULL;
+ ionic->idev.intr_status = NULL;
+ ionic->idev.intr_ctrl = NULL;
+
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ }
- if (atomic_read(&ionic->pdev->enable_cnt) > 0)
+ if (pci_is_enabled(ionic->pdev))
pci_disable_device(ionic->pdev);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 91327ef670c7..c3ae11a48024 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -113,8 +113,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
+ struct dentry *intr_dentry, *stats_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
struct debugfs_blob_wrapper *desc_blob;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 1e7c71f7f081..746072b4dbd0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -319,22 +319,32 @@ do_check_time:
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return (u8)PCI_ERROR_RESPONSE;
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return false;
return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
+ if (!idev->dev_cmd_regs)
+ return;
memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
idev->opcode = cmd->cmd.opcode;
+
+ if (!idev->dev_cmd_regs)
+ return;
+
memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
iowrite32(0, &idev->dev_cmd_regs->done);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
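The NULL checks above close a window during firmware reset when the BARs are unmapped and dev_cmd_regs is NULL; every MMIO accessor must then fail safely instead of dereferencing vanished I/O memory. The guard pattern, applied to a hypothetical accessor:

static u32 example_read_done(struct ionic_dev *idev)
{
	if (!idev->dev_cmd_regs)	/* BARs unmapped during FW reset */
		return 0;
	return ioread32(&idev->dev_cmd_regs->done);
}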
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2667e1cde16b..516db910e8e8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
#include "ionic_if.h"
#include "ionic_regs.h"
@@ -15,9 +16,10 @@
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 64
-#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_DEF_TXRX_DESC 1024
#define IONIC_RX_FILL_THRESHOLD 16
#define IONIC_RX_FILL_DIV 8
+#define IONIC_TSO_DESCS_NEEDED 44 /* 64K TSO @1500B */
#define IONIC_LIFS_MAX 1024
#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
@@ -195,6 +197,11 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
__GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
+ (VLAN_ETH_HLEN + \
+ XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
struct ionic_buf_info {
struct page *page;
dma_addr_t dma_addr;
@@ -222,6 +229,8 @@ struct ionic_desc_info {
struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
ionic_desc_cb cb;
void *cb_arg;
+ struct xdp_frame *xdpf;
+ enum xdp_action act;
};
#define IONIC_QUEUE_NAME_MAX_SZ 16
@@ -256,6 +265,9 @@ struct ionic_queue {
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_rxq_sg_desc *rxq_sgl;
};
+ struct xdp_rxq_info *xdp_rxq_info;
+ struct ionic_queue *partner;
+ bool xdp_flush;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
@@ -368,6 +380,7 @@ typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
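IONIC_XDP_MAX_LINEAR_MTU above caps the MTU that can be served to a non-frags XDP program from a single page: the page must also hold the XDP headroom, the VLAN Ethernet header, and the tail skb_shared_info. Illustrative arithmetic, assuming 4 KiB pages, XDP_PACKET_HEADROOM of 256, VLAN_ETH_HLEN of 18, and an aligned skb_shared_info of 320 bytes on x86-64; the macro computes the real value from the actual constants:

#define EXAMPLE_PAGE_SIZE	4096
#define EXAMPLE_LINEAR_MTU \
	(EXAMPLE_PAGE_SIZE - (18 /* VLAN_ETH_HLEN */ + \
			      256 /* XDP_PACKET_HEADROOM */ + \
			      320 /* SKB_DATA_ALIGN(skb_shared_info) */))
/* = 3502 bytes: the largest MTU one page can serve without frags */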
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index cd3c0b01402e..91183965a6b7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev;
unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ idev = &lif->ionic->idev;
+ if (!idev->dev_info_regs)
+ return;
+
offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
}
static void ionic_get_link_ext_stats(struct net_device *netdev,
@@ -721,6 +726,11 @@ static int ionic_set_channels(struct net_device *netdev,
ionic_init_queue_params(lif, &qparam);
+ if ((ch->rx_count || ch->tx_count) && lif->xdp_prog) {
+ netdev_info(lif->netdev, "Split Tx/Rx interrupts not available when using XDP\n");
+ return -EOPNOTSUPP;
+ }
+
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index 5f40324cd243..3c209c1a2337 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
dl = priv_to_devlink(ionic);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ if (!idev->dev_cmd_regs) {
+ err = -ENXIO;
+ goto err_out;
+ }
+
buf_sz = sizeof(idev->dev_cmd_regs->data);
netdev_dbg(netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index cf2d5ad7b68c..33b1691a4ee5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -46,18 +46,26 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
+static int ionic_xdp_queues_config(struct ionic_lif *lif);
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+
static void ionic_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct ionic_intr_info *intr;
struct dim_cq_moder cur_moder;
+ struct ionic_intr_info *intr;
struct ionic_qcq *qcq;
struct ionic_lif *lif;
+ struct ionic_queue *q;
u32 new_coal;
- cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
- lif = qcq->q.lif;
+ q = &qcq->q;
+ if (q->type == IONIC_QTYPE_RXQ)
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ else
+ cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ lif = q->lif;
new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
new_coal = new_coal ? new_coal : 1;
@@ -422,6 +430,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
+ ionic_xdp_unregister_rxq_info(&qcq->q);
ionic_qcq_intr_free(lif, qcq);
vfree(qcq->cq.info);
@@ -681,7 +690,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
INIT_WORK(&new->dim.work, ionic_dim_work);
- new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
*qcq = new;
@@ -862,8 +871,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.type = q->type,
.ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
- .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
- IONIC_QINIT_F_SG),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
.intr_index = cpu_to_le16(cq->bound_intr->index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
@@ -875,6 +883,13 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
};
int err;
+ q->partner = &lif->txqcqs[q->index]->q;
+ q->partner->partner = q;
+
+ if (!lif->xdp_prog ||
+ (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
+
if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
@@ -1252,8 +1267,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
- ionic_tx_service, NULL, NULL);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -1640,6 +1654,12 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->priv_flags |= IFF_UNICAST_FLT |
IFF_LIVE_ADDR_CHANGE;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
}
@@ -1777,6 +1797,21 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
return err;
}
+static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
+ struct bpf_prog *xdp_prog)
+{
+ if (!xdp_prog)
+ return true;
+
+ if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
+ return true;
+
+ if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
+ return true;
+
+ return false;
+}
+
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1789,8 +1824,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
.mtu = cpu_to_le32(new_mtu),
},
};
+ struct bpf_prog *xdp_prog;
int err;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
+ if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
+ return -EINVAL;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -2166,6 +2206,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
+ err = ionic_xdp_queues_config(lif);
+ if (err)
+ return err;
+
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
@@ -2211,6 +2255,8 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
+ ionic_xdp_queues_config(lif);
+
return err;
}
@@ -2668,11 +2714,151 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+{
+ struct xdp_rxq_info *xi;
+
+ if (!q->xdp_rxq_info)
+ return;
+
+ xi = q->xdp_rxq_info;
+ q->xdp_rxq_info = NULL;
+
+ xdp_rxq_info_unreg(xi);
+ kfree(xi);
+}
+
+static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+{
+ struct xdp_rxq_info *rxq_info;
+ int err;
+
+ rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+ if (!rxq_info)
+ return -ENOMEM;
+
+ err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
+ goto err_out;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
+ xdp_rxq_info_unreg(rxq_info);
+ goto err_out;
+ }
+
+ q->xdp_rxq_info = rxq_info;
+
+ return 0;
+
+err_out:
+ kfree(rxq_info);
+ return err;
+}
+
+static int ionic_xdp_queues_config(struct ionic_lif *lif)
+{
+ unsigned int i;
+ int err;
+
+ if (!lif->rxqcqs)
+ return 0;
+
+ /* There's no need to rework memory if not going to/from NULL program.
+	 * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info.
+ * This way we don't need to keep an *xdp_prog in every queue struct.
+ */
+ if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
+ return 0;
+
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+ struct ionic_queue *q = &lif->rxqcqs[i]->q;
+
+ if (q->xdp_rxq_info) {
+ ionic_xdp_unregister_rxq_info(q);
+ continue;
+ }
+
+ err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
+ if (err) {
+ dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
+ i, err);
+ goto err_out;
+ }
+ }
+
+ return 0;
+
+err_out:
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
+ ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
+
+ return err;
+}
+
+static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ u32 maxfs;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+#define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
+ netdev_info(lif->netdev, XDP_ERR_SPLIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
+#define XDP_ERR_MTU "MTU is too large for XDP without frags support"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
+ netdev_info(lif->netdev, XDP_ERR_MTU);
+ return -EINVAL;
+ }
+
+ maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
+ if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags))
+ maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
+ netdev->max_mtu = maxfs;
+
+ if (!netif_running(netdev)) {
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else {
+ mutex_lock(&lif->queue_lock);
+ ionic_stop_queues_reconfig(lif);
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ionic_xdp_config(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
.ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
+ .ndo_bpf = ionic_xdp,
+ .ndo_xdp_xmit = ionic_xdp_xmit,
.ndo_get_stats64 = ionic_get_stats64,
.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
.ndo_set_features = ionic_set_features,
@@ -2755,6 +2941,8 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
+ swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
swap(a->q_base_pa, b->q_base_pa);
swap(a->q_size, b->q_size);
@@ -3391,9 +3579,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
napi_enable(&qcq->napi);
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3559,7 +3750,10 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
}
- err = ionic_init_nic_features(lif);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ err = ionic_set_nic_features(lif, lif->netdev->features);
+ else
+ err = ionic_init_nic_features(lif);
if (err)
goto err_out_notifyq_deinit;
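The XDP hunks above register one xdp_rxq_info per RX queue and tear it down with the queue; registration is a strict two-step, and a failed memory-model registration must undo the first step. A minimal sketch of the required ordering, with placeholder arguments:

static int example_rxq_xdp_reg(struct xdp_rxq_info *xi,
			       struct net_device *netdev,
			       u32 queue_index, unsigned int napi_id)
{
	int err;

	err = xdp_rxq_info_reg(xi, netdev, queue_index, napi_id);
	if (err)
		return err;

	/* order-0 pages recycled by the driver: MEM_TYPE_PAGE_ORDER0 */
	err = xdp_rxq_info_reg_mem_model(xi, MEM_TYPE_PAGE_ORDER0, NULL);
	if (err)
		xdp_rxq_info_unreg(xi);	/* undo step one on failure */

	return err;
}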
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 61548b3eea93..b4f8692a3ead 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -37,6 +37,7 @@ struct ionic_tx_stats {
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_frames;
};
struct ionic_rx_stats {
@@ -51,6 +52,11 @@ struct ionic_rx_stats {
u64 alloc_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -135,6 +141,12 @@ struct ionic_lif_sw_stats {
u64 hw_rx_over_errors;
u64 hw_rx_missed_errors;
u64 hw_tx_aborted_errors;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
+ u64 xdp_frames;
};
enum ionic_lif_state_flags {
@@ -230,6 +242,7 @@ struct ionic_lif {
struct ionic_phc *phc;
struct dentry *dentry;
+ struct bpf_prog *xdp_prog;
};
struct ionic_phc {
@@ -314,7 +327,7 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
{
- return unlikely(q->features & IONIC_TXQ_F_HWSTAMP);
+ return q->features & IONIC_TXQ_F_HWSTAMP;
}
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 165ab08ad2dd..29b4d039bbce 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -15,7 +15,7 @@
#include "ionic_debugfs.h"
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
-MODULE_AUTHOR("Pensando Systems, Inc");
+MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
MODULE_LICENSE("GPL");
static const char *ionic_error_to_str(enum ionic_status_code code)
@@ -416,6 +416,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
+ if (!idev->dev_cmd_regs)
+ return;
+
iowrite32(0, &idev->dev_cmd_regs->doorbell);
memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 1f6022fb7679..0107599a9dd4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -27,6 +27,12 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(hw_rx_over_errors),
IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+ IONIC_LIF_STAT_DESC(xdp_drop),
+ IONIC_LIF_STAT_DESC(xdp_aborted),
+ IONIC_LIF_STAT_DESC(xdp_pass),
+ IONIC_LIF_STAT_DESC(xdp_tx),
+ IONIC_LIF_STAT_DESC(xdp_redirect),
+ IONIC_LIF_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_port_stats_desc[] = {
@@ -135,6 +141,7 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(csum_none),
IONIC_TX_STAT_DESC(csum),
IONIC_TX_STAT_DESC(vlan_inserted),
+ IONIC_TX_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -149,6 +156,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(hwstamp_invalid),
IONIC_RX_STAT_DESC(dropped),
IONIC_RX_STAT_DESC(vlan_stripped),
+ IONIC_RX_STAT_DESC(xdp_drop),
+ IONIC_RX_STAT_DESC(xdp_aborted),
+ IONIC_RX_STAT_DESC(xdp_pass),
+ IONIC_RX_STAT_DESC(xdp_tx),
+ IONIC_RX_STAT_DESC(xdp_redirect),
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
@@ -171,6 +183,7 @@ static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num,
stats->tx_csum += txstats->csum;
stats->tx_hwstamp_valid += txstats->hwstamp_valid;
stats->tx_hwstamp_invalid += txstats->hwstamp_invalid;
+ stats->xdp_frames += txstats->xdp_frames;
}
static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
@@ -185,6 +198,11 @@ static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
stats->rx_csum_error += rxstats->csum_error;
stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
+ stats->xdp_drop += rxstats->xdp_drop;
+ stats->xdp_aborted += rxstats->xdp_aborted;
+ stats->xdp_pass += rxstats->xdp_pass;
+ stats->xdp_tx += rxstats->xdp_tx;
+ stats->xdp_redirect += rxstats->xdp_redirect;
}
static void ionic_get_lif_stats(struct ionic_lif *lif,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 54cd96b035d6..6d168ad8c84f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -5,11 +5,27 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len);
+
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
+ size_t offset, size_t len);
+
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info);
+
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
+ struct ionic_cq_info *cq_info,
+ void *cb_arg);
+
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
@@ -24,8 +40,8 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
- unsigned long now, then, dif;
struct netdev_queue *netdev_txq;
+ unsigned long now, then, dif;
struct net_device *netdev;
netdev = q->lif->netdev;
@@ -83,15 +99,30 @@ bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
return true;
}
-static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
+static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
+ struct ionic_queue *q)
{
- return netdev_get_tx_queue(q->lif->netdev, q->index);
+ return netdev_get_tx_queue(netdev, q->index);
+}
+
+static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
+{
+ return page_address(buf_info->page) + buf_info->page_offset;
+}
+
+static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
+{
+ return buf_info->dma_addr + buf_info->page_offset;
+}
+
+static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+{
+ return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
}
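/* Worked example for the helper above (hypothetical numbers, assuming a
 * 4 KiB IONIC_PAGE_SIZE): page_offset 0 yields a 4096-byte buffer and
 * page_offset 3072 yields the remaining 1024 bytes; IONIC_MAX_BUF_LEN
 * only caps the result on configurations with larger pages.
 */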
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_rx_stats *stats;
struct device *dev;
struct page *page;
@@ -101,14 +132,14 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
if (unlikely(!buf_info)) {
net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
return -EINVAL;
}
page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
stats->alloc_err++;
return -ENOMEM;
}
@@ -118,7 +149,7 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
__free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
stats->dma_map_err++;
return -EIO;
}
@@ -132,12 +163,11 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
static void ionic_rx_page_free(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
struct device *dev = q->dev;
if (unlikely(!buf_info)) {
net_err_ratelimited("%s: %s invalid buf_info in free\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
return;
}
@@ -162,7 +192,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
if (page_to_nid(buf_info->page) != numa_mem_id())
return false;
- size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
+ size = ALIGN(used, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
buf_info->page_offset += size;
if (buf_info->page_offset >= IONIC_PAGE_SIZE)
return false;
@@ -172,52 +202,55 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
return true;
}
-static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
+static struct sk_buff *ionic_rx_frags(struct net_device *netdev,
+ struct ionic_queue *q,
struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ unsigned int headroom,
+ unsigned int len,
+ unsigned int num_sg_elems,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
struct ionic_rx_stats *stats;
struct device *dev = q->dev;
struct sk_buff *skb;
unsigned int i;
u16 frag_len;
- u16 len;
stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
stats->alloc_err++;
return NULL;
}
- i = comp->num_sg_elems + 1;
+ i = num_sg_elems + 1;
do {
if (unlikely(!buf_info->page)) {
dev_kfree_skb(skb);
return NULL;
}
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
+ if (headroom)
+ frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ else
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
len -= frag_len;
- dma_sync_single_for_cpu(dev,
- buf_info->dma_addr + buf_info->page_offset,
- frag_len, DMA_FROM_DEVICE);
+ if (!synced)
+ dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
+ headroom, frag_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset, frag_len,
- IONIC_PAGE_SIZE);
+ buf_info->page, buf_info->page_offset + headroom,
+ frag_len, IONIC_PAGE_SIZE);
if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
dma_unmap_page(dev, buf_info->dma_addr,
@@ -225,6 +258,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
buf_info->page = NULL;
}
+ /* only needed on the first buffer */
+ if (headroom)
+ headroom = 0;
+
buf_info++;
i--;
@@ -233,26 +270,26 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
return skb;
}
-static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
+static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
+ struct ionic_queue *q,
struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ unsigned int headroom,
+ unsigned int len,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
struct ionic_rx_stats *stats;
struct device *dev = q->dev;
struct sk_buff *skb;
- u16 len;
stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
stats->alloc_err++;
return NULL;
}
@@ -262,18 +299,333 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return NULL;
}
- dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
- dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
+ if (!synced)
+ dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
+ dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, q->lif->netdev);
+ skb->protocol = eth_type_trans(skb, netdev);
return skb;
}
+static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info)
+{
+ unsigned int nbufs = desc_info->nbufs;
+ struct ionic_buf_info *buf_info;
+ struct device *dev = q->dev;
+ int i;
+
+ if (!nbufs)
+ return;
+
+ buf_info = desc_info->bufs;
+ dma_unmap_single(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+
+ buf_info++;
+ for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
+ dma_unmap_page(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+ }
+
+ if (desc_info->act == XDP_REDIRECT)
+ xdp_return_frame(desc_info->xdpf);
+
+ desc_info->nbufs = 0;
+ desc_info->xdpf = NULL;
+ desc_info->act = 0;
+}
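/* Sketch of the buffer-ownership rule applied above (not part of the
 * patch): for XDP_TX the driver owns the rx page it transmitted from and
 * frees it here; for XDP_REDIRECT the frame belongs to whoever built it,
 * so it is handed back through xdp_return_frame() instead.
 */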
+
+static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+ enum xdp_action act, struct page *page, int off,
+ bool ring_doorbell)
+{
+ struct ionic_desc_info *desc_info;
+ struct ionic_buf_info *buf_info;
+ struct ionic_tx_stats *stats;
+ struct ionic_txq_desc *desc;
+ size_t len = frame->len;
+ dma_addr_t dma_addr;
+ u64 cmd;
+
+ desc_info = &q->info[q->head_idx];
+ desc = desc_info->txq_desc;
+ buf_info = desc_info->bufs;
+ stats = q_to_tx_stats(q);
+
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (dma_mapping_error(q->dev, dma_addr)) {
+ stats->dma_map_err++;
+ return -EIO;
+ }
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = len;
+ buf_info->page = page;
+ buf_info->page_offset = off;
+
+ desc_info->nbufs = 1;
+ desc_info->xdpf = frame;
+ desc_info->act = act;
+
+ if (xdp_frame_has_frags(frame)) {
+ struct ionic_txq_sg_elem *elem;
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+ int i;
+
+ bi = &buf_info[1];
+ sinfo = xdp_get_shared_info_from_frame(frame);
+ frag = sinfo->frags;
+ elem = desc_info->txq_sg_desc->elems;
+ for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
+ dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+ if (dma_mapping_error(q->dev, dma_addr)) {
+ stats->dma_map_err++;
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
+ bi->dma_addr = dma_addr;
+ bi->len = skb_frag_size(frag);
+ bi->page = skb_frag_page(frag);
+
+ elem->addr = cpu_to_le64(bi->dma_addr);
+ elem->len = cpu_to_le16(bi->len);
+ elem++;
+
+ desc_info->nbufs++;
+ }
+ }
+
+ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
+ 0, (desc_info->nbufs - 1), buf_info->dma_addr);
+ desc->cmd = cpu_to_le64(cmd);
+ desc->len = cpu_to_le16(len);
+ desc->csum_start = 0;
+ desc->csum_offset = 0;
+
+ stats->xdp_frames++;
+ stats->pkts++;
+ stats->bytes += len;
+
+ ionic_txq_post(q, ring_doorbell, ionic_tx_clean, NULL);
+
+ return 0;
+}
+
+int ionic_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **xdp_frames, u32 flags)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ int nxmit;
+ int space;
+ int cpu;
+ int qi;
+
+ if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* The AdminQ is assumed to live on cpu 0, while the TxRx queue pairs
+ * 0..n-1 are affinitized to cpus 1..n. We try to stay with that
+ * affinitization here, but of course irqbalance and friends may have
+ * juggled things anyway, so we have to handle the cpu 0 case as well.
+ */
+ cpu = smp_processor_id();
+ qi = cpu ? (cpu - 1) % lif->nxqs : cpu;
+
+ txq = &lif->txqcqs[qi]->q;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, cpu);
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ return -EIO;
+ }
+
+ space = min_t(int, n, ionic_q_space_avail(txq));
+ for (nxmit = 0; nxmit < space; nxmit++) {
+ if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
+ XDP_REDIRECT,
+ virt_to_page(xdp_frames[nxmit]->data),
+ 0, false)) {
+ break;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
+ txq->dbval | txq->head_idx);
+
+ netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 4, 4);
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
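/* Sketch of the queue-selection math above (hypothetical helper, not
 * part of the patch):
 *
 *	static unsigned int example_xdp_txq_pick(unsigned int cpu,
 *						 unsigned int nxqs)
 *	{
 *		return cpu ? (cpu - 1) % nxqs : 0;
 *	}
 *
 * e.g. with nxqs == 4: cpu 0 -> queue 0, cpu 1 -> queue 0,
 * cpu 2 -> queue 1, cpu 5 -> queue 0.
 */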
+
+static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ struct net_device *netdev,
+ struct bpf_prog *xdp_prog,
+ struct ionic_queue *rxq,
+ struct ionic_buf_info *buf_info,
+ int len)
+{
+ u32 xdp_action = XDP_ABORTED;
+ struct xdp_buff xdp_buf;
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ struct xdp_frame *xdpf;
+ int remain_len;
+ int frag_len;
+ int err = 0;
+
+ xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
+ frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
+ XDP_PACKET_HEADROOM, frag_len, false);
+
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
+ XDP_PACKET_HEADROOM, len,
+ DMA_FROM_DEVICE);
+
+ prefetchw(&xdp_buf.data_hard_start);
+
+ /* The MTU is limited to one buffer's worth when !xdp_has_frags,
+ * so if the received length is bigger than one buffer we know
+ * there is frag info to gather.
+ */
+ remain_len = len - frag_len;
+ if (remain_len) {
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+
+ bi = buf_info;
+ sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(&xdp_buf);
+
+ do {
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
+ err = -ENOSPC;
+ goto out_xdp_abort;
+ }
+
+ frag = &sinfo->frags[sinfo->nr_frags];
+ sinfo->nr_frags++;
+ bi++;
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
+ 0, frag_len, DMA_FROM_DEVICE);
+ skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
+ sinfo->xdp_frags_size += frag_len;
+ remain_len -= frag_len;
+
+ if (page_is_pfmemalloc(bi->page))
+ xdp_buff_set_frag_pfmemalloc(&xdp_buf);
+ } while (remain_len > 0);
+ }
+
+ xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
+
+ switch (xdp_action) {
+ case XDP_PASS:
+ stats->xdp_pass++;
+ return false; /* false = we didn't consume the packet */
+
+ case XDP_DROP:
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_drop++;
+ break;
+
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp_buf);
+ if (!xdpf)
+ goto out_xdp_abort;
+
+ txq = rxq->partner;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, smp_processor_id());
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ goto out_xdp_abort;
+ }
+
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
+ buf_info->page,
+ buf_info->page_offset,
+ true);
+ __netif_tx_unlock(nq);
+ if (err) {
+ netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
+ goto out_xdp_abort;
+ }
+ stats->xdp_tx++;
+
+ /* the Tx completion will free the buffers */
+ break;
+
+ case XDP_REDIRECT:
+ /* unmap the pages before handing them to a different device */
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
+ if (err) {
+ netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
+ goto out_xdp_abort;
+ }
+ buf_info->page = NULL;
+ rxq->xdp_flush = true;
+ stats->xdp_redirect++;
+ break;
+
+ case XDP_ABORTED:
+ default:
+ goto out_xdp_abort;
+ }
+
+ return true;
+
+out_xdp_abort:
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_aborted++;
+
+ return true;
+}
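/* Note on the return convention above (not part of the patch): true
 * means the packet was consumed by XDP (dropped, aborted, transmitted,
 * or redirected) and rx cleanup must not build an skb for it; only
 * XDP_PASS returns false and lets the normal rx path continue.
 */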
+
static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info,
@@ -283,7 +635,10 @@ static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
struct ionic_rxq_comp *comp;
+ struct bpf_prog *xdp_prog;
+ unsigned int headroom;
struct sk_buff *skb;
+ u16 len;
comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
@@ -294,13 +649,22 @@ static void ionic_rx_clean(struct ionic_queue *q,
return;
}
+ len = le16_to_cpu(comp->len);
stats->pkts++;
- stats->bytes += le16_to_cpu(comp->len);
+ stats->bytes += len;
+
+ xdp_prog = READ_ONCE(q->lif->xdp_prog);
+ if (xdp_prog &&
+ ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
+ return;
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
- skb = ionic_rx_copybreak(q, desc_info, comp);
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ if (len <= q->lif->rx_copybreak)
+ skb = ionic_rx_copybreak(netdev, q, desc_info,
+ headroom, len, !!xdp_prog);
else
- skb = ionic_rx_frags(q, desc_info, comp);
+ skb = ionic_rx_frags(netdev, q, desc_info, headroom, len,
+ comp->num_sg_elems, !!xdp_prog);
if (unlikely(!skb)) {
stats->dropped++;
@@ -367,7 +731,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ if (len <= q->lif->rx_copybreak)
napi_gro_receive(&qcq->napi, skb);
else
napi_gro_frags(&qcq->napi);
@@ -424,8 +788,9 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int frag_len;
unsigned int nfrags;
unsigned int n_fill;
- unsigned int i, j;
unsigned int len;
+ unsigned int i;
+ unsigned int j;
n_fill = ionic_q_space_avail(q);
@@ -434,9 +799,12 @@ void ionic_rx_fill(struct ionic_queue *q)
if (n_fill < fill_threshold)
return;
- len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ len = netdev->mtu + VLAN_ETH_HLEN;
for (i = n_fill; i; i--) {
+ unsigned int headroom;
+ unsigned int buf_len;
+
nfrags = 0;
remain_len = len;
desc_info = &q->info[q->head_idx];
@@ -451,10 +819,18 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- /* fill main descriptor - buf[0] */
- desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
+ /* Fill the main descriptor - buf[0].
+ * XDP uses space in the first buffer, so account for
+ * headroom, tailroom, and the IP header in the first frag size.
+ */
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ if (q->xdp_rxq_info)
+ buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+ else
+ buf_len = ionic_rx_buf_size(buf_info);
+ frag_len = min_t(u16, len, buf_len);
+
+ desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -472,10 +848,8 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE -
- buf_info->page_offset));
+ sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -576,8 +950,10 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
lif = cq->bound_q->lif;
idev = &lif->ionic->idev;
- work_done = ionic_cq_service(cq, budget,
- ionic_tx_service, NULL, NULL);
+ work_done = ionic_tx_cq_service(cq, budget);
+
+ if (unlikely(!budget))
+ return budget;
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
@@ -598,6 +974,14 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
return work_done;
}
+static void ionic_xdp_do_flush(struct ionic_cq *cq)
+{
+ if (cq->bound_q->xdp_flush) {
+ xdp_do_flush();
+ cq->bound_q->xdp_flush = false;
+ }
+}
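/* Ordering note (sketch, not part of the patch): xdp_do_redirect() only
 * queues frames on per-cpu bulk lists, so each NAPI poll below flushes
 * them once after servicing the cq and before completing the poll:
 *
 *	work_done = ionic_cq_service(cq, budget, ionic_rx_service, ...);
 *	ionic_rx_fill(cq->bound_q);
 *	ionic_xdp_do_flush(cq);
 *	... napi_complete_done(napi, work_done) ...
 */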
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -607,6 +991,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
+ if (unlikely(!budget))
+ return budget;
+
lif = cq->bound_q->lif;
idev = &lif->ionic->idev;
@@ -615,6 +1002,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
+ ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -653,14 +1041,17 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
- ionic_tx_service, NULL, NULL);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
+
+ if (unlikely(!budget))
+ return budget;
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
ionic_rx_fill(rxcq->bound_q);
+ ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -693,7 +1084,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
- q->lif->netdev->name, q->name);
+ dev_name(dev), q->name);
stats->dma_map_err++;
return 0;
}
@@ -711,7 +1102,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
- q->lif->netdev->name, q->name);
+ dev_name(dev), q->name);
stats->dma_map_err++;
}
return dma_addr;
@@ -794,16 +1185,23 @@ static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
struct sk_buff *skb = cb_arg;
- u16 qi;
+
+ if (desc_info->xdpf) {
+ ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ stats->clean++;
+
+ if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
+ netif_wake_subqueue(q->lif->netdev, q->index);
+
+ return;
+ }
ionic_tx_desc_unmap_bufs(q, desc_info);
if (!skb)
return;
- qi = skb_get_queue_mapping(skb);
-
- if (ionic_txq_hwstamp_enabled(q)) {
+ if (unlikely(ionic_txq_hwstamp_enabled(q))) {
if (cq_info) {
struct skb_shared_hwtstamps hwts = {};
__le64 *cq_desc_hwstamp;
@@ -828,24 +1226,22 @@ static void ionic_tx_clean(struct ionic_queue *q,
stats->hwstamp_invalid++;
}
}
-
- } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
- netif_wake_subqueue(q->lif->netdev, qi);
}
desc_info->bytes = skb->len;
stats->clean++;
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, 1);
}
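/* Note (not part of the patch): napi_consume_skb(skb, 1) with a nonzero
 * budget frees the skb through the per-cpu NAPI cache; with budget 0 it
 * falls back to dev_consume_skb_any(), which is the path for callers
 * outside NAPI context.
 */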
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info,
+ unsigned int *total_pkts, unsigned int *total_bytes)
{
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
struct ionic_txq_comp *comp;
- int bytes = 0;
- int pkts = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
u16 index;
comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
@@ -870,19 +1266,52 @@ bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index));
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ (*total_pkts) += pkts;
+ (*total_bytes) += bytes;
return true;
}
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+{
+ struct ionic_cq_info *cq_info;
+ unsigned int work_done = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
+
+ if (work_to_do == 0)
+ return 0;
+
+ cq_info = &cq->info[cq->tail_idx];
+ while (ionic_tx_service(cq, cq_info, &pkts, &bytes)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ cq_info = &cq->info[cq->tail_idx];
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+
+ if (work_done) {
+ struct ionic_queue *q = cq->bound_q;
+
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
+ pkts, bytes,
+ ionic_q_space_avail(q),
+ IONIC_TSO_DESCS_NEEDED);
+ }
+
+ return work_done;
+}
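/* Sketch of the stop/wake pairing used in this patch (helpers from
 * <net/netdev_queues.h>): the xmit path stops the queue when descriptors
 * run low and the completion path above wakes it once enough space is
 * back, with BQL accounting folded in:
 *
 *	xmit:	netif_txq_maybe_stop(txq, ionic_q_space_avail(q),
 *				     ndescs, ndescs);
 *	clean:	netif_txq_completed_wake(txq, pkts, bytes,
 *					 ionic_q_space_avail(q),
 *					 IONIC_TSO_DESCS_NEEDED);
 */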
+
void ionic_tx_flush(struct ionic_cq *cq)
{
struct ionic_dev *idev = &cq->lif->ionic->idev;
u32 work_done;
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_tx_service, NULL, NULL);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs);
if (work_done)
ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
@@ -908,8 +1337,12 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info->cb_arg = NULL;
}
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);
+
+ netdev_tx_completed_queue(ndq, pkts, bytes);
+ netdev_tx_reset_queue(ndq);
+ }
}
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
@@ -957,7 +1390,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
-static void ionic_tx_tso_post(struct ionic_queue *q,
+static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
@@ -986,15 +1419,16 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
if (start) {
skb_tx_timestamp(skb);
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
ionic_txq_post(q, false, ionic_tx_clean, skb);
} else {
ionic_txq_post(q, done, NULL, NULL);
}
}
-static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_desc_info *desc_info;
@@ -1102,7 +1536,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(q, desc_info, skb,
+ ionic_tx_tso_post(netdev, q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
@@ -1212,10 +1646,12 @@ static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
stats->frags += skb_shinfo(skb)->nr_frags;
}
-static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
struct ionic_desc_info *desc_info = &q->info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ bool ring_dbell = true;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
@@ -1233,16 +1669,22 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts++;
stats->bytes += skb->len;
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(netdev, q);
+
+ if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
+ netif_tx_stop_queue(ndq);
+ ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
+ netdev_xmit_more());
+ }
+ ionic_txq_post(q, ring_dbell, ionic_tx_clean, skb);
return 0;
}
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
bool too_many_frags = false;
skb_frag_t *frag;
int desc_bufs;
@@ -1258,17 +1700,20 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
/* Each desc is mss long max, so a descriptor for each gso_seg */
if (skb_is_gso(skb)) {
ndescs = skb_shinfo(skb)->gso_segs;
+ if (!nr_frags)
+ return ndescs;
} else {
ndescs = 1;
- if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
+ if (!nr_frags)
+ return ndescs;
+
+ if (unlikely(nr_frags > q->max_sg_elems)) {
too_many_frags = true;
goto linearize;
}
- }
- /* If non-TSO, or no frags to check, we're done */
- if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
return ndescs;
+ }
/* We need to scan the skb to be sure that none of the MTU sized
* packets in the TSO will require more sgs per descriptor than we
@@ -1316,6 +1761,8 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
linearize:
if (too_many_frags) {
+ struct ionic_tx_stats *stats = q_to_tx_stats(q);
+
err = skb_linearize(skb);
if (err)
return err;
@@ -1325,30 +1772,11 @@ linearize:
return ndescs;
}
-static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-{
- int stopped = 0;
-
- if (unlikely(!ionic_q_has_space(q, ndescs))) {
- netif_stop_subqueue(q->lif->netdev, q->index);
- stopped = 1;
-
- /* Might race with ionic_tx_clean, check again */
- smp_rmb();
- if (ionic_q_has_space(q, ndescs)) {
- netif_wake_subqueue(q->lif->netdev, q->index);
- stopped = 0;
- }
- }
-
- return stopped;
-}
-
static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_queue *q = &lif->hwstamp_txq->q;
+ struct ionic_queue *q;
int err, ndescs;
/* Does not stop/start txq, because we post to a separate tx queue
@@ -1356,6 +1784,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
* the timestamping queue, it is dropped.
*/
+ q = &lif->hwstamp_txq->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (unlikely(ndescs < 0))
goto err_out_drop;
@@ -1365,9 +1794,9 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
@@ -1405,23 +1834,19 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (ndescs < 0)
goto err_out_drop;
- if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
+ if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
+ ionic_q_space_avail(q),
+ ndescs, ndescs))
return NETDEV_TX_BUSY;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
- /* Stop the queue if there aren't descriptors for the next packet.
- * Since our SG lists per descriptor take care of most of the possible
- * fragmentation, we don't need to have many descriptors available.
- */
- ionic_maybe_stop_tx(q, 4);
-
return NETDEV_TX_OK;
err_out_drop:
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index d7cbaad8a6fb..68228bb8c119 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -15,6 +15,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags);
#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 35ec9aab3dc7..51fa880eaf6c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1186,7 +1186,6 @@ static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
- capability = 0;
/* NX2031 always had MN */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
@@ -1197,7 +1196,6 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
-
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT)
return 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5a5dbbb8d8aa..9a1660a12c57 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1794,8 +1794,6 @@ qed_rdma_create_srq(void *rdma_cxt,
goto err;
opaque_fid = p_hwfn->hw_info.opaque_fid;
-
- opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 0e240b5ab8d4..ae3ebf0cf999 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1776,7 +1776,7 @@ static int qede_get_tunable(struct net_device *dev,
return 0;
}
-static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
@@ -1789,18 +1789,26 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
- edata->advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.adv_caps & QED_EEE_10G_ADV)
- edata->advertised |= ADVERTISED_10000baseT_Full;
- if (current_link.sup_caps & QED_EEE_1G_ADV)
- edata->supported = ADVERTISED_1000baseT_Full;
- if (current_link.sup_caps & QED_EEE_10G_ADV)
- edata->supported |= ADVERTISED_10000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV)
- edata->lp_advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV)
- edata->lp_advertised |= ADVERTISED_10000baseT_Full;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_10G_ADV);
edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
edata->eee_enabled = current_link.eee.enable;
@@ -1810,11 +1818,14 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
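/* Conversion note (sketch, not part of the patch): ethtool_keee carries
 * full linkmode bitmaps instead of the legacy u32 ADVERTISED_* masks, so
 * each capability becomes a conditional bit edit:
 *
 *	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
 *			 edata->advertised, hw_adv_1g);
 *
 * linkmode_mod_bit(nr, addr, set) from <linux/linkmode.h> sets the bit
 * when set is nonzero and clears it otherwise; hw_adv_1g stands in for
 * the QED_EEE_1G_ADV test above.
 */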
-static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
+ bool unsupp;
if (!edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -1832,21 +1843,26 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
- if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) ||
- ((edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) !=
- edata->advertised)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+
+ unsupp = linkmode_andnot(tmp, edata->advertised, supported);
+ if (unsupp) {
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Invalid advertised capabilities %d\n",
- edata->advertised);
+ "Invalid advertised capabilities %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, edata->advertised);
return -EINVAL;
}
- if (edata->advertised & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised))
params.eee.adv_caps = QED_EEE_1G_ADV;
- if (edata->advertised & ADVERTISED_10000baseT_Full)
- params.eee.adv_caps |= QED_EEE_10G_ADV;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised))
+ params.eee.adv_caps |= QED_EEE_10G_ADV;
+
params.eee.enable = edata->eee_enabled;
params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
params.eee.tx_lpi_timer = edata->tx_lpi_timer;
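/* Sketch of the validation above (hypothetical helper, not part of the
 * patch): linkmode_andnot(tmp, adv, supported) computes adv & ~supported
 * and returns nonzero when any bit survives, i.e. something unsupported
 * was requested:
 *
 *	static bool example_adv_ok(const unsigned long *adv,
 *				   const unsigned long *supported)
 *	{
 *		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
 *
 *		return !linkmode_andnot(tmp, adv, supported);
 *	}
 */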
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index cb1746bc0e0c..847fa62c80df 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -215,7 +215,7 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
- bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ bd2_bits2 |= ((skb_transport_offset(skb) >> 1) &
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
<< ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 41894d154013..b9dc0071c5de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -446,8 +446,7 @@ static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
encap_descr |= skb_network_offset(skb) << 10;
first_desc->encap_descr = cpu_to_le16(encap_descr);
- first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
- skb->data;
+ first_desc->tcp_hdr_offset = skb_inner_transport_offset(skb);
first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 3270df72541b..4c06f55878de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -771,5 +771,6 @@ static struct platform_driver emac_platform_driver = {
module_platform_driver(emac_platform_driver);
+MODULE_DESCRIPTION("Qualcomm EMAC Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index 4292c89bd35c..6263e4cf47fa 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -1,22 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- *
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* This module implements the Qualcomm Atheros SPI protocol for
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h
index 356de8ec5d48..828ee9c27578 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* Qualcomm Atheros SPI register definition.
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.c b/drivers/net/ethernet/qualcomm/qca_7k_common.c
index 6b511f05df61..5302da587620 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros ethernet framing. Every Ethernet frame is surrounded
@@ -162,5 +149,5 @@ EXPORT_SYMBOL_GPL(qcafrm_fsm_decode);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.h b/drivers/net/ethernet/qualcomm/qca_7k_common.h
index 928554f11e35..44ed66fdb407 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros Ethernet framing. Every Ethernet frame is surrounded by an atheros
@@ -107,9 +94,6 @@ struct qcafrm_handle {
/* Offset in buffer (borrowed for length too) */
u16 offset;
-
- /* Frame length as kept by this module */
- u16 len;
};
u16 qcafrm_create_header(u8 *buf, u16 len);
@@ -128,17 +112,6 @@ static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle)
handle->state = handle->init;
}
-/* Gather received bytes and try to extract a full Ethernet frame
- * by following a simple state machine.
- *
- * Return: QCAFRM_GATHER No Ethernet frame fully received yet.
- * QCAFRM_NOHEAD Header expected but not found.
- * QCAFRM_INVLEN QCA7K frame length is invalid
- * QCAFRM_NOTAIL Footer expected but not found.
- * > 0 Number of byte in the fully received
- * Ethernet frame
- */
-
s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte);
#endif /* _QCA_FRAMING_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 1822f2ad8f0d..ff3b89e9028e 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
@@ -255,7 +242,7 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
struct qcaspi *qca = netdev_priv(dev);
ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
- ring->tx_max_pending = TX_RING_MAX_LEN;
+ ring->tx_max_pending = QCASPI_TX_RING_MAX_LEN;
ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
@@ -275,8 +262,8 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
if (qca->spi_thread)
kthread_park(qca->spi_thread);
- qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
- qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
+ qca->txr.count = max_t(u32, ring->tx_pending, QCASPI_TX_RING_MIN_LEN);
+ qca->txr.count = min_t(u16, qca->txr.count, QCASPI_TX_RING_MAX_LEN);
if (qca->spi_thread)
kthread_unpark(qca->spi_thread);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.h b/drivers/net/ethernet/qualcomm/qca_debug.h
index 46a785844421..0d98cef3abc4 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.h
+++ b/drivers/net/ethernet/qualcomm/qca_debug.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5f3c11fb3fa2..5799ecc88a87 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros SPI protocol for
@@ -359,7 +346,7 @@ qcaspi_receive(struct qcaspi *qca)
/* Read the packet size. */
qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
- netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
+ netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %04x\n",
available);
if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
@@ -476,7 +463,7 @@ qcaspi_flush_tx_ring(struct qcaspi *qca)
* has been replaced by netif_tx_lock_bh() and so on.
*/
netif_tx_lock_bh(qca->net_dev);
- for (i = 0; i < TX_RING_MAX_LEN; i++) {
+ for (i = 0; i < QCASPI_TX_RING_MAX_LEN; i++) {
if (qca->txr.skb[i]) {
dev_kfree_skb(qca->txr.skb[i]);
qca->txr.skb[i] = NULL;
@@ -687,7 +674,7 @@ static int
qcaspi_netdev_open(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
- int ret = 0;
+ struct task_struct *thread;
if (!qca)
return -EINVAL;
@@ -697,23 +684,18 @@ qcaspi_netdev_open(struct net_device *dev)
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
- qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
- qca, "%s", dev->name);
+ thread = kthread_run((void *)qcaspi_spi_thread,
+ qca, "%s", dev->name);
- if (IS_ERR(qca->spi_thread)) {
+ if (IS_ERR(thread)) {
netdev_err(dev, "%s: unable to start kernel thread.\n",
QCASPI_DRV_NAME);
- return PTR_ERR(qca->spi_thread);
+ return PTR_ERR(thread);
}
- ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0,
- dev->name, qca);
- if (ret) {
- netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
- QCASPI_DRV_NAME, qca->spi_dev->irq, ret);
- kthread_stop(qca->spi_thread);
- return ret;
- }
+ qca->spi_thread = thread;
+
+ enable_irq(qca->spi_dev->irq);
/* SPI thread takes care of TX queue */
@@ -728,10 +710,12 @@ qcaspi_netdev_close(struct net_device *dev)
netif_stop_queue(dev);
qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
- free_irq(qca->spi_dev->irq, qca);
+ disable_irq(qca->spi_dev->irq);
- kthread_stop(qca->spi_thread);
- qca->spi_thread = NULL;
+ if (qca->spi_thread) {
+ kthread_stop(qca->spi_thread);
+ qca->spi_thread = NULL;
+ }
qcaspi_flush_tx_ring(qca);
return 0;
@@ -831,8 +815,8 @@ qcaspi_netdev_init(struct net_device *dev)
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
- qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
- QCAFRM_FOOTER_LEN + 4) * 4;
+ qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+ QCAFRM_FOOTER_LEN + QCASPI_HW_PKT_LEN) * QCASPI_RX_MAX_FRAMES;
memset(&qca->stats, 0, sizeof(struct qcaspi_stats));
@@ -881,6 +865,8 @@ qcaspi_netdev_setup(struct net_device *dev)
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->needed_tailroom = ALIGN(QCAFRM_FOOTER_LEN + QCAFRM_MIN_LEN, 4);
+ dev->needed_headroom = ALIGN(QCAFRM_HEADER_LEN, 4);
dev->tx_queue_len = 100;
/* MTU range: 46 - 1500 */
@@ -891,7 +877,7 @@ qcaspi_netdev_setup(struct net_device *dev)
memset(qca, 0, sizeof(struct qcaspi));
memset(&qca->txr, 0, sizeof(qca->txr));
- qca->txr.count = TX_RING_MAX_LEN;
+ qca->txr.count = QCASPI_TX_RING_MAX_LEN;
}
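/* Framing note (sketch, layout assumed from the QCAFRM_* names): each
 * tx frame is wrapped as
 *
 *	| QCA7K header | Ethernet frame (padded to QCAFRM_MIN_LEN) | footer |
 *
 * so reserving aligned needed_headroom/needed_tailroom above lets
 * qcafrm_create_header() and the footer helper write into the existing
 * skb instead of reallocating every packet.
 */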
static const struct of_device_id qca_spi_of_match[] = {
@@ -984,6 +970,15 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
+ ret = devm_request_irq(&spi->dev, spi->irq, qcaspi_intr_handler,
+ IRQF_NO_AUTOEN, qca->net_dev->name, qca);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get IRQ %d (irqval=%d).\n",
+ spi->irq, ret);
+ free_netdev(qcaspi_devs);
+ return ret;
+ }
+
ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
@@ -998,8 +993,8 @@ qca_spi_probe(struct spi_device *spi)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
- signature);
+ dev_err(&spi->dev, "Invalid signature (expected 0x%04x, read 0x%04x)\n",
+ QCASPI_GOOD_SIGNATURE, signature);
free_netdev(qcaspi_devs);
return -EFAULT;
}
@@ -1048,6 +1043,6 @@ module_spi_driver(qca_spi_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCASPI_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 3067356106f0..d59cb2352cee 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Qualcomm Atheros SPI register definition.
@@ -39,8 +26,9 @@
#define QCASPI_GOOD_SIGNATURE 0xAA55
-#define TX_RING_MAX_LEN 10
-#define TX_RING_MIN_LEN 2
+#define QCASPI_TX_RING_MAX_LEN 10
+#define QCASPI_TX_RING_MIN_LEN 2
+#define QCASPI_RX_MAX_FRAMES 4
/* sync related constants */
#define QCASPI_SYNC_UNKNOWN 0
@@ -54,7 +42,7 @@
#define QCASPI_EVENT_CPUON 1
struct tx_ring {
- struct sk_buff *skb[TX_RING_MAX_LEN];
+ struct sk_buff *skb[QCASPI_TX_RING_MAX_LEN];
u16 head;
u16 tail;
u16 size;
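The buffer_size change earlier in this diff sizes the receive buffer for a worst-case burst rather than the current MTU: per-frame overhead (framing header and footer plus the hardware packet-length field) on top of a maximum VLAN Ethernet frame, multiplied by the new QCASPI_RX_MAX_FRAMES. A back-of-the-envelope check with assumed constants; only VLAN_ETH_HLEN = 18 is certain here, the QCAFRM and QCASPI values are placeholders that may differ from the real headers:

#include <stdio.h>

#define VLAN_ETH_HLEN		18	/* from include/linux/if_vlan.h */
#define QCAFRM_MAX_MTU		1500	/* assumed */
#define QCAFRM_HEADER_LEN	8	/* assumed */
#define QCAFRM_FOOTER_LEN	2	/* assumed */
#define QCASPI_HW_PKT_LEN	4	/* assumed */
#define QCASPI_RX_MAX_FRAMES	4	/* matches the new define above */

int main(void)
{
	int per_frame = QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
			QCAFRM_FOOTER_LEN + QCASPI_HW_PKT_LEN;

	/* worst case: a burst of QCASPI_RX_MAX_FRAMES maximum-size frames */
	printf("buffer_size = %d * %d = %d bytes\n", per_frame,
	       QCASPI_RX_MAX_FRAMES, per_frame * QCASPI_RX_MAX_FRAMES);
	return 0;
}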
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 223321897b96..321fd8d00730 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2017, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros UART protocol for
@@ -410,6 +397,6 @@ module_serdev_device_driver(qca_uart_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5b69b9268c75..f3bea196a8f9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -520,4 +520,5 @@ static void __exit rmnet_exit(void)
module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_ALIAS_RTNL_LINK("rmnet");
+MODULE_DESCRIPTION("Qualcomm RmNet MAP driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 046b5f7d8e7c..9d2a9562c96f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -98,7 +98,7 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
- return priv->real_dev->ifindex;
+ return READ_ONCE(priv->real_dev->ifindex);
}
static int rmnet_vnd_init(struct net_device *dev)
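The rmnet_vnd hunk above wraps the ifindex load in READ_ONCE() because ndo_get_iflink() can run locklessly while another context updates the field under WRITE_ONCE(). A minimal sketch of that pairing, with a hypothetical cfg structure:

#include <linux/compiler.h>

struct cfg {
	int ifindex;
};

/* Writer side (e.g. a device moving between namespaces) publishes the
 * new value with WRITE_ONCE() so the store cannot be torn or elided.
 */
static void cfg_update(struct cfg *c, int new_index)
{
	WRITE_ONCE(c->ifindex, new_index);
}

/* Lockless reader uses READ_ONCE() so the compiler emits exactly one
 * load and cannot fuse it with, or re-read it across, other accesses.
 */
static int cfg_get(const struct cfg *c)
{
	return READ_ONCE(c->ifindex);
}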
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 81567fcf3957..4c043052198d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -68,6 +68,7 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
+ RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_NONE
};
@@ -84,3 +85,6 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
void rtl8168_init_leds(struct net_device *ndev);
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
+void rtl8125_init_leds(struct net_device *ndev);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 007d077edcad..7c5dc9d0df85 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -18,12 +18,14 @@
#define RTL8168_LED_CTRL_LINK_100 BIT(1)
#define RTL8168_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_NUM_LEDS 3
+#define RTL8125_LED_CTRL_ACT BIT(9)
+#define RTL8125_LED_CTRL_LINK_2500 BIT(5)
+#define RTL8125_LED_CTRL_LINK_1000 BIT(3)
+#define RTL8125_LED_CTRL_LINK_100 BIT(1)
+#define RTL8125_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_SUPPORTED_MODES \
- (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_100) | \
- BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_RX) | \
- BIT(TRIGGER_NETDEV_TX))
+#define RTL8168_NUM_LEDS 3
+#define RTL8125_NUM_LEDS 4
struct r8169_led_classdev {
struct led_classdev led;
@@ -33,28 +35,35 @@ struct r8169_led_classdev {
#define lcdev_to_r8169_ldev(lcdev) container_of(lcdev, struct r8169_led_classdev, led)
+static bool r8169_trigger_mode_is_valid(unsigned long flags)
+{
+ bool rx, tx;
+
+ if (flags & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ return false;
+ if (flags & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ return false;
+
+ rx = flags & BIT(TRIGGER_NETDEV_RX);
+ tx = flags & BIT(TRIGGER_NETDEV_TX);
+
+ return rx == tx;
+}
+
static int rtl8168_led_hw_control_is_supported(struct led_classdev *led_cdev,
unsigned long flags)
{
struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
struct rtl8169_private *tp = netdev_priv(ldev->ndev);
int shift = ldev->index * 4;
- bool rx, tx;
-
- if (flags & ~RTL8168_SUPPORTED_MODES)
- goto nosupp;
- rx = flags & BIT(TRIGGER_NETDEV_RX);
- tx = flags & BIT(TRIGGER_NETDEV_TX);
- if (rx != tx)
- goto nosupp;
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
+ return -EOPNOTSUPP;
+ }
return 0;
-
-nosupp:
- /* Switch LED off to indicate that mode isn't supported */
- rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
- return -EOPNOTSUPP;
}
static int rtl8168_led_hw_control_set(struct led_classdev *led_cdev,
@@ -129,7 +138,6 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
led_cdev->name = led_name;
- led_cdev->default_trigger = "netdev";
led_cdev->hw_control_trigger = "netdev";
led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
led_cdev->hw_control_is_supported = rtl8168_led_hw_control_is_supported;
@@ -155,3 +163,102 @@ void rtl8168_init_leds(struct net_device *ndev)
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
}
+
+static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8125_set_led_mode(tp, ldev->index, 0);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rtl8125_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ u16 mode = 0;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode |= RTL8125_LED_CTRL_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode |= RTL8125_LED_CTRL_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode |= RTL8125_LED_CTRL_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode |= RTL8125_LED_CTRL_LINK_2500;
+ if (flags & (BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX)))
+ mode |= RTL8125_LED_CTRL_ACT;
+
+ return rtl8125_set_led_mode(tp, ldev->index, mode);
+}
+
+static int rtl8125_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int mode;
+
+ mode = rtl8125_get_led_mode(tp, ldev->index);
+ if (mode < 0)
+ return mode;
+
+ if (mode & RTL8125_LED_CTRL_LINK_10)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_10);
+ if (mode & RTL8125_LED_CTRL_LINK_100)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_100);
+ if (mode & RTL8125_LED_CTRL_LINK_1000)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_1000);
+ if (mode & RTL8125_LED_CTRL_LINK_2500)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_2500);
+ if (mode & RTL8125_LED_CTRL_ACT)
+ *flags |= BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+
+ return 0;
+}
+
+static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
+ struct net_device *ndev, int index)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->ndev = ndev;
+ ldev->index = index;
+
+ r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->hw_control_is_supported = rtl8125_led_hw_control_is_supported;
+ led_cdev->hw_control_set = rtl8125_led_hw_control_set;
+ led_cdev->hw_control_get = rtl8125_led_hw_control_get;
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore errors */
+ devm_led_classdev_register(&ndev->dev, led_cdev);
+}
+
+void rtl8125_init_leds(struct net_device *ndev)
+{
+ /* bind resource mgmt to netdev */
+ struct device *dev = &ndev->dev;
+ struct r8169_led_classdev *leds;
+ int i;
+
+ leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return;
+
+ for (i = 0; i < RTL8125_NUM_LEDS; i++)
+ rtl8125_setup_led_ldev(leds + i, ndev, i);
+}
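The factored-out r8169_trigger_mode_is_valid() above encodes two hardware limits: duplex triggers are not supported at all, and there is only a single activity control bit, so RX and TX blinking can only be enabled together. A standalone truth-table demo under those assumptions; the bit values are local stand-ins for the TRIGGER_NETDEV_* enum, not the real ones:

#include <stdbool.h>
#include <stdio.h>

enum {
	TRIG_RX   = 1 << 0,
	TRIG_TX   = 1 << 1,
	TRIG_HALF = 1 << 2,
	TRIG_FULL = 1 << 3,
};

static bool mode_is_valid(unsigned long flags)
{
	if (flags & (TRIG_HALF | TRIG_FULL))
		return false;			/* duplex triggers unsupported */

	/* one activity bit in hardware: rx and tx must match */
	return !!(flags & TRIG_RX) == !!(flags & TRIG_TX);
}

int main(void)
{
	printf("rx+tx:   %d\n", mode_is_valid(TRIG_RX | TRIG_TX));	/* 1 */
	printf("rx only: %d\n", mode_is_valid(TRIG_RX));		/* 0 */
	printf("none:    %d\n", mode_is_valid(0));			/* 1 */
	return 0;
}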
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index dd73df6b17b0..0d2cbb32c870 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -55,6 +55,7 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
+#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -136,6 +137,7 @@ static const struct {
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
+ [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -158,6 +160,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
+ { PCI_VDEVICE(REALTEK, 0x8126) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
{}
};
@@ -327,13 +330,23 @@ enum rtl8168_registers {
};
enum rtl8125_registers {
+ LEDSEL0 = 0x18,
+ INT_CFG0_8125 = 0x34,
+#define INT_CFG0_ENABLE_8125 BIT(0)
+#define INT_CFG0_CLKREQEN BIT(3)
IntrMask_8125 = 0x38,
IntrStatus_8125 = 0x3c,
+ INT_CFG1_8125 = 0x7a,
+ LEDSEL2 = 0x84,
+ LEDSEL1 = 0x86,
TxPoll_8125 = 0x90,
+ LEDSEL3 = 0x96,
MAC0_BKP = 0x19e0,
EEE_TXIDLE_TIMER_8125 = 0x6048,
};
+#define LEDSEL_MASK_8125 0x23f
+
#define RX_VLAN_INNER_8125 BIT(22)
#define RX_VLAN_OUTER_8125 BIT(23)
#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
@@ -606,6 +619,7 @@ struct rtl8169_private {
struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
+ u16 tx_lpi_timer;
u32 irq_mask;
int irq;
struct clk *clk;
@@ -629,7 +643,6 @@ struct rtl8169_private {
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
- int eee_adv;
const char *fw_name;
struct rtl_fw *rtl_fw;
@@ -663,6 +676,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_2);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -824,6 +838,51 @@ int rtl8168_get_led_mode(struct rtl8169_private *tp)
return ret;
}
+static int rtl8125_get_led_reg(int index)
+{
+ static const int led_regs[] = { LEDSEL0, LEDSEL1, LEDSEL2, LEDSEL3 };
+
+ return led_regs[index];
+}
+
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+ u16 val;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->led_lock);
+ val = RTL_R16(tp, reg) & ~LEDSEL_MASK_8125;
+ RTL_W16(tp, reg, val | mode);
+ mutex_unlock(&tp->led_lock);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = RTL_R16(tp, reg);
+
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len)
{
@@ -1140,7 +1199,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1155,7 +1214,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1341,7 +1400,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (enable)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
else
@@ -1508,7 +1567,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (wolopts)
rtl_mod_config2(tp, 0, PME_SIGNAL);
else
@@ -1974,30 +2033,65 @@ static int rtl_set_coalesce(struct net_device *dev,
return 0;
}
-static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
+static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
+{
+ unsigned int timer_val = READ_ONCE(tp->dev->mtu) + ETH_HLEN + 0x20;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_48:
+ tp->tx_lpi_timer = timer_val;
+ r8168_mac_ocp_write(tp, 0xe048, timer_val);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ tp->tx_lpi_timer = timer_val;
+ RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned int r8169_get_tx_lpi_timer_us(struct rtl8169_private *tp)
+{
+ unsigned int speed = tp->phydev->speed;
+ unsigned int timer = tp->tx_lpi_timer;
+
+ if (!timer || speed == SPEED_UNKNOWN)
+ return 0;
+
+ /* tx_lpi_timer value is in bytes */
+ return DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed);
+}
+
+static int rtl8169_get_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- return phy_ethtool_get_eee(tp->phydev, data);
+ ret = phy_ethtool_get_eee(tp->phydev, data);
+ if (ret)
+ return ret;
+
+ data->tx_lpi_timer = r8169_get_tx_lpi_timer_us(tp);
+ data->tx_lpi_enabled = data->tx_lpi_timer ? data->eee_enabled : false;
+
+ return 0;
}
-static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
+static int rtl8169_set_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_eee(tp->phydev, data);
-
- if (!ret)
- tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
- MDIO_AN_EEE_ADV);
- return ret;
+ return phy_ethtool_set_eee(tp->phydev, data);
}
static void rtl8169_get_ringparam(struct net_device *dev,
@@ -2062,21 +2156,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_pauseparam = rtl8169_set_pauseparam,
};
-static void rtl_enable_eee(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int adv;
-
- /* respect EEE advertisement the user may have set */
- if (tp->eee_adv >= 0)
- adv = tp->eee_adv;
- else
- adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
-
- if (adv >= 0)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
-}
-
static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
/*
@@ -2095,6 +2174,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
u16 val;
enum mac_version ver;
} mac_info[] = {
+ /* 8126A family. */
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+
/* 8125B family. */
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
@@ -2250,14 +2332,8 @@ static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
-static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp)
-{
- RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20);
-}
-
static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
{
- rtl8125_set_eee_txidle_timer(tp);
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
@@ -2313,9 +2389,6 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
- if (rtl_supports_eee(tp))
- rtl_enable_eee(tp);
-
genphy_soft_reset(tp->phydev);
}
@@ -2368,6 +2441,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2554,7 +2628,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2797,7 +2871,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2811,7 +2885,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2821,6 +2895,8 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
+ u8 val8;
+
if (tp->mac_version < RTL_GIGA_MAC_VER_32)
return;
@@ -2834,11 +2910,19 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
return;
rtl_mod_config5(tp, 0, ASPM_en);
- rtl_mod_config2(tp, 0, ClkReqEn);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, 0, ClkReqEn);
+ break;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2850,14 +2934,22 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
break;
}
- rtl_mod_config2(tp, ClkReqEn, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, ClkReqEn, 0);
+ break;
+ }
rtl_mod_config5(tp, ASPM_en, 0);
}
}
@@ -3570,10 +3662,15 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
else
- r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0300);
if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
@@ -3586,6 +3683,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
+ else
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
@@ -3600,10 +3701,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
- rtl8125b_config_eee_mac(tp);
- else
+ if (tp->mac_version == RTL_GIGA_MAC_VER_61)
rtl8125a_config_eee_mac(tp);
+ else
+ rtl8125b_config_eee_mac(tp);
rtl_disable_rxdvgate(tp);
}
@@ -3647,6 +3748,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8126a(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -3689,6 +3796,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
+ [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3699,9 +3807,23 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
{
int i;
+ RTL_W8(tp, INT_CFG0_8125, 0x00);
+
/* disable interrupt coalescing */
- for (i = 0xa00; i < 0xb00; i += 4)
- RTL_W32(tp, i, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_61:
+ for (i = 0xa00; i < 0xb00; i += 4)
+ RTL_W32(tp, i, 0);
+ break;
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ for (i = 0xa00; i < 0xa80; i += 4)
+ RTL_W32(tp, i, 0);
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
+ break;
+ default:
+ break;
+ }
rtl_hw_config(tp);
}
@@ -3744,6 +3866,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+ rtl_set_eee_txidle_timer(tp);
+
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
rtl_hw_start_8169(tp);
else if (rtl_is_8125(tp))
@@ -3777,15 +3901,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
netdev_update_features(dev);
rtl_jumbo_config(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
- rtl8125_set_eee_txidle_timer(tp);
- break;
- default:
- break;
- }
+ rtl_set_eee_txidle_timer(tp);
return 0;
}
@@ -3929,7 +4045,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4080,8 +4196,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -5058,7 +5173,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
}
tp->phydev->mac_managed_pm = true;
-
+ if (rtl_supports_eee(tp))
+ phy_advertise_eee_all(tp->phydev);
phy_support_asym_pause(tp->phydev);
/* PHY will be woken up in rtl_open() */
@@ -5108,7 +5224,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
rtl_hw_init_8125(tp);
break;
default:
@@ -5193,7 +5309,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
- tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
raw_spin_lock_init(&tp->cfg9346_usage_lock);
@@ -5201,11 +5316,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
raw_spin_lock_init(&tp->mac_ocp_lock);
mutex_init(&tp->led_lock);
- dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
- struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
/* Get the *optional* external "ether_clk" used on some boards */
tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
if (IS_ERR(tp->clk))
@@ -5320,6 +5430,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
netdev_sw_irq_coalesce_default_on(dev);
/* configure chip for default features */
@@ -5356,10 +5468,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- if (IS_ENABLED(CONFIG_R8169_LEDS) &&
- tp->mac_version > RTL_GIGA_MAC_VER_06 &&
- tp->mac_version < RTL_GIGA_MAC_VER_61)
- rtl8168_init_leds(dev);
+ if (IS_ENABLED(CONFIG_R8169_LEDS)) {
+ if (rtl_is_8125(tp))
+ rtl8125_init_leds(dev);
+ else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ rtl8168_init_leds(dev);
+ }
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
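r8169_get_tx_lpi_timer_us() above converts the byte-based idle timer to microseconds: the timer counts bytes, link speed is in Mbit/s, so bytes * 8 / speed yields microseconds. A worked example with the value rtl_set_eee_txidle_timer() would program for a 1500-byte MTU (standalone sketch; DIV_ROUND_CLOSEST reimplemented locally):

#include <stdio.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int mtu = 1500, eth_hlen = 14;
	unsigned int timer = mtu + eth_hlen + 0x20;	/* 1546 bytes */
	unsigned int speeds[] = { 100, 1000, 2500 };	/* Mbit/s */

	for (int i = 0; i < 3; i++)
		printf("%u Mbit/s -> tx_lpi_timer = %u us\n", speeds[i],
		       DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speeds[i]));
	return 0;	/* prints 124, 12 and 5 us respectively */
}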
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index b50f16786c24..1f74317beb88 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1102,6 +1102,12 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
rtl8125b_config_eee_phy(phydev);
}
+static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1152,6 +1158,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
+ [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index d6136fe5c206..b03fae7a0f72 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -34,6 +34,7 @@ config RAVB
select MII
select MDIO_BITBANG
select PHYLIB
+ select RESET_CONTROLLER
help
Renesas Ethernet AVB device driver.
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index e0f8276cffed..b48935ec7e28 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -205,7 +205,11 @@ enum ravb_reg {
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
- CSR0 = 0x0800, /* RZ/G2L only */
+
+ /* TOE registers (RZ/G2L only) */
+ CSR0 = 0x0800,
+ CSR1 = 0x0804,
+ CSR2 = 0x0808,
};
@@ -978,16 +982,39 @@ enum CSR0_BIT {
CSR0_RPE = 0x00000020,
};
+enum CSR1_BIT {
+ CSR1_TIP4 = 0x00000001,
+ CSR1_TTCP4 = 0x00000010,
+ CSR1_TUDP4 = 0x00000020,
+ CSR1_TICMP4 = 0x00000040,
+ CSR1_TTCP6 = 0x00100000,
+ CSR1_TUDP6 = 0x00200000,
+ CSR1_TICMP6 = 0x00400000,
+ CSR1_THOP = 0x01000000,
+ CSR1_TROUT = 0x02000000,
+ CSR1_TAHD = 0x04000000,
+ CSR1_TDHD = 0x08000000,
+};
+
+enum CSR2_BIT {
+ CSR2_RIP4 = 0x00000001,
+ CSR2_RTCP4 = 0x00000010,
+ CSR2_RUDP4 = 0x00000020,
+ CSR2_RICMP4 = 0x00000040,
+ CSR2_RTCP6 = 0x00100000,
+ CSR2_RUDP6 = 0x00200000,
+ CSR2_RICMP6 = 0x00400000,
+ CSR2_RHOP = 0x01000000,
+ CSR2_RROUT = 0x02000000,
+ CSR2_RAHD = 0x04000000,
+ CSR2_RDHD = 0x08000000,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
-#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
-
-#define GBETH_RX_BUFF_MAX 8192
-#define GBETH_RX_DESC_DATA_SIZE 4080
-
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -1012,9 +1039,6 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- void (*rx_ring_free)(struct net_device *ndev, int q);
- void (*rx_ring_format)(struct net_device *ndev, int q);
- void *(*alloc_rx_desc)(struct net_device *ndev, int q);
bool (*receive)(struct net_device *ndev, int *quota, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
@@ -1025,9 +1049,10 @@ struct ravb_hw_info {
netdev_features_t net_hw_features;
netdev_features_t net_features;
int stats_len;
- size_t max_rx_len;
u32 tccr_mask;
- u32 rx_max_buf_size;
+ u32 rx_max_frame_size;
+ u32 rx_max_desc_use;
+ u32 rx_desc_size;
unsigned aligned_tx: 1;
/* hardware features */
@@ -1060,8 +1085,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
- struct ravb_rx_desc *gbeth_rx_ring;
- struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ union {
+ struct ravb_rx_desc *desc;
+ struct ravb_ex_rx_desc *ex_desc;
+ void *raw;
+ } rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
@@ -1089,10 +1117,6 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
- int erra_irq;
- int mgmta_irq;
- int rx_irqs[NUM_RX_QUEUE];
- int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -1106,6 +1130,8 @@ struct ravb_private {
const struct ravb_hw_info *info;
struct reset_control *rstc;
+
+ u32 gti_tiv;
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
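The ravb.h rework above folds the two ring pointers into a union plus a per-SoC rx_desc_size, so one code path can walk either descriptor layout by byte stride. A reduced sketch of that access pattern; the struct bodies and sizes here are illustrative, not the real descriptor layouts:

#include <stddef.h>

struct rx_desc { unsigned int word[4]; };	/* basic layout */
struct ex_rx_desc { unsigned int word[8]; };	/* extended layout */

union rx_ring {
	struct rx_desc *desc;
	struct ex_rx_desc *ex_desc;
	void *raw;
};

static struct rx_desc *rx_get_desc(union rx_ring *ring, size_t desc_size,
				   unsigned int i)
{
	/* Arithmetic on the raw view keeps the stride a runtime choice;
	 * common fields sit at the same offsets in both layouts, so the
	 * basic view is safe to return either way.
	 */
	return (struct rx_desc *)((char *)ring->raw + desc_size * i);
}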
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0e3731f50fc2..fa48ff4aba2d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
+#include <net/ip.h>
#include "ravb.h"
@@ -38,16 +39,6 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
-static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
- "ch0", /* RAVB_BE */
- "ch1", /* RAVB_NC */
-};
-
-static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
- "ch18", /* RAVB_BE */
- "ch19", /* RAVB_NC */
-};
-
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -96,13 +87,13 @@ static void ravb_set_rate_gbeth(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
switch (priv->speed) {
- case 10: /* 10BASE */
+ case 10: /* 10BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
break;
- case 100: /* 100BASE */
+ case 100: /* 100BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
break;
- case 1000: /* 1000BASE */
+ case 1000: /* 1000BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
break;
}
@@ -122,12 +113,23 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static void ravb_set_buffer_align(struct sk_buff *skb)
+static struct sk_buff *
+ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
+ gfp_t gfp_mask)
{
- u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+ struct sk_buff *skb;
+ u32 reserve;
+
+ skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
+ gfp_mask);
+ if (!skb)
+ return NULL;
+ reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
if (reserve)
skb_reserve(skb, RAVB_ALIGN - reserve);
+
+ return skb;
}
/* Get MAC address from the MAC address registers
@@ -200,6 +202,13 @@ static const struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+static struct ravb_rx_desc *
+ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
+ unsigned int i)
+{
+ return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
+}
+
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
@@ -244,67 +253,40 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
unsigned int i;
- if (!priv->gbeth_rx_ring)
+ if (!priv->rx_ring[q].raw)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+ struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- GBETH_RX_BUFF_MAX,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
}
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
- priv->gbeth_rx_ring = NULL;
-}
-
-static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
- unsigned int i;
-
- if (!priv->rx_ring[q])
- return;
-
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
- DMA_FROM_DEVICE);
- }
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
- priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
+ priv->rx_ring[q].raw = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
unsigned int ring_size;
unsigned int i;
- info->rx_ring_free(ndev, q);
+ ravb_rx_ring_free(ndev, q);
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
@@ -335,7 +317,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_rx_desc *rx_desc;
@@ -343,45 +325,15 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
dma_addr_t dma_addr;
unsigned int i;
- rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- memset(priv->gbeth_rx_ring, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- /* We just set the data size to 0 for a failed mapping which
- * should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
- rx_desc->die_dt = DT_FEMPTY;
- }
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
-}
-
-static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc;
- unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- dma_addr_t dma_addr;
- unsigned int i;
-
- memset(priv->rx_ring[q], 0, rx_ring_size);
+ rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
- rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ rx_desc = ravb_rx_get_desc(priv, q, i);
+ rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -391,7 +343,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = &priv->rx_ring[q][i];
+ rx_desc = ravb_rx_get_desc(priv, q, i);
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@@ -400,7 +352,6 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
@@ -413,7 +364,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- info->rx_ring_format(ndev, q);
+ ravb_rx_ring_format(ndev, q);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -439,30 +390,18 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
- priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->gbeth_rx_ring;
-}
+ priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
-static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
-
- ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-
- priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->rx_ring[q];
+ return priv->rx_ring[q].raw;
}
/* Init skb and descriptor buffer for Ethernet AVB */
@@ -484,10 +423,9 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
+ skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
if (!skb)
goto error;
- ravb_set_buffer_align(skb);
priv->rx_skb[q][i] = skb;
}
@@ -500,7 +438,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
}
/* Allocate all RX descriptors. */
- if (!info->alloc_rx_desc(ndev, q))
+ if (!ravb_alloc_rx_desc(ndev, q))
goto error;
priv->dirty_rx[q] = 0;
@@ -522,6 +460,36 @@ error:
return -ENOMEM;
}
+static void ravb_csum_init_gbeth(struct net_device *ndev)
+{
+ bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
+ bool rx_enable = ndev->features & NETIF_F_RXCSUM;
+
+ if (!(tx_enable || rx_enable))
+ goto done;
+
+ ravb_write(ndev, 0, CSR0);
+ if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
+ netdev_err(ndev, "Timeout enabling hardware checksum\n");
+
+ if (tx_enable)
+ ndev->features &= ~NETIF_F_HW_CSUM;
+
+ if (rx_enable)
+ ndev->features &= ~NETIF_F_RXCSUM;
+ } else {
+ if (tx_enable)
+ ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
+
+ if (rx_enable)
+ ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
+ CSR2);
+ }
+
+done:
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+}
+
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -536,7 +504,7 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
}
/* Receive frame limit set register */
- ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
@@ -553,7 +521,8 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
- ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ ravb_csum_init_gbeth(ndev);
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
@@ -596,6 +565,7 @@ static void ravb_emac_init(struct net_device *ndev)
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_ring_init(ndev, RAVB_BE);
@@ -609,7 +579,7 @@ static int ravb_dmac_init_gbeth(struct net_device *ndev)
ravb_write(ndev, 0x60000000, RCR);
/* Set Max Frame Length (RTC) */
- ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+ ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
/* Set FIFO size */
ravb_write(ndev, 0x00222200, TGC);
@@ -734,6 +704,30 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
}
}
+static void ravb_rx_csum_gbeth(struct sk_buff *skb)
+{
+ __wsum csum_ip_hdr, csum_proto;
+ u8 *hw_csum;
+
+ /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
+ * bytes appended to the packet data. The first 2 bytes are the IP
+ * header checksum and the last 2 bytes are the protocol checksum.
+ */
+ if (unlikely(skb->len < sizeof(__sum16) * 2))
+ return;
+
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+
+ hw_csum -= sizeof(__sum16);
+ csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+
+ /* TODO: IPV6 Rx checksum */
+ if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
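A standalone illustration of the trailer format ravb_rx_csum_gbeth() parses above: the MAC appends four bytes after the payload, the IP header checksum result followed by the protocol checksum result, each two bytes little-endian, and zero in both fields means the hardware validated the frame. The buffer contents are hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* 6 bytes of fake payload followed by a 4-byte hardware trailer */
	uint8_t frame[] = { 1, 2, 3, 4, 5, 6, 0x00, 0x00, 0x00, 0x00 };
	size_t len = sizeof(frame);

	uint16_t csum_proto  = get_le16(frame + len - 2);
	uint16_t csum_ip_hdr = get_le16(frame + len - 4);

	/* zero in both fields means the hardware validated the packet */
	printf("ip_hdr=0x%04x proto=0x%04x -> %s\n", csum_ip_hdr, csum_proto,
	       (!csum_ip_hdr && !csum_proto) ? "CHECKSUM_UNNECESSARY" : "none");
	return 0;
}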
@@ -758,7 +752,8 @@ static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
skb = priv->rx_skb[RAVB_BE][entry];
priv->rx_skb[RAVB_BE][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+ ALIGN(priv->info->rx_max_frame_size, 16),
+ DMA_FROM_DEVICE);
return skb;
}
@@ -772,29 +767,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
struct ravb_rx_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ int rx_packets = 0;
u8 desc_status;
- int boguscnt;
u16 pkt_len;
u8 die_dt;
int entry;
int limit;
+ int i;
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->gbeth_rx_ring[entry];
- while (desc->die_dt != DT_FEMPTY) {
+ desc = &priv->rx_ring[q].desc[entry];
+ for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -819,8 +810,10 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
case DT_FSTART:
@@ -846,32 +839,33 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(priv->rx_1st_skb);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
}
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
+ desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ desc = &priv->rx_ring[q].desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break;
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent,
skb->data,
- GBETH_RX_BUFF_MAX,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
@@ -887,9 +881,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
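The ravb_rx_gbeth() rework above drops the boguscnt counter in favour of counting delivered frames: the loop is bounded by ring occupancy and by the NAPI quota, and the quota is charged only for packets actually handed to the stack. A reduced model of that bookkeeping; the ready array stands in for descriptor state:

#include <stdbool.h>
#include <stdio.h>

#define RING 8

static bool rx_poll(const bool *ready, unsigned int *cur, unsigned int dirty,
		    int *quota)
{
	unsigned int limit = dirty + RING - *cur;	/* ring occupancy */
	int rx_packets = 0;

	for (unsigned int i = 0;
	     i < limit && rx_packets < *quota && ready[*cur % RING]; i++) {
		rx_packets++;		/* one frame handed to the stack */
		(*cur)++;
	}

	*quota -= rx_packets;
	return *quota == 0;		/* budget exhausted: poll again */
}

int main(void)
{
	bool ready[RING] = { true, true, true, false };
	unsigned int cur = 0;
	int quota = 2;

	/* three frames pending, budget of two: stops at the quota */
	printf("exhausted=%d rx=%u left=%d\n",
	       rx_poll(ready, &cur, 0, &quota), cur, quota);
	return 0;
}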
@@ -911,7 +905,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
@@ -945,7 +939,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -971,20 +965,19 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break; /* Better luck next round. */
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent, skb->data,
le16_to_cpu(desc->ds_cc),
DMA_FROM_DEVICE);
@@ -1092,11 +1085,23 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ irqreturn_t result = IRQ_HANDLED;
+
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev))) {
+ result = IRQ_NONE;
+ goto out_rpm_put;
+ }
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
+ return result;
}
/* Error interrupt handler */
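The interrupt handlers in this file gain a runtime-PM guard: pm_runtime_get_noresume() takes a usage-count reference without waking the device, pm_runtime_active() detects a handler invoked while the hardware is suspended and its registers are unclocked, and pm_runtime_put_noidle() drops the reference without touching the idle timer. A minimal sketch of the pattern, with a hypothetical handler:

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct device *dev = dev_id;
	irqreturn_t ret = IRQ_HANDLED;

	pm_runtime_get_noresume(dev);	/* pin the PM state, don't resume */

	if (unlikely(!pm_runtime_active(dev))) {
		ret = IRQ_NONE;		/* spurious: device is powered down */
		goto out;
	}

	/* ... register access is safe only on this side of the check ... */

out:
	pm_runtime_put_noidle(dev);	/* drop pin, leave idle timer alone */
	return ret;
}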
@@ -1176,9 +1181,15 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1222,6 +1233,9 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1230,9 +1244,15 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1254,6 +1274,9 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1261,8 +1284,14 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Network control/Best effort queue RX/TX */
@@ -1270,6 +1299,9 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
result = IRQ_HANDLED;
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1288,25 +1320,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- bool gptp = info->gptp || info->ccc_gac;
- struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
- unsigned int entry;
- if (!gptp) {
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (gptp || desc->die_dt != DT_FEMPTY) {
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1736,89 +1759,159 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.set_wol = ravb_set_wol,
};
-static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
- struct net_device *ndev, struct device *dev,
- const char *ch)
+static int ravb_set_config_mode(struct net_device *ndev)
{
- char *name;
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
- name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!name)
- return -ENOMEM;
- error = request_irq(irq, handler, 0, name, ndev);
- if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", name);
+ if (info->gptp) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else if (info->ccc_gac) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
return error;
}
+static void ravb_set_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ if (!(info->gptp || info->ccc_gac))
+ return;
+
+ ravb_write(ndev, priv->gti_tiv, GTI);
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+}
+
+static int ravb_compute_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct device *dev = ndev->dev.parent;
+ unsigned long rate;
+ u64 inc;
+
+ if (!(info->gptp || info->ccc_gac))
+ return 0;
+
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
+ return -EINVAL;
+
+ inc = div64_ul(1000000000ULL << 20, rate);
+
+ if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
+ dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
+ inc, GTI_TIV_MIN, GTI_TIV_MAX);
+ return -EINVAL;
+ }
+ priv->gti_tiv = inc;
+
+ return 0;
+}
+
+/* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+
+ if (explicit_delay)
+ return;
+
+ /* Fall back to legacy rgmii-*id behavior */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
+ }
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (priv->rxcidm)
+ set |= APSR_RDM;
+ if (priv->txcidm)
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &priv->pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
- if (!info->multi_irqs) {
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
- } else {
- error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
- dev, "ch22:multi");
- if (error)
- goto out_napi_off;
- error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
- dev, "ch24:emac");
- if (error)
- goto out_free_irq;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch0:rx_be");
- if (error)
- goto out_free_irq_emac;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch18:tx_be");
- if (error)
- goto out_free_irq_be_rx;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch1:rx_nc");
- if (error)
- goto out_free_irq_be_tx;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch19:tx_nc");
- if (error)
- goto out_free_irq_nc_rx;
-
- if (info->err_mgmt_irqs) {
- error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
- ndev, dev, "err_a");
- if (error)
- goto out_free_irq_nc_tx;
- error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
- ndev, dev, "mgmt_a");
- if (error)
- goto out_free_irq_erra;
- }
- }
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ goto out_napi_off;
+
+ /* Set AVB config mode */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ goto out_rpm_put;
+
+ ravb_set_delay_mode(ndev);
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_mgmta;
+ goto out_set_reset;
+
ravb_emac_init(ndev);
+ ravb_set_gti(ndev);
+
/* Initialise PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_init(ndev, priv->pdev);
/* PHY control start */
@@ -1832,29 +1925,14 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
-out_free_irq_mgmta:
- if (!info->multi_irqs)
- goto out_free_irq;
- if (info->err_mgmt_irqs)
- free_irq(priv->mgmta_irq, ndev);
-out_free_irq_erra:
- if (info->err_mgmt_irqs)
- free_irq(priv->erra_irq, ndev);
-out_free_irq_nc_tx:
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
-out_free_irq_nc_rx:
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
-out_free_irq_be_tx:
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
-out_free_irq_be_rx:
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
-out_free_irq_emac:
- free_irq(priv->emac_irq, ndev);
-out_free_irq:
- free_irq(ndev->irq, ndev);
+out_set_reset:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
+out_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
@@ -1939,6 +2017,36 @@ out_unlock:
rtnl_unlock();
}
+static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
+{
+ struct iphdr *ip = ip_hdr(skb);
+
+ /* TODO: Add support for 802.1Q VLAN tags */
+ if (skb_vlan_tag_present(skb))
+ return false;
+
+ /* TODO: Add hardware checksum support for IPv6 */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ switch (ip->protocol) {
+ case IPPROTO_TCP:
+ break;
+ case IPPROTO_UDP:
+ /* If the checksum value in the UDP header field is 0, the TOE
+ * does not calculate a checksum for the UDP part of this frame,
+ * as the UDP checksum is optional per the standard.
+ */
+ if (udp_hdr(skb)->check == 0)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
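Background for the UDP special case above: an all-zero IPv4 UDP checksum has a defined meaning on the wire, so offloading such a frame would change the sender's intent; the caller falls back to skb_checksum_help() instead, as shown in the xmit hunk below.

	/* RFC 768: "An all zero transmitted checksum value means that
	 * the transmitter generated no checksum"; hence the early
	 * return for udp_hdr(skb)->check == 0 above.
	 */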
+
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -1954,6 +2062,9 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 entry;
u32 len;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
+ skb_checksum_help(skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
num_tx_desc) {
@@ -2088,8 +2199,15 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
+ struct device *dev = &priv->pdev->dev;
nstats = &ndev->stats;
+
+ pm_runtime_get_noresume(dev);
+
+ if (!pm_runtime_active(dev))
+ goto out_rpm_put;
+
stats0 = &priv->stats[RAVB_BE];
if (info->tx_counters) {
@@ -2131,6 +2249,8 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats->rx_over_errors += stats1->rx_over_errors;
}
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return nstats;
}
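A minimal sketch of the runtime-PM guard pattern used above (read_hw_counters() is a hypothetical placeholder for the register reads):

	pm_runtime_get_noresume(dev);	/* pin the current RPM state */
	if (pm_runtime_active(dev))
		read_hw_counters(ndev);	/* safe: device is powered */
	pm_runtime_put_noidle(dev);	/* unpin without an idle re-check */

The same pattern guards ravb_set_features() further down, so neither path ever resumes the hardware just to poke registers.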
@@ -2153,6 +2273,8 @@ static int ravb_close(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct device *dev = &priv->pdev->dev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -2161,8 +2283,16 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, RIC2);
ravb_write(ndev, 0, TIC);
+ /* PHY disconnect */
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ }
+
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -2179,29 +2309,8 @@ static int ravb_close(struct net_device *ndev)
}
}
- /* PHY disconnect */
- if (ndev->phydev) {
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- }
-
cancel_work_sync(&priv->work);
- if (info->multi_irqs) {
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
- free_irq(priv->emac_irq, ndev);
- if (info->err_mgmt_irqs) {
- free_irq(priv->erra_irq, ndev);
- free_irq(priv->mgmta_irq, ndev);
- }
- }
- free_irq(ndev->irq, ndev);
-
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
@@ -2211,6 +2320,17 @@ static int ravb_close(struct net_device *ndev)
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
+ /* Update statistics. */
+ ravb_get_stats(ndev);
+
+ /* Set reset mode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
@@ -2334,11 +2454,58 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
+static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
+ u32 val, u32 mask)
+{
+ u32 csr0 = CSR0_TPE | CSR0_RPE;
+ int ret;
+
+ ravb_write(ndev, csr0 & ~mask, CSR0);
+ ret = ravb_wait(ndev, CSR0, mask, 0);
+ if (!ret)
+ ravb_write(ndev, val, reg);
+
+ ravb_write(ndev, csr0, CSR0);
+
+ return ret;
+}
+
static int ravb_set_features_gbeth(struct net_device *ndev,
netdev_features_t features)
{
- /* Place holder */
- return 0;
+ netdev_features_t changed = ndev->features ^ features;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
+ if (ret)
+ goto done;
+ }
+
+ if (changed & NETIF_F_HW_CSUM) {
+ if (features & NETIF_F_HW_CSUM)
+ val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
+ if (ret)
+ goto done;
+ }
+
+done:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
}
static int ravb_set_features_rcar(struct net_device *ndev,
@@ -2349,8 +2516,6 @@ static int ravb_set_features_rcar(struct net_device *ndev,
if (changed & NETIF_F_RXCSUM)
ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
- ndev->features = features;
-
return 0;
}
@@ -2359,8 +2524,24 @@ static int ravb_set_features(struct net_device *ndev,
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ pm_runtime_get_noresume(dev);
+
+ if (pm_runtime_active(dev))
+ ret = info->set_feature(ndev, features);
+ else
+ ret = 0;
+
+ pm_runtime_put_noidle(dev);
+
+ if (ret)
+ return ret;
- return info->set_feature(ndev, features);
+ ndev->features = features;
+
+ return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2434,9 +2615,6 @@ static int ravb_mdio_release(struct ravb_private *priv)
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2447,9 +2625,10 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2460,9 +2639,6 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2473,9 +2649,10 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.aligned_tx = 1,
.gptp = 1,
.nc_queues = 1,
@@ -2483,9 +2660,6 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
};
static const struct ravb_hw_info ravb_rzv2m_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2496,9 +2670,10 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
@@ -2508,9 +2683,6 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
};
static const struct ravb_hw_info gbeth_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_gbeth,
- .rx_ring_format = ravb_rx_ring_format_gbeth,
- .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
.receive = ravb_rx_gbeth,
.set_rate = ravb_set_rate_gbeth,
.set_feature = ravb_set_features_gbeth,
@@ -2518,10 +2690,13 @@ static const struct ravb_hw_info gbeth_hw_info = {
.emac_init = ravb_emac_init_gbeth,
.gstrings_stats = ravb_gstrings_stats_gbeth,
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
- .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
.tccr_mask = TCCR_TSRQ0,
- .rx_max_buf_size = SZ_8K,
+ .rx_max_frame_size = SZ_8K,
+ .rx_max_desc_use = 4080,
+ .rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
.tx_counters = 1,
.carrier_counters = 1,
@@ -2541,100 +2716,91 @@ static const struct of_device_id ravb_match_table[] = {
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
-static int ravb_set_gti(struct net_device *ndev)
+static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
+ const char *ch, int *irq, irq_handler_t handler)
{
- struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- struct device *dev = ndev->dev.parent;
- unsigned long rate;
- uint64_t inc;
-
- if (info->gptp_ref_clk)
- rate = clk_get_rate(priv->gptp_clk);
- else
- rate = clk_get_rate(priv->clk);
- if (!rate)
- return -EINVAL;
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ struct device *dev = &pdev->dev;
+ const char *dev_name;
+ unsigned long flags;
+ int error, irq_num;
- inc = div64_ul(1000000000ULL << 20, rate);
+ if (irq_name) {
+ dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+ if (!dev_name)
+ return -ENOMEM;
- if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
- dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
- inc, GTI_TIV_MIN, GTI_TIV_MAX);
- return -EINVAL;
+ irq_num = platform_get_irq_byname(pdev, irq_name);
+ flags = 0;
+ } else {
+ dev_name = ndev->name;
+ irq_num = platform_get_irq(pdev, 0);
+ flags = IRQF_SHARED;
}
+ if (irq_num < 0)
+ return irq_num;
- ravb_write(ndev, inc, GTI);
+ if (irq)
+ *irq = irq_num;
- return 0;
+ error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+
+ return error;
}
-static int ravb_set_config_mode(struct net_device *ndev)
+static int ravb_setup_irqs(struct ravb_private *priv)
{
- struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct net_device *ndev = priv->ndev;
+ const char *irq_name, *emac_irq_name;
int error;
- if (info->gptp) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
- if (error)
- return error;
- /* Set CSEL value */
- ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else if (info->ccc_gac) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ if (!info->multi_irqs)
+ return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
+
+ if (info->err_mgmt_irqs) {
+ irq_name = "dia";
+ emac_irq_name = "line3";
} else {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ irq_name = "ch22";
+ emac_irq_name = "ch24";
}
- return error;
-}
-
-/* Set tx and rx clock internal delay modes */
-static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- bool explicit_delay = false;
- u32 delay;
+ error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
+ if (error)
+ return error;
- if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 1800, according to DT bindings */
- priv->rxcidm = !!delay;
- explicit_delay = true;
- }
- if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 2000, according to DT bindings */
- priv->txcidm = !!delay;
- explicit_delay = true;
- }
+ error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
+ ravb_emac_interrupt);
+ if (error)
+ return error;
- if (explicit_delay)
- return;
+ if (info->err_mgmt_irqs) {
+ error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
- /* Fall back to legacy rgmii-*id behavior */
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- priv->rxcidm = 1;
- priv->rgmii_override = 1;
+ error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- priv->txcidm = 1;
- priv->rgmii_override = 1;
- }
-}
+ error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
-static void ravb_set_delay_mode(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- u32 set = 0;
+ error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
+ if (error)
+ return error;
- if (priv->rxcidm)
- set |= APSR_RDM;
- if (priv->txcidm)
- set |= APSR_TDM;
- ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+ error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
+
+ return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
}
static int ravb_probe(struct platform_device *pdev)
@@ -2644,9 +2810,8 @@ static int ravb_probe(struct platform_device *pdev)
struct reset_control *rstc;
struct ravb_private *priv;
struct net_device *ndev;
- int error, irq, q;
struct resource *res;
- int i;
+ int error, q;
if (!np) {
dev_err(&pdev->dev,
@@ -2654,7 +2819,7 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
@@ -2673,25 +2838,6 @@ static int ravb_probe(struct platform_device *pdev)
if (error)
goto out_free_netdev;
- pm_runtime_enable(&pdev->dev);
- error = pm_runtime_resume_and_get(&pdev->dev);
- if (error < 0)
- goto out_rpm_disable;
-
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "dia");
- else
- irq = platform_get_irq_byname(pdev, "ch22");
- } else {
- irq = platform_get_irq(pdev, 0);
- }
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- ndev->irq = irq;
-
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
@@ -2706,10 +2852,43 @@ static int ravb_probe(struct platform_device *pdev)
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
}
+ error = ravb_setup_irqs(priv);
+ if (error)
+ goto out_reset_assert;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ error = PTR_ERR(priv->clk);
+ goto out_reset_assert;
+ }
+
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_reset_assert;
+ }
+ }
+
+ priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ error = PTR_ERR(priv->refclk);
+ goto out_reset_assert;
+ }
+ clk_prepare(priv->refclk);
+
+ platform_set_drvdata(pdev, ndev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
- goto out_release;
+ goto out_rpm_put;
}
/* The Ether-specific entries in the device structure. */
@@ -2720,79 +2899,14 @@ static int ravb_probe(struct platform_device *pdev)
error = of_get_phy_mode(np, &priv->phy_interface);
if (error && error != -ENODEV)
- goto out_release;
+ goto out_rpm_put;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "line3");
- else
- irq = platform_get_irq_byname(pdev, "ch24");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->emac_irq = irq;
- for (i = 0; i < NUM_RX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->rx_irqs[i] = irq;
- }
- for (i = 0; i < NUM_TX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->tx_irqs[i] = irq;
- }
-
- if (info->err_mgmt_irqs) {
- irq = platform_get_irq_byname(pdev, "err_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->erra_irq = irq;
-
- irq = platform_get_irq_byname(pdev, "mgmt_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->mgmta_irq = irq;
- }
- }
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- error = PTR_ERR(priv->clk);
- goto out_release;
- }
-
- priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
- if (IS_ERR(priv->refclk)) {
- error = PTR_ERR(priv->refclk);
- goto out_release;
- }
- clk_prepare_enable(priv->refclk);
-
- if (info->gptp_ref_clk) {
- priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
- if (IS_ERR(priv->gptp_clk)) {
- error = PTR_ERR(priv->gptp_clk);
- goto out_disable_refclk;
- }
- clk_prepare_enable(priv->gptp_clk);
- }
-
- ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->rx_max_frame_size -
+ (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
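Worked out for the R-Car entries above (header sizes per if_ether.h and if_vlan.h):

	/* rx_max_frame_size = SZ_2K = 2048 bytes, so
	 * max_mtu = 2048 - (ETH_HLEN 14 + VLAN_HLEN 4 + ETH_FCS_LEN 4)
	 *         = 2026 bytes.
	 */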
/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
@@ -2806,25 +2920,11 @@ static int ravb_probe(struct platform_device *pdev)
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
- /* Set AVB config mode */
- error = ravb_set_config_mode(ndev);
+ error = ravb_compute_gti(ndev);
if (error)
- goto out_disable_gptp_clk;
-
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- }
-
- if (info->internal_delay) {
- ravb_parse_delay_mode(np, ndev);
- ravb_set_delay_mode(ndev);
- }
+ ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2835,22 +2935,22 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Initialise HW timestamp list */
INIT_LIST_HEAD(&priv->ts_skb_list);
- /* Initialise PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_init(ndev, pdev);
-
/* Debug message level */
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+ /* Set config mode as this is needed for PHY initialization. */
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ goto out_rpm_put;
+
/* Read and set MAC address */
ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -2863,9 +2963,14 @@ static int ravb_probe(struct platform_device *pdev)
error = ravb_mdio_init(priv);
if (error) {
dev_err(&pdev->dev, "failed to initialize MDIO\n");
- goto out_dma_free;
+ goto out_reset_mode;
}
+ /* Undo previous switch to config opmode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ goto out_mdio_release;
+
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
@@ -2881,7 +2986,8 @@ static int ravb_probe(struct platform_device *pdev)
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
- platform_set_drvdata(pdev, ndev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -2890,22 +2996,19 @@ out_napi_del:
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
+out_mdio_release:
ravb_mdio_release(priv);
-out_dma_free:
+out_reset_mode:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
-
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-out_disable_gptp_clk:
- clk_disable_unprepare(priv->gptp_clk);
-out_disable_refclk:
- clk_disable_unprepare(priv->refclk);
-out_release:
+out_rpm_put:
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ clk_unprepare(priv->refclk);
+out_reset_assert:
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
@@ -2917,6 +3020,12 @@ static void ravb_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int error;
+
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ return;
unregister_netdev(ndev);
if (info->nc_queues)
@@ -2925,20 +3034,13 @@ static void ravb_remove(struct platform_device *pdev)
ravb_mdio_release(priv);
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
- ravb_set_opmode(ndev, CCC_OPC_RESET);
-
- clk_disable_unprepare(priv->gptp_clk);
- clk_disable_unprepare(priv->refclk);
-
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ clk_unprepare(priv->refclk);
reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
@@ -2964,6 +3066,9 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Enable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+ if (priv->info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
return enable_irq_wake(priv->emac_irq);
}
@@ -2971,6 +3076,20 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ /* Set reset mode to rearm the WoL logic. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ /* Set AVB config mode. */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ return error;
+
+ if (priv->info->ccc_gac)
+ ravb_ptp_init(ndev, priv->pdev);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2984,102 +3103,96 @@ static int ravb_wol_restore(struct net_device *ndev)
return disable_irq_wake(priv->emac_irq);
}
-static int __maybe_unused ravb_suspend(struct device *dev)
+static int ravb_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
- return 0;
+ goto reset_assert;
netif_device_detach(ndev);
if (priv->wol_enabled)
- ret = ravb_wol_setup(ndev);
- else
- ret = ravb_close(ndev);
+ return ravb_wol_setup(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_stop(ndev);
+ ret = ravb_close(ndev);
+ if (ret)
+ return ret;
- return ret;
+ ret = pm_runtime_force_suspend(&priv->pdev->dev);
+ if (ret)
+ return ret;
+
+reset_assert:
+ return reset_control_assert(priv->rstc);
}
-static int __maybe_unused ravb_resume(struct device *dev)
+static int ravb_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- int ret = 0;
-
- /* If WoL is enabled set reset mode to rearm the WoL logic */
- if (priv->wol_enabled) {
- ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
- if (ret)
- return ret;
- }
-
- /* All register have been reset to default values.
- * Restore all registers which where setup at probe time and
- * reopen device if it was running before system suspended.
- */
+ int ret;
- /* Set AVB config mode */
- ret = ravb_set_config_mode(ndev);
+ ret = reset_control_deassert(priv->rstc);
if (ret)
return ret;
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
+ if (!netif_running(ndev))
+ return 0;
+
+ /* If WoL is enabled, restore the interface. */
+ if (priv->wol_enabled) {
+ ret = ravb_wol_restore(ndev);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
-
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}
- if (info->internal_delay)
- ravb_set_delay_mode(ndev);
+ /* Reopening the interface will restore the device to the working state. */
+ ret = ravb_open(ndev);
+ if (ret < 0)
+ goto out_rpm_put;
- /* Restore descriptor base address table */
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ ravb_set_rx_mode(ndev);
+ netif_device_attach(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_init(ndev, priv->pdev);
+ return 0;
- if (netif_running(ndev)) {
- if (priv->wol_enabled) {
- ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- }
- ret = ravb_open(ndev);
- if (ret < 0)
- return ret;
- ravb_set_rx_mode(ndev);
- netif_device_attach(ndev);
+out_rpm_put:
+ if (!priv->wol_enabled) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
return ret;
}
-static int __maybe_unused ravb_runtime_nop(struct device *dev)
+static int ravb_runtime_suspend(struct device *dev)
{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ clk_disable(priv->refclk);
+
return 0;
}
+static int ravb_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return clk_enable(priv->refclk);
+}
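A short note on the refclk handling, inferred from the probe/remove paths in this patch: the prepare and enable halves of the clk API are deliberately split so the runtime-PM callbacks stay cheap (clk_prepare() may sleep, clk_enable() may not).

	/* probe():          clk_prepare(priv->refclk)   - once, may sleep
	 * runtime resume:   clk_enable(priv->refclk)    - fast path
	 * runtime suspend:  clk_disable(priv->refclk)   - fast path
	 * remove()/errors:  clk_unprepare(priv->refclk) - matching teardown
	 */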
+
static const struct dev_pm_ops ravb_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
- SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
+ SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
+ RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
};
static struct platform_driver ravb_driver = {
@@ -3087,7 +3200,7 @@ static struct platform_driver ravb_driver = {
.remove_new = ravb_remove,
.driver = {
.name = "ravb",
- .pm = &ravb_dev_pm_ops,
+ .pm = pm_ptr(&ravb_dev_pm_ops),
.of_match_table = ravb_match_table,
},
};
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 9e59669a93dd..755db89db909 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -32,7 +32,6 @@
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <generated/utsrelease.h>
#include "rocker_hw.h"
#include "rocker.h"
@@ -2227,7 +2226,6 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index d14e0cfc3a6b..1458939c3bf5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -503,7 +503,6 @@ struct sxgbe_priv_data {
bool tx_path_in_lpi_mode;
int lpi_irq;
int eee_enabled;
- int eee_active;
int tx_lpi_timer;
};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 8ba017ec9849..4a439b34114d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -133,22 +133,20 @@ static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
static int sxgbe_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (!priv->hw_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
return phy_ethtool_get_eee(dev->phydev, edata);
}
static int sxgbe_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 71439825ea4e..ecbe3994f2b1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -130,7 +130,6 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
if (phy_init_eee(ndev->phydev, true))
return false;
- priv->eee_active = 1;
timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 175bd9cdfdac..551f890db90a 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -595,7 +595,7 @@ void efx_stop_all(struct efx_nic *efx)
efx_stop_datapath(efx);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index e001f27085c6..1cb32aedd89c 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2085,7 +2085,7 @@ int ef4_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
static void ef4_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fac227d372db..dcd901eccfc8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index e4b294b8e9ac..88e5bc347a44 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -605,7 +605,7 @@ static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats
return efx->type->update_stats(efx, full_stats, core_stats);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_siena_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 4579f43484c3..219fb358a646 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c
index a7a9ab304e13..71f9b5ec5ae4 100644
--- a/drivers/net/ethernet/sfc/siena/tx_common.c
+++ b/drivers/net/ethernet/sfc/siena/tx_common.c
@@ -317,11 +317,10 @@ static int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
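The conversion is behavior-preserving: per include/linux/skbuff.h the helpers compute exactly the pointer difference the old code spelled out by hand.

	/* skb_transport_offset(skb) == skb_transport_header(skb) - skb->data
	 * skb_network_offset(skb)   == skb_network_header(skb) - skb->data
	 */

The same substitution is applied in sfc/tx_common.c and tx_tso.c below.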
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 9f2393d34371..2adb132b2f7e 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -336,11 +336,10 @@ int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c
index 64a6768f75ea..ddf149db8180 100644
--- a/drivers/net/ethernet/sfc/tx_tso.c
+++ b/drivers/net/ethernet/sfc/tx_tso.c
@@ -174,8 +174,8 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
unsigned int header_len, in_len;
dma_addr_t dma_addr;
- st->ip_off = skb_network_header(skb) - skb->data;
- st->tcp_off = skb_transport_header(skb) - skb->data;
+ st->ip_off = skb_network_offset(skb);
+ st->tcp_off = skb_transport_offset(skb);
header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
in_len = skb_headlen(skb) - header_len;
st->header_len = header_len;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 758347616535..78ff3af7911a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -98,6 +98,7 @@ static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+MODULE_DESCRIPTION("SMC 91C9x/91C1xxx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc91x");
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 31cb7d0166f0..74f1ccc96459 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -56,6 +56,7 @@
#define SMSC_MDIONAME "smsc911x-mdio"
#define SMSC_DRV_VERSION "2008-10-21"
+MODULE_DESCRIPTION("SMSC LAN911x/LAN921x Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SMSC_DRV_VERSION);
MODULE_ALIAS("platform:smsc911x");
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index e1c4a11c1f18..15cb96c2506d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -26,6 +26,7 @@
#define DRV_DESCRIPTION "SMSC LAN9420 driver"
#define DRV_VERSION "1.01"
+MODULE_DESCRIPTION("SMSC LAN9420 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 85dcda51df05..4ec61f1ee71a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -165,9 +165,9 @@ config DWMAC_STARFIVE
help
Support for ethernet controllers on StarFive RISC-V SoCs
- This selects the StarFive platform specific glue layer support for
- the stmmac device driver. This driver is used for StarFive JH7110
- ethernet controller.
+ This selects the StarFive platform specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ StarFive JH7100 and JH7110 ethernet controllers.
config DWMAC_STI
tristate "STi GMAC support"
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 721c1f8e892f..a6fefe675ef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -59,28 +59,51 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+struct stmmac_q_tx_stats {
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_set_ic_bit;
+ u64_stats_t tx_tso_frames;
+ u64_stats_t tx_tso_nfrags;
+};
+
+struct stmmac_napi_tx_stats {
+ u64_stats_t tx_packets;
+ u64_stats_t tx_pkt_n;
+ u64_stats_t poll;
+ u64_stats_t tx_clean;
+ u64_stats_t tx_set_ic_bit;
+};
+
struct stmmac_txq_stats {
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_pkt_n;
- u64 tx_normal_irq_n;
- u64 napi_poll;
- u64 tx_clean;
- u64 tx_set_ic_bit;
- u64 tx_tso_frames;
- u64 tx_tso_nfrags;
- struct u64_stats_sync syncp;
+ /* Updates protected by tx queue lock. */
+ struct u64_stats_sync q_syncp;
+ struct stmmac_q_tx_stats q;
+
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_tx_stats napi;
} ____cacheline_aligned_in_smp;
+struct stmmac_napi_rx_stats {
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_pkt_n;
+ u64_stats_t poll;
+};
+
struct stmmac_rxq_stats {
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_pkt_n;
- u64 rx_normal_irq_n;
- u64 napi_poll;
- struct u64_stats_sync syncp;
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_rx_stats napi;
} ____cacheline_aligned_in_smp;
+/* Per-CPU updates, protected by the fact that nested irqs are not allowed. */
+struct stmmac_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+};
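A hedged reader-side sketch of how these per-CPU counters would be folded back into one total; demo_rx_irq_total() is illustrative, not part of the patch:

	static u64 demo_rx_irq_total(struct stmmac_extra_stats *x, u32 chan)
	{
		u64 total = 0;
		int cpu;

		for_each_possible_cpu(cpu) {
			struct stmmac_pcpu_stats *s;
			unsigned int start;
			u64 v;

			s = per_cpu_ptr(x->pcpu_stats, cpu);
			do {	/* retry if a writer raced with us */
				start = u64_stats_fetch_begin(&s->syncp);
				v = u64_stats_read(&s->rx_normal_irq_n[chan]);
			} while (u64_stats_fetch_retry(&s->syncp, start));
			total += v;
		}
		return total;
	}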
+
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -202,9 +225,12 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ struct stmmac_pcpu_stats __percpu *pcpu_stats;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
@@ -216,6 +242,7 @@ struct stmmac_safety_stats {
unsigned long mac_errors[32];
unsigned long mtl_errors[32];
unsigned long dma_errors[32];
+ unsigned long dma_dpp_errors[32];
};
/* Number of fields in Safety Stats */
@@ -344,6 +371,7 @@ enum request_irq_err {
REQ_IRQ_ERR_ALL,
REQ_IRQ_ERR_TX,
REQ_IRQ_ERR_RX,
+ REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
REQ_IRQ_ERR_LPI,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 8f730ada71f9..6b65420e11b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+ /* Default TX Q0 to use TSO and the remaining TX queues for TBS */
+ for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 31631e3f89d0..e254b21fdb59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -106,6 +106,7 @@ struct qcom_ethqos {
struct clk *link_clk;
struct phy *serdes_phy;
unsigned int speed;
+ int serdes_speed;
phy_interface_t phy_mode;
const struct ethqos_emac_por *por;
@@ -169,6 +170,9 @@ static void rgmii_dump(void *priv)
static void
ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
{
+ if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
+ return;
+
switch (speed) {
case SPEED_1000:
ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
@@ -606,19 +610,39 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
*/
static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
{
+ struct net_device *dev = platform_get_drvdata(ethqos->pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
int val;
val = readl(ethqos->mac_base + MAC_CTRL_REG);
switch (ethqos->speed) {
+ case SPEED_2500:
+ val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_2500)
+ phy_set_speed(ethqos->serdes_phy, SPEED_2500);
+ ethqos->serdes_speed = SPEED_2500;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ break;
case SPEED_1000:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
@@ -627,6 +651,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
}
@@ -728,7 +756,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
struct qcom_ethqos *ethqos;
- int ret;
+ int ret, i;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -799,6 +827,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
"Failed to get serdes phy\n");
ethqos->speed = SPEED_1000;
+ ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
@@ -822,6 +851,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->serdes_powerdown = qcom_ethqos_serdes_powerdown;
}
+ /* Enable TSO on queue 0 and TBS on the remaining queues */
+ for (i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index ba2ce776bd4d..68f85e4605cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -585,4 +585,5 @@ static struct platform_driver socfpga_dwmac_driver = {
};
module_platform_driver(socfpga_dwmac_driver);
+MODULE_DESCRIPTION("Altera SOC DWMAC Specific Glue layer");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 5d630affb4d1..4e1076faee0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -15,13 +15,20 @@
#include "stmmac_platform.h"
-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
-#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
+#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
+#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+
+#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
+
+struct starfive_dwmac_data {
+ unsigned int gtxclk_dlychain;
+};
struct starfive_dwmac {
struct device *dev;
struct clk *clk_tx;
+ const struct starfive_dwmac_data *data;
};
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
@@ -67,6 +74,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
break;
@@ -89,6 +98,14 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
if (err)
return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
+ if (dwmac->data) {
+ err = regmap_write(regmap, JH7100_SYSMAIN_REGISTER49_DLYCHAIN,
+ dwmac->data->gtxclk_dlychain);
+ if (err)
+ return dev_err_probe(dwmac->dev, err,
+ "error selecting gtxclk delay chain\n");
+ }
+
return 0;
}
@@ -114,6 +131,8 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
+ dwmac->data = device_get_match_data(&pdev->dev);
+
dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
if (IS_ERR(dwmac->clk_tx))
return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
@@ -144,8 +163,13 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+static const struct starfive_dwmac_data jh7100_data = {
+ .gtxclk_dlychain = 4,
+};
+
static const struct of_device_id starfive_dwmac_match[] = {
- { .compatible = "starfive,jh7110-dwmac" },
+ { .compatible = "starfive,jh7100-dwmac", .data = &jh7100_data },
+ { .compatible = "starfive,jh7110-dwmac" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 137741b94122..b21d99faa2d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
u32 v;
@@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 358e7dcb6a9a..17d9120db5fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -92,7 +92,7 @@
#define DMA_TBS_FTOV BIT(0)
#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
-/* Following DMA defines are chanels oriented */
+/* Following DMA defines are channel-oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9470d3fd2ded..0d185e54eb7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 7907d62d3437..85e18f9a22f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 207ff1799f2c..6a2c7d22df1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -303,6 +303,8 @@
#define XGMAC_RXCEIE BIT(4)
#define XGMAC_TXCEIE BIT(0)
#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
+#define XGMAC_MTL_DPP_CONTROL 0x000010e0
+#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -385,6 +387,7 @@
#define XGMAC_DCEIE BIT(1)
#define XGMAC_TCEIE BIT(0)
#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
+#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index eb48211d9b0e..1af2f89a0504 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
+#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
+#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
+ { true, "TDPES0", DPP_TX_ERR },
+ { true, "TDPES1", DPP_TX_ERR },
+ { true, "TDPES2", DPP_TX_ERR },
+ { true, "TDPES3", DPP_TX_ERR },
+ { true, "TDPES4", DPP_TX_ERR },
+ { true, "TDPES5", DPP_TX_ERR },
+ { true, "TDPES6", DPP_TX_ERR },
+ { true, "TDPES7", DPP_TX_ERR },
+ { true, "TDPES8", DPP_TX_ERR },
+ { true, "TDPES9", DPP_TX_ERR },
+ { true, "TDPES10", DPP_TX_ERR },
+ { true, "TDPES11", DPP_TX_ERR },
+ { true, "TDPES12", DPP_TX_ERR },
+ { true, "TDPES13", DPP_TX_ERR },
+ { true, "TDPES14", DPP_TX_ERR },
+ { true, "TDPES15", DPP_TX_ERR },
+ { true, "RDPES0", DPP_RX_ERR },
+ { true, "RDPES1", DPP_RX_ERR },
+ { true, "RDPES2", DPP_RX_ERR },
+ { true, "RDPES3", DPP_RX_ERR },
+ { true, "RDPES4", DPP_RX_ERR },
+ { true, "RDPES5", DPP_RX_ERR },
+ { true, "RDPES6", DPP_RX_ERR },
+ { true, "RDPES7", DPP_RX_ERR },
+ { true, "RDPES8", DPP_RX_ERR },
+ { true, "RDPES9", DPP_RX_ERR },
+ { true, "RDPES10", DPP_RX_ERR },
+ { true, "RDPES11", DPP_RX_ERR },
+ { true, "RDPES12", DPP_RX_ERR },
+ { true, "RDPES13", DPP_RX_ERR },
+ { true, "RDPES14", DPP_RX_ERR },
+ { true, "RDPES15", DPP_RX_ERR },
+};
+
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
void __iomem *ioaddr, bool correctable,
struct stmmac_safety_stats *stats)
@@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_log_error(ndev, value, correctable, "DMA",
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+
+ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
+ dwxgmac3_dma_dpp_errors,
+ STAT_OFF(dma_dpp_errors), stats);
}
static int
@@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+ /* 5. Enable Data Path Parity Protection */
+ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
+ /* already enabled by default, explicitly enable it again */
+ value &= ~XGMAC_DPP_DISABLE;
+ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
+
return 0;
}
@@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
ret |= !corr;
}
- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ /* DMA_DPP_Interrupt_Status is indicated by the MCSIS bit in
+ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
+ * Parity Errors here
+ */
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
corr = dma & XGMAC_DECIS;
if (err) {
dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
@@ -930,6 +985,7 @@ static const struct dwxgmac3_error {
{ dwxgmac3_mac_errors },
{ dwxgmac3_mtl_errors },
{ dwxgmac3_dma_errors },
+ { dwxgmac3_dma_dpp_errors },
};
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
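
How the TDPES/RDPES table above is consumed (a sketch; dwxgmac3_log_error() in this file is the real consumer): each set bit in the latched status word indexes one descriptor, so TDPES0..15 and RDPES0..15 line up with bits 0..31. Assumes the struct fields used in this file (valid/desc/detailed_desc):

static void log_dpp_errors(struct net_device *ndev, u32 status)
{
	unsigned long mask = status;	/* for_each_set_bit wants a long */
	int bit;

	for_each_set_bit(bit, &mask, 32) {
		const struct dwxgmac3_error_desc *d =
			&dwxgmac3_dma_dpp_errors[bit];

		if (d->valid)
			netdev_err(ndev, "Found DMA_DPP error '%s: %s'\n",
				   d->desc, d->detailed_desc);
	}
}
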
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 3cde695fec91..dd2ab6185c40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 1bd34b2a47e8..29367105df54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -224,7 +224,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
- .est_off = EST_XGMAC_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 14c9d2637dfe..dff02d75d519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -86,10 +86,6 @@ struct stmmac_counters {
unsigned int mmc_rx_discard_octets_gb;
unsigned int mmc_rx_align_err_frames;
- /* IPC */
- unsigned int mmc_rx_ipc_intr_mask;
- unsigned int mmc_rx_ipc_intr;
-
/* IPv4 */
unsigned int mmc_rx_ipv4_gd;
unsigned int mmc_rx_ipv4_hderr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 8597c6abae8d..7eb477faa75a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -316,9 +316,6 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
- /* IPC */
- mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
- mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f155e4841c62..dddcaa9220cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -31,6 +31,7 @@ struct stmmac_resources {
int wol_irq;
int lpi_irq;
int irq;
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -298,6 +299,7 @@ struct stmmac_priv {
void __iomem *ptpaddr;
void __iomem *estaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -306,6 +308,7 @@ struct stmmac_priv {
char int_name_mac[IFNAMSIZ + 9];
char int_name_wol[IFNAMSIZ + 9];
char int_name_lpi[IFNAMSIZ + 9];
+ char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index 4da6ccc17c20..c9693f77e1f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -81,6 +81,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
void __iomem *est_addr = priv->estaddr;
u32 txqcnt_mask = BIT(txqcnt) - 1;
+ int i;
status = readl(est_addr + EST_STATUS);
@@ -125,6 +126,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
+ for (i = 0; i < txqcnt; i++) {
+ if (feqn & BIT(i))
+ x->mtl_est_txq_hlbf[i]++;
+ }
+
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 42d27b97dd1d..e1537a57815f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -243,8 +243,6 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
STMMAC_MMC_STAT(mmc_rx_align_err_frames),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
@@ -549,44 +547,79 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
}
+static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
+static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int start;
- int q, stat;
- char *p;
+ int q;
for (q = 0; q < tx_cnt; q++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
- struct stmmac_txq_stats snapshot;
+ u64 pkt_n;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
- for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ = (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_tx_normal_irq_n(priv, q);
}
for (q = 0; q < rx_cnt; q++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
- struct stmmac_rxq_stats snapshot;
+ u64 pkt_n;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
- for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ = (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_rx_normal_irq_n(priv, q);
}
}
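
The fetch/retry loops in this hunk all follow the same seqcount-reader pattern; isolated for clarity (standard kernel API, minimal sketch):

#include <linux/u64_stats_sync.h>

/* On 32-bit SMP the syncp is a seqcount: re-read until no writer
 * interleaved with us. On 64-bit the loop compiles down to a single
 * 64-bit load.
 */
static u64 read_counter(struct u64_stats_sync *syncp, const u64_stats_t *ctr)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(syncp);
		val = u64_stats_read(ctr);
	} while (u64_stats_fetch_retry(syncp, start));

	return val;
}
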
@@ -645,39 +678,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < rx_queues_count; i++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
- struct stmmac_rxq_stats snapshot;
+ struct stmmac_napi_rx_stats snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
-
- data[j++] += snapshot.rx_pkt_n;
- data[j++] += snapshot.rx_normal_irq_n;
- normal_irq_n += snapshot.rx_normal_irq_n;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ snapshot = rxq_stats->napi;
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
+ n_irq = stmmac_get_rx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ napi_poll += u64_stats_read(&snapshot.poll);
}
pos = j;
for (i = 0; i < tx_queues_count; i++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
- struct stmmac_txq_stats snapshot;
+ struct stmmac_napi_tx_stats napi_snapshot;
+ struct stmmac_q_tx_stats q_snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
-
- data[j++] += snapshot.tx_pkt_n;
- data[j++] += snapshot.tx_normal_irq_n;
- normal_irq_n += snapshot.tx_normal_irq_n;
- data[j++] += snapshot.tx_clean;
- data[j++] += snapshot.tx_set_ic_bit;
- data[j++] += snapshot.tx_tso_frames;
- data[j++] += snapshot.tx_tso_nfrags;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ q_snapshot = txq_stats->q;
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ napi_snapshot = txq_stats->napi;
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
+ n_irq = stmmac_get_tx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
+ data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
+ u64_stats_read(&napi_snapshot.tx_set_ic_bit);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
+ napi_poll += u64_stats_read(&napi_snapshot.poll);
}
normal_irq_n += priv->xstats.rx_early_irq;
data[j++] = normal_irq_n;
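
tx_set_ic_bit is written from two contexts that cannot share one writer-side seqcount without irqsave: the xmit path (q_syncp, process context) and NAPI completion (napi_syncp, softirq). Readers therefore sum the two halves, as the hunk above does; a sketch under the field names used in the diff:

static u64 read_tx_set_ic_bit(struct stmmac_txq_stats *s)
{
	unsigned int start;
	u64 q_part, napi_part;

	do {
		start = u64_stats_fetch_begin(&s->q_syncp);
		q_part = u64_stats_read(&s->q.tx_set_ic_bit);
	} while (u64_stats_fetch_retry(&s->q_syncp, start));

	do {
		start = u64_stats_fetch_begin(&s->napi_syncp);
		napi_part = u64_stats_read(&s->napi.tx_set_ic_bit);
	} while (u64_stats_fetch_retry(&s->napi_syncp, start));

	return q_part + napi_part;
}
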
@@ -852,15 +895,13 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
edata->tx_lpi_enabled = priv->tx_lpi_enabled;
@@ -868,7 +909,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
}
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a0e46369ae15..24cd80490d19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2482,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct xdp_desc xdp_desc;
bool work_done = true;
u32 tx_set_ic_bit = 0;
- unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2507,6 +2506,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ continue;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -2566,9 +2572,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->napi_syncp);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2616,7 +2622,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
- unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2674,7 +2679,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
}
if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
- } else {
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
struct stmmac_xsk_tx_complete tx_compl = {
.priv = priv,
.desc = p,
@@ -2782,11 +2788,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
if (tx_q->dirty_tx != tx_q->cur_tx)
*pending_packets = true;
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_packets += tx_packets;
- txq_stats->tx_pkt_n += tx_packets;
- txq_stats->tx_clean++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+ u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+ u64_stats_inc(&txq_stats->napi.tx_clean);
+ u64_stats_update_end(&txq_stats->napi_syncp);
priv->xstats.tx_errors += tx_errors;
@@ -3592,6 +3598,10 @@ static void stmmac_free_irq(struct net_device *dev,
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
+ case REQ_IRQ_ERR_SFTY:
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
+ free_irq(priv->sfty_irq, dev);
+ fallthrough;
case REQ_IRQ_ERR_WOL:
free_irq(dev->irq, dev);
fallthrough;
@@ -3662,6 +3672,23 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctible/Uncorrectible
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ int_name = priv->int_name_sfty;
+ sprintf(int_name, "%s:%s", dev->name, "safety");
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty MSI %d (error: %d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
/* Request the Safety Feature Correctible Error line in
* case of another line is used
*/
@@ -3799,6 +3826,21 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctible/Uncorrectible
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
return 0;
irq_error:
@@ -3932,6 +3974,9 @@ static int __stmmac_open(struct net_device *dev,
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
buf_sz = dma_conf->dma_buf_sz;
+ for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
+ dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
stmmac_reset_queues_param(priv);
@@ -4004,8 +4049,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
- if (priv->fpe_wq)
+ if (priv->fpe_wq) {
destroy_workqueue(priv->fpe_wq);
+ priv->fpe_wq = NULL;
+ }
netdev_info(priv->dev, "FPE workqueue stop");
}
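
Clearing priv->fpe_wq after destroy_workqueue() makes the stop path idempotent: without it, a second call on a teardown path would pass an already-freed pointer to destroy_workqueue(). The pattern, reduced to its core (sketch, not stmmac-specific):

#include <linux/workqueue.h>

static void stop_wq_once(struct workqueue_struct **wq)
{
	if (*wq) {
		destroy_workqueue(*wq);
		*wq = NULL;	/* guard against a second teardown call */
	}
}
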
@@ -4210,7 +4257,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
- unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4375,13 +4421,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
- txq_stats->tx_tso_frames++;
- txq_stats->tx_tso_nfrags += nfrags;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ u64_stats_inc(&txq_stats->q.tx_tso_frames);
+ u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4480,7 +4526,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
- unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4498,6 +4543,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ skb->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
+
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@ -4650,11 +4702,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4715,6 +4767,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
+max_sdu_err:
dev_kfree_skb(skb);
priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
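
The same queueMaxSDU guard is open-coded in all three transmit paths touched by this patch (stmmac_xmit, stmmac_xdp_xmit_zc, stmmac_xdp_xmit_xdpf); factored out it reads as below, a sketch under the field names used in the diff:

static bool est_oversized(struct stmmac_priv *priv, u32 queue,
			  unsigned int len)
{
	struct stmmac_est *est = priv->plat->est;

	if (est && est->enable && est->max_sdu[queue] &&
	    len > est->max_sdu[queue]) {
		priv->xstats.max_sdu_txq_drop[queue]++;	/* counted per queue */
		return true;	/* caller drops or skips the frame */
	}

	return false;
}
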
@@ -4871,6 +4924,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdpf->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ return STMMAC_XDP_CONSUMED;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -4918,12 +4978,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
- unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -5073,7 +5132,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
- unsigned long flags;
struct sk_buff *skb;
u32 hash;
@@ -5103,10 +5161,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n++;
- rxq_stats->rx_bytes += len;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5188,7 +5246,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
- unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5343,9 +5400,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5383,7 +5440,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
- unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5643,11 +5699,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_packets += rx_packets;
- rxq_stats->rx_bytes += rx_bytes;
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5662,13 +5718,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_priv *priv = ch->priv_data;
struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5690,13 +5745,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_txq_stats *txq_stats;
bool pending_packets = false;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
work_done = min(work_done, budget);
@@ -5726,17 +5780,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_rxq_stats *rxq_stats;
struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
- unsigned long flags;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
tx_done = min(tx_done, budget);
@@ -6011,10 +6064,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
priv->tx_path_in_lpi_mode = false;
}
- for (queue = 0; queue < queues_count; queue++) {
- status = stmmac_host_mtl_irq_status(priv, priv->hw,
- queue);
- }
+ for (queue = 0; queue < queues_count; queue++)
+ stmmac_host_mtl_irq_status(priv, priv->hw, queue);
/* PCS link status */
if (priv->hw->pcs &&
@@ -6049,8 +6100,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
- /* Check if a fatal error happened */
- if (stmmac_safety_feat_interrupt(priv))
+ /* Check ASP error if it isn't delivered via an individual IRQ */
+ if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
return IRQ_HANDLED;
/* To handle Common interrupts */
@@ -6067,11 +6118,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6087,11 +6133,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6113,11 +6154,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6144,11 +6180,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
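
The deleted !dev / !data checks were unreachable: dev_id is the cookie passed to request_irq(), which these handlers always register with a valid pointer, and the error path even dereferenced priv, itself derived from dev, before the test could matter. Shape of the removed anti-pattern (sketch):

static irqreturn_t bogus_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);	/* already uses dev */

	if (unlikely(!dev)) {
		/* dead code: if dev were NULL, priv would be a bogus
		 * offset-of-NULL pointer and the print below would fault
		 * first; the IRQ core only hands back the cookie we
		 * registered.
		 */
		netdev_err(priv->dev, "invalid dev pointer\n");
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
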
@@ -7062,10 +7093,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 tx_bytes;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- tx_packets = txq_stats->tx_packets;
- tx_bytes = txq_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
@@ -7077,10 +7111,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 rx_bytes;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- rx_packets = rxq_stats->rx_packets;
- rx_bytes = rxq_stats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+ rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -7474,9 +7508,16 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
- for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->xstats.txq_stats[i].syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+ }
+
+ priv->xstats.pcpu_stats =
+ devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+ if (!priv->xstats.pcpu_stats)
+ return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
@@ -7489,6 +7530,7 @@ int stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
+ priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
@@ -7542,6 +7584,9 @@ int stmmac_dvr_probe(struct device *device,
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
ERR_PTR(ret));
+ /* Wait a bit for the reset to take effect */
+ udelay(10);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index aefc121464b5..13a30e6df4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -110,6 +110,8 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
/* Enable and restart the Auto-Negotiation */
if (ane)
value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+ else
+ value &= ~GMAC_AN_CTRL_ANE;
/* In case of MAC-2-MAC connection, block is configured to operate
* according to MAC conf register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 70eadc83ca68..54797edc9b38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -743,6 +743,14 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}
+ stmmac_res->sfty_irq =
+ platform_get_irq_byname_optional(pdev, "sfty");
+ if (stmmac_res->sfty_irq < 0) {
+ if (stmmac_res->sfty_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(&pdev->dev, "IRQ sfty not found\n");
+ }
+
stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
return PTR_ERR_OR_ZERO(stmmac_res->addr);
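
platform_get_irq_byname_optional() returns a negative errno when the line is absent; only -EPROBE_DEFER must be bubbled up so the probe is retried once the interrupt parent appears. The pattern in isolation (minimal sketch):

#include <linux/platform_device.h>

static int get_optional_irq(struct platform_device *pdev, const char *name)
{
	int irq = platform_get_irq_byname_optional(pdev, name);

	if (irq == -EPROBE_DEFER)
		return irq;		/* interrupt parent not ready yet */
	if (irq < 0)
		dev_info(&pdev->dev, "IRQ %s not found\n", name);

	return irq;			/* < 0 simply means "not wired" */
}
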
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 26fa33e5ec34..cce00719937d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -915,8 +915,30 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
return time;
}
-static int tc_setup_taprio(struct stmmac_priv *priv,
- struct tc_taprio_qopt_offload *qopt)
+static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct plat_stmmacenet_data *plat = priv->plat;
+ u32 num_tc = qopt->mqprio.qopt.num_tc;
+ u32 offset, count, i, j;
+
+ /* QueueMaxSDU received by the driver corresponds to the Linux traffic
+ * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
+ */
+ for (i = 0; i < num_tc; i++) {
+ if (!qopt->max_sdu[i])
+ continue;
+
+ offset = qopt->mqprio.qopt.offset[i];
+ count = qopt->mqprio.qopt.count[i];
+
+ for (j = offset; j < offset + count; j++)
+ plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ }
+}
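
A worked example of the mapping above, with hypothetical mqprio parameters: num_tc = 2, offset = {0, 2}, count = {2, 2}, max_sdu = {1500, 300}. The adjustment adds the two 6-byte MAC addresses (ETH_HLEN - ETH_TLEN = 14 - 2 = 12 octets) because the driver later compares the stored limit against skb->len:

#include <linux/if_ether.h>	/* ETH_HLEN = 14, ETH_TLEN = 2 */

/* Hypothetical mqprio layout: two traffic classes, two queues each. */
static const u32 offset[2]  = { 0, 2 };
static const u32 count[2]   = { 2, 2 };
static const u32 max_sdu[2] = { 1500, 300 };

/* Applying the loop above yields per-queue limits:
 *   queues 0-1: 1500 + 14 - 2 = 1512 octets
 *   queues 2-3:  300 + 14 - 2 =  312 octets
 */
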
+
+static int tc_taprio_configure(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
@@ -968,8 +990,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- else if (qopt->cmd != TAPRIO_CMD_REPLACE)
- return -EOPNOTSUPP;
if (qopt->num_entries >= dep)
return -EINVAL;
@@ -1045,6 +1065,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ter = qopt->cycle_time_extension;
+ tc_taprio_map_maxsdu_txq(priv, qopt);
+
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
@@ -1078,6 +1100,11 @@ disable:
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
+ /* Reset taprio status */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ priv->xstats.max_sdu_txq_drop[i] = 0;
+ priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ }
mutex_unlock(&priv->plat->est->lock);
}
@@ -1095,6 +1122,57 @@ disable:
return ret;
}
+static void tc_taprio_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u64 window_drops = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ window_drops += priv->xstats.max_sdu_txq_drop[i] +
+ priv->xstats.mtl_est_txq_hlbf[i];
+ qopt->stats.window_drops = window_drops;
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ qopt->stats.tx_overruns = 0;
+}
+
+static void tc_taprio_queue_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
+ int queue = qopt->queue_stats.queue;
+
+ q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
+ priv->xstats.mtl_est_txq_hlbf[queue];
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ q_stats->stats.tx_overruns = 0;
+}
+
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int err = 0;
+
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ case TAPRIO_CMD_DESTROY:
+ err = tc_taprio_configure(priv, qopt);
+ break;
+ case TAPRIO_CMD_STATS:
+ tc_taprio_stats(priv, qopt);
+ break;
+ case TAPRIO_CMD_QUEUE_STATS:
+ tc_taprio_queue_stats(priv, qopt);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1126,6 +1204,7 @@ static int tc_query_caps(struct stmmac_priv *priv,
return -EOPNOTSUPP;
caps->gate_mask_per_txq = true;
+ caps->supports_queue_max_sdu = true;
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 3525d5c0d694..351609f4f011 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1144,9 +1144,9 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
nskb->protocol = skb->protocol;
offset = skb_mac_header(skb) - skb->data;
skb_set_mac_header(nskb, offset);
- offset = skb_network_header(skb) - skb->data;
+ offset = skb_network_offset(skb);
skb_set_network_header(nskb, offset);
- offset = skb_transport_header(skb) - skb->data;
+ offset = skb_transport_offset(skb);
skb_set_transport_header(nskb, offset);
offset = 0;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index be01450c20dc..1530d13984d4 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -189,6 +189,7 @@ config TI_ICSSG_PRUETH
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 35fceba01ea4..d6ce2c9f0a8d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -514,14 +514,14 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
-static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
return phylink_ethtool_get_eee(salve->phylink, edata);
}
-static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 9d2f4ac783e4..2939a21ca74f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -294,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
txqueue,
netif_tx_queue_stopped(netif_txq),
jiffies_to_msecs(jiffies - trans_start),
- dql_avail(&netif_txq->dql),
+ netdev_queue_dql_avail(netif_txq),
k3_cppi_desc_pool_avail(tx_chn->desc_pool));
if (netif_tx_queue_stopped(netif_txq)) {
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 26dc906eae90..57fe936bb177 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -90,4 +90,5 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);
+MODULE_DESCRIPTION("TI CPSW Switch common module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ea85c6dd5484..c0a5abd8d9a8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index a557a477d039..f7b283353ba2 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -422,7 +422,7 @@ int cpsw_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -434,7 +434,7 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 498c50c6d1a7..087dcb67505a 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->slave_num);
return;
}
+
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 0e27c433098d..7efa72502c86 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -496,8 +496,8 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *ecmd);
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index bcccf43d368b..dbbea9146040 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts)
freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}
+static void cpts_clk_unregister(void *clk)
+{
+ clk_hw_unregister_mux(clk);
+}
+
+static void cpts_clk_del_provider(void *np)
+{
+ of_clk_del_provider(np);
+}
+
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
struct device_node *refclk_np;
@@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))clk_hw_unregister_mux,
- clk_hw);
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw);
if (ret) {
dev_err(cpts->dev, "add clkmux unreg action %d", ret);
goto mux_fail;
@@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
if (ret)
goto mux_fail;
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))of_clk_del_provider,
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider,
refclk_np);
if (ret) {
dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
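
The two wrappers added above replace casts of clk_hw_unregister_mux() and of_clk_del_provider() to void (*)(void *). Calling a function through a pointer of an incompatible type is undefined behaviour in C and trips kernel CFI; a wrapper with the exact prototype devm expects is the idiomatic fix. General shape (sketch; some_unregister() and struct some_res are placeholders, not real APIs):

#include <linux/device.h>

static void my_release(void *res)
{
	/* cast back inside the wrapper, where it is well-defined */
	some_unregister((struct some_res *)res);
}

static int my_probe_step(struct device *dev, struct some_res *res)
{
	/* devm runs my_release(res) automatically on driver detach */
	return devm_add_action_or_reset(dev, my_release, res);
}
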
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index a27ec1dcc8d5..9a7dd7efcf69 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -45,7 +45,7 @@ static int emac_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_set_link_ksettings(ndev, ecmd);
}
-static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
@@ -53,7 +53,7 @@ static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return phy_ethtool_get_eee(ndev->phydev, edata);
}
-static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 411898a4f38c..cf7b73f8f450 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1489,9 +1489,6 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- /* stop PRUs */
- prueth_emac_stop(emac);
-
if (prueth->emacs_initialized == 1)
icss_iep_exit(emac->iep);
@@ -1502,7 +1499,6 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->rx_chns.irq[rx_flow], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_tx_chns(emac);
prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
prueth_cleanup_tx_chns(emac);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d5b75af163d3..5ee8e8980393 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -384,18 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
- if (!descr->skb) {
- descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
- return -ENOMEM;
- }
descr->hw_regs.dmac_cmd_status = 0;
descr->hw_regs.result_size = 0;
descr->hw_regs.valid_size = 0;
descr->hw_regs.data_error = 0;
descr->hw_regs.payload.dev_addr = 0;
descr->hw_regs.payload.size = 0;
- descr->skb = NULL;
+
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+ if (!descr->skb) {
+ descr->hw_regs.payload.dev_addr = 0; /* tell DMAC not to touch memory */
+ return -ENOMEM;
+ }
offset = ((unsigned long)descr->skb->data) &
(GELIC_NET_RXBUF_ALIGN - 1);
@@ -698,7 +698,7 @@ gelic_card_get_next_tx_descr(struct gelic_card *card)
}
/**
- * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field
+ * gelic_descr_set_tx_cmdstat - sets the tx descriptor command field
* @descr: descriptor structure to fill out
* @skb: packet to consider
*
@@ -1461,7 +1461,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
}
/**
- * gelic_ether_setup_netdev - initialization of net_device
+ * gelic_net_setup_netdev - initialization of net_device
* @netdev: net_device structure
* @card: card structure
*
@@ -1518,14 +1518,16 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
return 0;
}
+#define GELIC_ALIGN (32)
+
/**
* gelic_alloc_card_net - allocates net_device and card structure
+ * @netdev: interface device structure
*
* returns the card structure or NULL in case of errors
*
* the card and net_device structures are linked to each other
*/
-#define GELIC_ALIGN (32)
static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
{
struct gelic_card *card;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1db754615cca..945c13d1a982 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1958,8 +1958,6 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
- wx->msix_in_use = false;
-
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 8706223a6e5a..6dff2c85682d 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1257,7 +1257,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* compute header lengths */
l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
- *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
+ *hdr_len = enc ? skb_inner_transport_offset(skb) :
skb_transport_offset(skb);
*hdr_len += l4len;
@@ -1614,14 +1614,12 @@ static int wx_acquire_msix_vectors(struct wx *wx)
/* One for non-queue interrupts */
nvecs += 1;
- if (!wx->msix_in_use) {
- wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entry) {
- kfree(wx->msix_q_entries);
- wx->msix_q_entries = NULL;
- return -ENOMEM;
- }
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
@@ -1931,10 +1929,8 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (pdev->msix_enabled) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
- if (!wx->msix_in_use) {
- kfree(wx->msix_entry);
- wx->msix_entry = NULL;
- }
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
}
pci_free_irq_vectors(wx->pdev);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b4dc4f341117..1fdeb464d5f4 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -1047,7 +1047,6 @@ struct wx {
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
struct msix_entry *msix_entry;
- bool msix_in_use;
struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 7507f762edfe..42718875277c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o \
txgbe_hw.o \
txgbe_phy.o \
+ txgbe_irq.o \
txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
new file mode 100644
index 000000000000..b3e3605d1edb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/irqdomain.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_phy.h"
+#include "txgbe_irq.h"
+
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+void txgbe_irq_enable(struct wx *wx, bool queues)
+{
+ wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
+}
+
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * re-enable the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but they are auto-disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link (maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+int txgbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
+static int txgbe_request_link_irq(struct txgbe *txgbe)
+{
+ txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ return request_threaded_irq(txgbe->link_irq, NULL,
+ txgbe_link_irq_handler,
+ IRQF_ONESHOT, "txgbe-link-irq", txgbe);
+}
+
+static const struct irq_chip txgbe_irq_chip = {
+ .name = "txgbe-misc-irq",
+};
+
+static int txgbe_misc_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct txgbe *txgbe = d->host_data;
+
+ irq_set_chip_data(irq, txgbe);
+ irq_set_chip(irq, &txgbe->misc.chip);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
+ .map = txgbe_misc_irq_domain_map,
+};
+
+static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ TXGBE_PX_MISC_ETH_AN)) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
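
txgbe_misc_irq_handle() is the classic nested-IRQ demultiplexer: one parent line, one status read, then handle_nested_irq() for every pending sub-source. Because the sub-handlers are requested with request_threaded_irq()/IRQF_ONESHOT, the parent must itself run in thread context, which is why it too is registered as a threaded handler below. A consumer of such a domain looks like this (sketch; the hwirq numbers are the TXGBE_IRQ_* constants used above):

static int hook_sub_irq(struct irq_domain *domain, irq_hw_number_t hwirq,
			irq_handler_t thread_fn, void *data)
{
	int virq = irq_find_mapping(domain, hwirq);

	if (!virq)
		return -ENOENT;		/* mapping was never created */

	/* nested handlers must be threaded: they run from the parent's
	 * IRQ thread, not from hard-IRQ context
	 */
	return request_threaded_irq(virq, NULL, thread_fn,
				    IRQF_ONESHOT, "sub-irq", data);
}
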
+
+static void txgbe_del_irq_domain(struct txgbe *txgbe)
+{
+ int hwirq, virq;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++) {
+ virq = irq_find_mapping(txgbe->misc.domain, hwirq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(txgbe->misc.domain);
+}
+
+void txgbe_free_misc_irq(struct txgbe *txgbe)
+{
+ free_irq(txgbe->gpio_irq, txgbe);
+ free_irq(txgbe->link_irq, txgbe);
+ free_irq(txgbe->misc.irq, txgbe);
+ txgbe_del_irq_domain(txgbe);
+}
+
+int txgbe_setup_misc_irq(struct txgbe *txgbe)
+{
+ struct wx *wx = txgbe->wx;
+ int hwirq, err;
+
+ txgbe->misc.nirqs = 2;
+ txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
+ if (!txgbe->misc.domain)
+ return -ENOMEM;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
+ irq_create_mapping(txgbe->misc.domain, hwirq);
+
+ txgbe->misc.chip = txgbe_irq_chip;
+ if (wx->pdev->msix_enabled)
+ txgbe->misc.irq = wx->msix_entry->vector;
+ else
+ txgbe->misc.irq = wx->pdev->irq;
+
+ err = request_threaded_irq(txgbe->misc.irq, NULL,
+ txgbe_misc_irq_handle,
+ IRQF_ONESHOT,
+ wx->netdev->name, txgbe);
+ if (err)
+ goto del_misc_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_misc_irq;
+
+ err = txgbe_request_link_irq(txgbe);
+ if (err)
+ goto free_gpio_irq;
+
+ return 0;
+
+free_gpio_irq:
+ free_irq(txgbe->gpio_irq, txgbe);
+free_misc_irq:
+ free_irq(txgbe->misc.irq, txgbe);
+del_misc_irq:
+ txgbe_del_irq_domain(txgbe);
+
+ return err;
+}
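
The new txgbe_irq.c above follows a common demultiplexing idiom: one threaded parent handler reads a cause register and forwards each set bit to a nested sub-interrupt previously mapped in an irq_domain. A minimal sketch of that idiom, with hypothetical demo_* names (this is not the driver's code):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical demo_* names; a sketch of the demux idiom, not the
 * driver's actual code.
 */
struct demo_irq_ctrl {
	struct irq_domain *domain;	/* created with irq_domain_add_simple() */
	unsigned long pending;		/* stands in for a cause-register read */
	unsigned int ncauses;
};

static irqreturn_t demo_misc_thread(int irq, void *data)
{
	struct demo_irq_ctrl *ctrl = data;
	unsigned int nhandled = 0;
	unsigned long bit;

	/* One cause register; one nested sub-interrupt per set bit */
	for_each_set_bit(bit, &ctrl->pending, ctrl->ncauses) {
		handle_nested_irq(irq_find_mapping(ctrl->domain, bit));
		nhandled++;
	}

	return nhandled ? IRQ_HANDLED : IRQ_NONE;
}
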
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
new file mode 100644
index 000000000000..b77945e7a0f2
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+void txgbe_irq_enable(struct wx *wx, bool queues);
+int txgbe_request_irq(struct wx *wx);
+void txgbe_free_misc_irq(struct txgbe *txgbe);
+int txgbe_setup_misc_irq(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 3b151c410a5c..bd4624d14ca0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -17,6 +17,7 @@
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_irq.h"
#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -76,137 +77,11 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
-/**
- * txgbe_irq_enable - Enable default interrupt generation settings
- * @wx: pointer to private structure
- * @queues: enable irqs for queues
- **/
-static void txgbe_irq_enable(struct wx *wx, bool queues)
-{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
-
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
- if (queues)
- wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
-}
-
-/**
- * txgbe_intr - msi/legacy mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
-{
- struct wx_q_vector *q_vector;
- struct wx *wx = data;
- struct pci_dev *pdev;
- u32 eicr;
-
- q_vector = wx->q_vector[0];
- pdev = wx->pdev;
-
- eicr = wx_misc_isb(wx, WX_ISB_VEC0);
- if (!eicr) {
- /* shared interrupt alert!
- * the interrupt that we masked before the ICR read.
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, true);
- return IRQ_NONE; /* Not our interrupt */
- }
- wx->isb_mem[WX_ISB_VEC0] = 0;
- if (!(pdev->msi_enabled))
- wr32(wx, WX_PX_INTA, 1);
-
- wx->isb_mem[WX_ISB_MISC] = 0;
- /* would disable interrupts here but it is auto disabled */
- napi_schedule_irqoff(&q_vector->napi);
-
- /* re-enable link(maybe) and non-queue interrupts, no flush.
- * txgbe_poll will re-enable the queue interrupts
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, false);
-
- return IRQ_HANDLED;
-}
-
-/**
- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
- * @wx: board private structure
- *
- * Allocate MSI-X vectors and request interrupts from the kernel.
- **/
-static int txgbe_request_msix_irqs(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- int vector, err;
-
- for (vector = 0; vector < wx->num_q_vectors; vector++) {
- struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_q_entries[vector];
-
- if (q_vector->tx.ring && q_vector->rx.ring)
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-TxRx-%d", netdev->name, entry->entry);
- else
- /* skip this unused q_vector */
- continue;
-
- err = request_irq(entry->vector, wx_msix_clean_rings, 0,
- q_vector->name, q_vector);
- if (err) {
- wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
- q_vector->name, err);
- goto free_queue_irqs;
- }
- }
-
- return 0;
-
-free_queue_irqs:
- while (vector) {
- vector--;
- free_irq(wx->msix_q_entries[vector].vector,
- wx->q_vector[vector]);
- }
- wx_reset_interrupt_capability(wx);
- return err;
-}
-
-/**
- * txgbe_request_irq - initialize interrupts
- * @wx: board private structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int txgbe_request_irq(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- struct pci_dev *pdev = wx->pdev;
- int err;
-
- if (pdev->msix_enabled)
- err = txgbe_request_msix_irqs(wx);
- else if (pdev->msi_enabled)
- err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
- netdev->name, wx);
- else
- err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
- netdev->name, wx);
-
- if (err)
- wx_err(wx, "request_irq failed, Error %d\n", err);
-
- return err;
-}
-
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ txgbe_reinit_gpio_intr(wx);
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -518,6 +393,7 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -528,6 +404,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
+ txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -536,6 +413,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
+ txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -751,10 +629,14 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe->wx = wx;
wx->priv = txgbe;
- err = txgbe_init_phy(txgbe);
+ err = txgbe_setup_misc_irq(txgbe);
if (err)
goto err_release_hw;
+ err = txgbe_init_phy(txgbe);
+ if (err)
+ goto err_free_misc_irq;
+
err = register_netdev(netdev);
if (err)
goto err_remove_phy;
@@ -781,6 +663,8 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
+err_free_misc_irq:
+ txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
@@ -813,6 +697,7 @@ static void txgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
+ txgbe_free_misc_irq(txgbe);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1b84d495d14e..93295916b1d2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -292,6 +292,21 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
return 0;
}
+irqreturn_t txgbe_link_irq_handler(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+ bool up;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
+
+ phylink_mac_change(wx->phylink, up);
+
+ return IRQ_HANDLED;
+}
+
static int txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct wx *wx = gpiochip_get_data(chip);
@@ -437,7 +452,7 @@ static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
}
static const struct irq_chip txgbe_gpio_irq_chip = {
- .name = "txgbe_gpio_irq",
+ .name = "txgbe-gpio-irq",
.irq_ack = txgbe_gpio_irq_ack,
.irq_mask = txgbe_gpio_irq_mask,
.irq_unmask = txgbe_gpio_irq_unmask,
@@ -446,29 +461,25 @@ static const struct irq_chip txgbe_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static void txgbe_irq_handler(struct irq_desc *desc)
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct wx *wx = irq_desc_get_handler_data(desc);
- struct txgbe *txgbe = wx->priv;
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
irq_hw_number_t hwirq;
unsigned long gpioirq;
struct gpio_chip *gc;
unsigned long flags;
- u32 eicr;
-
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
-
- chained_irq_enter(chip, desc);
gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
gc = txgbe->gpio;
for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
u32 irq_type = irq_get_trigger_type(gpio);
- generic_handle_domain_irq(gc->irq.domain, hwirq);
+ txgbe_gpio_irq_ack(d);
+ handle_nested_irq(gpio);
if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
raw_spin_lock_irqsave(&wx->gpio_lock, flags);
@@ -477,17 +488,34 @@ static void txgbe_irq_handler(struct irq_desc *desc)
}
}
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
+}
+
+void txgbe_reinit_gpio_intr(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+ irq_hw_number_t hwirq;
+ unsigned long gpioirq;
+ struct gpio_chip *gc;
+ unsigned long flags;
+
+ /* handle any GPIO interrupt left pending before the IRQ was enabled */
+ gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
- if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
- TXGBE_PX_MISC_ETH_AN)) {
- u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
+ gc = txgbe->gpio;
+ for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
+ int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
+ u32 irq_type = irq_get_trigger_type(gpio);
- phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
- }
+ txgbe_gpio_irq_ack(d);
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+ txgbe_toggle_trigger(gc, hwirq);
+ raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+ }
+ }
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@@ -524,19 +552,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
girq = &gc->irq;
gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
- girq->parent_handler = txgbe_irq_handler;
- girq->parent_handler_data = wx;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents), GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
-
- /* now only suuported on MSI-X interrupt */
- if (!wx->msix_entry)
- return -EPERM;
-
- girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -754,8 +769,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
goto err_unregister_i2c;
}
- wx->msix_in_use = true;
-
return 0;
err_unregister_i2c:
@@ -788,5 +801,4 @@ void txgbe_remove_phy(struct txgbe *txgbe)
phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
- txgbe->wx->msix_in_use = false;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 1ab592124986..8a026d804fe2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,6 +4,9 @@
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
+void txgbe_reinit_gpio_intr(struct wx *wx);
+irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 270a6fd9ad0b..1b4ff50d5857 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -5,6 +5,7 @@
#define _TXGBE_TYPE_H_
#include <linux/property.h>
+#include <linux/irq.h>
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
@@ -169,15 +170,31 @@ struct txgbe_nodes {
const struct software_node *group[SWNODE_MAX + 1];
};
+enum txgbe_misc_irqs {
+ TXGBE_IRQ_GPIO = 0,
+ TXGBE_IRQ_LINK,
+ TXGBE_IRQ_MAX
+};
+
+struct txgbe_irq {
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq;
+};
+
struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
+ struct txgbe_irq misc;
struct dw_xpcs *xpcs;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
struct clk *clk;
struct gpio_chip *gpio;
+ unsigned int gpio_irq;
+ unsigned int link_irq;
};
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3318b50a5911..f165616f36fe 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -539,8 +539,7 @@ static int w5300_hw_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 765aa516aada..940452d0a4d2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1114,8 +1114,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
ndev->irq = rc;
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
+ lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(lp->base_addr)) {
rc = PTR_ERR(lp->base_addr);
goto error;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 9f505cf02d96..e9bc38fd2025 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1240,9 +1240,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
SelectPage(0);
PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
- freespace = GetWord(XIRCREG0_TSO);
- okay = freespace & 0x8000;
- freespace &= 0x7fff;
+ freespace = GetWord(XIRCREG0_TSO) & 0x7fff;
/* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
okay = pktlen +2 < freespace;
pr_debug("%s: avail. tx space=%u%s\n",
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 2b6a607ac0b7..a273362c9e70 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -153,6 +153,7 @@ static const struct pci_device_id skfddi_pci_tbl[] = {
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
+MODULE_DESCRIPTION("SysKonnect FDDI PCI driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 704e949484d0..b9b5554ea862 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!(hw->hw_info.req_buf))
- return -ENOMEM;
+ if (!(hw->hw_info.req_buf)) {
+ result = -ENOMEM;
+ goto free_ep_info;
+ }
hw->hw_info.req_buf_size = mem_size;
mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!(hw->hw_info.res_buf))
- return -ENOMEM;
+ if (!(hw->hw_info.res_buf)) {
+ result = -ENOMEM;
+ goto free_req_buf;
+ }
hw->hw_info.res_buf_size = mem_size;
result = fjes_hw_alloc_shared_status_region(hw);
if (result)
- return result;
+ goto free_res_buf;
hw->hw_info.buffer_share_bit = 0;
hw->hw_info.buffer_unshare_reserve_bit = 0;
@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
result = fjes_hw_alloc_epbuf(&buf_pair->tx);
if (result)
- return result;
+ goto free_epbuf;
result = fjes_hw_alloc_epbuf(&buf_pair->rx);
if (result)
- return result;
+ goto free_epbuf;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
fjes_hw_init_command_registers(hw, &param);
return 0;
+
+free_epbuf:
+ for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+ }
+ fjes_hw_free_shared_status_region(hw);
+free_res_buf:
+ kfree(hw->hw_info.res_buf);
+ hw->hw_info.res_buf = NULL;
+free_req_buf:
+ kfree(hw->hw_info.req_buf);
+ hw->hw_info.req_buf = NULL;
+free_ep_info:
+ kfree(hw->ep_shm_info);
+ hw->ep_shm_info = NULL;
+ return result;
}
static void fjes_hw_cleanup(struct fjes_hw *hw)
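
The fjes_hw_setup() change above replaces early returns with the standard kernel goto-unwind ladder, so a mid-sequence allocation failure frees everything allocated before it, in reverse order. A minimal sketch under hypothetical demo_* names:

#include <linux/slab.h>

/* Hypothetical demo_* names; a sketch of the unwind ladder the
 * fjes_hw_setup() fix introduces, not the driver's actual code.
 */
struct demo_hw {
	void *req_buf;
	void *res_buf;
};

static int demo_setup(struct demo_hw *hw, size_t req_sz, size_t res_sz)
{
	int err;

	hw->req_buf = kzalloc(req_sz, GFP_KERNEL);
	if (!hw->req_buf)
		return -ENOMEM;

	hw->res_buf = kzalloc(res_sz, GFP_KERNEL);
	if (!hw->res_buf) {
		err = -ENOMEM;
		goto free_req_buf;
	}

	return 0;

	/* Labels free in exact reverse order of allocation and NULL the
	 * pointers so a later cleanup pass cannot double-free them.
	 */
free_req_buf:
	kfree(hw->req_buf);
	hw->req_buf = NULL;
	return err;
}
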
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 32c51c244153..2f6739fe78af 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
unsigned int len;
- int err = 0;
+ int nh, err = 0;
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
@@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(geneve->dev, rx_length_errors);
+ DEV_STATS_INC(geneve->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (geneve_get_sk_family(gs) == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
@@ -319,22 +333,16 @@ static int geneve_init(struct net_device *dev)
struct geneve_dev *geneve = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&geneve->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL);
if (err) {
- free_percpu(dev->tstats);
gro_cells_destroy(&geneve->gro_cells);
return err;
}
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -344,7 +352,6 @@ static void geneve_uninit(struct net_device *dev)
dst_cache_destroy(&geneve->cfg.info.dst_cache);
gro_cells_destroy(&geneve->gro_cells);
- free_percpu(dev->tstats);
}
/* Callback from net/ipv4/udp.c to receive packets */
@@ -507,7 +514,7 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
gh_len = geneve_hlen(gh);
hlen = off_gnv + gh_len;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
gh = skb_gro_header_slow(skb, hlen, off_gnv);
if (unlikely(!gh))
goto out;
@@ -1121,7 +1128,6 @@ static const struct net_device_ops geneve_netdev_ops = {
.ndo_open = geneve_open,
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -1141,7 +1147,7 @@ static const struct ethtool_ops geneve_ethtool_ops = {
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type geneve_type = {
+static const struct device_type geneve_type = {
.name = "geneve",
};
@@ -1188,6 +1194,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
@@ -1900,29 +1907,26 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
}
}
-static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- geneve_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ geneve_destroy_tunnels(net, dev_to_kill);
+}
- list_for_each_entry(net, net_list, exit_list) {
- const struct geneve_net *gn = net_generic(net, geneve_net_id);
+static void __net_exit geneve_exit_net(struct net *net)
+{
+ const struct geneve_net *gn = net_generic(net, geneve_net_id);
- WARN_ON_ONCE(!list_empty(&gn->sock_list));
- }
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit_batch = geneve_exit_batch_net,
+ .exit_batch_rtnl = geneve_exit_batch_rtnl,
+ .exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
};
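
The geneve_rx() fix above works because pskb_inet_may_pull() may reallocate skb->head, which would leave a previously taken header pointer dangling; saving the offset and re-deriving the pointer afterwards is the safe pattern. A hedged sketch of that pattern (not the driver's code):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>

/* Sketch of the offset-save pattern: any pull can move skb->head, so
 * raw pointers into the buffer must be recomputed from saved offsets.
 */
static int demo_decap(struct sk_buff *skb)
{
	int nh = skb_network_header(skb) - skb->head;	/* save offset */
	const struct iphdr *oiph;

	skb_reset_network_header(skb);
	if (!pskb_inet_may_pull(skb))			/* may move skb->head */
		return -EINVAL;

	oiph = (const struct iphdr *)(skb->head + nh);	/* re-derive pointer */
	return IP_ECN_decapsulate(oiph, skb);
}
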
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index b1919278e931..ba4704c2c640 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -711,25 +711,11 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
return ret;
}
-static int gtp_dev_init(struct net_device *dev)
-{
- struct gtp_dev *gtp = netdev_priv(dev);
-
- gtp->dev = dev;
-
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- return 0;
-}
-
static void gtp_dev_uninit(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
gtp_encap_disable(gtp);
- free_percpu(dev->tstats);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -942,10 +928,8 @@ tx_err:
}
static const struct net_device_ops gtp_netdev_ops = {
- .ndo_init = gtp_dev_init,
.ndo_uninit = gtp_dev_uninit,
.ndo_start_xmit = gtp_dev_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static const struct device_type gtp_type = {
@@ -957,6 +941,7 @@ static void gtp_link_setup(struct net_device *dev)
unsigned int max_gtp_header_len = sizeof(struct iphdr) +
sizeof(struct udphdr) +
sizeof(struct gtp0_header);
+ struct gtp_dev *gtp = netdev_priv(dev);
dev->netdev_ops = &gtp_netdev_ops;
dev->needs_free_netdev = true;
@@ -970,11 +955,13 @@ static void gtp_link_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags |= IFF_NO_QUEUE;
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
+ gtp->dev = dev;
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
@@ -1876,23 +1863,23 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
-static void __net_exit gtp_net_exit(struct net *net)
+static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
- LIST_HEAD(list);
+ struct net *net;
- rtnl_lock();
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
- gtp_dellink(gtp->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
+ }
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
- .exit = gtp_net_exit,
+ .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
@@ -1903,26 +1890,26 @@ static int __init gtp_init(void)
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
- err = rtnl_link_register(&gtp_link_ops);
+ err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto error_out;
- err = genl_register_family(&gtp_genl_family);
+ err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
- goto unreg_rtnl_link;
+ goto unreg_pernet_subsys;
- err = register_pernet_subsys(&gtp_net_ops);
+ err = genl_register_family(&gtp_genl_family);
if (err < 0)
- goto unreg_genl_family;
+ goto unreg_rtnl_link;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
-unreg_genl_family:
- genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+ unregister_pernet_subsys(&gtp_net_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;
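
The reordered gtp_init() above registers the pernet subsystem before the rtnl link ops that depend on it, with the error path unwinding in exact reverse order. The general shape, sketched with hypothetical demo_* ops:

#include <linux/module.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>

/* Hypothetical demo_* ops; pernet state first (newlink callbacks may
 * rely on it), then link ops, then genetlink, unwound in reverse.
 */
static struct pernet_operations demo_net_ops;
static struct rtnl_link_ops demo_link_ops;
static struct genl_family demo_genl_family;

static int __init demo_init(void)
{
	int err;

	err = register_pernet_subsys(&demo_net_ops);
	if (err < 0)
		return err;

	err = rtnl_link_register(&demo_link_ops);
	if (err < 0)
		goto unreg_pernet;

	err = genl_register_family(&demo_genl_family);
	if (err < 0)
		goto unreg_rtnl;

	return 0;

unreg_rtnl:
	rtnl_link_unregister(&demo_link_ops);
unreg_pernet:
	unregister_pernet_subsys(&demo_net_ops);
	return err;
}
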
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1dafa44155d0..a6fcbda64ecc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -708,7 +708,10 @@ void netvsc_device_remove(struct hv_device *device)
/* Disable NAPI and disassociate its context from the device. */
for (i = 0; i < net_device->num_chn; i++) {
/* See also vmbus_reset_channel_cb(). */
- napi_disable(&net_device->chan_table[i].napi);
+ /* only disable NAPI for channels that were actually enabled */
+ if (i < ndev->real_num_rx_queues)
+ napi_disable(&net_device->chan_table[i].napi);
+
netif_napi_del(&net_device->chan_table[i].napi);
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4406427d4617..11831a1c9762 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -42,9 +42,13 @@
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
+/* Macros to define the context of vf registration */
+#define VF_REG_IN_PROBE 1
+#define VF_REG_IN_NOTIFIER 2
+
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -2185,7 +2189,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
}
static int netvsc_vf_join(struct net_device *vf_netdev,
- struct net_device *ndev)
+ struct net_device *ndev, int context)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
int ret;
@@ -2208,7 +2212,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
- schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+ /* If this registration is called from the probe context, vf_takeover
+ * is taken care of later in probe itself.
+ */
+ if (context == VF_REG_IN_NOTIFIER)
+ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@@ -2346,7 +2354,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
return NOTIFY_DONE;
}
-static int netvsc_register_vf(struct net_device *vf_netdev)
+static int netvsc_register_vf(struct net_device *vf_netdev, int context)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
@@ -2386,7 +2394,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
- if (netvsc_vf_join(vf_netdev, ndev) != 0)
+ if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
return NOTIFY_DONE;
dev_hold(vf_netdev);
@@ -2484,10 +2492,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_OK;
}
+static int check_dev_is_matching_vf(struct net_device *event_ndev)
+{
+ /* Skip NetVSC interfaces */
+ if (event_ndev->netdev_ops == &device_ops)
+ return -ENODEV;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_ndev->type != ARPHRD_ETHER)
+ return -ENODEV;
+
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (is_vlan_dev(event_ndev))
+ return -ENODEV;
+
+ /* Avoid Bonding master dev with same MAC registering as VF */
+ if (netif_is_bond_master(event_ndev))
+ return -ENODEV;
+
+ return 0;
+}
+
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
- struct net_device *net = NULL;
+ struct net_device *net = NULL, *vf_netdev;
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
@@ -2599,6 +2628,30 @@ static int netvsc_probe(struct hv_device *dev,
}
list_add(&net_device_ctx->list, &netvsc_dev_list);
+
+ /* When the hv_netvsc driver is unloaded and reloaded, the
+ * NET_DEVICE_REGISTER for the vf device is replayed before probe
+ * is complete. This is because register_netdevice_notifier() gets
+ * registered before vmbus_driver_register() so that the callback function
+ * is set before probe and we don't miss events like NETDEV_POST_INIT.
+ * So, in this section we try to register the matching vf device that
+ * is present as a netdevice, knowing that its register call is not
+ * processed in netvsc_netdev_notifier (as probing is in progress and
+ * get_netvsc_byslot fails).
+ */
+ for_each_netdev(dev_net(net), vf_netdev) {
+ ret = check_dev_is_matching_vf(vf_netdev);
+ if (ret != 0)
+ continue;
+
+ if (net != get_netvsc_byslot(vf_netdev))
+ continue;
+
+ netvsc_prepare_bonding(vf_netdev);
+ netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+ __netvsc_vf_setup(net, vf_netdev);
+ break;
+ }
rtnl_unlock();
netvsc_devinfo_put(device_info);
@@ -2754,28 +2807,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
- /* Skip our own events */
- if (event_dev->netdev_ops == &device_ops)
- return NOTIFY_DONE;
-
- /* Avoid non-Ethernet type devices */
- if (event_dev->type != ARPHRD_ETHER)
- return NOTIFY_DONE;
-
- /* Avoid Vlan dev with same MAC registering as VF */
- if (is_vlan_dev(event_dev))
- return NOTIFY_DONE;
-
- /* Avoid Bonding master dev with same MAC registering as VF */
- if (netif_is_bond_master(event_dev))
+ ret = check_dev_is_matching_vf(event_dev);
+ if (ret != 0)
return NOTIFY_DONE;
switch (event) {
case NETDEV_POST_INIT:
return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
- return netvsc_register_vf(event_dev);
+ return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
case NETDEV_UNREGISTER:
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
@@ -2807,7 +2849,7 @@ static int __init netvsc_drv_init(void)
pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
- netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
register_netdevice_notifier(&netvsc_netdev_notifier);
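
The ring-size change above decouples the ring-buffer byte count from the kernel's PAGE_SIZE: the module parameter counts 4 KiB units, and (if memory serves) VMBUS_RING_SIZE() pads the payload with the ring-buffer header and page-aligns the result. An illustrative computation, assuming that macro behavior (demo_ring_bytes is a hypothetical name):

#include <linux/hyperv.h>

/* Illustrative only: ring_size counts 4 KiB units regardless of the
 * kernel's PAGE_SIZE, so the default of 128 gives a 512 KiB payload;
 * VMBUS_RING_SIZE() then adds the ring-buffer header and rounds up to
 * page alignment. The old "ring_size * PAGE_SIZE" computation would
 * have allocated 16x that on a 64 KiB-page arm64 kernel.
 */
static const unsigned int demo_ring_bytes = VMBUS_RING_SIZE(128 * 4096);
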
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 35e55f198e05..2930141d7dd2 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -259,4 +259,5 @@ static __exit void fake_remove_module(void)
module_init(fakelb_init_module);
module_exit(fake_remove_module);
+MODULE_DESCRIPTION("IEEE 802.15.4 loopback driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index f3355e040a9e..334cd62cf286 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -21,7 +21,6 @@
struct clk;
struct icc_path;
struct net_device;
-struct platform_device;
struct ipa_power;
struct ipa_smp2p;
@@ -31,7 +30,7 @@ struct ipa_interrupt;
* struct ipa - IPA information
* @gsi: Embedded GSI structure
* @version: IPA hardware version
- * @pdev: Platform device
+ * @dev: IPA device pointer
* @completion: Used to signal pipeline clear transfer complete
* @nb: Notifier block used for remoteproc SSR
* @notifier: Remoteproc SSR notifier
@@ -79,7 +78,7 @@ struct ipa_interrupt;
struct ipa {
struct gsi gsi;
enum ipa_version version;
- struct platform_device *pdev;
+ struct device *dev;
struct completion completion;
struct notifier_block nb;
void *notifier;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index f1419fbd776c..39219963dbb3 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -174,7 +174,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 size;
size = route ? ipa->route_count : ipa->filter_count + 1;
@@ -204,7 +204,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
@@ -256,7 +256,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
const char *name, u32 offset)
{
struct ipa_cmd_register_write *payload;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 offset_max;
u32 bit_count;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index afa1d56d9095..dd490941615e 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -233,8 +233,8 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *other_data;
- struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name other_name;
+ struct device *dev = ipa->dev;
if (ipa_gsi_endpoint_data_empty(data))
return true;
@@ -388,7 +388,7 @@ static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_endpoint_name name;
u32 max;
@@ -606,7 +606,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"no transaction to reset modem exception endpoints\n");
return -EBUSY;
}
@@ -1498,8 +1498,7 @@ ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
} else {
- dev_err(&ipa->pdev->dev,
- "unexpected tagged packet from endpoint %u\n",
+ dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
endpoint_id);
}
@@ -1536,6 +1535,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
u32 resid = total_len;
while (resid) {
@@ -1544,7 +1544,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
u32 len;
if (resid < IPA_STATUS_SIZE) {
- dev_err(&endpoint->ipa->pdev->dev,
+ dev_err(dev,
"short message (%u bytes < %zu byte status)\n",
resid, IPA_STATUS_SIZE);
break;
@@ -1666,8 +1666,8 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa)
*/
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
@@ -1769,7 +1769,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d resetting channel %u for endpoint %u\n",
ret, endpoint->channel_id, endpoint->endpoint_id);
}
@@ -1817,7 +1817,7 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
ret = gsi_channel_start(gsi, endpoint->channel_id);
if (ret) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R',
endpoint->channel_id, endpoint_id);
@@ -1854,14 +1854,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
/* Note that if stop fails, the channel's state is not well-defined */
ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
- dev_err(&ipa->pdev->dev,
- "error %d attempting to stop endpoint %u\n", ret,
- endpoint_id);
+ dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
+ ret, endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1881,7 +1880,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1983,7 +1982,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct reg *reg;
u32 endpoint_id;
u32 hw_limit;
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 4bc05948f772..c3e8784d51d9 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -19,6 +19,7 @@
* time only these three are supported.
*/
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
@@ -43,6 +44,30 @@ struct ipa_interrupt {
u32 enabled;
};
+/* Clear the suspend interrupt for all endpoints that signaled it */
+static void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 unit_count;
+ u32 unit;
+
+ unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
+
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (!val || ipa->version == IPA_VERSION_3_0)
+ continue;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
+ }
+}
+
/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
@@ -70,7 +95,7 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
* caused the interrupt, so defer clearing until after
* the handler has been called.
*/
- ipa_power_suspend_handler(ipa, irq_id);
+ ipa_interrupt_suspend_clear_all(interrupt);
fallthrough;
default: /* Silently ignore (and clear) any other condition */
@@ -85,14 +110,13 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- struct device *dev;
u32 pending;
u32 offset;
u32 mask;
int ret;
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
@@ -205,30 +229,6 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
}
-/* Clear the suspend interrupt for all endpoints that signaled it */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
-{
- struct ipa *ipa = interrupt->ipa;
- u32 unit_count;
- u32 unit;
-
- unit_count = roundup(ipa->endpoint_count, 32);
- for (unit = 0; unit < unit_count; unit++) {
- const struct reg *reg;
- u32 val;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
-
- /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
- if (ipa->version == IPA_VERSION_3_0)
- continue;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
- }
-}
-
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
{
@@ -236,29 +236,17 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
}
/* Configure the IPA interrupt framework */
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
+int ipa_interrupt_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
- struct ipa_interrupt *interrupt;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ unsigned int irq = interrupt->irq;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- unsigned int irq;
int ret;
- ret = platform_get_irq_byname(ipa->pdev, "ipa");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
- ret);
- return ERR_PTR(ret ? : -EINVAL);
- }
- irq = ret;
-
- interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
- if (!interrupt)
- return ERR_PTR(-ENOMEM);
interrupt->ipa = ipa;
- interrupt->irq = irq;
- /* Start with all IPA interrupts disabled */
+ /* Disable all IPA interrupt types */
reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
@@ -271,26 +259,59 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n",
+ ret);
goto err_free_irq;
}
- return interrupt;
+ ipa->interrupt = interrupt;
+
+ return 0;
err_free_irq:
free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
- return ERR_PTR(ret);
+ return ret;
}
/* Inverse of ipa_interrupt_config() */
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
+void ipa_interrupt_deconfig(struct ipa *ipa)
{
- struct device *dev = &interrupt->ipa->pdev->dev;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ struct device *dev = ipa->dev;
+
+ ipa->interrupt = NULL;
dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
+}
+
+/* Initialize the IPA interrupt structure */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "ipa");
+ if (irq <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n", irq);
+
+ return ERR_PTR(irq ? : -EINVAL);
+ }
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->irq = irq;
+
+ return interrupt;
+}
+
+/* Inverse of ipa_interrupt_init() */
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt)
+{
kfree(interrupt);
}
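
The relocated suspend-clear helper above also fixes the unit count: roundup(endpoint_count, 32) in the removed copy rounds up to a multiple of 32, whereas the per-register loop needs a ceiling division. Two compile-time checks illustrating the difference:

#include <linux/build_bug.h>
#include <linux/math.h>

/* roundup(23, 32) would give 32, i.e. 32 register units for 23
 * endpoints; DIV_ROUND_UP() is the ceiling division that yields the
 * intended number of 32-bit register units.
 */
static_assert(DIV_ROUND_UP(23, 32) == 1);	/* one 32-bit register unit */
static_assert(DIV_ROUND_UP(33, 32) == 2);	/* two units once past 32 */
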
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 12e3e798ccb3..f3f4f4330a59 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -35,14 +35,6 @@ void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
u32 endpoint_id);
/**
- * ipa_interrupt_suspend_clear_all - clear all suspend interrupts
- * @interrupt: IPA interrupt structure
- *
- * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
- */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
-
-/**
* ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
* @interrupt: IPA interrupt structure
*
@@ -84,17 +76,31 @@ void ipa_interrupt_irq_enable(struct ipa *ipa);
void ipa_interrupt_irq_disable(struct ipa *ipa);
/**
- * ipa_interrupt_config() - Configure the IPA interrupt framework
+ * ipa_interrupt_config() - Configure IPA interrupts
* @ipa: IPA pointer
*
- * Return: Pointer to IPA SMP2P info, or a pointer-coded error
+ * Return: 0 if successful, or a negative error code
*/
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa);
+int ipa_interrupt_config(struct ipa *ipa);
/**
* ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config()
+ * @ipa: IPA pointer
+ */
+void ipa_interrupt_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_init() - Initialize the IPA interrupt structure
+ * @pdev: IPA platform device pointer
+ *
+ * Return: Pointer to an IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev);
+
+/**
+ * ipa_interrupt_exit() - Inverse of ipa_interrupt_init()
* @interrupt: IPA interrupt structure
*/
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt);
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt);
#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 00475fd7a205..57b241417e8c 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
-#include <linux/device.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/firmware.h>
@@ -114,7 +113,7 @@ int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = gsi_setup(&ipa->gsi);
@@ -542,12 +541,9 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
if (ret)
goto err_hardware_deconfig;
- ipa->interrupt = ipa_interrupt_config(ipa);
- if (IS_ERR(ipa->interrupt)) {
- ret = PTR_ERR(ipa->interrupt);
- ipa->interrupt = NULL;
+ ret = ipa_interrupt_config(ipa);
+ if (ret)
goto err_mem_deconfig;
- }
ipa_uc_config(ipa);
@@ -572,8 +568,7 @@ err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
err_uc_deconfig:
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
err_mem_deconfig:
ipa_mem_deconfig(ipa);
err_hardware_deconfig:
@@ -591,8 +586,7 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_modem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
}
@@ -808,6 +802,7 @@ out_self:
static int ipa_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
enum ipa_firmware_loader loader;
const struct ipa_data *data;
struct ipa_power *power;
@@ -839,12 +834,21 @@ static int ipa_probe(struct platform_device *pdev)
if (loader == IPA_LOADER_DEFER)
return -EPROBE_DEFER;
- /* The clock and interconnects might not be ready when we're
- * probed, so might return -EPROBE_DEFER.
+ /* The IPA interrupt might not be ready when we're probed, so this
+ * might return -EPROBE_DEFER.
+ */
+ interrupt = ipa_interrupt_init(pdev);
+ if (IS_ERR(interrupt))
+ return PTR_ERR(interrupt);
+
+ /* The clock and interconnects might not be ready when we're probed,
+ * so this might return -EPROBE_DEFER.
*/
power = ipa_power_init(dev, data->power_data);
- if (IS_ERR(power))
- return PTR_ERR(power);
+ if (IS_ERR(power)) {
+ ret = PTR_ERR(power);
+ goto err_interrupt_exit;
+ }
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -853,18 +857,19 @@ static int ipa_probe(struct platform_device *pdev)
goto err_power_exit;
}
- ipa->pdev = pdev;
+ ipa->dev = dev;
dev_set_drvdata(dev, ipa);
+ ipa->interrupt = interrupt;
ipa->power = power;
ipa->version = data->version;
ipa->modem_route_count = data->modem_route_count;
init_completion(&ipa->completion);
- ret = ipa_reg_init(ipa);
+ ret = ipa_reg_init(ipa, pdev);
if (ret)
goto err_kfree_ipa;
- ret = ipa_mem_init(ipa, data->mem_data);
+ ret = ipa_mem_init(ipa, pdev, data->mem_data);
if (ret)
goto err_reg_exit;
@@ -882,7 +887,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_endpoint_exit;
- ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
+ ret = ipa_smp2p_init(ipa, pdev, loader == IPA_LOADER_MODEM);
if (ret)
goto err_table_exit;
@@ -939,17 +944,27 @@ err_kfree_ipa:
kfree(ipa);
err_power_exit:
ipa_power_exit(power);
+err_interrupt_exit:
+ ipa_interrupt_exit(interrupt);
return ret;
}
static void ipa_remove(struct platform_device *pdev)
{
- struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct ipa_power *power = ipa->power;
- struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ struct ipa_power *power;
+ struct device *dev;
+ struct ipa *ipa;
int ret;
+ ipa = dev_get_drvdata(&pdev->dev);
+ dev = ipa->dev;
+ WARN_ON(dev != &pdev->dev);
+
+ power = ipa->power;
+ interrupt = ipa->interrupt;
+
/* Prevent the modem from triggering a call to ipa_setup(). This
* also ensures a modem-initiated setup that's underway completes.
*/
@@ -991,6 +1006,7 @@ out_power_put:
ipa_reg_exit(ipa);
kfree(ipa);
ipa_power_exit(power);
+ ipa_interrupt_exit(interrupt);
dev_info(dev, "IPA driver removed");
}
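
The reworked ipa_probe() above acquires both resources that can return -EPROBE_DEFER (the interrupt and power handles) before the first allocation, so a deferred probe has almost nothing to unwind. A sketch of that ordering with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical demo_* types and helpers; a sketch, not the driver. */
struct demo_interrupt;
struct demo_power;
struct demo {
	struct demo_interrupt *interrupt;
	struct demo_power *power;
};

struct demo_interrupt *demo_interrupt_init(struct platform_device *pdev);
void demo_interrupt_exit(struct demo_interrupt *interrupt);
struct demo_power *demo_power_init(struct device *dev);
void demo_power_exit(struct demo_power *power);

static int demo_probe(struct platform_device *pdev)
{
	struct demo_interrupt *interrupt;
	struct demo_power *power;
	struct demo *d;
	int ret;

	interrupt = demo_interrupt_init(pdev);	/* may return EPROBE_DEFER */
	if (IS_ERR(interrupt))
		return PTR_ERR(interrupt);

	power = demo_power_init(&pdev->dev);	/* may return EPROBE_DEFER */
	if (IS_ERR(power)) {
		ret = PTR_ERR(power);
		goto err_interrupt_exit;
	}

	/* No more -EPROBE_DEFER past this point */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_power_exit;
	}
	d->interrupt = interrupt;
	d->power = power;
	platform_set_drvdata(pdev, d);

	return 0;

err_power_exit:
	demo_power_exit(power);
err_interrupt_exit:
	demo_interrupt_exit(interrupt);
	return ret;
}
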
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 694960537ecd..709f061ede61 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -9,6 +9,7 @@
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>
@@ -75,9 +76,9 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
- const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
+ const struct reg *reg;
u32 offset;
u16 size;
u32 val;
@@ -87,7 +88,7 @@ int ipa_mem_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 4);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ dev_err(ipa->dev, "no transaction for memory setup\n");
return -EBUSY;
}
@@ -217,8 +218,8 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
- struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id = mem->id;
+ struct device *dev = ipa->dev;
u16 size_multiple;
/* Make sure the memory region is valid for this version of IPA */
@@ -254,7 +255,7 @@ static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_mem_id mem_id;
u32 i;
@@ -290,7 +291,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 limit = ipa->mem_size;
u32 i;
@@ -317,7 +318,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
*/
int ipa_mem_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
const struct reg *reg;
dma_addr_t addr;
@@ -393,7 +394,7 @@ err_dma_free:
/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
ipa->zero_size = 0;
@@ -420,8 +421,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 3);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction to zero modem memory\n");
+ dev_err(ipa->dev, "no transaction to zero modem memory\n");
return -EBUSY;
}
@@ -452,7 +452,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -485,13 +485,12 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
static void ipa_imem_exit(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
- struct device *dev;
if (!ipa->imem_size)
return;
- dev = &ipa->pdev->dev;
domain = iommu_get_domain_for_dev(dev);
if (domain) {
size_t size;
@@ -527,7 +526,7 @@ static void ipa_imem_exit(struct ipa *ipa)
*/
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -594,7 +593,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
static void ipa_smem_exit(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
domain = iommu_get_domain_for_dev(dev);
@@ -615,9 +614,10 @@ static void ipa_smem_exit(struct ipa *ipa)
}
/* Perform memory region-related initialization */
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct resource *res;
int ret;
@@ -634,14 +634,13 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
if (!ipa_table_mem_valid(ipa, true))
return -EINVAL;
- ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
return ret;
}
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-shared");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
if (!res) {
dev_err(dev,
"DT error getting \"ipa-shared\" memory property\n");
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 868e9c20e8c4..28aad00a151d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -6,6 +6,8 @@
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
+struct platform_device;
+
struct ipa;
struct ipa_mem_data;
@@ -100,7 +102,8 @@ int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */
int ipa_mem_zero_modem(struct ipa *ipa);
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data);
void ipa_mem_exit(struct ipa *ipa);
#endif /* _IPA_MEM_H_ */
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 423422a2a445..c27ca3f27f7d 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -39,10 +39,14 @@ enum ipa_modem_state {
/**
* struct ipa_priv - IPA network device private data
* @ipa: IPA pointer
+ * @tx: Transmit endpoint pointer
+ * @rx: Receive endpoint pointer
* @work: Work structure used to wake the modem netdev TX queue
*/
struct ipa_priv {
struct ipa *ipa;
+ struct ipa_endpoint *tx;
+ struct ipa_endpoint *rx;
struct work_struct work;
};
@@ -54,16 +58,16 @@ static int ipa_open(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ret = ipa_endpoint_enable_one(priv->tx);
if (ret)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ret = ipa_endpoint_enable_one(priv->rx);
if (ret)
goto err_disable_tx;
@@ -75,7 +79,7 @@ static int ipa_open(struct net_device *netdev)
return 0;
err_disable_tx:
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->tx);
err_power_put:
pm_runtime_put_noidle(dev);
@@ -90,15 +94,15 @@ static int ipa_stop(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto out_power_put;
netif_stop_queue(netdev);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->rx);
+ ipa_endpoint_disable_one(priv->tx);
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
@@ -106,13 +110,16 @@ out_power_put:
return 0;
}
-/** ipa_start_xmit() - Transmits an skb.
- * @skb: skb to be transmitted
- * @dev: network device
+/** ipa_start_xmit() - Transmit an skb
+ * @skb: Socket buffer to be transmitted
+ * @netdev: Network device
*
- * Return codes:
- * NETDEV_TX_OK: Success
- * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ * Return: NETDEV_TX_OK if successful (or dropped), NETDEV_TX_BUSY otherwise
+
+ *
+ * Normally NETDEV_TX_OK indicates the buffer was successfully transmitted.
+ * If the buffer has an unexpected protocol or its size is out of range it
+ * is quietly dropped, returning NETDEV_TX_OK. NETDEV_TX_BUSY indicates
+ * the buffer cannot be sent at this time and should be retried later.
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -132,29 +139,41 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
- /* The hardware must be powered for us to transmit */
- dev = &ipa->pdev->dev;
+ /* The hardware must be powered for us to transmit, so if we're not
+ * ready we want the network stack to stop queueing until power is
+ * ACTIVE. Once runtime resume has completed, we inform the network
+ * stack it's OK to try transmitting again.
+ *
+ * We learn from pm_runtime_get() whether the hardware is powered.
+ * If it was not, powering up is either started or already underway.
+ * And in that case we want to disable queueing, expecting it to be
+ * re-enabled once power is ACTIVE. But runtime PM and network
+ * transmit run concurrently, and if we're not careful the requests
+ * to stop and start queueing could occur in the wrong order.
+ *
+ * For that reason we *always* stop queueing here, *before* the call
+ * to pm_runtime_get(). If we determine here that power is ACTIVE,
+ * we restart queueing before transmitting the SKB. Otherwise
+ * queueing will eventually be enabled after resume completes.
+ */
+ netif_stop_queue(netdev);
+
+ dev = ipa->dev;
ret = pm_runtime_get(dev);
if (ret < 1) {
/* If a resume won't happen, just drop the packet */
if (ret < 0 && ret != -EINPROGRESS) {
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
pm_runtime_put_noidle(dev);
goto err_drop_skb;
}
- /* No power (yet). Stop the network stack from transmitting
- * until we're resumed; ipa_modem_resume() arranges for the
- * TX queue to be started again.
- */
- ipa_power_modem_queue_stop(ipa);
-
pm_runtime_put_noidle(dev);
return NETDEV_TX_BUSY;
}
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
ret = ipa_endpoint_skb_tx(endpoint, skb);
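The comment above describes a pattern that applies to any ndo_start_xmit that depends on runtime PM: park the queue unconditionally before querying power, and wake it only once power is known to be ACTIVE. A minimal sketch of the shape (a toy driver with hypothetical names, not the IPA code itself, and with error handling trimmed):

    struct toy_priv {
            struct device *dev;
    };

    static netdev_tx_t toy_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
    {
            struct toy_priv *priv = netdev_priv(netdev);
            struct device *dev = priv->dev;
            int ret;

            /* Stop first, so a wake issued by a concurrent resume can
             * never be overwritten by a stale stop from this path.
             */
            netif_stop_queue(netdev);

            ret = pm_runtime_get(dev);      /* async; 1 means already ACTIVE */
            if (ret < 1) {
                    pm_runtime_put_noidle(dev);
                    if (ret < 0 && ret != -EINPROGRESS) {
                            /* No resume is coming; drop rather than stall */
                            netif_wake_queue(netdev);
                            dev_kfree_skb_any(skb);
                            return NETDEV_TX_OK;
                    }
                    /* Resume underway; its completion wakes the queue */
                    return NETDEV_TX_BUSY;
            }

            netif_wake_queue(netdev);       /* power is ACTIVE; undo the stop */

            /* ...hand skb to hardware, pm_runtime_put_autosuspend()... */
            return NETDEV_TX_OK;
    }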
@@ -233,14 +252,14 @@ static void ipa_modem_netdev_setup(struct net_device *netdev)
*/
void ipa_modem_suspend(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_suspend_one(priv->rx);
+ ipa_endpoint_suspend_one(priv->tx);
}
/**
@@ -258,7 +277,7 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
{
struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
- ipa_power_modem_queue_wake(priv->ipa);
+ netif_wake_queue(priv->tx->netdev);
}
/** ipa_modem_resume() - resume callback for runtime_pm
@@ -268,14 +287,14 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
*/
void ipa_modem_resume(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_resume_one(priv->tx);
+ ipa_endpoint_resume_one(priv->rx);
/* Arrange for the TX queue to be restarted */
(void)queue_pm_work(&priv->work);
@@ -303,19 +322,24 @@ int ipa_modem_start(struct ipa *ipa)
goto out_set_state;
}
- SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
+ SET_NETDEV_DEV(netdev, ipa->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
+ priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ priv->rx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX];
INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+
+ priv->tx->netdev = netdev;
+ priv->rx->netdev = netdev;
+
ipa->modem_netdev = netdev;
ret = register_netdev(netdev);
if (ret) {
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -355,9 +379,11 @@ int ipa_modem_stop(struct ipa *ipa)
if (netdev->flags & IFF_UP)
(void)ipa_stop(netdev);
unregister_netdev(netdev);
+
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -370,7 +396,7 @@ int ipa_modem_stop(struct ipa *ipa)
/* Treat a "clean" modem stop the same as a crash */
static void ipa_modem_crashed(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
/* Prevent the modem from triggering a call to ipa_setup() */
@@ -417,7 +443,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
{
struct ipa *ipa = container_of(nb, struct ipa, nb);
struct qcom_ssr_notify_data *notify_data = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
switch (action) {
case QCOM_SSR_BEFORE_POWERUP:
@@ -466,7 +492,7 @@ int ipa_modem_config(struct ipa *ipa)
void ipa_modem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index e223886123ce..41ca7ef5e20f 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -35,28 +35,10 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * enum ipa_power_flag - IPA power flags
- * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
- * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
- * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
- * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
- * @IPA_POWER_FLAG_COUNT: Number of defined power flags
- */
-enum ipa_power_flag {
- IPA_POWER_FLAG_RESUMED,
- IPA_POWER_FLAG_SYSTEM,
- IPA_POWER_FLAG_STOPPED,
- IPA_POWER_FLAG_STARTED,
- IPA_POWER_FLAG_COUNT, /* Last; not a flag */
-};
-
-/**
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
* @qmp: QMP handle for AOSS communication
- * @spinlock: Protects modem TX queue enable/disable
- * @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
@@ -64,8 +46,6 @@ struct ipa_power {
struct device *dev;
struct clk *core;
struct qmp *qmp;
- spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
- DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct icc_bulk_data interconnect[] __counted_by(interconnect_count);
};
@@ -147,7 +127,6 @@ static int ipa_runtime_suspend(struct device *dev)
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
- __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
@@ -179,8 +158,6 @@ static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
- __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Increment the disable depth to ensure that the IRQ won't
* be re-enabled until the matching _enable call in
* ipa_resume(). We do this to ensure that the interrupt
@@ -202,8 +179,6 @@ static int ipa_resume(struct device *dev)
ret = pm_runtime_force_resume(dev);
- __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Now that PM runtime is enabled again it's safe
* to turn the IRQ back on and process any data
* that was received during suspend.
@@ -219,84 +194,6 @@ u32 ipa_core_clock_rate(struct ipa *ipa)
return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
-{
- /* To handle an IPA interrupt we will have resumed the hardware
- * just to handle the interrupt, so we're done. If we are in a
- * system suspend, trigger a system resume.
- */
- if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
- if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
- pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
-
- /* Acknowledge/clear the suspend interrupt on all endpoints */
- ipa_interrupt_suspend_clear_all(ipa->interrupt);
-}
-
-/* The next few functions coordinate stopping and starting the modem
- * network device transmit queue.
- *
- * Transmit can be running concurrent with power resume, and there's a
- * chance the resume completes before the transmit path stops the queue,
- * leaving the queue in a stopped state. The next two functions are used
- * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
- * to conditionally stop the TX queue; and ipa_power_modem_queue_start()
- * is used by ipa_runtime_resume() to conditionally restart it.
- *
- * Two flags and a spinlock are used. If the queue is stopped, the STOPPED
- * power flag is set. And if the queue is started, the STARTED flag is set.
- * The queue is only started on resume if the STOPPED flag is set. And the
- * queue is only started in ipa_start_xmit() if the STARTED flag is *not*
- * set. As a result, the queue remains operational if the two activities
- * happen concurrently regardless of the order they complete. The spinlock
- * ensures the flag and TX queue operations are done atomically.
- *
- * The first function stops the modem netdev transmit queue, but only if
- * the STARTED flag is *not* set. That flag is cleared if it was set.
- * If the queue is stopped, the STOPPED flag is set. This is called only
- * from the power ->runtime_resume operation.
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
- netif_stop_queue(ipa->modem_netdev);
- __set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function starts the modem netdev transmit queue, but only if the
- * STOPPED flag is set. That flag is cleared if it was set. If the queue
- * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
- * to skip stopping the queue in the event of a race.
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
- __set_bit(IPA_POWER_FLAG_STARTED, power->flags);
- netif_wake_queue(ipa->modem_netdev);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function clears the STARTED flag once the TX queue is operating */
-void ipa_power_modem_queue_active(struct ipa *ipa)
-{
- clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
-}
-
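The removal is made safe by the reordering in ipa_start_xmit() shown earlier. Previously the transmit path stopped the queue only after pm_runtime_get() reported that power was down, while the resume path restarted it from a workqueue; with the two running concurrently, a wake could land before its corresponding stop and be undone by it, which is the race the STOPPED/STARTED bits and the spinlock existed to neutralize (see the deleted comment above). Now that the queue is parked unconditionally before pm_runtime_get(), any resume observed by that call, and therefore its wake, is ordered after the stop, leaving the flags, the spinlock, and these three helpers with no callers.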
static int ipa_power_retention_init(struct ipa_power *power)
{
struct qmp *qmp = qmp_get(power->dev);
@@ -341,7 +238,7 @@ int ipa_power_setup(struct ipa *ipa)
ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
- ret = device_init_wakeup(&ipa->pdev->dev, true);
+ ret = device_init_wakeup(ipa->dev, true);
if (ret)
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
@@ -350,7 +247,7 @@ int ipa_power_setup(struct ipa *ipa)
void ipa_power_teardown(struct ipa *ipa)
{
- (void)device_init_wakeup(&ipa->pdev->dev, false);
+ (void)device_init_wakeup(ipa->dev, false);
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
@@ -385,7 +282,6 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
}
power->dev = dev;
power->core = clk;
- spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
ret = ipa_interconnect_init(power, data->interconnect_data);
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 3a4c59ea1222..227cc04bea80 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -24,24 +24,6 @@ extern const struct dev_pm_ops ipa_pm_ops;
u32 ipa_core_clock_rate(struct ipa *ipa);
/**
- * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_wake() - Possibly wake the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_active() - Report modem netdev TX queue active
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_active(struct ipa *ipa);
-
-/**
* ipa_power_retention() - Control register retention on power collapse
* @ipa: IPA pointer
* @enable: Whether retention should be enabled or disabled
@@ -49,17 +31,6 @@ void ipa_power_modem_queue_active(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
- * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts
- * @ipa: IPA pointer
- * @irq_id: IPA interrupt ID (unused)
- *
- * If an RX endpoint is suspended, and the IPA has a packet destined for
- * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
- * that it should resume the endpoint.
- */
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
-
-/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index f70f0a1d1cda..65c40e207802 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -96,7 +96,7 @@ static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
IPA_QMI_INIT_COMPLETE_IND_SZ,
ipa_init_complete_ind_ei, &ind);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete indication\n", ret);
else
ipa_qmi->indication_sent = true;
@@ -148,7 +148,7 @@ static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
ipa = container_of(ipa_qmi, struct ipa, qmi);
ret = ipa_modem_start(ipa);
if (ret)
- dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+ dev_err(ipa->dev, "error %d starting modem\n", ret);
}
/* All QMI clients from the modem node are gone (modem shut down or crashed). */
@@ -199,7 +199,7 @@ static void ipa_server_indication_register(struct qmi_handle *qmi,
ipa_qmi->indication_requested = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending register indication response\n", ret);
}
}
@@ -228,7 +228,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
ipa_qmi->uc_ready = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete response\n", ret);
}
}
@@ -417,7 +417,7 @@ static void ipa_client_init_driver_work(struct work_struct *work)
qmi = &ipa_qmi->client_handle;
ipa = container_of(ipa_qmi, struct ipa, qmi);
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = qmi_txn_init(qmi, &txn, NULL, NULL);
if (ret < 0) {
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 6a3203ae6f1e..98625956e0bb 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -4,6 +4,7 @@
* Copyright (C) 2019-2023 Linaro Ltd.
*/
+#include <linux/platform_device.h>
#include <linux/io.h>
#include "ipa.h"
@@ -132,9 +133,9 @@ static const struct regs *ipa_regs(enum ipa_version version)
}
}
-int ipa_reg_init(struct ipa *ipa)
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
const struct regs *regs;
struct resource *res;
@@ -146,8 +147,7 @@ int ipa_reg_init(struct ipa *ipa)
return -EINVAL;
/* Setup IPA register memory */
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-reg");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-reg");
if (!res) {
dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
return -ENODEV;
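This is the recurring shape of the series: the *_init() functions take the platform device explicitly instead of digging it out of ipa->pdev, so struct ipa only needs to keep the generic ipa->dev. A minimal sketch of the resulting idiom (hypothetical driver and resource name, abbreviated error handling):

    struct toy {
            void __iomem *reg_virt;
    };

    static int toy_reg_init(struct toy *t, struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct resource *res;

            res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "toy-reg");
            if (!res) {
                    dev_err(dev, "DT error getting \"toy-reg\" memory\n");
                    return -ENODEV;
            }

            /* devm: unmapped automatically when the device goes away */
            t->reg_virt = devm_ioremap(dev, res->start, resource_size(res));
            if (!t->reg_virt)
                    return -ENOMEM;

            return 0;
    }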
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 2998f115f12c..62c62495b796 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -12,6 +12,8 @@
#include "ipa_version.h"
#include "reg.h"
+struct platform_device;
+
struct ipa;
/**
@@ -643,7 +645,7 @@ extern const struct regs ipa_regs_v5_5;
const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-int ipa_reg_init(struct ipa *ipa);
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev);
void ipa_reg_exit(struct ipa *ipa);
#endif /* _IPA_REG_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 5620dc271fac..aeccce9fab72 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -5,7 +5,7 @@
*/
#include <linux/types.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
@@ -84,15 +84,13 @@ struct ipa_smp2p {
*/
static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
{
- struct device *dev;
u32 value;
u32 mask;
if (smp2p->notified)
return;
- dev = &smp2p->ipa->pdev->dev;
- smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0;
+ smp2p->power_on = pm_runtime_get_if_active(smp2p->ipa->dev, true) > 0;
/* Signal whether the IPA power is enabled */
mask = BIT(smp2p->enabled_bit);
@@ -152,15 +150,16 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
+ struct ipa *ipa = smp2p->ipa;
struct device *dev;
int ret;
/* Ignore any (spurious) interrupts received after the first */
- if (smp2p->ipa->setup_complete)
+ if (ipa->setup_complete)
return IRQ_HANDLED;
/* Power needs to be active for setup */
- dev = &smp2p->ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power for setup\n", ret);
@@ -168,7 +167,7 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
}
/* An error here won't cause driver shutdown, so warn if one occurs */
- ret = ipa_setup(smp2p->ipa);
+ ret = ipa_setup(ipa);
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
out_power_put:
@@ -179,14 +178,15 @@ out_power_put:
}
/* Initialize SMP2P interrupts */
-static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
- irq_handler_t handler)
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p,
+ struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
{
- struct device *dev = &smp2p->ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
unsigned int irq;
int ret;
- ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ ret = platform_get_irq_byname(pdev, name);
if (ret <= 0)
return ret ? : -EINVAL;
irq = ret;
@@ -208,7 +208,7 @@ static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
/* Drop the power reference if it was taken in ipa_smp2p_notify() */
static void ipa_smp2p_power_release(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (!ipa->smp2p->power_on)
return;
@@ -219,10 +219,11 @@ static void ipa_smp2p_power_release(struct ipa *ipa)
}
/* Initialize the IPA SMP2P subsystem */
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+int
+ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev, bool modem_init)
{
struct qcom_smem_state *enabled_state;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct qcom_smem_state *valid_state;
struct ipa_smp2p *smp2p;
u32 enabled_bit;
@@ -261,7 +262,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
/* We have enough information saved to handle notifications */
ipa->smp2p = smp2p;
- ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-clock-query",
ipa_smp2p_modem_clk_query_isr);
if (ret < 0)
goto err_null_smp2p;
@@ -273,7 +274,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
if (modem_init) {
/* Result will be non-zero (negative for error) */
- ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-setup-ready",
ipa_smp2p_modem_setup_ready_isr);
if (ret < 0)
goto err_notifier_unregister;
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 9b969b03d1a4..2a3d8eefb13b 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -8,17 +8,20 @@
#include <linux/types.h>
+struct platform_device;
+
struct ipa;
/**
* ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
* @ipa: IPA pointer
+ * @pdev: Platform device pointer
* @modem_init: Whether the modem is responsible for GSI initialization
*
* Return: 0 if successful, or a negative error code
- *
*/
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+int ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev,
+ bool modem_init);
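The header side needs only an opaque type: a pointer parameter never requires the full struct definition, so a forward declaration replaces an include and keeps header dependencies flat. The idiom in isolation (hypothetical header):

    /* toy_widget.h -- sketch of the forward-declaration idiom */
    #ifndef _TOY_WIDGET_H_
    #define _TOY_WIDGET_H_

    struct platform_device;     /* opaque; used only through a pointer */
    struct toy;

    int toy_init(struct toy *t, struct platform_device *pdev);

    #endif /* _TOY_WIDGET_H_ */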
/**
* ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 7b637bb8b41c..a24ac11b8893 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -163,7 +163,7 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 count;
if (!filtered) {
@@ -236,8 +236,7 @@ ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s filter reset\n",
+ dev_err(ipa->dev, "no transaction for %s filter reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -298,8 +297,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s route reset\n",
+ dev_err(ipa->dev, "no transaction for %s route reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -327,7 +325,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
void ipa_table_reset(struct ipa *ipa, bool modem)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const char *ee_name;
int ret;
@@ -356,7 +354,7 @@ int ipa_table_hash_flush(struct ipa *ipa)
trans = ipa_cmd_trans_alloc(ipa, 1);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ dev_err(ipa->dev, "no transaction for hash flush\n");
return -EBUSY;
}
@@ -469,7 +467,7 @@ int ipa_table_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ dev_err(ipa->dev, "no transaction for table setup\n");
return -EBUSY;
}
@@ -713,7 +711,7 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
*/
int ipa_table_init(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_addr_t addr;
__le64 le_addr;
__le64 *virt;
@@ -763,7 +761,7 @@ int ipa_table_init(struct ipa *ipa)
void ipa_table_exit(struct ipa *ipa)
{
u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
size_t size;
size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 7eaa0b4ebed9..bfd5dc6dab43 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -127,7 +127,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
static void ipa_uc_event_handler(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
@@ -141,7 +141,7 @@ static void ipa_uc_event_handler(struct ipa *ipa)
static void ipa_uc_response_hdlr(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
/* An INIT_COMPLETED response message is sent to the AP by the
* microcontroller when it is operational. Other than this, the AP
@@ -191,7 +191,7 @@ void ipa_uc_config(struct ipa *ipa)
/* Inverse of ipa_uc_config() */
void ipa_uc_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
@@ -208,8 +208,8 @@ void ipa_uc_deconfig(struct ipa *ipa)
/* Take a proxy power reference for the microcontroller */
void ipa_uc_power(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
static bool already;
- struct device *dev;
int ret;
if (already)
@@ -217,7 +217,6 @@ void ipa_uc_power(struct ipa *ipa)
already = true; /* Only do this on first boot */
/* This power reference dropped in ipa_uc_response_hdlr() above */
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index df7c43a109e1..5920f7e63352 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -349,7 +349,7 @@ static int ipvlan_get_iflink(const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- return ipvlan->phy_dev->ifindex;
+ return READ_ONCE(ipvlan->phy_dev->ifindex);
}
static const struct net_device_ops ipvlan_netdev_ops = {
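This and the macsec/macvlan hunks below are the same annotation: ndo_get_iflink() can run without RTNL, and the lower device's ifindex may be rewritten concurrently (e.g. on a move to another netns), so the lockless read is marked. Sketched with a hypothetical private struct (the annotation pairs with a WRITE_ONCE() on the writer side):

    struct toy_vlan {
            struct net_device *lowerdev;
    };

    static int toy_get_iflink(const struct net_device *dev)
    {
            struct toy_vlan *vlan = netdev_priv(dev);

            /* Lockless read of a field a writer may change under us;
             * READ_ONCE() documents the race and keeps KCSAN quiet.
             */
            return READ_ONCE(vlan->lowerdev->ifindex);
    }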
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 60944a4beada..1afc4c47be73 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -237,4 +237,5 @@ static void __exit ipvtap_exit(void)
module_exit(ipvtap_exit);
MODULE_ALIAS_RTNL_LINK("ipvtap");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_DESCRIPTION("IP-VLAN based tap driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6d53e63ef4e..f6eab66c2660 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -144,6 +144,7 @@ static int loopback_dev_init(struct net_device *dev)
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index e34816638569..0206b84284ab 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -607,11 +607,26 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
- ret = skb_ensure_writable_head_tail(skb, dev);
- if (unlikely(ret < 0)) {
- macsec_txsa_put(tx_sa);
- kfree_skb(skb);
- return ERR_PTR(ret);
+ if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
+ skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
+ struct sk_buff *nskb = skb_copy_expand(skb,
+ MACSEC_NEEDED_HEADROOM,
+ MACSEC_NEEDED_TAILROOM,
+ GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb) {
+ macsec_txsa_put(tx_sa);
+ return ERR_PTR(-ENOMEM);
+ }
}
unprotected_len = skb->len;
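This hunk restores macsec's original expand-or-unshare logic in place of the skb_ensure_writable_head_tail() call it had briefly adopted. The restored logic as a standalone sketch (hypothetical helper name; GFP_ATOMIC because this runs in the transmit path):

    static struct sk_buff *toy_make_room(struct sk_buff *skb,
                                         int head, int tail)
    {
            if (skb_headroom(skb) < head || skb_tailroom(skb) < tail) {
                    struct sk_buff *nskb;

                    nskb = skb_copy_expand(skb, head, tail, GFP_ATOMIC);
                    if (!nskb) {
                            kfree_skb(skb);
                            return NULL;
                    }
                    consume_skb(skb);   /* copied successfully; drop original */
                    return nskb;
            }

            /* Enough room already: just make sure we own the data */
            return skb_unshare(skb, GFP_ATOMIC);    /* may return NULL */
    }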
@@ -3504,18 +3519,13 @@ static int macsec_dev_init(struct net_device *dev)
struct net_device *real_dev = macsec->real_dev;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&macsec->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
macsec_set_head_tail_room(dev);
@@ -3535,7 +3545,6 @@ static void macsec_dev_uninit(struct net_device *dev)
struct macsec_dev *macsec = macsec_priv(dev);
gro_cells_destroy(&macsec->gro_cells);
- free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
@@ -3738,7 +3747,7 @@ static void macsec_get_stats64(struct net_device *dev,
static int macsec_get_iflink(const struct net_device *dev)
{
- return macsec_priv(dev)->real_dev->ifindex;
+ return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
}
static const struct net_device_ops macsec_netdev_ops = {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3cc665757e8..0cec2783a3e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1158,7 +1158,7 @@ static int macvlan_dev_get_iflink(const struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- return vlan->lowerdev->ifindex;
+ return READ_ONCE(vlan->lowerdev->ifindex);
}
static const struct ethtool_ops macvlan_ethtool_ops = {
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 68f8ee0ec8ba..f40eb50bb978 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -94,6 +94,10 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
int ret;
u32 cmd;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
/* Prepare the read operation */
cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
unimac_mdio_writel(priv, cmd, MDIO_CMD);
@@ -103,7 +107,7 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
ret = priv->wait_func(priv->wait_func_data);
if (ret)
- return ret;
+ goto out;
cmd = unimac_mdio_readl(priv, MDIO_CMD);
@@ -112,10 +116,15 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* that condition here and ignore the MDIO controller read failure
* indication.
*/
- if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
- return -EIO;
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL)) {
+ ret = -EIO;
+ goto out;
+ }
- return cmd & 0xffff;
+ ret = cmd & 0xffff;
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
@@ -123,6 +132,11 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
{
struct unimac_mdio_priv *priv = bus->priv;
u32 cmd;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* Prepare the write operation */
cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
@@ -131,7 +145,10 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
unimac_mdio_start(priv);
- return priv->wait_func(priv->wait_func_data);
+ ret = priv->wait_func(priv->wait_func_data);
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
}
/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
@@ -178,14 +195,19 @@ static int unimac_mdio_reset(struct mii_bus *bus)
return 0;
}
-static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
{
unsigned long rate;
u32 reg, div;
+ int ret;
/* Keep the hardware default values */
if (!priv->clk_freq)
- return;
+ return 0;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
if (!priv->clk)
rate = 250000000;
@@ -195,7 +217,8 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
pr_warn("Incorrect MDIO clock frequency, ignoring\n");
- return;
+ ret = 0;
+ goto out;
}
/* The MDIO clock is the reference clock (typically 250 MHz) divided by
@@ -205,6 +228,9 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT);
reg |= div << MDIO_CLK_DIV_SHIFT;
unimac_mdio_writel(priv, reg, MDIO_CFG);
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_probe(struct platform_device *pdev)
@@ -235,24 +261,12 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
priv->clk_freq = 0;
- unimac_mdio_clk_set(priv);
-
priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- ret = -ENOMEM;
- goto out_clk_disable;
- }
+ if (!priv->mii_bus)
+ return -ENOMEM;
bus = priv->mii_bus;
bus->priv = priv;
@@ -261,17 +275,29 @@ static int unimac_mdio_probe(struct platform_device *pdev)
priv->wait_func = pdata->wait_func;
priv->wait_func_data = pdata->wait_func_data;
bus->phy_mask = ~pdata->phy_mask;
+ priv->clk = pdata->clk;
} else {
bus->name = "unimac MII bus";
priv->wait_func_data = priv;
priv->wait_func = unimac_mdio_poll;
+ priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ }
+
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto out_mdio_free;
}
+
bus->parent = &pdev->dev;
bus->read = unimac_mdio_read;
bus->write = unimac_mdio_write;
bus->reset = unimac_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+ ret = unimac_mdio_clk_set(priv);
+ if (ret)
+ goto out_mdio_free;
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
@@ -286,8 +312,6 @@ static int unimac_mdio_probe(struct platform_device *pdev)
out_mdio_free:
mdiobus_free(bus);
-out_clk_disable:
- clk_disable_unprepare(priv->clk);
return ret;
}
@@ -297,36 +321,20 @@ static void unimac_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
- clk_disable_unprepare(priv->clk);
-}
-
-static int __maybe_unused unimac_mdio_suspend(struct device *d)
-{
- struct unimac_mdio_priv *priv = dev_get_drvdata(d);
-
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused unimac_mdio_resume(struct device *d)
{
struct unimac_mdio_priv *priv = dev_get_drvdata(d);
- int ret;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- unimac_mdio_clk_set(priv);
-
- return 0;
+ return unimac_mdio_clk_set(priv);
}
static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
- unimac_mdio_suspend, unimac_mdio_resume);
+ NULL, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index abd8b508ec16..9d8f43b28aac 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -14,6 +14,20 @@
#include <linux/clk.h>
#define MDIO_MODE_REG 0x40
+#define MDIO_MODE_MDC_MODE BIT(12)
+/* 0 = Clause 22, 1 = Clause 45 */
+#define MDIO_MODE_C45 BIT(8)
+#define MDIO_MODE_DIV_MASK GENMASK(7, 0)
+#define MDIO_MODE_DIV(x) FIELD_PREP(MDIO_MODE_DIV_MASK, (x) - 1)
+#define MDIO_MODE_DIV_1 0x0
+#define MDIO_MODE_DIV_2 0x1
+#define MDIO_MODE_DIV_4 0x3
+#define MDIO_MODE_DIV_8 0x7
+#define MDIO_MODE_DIV_16 0xf
+#define MDIO_MODE_DIV_32 0x1f
+#define MDIO_MODE_DIV_64 0x3f
+#define MDIO_MODE_DIV_128 0x7f
+#define MDIO_MODE_DIV_256 0xff
#define MDIO_ADDR_REG 0x44
#define MDIO_DATA_WRITE_REG 0x48
#define MDIO_DATA_READ_REG 0x4c
@@ -26,9 +40,6 @@
#define MDIO_CMD_ACCESS_CODE_C45_WRITE 1
#define MDIO_CMD_ACCESS_CODE_C45_READ 2
-/* 0 = Clause 22, 1 = Clause 45 */
-#define MDIO_MODE_C45 BIT(8)
-
#define IPQ4019_MDIO_TIMEOUT 10000
#define IPQ4019_MDIO_SLEEP 10
@@ -41,6 +52,7 @@ struct ipq4019_mdio_data {
void __iomem *membase;
void __iomem *eth_ldo_rdy;
struct clk *mdio_clk;
+ unsigned int mdc_rate;
};
static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
@@ -203,6 +215,38 @@ static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int ipq4019_mdio_set_div(struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+ /* If we don't have a clock for AHB use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+ /* MDC rate is ahb_rate/(MDIO_MODE_DIV + 1)
+	 * While dividers of 1, 2 and 4 are supported, internal
+	 * documentation doesn't guarantee correct operation of the
+	 * MDIO bus with them, so start the search at 8.
+ */
+ for (div = 8; div <= 256; div *= 2) {
+ /* The requested rate is supported by the div */
+ if (priv->mdc_rate == DIV_ROUND_UP(ahb_rate, div)) {
+ val = readl(priv->membase + MDIO_MODE_REG);
+ val &= ~MDIO_MODE_DIV_MASK;
+ val |= MDIO_MODE_DIV(div);
+ writel(val, priv->membase + MDIO_MODE_REG);
+
+ return 0;
+ }
+ }
+
+ /* The requested rate is not supported */
+ return -EINVAL;
+}
+
static int ipq_mdio_reset(struct mii_bus *bus)
{
struct ipq4019_mdio_data *priv = bus->priv;
@@ -225,10 +269,58 @@ static int ipq_mdio_reset(struct mii_bus *bus)
return ret;
ret = clk_prepare_enable(priv->mdio_clk);
- if (ret == 0)
- mdelay(10);
+ if (ret)
+ return ret;
- return ret;
+ mdelay(10);
+
+ /* Restore MDC rate */
+ return ipq4019_mdio_set_div(priv);
+}
+
+static void ipq4019_mdio_select_mdc_rate(struct platform_device *pdev,
+ struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+	/* MDC rate defined in DT; no need to pick a default value */
+ if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &priv->mdc_rate))
+ return;
+
+ /* If we don't have a clock for AHB use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+ /* Check what is the current div set */
+ val = readl(priv->membase + MDIO_MODE_REG);
+ div = FIELD_GET(MDIO_MODE_DIV_MASK, val);
+
+	/* The divider is not at its default value of /256; something
+	 * (the bootloader or another driver) probably changed it, so
+	 * keep it and don't overwrite it.
+ */
+ if (div != MDIO_MODE_DIV_256) {
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div + 1);
+ return;
+ }
+
+	/* If the divider is /256, assume nobody has set it and try to
+	 * find an MDC rate close to the 802.3 default of 2.5 MHz.
+ */
+ for (div = 256; div >= 8; div /= 2) {
+		/* Stop as soon as the rate would exceed 2.5 MHz; the
+		 * rate stored on the previous iteration is then the
+		 * closest one not above it.
+ */
+ if (DIV_ROUND_UP(ahb_rate, div) > 2500000)
+ break;
+
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div);
+ }
}
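To make the selection concrete, here is the search loop rerun in userspace, assuming (hypothetically) a 100 MHz AHB clock and the divider still at its /256 default:

    /* Standalone model of ipq4019_mdio_select_mdc_rate()'s search */
    #include <stdio.h>

    int main(void)
    {
            unsigned long ahb = 100000000, rate = 0;
            int div;

            for (div = 256; div >= 8; div /= 2) {
                    unsigned long r = (ahb + div - 1) / div; /* DIV_ROUND_UP */

                    if (r > 2500000)    /* would exceed 802.3's 2.5 MHz */
                            break;
                    rate = r;
            }
            printf("mdc_rate = %lu\n", rate);   /* 1562500, i.e. div = 64 */
            return 0;
    }

The loop keeps 100 MHz / 64 = 1.5625 MHz, the largest rate not above 2.5 MHz; ipq4019_mdio_set_div() then recovers div = 64 from that stored rate.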
static int ipq4019_mdio_probe(struct platform_device *pdev)
@@ -252,6 +344,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
if (IS_ERR(priv->mdio_clk))
return PTR_ERR(priv->mdio_clk);
+ ipq4019_mdio_select_mdc_rate(pdev, priv);
+ ret = ipq4019_mdio_set_div(priv);
+ if (ret)
+ return ret;
+
/* The platform resource is provided on the chipset IPQ5018 */
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 64ebcb6d235c..08e607f62e10 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -139,6 +139,53 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
}
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+static int __of_mdiobus_parse_phys(struct mii_bus *mdio, struct device_node *np,
+ bool *scanphys)
+{
+ struct device_node *child;
+ int addr, rc = 0;
+
+ /* Loop over the child nodes and register a phy_device for each phy */
+ for_each_available_child_of_node(np, child) {
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Ignore invalid ethernet-phy-package node */
+ if (!of_property_present(child, "reg"))
+ continue;
+
+ rc = __of_mdiobus_parse_phys(mdio, child, NULL);
+ if (rc && rc != -ENODEV)
+ goto exit;
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(&mdio->dev, child);
+ if (addr < 0) {
+ /* Skip scanning for invalid ethernet-phy-package node */
+ if (scanphys)
+ *scanphys = true;
+ continue;
+ }
+
+ if (of_mdiobus_child_is_phy(child))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ else
+ rc = of_mdiobus_register_device(mdio, child, addr);
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
+ goto exit;
+ }
+
+ return 0;
+exit:
+ of_node_put(child);
+ return rc;
+}
+
/**
* __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -180,33 +227,18 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
return rc;
/* Loop over the child nodes and register a phy_device for each phy */
- for_each_available_child_of_node(np, child) {
- addr = of_mdio_parse_addr(&mdio->dev, child);
- if (addr < 0) {
- scanphys = true;
- continue;
- }
-
- if (of_mdiobus_child_is_phy(child))
- rc = of_mdiobus_register_phy(mdio, child, addr);
- else
- rc = of_mdiobus_register_device(mdio, child, addr);
-
- if (rc == -ENODEV)
- dev_err(&mdio->dev,
- "MDIO device at address %d is missing.\n",
- addr);
- else if (rc)
- goto unregister;
- }
+ rc = __of_mdiobus_parse_phys(mdio, np, &scanphys);
+ if (rc)
+ goto unregister;
if (!scanphys)
return 0;
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
- /* Skip PHYs with reg property set */
- if (of_property_present(child, "reg"))
+ /* Skip PHYs with reg property set or ethernet-phy-package node */
+ if (of_property_present(child, "reg") ||
+ of_node_name_eq(child, "ethernet-phy-package"))
continue;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@ -227,15 +259,16 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
if (!rc)
break;
if (rc != -ENODEV)
- goto unregister;
+ goto put_unregister;
}
}
}
return 0;
-unregister:
+put_unregister:
of_node_put(child);
+unregister:
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 6e14ba5e06c8..0de108a1c0c8 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -43,13 +43,17 @@ MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
#define MAX_PARAM_LENGTH 256
+#define MAX_USERDATA_NAME_LENGTH 54
+#define MAX_USERDATA_VALUE_LENGTH 200
+#define MAX_USERDATA_ENTRY_LENGTH 256
+#define MAX_USERDATA_ITEMS 16
#define MAX_PRINT_CHUNK 1000
static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
-static bool oops_only = false;
+static bool oops_only;
module_param(oops_only, bool, 0600);
MODULE_PARM_DESC(oops_only, "Only log oops messages");
@@ -79,7 +83,10 @@ static struct console netconsole_ext;
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
- * @item: Links us into the configfs subsystem hierarchy.
+ * @group: Links us into the configfs subsystem hierarchy.
+ * @userdata_group: Links to the userdata configfs hierarchy
+ * @userdata_complete:	Cached, formatted userdata string to append
+ * @userdata_length: String length of userdata_complete
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -102,7 +109,10 @@ static struct console netconsole_ext;
struct netconsole_target {
struct list_head list;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- struct config_item item;
+ struct config_group group;
+ struct config_group userdata_group;
+ char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
+ size_t userdata_length;
#endif
bool enabled;
bool extended;
@@ -134,14 +144,14 @@ static void __exit dynamic_netconsole_exit(void)
*/
static void netconsole_target_get(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_get(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_get(&nt->group);
}
static void netconsole_target_put(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_put(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_put(&nt->group);
}
#else /* !CONFIG_NETCONSOLE_DYNAMIC */
@@ -215,15 +225,33 @@ static struct netconsole_target *alloc_and_init(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | userdata/
+ * | <key>/
+ * | value
+ * | ...
* |
* <target>/...
*/
static struct netconsole_target *to_target(struct config_item *item)
{
- return item ?
- container_of(item, struct netconsole_target, item) :
- NULL;
+ struct config_group *cfg_group;
+
+ cfg_group = to_config_group(item);
+ if (!cfg_group)
+ return NULL;
+ return container_of(to_config_group(item),
+ struct netconsole_target, group);
+}
+
+/* Strip a possible trailing newline (e.g. from echo(1)) */
+static void trim_newline(char *s, size_t maxlen)
+{
+	size_t len;
+
+	len = strnlen(s, maxlen);
+	if (len && s[len - 1] == '\n')
+		s[len - 1] = '\0';
}
/*
@@ -370,7 +398,7 @@ static ssize_t release_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -398,7 +426,7 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -420,22 +448,17 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
- size_t len;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
mutex_unlock(&dynamic_netconsole_mutex);
return -EINVAL;
}
strscpy(nt->np.dev_name, buf, IFNAMSIZ);
-
- /* Get rid of possible trailing newline from echo(1) */
- len = strnlen(nt->np.dev_name, IFNAMSIZ);
- if (nt->np.dev_name[len - 1] == '\n')
- nt->np.dev_name[len - 1] = '\0';
+ trim_newline(nt->np.dev_name, IFNAMSIZ);
mutex_unlock(&dynamic_netconsole_mutex);
return strnlen(buf, count);
@@ -450,7 +473,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -473,7 +496,7 @@ static ssize_t remote_port_store(struct config_item *item,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -495,12 +518,13 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -510,9 +534,9 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.local_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -531,12 +555,13 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -546,9 +571,9 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.remote_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -568,7 +593,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -585,6 +610,180 @@ out_unlock:
return -EINVAL;
}
+struct userdatum {
+ struct config_item item;
+ char value[MAX_USERDATA_VALUE_LENGTH];
+};
+
+static struct userdatum *to_userdatum(struct config_item *item)
+{
+ return container_of(item, struct userdatum, item);
+}
+
+struct userdata {
+ struct config_group group;
+};
+
+static struct userdata *to_userdata(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct userdata, group);
+}
+
+static struct netconsole_target *userdata_to_target(struct userdata *ud)
+{
+ struct config_group *netconsole_group;
+
+ netconsole_group = to_config_group(ud->group.cg_item.ci_parent);
+ return to_target(&netconsole_group->cg_item);
+}
+
+static ssize_t userdatum_value_show(struct config_item *item, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", &(to_userdatum(item)->value[0]));
+}
+
+static void update_userdata(struct netconsole_target *nt)
+{
+ int complete_idx = 0, child_count = 0;
+ struct list_head *entry;
+
+ /* Clear the current string in case the last userdatum was deleted */
+ nt->userdata_length = 0;
+ nt->userdata_complete[0] = 0;
+
+ list_for_each(entry, &nt->userdata_group.cg_children) {
+ struct userdatum *udm_item;
+ struct config_item *item;
+
+ if (child_count >= MAX_USERDATA_ITEMS)
+ break;
+ child_count++;
+
+ item = container_of(entry, struct config_item, ci_entry);
+ udm_item = to_userdatum(item);
+
+ /* Skip userdata with no value set */
+ if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0)
+ continue;
+
+		/* This cannot overflow userdata_complete: each entry is
+		 * capped at MAX_USERDATA_ENTRY_LENGTH bytes (1/MAX_USERDATA_ITEMS
+		 * of the buffer), and child_count above limits the entry
+		 * count to MAX_USERDATA_ITEMS.
+ */
+ complete_idx += scnprintf(&nt->userdata_complete[complete_idx],
+ MAX_USERDATA_ENTRY_LENGTH, "%s=%s\n",
+ item->ci_name, udm_item->value);
+ }
+ nt->userdata_length = strnlen(nt->userdata_complete,
+ sizeof(nt->userdata_complete));
+}
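The sizing works out from the constants defined at the top of the file: each entry is rendered as "<name>=<value>\n", at most 54 + 1 + 200 + 1 = 256 bytes (MAX_USERDATA_ENTRY_LENGTH), and scnprintf() truncates at that length regardless; with child_count capping the list at MAX_USERDATA_ITEMS (16) entries, the concatenation always fits the 16 * 256 = 4096-byte userdata_complete buffer, terminating NUL included.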
+
+static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
+ size_t count)
+{
+ struct userdatum *udm = to_userdatum(item);
+ struct netconsole_target *nt;
+ struct userdata *ud;
+ int ret;
+
+ if (count > MAX_USERDATA_VALUE_LENGTH)
+ return -EMSGSIZE;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+
+ ret = strscpy(udm->value, buf, sizeof(udm->value));
+ if (ret < 0)
+ goto out_unlock;
+ trim_newline(udm->value, sizeof(udm->value));
+
+ ud = to_userdata(item->ci_parent);
+ nt = userdata_to_target(ud);
+ update_userdata(nt);
+
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return count;
+out_unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+CONFIGFS_ATTR(userdatum_, value);
+
+static struct configfs_attribute *userdatum_attrs[] = {
+ &userdatum_attr_value,
+ NULL,
+};
+
+static void userdatum_release(struct config_item *item)
+{
+ kfree(to_userdatum(item));
+}
+
+static struct configfs_item_operations userdatum_ops = {
+ .release = userdatum_release,
+};
+
+static const struct config_item_type userdatum_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_attrs = userdatum_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *userdatum_make_item(struct config_group *group,
+ const char *name)
+{
+ struct netconsole_target *nt;
+ struct userdatum *udm;
+ struct userdata *ud;
+ size_t child_count;
+
+ if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+ child_count = list_count_nodes(&nt->userdata_group.cg_children);
+ if (child_count >= MAX_USERDATA_ITEMS)
+ return ERR_PTR(-ENOSPC);
+
+ udm = kzalloc(sizeof(*udm), GFP_KERNEL);
+ if (!udm)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&udm->item, name, &userdatum_type);
+ return &udm->item;
+}
+
+static void userdatum_drop(struct config_group *group, struct config_item *item)
+{
+ struct netconsole_target *nt;
+ struct userdata *ud;
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ update_userdata(nt);
+ config_item_put(item);
+ mutex_unlock(&dynamic_netconsole_mutex);
+}
+
+static struct configfs_attribute *userdata_attrs[] = {
+ NULL,
+};
+
+static struct configfs_group_operations userdata_ops = {
+ .make_item = userdatum_make_item,
+ .drop_item = userdatum_drop,
+};
+
+static struct config_item_type userdata_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_group_ops = &userdata_ops,
+ .ct_attrs = userdata_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
CONFIGFS_ATTR(, enabled);
CONFIGFS_ATTR(, extended);
CONFIGFS_ATTR(, dev_name);
@@ -629,6 +828,15 @@ static const struct config_item_type netconsole_target_type = {
.ct_owner = THIS_MODULE,
};
+static void init_target_config_group(struct netconsole_target *nt,
+ const char *name)
+{
+ config_group_init_type_name(&nt->group, name, &netconsole_target_type);
+ config_group_init_type_name(&nt->userdata_group, "userdata",
+ &userdata_type);
+ configfs_add_default_group(&nt->userdata_group, &nt->group);
+}
+
static struct netconsole_target *find_cmdline_target(const char *name)
{
struct netconsole_target *nt, *ret = NULL;
@@ -636,7 +844,7 @@ static struct netconsole_target *find_cmdline_target(const char *name)
spin_lock_irqsave(&target_list_lock, flags);
list_for_each_entry(nt, &target_list, list) {
- if (!strcmp(nt->item.ci_name, name)) {
+ if (!strcmp(nt->group.cg_item.ci_name, name)) {
ret = nt;
break;
}
@@ -650,8 +858,8 @@ static struct netconsole_target *find_cmdline_target(const char *name)
* Group operations and type for netconsole_subsys.
*/
-static struct config_item *make_netconsole_target(struct config_group *group,
- const char *name)
+static struct config_group *make_netconsole_target(struct config_group *group,
+ const char *name)
{
struct netconsole_target *nt;
unsigned long flags;
@@ -663,23 +871,25 @@ static struct config_item *make_netconsole_target(struct config_group *group,
if (!strncmp(name, NETCONSOLE_PARAM_TARGET_PREFIX,
strlen(NETCONSOLE_PARAM_TARGET_PREFIX))) {
nt = find_cmdline_target(name);
- if (nt)
- return &nt->item;
+ if (nt) {
+ init_target_config_group(nt, name);
+ return &nt->group;
+ }
}
nt = alloc_and_init();
if (!nt)
return ERR_PTR(-ENOMEM);
- /* Initialize the config_item member */
- config_item_init_type_name(&nt->item, name, &netconsole_target_type);
+ /* Initialize the config_group member */
+ init_target_config_group(nt, name);
/* Adding, but it is disabled */
spin_lock_irqsave(&target_list_lock, flags);
list_add(&nt->list, &target_list);
spin_unlock_irqrestore(&target_list_lock, flags);
- return &nt->item;
+ return &nt->group;
}
static void drop_netconsole_target(struct config_group *group,
@@ -699,11 +909,11 @@ static void drop_netconsole_target(struct config_group *group,
if (nt->enabled)
netpoll_cleanup(&nt->np);
- config_item_put(&nt->item);
+ config_item_put(&nt->group.cg_item);
}
static struct configfs_group_operations netconsole_subsys_group_ops = {
- .make_item = make_netconsole_target,
+ .make_group = make_netconsole_target,
.drop_item = drop_netconsole_target,
};
@@ -729,8 +939,7 @@ static void populate_configfs_item(struct netconsole_target *nt,
snprintf(target_name, sizeof(target_name), "%s%d",
NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
- config_item_init_type_name(&nt->item, target_name,
- &netconsole_target_type);
+ init_target_config_group(nt, target_name);
}
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
@@ -781,6 +990,7 @@ restart:
spin_unlock_irqrestore(&target_list_lock, flags);
if (stopped) {
const char *msg = "had an event";
+
switch (event) {
case NETDEV_UNREGISTER:
msg = "unregistered";
@@ -824,19 +1034,34 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
const char *msg_ready = msg;
const char *release;
int release_len = 0;
+ int userdata_len = 0;
+ char *userdata = NULL;
+
+#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ userdata = nt->userdata_complete;
+ userdata_len = nt->userdata_length;
+#endif
if (nt->release) {
release = init_utsname()->release;
release_len = strlen(release) + 1;
}
- if (msg_len + release_len <= MAX_PRINT_CHUNK) {
+ if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK) {
/* No fragmentation needed */
if (nt->release) {
scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
msg_len += release_len;
- msg_ready = buf;
+ } else {
+ memcpy(buf, msg, msg_len);
}
+
+ if (userdata)
+ msg_len += scnprintf(&buf[msg_len],
+ MAX_PRINT_CHUNK - msg_len,
+ "%s", userdata);
+
+ msg_ready = buf;
netpoll_send_udp(&nt->np, msg_ready, msg_len);
return;
}
@@ -860,24 +1085,48 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
memcpy(buf + release_len, header, header_len);
header_len += release_len;
- while (offset < body_len) {
+ while (offset < body_len + userdata_len) {
int this_header = header_len;
- int this_chunk;
+ int this_offset = 0;
+ int this_chunk = 0;
this_header += scnprintf(buf + this_header,
sizeof(buf) - this_header,
- ",ncfrag=%d/%d;", offset, body_len);
-
- this_chunk = min(body_len - offset,
- MAX_PRINT_CHUNK - this_header);
- if (WARN_ON_ONCE(this_chunk <= 0))
- return;
-
- memcpy(buf + this_header, body + offset, this_chunk);
-
- netpoll_send_udp(&nt->np, buf, this_header + this_chunk);
+ ",ncfrag=%d/%d;", offset,
+ body_len + userdata_len);
+
+ /* Not all body data has been written yet */
+ if (offset < body_len) {
+ this_chunk = min(body_len - offset,
+ MAX_PRINT_CHUNK - this_header);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header, body + offset, this_chunk);
+ this_offset += this_chunk;
+ }
+ /* Body is fully written and there is pending userdata to write;
+ * append userdata in this chunk
+ */
+ if (offset + this_offset >= body_len &&
+ offset + this_offset < userdata_len + body_len) {
+ int sent_userdata = (offset + this_offset) - body_len;
+ int preceding_bytes = this_chunk + this_header;
+
+ if (WARN_ON_ONCE(sent_userdata < 0))
+ return;
+
+ this_chunk = min(userdata_len - sent_userdata,
+ MAX_PRINT_CHUNK - preceding_bytes);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header + this_offset,
+ userdata + sent_userdata,
+ this_chunk);
+ this_offset += this_chunk;
+ }
- offset += this_chunk;
+ netpoll_send_udp(&nt->np, buf, this_header + this_offset);
+ offset += this_offset;
}
}
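The fragmentation arithmetic above can be exercised outside the kernel. Below is a minimal userspace sketch, assuming a toy MAX_PRINT_CHUNK and a simplified header; it mirrors how "offset" walks the concatenation of body and userdata and how every datagram carries an ",ncfrag=<offset>/<total>;" marker, but it is not the driver's code (sizes here are chosen so each datagram makes progress).

#include <stdio.h>
#include <string.h>

#define MAX_PRINT_CHUNK 32	/* tiny on purpose, to force fragmentation */

static void send_fragmented(const char *header, const char *body,
			    const char *userdata)
{
	int body_len = strlen(body);
	int user_len = strlen(userdata);
	int total = body_len + user_len;
	int offset = 0;
	char buf[MAX_PRINT_CHUNK + 1];

	while (offset < total) {
		int hdr = snprintf(buf, sizeof(buf), "%s,ncfrag=%d/%d;",
				   header, offset, total);
		int room = MAX_PRINT_CHUNK - hdr;
		int copied = 0;

		/* First drain the body, then append userdata. */
		if (offset < body_len) {
			int n = body_len - offset;

			if (n > room)
				n = room;
			memcpy(buf + hdr, body + offset, n);
			copied += n;
		}
		if (offset + copied >= body_len && copied < room) {
			int sent = offset + copied - body_len;
			int n = user_len - sent;

			if (n > room - copied)
				n = room - copied;
			memcpy(buf + hdr + copied, userdata + sent, n);
			copied += n;
		}
		buf[hdr + copied] = '\0';
		printf("datagram: %s\n", buf);	/* stand-in for netpoll_send_udp() */
		offset += copied;
	}
}

int main(void)
{
	send_fragmented("6;100", "a longish kernel message body", " CPU=3\n");
	return 0;
}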
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index bcbc1e19edde..64c0cdd31bf8 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -129,7 +129,7 @@ static void nsim_bus_dev_release(struct device *dev)
complete(&nsim_bus_devs_released);
}
-static struct device_type nsim_bus_dev_type = {
+static const struct device_type nsim_bus_dev_type = {
.groups = nsim_bus_dev_attr_groups,
.release = nsim_bus_dev_release,
};
@@ -232,9 +232,154 @@ del_device_store(const struct bus_type *bus, const char *buf, size_t count)
}
static BUS_ATTR_WO(del_device);
+static ssize_t link_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim_a, *nsim_b, *peer;
+ struct net_device *dev_a, *dev_b;
+ unsigned int ifidx_a, ifidx_b;
+ int netnsfd_a, netnsfd_b, err;
+ struct net *ns_a, *ns_b;
+
+ err = sscanf(buf, "%d:%u %d:%u", &netnsfd_a, &ifidx_a, &netnsfd_b,
+ &ifidx_b);
+ if (err != 4) {
+ pr_err("Format for linking two devices is \"netnsfd_a:ifidx_a netnsfd_b:ifidx_b\" (int uint int uint).\n");
+ return -EINVAL;
+ }
+
+ ns_a = get_net_ns_by_fd(netnsfd_a);
+ if (IS_ERR(ns_a)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_a);
+ return -EINVAL;
+ }
+
+ ns_b = get_net_ns_by_fd(netnsfd_b);
+ if (IS_ERR(ns_b)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_b);
+ put_net(ns_a);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev_a = __dev_get_by_index(ns_a, ifidx_a);
+ if (!dev_a) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_a)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ dev_b = __dev_get_by_index(ns_b, ifidx_b);
+ if (!dev_b) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_b)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (dev_a == dev_b) {
+ pr_err("Cannot link a netdevsim to itself\n");
+ goto out_err;
+ }
+
+ err = -EBUSY;
+ nsim_a = netdev_priv(dev_a);
+ peer = rtnl_dereference(nsim_a->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_a,
+ ifidx_a);
+ goto out_err;
+ }
+
+ nsim_b = netdev_priv(dev_b);
+ peer = rtnl_dereference(nsim_b->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_b,
+ ifidx_b);
+ goto out_err;
+ }
+
+ err = 0;
+ rcu_assign_pointer(nsim_a->peer, nsim_b);
+ rcu_assign_pointer(nsim_b->peer, nsim_a);
+
+out_err:
+ put_net(ns_b);
+ put_net(ns_a);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(link_device);
+
+static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim, *peer;
+ struct net_device *dev;
+ unsigned int ifidx;
+ int netnsfd, err;
+ struct net *ns;
+
+ err = sscanf(buf, "%d:%u", &netnsfd, &ifidx);
+ if (err != 2) {
+ pr_err("Format for unlinking a device is \"netnsfd:ifidx\" (int uint).\n");
+ return -EINVAL;
+ }
+
+ ns = get_net_ns_by_fd(netnsfd);
+ if (IS_ERR(ns)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev = __dev_get_by_index(ns, ifidx);
+ if (!dev) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ if (!netdev_is_nsim(dev)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ nsim = netdev_priv(dev);
+ peer = rtnl_dereference(nsim->peer);
+ if (!peer)
+ goto out_put_netns;
+
+ err = 0;
+ RCU_INIT_POINTER(nsim->peer, NULL);
+ RCU_INIT_POINTER(peer->peer, NULL);
+
+out_put_netns:
+ put_net(ns);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(unlink_device);
+
static struct attribute *nsim_bus_attrs[] = {
&bus_attr_new_device.attr,
&bus_attr_del_device.attr,
+ &bus_attr_link_device.attr,
+ &bus_attr_unlink_device.attr,
NULL
};
ATTRIBUTE_GROUPS(nsim_bus);
@@ -260,7 +405,7 @@ static int nsim_num_vf(struct device *dev)
return nsim_bus_dev->num_vfs;
}
-static struct bus_type nsim_bus = {
+static const struct bus_type nsim_bus = {
.name = DRV_NAME,
.dev_name = DRV_NAME,
.bus_groups = nsim_bus_groups,
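As a usage sketch for the new bus attributes (hypothetical ifindexes, and assuming both netdevsim ports live in the caller's network namespace), linking is driven by writing the documented "netnsfd:ifidx netnsfd:ifidx" string to the attribute file:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* A netns fd can come from /proc/<pid>/ns/net or /run/netns/<name>. */
	int nsfd = open("/proc/self/ns/net", O_RDONLY);
	int attr = open("/sys/bus/netdevsim/link_device", O_WRONLY);
	char cmd[64];
	int len;

	if (nsfd < 0 || attr < 0) {
		perror("open");
		return 1;
	}
	/* ifindex 2 and 3 are assumed to be the two nsim netdevs */
	len = snprintf(cmd, sizeof(cmd), "%d:%u %d:%u", nsfd, 2u, nsfd, 3u);
	if (write(attr, cmd, len) != len)
		perror("write");
	close(attr);
	close(nsfd);
	return 0;
}

Unlinking works the same way against unlink_device, with a single "netnsfd:ifidx" pair.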
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b4d3b9cde8bd..92a7a36b93ac 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
trap_report_dw.work);
nsim_dev = nsim_trap_data->nsim_dev;
- /* For each running port and enabled packet trap, generate a UDP
- * packet with a random 5-tuple and report it.
- */
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
return;
}
+ /* For each running port and enabled packet trap, generate a UDP
+ * packet with a random 5-tuple and report it.
+ */
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
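The hunk above also changes the contended-lock path to requeue with a one-jiffy delay rather than zero, so the work item backs off instead of busy-rescheduling against whoever holds the devlink lock. A minimal userspace analogue of that trylock-and-defer shape, using pthreads purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void report_work(void)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		/* kernel: schedule_delayed_work(..., 1); back off briefly */
		usleep(1000);
		return;
	}
	puts("generated trap reports under the lock");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	report_work();
	return 0;
}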
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 77e8250282a5..8330bc0bcb7e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -29,18 +29,35 @@
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ unsigned int len = skb->len;
+ struct netdevsim *peer_ns;
+ rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
- goto out;
+ goto out_drop_free;
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out_drop_free;
+
+ skb_tx_timestamp(skb);
+ if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP))
+ goto out_drop_cnt;
+
+ rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
- ns->tx_bytes += skb->len;
+ ns->tx_bytes += len;
u64_stats_update_end(&ns->syncp);
+ return NETDEV_TX_OK;
-out:
+out_drop_free:
dev_kfree_skb(skb);
-
+out_drop_cnt:
+ rcu_read_unlock();
+ u64_stats_update_begin(&ns->syncp);
+ ns->tx_dropped++;
+ u64_stats_update_end(&ns->syncp);
return NETDEV_TX_OK;
}
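The transmit path above relies on the usual RCU publish/consume discipline: writers (link/unlink and teardown) update the peer pointer under rtnl_lock with rcu_assign_pointer()/RCU_INIT_POINTER(), while the hot path only takes rcu_read_lock() and tolerates a NULL peer. A rough userspace analogue with C11 atomics (names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct port {
	const char *name;
	_Atomic(struct port *) peer;
};

static void link_ports(struct port *a, struct port *b)
{
	/* kernel: rcu_assign_pointer(a->peer, b) under rtnl_lock */
	atomic_store_explicit(&a->peer, b, memory_order_release);
	atomic_store_explicit(&b->peer, a, memory_order_release);
}

static void xmit(struct port *p)
{
	/* kernel: rcu_read_lock(); peer = rcu_dereference(p->peer); */
	struct port *peer = atomic_load_explicit(&p->peer,
						 memory_order_acquire);

	if (!peer) {
		printf("%s: no peer, drop\n", p->name);
		return;
	}
	printf("%s -> %s: forwarded\n", p->name, peer->name);
}

int main(void)
{
	struct port a = { "nsim0", NULL }, b = { "nsim1", NULL };

	xmit(&a);		/* dropped: not linked yet */
	link_ports(&a, &b);
	xmit(&a);		/* forwarded to nsim1 */
	return 0;
}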
@@ -70,6 +87,7 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
+ stats->tx_dropped = ns->tx_dropped;
} while (u64_stats_fetch_retry(&ns->syncp, start));
}
@@ -265,6 +283,21 @@ nsim_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
+static int nsim_get_iflink(const struct net_device *dev)
+{
+ struct netdevsim *nsim, *peer;
+ int iflink;
+
+ nsim = netdev_priv(dev);
+
+ rcu_read_lock();
+ peer = rcu_dereference(nsim->peer);
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ rcu_read_unlock();
+
+ return iflink;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
@@ -282,6 +315,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
+ .ndo_get_iflink = nsim_get_iflink,
.ndo_bpf = nsim_bpf,
};
@@ -302,7 +336,6 @@ static void nsim_setup(struct net_device *dev)
eth_hw_addr_random(dev);
dev->tx_queue_len = 0;
- dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
IFF_NO_QUEUE;
@@ -413,8 +446,13 @@ err_free_netdev:
void nsim_destroy(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
+ struct netdevsim *peer;
rtnl_lock();
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ RCU_INIT_POINTER(peer->peer, NULL);
+ RCU_INIT_POINTER(ns->peer, NULL);
unregister_netdevice(dev);
if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
nsim_macsec_teardown(ns);
@@ -427,6 +465,11 @@ void nsim_destroy(struct netdevsim *ns)
free_netdev(dev);
}
+bool netdev_is_nsim(struct net_device *dev)
+{
+ return dev->netdev_ops == &nsim_netdev_ops;
+}
+
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 028c825b86db..553c4b9b4f63 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -98,6 +98,7 @@ struct netdevsim {
u64 tx_packets;
u64 tx_bytes;
+ u64 tx_dropped;
struct u64_stats_sync syncp;
struct nsim_bus_dev *nsim_bus_dev;
@@ -125,11 +126,13 @@ struct netdevsim {
} udp_ports;
struct nsim_ethtool ethtool;
+ struct netdevsim __rcu *peer;
};
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
void nsim_destroy(struct netdevsim *ns);
+bool netdev_is_nsim(struct net_device *dev);
void nsim_ethtool_init(struct netdevsim *ns);
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 39171380ccf2..a4d2e76a8d58 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -145,7 +145,7 @@ static int netkit_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nk->peer);
if (peer)
- iflink = peer->ifindex;
+ iflink = READ_ONCE(peer->ifindex);
rcu_read_unlock();
return iflink;
}
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 5e19a6839dea..e5a0987a263e 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -17,17 +17,6 @@ static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int nlmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- return dev->lstats == NULL ? -ENOMEM : 0;
-}
-
-static void nlmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct nlmon {
struct netlink_tap nt;
};
@@ -51,15 +40,7 @@ static int nlmon_close(struct net_device *dev)
static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- u64 packets, bytes;
-
- dev_lstats_read(dev, &packets, &bytes);
-
- stats->rx_packets = packets;
- stats->tx_packets = 0;
-
- stats->rx_bytes = bytes;
- stats->tx_bytes = 0;
+ dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
}
static u32 always_on(struct net_device *dev)
@@ -72,8 +53,6 @@ static const struct ethtool_ops nlmon_ethtool_ops = {
};
static const struct net_device_ops nlmon_ops = {
- .ndo_init = nlmon_dev_init,
- .ndo_uninit = nlmon_dev_uninit,
.ndo_open = nlmon_open,
.ndo_stop = nlmon_close,
.ndo_start_xmit = nlmon_xmit,
@@ -92,6 +71,7 @@ static void nlmon_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->flags = IFF_NOARP;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
/* That's rather a softlimit here, which, of course,
* can be altered. Not a real MTU, but what is to be
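With pcpu_stat_type set to NETDEV_PCPU_STAT_LSTATS, the core allocates and frees the per-CPU lstats on the driver's behalf, which is why the ndo_init/ndo_uninit pair above could be dropped and dev_lstats_read() can fill rx_packets/rx_bytes directly. A toy model of that sum-at-read-time aggregation (fixed "CPU" count, illustrative only):

#include <stdio.h>

#define NR_CPUS 4

struct lstats { unsigned long packets, bytes; };
static struct lstats percpu[NR_CPUS];	/* core-allocated in the kernel */

static void rx_on_cpu(int cpu, unsigned long len)
{
	percpu[cpu].packets++;	/* lock-free: each CPU owns its slot */
	percpu[cpu].bytes += len;
}

static void lstats_read(unsigned long *packets, unsigned long *bytes)
{
	*packets = *bytes = 0;
	for (int i = 0; i < NR_CPUS; i++) {
		*packets += percpu[i].packets;
		*bytes += percpu[i].bytes;
	}
}

int main(void)
{
	unsigned long p, b;

	rx_on_cpu(0, 100);
	rx_on_cpu(3, 40);
	lstats_read(&p, &b);
	printf("rx_packets=%lu rx_bytes=%lu\n", p, b);
	return 0;
}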
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index dc3962b2aa6b..853b8c138718 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -398,4 +398,5 @@ void lynx_pcs_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(lynx_pcs_destroy);
+MODULE_DESCRIPTION("NXP Lynx PCS phylink library");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 8501dd365279..4f63abe638c4 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -303,4 +303,5 @@ void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(mtk_pcs_lynxi_destroy);
+MODULE_DESCRIPTION("MediaTek SGMII library for LynxI");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index d93f84fbb1fd..4bd66fdde367 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -183,7 +183,7 @@ static void miic_converter_enable(struct miic *miic, int port, int enable)
miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
}
-static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
+static int miic_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising, bool permit)
{
@@ -234,7 +234,7 @@ static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
return 0;
}
-static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
+static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -333,6 +333,7 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
miic_port->miic = miic;
miic_port->port = port - 1;
miic_port->pcs.ops = &miic_phylink_ops;
+ miic_port->pcs.neg_mode = true;
return &miic_port->pcs;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 31f0beba638a..31525fe9c32e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -10,7 +10,7 @@
#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
-#include <linux/workqueue.h>
+
#include "pcs-xpcs.h"
#define phylink_pcs_to_xpcs(pl_pcs) \
@@ -130,7 +130,6 @@ static const phy_interface_t xpcs_1000basex_interfaces[] = {
static const phy_interface_t xpcs_2500basex_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
- PHY_INTERFACE_MODE_MAX,
};
enum {
@@ -293,7 +292,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
dev = MDIO_MMD_VEND2;
break;
default:
- return -1;
+ return -EINVAL;
}
ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
@@ -614,14 +613,15 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
+ if (!compat)
+ return -EINVAL;
/* Populate the supported link modes for this PHY interface type.
* FIXME: what about the port modes and autoneg bit? This masks
* all those away.
*/
- if (compat)
- for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
- set_bit(compat->supported[i], xpcs_supported);
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
@@ -636,8 +636,7 @@ void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
const struct xpcs_compat *compat = &xpcs->id->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
- if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
- __set_bit(compat->interface[j], interfaces);
+ __set_bit(compat->interface[j], interfaces);
}
}
EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
@@ -891,7 +890,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return ret;
break;
default:
- return -1;
+ return -EINVAL;
}
if (compat->pma_config) {
@@ -1456,4 +1455,5 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
}
EXPORT_SYMBOL_GPL(xpcs_create_mdiodev);
+MODULE_DESCRIPTION("Synopsys DesignWare XPCS library");
MODULE_LICENSE("GPL v2");
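Two of the hunks above replace the PHY_INTERFACE_MODE_MAX terminator with a stored element count, and turn bare -1 returns into proper errnos. The sentinel-vs-counted array distinction in miniature (values illustrative):

#include <stdio.h>

#define END -1	/* sentinel, analogous to PHY_INTERFACE_MODE_MAX */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int with_sentinel[] = { 2, 3, 5, END };
static const int counted[] = { 2, 3, 5 };

int main(void)
{
	for (int i = 0; with_sentinel[i] != END; i++)
		printf("sentinel walk: %d\n", with_sentinel[i]);
	for (size_t j = 0; j < ARRAY_SIZE(counted); j++)
		printf("counted walk: %d\n", counted[j]);
	return 0;
}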
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9e2672800f0b..1df0595c5ba9 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -232,6 +232,7 @@ config MARVELL_10G_PHY
config MARVELL_88Q2XXX_PHY
tristate "Marvell 88Q2XXX PHY"
+ depends on HWMON || HWMON=n
help
Support for the Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet
PHYs.
@@ -335,12 +336,7 @@ config NCN26000_PHY
Currently supports the NCN26000 10BASE-T1S Industrial PHY
with MII interface.
-config AT803X_PHY
- tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
- depends on REGULATOR
- help
- Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
- QCA8337(Internal qca8k PHY) model
+source "drivers/net/phy/qcom/Kconfig"
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6097afd44392..197acfa0b412 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
-obj-$(CONFIG_AT803X_PHY) += at803x.o
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
else
@@ -91,6 +90,7 @@ endif
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
+obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 7619d6185801..85f910e2d4fb 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -18,6 +18,12 @@
#define PHY_ID_ADIN1110 0x0283bc91
#define PHY_ID_ADIN2111 0x0283bca1
+#define ADIN_PHY_SUBSYS_IRQ_MASK 0x0021
+#define ADIN_LINK_STAT_CHNG_IRQ_EN BIT(1)
+
+#define ADIN_PHY_SUBSYS_IRQ_STATUS 0x0011
+#define ADIN_LINK_STAT_CHNG BIT(1)
+
#define ADIN_FORCED_MODE 0x8000
#define ADIN_FORCED_MODE_EN BIT(0)
@@ -136,6 +142,53 @@ static int adin_config_aneg(struct phy_device *phydev)
return genphy_c45_config_aneg(phydev);
}
+static int adin_phy_ack_intr(struct phy_device *phydev)
+{
+ /* Clear pending interrupts */
+ int rc = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_config_intr(struct phy_device *phydev)
+{
+ u16 irq_mask;
+ int ret;
+
+ ret = adin_phy_ack_intr(phydev);
+ if (ret)
+ return ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ irq_mask = ADIN_LINK_STAT_CHNG_IRQ_EN;
+ else
+ irq_mask = 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_MASK,
+ ADIN_LINK_STAT_CHNG_IRQ_EN, irq_mask);
+}
+
+static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & ADIN_LINK_STAT_CHNG))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
{
int ret;
@@ -275,6 +328,8 @@ static struct phy_driver adin_driver[] = {
.probe = adin_probe,
.config_aneg = adin_config_aneg,
.read_status = adin_read_status,
+ .config_intr = adin_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.set_loopback = adin_set_loopback,
.suspend = adin_suspend,
.resume = adin_resume,
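The new handlers follow the common read-to-clear pattern: the latched status register is acknowledged by reading it, so adin_config_intr() drains stale events before changing the mask, and the handler claims the IRQ only when an enabled bit is actually set. A toy model of that flow (register behavior simulated, not real MDIO access):

#include <stdio.h>

#define LINK_STAT_CHNG 0x2

static unsigned int irq_status;	/* pretend hardware latch */
static unsigned int irq_mask;

static unsigned int read_status(void)	/* reading clears the latch */
{
	unsigned int v = irq_status;

	irq_status = 0;
	return v;
}

static void config_intr(int enable)
{
	(void)read_status();		/* ack anything pending first */
	irq_mask = enable ? LINK_STAT_CHNG : 0;
}

static void handle_interrupt(void)
{
	unsigned int v = read_status();

	if (!(v & irq_mask)) {
		printf("IRQ_NONE\n");
		return;
	}
	printf("IRQ_HANDLED: link change\n");
}

int main(void)
{
	irq_status = LINK_STAT_CHNG;	/* stale event before enabling */
	config_intr(1);			/* acks it, then unmasks */
	irq_status = LINK_STAT_CHNG;	/* real event */
	handle_interrupt();
	return 0;
}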
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 97a2fafa15ca..71bfddb8f453 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -22,9 +22,13 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR111 0x03a1b610
+#define PHY_ID_AQR111B0 0x03a1b612
#define PHY_ID_AQR112 0x03a1b662
#define PHY_ID_AQR412 0x03a1b712
+#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
+#define PHY_ID_AQR813 0x31c31cb2
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
@@ -727,6 +731,15 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS,
+ MDIO_PMD_TXDIS_GLOBAL);
+ if (ret)
+ return ret;
+
+ ret = aqr107_wait_processor_intensive_op(phydev);
+ if (ret)
+ return ret;
+
return aqr107_fill_interface_modes(phydev);
}
@@ -746,6 +759,16 @@ static int aqr107_probe(struct phy_device *phydev)
return aqr_hwmon_probe(phydev);
}
+static int aqr111_config_init(struct phy_device *phydev)
+{
+ /* AQR111 reports support for speeds up to 10G;
+ * however, only speeds up to 5G are actually supported.
+ */
+ phy_set_max_speed(phydev, SPEED_5000);
+
+ return aqr107_config_init(phydev);
+}
+
static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
@@ -820,6 +843,44 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
+ .name = "Aquantia AQR111",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
+ .name = "Aquantia AQR111B0",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
.name = "Aquantia AQR405",
.config_aneg = aqr_config_aneg,
@@ -864,6 +925,25 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR113),
+ .name = "Aquantia AQR113",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr113c_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
@@ -882,6 +962,25 @@ static struct phy_driver aqr_driver[] = {
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
+ .name = "Aquantia AQR813",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
};
module_phy_driver(aqr_driver);
@@ -894,9 +993,13 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
};
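The new table entries match on the PHY model with the revision nibble masked off, so one entry covers all silicon revisions of a part. A minimal illustration of that masked compare (the mask value is the one commonly used for model matches; IDs taken from the table above):

#include <stdio.h>

#define MODEL_MASK 0xfffffff0u	/* ignore the 4 revision bits */

static int matches(unsigned int tbl_id, unsigned int hw_id)
{
	return (tbl_id & MODEL_MASK) == (hw_id & MODEL_MASK);
}

int main(void)
{
	unsigned int aqr111 = 0x03a1b610, rev_b = 0x03a1b611;

	printf("match: %d\n", matches(aqr111, rev_b));	/* prints 1 */
	return 0;
}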
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
deleted file mode 100644
index a62442a55774..000000000000
--- a/drivers/net/phy/at803x.c
+++ /dev/null
@@ -1,2432 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * drivers/net/phy/at803x.c
- *
- * Driver for Qualcomm Atheros AR803x PHY
- *
- * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
- */
-
-#include <linux/phy.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool_netlink.h>
-#include <linux/bitfield.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/consumer.h>
-#include <linux/of.h>
-#include <linux/phylink.h>
-#include <linux/sfp.h>
-#include <dt-bindings/net/qca-ar803x.h>
-
-#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
-#define AT803X_SFC_ASSERT_CRS BIT(11)
-#define AT803X_SFC_FORCE_LINK BIT(10)
-#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
-#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
-#define AT803X_SFC_MANUAL_MDIX 0x1
-#define AT803X_SFC_MANUAL_MDI 0x0
-#define AT803X_SFC_SQE_TEST BIT(2)
-#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
-#define AT803X_SFC_DISABLE_JABBER BIT(0)
-
-#define AT803X_SPECIFIC_STATUS 0x11
-#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
-#define AT803X_SS_SPEED_1000 2
-#define AT803X_SS_SPEED_100 1
-#define AT803X_SS_SPEED_10 0
-#define AT803X_SS_DUPLEX BIT(13)
-#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
-#define AT803X_SS_MDIX BIT(6)
-
-#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
-#define QCA808X_SS_SPEED_2500 4
-
-#define AT803X_INTR_ENABLE 0x12
-#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
-#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
-#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
-#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
-#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
-#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
-#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
-#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
-#define AT803X_INTR_ENABLE_WOL BIT(0)
-
-#define AT803X_INTR_STATUS 0x13
-
-#define AT803X_SMART_SPEED 0x14
-#define AT803X_SMART_SPEED_ENABLE BIT(5)
-#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
-#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
-#define AT803X_CDT 0x16
-#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
-#define AT803X_CDT_ENABLE_TEST BIT(0)
-#define AT803X_CDT_STATUS 0x1c
-#define AT803X_CDT_STATUS_STAT_NORMAL 0
-#define AT803X_CDT_STATUS_STAT_SHORT 1
-#define AT803X_CDT_STATUS_STAT_OPEN 2
-#define AT803X_CDT_STATUS_STAT_FAIL 3
-#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
-#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
-#define AT803X_LED_CONTROL 0x18
-
-#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
-#define AT803X_WOL_EN BIT(5)
-#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
-#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
-#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
-#define AT803X_DEBUG_ADDR 0x1D
-#define AT803X_DEBUG_DATA 0x1E
-
-#define AT803X_MODE_CFG_MASK 0x0F
-#define AT803X_MODE_CFG_BASET_RGMII 0x00
-#define AT803X_MODE_CFG_BASET_SGMII 0x01
-#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
-#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
-#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
-#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
-#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
-#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
-#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
-#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
-#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
-
-#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
-#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-
-#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
-#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
-#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
-#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-
-#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
-#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-
-#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
-#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
-#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
-#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
-
-#define AT803X_DEBUG_REG_3C 0x3C
-
-#define AT803X_DEBUG_REG_GREEN 0x3D
-#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
-
-#define AT803X_DEBUG_REG_1F 0x1F
-#define AT803X_DEBUG_PLL_ON BIT(2)
-#define AT803X_DEBUG_RGMII_1V8 BIT(3)
-
-#define MDIO_AZ_DEBUG 0x800D
-
-/* AT803x supports either the XTAL input pad, an internal PLL or the
- * DSP as clock reference for the clock output pad. The XTAL reference
- * is only used for 25 MHz output, all other frequencies need the PLL.
- * The DSP as a clock reference is used in synchronous ethernet
- * applications.
- *
- * By default the PLL is only enabled if there is a link. Otherwise
- * the PHY will go into low power state and disabled the PLL. You can
- * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
- * enabled.
- */
-#define AT803X_MMD7_CLK25M 0x8016
-#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
-#define AT803X_CLK_OUT_25MHZ_XTAL 0
-#define AT803X_CLK_OUT_25MHZ_DSP 1
-#define AT803X_CLK_OUT_50MHZ_PLL 2
-#define AT803X_CLK_OUT_50MHZ_DSP 3
-#define AT803X_CLK_OUT_62_5MHZ_PLL 4
-#define AT803X_CLK_OUT_62_5MHZ_DSP 5
-#define AT803X_CLK_OUT_125MHZ_PLL 6
-#define AT803X_CLK_OUT_125MHZ_DSP 7
-
-/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
- * but doesn't support choosing between XTAL/PLL and DSP.
- */
-#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
-
-#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
-#define AT803X_CLK_OUT_STRENGTH_FULL 0
-#define AT803X_CLK_OUT_STRENGTH_HALF 1
-#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
-
-#define AT803X_DEFAULT_DOWNSHIFT 5
-#define AT803X_MIN_DOWNSHIFT 2
-#define AT803X_MAX_DOWNSHIFT 9
-
-#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
-#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
-#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
-#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
-
-#define ATH9331_PHY_ID 0x004dd041
-#define ATH8030_PHY_ID 0x004dd076
-#define ATH8031_PHY_ID 0x004dd074
-#define ATH8032_PHY_ID 0x004dd023
-#define ATH8035_PHY_ID 0x004dd072
-#define AT8030_PHY_ID_MASK 0xffffffef
-
-#define QCA8081_PHY_ID 0x004dd101
-
-#define QCA8327_A_PHY_ID 0x004dd033
-#define QCA8327_B_PHY_ID 0x004dd034
-#define QCA8337_PHY_ID 0x004dd036
-#define QCA9561_PHY_ID 0x004dd042
-#define QCA8K_PHY_ID_MASK 0xffffffff
-
-#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
-
-#define AT803X_PAGE_FIBER 0
-#define AT803X_PAGE_COPPER 1
-
-/* don't turn off internal PLL */
-#define AT803X_KEEP_PLL_ENABLED BIT(0)
-#define AT803X_DISABLE_SMARTEEE BIT(1)
-
-/* disable hibernation mode */
-#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
-
-/* ADC threshold */
-#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
-#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
-#define QCA808X_ADC_THRESHOLD_80MV 0
-#define QCA808X_ADC_THRESHOLD_100MV 0xf0
-#define QCA808X_ADC_THRESHOLD_200MV 0x0f
-#define QCA808X_ADC_THRESHOLD_300MV 0xff
-
-/* CLD control */
-#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
-#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
-#define QCA808X_8023AZ_AFE_EN 0x90
-
-/* AZ control */
-#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
-#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
-#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
-#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
-#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
-#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
-#define QCA808X_TOP_OPTION1_DATA 0x0
-
-#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
-#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
-#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
-#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
-#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
-#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
-#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
-#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
-#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
-#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
-#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
-#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
-
-/* master/slave seed config */
-#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
-#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
-#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
-#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
-
-/* Hibernation yields lower power consumption than normal operation mode.
- * When the copper cable is unplugged, the PHY enters hibernation mode in about 10s.
- */
-#define QCA808X_DBG_AN_TEST 0xb
-#define QCA808X_HIBERNATION_EN BIT(15)
-
-#define QCA808X_CDT_ENABLE_TEST BIT(15)
-#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
-#define QCA808X_CDT_STATUS BIT(11)
-#define QCA808X_CDT_LENGTH_UNIT BIT(10)
-
-#define QCA808X_MMD3_CDT_STATUS 0x8064
-#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
-#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
-#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
-#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
-
-#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
-#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
-#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
-#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
-
-#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
-#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
-#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
-
-#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
-#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
-
-/* NORMAL are MDI with type set to 0 */
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-
-/* Added for reference of existence but should be handled by wait_for_completion already */
-#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
-
-/* QCA808X 1G chip type */
-#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
-#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
-
-#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
-#define QCA8081_PHY_FIFO_RSTN BIT(11)
-
-MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
-MODULE_AUTHOR("Matus Ujhelyi");
-MODULE_LICENSE("GPL");
-
-enum stat_access_type {
- PHY,
- MMD
-};
-
-struct at803x_hw_stat {
- const char *string;
- u8 reg;
- u32 mask;
- enum stat_access_type access_type;
-};
-
-static struct at803x_hw_stat qca83xx_hw_stats[] = {
- { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
- { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
- { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
-};
-
-struct at803x_ss_mask {
- u16 speed_mask;
- u8 speed_shift;
-};
-
-struct at803x_priv {
- int flags;
- u16 clk_25m_reg;
- u16 clk_25m_mask;
- u8 smarteee_lpi_tw_1g;
- u8 smarteee_lpi_tw_100m;
- bool is_fiber;
- bool is_1000basex;
- struct regulator_dev *vddio_rdev;
- struct regulator_dev *vddh_rdev;
- u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
-};
-
-struct at803x_context {
- u16 bmcr;
- u16 advertise;
- u16 control1000;
- u16 int_enable;
- u16 smart_speed;
- u16 led_control;
-};
-
-static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, data);
-}
-
-static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_read(phydev, AT803X_DEBUG_DATA);
-}
-
-static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
- u16 clear, u16 set)
-{
- u16 val;
- int ret;
-
- ret = at803x_debug_reg_read(phydev, reg);
- if (ret < 0)
- return ret;
-
- val = ret & 0xffff;
- val &= ~clear;
- val |= set;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, val);
-}
-
-static int at803x_write_page(struct phy_device *phydev, int page)
-{
- int mask;
- int set;
-
- if (page == AT803X_PAGE_COPPER) {
- set = AT803X_BT_BX_REG_SEL;
- mask = 0;
- } else {
- set = 0;
- mask = AT803X_BT_BX_REG_SEL;
- }
-
- return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
-}
-
-static int at803x_read_page(struct phy_device *phydev)
-{
- int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
-
- if (ccr < 0)
- return ccr;
-
- if (ccr & AT803X_BT_BX_REG_SEL)
- return AT803X_PAGE_COPPER;
-
- return AT803X_PAGE_FIBER;
-}
-
-static int at803x_enable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
- AT803X_DEBUG_RX_CLK_DLY_EN);
-}
-
-static int at803x_enable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
- AT803X_DEBUG_TX_CLK_DLY_EN);
-}
-
-static int at803x_disable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- AT803X_DEBUG_RX_CLK_DLY_EN, 0);
-}
-
-static int at803x_disable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
- AT803X_DEBUG_TX_CLK_DLY_EN, 0);
-}
-
-/* save relevant PHY registers to private copy */
-static void at803x_context_save(struct phy_device *phydev,
- struct at803x_context *context)
-{
- context->bmcr = phy_read(phydev, MII_BMCR);
- context->advertise = phy_read(phydev, MII_ADVERTISE);
- context->control1000 = phy_read(phydev, MII_CTRL1000);
- context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
- context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
- context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
-}
-
-/* restore relevant PHY registers from private copy */
-static void at803x_context_restore(struct phy_device *phydev,
- const struct at803x_context *context)
-{
- phy_write(phydev, MII_BMCR, context->bmcr);
- phy_write(phydev, MII_ADVERTISE, context->advertise);
- phy_write(phydev, MII_CTRL1000, context->control1000);
- phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
- phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
- phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
-}
-
-static int at803x_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret, irq_enabled;
-
- if (wol->wolopts & WAKE_MAGIC) {
- struct net_device *ndev = phydev->attached_dev;
- const u8 *mac;
- unsigned int i;
- static const unsigned int offsets[] = {
- AT803X_LOC_MAC_ADDR_32_47_OFFSET,
- AT803X_LOC_MAC_ADDR_16_31_OFFSET,
- AT803X_LOC_MAC_ADDR_0_15_OFFSET,
- };
-
- if (!ndev)
- return -ENODEV;
-
- mac = (const u8 *)ndev->dev_addr;
-
- if (!is_valid_ether_addr(mac))
- return -EINVAL;
-
- for (i = 0; i < 3; i++)
- phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
- mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
-
- /* Enable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
- if (ret)
- return ret;
- } else {
- /* Disable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
- if (ret)
- return ret;
- }
-
- /* Clear WOL status */
- ret = phy_read(phydev, AT803X_INTR_STATUS);
- if (ret < 0)
- return ret;
-
- /* Check whether interrupts other than WOL were triggered while the PHY
- * is in interrupt mode; only the interrupts enabled by AT803X_INTR_ENABLE
- * can be passed up to the interrupt pin.
- */
- irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (irq_enabled < 0)
- return irq_enabled;
-
- irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
- if (ret & irq_enabled && !phy_polling_mode(phydev))
- phy_trigger_machine(phydev);
-
- return 0;
-}
-
-static void at803x_get_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int value;
-
- wol->supported = WAKE_MAGIC;
- wol->wolopts = 0;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- if (value < 0)
- return;
-
- if (value & AT803X_INTR_ENABLE_WOL)
- wol->wolopts |= WAKE_MAGIC;
-}
-
-static int qca83xx_get_sset_count(struct phy_device *phydev)
-{
- return ARRAY_SIZE(qca83xx_hw_stats);
-}
-
-static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
- strscpy(data + i * ETH_GSTRING_LEN,
- qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
-{
- struct at803x_hw_stat stat = qca83xx_hw_stats[i];
- struct at803x_priv *priv = phydev->priv;
- int val;
- u64 ret;
-
- if (stat.access_type == MMD)
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
- else
- val = phy_read(phydev, stat.reg);
-
- if (val < 0) {
- ret = U64_MAX;
- } else {
- val = val & stat.mask;
- priv->stats[i] += val;
- ret = priv->stats[i];
- }
-
- return ret;
-}
-
-static void qca83xx_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
- data[i] = qca83xx_get_stat(phydev, i);
-}
-
-static int at803x_suspend(struct phy_device *phydev)
-{
- int value;
- int wol_enabled;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- wol_enabled = value & AT803X_INTR_ENABLE_WOL;
-
- if (wol_enabled)
- value = BMCR_ISOLATE;
- else
- value = BMCR_PDOWN;
-
- phy_modify(phydev, MII_BMCR, 0, value);
-
- return 0;
-}
-
-static int at803x_resume(struct phy_device *phydev)
-{
- return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
-}
-
-static int at803x_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- u32 freq, strength, tw;
- unsigned int sel;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF_MDIO))
- return 0;
-
- if (of_property_read_bool(node, "qca,disable-smarteee"))
- priv->flags |= AT803X_DISABLE_SMARTEEE;
-
- if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
- priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_1g = tw;
- }
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_100m = tw;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
- if (!ret) {
- switch (freq) {
- case 25000000:
- sel = AT803X_CLK_OUT_25MHZ_XTAL;
- break;
- case 50000000:
- sel = AT803X_CLK_OUT_50MHZ_PLL;
- break;
- case 62500000:
- sel = AT803X_CLK_OUT_62_5MHZ_PLL;
- break;
- case 125000000:
- sel = AT803X_CLK_OUT_125MHZ_PLL;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-frequency\n");
- return -EINVAL;
- }
-
- priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
- priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
- if (!ret) {
- priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
- switch (strength) {
- case AR803X_STRENGTH_FULL:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
- break;
- case AR803X_STRENGTH_HALF:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
- break;
- case AR803X_STRENGTH_QUARTER:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-strength\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int at803x_probe(struct phy_device *phydev)
-{
- struct device *dev = &phydev->mdio.dev;
- struct at803x_priv *priv;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- phydev->priv = priv;
-
- ret = at803x_parse_dt(phydev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int at803x_get_features(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err;
-
- err = genphy_read_abilities(phydev);
- if (err)
- return err;
-
- if (phydev->drv->phy_id != ATH8031_PHY_ID)
- return 0;
-
- /* AR8031/AR8033 have different status registers
- * for copper and fiber operation. However, the
- * extended status register is the same for both
- * operation modes.
- *
- * As a result of that, ESTATUS_1000_XFULL is set
- * to 1 even when operating in copper TP mode.
- *
- * Remove this mode from the supported link modes
- * when not operating in 1000BaseX mode.
- */
- if (!priv->is_1000basex)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- phydev->supported);
-
- return 0;
-}
-
-static int at803x_smarteee_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- u16 mask = 0, val = 0;
- int ret;
-
- if (priv->flags & AT803X_DISABLE_SMARTEEE)
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
-
- if (priv->smarteee_lpi_tw_1g) {
- mask |= 0xff00;
- val |= priv->smarteee_lpi_tw_1g << 8;
- }
- if (priv->smarteee_lpi_tw_100m) {
- mask |= 0x00ff;
- val |= priv->smarteee_lpi_tw_100m;
- }
- if (!mask)
- return 0;
-
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
- mask, val);
- if (ret)
- return ret;
-
- return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
-}
-
-static int at803x_clk_out_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (!priv->clk_25m_mask)
- return 0;
-
- return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
- priv->clk_25m_mask, priv->clk_25m_reg);
-}
-
-static int at8031_pll_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is PLL OFF. After a soft reset, the
- * values are retained.
- */
- if (priv->flags & AT803X_KEEP_PLL_ENABLED)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_PLL_ON);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_PLL_ON, 0);
-}
-
-static int at803x_hibernation_mode_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is hibernation mode enabled. After
- * software reset, the value is retained.
- */
- if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
- return 0;
-
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
-}
-
-static int at803x_config_init(struct phy_device *phydev)
-{
- int ret;
-
- /* The RX and TX delay default is:
- * after HW reset: RX delay enabled and TX delay disabled
- * after SW reset: RX delay enabled, while TX delay retains the
- * value before reset.
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- ret = at803x_enable_rx_delay(phydev);
- else
- ret = at803x_disable_rx_delay(phydev);
- if (ret < 0)
- return ret;
-
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- ret = at803x_enable_tx_delay(phydev);
- else
- ret = at803x_disable_tx_delay(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_smarteee_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_clk_out_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_hibernation_mode_config(phydev);
- if (ret < 0)
- return ret;
-
- /* The AR803x extended next page bit is enabled by default. Cisco
- * multigig switches read this bit and attempt to negotiate 10Gbps
- * rates even if the next page bit is disabled. This is incorrect
- * behaviour but we still need to accommodate it. XNP is only needed
- * for 10Gbps support, so disable XNP.
- */
- return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
-}
-
-static int at803x_ack_interrupt(struct phy_device *phydev)
-{
- int err;
-
- err = phy_read(phydev, AT803X_INTR_STATUS);
-
- return (err < 0) ? err : 0;
-}
-
-static int at803x_config_intr(struct phy_device *phydev)
-{
- int err;
- int value;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
- value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
- value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
- value |= AT803X_INTR_ENABLE_LINK_FAIL;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
-
- err = phy_write(phydev, AT803X_INTR_ENABLE, value);
- } else {
- err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
- if (err)
- return err;
-
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- }
-
- return err;
-}
-
-static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status, int_enabled;
-
- irq_status = phy_read(phydev, AT803X_INTR_STATUS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* Read the current enabled interrupts */
- int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (int_enabled < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* See if this was one of our enabled interrupts */
- if (!(irq_status & int_enabled))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
-static void at803x_link_change_notify(struct phy_device *phydev)
-{
- /*
- * Conduct a hardware reset for AT8030 every time a link loss is
- * signalled. This is necessary to circumvent a hardware bug that
- * occurs when the cable is unplugged while TX packets are pending
- * in the FIFO. In such cases, the FIFO enters an error mode it
- * cannot recover from by software.
- */
- if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
- struct at803x_context context;
-
- at803x_context_save(phydev, &context);
-
- phy_device_reset(phydev, 1);
- usleep_range(1000, 2000);
- phy_device_reset(phydev, 0);
- usleep_range(1000, 2000);
-
- at803x_context_restore(phydev, &context);
-
- phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
- }
-}
-
-static int at803x_read_specific_status(struct phy_device *phydev,
- struct at803x_ss_mask ss_mask)
-{
- int ss;
-
- /* Read the AT8035 PHY-Specific Status register, which indicates the
- * speed and duplex that the PHY is actually using, irrespective of
- * whether we are in autoneg mode or not.
- */
- ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
- if (ss < 0)
- return ss;
-
- if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
- int sfc, speed;
-
- sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
- if (sfc < 0)
- return sfc;
-
- speed = ss & ss_mask.speed_mask;
- speed >>= ss_mask.speed_shift;
-
- switch (speed) {
- case AT803X_SS_SPEED_10:
- phydev->speed = SPEED_10;
- break;
- case AT803X_SS_SPEED_100:
- phydev->speed = SPEED_100;
- break;
- case AT803X_SS_SPEED_1000:
- phydev->speed = SPEED_1000;
- break;
- case QCA808X_SS_SPEED_2500:
- phydev->speed = SPEED_2500;
- break;
- }
- if (ss & AT803X_SS_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (ss & AT803X_SS_MDIX)
- phydev->mdix = ETH_TP_MDI_X;
- else
- phydev->mdix = ETH_TP_MDI;
-
- switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
- case AT803X_SFC_MANUAL_MDI:
- phydev->mdix_ctrl = ETH_TP_MDI;
- break;
- case AT803X_SFC_MANUAL_MDIX:
- phydev->mdix_ctrl = ETH_TP_MDI_X;
- break;
- case AT803X_SFC_AUTOMATIC_CROSSOVER:
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- break;
- }
- }
-
- return 0;
-}
-
-static int at803x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int err, old_link = phydev->link;
-
- /* Update the link, but return if there was an error */
- err = genphy_update_link(phydev);
- if (err)
- return err;
-
- /* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
- return 0;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- err = genphy_read_lpa(phydev);
- if (err < 0)
- return err;
-
- ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
- err = at803x_read_specific_status(phydev, ss_mask);
- if (err < 0)
- return err;
-
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
- phy_resolve_aneg_pause(phydev);
-
- return 0;
-}
-
-static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
-{
- u16 val;
-
- switch (ctrl) {
- case ETH_TP_MDI:
- val = AT803X_SFC_MANUAL_MDI;
- break;
- case ETH_TP_MDI_X:
- val = AT803X_SFC_MANUAL_MDIX;
- break;
- case ETH_TP_MDI_AUTO:
- val = AT803X_SFC_AUTOMATIC_CROSSOVER;
- break;
- default:
- return 0;
- }
-
- return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
- AT803X_SFC_MDI_CROSSOVER_MODE_M,
- FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
-}
-
-static int at803x_prepare_config_aneg(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
- if (ret < 0)
- return ret;
-
- /* Changes to the mdix bits are disruptive to normal operation;
- * therefore any changes to these registers must be followed by a
- * software reset to take effect.
- */
- if (ret == 1) {
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int at803x_config_aneg(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
- if (priv->is_1000basex)
- return genphy_c37_config_aneg(phydev);
-
- return genphy_config_aneg(phydev);
-}
-
-static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
-{
- int val;
-
- val = phy_read(phydev, AT803X_SMART_SPEED);
- if (val < 0)
- return val;
-
- if (val & AT803X_SMART_SPEED_ENABLE)
- *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
- else
- *d = DOWNSHIFT_DEV_DISABLE;
-
- return 0;
-}
-
-static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
-{
- u16 mask, set;
- int ret;
-
- switch (cnt) {
- case DOWNSHIFT_DEV_DEFAULT_COUNT:
- cnt = AT803X_DEFAULT_DOWNSHIFT;
- fallthrough;
- case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
- set = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER |
- FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
- mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
- break;
- case DOWNSHIFT_DEV_DISABLE:
- set = 0;
- mask = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER;
- break;
- default:
- return -EINVAL;
- }
-
- ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
-
- /* After changing the smart speed settings, we need to perform a
- * software reset; use phy_init_hw() to make sure we reapply any
- * values which might have been lost during the software reset.
- */
- if (ret == 1)
- ret = phy_init_hw(phydev);
-
- return ret;
-}
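/* A standalone sketch (not driver code) of the downshift encoding used
 * above: the retry-limit field stores "count - 2", so get/set round-trip
 * through that bias. The illustrative range 2..9 is an assumption, not
 * the driver's AT803X_MIN/MAX_DOWNSHIFT values. From userspace the
 * tunable is reachable with e.g. "ethtool --set-phy-tunable eth0
 * downshift on count 5" (assuming a reasonably recent ethtool).
 */
#include <assert.h>
#include <stdio.h>

static unsigned int count_to_field(unsigned int cnt)	{ return cnt - 2; }
static unsigned int field_to_count(unsigned int field)	{ return field + 2; }

int main(void)
{
	for (unsigned int cnt = 2; cnt <= 9; cnt++)
		assert(field_to_count(count_to_field(cnt)) == cnt);
	printf("downshift count encoding round-trips\n");
	return 0;
}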
-
-static int at803x_get_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_get_downshift(phydev, data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_set_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, const void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_set_downshift(phydev, *(const u8 *)data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_cable_test_result_trans(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case AT803X_CDT_STATUS_STAT_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case AT803X_CDT_STATUS_STAT_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case AT803X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static bool at803x_cdt_test_failed(u16 status)
-{
- return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
- AT803X_CDT_STATUS_STAT_FAIL;
-}
-
-static bool at803x_cdt_fault_length_valid(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_OPEN:
- case AT803X_CDT_STATUS_STAT_SHORT:
- return true;
- }
- return false;
-}
-
-static int at803x_cdt_fault_length(int dt)
-{
- /* According to the datasheet the distance to the fault is
- * DELTA_TIME * 0.824 meters.
- *
- * The author suspects the correct formula is:
- *
- * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
- *
- * where c is the speed of light, VF is the velocity factor of
- * the twisted pair cable and 125 MHz is the counter frequency;
- * we need to divide by 2 because the hardware will measure the
- * round trip time to the fault and back to the PHY.
- *
- * With a VF of 0.69 we get the factor 0.824 mentioned in the
- * datasheet.
- */
- return (dt * 824) / 10;
-}
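/* A standalone sketch (not driver code) checking the arithmetic in the
 * comment above: with the suspected formula and an assumed velocity
 * factor of 0.69, the metres-per-count factor comes out close to the
 * datasheet's 0.824.
 */
#include <stdio.h>

int main(void)
{
	const double c = 299792458.0;	/* speed of light, m/s */
	const double vf = 0.69;		/* assumed velocity factor */
	const double f = 125e6;		/* counter frequency, Hz */

	/* round-trip distance per count, halved for the one-way
	 * distance to the fault
	 */
	printf("%.3f m per count\n", c * vf / f / 2.0);
	return 0;
}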
-
-static int at803x_cdt_start(struct phy_device *phydev,
- u32 cdt_start)
-{
- return phy_write(phydev, AT803X_CDT, cdt_start);
-}
-
-static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
- u32 cdt_en)
-{
- int val, ret;
-
- /* One test run takes about 25ms */
- ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
- !(val & cdt_en),
- 30000, 100000, true);
-
- return ret < 0 ? ret : 0;
-}
-
-static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
-{
- static const int ethtool_pair[] = {
- ETHTOOL_A_CABLE_PAIR_A,
- ETHTOOL_A_CABLE_PAIR_B,
- ETHTOOL_A_CABLE_PAIR_C,
- ETHTOOL_A_CABLE_PAIR_D,
- };
- int ret, val;
-
- val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
- AT803X_CDT_ENABLE_TEST;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read(phydev, AT803X_CDT_STATUS);
- if (val < 0)
- return val;
-
- if (at803x_cdt_test_failed(val))
- return 0;
-
- ethnl_cable_test_result(phydev, ethtool_pair[pair],
- at803x_cable_test_result_trans(val));
-
- if (at803x_cdt_fault_length_valid(val)) {
- val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
- ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
- at803x_cdt_fault_length(val));
- }
-
- return 1;
-}
-
-static int at803x_cable_test_get_status(struct phy_device *phydev,
- bool *finished, unsigned long pair_mask)
-{
- int retries = 20;
- int pair, ret;
-
- *finished = false;
-
- /* According to the datasheet the CDT can be performed when
- * there is no link partner or when the link partner is
- * auto-negotiating. Starting the test will restart the AN
- * automatically. It seems that by doing this repeatedly we will
- * eventually get a slot where our link partner won't disturb our
- * measurement.
- */
- while (pair_mask && retries--) {
- for_each_set_bit(pair, &pair_mask, 4) {
- ret = at803x_cable_test_one_pair(phydev, pair);
- if (ret < 0)
- return ret;
- if (ret)
- clear_bit(pair, &pair_mask);
- }
- if (pair_mask)
- msleep(250);
- }
-
- *finished = true;
-
- return 0;
-}
-
-static void at803x_cable_test_autoneg(struct phy_device *phydev)
-{
- /* Enable auto-negotiation, but advertise no capabilities, so no
- * link will be established. A restart of the auto-negotiation is not
- * required, because the cable test will automatically break the link.
- */
- phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
- phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
-}
-
-static int at803x_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
-
- if (selector)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_RGMII_1V8);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_RGMII_1V8, 0);
-}
-
-static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
- int val;
-
- val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
- if (val < 0)
- return val;
-
- return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
-}
-
-static const struct regulator_ops vddio_regulator_ops = {
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
- .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
-};
-
-static const unsigned int vddio_voltage_table[] = {
- 1500000,
- 1800000,
-};
-
-static const struct regulator_desc vddio_desc = {
- .name = "vddio",
- .of_match = of_match_ptr("vddio-regulator"),
- .n_voltages = ARRAY_SIZE(vddio_voltage_table),
- .volt_table = vddio_voltage_table,
- .ops = &vddio_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static const struct regulator_ops vddh_regulator_ops = {
-};
-
-static const struct regulator_desc vddh_desc = {
- .name = "vddh",
- .of_match = of_match_ptr("vddh-regulator"),
- .n_voltages = 1,
- .fixed_uV = 2500000,
- .ops = &vddh_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static int at8031_register_regulators(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct regulator_config config = { };
-
- config.dev = dev;
- config.driver_data = phydev;
-
- priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
- if (IS_ERR(priv->vddio_rdev)) {
- phydev_err(phydev, "failed to register VDDIO regulator\n");
- return PTR_ERR(priv->vddio_rdev);
- }
-
- priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
- if (IS_ERR(priv->vddh_rdev)) {
- phydev_err(phydev, "failed to register VDDH regulator\n");
- return PTR_ERR(priv->vddh_rdev);
- }
-
- return 0;
-}
-
-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
-{
- struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- phy_interface_t iface;
-
- linkmode_zero(phy_support);
- phylink_set(phy_support, 1000baseX_Full);
- phylink_set(phy_support, 1000baseT_Full);
- phylink_set(phy_support, Autoneg);
- phylink_set(phy_support, Pause);
- phylink_set(phy_support, Asym_Pause);
-
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
- /* Some modules support 10G modes in addition to modes we support.
- * Mask out non-supported modes so the correct interface is picked.
- */
- linkmode_and(sfp_support, phy_support, sfp_support);
-
- if (linkmode_empty(sfp_support)) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
-
- iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
-
- /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
- * interface for use with SFP modules.
- * However, some copper modules detected as having a preferred SGMII
- * interface do default to and function in 1000Base-X mode, so just
- * print a warning and allow such modules, as they may have some chance
- * of working.
- */
- if (iface == PHY_INTERFACE_MODE_SGMII)
- dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
- else if (iface != PHY_INTERFACE_MODE_1000BASEX)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct sfp_upstream_ops at8031_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = at8031_sfp_insert,
-};
-
-static int at8031_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- if (of_property_read_bool(node, "qca,keep-pll-enabled"))
- priv->flags |= AT803X_KEEP_PLL_ENABLED;
-
- ret = at8031_register_regulators(phydev);
- if (ret < 0)
- return ret;
-
- ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
- "vddio");
- if (ret) {
- phydev_err(phydev, "failed to get VDDIO regulator\n");
- return ret;
- }
-
- /* Only AR8031/8033 support 1000Base-X for SFP modules */
- return phy_sfp_probe(phydev, &at8031_sfp_ops);
-}
-
-static int at8031_probe(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int mode_cfg;
- int ccr;
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- /* Only supported on the AR8031/AR8033; the AR8030/AR8035 use strapping
- * options.
- */
- ret = at8031_parse_dt(phydev);
- if (ret)
- return ret;
-
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- if (ccr < 0)
- return ccr;
- mode_cfg = ccr & AT803X_MODE_CFG_MASK;
-
- switch (mode_cfg) {
- case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
- case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
- priv->is_1000basex = true;
- fallthrough;
- case AT803X_MODE_CFG_FX100_RGMII_50OHM:
- case AT803X_MODE_CFG_FX100_RGMII_75OHM:
- priv->is_fiber = true;
- break;
- }
-
- /* Disable WoL in the 1588 register, which is enabled
- * by default
- */
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-}
-
-static int at8031_config_init(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- /* Some bootloaders leave the fiber page selected.
- * Switch to the appropriate page (fiber or copper), as otherwise we
- * read the PHY capabilities from the wrong page.
- */
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev,
- priv->is_fiber ? AT803X_PAGE_FIBER :
- AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
- return ret;
-
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
-
- return at803x_config_init(phydev);
-}
-
-static int at8031_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret;
-
- /* First set up the MAC address and enable the WOL interrupt */
- ret = at803x_set_wol(phydev, wol);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- /* Enable WOL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- else
- /* Disable WoL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-
- return ret;
-}
-
-static int at8031_config_intr(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err, value = 0;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
- priv->is_fiber) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
-
- err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
- if (err)
- return err;
- }
-
- return at803x_config_intr(phydev);
-}
-
-/* AR8031 and AR8033 share the same read status logic */
-static int at8031_read_status(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (priv->is_1000basex)
- return genphy_c37_read_status(phydev);
-
- return at803x_read_status(phydev);
-}
-
-/* AR8031 and AR8035 share the same cable test get status reg */
-static int at8031_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0xf);
-}
-
-/* AR8031 and AR8035 share the same cable test start logic */
-static int at8031_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- phy_write(phydev, MII_CTRL1000, 0);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
-static int at8032_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0x3);
-}
-
-static int at8035_parse_dt(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The mask is set by the generic at803x_parse_dt if the
- * property is present. Assume the property is present when
- * the mask is non-zero.
- */
- if (priv->clk_25m_mask) {
- /* Fixup for the AR8030/AR8035. This chip uses a different mask
- * and doesn't support the DSP reference, i.e. the lowest bit of
- * the mask. The upper two bits select the same frequencies, so
- * mask out the lowest bit here.
- *
- * Warning:
- * There was no datasheet for the AR8030 available, so this is
- * just a guess. But the AR8035 is listed as pin compatible
- * to the AR8030, so there is a good chance it works on
- * the AR8030 too.
- */
- priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
- }
-
- return 0;
-}
-
-/* AR8030 and AR8035 share the same special mask for clk_25m */
-static int at8035_probe(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- return at8035_parse_dt(phydev);
-}
-
-static int qca83xx_config_init(struct phy_device *phydev)
-{
- u8 switch_revision;
-
- switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
-
- switch (switch_revision) {
- case 1:
- /* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
- /* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
- break;
-
- case 2:
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
- fallthrough;
- case 4:
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
- break;
- }
-
- /* Following the original QCA source code, set the port to prefer master */
- phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-
- return 0;
-}
-
-static int qca8327_config_init(struct phy_device *phydev)
-{
- /* The QCA8327 requires the DAC amplitude adjustment for 100M to
- * be set to +6%. Disable it on init and enable it only at 100M
- * speed, following the original QCA source code.
- */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
-
- return qca83xx_config_init(phydev);
-}
-
-static void qca83xx_link_change_notify(struct phy_device *phydev)
-{
- /* Set the DAC amplitude adjustment to +6% for 100M while the link is running */
- if (phydev->state == PHY_RUNNING) {
- if (phydev->speed == SPEED_100)
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN,
- QCA8327_DEBUG_MANU_CTRL_EN);
- } else {
- /* Reset DAC Amplitude adjustment */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
- }
-}
-
-static int qca83xx_resume(struct phy_device *phydev)
-{
- int ret, val;
-
- /* Skip reset if not suspended */
- if (!phydev->suspended)
- return 0;
-
- /* Reinit the port, reset values set by suspend */
- qca83xx_config_init(phydev);
-
- /* Reset the port on port resume */
- phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
-
- /* On resume from suspend the switch executes a reset and
- * restarts auto-negotiation. Wait for the reset to complete.
- */
- ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
- 50000, 600000, true);
- if (ret)
- return ret;
-
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-static int qca83xx_suspend(struct phy_device *phydev)
-{
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
- AT803X_DEBUG_GATE_CLK_IN1000, 0);
-
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
- AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
-
- return 0;
-}
-
-static int qca8337_suspend(struct phy_device *phydev)
-{
- /* Only the QCA8337 supports actual suspend. */
- genphy_suspend(phydev);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca8327_suspend(struct phy_device *phydev)
-{
- u16 mask = 0;
-
- /* The QCA8327 causes port unreliability when PHY suspend
- * is set.
- */
- mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
- phy_modify(phydev, MII_BMCR, mask, 0);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
-{
- int ret;
-
- /* Enable fast retrain */
- ret = genphy_c45_fast_retrain(phydev, true);
- if (ret)
- return ret;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
- QCA808X_TOP_OPTION1_DATA);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
- QCA808X_MSE_THRESHOLD_20DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
- QCA808X_MSE_THRESHOLD_17DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
- QCA808X_MSE_THRESHOLD_27DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
- QCA808X_MSE_THRESHOLD_28DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
- QCA808X_MMD3_DEBUG_1_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
- QCA808X_MMD3_DEBUG_4_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
- QCA808X_MMD3_DEBUG_5_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
- QCA808X_MMD3_DEBUG_3_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
- QCA808X_MMD3_DEBUG_6_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
- QCA808X_MMD3_DEBUG_2_VALUE);
-
- return 0;
-}
-
-static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
-{
- u16 seed_value;
-
- if (!enable)
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
-
- seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
- FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
- QCA808X_MASTER_SLAVE_SEED_ENABLE);
-}
-
-static bool qca808x_is_prefer_master(struct phy_device *phydev)
-{
- return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
- (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
-}
-
-static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
-{
- return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-}
-
-static int qca808x_config_init(struct phy_device *phydev)
-{
- int ret;
-
- /* Activate ADC and VGA on 802.3az for the 1000M and 100M links */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
- QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
- if (ret)
- return ret;
-
- /* Adjust the 802.3az threshold for the 1000M link */
- ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
- QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
- QCA808X_MMD3_AZ_TRAINING_VAL);
- if (ret)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
- /* Configure fast retrain for the 2500M link */
- ret = qca808x_phy_fast_retrain_config(phydev);
- if (ret)
- return ret;
-
- ret = genphy_read_master_slave(phydev);
- if (ret < 0)
- return ret;
-
- if (!qca808x_is_prefer_master(phydev)) {
- /* Enable the seed and configure a lower random seed so the
- * PHY links in slave mode.
- */
- ret = qca808x_phy_ms_seed_enable(phydev, true);
- if (ret)
- return ret;
- }
- }
-
- /* Configure the ADC threshold as 100mV for the 10M link */
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
- QCA808X_ADC_THRESHOLD_MASK,
- QCA808X_ADC_THRESHOLD_100MV);
-}
-
-static int qca808x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
- if (ret < 0)
- return ret;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
- ret & MDIO_AN_10GBT_STAT_LP2_5G);
-
- ret = genphy_read_status(phydev);
- if (ret)
- return ret;
-
- /* The qca8081 uses different bits for the speed value than the at803x */
- ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
- ret = at803x_read_specific_status(phydev, ss_mask);
- if (ret < 0)
- return ret;
-
- if (phydev->link) {
- if (phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
- } else {
- /* Generate the seed as a lower random value to make the PHY link as
- * SLAVE easily, except when a master/slave configuration fault is
- * detected or master mode is preferred.
- *
- * The reason for not putting this code into link_change_notify is
- * the corner case where the link partner is also a qca8081 PHY and
- * both seeds are configured to the same value: the link can't come
- * up and no link change occurs.
- */
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
- qca808x_is_prefer_master(phydev)) {
- qca808x_phy_ms_seed_enable(phydev, false);
- } else {
- qca808x_phy_ms_seed_enable(phydev, true);
- }
- }
- }
-
- return 0;
-}
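/* A toy model (not driver behaviour) of the corner case described in the
 * comment above: two identical PHYs holding the same seed never resolve
 * master/slave, while re-randomizing on every failed attempt breaks the
 * tie almost immediately. Seed range and values are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int a = 5, b = 5;	/* equal seeds: resolution ties */
	int tries = 0;

	srand(1);
	while (a == b) {		/* re-seed after each failure */
		a = rand() % 1024;
		b = rand() % 1024;
		tries++;
	}
	printf("tie broken after %d re-seed(s)\n", tries);
	return 0;
}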
-
-static int qca808x_soft_reset(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev))
- ret = qca808x_phy_ms_seed_enable(phydev, true);
-
- return ret;
-}
-
-static bool qca808x_cdt_fault_length_valid(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return true;
- default:
- return false;
- }
-}
-
-static int qca808x_cable_test_result_trans(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
- case QCA808X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
- int result)
-{
- int val;
- u32 cdt_length_reg = 0;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
- break;
- default:
- return -EINVAL;
- }
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
- if (val < 0)
- return val;
-
- if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
- else
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
-
- return at803x_cdt_fault_length(val);
-}
-
-static int qca808x_cable_test_start(struct phy_device *phydev)
-{
- int ret;
-
- /* Perform CDT with the following configuration:
- * 1. disable hibernation.
- * 2. force the PHY to work in MDI mode.
- * 3. force the PHY to work in 1000BaseT.
- * 4. configure the thresholds.
- */
-
- ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
- if (ret < 0)
- return ret;
-
- ret = at803x_config_mdix(phydev, ETH_TP_MDI);
- if (ret < 0)
- return ret;
-
- /* Forcing 1000Base-T requires configuring both PMA/PMD and MII_BMCR */
- phydev->duplex = DUPLEX_FULL;
- phydev->speed = SPEED_1000;
- ret = genphy_c45_pma_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- ret = genphy_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- /* configure the thresholds for open, short, pair ok test */
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
-
- return 0;
-}
-
-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
- u16 status)
-{
- int length, result;
- u16 pair_code;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
- break;
- default:
- return -EINVAL;
- }
-
- result = qca808x_cable_test_result_trans(pair_code);
- ethnl_cable_test_result(phydev, pair, result);
-
- if (qca808x_cdt_fault_length_valid(pair_code)) {
- length = qca808x_cdt_fault_length(phydev, pair, result);
- ethnl_cable_test_fault_length(phydev, pair, length);
- }
-
- return 0;
-}
-
-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
-{
- int ret, val;
-
- *finished = false;
-
- val = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
- if (val < 0)
- return val;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
- if (ret)
- return ret;
-
- *finished = true;
-
- return 0;
-}
-
-static int qca808x_get_features(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_c45_pma_read_abilities(phydev);
- if (ret)
- return ret;
-
- /* The autoneg ability is not reported in bit 3 of MMD7.1,
- * but it is supported by the qca808x PHY, so we add it here
- * manually.
- */
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
-
- /* On the qca8081 1G version chip, the 2500baseT ability is still
- * reported in bit 0 of MMD1.21, so we need to remove it manually
- * when bit 0 of MMD7.0x901d identifies the 1G chip.
- */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
- if (ret < 0)
- return ret;
-
- if (QCA808X_PHY_CHIP_TYPE_1G & ret)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-
- return 0;
-}
-
-static int qca808x_config_aneg(struct phy_device *phydev)
-{
- int phy_ctrl = 0;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
- /* The MII_BMCR register also needs to be configured for forced
- * mode, so genphy_config_aneg is still needed.
- */
- if (phydev->autoneg == AUTONEG_DISABLE)
- genphy_c45_pma_setup_forced(phydev);
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
- phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
-
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
- if (ret < 0)
- return ret;
-
- return __genphy_config_aneg(phydev, ret);
-}
-
-static void qca808x_link_change_notify(struct phy_device *phydev)
-{
- /* Assert the SGMII interface FIFO on link down and deassert it on
- * link up; the interface device address is always the PHY address
- * plus 1.
- */
- mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
- MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
- QCA8081_PHY_FIFO_RSTN,
- phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
-}
-
-static struct phy_driver at803x_driver[] = {
-{
- /* Qualcomm Atheros AR8035 */
- PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
- .name = "Qualcomm Atheros AR8035",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8035_probe,
- .config_aneg = at803x_config_aneg,
- .config_init = at803x_config_init,
- .soft_reset = genphy_soft_reset,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_GBIT_FEATURES */
- .read_status = at803x_read_status,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8030 */
- .phy_id = ATH8030_PHY_ID,
- .name = "Qualcomm Atheros AR8030",
- .phy_id_mask = AT8030_PHY_ID_MASK,
- .probe = at8035_probe,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
-}, {
- /* Qualcomm Atheros AR8031/AR8033 */
- PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
- .name = "Qualcomm Atheros AR8031/AR8033",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8031_probe,
- .config_init = at8031_config_init,
- .config_aneg = at803x_config_aneg,
- .soft_reset = genphy_soft_reset,
- .set_wol = at8031_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .read_page = at803x_read_page,
- .write_page = at803x_write_page,
- .get_features = at803x_get_features,
- .read_status = at8031_read_status,
- .config_intr = at8031_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8032 */
- PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
- .name = "Qualcomm Atheros AR8032",
- .probe = at803x_probe,
- .flags = PHY_POLL_CABLE_TEST,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
-}, {
- /* ATHEROS AR9331 */
- PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
- .name = "Qualcomm Atheros AR9331 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* Qualcomm Atheros QCA9561 */
- PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
- .name = "Qualcomm Atheros QCA9561 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8337 internal PHY",
- /* PHY_GBIT_FEATURES */
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8337_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-A from switch QCA8327-AL1A */
- .phy_id = QCA8327_A_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-A internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-B from switch QCA8327-BL1A */
- .phy_id = QCA8327_B_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-B internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* Qualcomm QCA8081 */
- PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
- .name = "Qualcomm QCA8081",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .get_features = qca808x_get_features,
- .config_aneg = qca808x_config_aneg,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .read_status = qca808x_read_status,
- .config_init = qca808x_config_init,
- .soft_reset = qca808x_soft_reset,
- .cable_test_start = qca808x_cable_test_start,
- .cable_test_get_status = qca808x_cable_test_get_status,
- .link_change_notify = qca808x_link_change_notify,
-}, };
-
-module_phy_driver(at803x_driver);
-
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
- { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
- { }
-};
-
-MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 312a8bb35d78..370e4ed45098 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -665,10 +665,11 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
static int bcm54616s_read_status(struct phy_device *phydev)
{
struct bcm54616s_phy_priv *priv = phydev->priv;
+ bool changed;
int err;
if (priv->mode_1000bx_en)
- err = genphy_c37_read_status(phydev);
+ err = genphy_c37_read_status(phydev, &changed);
else
err = genphy_read_status(phydev);
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index b7cb71817780..95178e26a060 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#define DP83822_PHY_ID 0x2000a240
#define DP83825S_PHY_ID 0x2000a140
@@ -34,6 +35,10 @@
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
+/* DP83826 specific registers */
+#define MII_DP83826_VOD_CFG1 0x30b
+#define MII_DP83826_VOD_CFG2 0x30c
+
/* GENCFG */
#define DP83822_SIG_DET_LOW BIT(0)
@@ -95,6 +100,8 @@
#define DP83822_WOL_CLR_INDICATION BIT(11)
/* RCSR bits */
+#define DP83822_RMII_MODE_EN BIT(5)
+#define DP83822_RMII_MODE_SEL BIT(7)
#define DP83822_RGMII_MODE_EN BIT(9)
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
@@ -110,6 +117,19 @@
#define DP83822_RX_ER_STR_MASK GENMASK(9, 8)
#define DP83822_RX_ER_SHIFT 8
+/* DP83826: VOD_CFG1 & VOD_CFG2 */
+#define DP83826_VOD_CFG1_MINUS_MDIX_MASK GENMASK(13, 12)
+#define DP83826_VOD_CFG1_MINUS_MDI_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_MINUS_MDIX_MASK GENMASK(15, 12)
+#define DP83826_VOD_CFG2_PLUS_MDIX_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_PLUS_MDI_MASK GENMASK(5, 0)
+#define DP83826_CFG_DAC_MINUS_MDIX_5_TO_4 GENMASK(5, 4)
+#define DP83826_CFG_DAC_MINUS_MDIX_3_TO_0 GENMASK(3, 0)
+#define DP83826_CFG_DAC_PERCENT_PER_STEP 625
+#define DP83826_CFG_DAC_PERCENT_DEFAULT 10000
+#define DP83826_CFG_DAC_MINUS_DEFAULT 0x30
+#define DP83826_CFG_DAC_PLUS_DEFAULT 0x10
+
#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
@@ -118,6 +138,8 @@ struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
u16 fx_sd_enable;
+ u8 cfg_dac_minus;
+ u8 cfg_dac_plus;
};
static int dp83822_set_wol(struct phy_device *phydev,
@@ -233,7 +255,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
@@ -254,7 +276,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
@@ -474,6 +496,85 @@ static int dp83822_config_init(struct phy_device *phydev)
return dp8382x_disable_wol(phydev);
}
+static int dp83826_config_rmii_mode(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
+ int ret;
+
+ if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
+ if (strcmp(of_val, "master") == 0) {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else if (strcmp(of_val, "slave") == 0) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else {
+ phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
+ of_val);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dp83826_config_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ u16 val, mask;
+ int ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+
+ ret = dp83826_config_rmii_mode(phydev);
+ if (ret)
+ return ret;
+ } else {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_minus != DP83826_CFG_DAC_MINUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDI_MASK, dp83822->cfg_dac_minus) |
+ FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(DP83826_VOD_CFG2_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_plus != DP83826_CFG_DAC_PLUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
+ FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
+ mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ return dp8382x_disable_wol(phydev);
+}
+
static int dp8382x_config_init(struct phy_device *phydev)
{
return dp8382x_disable_wol(phydev);
@@ -509,11 +610,44 @@ static int dp83822_of_init(struct phy_device *phydev)
return 0;
}
+
+static int dp83826_to_dac_minus_one_regval(int percent)
+{
+ int tmp = DP83826_CFG_DAC_PERCENT_DEFAULT - percent;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
+
+static int dp83826_to_dac_plus_one_regval(int percent)
+{
+ int tmp = percent - DP83826_CFG_DAC_PERCENT_DEFAULT;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
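/* A worked example (not part of the patch) of the conversion above: the
 * property values are in hundredths of a percent, with 10000 meaning
 * 100.00% and one register step worth 6.25%. A hypothetical
 * "ti,cfg-dac-minus-one-bp" value of 9375 therefore lands one step below
 * the default, as consumed by dp83826_of_init() below.
 */
#include <stdio.h>

#define PERCENT_PER_STEP	625	/* hundredths of a percent */
#define PERCENT_DEFAULT		10000	/* 100.00% */

int main(void)
{
	int bp = 9375;	/* hypothetical device-tree property value */

	printf("steps below default: %d\n",
	       (PERCENT_DEFAULT - bp) / PERCENT_PER_STEP);	/* 1 */
	return 0;
}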
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ u32 val;
+
+ dp83822->cfg_dac_minus = DP83826_CFG_DAC_MINUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-minus-one-bp", &val))
+ dp83822->cfg_dac_minus += dp83826_to_dac_minus_one_regval(val);
+
+ dp83822->cfg_dac_plus = DP83826_CFG_DAC_PLUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-plus-one-bp", &val))
+ dp83822->cfg_dac_plus += dp83826_to_dac_plus_one_regval(val);
+}
#else
static int dp83822_of_init(struct phy_device *phydev)
{
return 0;
}
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+}
#endif /* CONFIG_OF_MDIO */
static int dp83822_read_straps(struct phy_device *phydev)
@@ -567,6 +701,22 @@ static int dp83822_probe(struct phy_device *phydev)
return 0;
}
+static int dp83826_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+
+ dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
+ GFP_KERNEL);
+ if (!dp83822)
+ return -ENOMEM;
+
+ phydev->priv = dp83822;
+
+ dp83826_of_init(phydev);
+
+ return 0;
+}
+
static int dp83822_suspend(struct phy_device *phydev)
{
int value;
@@ -610,6 +760,22 @@ static int dp83822_resume(struct phy_device *phydev)
.resume = dp83822_resume, \
}
+#define DP83826_PHY_DRIVER(_id, _name) \
+ { \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ /* PHY_BASIC_FEATURES */ \
+ .probe = dp83826_probe, \
+ .soft_reset = dp83822_phy_reset, \
+ .config_init = dp83826_config_init, \
+ .get_wol = dp83822_get_wol, \
+ .set_wol = dp83822_set_wol, \
+ .config_intr = dp83822_config_intr, \
+ .handle_interrupt = dp83822_handle_interrupt, \
+ .suspend = dp83822_suspend, \
+ .resume = dp83822_resume, \
+ }
+
#define DP8382X_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -628,8 +794,8 @@ static int dp83822_resume(struct phy_device *phydev)
static struct phy_driver dp83822_driver[] = {
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP8382X_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
- DP8382X_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
- DP8382X_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+ DP83826_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+ DP83826_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
DP8382X_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
DP8382X_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
DP8382X_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 5f08f9d38bd7..4120385c5a79 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -158,6 +158,7 @@
/* LED_DRV bits */
#define DP83867_LED_DRV_EN(x) BIT((x) * 4)
#define DP83867_LED_DRV_VAL(x) BIT((x) * 4 + 1)
+#define DP83867_LED_POLARITY(x) BIT((x) * 4 + 2)
#define DP83867_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4))
#define DP83867_LED_FN_MASK(idx) (0xf << ((idx) * 4))
@@ -1152,6 +1153,26 @@ static int dp83867_led_hw_control_get(struct phy_device *phydev, u8 index,
return 0;
}
+static int dp83867_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ /* Default active high */
+ u16 polarity = DP83867_LED_POLARITY(index);
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ polarity = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return phy_modify(phydev, DP83867_LEDCR2,
+ DP83867_LED_POLARITY(index), polarity);
+}
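/* A standalone check (not driver code) of the LEDCR2 bit layout implied
 * by the DP83867_LED_* macros above: each LED occupies a 4-bit group,
 * with drive-enable at bit 4n, drive-value at 4n+1 and polarity at 4n+2.
 */
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define LED_DRV_EN(x)	BIT((x) * 4)
#define LED_DRV_VAL(x)	BIT((x) * 4 + 1)
#define LED_POLARITY(x)	BIT((x) * 4 + 2)

int main(void)
{
	for (int led = 0; led < 4; led++)
		printf("LED%d: en=0x%04x val=0x%04x pol=0x%04x\n", led,
		       LED_DRV_EN(led), LED_DRV_VAL(led), LED_POLARITY(led));
	return 0;
}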
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -1184,6 +1205,7 @@ static struct phy_driver dp83867_driver[] = {
.led_hw_is_supported = dp83867_led_hw_is_supported,
.led_hw_control_set = dp83867_led_hw_control_set,
.led_hw_control_get = dp83867_led_hw_control_get,
+ .led_polarity_set = dp83867_led_polarity_set,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 1c3ff77de56b..6b4bd9883304 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -1,10 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell 88Q2XXX automotive 100BASE-T1/1000BASE-T1 PHY driver
+ *
+ * Derived from Marvell Q222x API
+ *
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
*/
#include <linux/ethtool_netlink.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#include <linux/hwmon.h>
+
+#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
#define MDIO_MMD_AN_MV_STAT 32769
#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
@@ -13,8 +20,38 @@
#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+#define MDIO_MMD_AN_MV_STAT2 32794
+#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
+#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
+#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
+
+#define MDIO_MMD_PCS_MV_INT_EN 32784
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_100BT1_GEN 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT_EN 0x0080
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2 32834
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK 0xc000
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3 32835
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK 0x00ff
+
#define MDIO_MMD_PCS_MV_100BT1_STAT1 33032
-#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00FF
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00ff
#define MDIO_MMD_PCS_MV_100BT1_STAT1_JABBER 0x0100
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LINK 0x0200
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX 0x1000
@@ -27,6 +64,71 @@
#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_RX_STAT 33328
+
+#define MDIO_MMD_PCS_MV_TDR_RESET 65226
+#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE 65241
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE 65242
+
+#define MDIO_MMD_PCS_MV_TDR_STATUS 65245
+#define MDIO_MMD_PCS_MV_TDR_STATUS_MASK 0x0003
+#define MDIO_MMD_PCS_MV_TDR_STATUS_OFF 0x0001
+#define MDIO_MMD_PCS_MV_TDR_STATUS_ON 0x0002
+#define MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK 0x00f0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT 0x0030
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN 0x00e0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK 0x0070
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_IN_PROGR 0x0080
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_NOISE 0x0050
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+
+struct mmd_val {
+ int devad;
+ u32 regnum;
+ u16 val;
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq0[] = {
+ { MDIO_MMD_PCS, 0x8033, 0x6801 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER | MDIO_PMA_CTRL1_SPEED1000 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x48 },
+ { MDIO_MMD_PCS, 0xffe4, 0x6b6 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0 },
+ { MDIO_MMD_PCS, MDIO_CTRL1, 0x0 },
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq1[] = {
+ { MDIO_MMD_PCS, 0xfe79, 0x0 },
+ { MDIO_MMD_PCS, 0xfe07, 0x125a },
+ { MDIO_MMD_PCS, 0xfe09, 0x1288 },
+ { MDIO_MMD_PCS, 0xfe08, 0x2588 },
+ { MDIO_MMD_PCS, 0xfe11, 0x1105 },
+ { MDIO_MMD_PCS, 0xfe72, 0x042c },
+ { MDIO_MMD_PCS, 0xfbba, 0xcb2 },
+ { MDIO_MMD_PCS, 0xfbbb, 0xc4a },
+ { MDIO_MMD_AN, 0x8032, 0x2020 },
+ { MDIO_MMD_AN, 0x8031, 0xa28 },
+ { MDIO_MMD_AN, 0x8031, 0xc28 },
+ { MDIO_MMD_PCS, 0xffdb, 0xfc10 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x58 },
+ { MDIO_MMD_PCS, 0xfe79, 0x4 },
+ { MDIO_MMD_PCS, 0xfe5f, 0xe8 },
+ { MDIO_MMD_PCS, 0xfe05, 0x755c },
+};
+
static int mv88q2xxx_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -50,20 +152,23 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
/* Read vendor specific Auto-Negotiation status register to get local
* and remote receiver status according to software initialization
- * guide.
+ * guide. However, when not in polling mode the local and remote
+ * receiver status are not evaluated, following the Marvell 88Q2xxx APIs.
*/
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
if (ret < 0) {
return ret;
- } else if ((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
- (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) {
+ } else if (((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
+ (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) ||
+ !phy_polling_mode(phydev)) {
/* The link state is latched low so that momentary link
* drops can be detected. Do not double-read the status
* in polling mode to detect such short link drops except
* the link was already down.
*/
if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -71,7 +176,8 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
}
if (!link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -94,8 +200,20 @@ static int mv88q2xxx_read_link_100m(struct phy_device *phydev)
* the link was already down. In case we are not polling,
* we always read the realtime status.
*/
- if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1);
+ if (!phy_polling_mode(phydev)) {
+ phydev->link = false;
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_PCS_MV_100BT1_STAT2_LINK)
+ phydev->link = true;
+
+ return 0;
+ } else if (!phydev->link) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT1);
if (ret < 0)
return ret;
else if (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK)
@@ -120,24 +238,90 @@ out:
static int mv88q2xxx_read_link(struct phy_device *phydev)
{
- int ret;
-
/* The 88Q2XXX PHYs do not have the PMA/PMD status register available,
* therefore we need to read the link status from the vendor specific
* registers depending on the speed.
*/
+
if (phydev->speed == SPEED_1000)
- ret = mv88q2xxx_read_link_gbit(phydev);
+ return mv88q2xxx_read_link_gbit(phydev);
+ else if (phydev->speed == SPEED_100)
+ return mv88q2xxx_read_link_100m(phydev);
+
+ phydev->link = false;
+ return 0;
+}
+
+static int mv88q2xxx_read_master_slave_state(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_AN_MV_STAT_LOCAL_MASTER)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
else
- ret = mv88q2xxx_read_link_100m(phydev);
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ return 0;
+}
+
+static int mv88q2xxx_read_aneg_speed(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_MMD_AN_MV_STAT2_AN_RESOLVED))
+ return 0;
- return ret;
+ if (ret & MDIO_MMD_AN_MV_STAT2_100BT1)
+ phydev->speed = SPEED_100;
+ else if (ret & MDIO_MMD_AN_MV_STAT2_1000BT1)
+ phydev->speed = SPEED_1000;
+
+ return 0;
}
static int mv88q2xxx_read_status(struct phy_device *phydev)
{
int ret;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ /* We have to get the negotiated speed first, otherwise we are
+ * not able to read the link.
+ */
+ ret = mv88q2xxx_read_aneg_speed(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_link(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_baset1_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_master_slave_state(phydev);
+ if (ret < 0)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+
+ return 0;
+ }
+
ret = mv88q2xxx_read_link(phydev);
if (ret < 0)
return ret;
@@ -166,7 +350,9 @@ static int mv88q2xxx_get_features(struct phy_device *phydev)
* sequence provided by Marvell. Disable it for now until a proper
* workaround is found or a new PHY revision is released.
*/
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ if (phydev->drv->phy_id == MARVELL_PHY_ID_88Q2110)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
return 0;
}
@@ -179,28 +365,29 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
if (ret)
return ret;
- return mv88q2xxx_soft_reset(phydev);
+ return phydev->drv->soft_reset(phydev);
}
static int mv88q2xxx_config_init(struct phy_device *phydev)
{
- int ret;
-
/* The 88Q2XXX PHYs do have the extended ability register available, but
 * the MDIO_PMA_EXTABLE register, where they should signal it, does not
 * work according to the specification. Therefore, we force it here.
*/
phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
- /* Read the current PHY configuration */
- ret = genphy_c45_read_pma(phydev);
- if (ret)
- return ret;
+ /* Configure the interrupt with default settings; the output is driven
+ * low for an active interrupt and high for an inactive one.
+ */
+ if (phy_interrupt_is_valid(phydev))
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
- return mv88q2xxx_config_aneg(phydev);
+ return 0;
}
-static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi(struct phy_device *phydev)
{
int ret;
@@ -208,7 +395,8 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
/* Read the SQI from the vendor specific receiver status
* register
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8230);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RX_STAT);
if (ret < 0)
return ret;
@@ -218,7 +406,7 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
* but can be found in the Software Initialization Guide. Only
* revisions >= A0 are supported.
*/
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xFC5D, 0x00FF, 0x00AC);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xfc5d, 0xff, 0xac);
if (ret < 0)
return ret;
@@ -227,14 +415,386 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
return ret;
}
- return ret & 0x0F;
+ return ret & 0x0f;
}
-static int mv88q2xxxx_get_sqi_max(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi_max(struct phy_device *phydev)
{
return 15;
}
+static int mv88q2xxx_config_intr(struct phy_device *phydev)
+{
+ int ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Enable interrupts for 1000BASE-T1 link up and down events
+ * and enable general interrupts for 100BASE-T1.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN,
+ MDIO_MMD_PCS_MV_INT_EN_LINK_UP |
+ MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN |
+ MDIO_MMD_PCS_MV_INT_EN_100BT1);
+ if (ret < 0)
+ return ret;
+
+ /* Enable interrupts for 100BASE-T1 link events */
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN, 0);
+ }
+}
+
+static irqreturn_t mv88q2xxx_handle_interrupt(struct phy_device *phydev)
+{
+ bool trigger_machine = false;
+ int irq;
+
+ /* Before we can acknowledge the 100BT1 general interrupt, which is in
+ * the 1000BT1 interrupt status register, we have to acknowledge any
+ * interrupts that are related to it. Therefore we first read the 100BT1
+ * interrupt status register and then read the 1000BT1 interrupt status
+ * register.
+ */
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_COPPER_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 100BT1 */
+ if (irq & MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT)
+ trigger_machine = true;
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_GPIO_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 1000BT1 */
+ if ((irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP) ||
+ (irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN))
+ trigger_machine = true;
+
+ if (!trigger_machine)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int mv88q2xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int mv88q2xxx_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+#if IS_ENABLED(CONFIG_HWMON)
+static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM),
+ NULL
+};
+
+static umode_t mv88q2xxx_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int mv88q2xxx_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK, ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_max:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_alarm:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR1);
+ if (ret < 0)
+ return ret;
+
+ *val = !!(ret & MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88q2xxx_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+ val = clamp_val(val, -75000, 180000);
+ val = (val / 1000) + 75;
+ val = FIELD_PREP(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
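Both hwmon paths above use the same conversion: the sensor field encodes the temperature with a +75 degC offset, while hwmon reports millidegrees. A minimal sketch of the two directions (the helper names are invented for illustration and are not part of the driver):

static long mv88q2xxx_field_to_mdegc(int field)
{
	/* e.g. a raw field value of 100 encodes 100 - 75 = 25 degC */
	return (field - 75) * 1000L;
}

static int mv88q2xxx_mdegc_to_field(long mdegc)
{
	/* clamp_val() returns the clamped value; it does not modify
	 * its argument in place
	 */
	mdegc = clamp_val(mdegc, -75000, 180000);
	return mdegc / 1000 + 75;	/* e.g. 25000 mdegC -> 100 */
}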
+
+static const struct hwmon_ops mv88q2xxx_hwmon_hwmon_ops = {
+ .is_visible = mv88q2xxx_hwmon_is_visible,
+ .read = mv88q2xxx_hwmon_read,
+ .write = mv88q2xxx_hwmon_write,
+};
+
+static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
+ .ops = &mv88q2xxx_hwmon_hwmon_ops,
+ .info = mv88q2xxx_hwmon_info,
+};
+
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon;
+ char *hwmon_name;
+ int ret;
+
+ /* Enable temperature sense */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(hwmon_name))
+ return PTR_ERR(hwmon_name);
+
+ hwmon = devm_hwmon_device_register_with_info(dev,
+ hwmon_name,
+ phydev,
+ &mv88q2xxx_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+
+#else
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
+static int mv88q2xxx_probe(struct phy_device *phydev)
+{
+ return mv88q2xxx_hwmon_probe(phydev);
+}
+
+static int mv88q222x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x48);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_CTRL,
+ MDIO_PCS_1000BT1_CTRL_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xffe4, 0xc);
+ if (ret < 0)
+ return ret;
+
+ /* Disable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000)
+ return phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x58);
+
+ return 0;
+}
+
+static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad,
+ mv88q222x_revb0_init_seq0[i].regnum,
+ mv88q222x_revb0_init_seq0[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad,
+ mv88q222x_revb0_init_seq1[i].regnum,
+ mv88q222x_revb0_init_seq1[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return mv88q2xxx_config_init(phydev);
+}
+
+static int mv88q222x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF, 0x0058);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE, 0x00eb);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE, 0x010e);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ 0x0d90);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS,
+ MDIO_MMD_PCS_MV_TDR_STATUS_ON);
+ if (ret < 0)
+ return ret;
+
+ /* According to the Marvell API the test is finished within 500 ms */
+ msleep(500);
+
+ return 0;
+}
+
+static int mv88q222x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, status;
+ u32 dist;
+
+ status = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS);
+ if (status < 0)
+ return status;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST | 0xd90);
+ if (ret < 0)
+ return ret;
+
+ /* Test could not be finished */
+ if (FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_MASK, status) !=
+ MDIO_MMD_PCS_MV_TDR_STATUS_OFF)
+ return -ETIMEDOUT;
+
+ *finished = true;
+ /* Fault length reported in meters, convert to centimeters */
+ dist = FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK, status) * 100;
+ switch (status & MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK) {
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK);
+ break;
+ default:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ return 0;
+}
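A worked example of the decoding above, assuming the field layout named by the masks: a status word with the VCT state field set to OPEN and a distance field of 23 reports an open pair at 23 m, which is forwarded to ethtool as ETHTOOL_A_CABLE_RESULT_CODE_OPEN with a fault length of 23 * 100 = 2300 cm on pair A.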
+
static struct phy_driver mv88q2xxx_driver[] = {
{
.phy_id = MARVELL_PHY_ID_88Q2110,
@@ -246,8 +806,29 @@ static struct phy_driver mv88q2xxx_driver[] = {
.read_status = mv88q2xxx_read_status,
.soft_reset = mv88q2xxx_soft_reset,
.set_loopback = genphy_c45_loopback,
- .get_sqi = mv88q2xxxx_get_sqi,
- .get_sqi_max = mv88q2xxxx_get_sqi_max,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0),
+ .name = "mv88q2220",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = mv88q2xxx_probe,
+ .get_features = mv88q2xxx_get_features,
+ .config_aneg = mv88q2xxx_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .config_init = mv88q222x_revb0_config_init,
+ .read_status = mv88q2xxx_read_status,
+ .soft_reset = mv88q222x_soft_reset,
+ .config_intr = mv88q2xxx_config_intr,
+ .handle_interrupt = mv88q2xxx_handle_interrupt,
+ .set_loopback = genphy_c45_loopback,
+ .cable_test_start = mv88q222x_cable_test_start,
+ .cable_test_get_status = mv88q222x_cable_test_get_status,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ .suspend = mv88q2xxx_suspend,
+ .resume = mv88q2xxx_resume,
},
};
@@ -255,6 +836,7 @@ module_phy_driver(mv88q2xxx_driver);
static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), },
{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index eba652a4c1d8..42ed013385bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -919,7 +919,10 @@ static int m88e1111_config_init_1000basex(struct phy_device *phydev)
if (extsr < 0)
return extsr;
- /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled */
+ /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled.
+ * FIXME: this does not actually enable 1000BaseX auto-negotiation if
+ * it was previously disabled in the Fiber BMCR!
+ */
mode = extsr & MII_M1111_HWCFG_MODE_MASK;
if (mode == MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN) {
err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
@@ -1461,7 +1464,7 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index afbad1ad8683..b765466e767f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -459,19 +459,34 @@ EXPORT_SYMBOL(of_mdio_find_bus);
* found, set the of_node pointer for the mdio device. This allows
* auto-probed phy devices to be supplied with information passed in
* via DT.
+ * If a PHY package node is found, the search also descends into it.
*/
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
{
- struct device *dev = &mdiodev->dev;
struct device_node *child;
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- for_each_available_child_of_node(bus->dev.of_node, child) {
+ for_each_available_child_of_node(np, child) {
int addr;
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_property_present(child, "reg")) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+ * incremented later when the PHY joins the package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
addr = of_mdio_parse_addr(dev, child);
if (addr < 0)
continue;
@@ -481,9 +496,22 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
/* The refcount on "child" is passed to the mdio
* device. Do _not_ use of_node_put(child) here.
*/
- return;
+ return 0;
}
}
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
}
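For context, a sketch of the DT layout the recursive lookup above is meant to handle (illustration only; the compatible string is made up, and the lookup keys only on the node name and the presence of "reg"):

/*
 *	mdio {
 *		ethernet-phy-package@0 {
 *			compatible = "example,phy-package";
 *			reg = <0>;	// base address of the package
 *
 *			ethernet-phy@1 { reg = <1>; };
 *			ethernet-phy@2 { reg = <2>; };
 *		};
 *	};
 */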
#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
@@ -1398,7 +1426,7 @@ static const struct attribute_group *mdio_bus_dev_groups[] = {
NULL,
};
-struct bus_type mdio_bus_type = {
+const struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.dev_groups = mdio_bus_dev_groups,
.match = mdio_bus_match,
diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
index 69b829e6ab35..7fd3377dbd79 100644
--- a/drivers/net/phy/mdio_devres.c
+++ b/drivers/net/phy/mdio_devres.c
@@ -131,4 +131,5 @@ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
EXPORT_SYMBOL(__devm_of_mdiobus_register);
#endif /* CONFIG_OF_MDIO */
+MODULE_DESCRIPTION("Network MDIO bus devres helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 8a20d9889f10..0f3a1538a8b8 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -489,7 +489,7 @@ static int tx_r50_fill_result(struct phy_device *phydev, u16 tx_r50_cal_val,
u16 reg, val;
if (phydev->drv->phy_id == MTK_GPHY_ID_MT7988)
- bias = -2;
+ bias = -1;
val = clamp_val(bias + tx_r50_cal_val, 0, 63);
@@ -705,6 +705,11 @@ restore:
static void mt798x_phy_common_finetune(struct phy_device *phydev)
{
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
+ __phy_write(phydev, 0x11, 0xc71);
+ __phy_write(phydev, 0x12, 0xc);
+ __phy_write(phydev, 0x10, 0x8fae);
+
/* EnabRandUpdTrig = 1 */
__phy_write(phydev, 0x11, 0x2f00);
__phy_write(phydev, 0x12, 0xe);
@@ -715,15 +720,56 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x83aa);
- /* TrFreeze = 0 */
+ /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */
+ __phy_write(phydev, 0x11, 0x240);
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x9680);
+
+ /* TrFreeze = 0 (mt7988 default) */
__phy_write(phydev, 0x11, 0x0);
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x9686);
+ /* SSTrKp100 = 5 */
+ /* SSTrKf100 = 6 */
+ /* SSTrKp1000Mas = 5 */
+ /* SSTrKf1000Mas = 6 */
/* SSTrKp1000Slv = 5 */
+ /* SSTrKf1000Slv = 6 */
__phy_write(phydev, 0x11, 0xbaef);
__phy_write(phydev, 0x12, 0x2e);
__phy_write(phydev, 0x10, 0x968c);
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+}
+
+static void mt7981_phy_finetune(struct phy_device *phydev)
+{
+ u16 val[8] = { 0x01ce, 0x01c1,
+ 0x020f, 0x0202,
+ 0x03d0, 0x03c0,
+ 0x0013, 0x0005 };
+ int i, k;
+
+ /* 100M eye finetune:
+ * Keep middle level of TX MLT3 shaper as default.
+ * Only change TX MLT3 overshoot level here.
+ */
+ for (k = 0, i = 1; i < 12; i++) {
+ if (i % 3 == 0)
+ continue;
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
+ }
+
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ /* ResetSyncOffset = 6 */
+ __phy_write(phydev, 0x11, 0x600);
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x8fc0);
+
+ /* VgaDecRate = 1 */
+ __phy_write(phydev, 0x11, 0x4c2a);
+ __phy_write(phydev, 0x12, 0x3e);
+ __phy_write(phydev, 0x10, 0x8fa4);
/* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
* MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
@@ -738,7 +784,7 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
__phy_write(phydev, 0x10, 0x8ec0);
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
- /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9*/
+ /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */
phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0x9));
@@ -771,48 +817,6 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222);
}
-static void mt7981_phy_finetune(struct phy_device *phydev)
-{
- u16 val[8] = { 0x01ce, 0x01c1,
- 0x020f, 0x0202,
- 0x03d0, 0x03c0,
- 0x0013, 0x0005 };
- int i, k;
-
- /* 100M eye finetune:
- * Keep middle level of TX MLT3 shapper as default.
- * Only change TX MLT3 overshoot level here.
- */
- for (k = 0, i = 1; i < 12; i++) {
- if (i % 3 == 0)
- continue;
- phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
- }
-
- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
- __phy_write(phydev, 0x11, 0xc71);
- __phy_write(phydev, 0x12, 0xc);
- __phy_write(phydev, 0x10, 0x8fae);
-
- /* ResetSyncOffset = 6 */
- __phy_write(phydev, 0x11, 0x600);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
-
- /* VgaDecRate = 1 */
- __phy_write(phydev, 0x11, 0x4c2a);
- __phy_write(phydev, 0x12, 0x3e);
- __phy_write(phydev, 0x10, 0x8fa4);
-
- /* FfeUpdGainForce = 4 */
- __phy_write(phydev, 0x11, 0x240);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9680);
-
- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
-}
-
static void mt7988_phy_finetune(struct phy_device *phydev)
{
u16 val[12] = { 0x0187, 0x01cd, 0x01c8, 0x0182,
@@ -827,17 +831,7 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
/* TCT finetune */
phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5);
- /* Disable TX power saving */
- phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
- MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
-
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
-
- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 12 */
- __phy_write(phydev, 0x11, 0x671);
- __phy_write(phydev, 0x12, 0xc);
- __phy_write(phydev, 0x10, 0x8fae);
-
/* ResetSyncOffset = 5 */
__phy_write(phydev, 0x11, 0x500);
__phy_write(phydev, 0x12, 0x0);
@@ -845,13 +839,27 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
/* VgaDecRate is 1 at default on mt7988 */
- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+ /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7,
+ * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7
+ */
+ __phy_write(phydev, 0x11, 0xb90a);
+ __phy_write(phydev, 0x12, 0x6f);
+ __phy_write(phydev, 0x10, 0x8f82);
+
+ /* RemAckCntLimitCtrl = 1 */
+ __phy_write(phydev, 0x11, 0xfbba);
+ __phy_write(phydev, 0x12, 0xc3);
+ __phy_write(phydev, 0x10, 0x87f8);
- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_2A30);
- /* TxClkOffset = 2 */
- __phy_modify(phydev, MTK_PHY_ANARG_RG, MTK_PHY_TCLKOFFSET_MASK,
- FIELD_PREP(MTK_PHY_TCLKOFFSET_MASK, 0x2));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+
+ /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
+ MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
+ BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0xa));
+
+ /* rg_tr_lpf_cnt_val = 1023 */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_LPF_CNT_VAL, 0x3ff);
}
static void mt798x_phy_eee(struct phy_device *phydev)
@@ -884,11 +892,11 @@ static void mt798x_phy_eee(struct phy_device *phydev)
MTK_PHY_LPI_SLV_SEND_TX_EN,
FIELD_PREP(MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK, 0x120));
- phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
- MTK_PHY_LPI_SEND_LOC_TIMER_MASK |
- MTK_PHY_LPI_TXPCS_LOC_RCV,
- FIELD_PREP(MTK_PHY_LPI_SEND_LOC_TIMER_MASK, 0x117));
+ /* Keep MTK_PHY_LPI_SEND_LOC_TIMER as 375 */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
+ MTK_PHY_LPI_TXPCS_LOC_RCV);
+ /* This also fixes some IoT issues, such as CH340 */
phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2C7,
MTK_PHY_MAX_GAIN_MASK | MTK_PHY_MIN_GAIN_MASK,
FIELD_PREP(MTK_PHY_MAX_GAIN_MASK, 0x8) |
@@ -922,7 +930,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x9690);
- /* REG_EEE_st2TrKf1000 = 3 */
+ /* REG_EEE_st2TrKf1000 = 2 */
__phy_write(phydev, 0x11, 0x114f);
__phy_write(phydev, 0x12, 0x2);
__phy_write(phydev, 0x10, 0x969a);
@@ -947,7 +955,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x96b8);
- /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 1 */
+ /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */
__phy_write(phydev, 0x11, 0x1463);
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x96ca);
@@ -1459,6 +1467,13 @@ static int mt7988_phy_probe(struct phy_device *phydev)
if (err)
return err;
+ /* Disable TX power saving at probing to:
+ * 1. Meet common mode compliance test criteria
+ * 2. Make sure that TX-VCM calibration works fine
+ */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
+ MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
+
return mt798x_phy_calibration(phydev);
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 81c20eb4b54b..8b8634600c51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,12 +114,30 @@
#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+#define LAN8814_EEE_STATE 0x38
+#define LAN8814_EEE_STATE_MASK2P5P BIT(10)
+
+#define LAN8814_PD_CONTROLS 0x9d
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK GENMASK(3, 0)
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL 0xb
+
/* Represents 1ppm adjustment in 2^32 format with
* each nsec contains 4 clock cycles.
* The value is calculated as following: (1/1000000)/((2^-32)/4)
*/
#define LAN8814_1PPM_FORMAT 17179
+/* Represents 1ppm adjustment in 2^32 format with
+ * each nsec contains 8 clock cycles.
+ * The value is calculated as following: (1/1000000)/((2^-32)/8)
+ */
+#define LAN8841_1PPM_FORMAT 34360
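A worked check of these two constants may help: the rate register LSB is 2^-32 ns per clock cycle, so a 1 ppm offset (10^-6 ns of error per ns) corresponds to 2^32 * T / 10^6 register units for a cycle time of T ns:

/*
 * 2^32 * 4 / 10^6 = 17179.87 -> 17179 (LAN8814, 4 ns cycle, 250 MHz)
 * 2^32 * 8 / 10^6 = 34359.74 -> 34360 (LAN8841, 8 ns cycle, 125 MHz)
 *
 * The two constants happen to round in opposite directions; the
 * resulting frequency error is negligible either way.
 */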
+
+#define PTP_RX_VERSION 0x0248
+#define PTP_TX_VERSION 0x0288
+#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
+#define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
+
#define PTP_RX_MOD 0x024F
#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
#define PTP_RX_TIMESTAMP_EN 0x024D
@@ -149,11 +167,13 @@
#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+#define PTP_CLOCK_SET_SEC_HI 0x0205
#define PTP_CLOCK_SET_SEC_MID 0x0206
#define PTP_CLOCK_SET_SEC_LO 0x0207
#define PTP_CLOCK_SET_NS_HI 0x0208
#define PTP_CLOCK_SET_NS_LO 0x0209
+#define PTP_CLOCK_READ_SEC_HI 0x0229
#define PTP_CLOCK_READ_SEC_MID 0x022A
#define PTP_CLOCK_READ_SEC_LO 0x022B
#define PTP_CLOCK_READ_NS_HI 0x022C
@@ -2587,35 +2607,31 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
}
static void lan8814_ptp_clock_set(struct phy_device *phydev,
- u32 seconds, u32 nano_seconds)
+ time64_t sec, u32 nsec)
{
- u32 sec_low, sec_high, nsec_low, nsec_high;
-
- sec_low = seconds & 0xffff;
- sec_high = (seconds >> 16) & 0xffff;
- nsec_low = nano_seconds & 0xffff;
- nsec_high = (nano_seconds >> 16) & 0x3fff;
-
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
}
static void lan8814_ptp_clock_get(struct phy_device *phydev,
- u32 *seconds, u32 *nano_seconds)
+ time64_t *sec, u32 *nsec)
{
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
- *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
- *seconds = (*seconds << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+ *sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
- *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+ *nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nsec <<= 16;
+ *nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
}
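The new SEC_HI register extends the stored seconds from 32 to 48 bits. A sketch of the reassembly performed by the read path above (hypothetical helper, illustration only):

static time64_t lan8814_sec_from_regs(u16 hi, u16 mid, u16 lo)
{
	/* e.g. sec = 0x123456789abc is stored as hi = 0x1234,
	 * mid = 0x5678, lo = 0x9abc; on the write side
	 * upper_32_bits(sec) yields bits 63:32, of which the
	 * 16-bit register keeps bits 47:32
	 */
	return ((time64_t)hi << 32) | ((time64_t)mid << 16) | lo;
}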
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
@@ -2625,7 +2641,7 @@ static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
ptp_clock_info);
struct phy_device *phydev = shared->phydev;
u32 nano_seconds;
- u32 seconds;
+ time64_t seconds;
mutex_lock(&shared->shared_lock);
lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds);
@@ -2655,38 +2671,37 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
{
u32 nano_seconds_step;
u64 abs_time_step_ns;
- u32 unsigned_seconds;
+ time64_t set_seconds;
u32 nano_seconds;
u32 remainder;
s32 seconds;
if (time_step_ns > 15000000000LL) {
/* convert to clock set */
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds += remainder;
if (nano_seconds >= 1000000000) {
- unsigned_seconds++;
+ set_seconds++;
nano_seconds -= 1000000000;
}
- lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
} else if (time_step_ns < -15000000000LL) {
/* convert to clock set */
time_step_ns = -time_step_ns;
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds_step = remainder;
if (nano_seconds < nano_seconds_step) {
- unsigned_seconds--;
+ set_seconds--;
nano_seconds += 1000000000;
}
nano_seconds -= nano_seconds_step;
- lan8814_ptp_clock_set(phydev, unsigned_seconds,
- nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
}
@@ -3150,6 +3165,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+ /* Disable checking for minorVersionPTP field */
+ lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+ lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+
skb_queue_head_init(&ptp_priv->tx_queue);
skb_queue_head_init(&ptp_priv->rx_queue);
INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
@@ -3274,6 +3295,33 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
return 0;
}
+static void lan8814_clear_2psp_bit(struct phy_device *phydev)
+{
+ u16 val;
+
+ /* It was noticed that when traffic was passing through the PHY and the
+ * cable was removed, the LED stayed on even though there was no
+ * link.
+ */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
+ val &= ~LAN8814_EEE_STATE_MASK2P5P;
+ lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
+}
+
+static void lan8814_update_meas_time(struct phy_device *phydev)
+{
+ u16 val;
+
+ /* Setting the measurement time to a value of 0xb allows cables longer
+ * than 100m to be used. This configuration can be used regardless of
+ * the PHY's mode of operation.
+ */
+ val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
+ val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
+ val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
+ lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
+}
+
static int lan8814_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -3310,6 +3358,10 @@ static int lan8814_probe(struct phy_device *phydev)
lan8814_ptp_init(phydev);
+ /* Errata workarounds */
+ lan8814_clear_2psp_bit(phydev);
+ lan8814_update_meas_time(phydev);
+
return 0;
}
@@ -4107,8 +4159,8 @@ static int lan8841_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
faster = false;
}
- rate = LAN8814_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
- rate += (LAN8814_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+ rate = LAN8841_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (LAN8841_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
mutex_lock(&ptp_priv->ptp_lock);
phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_HI,
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index ea1073adc5a1..b2d36a3a96f1 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -274,6 +274,14 @@ static int gpy_config_init(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
+static int gpy21x_config_init(struct phy_device *phydev)
+{
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces);
+
+ return gpy_config_init(phydev);
+}
+
static int gpy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -867,7 +875,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY211B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -884,7 +892,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY211C),
.name = "Maxlinear Ethernet GPY211C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -902,7 +910,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY212B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -919,7 +927,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY212C),
.name = "Maxlinear Ethernet GPY212C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -937,7 +945,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPYx15B_MASK,
.name = "Maxlinear Ethernet GPY215B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -954,7 +962,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY215C),
.name = "Maxlinear Ethernet GPY215C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 747d14bf152c..5695935fdce9 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -208,7 +208,8 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
MDIO_AN_T1_ADV_L_PAUSE_ASYM;
- adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+ adv_m_mask = MDIO_AN_T1_ADV_M_1000BT1 | MDIO_AN_T1_ADV_M_100BT1 |
+ MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -706,6 +707,22 @@ int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv)
changed = 1;
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ val = linkmode_to_mii_eee_cap2_t(adv);
+
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV2,
+ MDIO_EEE_2_5GT | MDIO_EEE_5GT,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
val = linkmode_adv_to_mii_10base_t1_t(adv);
@@ -745,6 +762,17 @@ int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv)
mii_eee_cap1_mod_linkmode_t(adv, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(adv, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
@@ -781,6 +809,17 @@ static int genphy_c45_read_eee_lpa(struct phy_device *phydev,
mii_eee_cap1_mod_linkmode_t(lpa, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.17 EEE link partner ability 2
+ * (Register 7.63)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(lpa, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.26 10BASE-T1 AN status register
@@ -831,6 +870,30 @@ static int genphy_c45_read_eee_cap1(struct phy_device *phydev)
}
/**
+ * genphy_c45_read_eee_cap2 - read supported EEE link modes from register 3.21
+ * @phydev: target phy_device struct
+ */
+static int genphy_c45_read_eee_cap2(struct phy_device *phydev)
+{
+ int val;
+
+ /* IEEE 802.3-2022 45.2.3.11 EEE control and capability 2
+ * (Register 3.21)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE2);
+ if (val < 0)
+ return val;
+
+ /* IEEE 802.3-2022 45.2.3.11 says 9 bits are reserved. */
+ if (val == 0xffff)
+ return 0;
+
+ mii_eee_cap2_mod_linkmode_sup_t(phydev->supported_eee, val);
+
+ return 0;
+}
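The cap2 translation helpers used above are straight bit maps, since registers 3.21, 7.62 and 7.63 share one layout for these speeds (as encoded by the mii_eee_cap2_* helpers this code relies on):

/*
 * MDIO_EEE_2_5GT -> ETHTOOL_LINK_MODE_2500baseT_Full_BIT
 * MDIO_EEE_5GT   -> ETHTOOL_LINK_MODE_5000baseT_Full_BIT
 */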
+
+/**
* genphy_c45_read_eee_abilities - read supported EEE link modes
* @phydev: target phy_device struct
*/
@@ -848,6 +911,13 @@ int genphy_c45_read_eee_abilities(struct phy_device *phydev)
return val;
}
+ /* Same for cap2 (3.21) */
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP2_FEATURES)) {
+ val = genphy_c45_read_eee_cap2(phydev);
+ if (val)
+ return val;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported)) {
/* IEEE 802.3cg-2019 45.2.1.186b 10BASE-T1L PMA status register
@@ -1443,17 +1513,17 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
/**
* genphy_c45_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: it reports the Supported/Advertisement/LP Advertisement
* capabilities.
*/
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {};
- bool overflow = false, is_enabled;
+ bool is_enabled;
int ret;
ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled);
@@ -1462,17 +1532,9 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
data->eee_enabled = is_enabled;
data->eee_active = ret;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->supported,
- phydev->supported_eee))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->advertised, adv))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->lp_advertised, lp))
- overflow = true;
-
- if (overflow)
- phydev_warn(phydev, "Not all supported or advertised EEE link modes were passed to the user space\n");
+ linkmode_copy(data->supported, phydev->supported_eee);
+ linkmode_copy(data->advertised, adv);
+ linkmode_copy(data->lp_advertised, lp);
return 0;
}
@@ -1481,50 +1543,53 @@ EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
/**
* genphy_c45_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: sets the Supported/Advertisement/LP Advertisement
* capabilities. If eee_enabled is false, no links modes are
* advertised, but the previously advertised link modes are
* retained. This allows EEE to be enabled/disabled in a
* non-destructive way.
+ * Returns either an error code, 0 if there was no change, or a positive
+ * value if there was a change which triggered auto-negotiation.
*/
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
int ret;
if (data->eee_enabled) {
- if (data->advertised) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
+ unsigned long *adv = data->advertised;
- ethtool_convert_legacy_u32_to_link_mode(adv,
- data->advertised);
- linkmode_andnot(adv, adv, phydev->supported_eee);
- if (!linkmode_empty(adv)) {
+ if (!linkmode_empty(adv)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ if (linkmode_andnot(tmp, adv, phydev->supported_eee)) {
phydev_warn(phydev, "At least some EEE link modes are not supported.\n");
return -EINVAL;
}
-
- ethtool_convert_legacy_u32_to_link_mode(phydev->advertising_eee,
- data->advertised);
} else {
- linkmode_copy(phydev->advertising_eee,
- phydev->supported_eee);
+ adv = phydev->supported_eee;
}
- phydev->eee_enabled = true;
- } else {
- phydev->eee_enabled = false;
+ linkmode_copy(phydev->advertising_eee, adv);
}
+ phydev->eee_enabled = data->eee_enabled;
+
ret = genphy_c45_an_config_eee_aneg(phydev);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return phy_restart_aneg(phydev);
+ if (ret > 0) {
+ ret = phy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
- return 0;
+ /* explicitly return 1, otherwise (ret > 0) value will be
+ * overwritten by phy_restart_aneg().
+ */
+ return 1;
+ }
+
+ return ret;
}
EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3376e58e2b88..c3a0a5ee5f11 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -983,9 +983,17 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
+ err = genphy_c45_eee_is_active(phydev,
+ NULL, NULL, NULL);
+ if (err < 0)
+ phydev->enable_tx_lpi = false;
+ else
+ phydev->enable_tx_lpi = (err & phydev->eee_cfg.tx_lpi_enabled);
+
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
phydev->state = PHY_NOLINK;
+ phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
@@ -1290,7 +1298,6 @@ int phy_disable_interrupts(struct phy_device *phydev)
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
- struct phy_driver *drv = phydev->drv;
irqreturn_t ret;
/* Wakeup interrupts may occur during a system sleep transition.
@@ -1316,7 +1323,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
}
mutex_lock(&phydev->lock);
- ret = drv->handle_interrupt(phydev);
+ ret = phydev->drv->handle_interrupt(phydev);
mutex_unlock(&phydev->lock);
return ret;
@@ -1632,12 +1639,12 @@ EXPORT_SYMBOL(phy_get_eee_err);
/**
* phy_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
- * Description: it reportes the Supported/Advertisement/LP Advertisement
- * capabilities.
+ * Description: reports the Supported/Advertisement/LP Advertisement
+ * capabilities, etc.
*/
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1646,6 +1653,7 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_get_eee(phydev, data);
+ eeecfg_to_eee(data, &phydev->eee_cfg);
mutex_unlock(&phydev->lock);
return ret;
@@ -1653,13 +1661,43 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
EXPORT_SYMBOL(phy_ethtool_get_eee);
/**
+ * phy_ethtool_set_eee_noneg - Adjusts MAC LPI configuration without PHY
+ * renegotiation
+ * @phydev: pointer to the target PHY device structure
+ * @data: pointer to the ethtool_keee structure containing the new EEE settings
+ *
+ * This function updates the Energy Efficient Ethernet (EEE) configuration
+ * for cases where only the MAC's Low Power Idle (LPI) configuration changes,
+ * without triggering PHY renegotiation. It ensures that the MAC is properly
+ * informed of the new LPI settings by cycling the link down and up, which
+ * is necessary for the MAC to adopt the new configuration. This adjustment
+ * is done only if there is a change in the tx_lpi_enabled or tx_lpi_timer
+ * configuration.
+ */
+static void phy_ethtool_set_eee_noneg(struct phy_device *phydev,
+ struct ethtool_keee *data)
+{
+ if (phydev->eee_cfg.tx_lpi_enabled != data->tx_lpi_enabled ||
+ phydev->eee_cfg.tx_lpi_timer != data->tx_lpi_timer) {
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ phydev->enable_tx_lpi = eeecfg_mac_can_tx_lpi(&phydev->eee_cfg);
+ if (phydev->link) {
+ phydev->link = false;
+ phy_link_down(phydev);
+ phydev->link = true;
+ phy_link_up(phydev);
+ }
+ }
+}
+
+/**
* phy_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: it is to program the Advertisement EEE register.
*/
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1668,9 +1706,14 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_set_eee(phydev, data);
+ if (ret >= 0) {
+ if (ret == 0)
+ phy_ethtool_set_eee_noneg(phydev, data);
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ }
mutex_unlock(&phydev->lock);
- return ret;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3611ea64875e..72452e6a478c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -148,6 +148,14 @@ static const int phy_eee_cap1_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
+static const int phy_eee_cap2_features_array[] = {
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_eee_cap2_features);
+
static void features_init(void)
{
/* 10/100 half/full*/
@@ -232,6 +240,9 @@ static void features_init(void)
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
phy_eee_cap1_features);
+ linkmode_set_bit_array(phy_eee_cap2_features_array,
+ ARRAY_SIZE(phy_eee_cap2_features_array),
+ phy_eee_cap2_features);
}
@@ -780,7 +791,7 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
* and identifiers in @c45_ids.
*
* Returns zero on success, %-EIO on bus access error, or %-ENODEV if
- * the "devices in package" is invalid.
+ * the "devices in package" is invalid or no device responds.
*/
static int get_phy_c45_ids(struct mii_bus *bus, int addr,
struct phy_c45_device_ids *c45_ids)
@@ -803,7 +814,11 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr,
*/
ret = phy_c45_probe_present(bus, addr, i);
if (ret < 0)
- return -EIO;
+ /* returning -ENODEV doesn't stop bus
+ * scanning
+ */
+ return (phy_reg == -EIO ||
+ phy_reg == -ENODEV) ? -ENODEV : -EIO;
if (!ret)
continue;
@@ -1413,6 +1428,11 @@ int phy_sfp_probe(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_sfp_probe);
+static bool phy_drv_supports_irq(const struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->handle_interrupt;
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -1527,6 +1547,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->dev_flags & PHY_F_NO_IRQ)
phydev->irq = PHY_POLL;
+ if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev))
+ phydev->irq = PHY_POLL;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1592,7 +1615,6 @@ EXPORT_SYMBOL(phy_attach_direct);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface)
{
- struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
struct device *d;
int rc;
@@ -1603,7 +1625,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
- d = bus_find_device_by_name(bus, NULL, bus_id);
+ d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
return ERR_PTR(-ENODEV);
@@ -1700,6 +1722,7 @@ int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
shared->priv_size = priv_size;
}
shared->base_addr = base_addr;
+ shared->np = NULL;
refcount_set(&shared->refcnt, 1);
bus->shared[base_addr] = shared;
} else {
@@ -1723,6 +1746,63 @@ err_unlock:
EXPORT_SYMBOL_GPL(phy_package_join);
/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration the shared struct will also have its np value
+ * filled in, so that additional DT-defined properties can be used in the
+ * PHY-specific probe_once and config_init_once PHY package OPs.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling
+ * phy_package_join() with the same cookie but a different priv_size is an
+ * error, as is a parent node that is missing, invalid or doesn't match
+ * the expected node name for a PHY package.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
+
+/**
* phy_package_leave - leave a common PHY group
* @phydev: target phy_device struct
*
@@ -1738,6 +1818,10 @@ void phy_package_leave(struct phy_device *phydev)
if (!shared)
return;
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
bus->shared[shared->base_addr] = NULL;
mutex_unlock(&bus->shared_lock);
@@ -1791,6 +1875,40 @@ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
EXPORT_SYMBOL_GPL(devm_phy_package_join);
/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(). For the shared storage fetched by this
+ * function, phy_package_leave() is automatically called on driver detach. See
+ * of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
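A minimal usage sketch for a PHY driver probe, assuming a DT package node as described above; the property name is invented for illustration:

static int example_phy_probe(struct phy_device *phydev)
{
	u32 mode;
	int ret;

	ret = devm_of_phy_package_join(&phydev->mdio.dev, phydev, 0);
	if (ret)
		return ret;

	/* shared->np now refers to the ethernet-phy-package node */
	return of_property_read_u32(phydev->shared->np,
				    "example,package-mode", &mode);
}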
+
+/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -1859,7 +1977,7 @@ int phy_suspend(struct phy_device *phydev)
{
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct net_device *netdev = phydev->attached_dev;
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
if (phydev->suspended)
@@ -1884,7 +2002,7 @@ EXPORT_SYMBOL(phy_suspend);
int __phy_resume(struct phy_device *phydev)
{
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
lockdep_assert_held(&phydev->lock);
@@ -2513,12 +2631,15 @@ EXPORT_SYMBOL(genphy_read_status);
/**
* genphy_c37_read_status - check the link status and update current link state
* @phydev: target phy_device struct
+ * @changed: pointer where to store if link changed
*
* Description: Check the link, then figure out the current state
* by comparing what we advertise with what the link partner
* advertises. This function is for Clause 37 1000Base-X mode.
+ *
+ * If link has changed, @changed is set to true, false otherwise.
*/
-int genphy_c37_read_status(struct phy_device *phydev)
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed)
{
int lpa, err, old_link = phydev->link;
@@ -2528,9 +2649,13 @@ int genphy_c37_read_status(struct phy_device *phydev)
return err;
/* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) {
+ *changed = false;
return 0;
+ }
+ /* Signal link has changed */
+ *changed = true;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -2770,6 +2895,50 @@ void phy_advertise_supported(struct phy_device *phydev)
EXPORT_SYMBOL(phy_advertise_supported);
/**
+ * phy_advertise_eee_all - Advertise all supported EEE modes
+ * @phydev: target phy_device struct
+ *
+ * Description: By default phylib preserves the EEE advertising at the time of
+ * phy probing, which might be a subset of the supported EEE modes. Use this
+ * function when all supported EEE modes should be advertised. This does not
+ * trigger auto-negotiation, so must be called before phy_start()/
+ * phylink_start() which will start auto-negotiation.
+ */
+void phy_advertise_eee_all(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+}
+EXPORT_SYMBOL_GPL(phy_advertise_eee_all);
+
+/**
+ * phy_support_eee - Set initial EEE policy configuration
+ * @phydev: Target phy_device struct
+ *
+ * This function configures the initial policy for Energy Efficient Ethernet
+ * (EEE) on the specified PHY device, ensuring that EEE capabilities are
+ * advertised before the link is established. It should be called during PHY
+ * registration by the MAC driver and/or the PHY driver (for SmartEEE PHYs)
+ * if the MAC supports LPI or the PHY is able to compensate for missing LPI
+ * functionality of the MAC.
+ *
+ * The function sets default EEE policy parameters, including preparing the PHY
+ * to advertise EEE capabilities based on hardware support.
+ *
+ * It also sets the expected configuration for Low Power Idle (LPI) in the MAC
+ * driver. If the PHY framework determines that both local and remote
+ * advertisements support EEE, and the negotiated link mode is compatible with
+ * EEE, it will set enable_tx_lpi = true. The MAC driver is expected to act on
+ * this setting by enabling the LPI timer if enable_tx_lpi is set.
+ */
+void phy_support_eee(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+ phydev->eee_cfg.tx_lpi_enabled = true;
+ phydev->eee_cfg.eee_enabled = true;
+}
+EXPORT_SYMBOL(phy_support_eee);
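A sketch of the expected MAC driver usage, with example_mac_set_lpi() standing in for whatever LPI hook the hardware provides (both function names are hypothetical):

static void example_mac_set_lpi(struct net_device *ndev, bool enable);

static void example_mac_adjust_link(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	/* phylib sets enable_tx_lpi when local and remote EEE
	 * advertisements match the negotiated link mode
	 */
	example_mac_set_lpi(ndev, phydev->link && phydev->enable_tx_lpi);
}

static int example_mac_connect(struct net_device *ndev,
			       struct phy_device *phydev)
{
	phy_support_eee(phydev);	/* opt in before phy_start() */
	phy_start(phydev);
	return 0;
}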
+
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
@@ -2992,11 +3161,6 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
-static bool phy_drv_supports_irq(struct phy_driver *phydrv)
-{
- return phydrv->config_intr && phydrv->handle_interrupt;
-}
-
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3097,6 +3261,7 @@ static int of_phy_led(struct phy_device *phydev,
struct device *dev = &phydev->mdio.dev;
struct led_init_data init_data = {};
struct led_classdev *cdev;
+ unsigned long modes = 0;
struct phy_led *phyled;
u32 index;
int err;
@@ -3114,6 +3279,21 @@ static int of_phy_led(struct phy_device *phydev,
if (index > U8_MAX)
return -EINVAL;
+ if (of_property_read_bool(led, "active-low"))
+ set_bit(PHY_LED_ACTIVE_LOW, &modes);
+ if (of_property_read_bool(led, "inactive-high-impedance"))
+ set_bit(PHY_LED_INACTIVE_HIGH_IMPEDANCE, &modes);
+
+ if (modes) {
+ /* Return error if asked to set polarity modes but not supported */
+ if (!phydev->drv->led_polarity_set)
+ return -EINVAL;
+
+ err = phydev->drv->led_polarity_set(phydev, index, modes);
+ if (err)
+ return err;
+ }
+
phyled->index = index;
if (phydev->drv->led_brightness_set)
cdev->brightness_set_blocking = phy_led_set_brightness;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ed0b4ccaa6a6..503fd7c40523 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2764,9 +2764,9 @@ EXPORT_SYMBOL_GPL(phylink_init_eee);
/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ * @eee: a pointer to a &struct ethtool_keee for the read parameters
*/
-int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
@@ -2782,9 +2782,9 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
/**
* phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ * @eee: a pointer to a &struct ethtool_keee for the desired parameters
*/
-int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig
new file mode 100644
index 000000000000..570626cc8e14
--- /dev/null
+++ b/drivers/net/phy/qcom/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config QCOM_NET_PHYLIB
+ tristate
+
+config AT803X_PHY
+ tristate "Qualcomm Atheros AR803X PHYs"
+ select QCOM_NET_PHYLIB
+ depends on REGULATOR
+ help
+ Currently supports the AR8030, AR8031, AR8033 and AR8035 models.
+
+config QCA83XX_PHY
+ tristate "Qualcomm Atheros QCA833x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the internal QCA8337 (qca8k) PHY model.
+
+config QCA808X_PHY
+ tristate "Qualcomm QCA808x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the QCA8081 model
+
+config QCA807X_PHY
+ tristate "Qualcomm QCA807x PHYs"
+ select QCOM_NET_PHYLIB
+ depends on OF_MDIO
+ help
+ Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
+ control PHY.
diff --git a/drivers/net/phy/qcom/Makefile b/drivers/net/phy/qcom/Makefile
new file mode 100644
index 000000000000..f24fb550babd
--- /dev/null
+++ b/drivers/net/phy/qcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_NET_PHYLIB) += qcom-phy-lib.o
+obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_QCA83XX_PHY) += qca83xx.o
+obj-$(CONFIG_QCA808X_PHY) += qca808x.o
+obj-$(CONFIG_QCA807X_PHY) += qca807x.o
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
new file mode 100644
index 000000000000..4717c59d51d0
--- /dev/null
+++ b/drivers/net/phy/qcom/at803x.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/net/phy/at803x.c
+ *
+ * Driver for Qualcomm Atheros AR803x PHY
+ *
+ * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
+ */
+
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/bitfield.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/phylink.h>
+#include <linux/sfp.h>
+#include <dt-bindings/net/qca-ar803x.h>
+
+#include "qcom.h"
+
+#define AT803X_LED_CONTROL 0x18
+
+#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
+#define AT803X_WOL_EN BIT(5)
+
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
+
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_BASET_RGMII 0x00
+#define AT803X_MODE_CFG_BASET_SGMII 0x01
+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
+
+#define AT803X_PSSR 0x11 /* PHY-Specific Status Register */
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
+#define AT803X_DEBUG_REG_1F 0x1F
+#define AT803X_DEBUG_PLL_ON BIT(2)
+#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+
+/* AT803x supports either the XTAL input pad, an internal PLL or the
+ * DSP as clock reference for the clock output pad. The XTAL reference
+ * is only used for 25 MHz output, all other frequencies need the PLL.
+ * The DSP as a clock reference is used in synchronous ethernet
+ * applications.
+ *
+ * By default the PLL is only enabled if there is a link. Otherwise
+ * the PHY will go into a low power state and disable the PLL. You can
+ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+ * enabled.
+ */
+#define AT803X_MMD7_CLK25M 0x8016
+#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
+#define AT803X_CLK_OUT_25MHZ_XTAL 0
+#define AT803X_CLK_OUT_25MHZ_DSP 1
+#define AT803X_CLK_OUT_50MHZ_PLL 2
+#define AT803X_CLK_OUT_50MHZ_DSP 3
+#define AT803X_CLK_OUT_62_5MHZ_PLL 4
+#define AT803X_CLK_OUT_62_5MHZ_DSP 5
+#define AT803X_CLK_OUT_125MHZ_PLL 6
+#define AT803X_CLK_OUT_125MHZ_DSP 7
+
+/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+ * but doesn't support choosing between XTAL/PLL and DSP.
+ */
+#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
+
+#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
+#define AT803X_CLK_OUT_STRENGTH_FULL 0
+#define AT803X_CLK_OUT_STRENGTH_HALF 1
+#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+
+#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
+#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
+#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
+#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
+
+#define ATH9331_PHY_ID 0x004dd041
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8032_PHY_ID 0x004dd023
+#define ATH8035_PHY_ID 0x004dd072
+#define AT8030_PHY_ID_MASK 0xffffffef
+
+#define QCA9561_PHY_ID 0x004dd042
+
+#define AT803X_PAGE_FIBER 0
+#define AT803X_PAGE_COPPER 1
+
+/* don't turn off internal PLL */
+#define AT803X_KEEP_PLL_ENABLED BIT(0)
+#define AT803X_DISABLE_SMARTEEE BIT(1)
+
+/* disable hibernation mode */
+#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct at803x_priv {
+ int flags;
+ u16 clk_25m_reg;
+ u16 clk_25m_mask;
+ u8 smarteee_lpi_tw_1g;
+ u8 smarteee_lpi_tw_100m;
+ bool is_fiber;
+ bool is_1000basex;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+};
+
+struct at803x_context {
+ u16 bmcr;
+ u16 advertise;
+ u16 control1000;
+ u16 int_enable;
+ u16 smart_speed;
+ u16 led_control;
+};
+
+static int at803x_write_page(struct phy_device *phydev, int page)
+{
+ int mask;
+ int set;
+
+ if (page == AT803X_PAGE_COPPER) {
+ set = AT803X_BT_BX_REG_SEL;
+ mask = 0;
+ } else {
+ set = 0;
+ mask = AT803X_BT_BX_REG_SEL;
+ }
+
+ return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
+}
+
+static int at803x_read_page(struct phy_device *phydev)
+{
+ int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+
+ if (ccr < 0)
+ return ccr;
+
+ if (ccr & AT803X_BT_BX_REG_SEL)
+ return AT803X_PAGE_COPPER;
+
+ return AT803X_PAGE_FIBER;
+}
+
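+/* These callbacks hook into phylib's paged-access helpers. A hedged sketch
+ * of a paged read built on top of them (illustrative only):
+ *
+ *	oldpage = phy_select_page(phydev, AT803X_PAGE_FIBER);
+ *	if (oldpage >= 0)
+ *		ret = __phy_read(phydev, MII_BMSR);
+ *	return phy_restore_page(phydev, oldpage, ret);
+ */
+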
+static int at803x_enable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
+ AT803X_DEBUG_RX_CLK_DLY_EN);
+}
+
+static int at803x_enable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
+ AT803X_DEBUG_TX_CLK_DLY_EN);
+}
+
+static int at803x_disable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ AT803X_DEBUG_RX_CLK_DLY_EN, 0);
+}
+
+static int at803x_disable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
+}
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+ struct at803x_context *context)
+{
+ context->bmcr = phy_read(phydev, MII_BMCR);
+ context->advertise = phy_read(phydev, MII_ADVERTISE);
+ context->control1000 = phy_read(phydev, MII_CTRL1000);
+ context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+ context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+ context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+ const struct at803x_context *context)
+{
+ phy_write(phydev, MII_BMCR, context->bmcr);
+ phy_write(phydev, MII_ADVERTISE, context->advertise);
+ phy_write(phydev, MII_CTRL1000, context->control1000);
+ phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+ phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+ phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
+static int at803x_suspend(struct phy_device *phydev)
+{
+ int value;
+ int wol_enabled;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ wol_enabled = value & AT803X_INTR_ENABLE_WOL;
+
+ if (wol_enabled)
+ value = BMCR_ISOLATE;
+ else
+ value = BMCR_PDOWN;
+
+ phy_modify(phydev, MII_BMCR, 0, value);
+
+ return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
+}
+
+static int at803x_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ u32 freq, strength, tw;
+ unsigned int sel;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (of_property_read_bool(node, "qca,disable-smarteee"))
+ priv->flags |= AT803X_DISABLE_SMARTEEE;
+
+ if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+ priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_1g = tw;
+ }
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_100m = tw;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+ if (!ret) {
+ switch (freq) {
+ case 25000000:
+ sel = AT803X_CLK_OUT_25MHZ_XTAL;
+ break;
+ case 50000000:
+ sel = AT803X_CLK_OUT_50MHZ_PLL;
+ break;
+ case 62500000:
+ sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+ break;
+ case 125000000:
+ sel = AT803X_CLK_OUT_125MHZ_PLL;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+ return -EINVAL;
+ }
+
+ priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
+ priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+ if (!ret) {
+ priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+ switch (strength) {
+ case AR803X_STRENGTH_FULL:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+ break;
+ case AR803X_STRENGTH_HALF:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+ break;
+ case AR803X_STRENGTH_QUARTER:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-strength\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int at803x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct at803x_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ ret = at803x_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int at803x_get_features(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err;
+
+ err = genphy_read_abilities(phydev);
+ if (err)
+ return err;
+
+ if (phydev->drv->phy_id != ATH8031_PHY_ID)
+ return 0;
+
+ /* AR8031/AR8033 have different status registers
+ * for copper and fiber operation. However, the
+ * extended status register is the same for both
+ * operation modes.
+ *
+ * As a result of that, ESTATUS_1000_XFULL is set
+ * to 1 even when operating in copper TP mode.
+ *
+ * Remove this mode from the supported link modes
+ * when not operating in 1000BaseX mode.
+ */
+ if (!priv->is_1000basex)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int at803x_smarteee_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ u16 mask = 0, val = 0;
+ int ret;
+
+ if (priv->flags & AT803X_DISABLE_SMARTEEE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+
+ if (priv->smarteee_lpi_tw_1g) {
+ mask |= 0xff00;
+ val |= priv->smarteee_lpi_tw_1g << 8;
+ }
+ if (priv->smarteee_lpi_tw_100m) {
+ mask |= 0x00ff;
+ val |= priv->smarteee_lpi_tw_100m;
+ }
+ if (!mask)
+ return 0;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+ mask, val);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+}
+
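+/* Worked example for the register layout above: qca,smarteee-tw-us-1g = 30
+ * and qca,smarteee-tw-us-100m = 20 yield mask = 0xffff and
+ * val = (30 << 8) | 20 = 0x1e14 in SMARTEEE_CTL1, i.e. the 1G Tw timer
+ * occupies the high byte and the 100M timer the low byte.
+ */
+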
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+ priv->clk_25m_mask, priv->clk_25m_reg);
+}
+
+static int at8031_pll_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is PLL OFF. After a soft reset, the
+ * values are retained.
+ */
+ if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_PLL_ON);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_PLL_ON, 0);
+}
+
+static int at803x_hibernation_mode_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is hibernation mode enabled. After
+ * software reset, the value is retained.
+ */
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ return 0;
+
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+}
+
+static int at803x_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* The RX and TX delay default is:
+ * after HW reset: RX delay enabled and TX delay disabled
+ * after SW reset: RX delay enabled, while TX delay retains the
+ * value before reset.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ ret = at803x_enable_rx_delay(phydev);
+ else
+ ret = at803x_disable_rx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ ret = at803x_enable_tx_delay(phydev);
+ else
+ ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_smarteee_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_clk_out_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_hibernation_mode_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* AR803x extended next page bit is enabled by default. Cisco
+ * multigig switches read this bit and attempt to negotiate 10Gbps
+ * rates even if the next page bit is disabled. This is incorrect
+ * behaviour but we still need to accommodate it. XNP is only needed
+ * for 10Gbps support, so disable XNP.
+ */
+ return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
+}
+
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+ /* Conduct a hardware reset for AT8030 every time a link loss is
+ * signalled. This is necessary to circumvent a hardware bug that
+ * occurs when the cable is unplugged while TX packets are pending
+ * in the FIFO. In such cases, the FIFO enters an error mode from
+ * which it cannot recover by software.
+ */
+ if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
+ struct at803x_context context;
+
+ at803x_context_save(phydev, &context);
+
+ phy_device_reset(phydev, 1);
+ usleep_range(1000, 2000);
+ phy_device_reset(phydev, 0);
+ usleep_range(1000, 2000);
+
+ at803x_context_restore(phydev, &context);
+
+ phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
+ }
+}
+
+static int at803x_config_aneg(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
+
+ return genphy_config_aneg(phydev);
+}
+
+static int at803x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case AT803X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool at803x_cdt_test_failed(u16 status)
+{
+ return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
+ AT803X_CDT_STATUS_STAT_FAIL;
+}
+
+static bool at803x_cdt_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ int ret, val;
+
+ val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, AT803X_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ if (at803x_cdt_test_failed(val))
+ return 0;
+
+ ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ at803x_cable_test_result_trans(val));
+
+ if (at803x_cdt_fault_length_valid(val)) {
+ val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
+ ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ at803x_cdt_fault_length(val));
+ }
+
+ return 1;
+}
+
+static int at803x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished, unsigned long pair_mask)
+{
+ int retries = 20;
+ int pair, ret;
+
+ *finished = false;
+
+ /* According to the datasheet the CDT can be performed when
+ * there is no link partner or when the link partner is
+ * auto-negotiating. Starting the test will restart the AN
+ * automatically. It seems that by doing this repeatedly we will
+ * eventually get a slot where our link partner won't disturb our
+ * measurement.
+ */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = at803x_cable_test_one_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ clear_bit(pair, &pair_mask);
+ }
+ if (pair_mask)
+ msleep(250);
+ }
+
+ *finished = true;
+
+ return 0;
+}
+
+static void at803x_cable_test_autoneg(struct phy_device *phydev)
+{
+ /* Enable auto-negotiation, but advertise no capabilities, so no link
+ * will be established. A restart of the auto-negotiation is not
+ * required, because the cable test will automatically break the link.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+}
+
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static const struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
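+
+/* Selector 0 corresponds to 1.5 V (AT803X_DEBUG_RGMII_1V8 cleared) and
+ * selector 1 to 1.8 V (bit set), matching the set/get callbacks above.
+ */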
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ /* Some modules support 10G modes as well as modes we do support.
+ * Mask out non-supported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct sfp_upstream_ops at8031_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at8031_sfp_insert,
+};
+
+static int at8031_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+ "vddio");
+ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ return phy_sfp_probe(phydev, &at8031_sfp_ops);
+}
+
+static int at8031_probe(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int mode_cfg;
+ int ccr;
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+ * options.
+ */
+ ret = at8031_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if (ccr < 0)
+ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
+
+ /* Disable WoL in the 1588 register, which is enabled
+ * by default
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+}
+
+static int at8031_config_init(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ return at803x_config_init(phydev);
+}
+
+static int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ /* First setup MAC address and enable WOL interrupt */
+ ret = at803x_set_wol(phydev, wol);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ /* Enable WOL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ else
+ /* Disable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+
+ return ret;
+}
+
+static int at8031_config_intr(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err, value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
+ priv->is_fiber) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+
+ err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
+ if (err)
+ return err;
+ }
+
+ return at803x_config_intr(phydev);
+}
+
+/* AR8031 and AR8033 share the same read status logic */
+static int at8031_read_status(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ bool changed;
+
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev, &changed);
+
+ return at803x_read_status(phydev);
+}
+
+/* AR8031 and AR8035 share the same cable test get status reg */
+static int at8031_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0xf);
+}
+
+/* AR8031 and AR8035 share the same cable test start logic */
+static int at8031_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ phy_write(phydev, MII_CTRL1000, 0);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
+static int at8032_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0x3);
+}
+
+static int at8035_parse_dt(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The mask is set by the generic at803x_parse_dt()
+ * if the property is present. Assume the property
+ * is present when the mask is non-zero.
+ */
+ if (priv->clk_25m_mask) {
+ /* Fixup for the AR8030/AR8035. This chip has a different mask and
+ * doesn't support the DSP reference, which is selected by the
+ * lowest bit of the mask. The upper two bits select the same
+ * frequencies. Mask out the lowest bit here.
+ *
+ * Warning:
+ * There was no datasheet for the AR8030 available so this is
+ * just a guess. But the AR8035 is listed as pin compatible
+ * to the AR8030 so there might be a good chance it works on
+ * the AR8030 too.
+ */
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+ }
+
+ return 0;
+}
+
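+/* Worked example: qca,clk-out-frequency = 62500000 selects
+ * AT803X_CLK_OUT_62_5MHZ_PLL, i.e. 0b100 in bits 4:2. Masking with
+ * AT8035_CLK_OUT_MASK (bits 4:3) keeps the frequency select bits but drops
+ * bit 2, the XTAL/PLL vs DSP reference select the AR8030/AR8035 lack.
+ */
+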
+/* AR8030 and AR8035 share the same special mask for clk_25m */
+static int at8035_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ return at8035_parse_dt(phydev);
+}
+
+static struct phy_driver at803x_driver[] = {
+{
+ /* Qualcomm Atheros AR8035 */
+ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
+ .name = "Qualcomm Atheros AR8035",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8035_probe,
+ .config_aneg = at803x_config_aneg,
+ .config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_GBIT_FEATURES */
+ .read_status = at803x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8030 */
+ .phy_id = ATH8030_PHY_ID,
+ .name = "Qualcomm Atheros AR8030",
+ .phy_id_mask = AT8030_PHY_ID_MASK,
+ .probe = at8035_probe,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+}, {
+ /* Qualcomm Atheros AR8031/AR8033 */
+ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
+ .name = "Qualcomm Atheros AR8031/AR8033",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8031_probe,
+ .config_init = at8031_config_init,
+ .config_aneg = at803x_config_aneg,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .read_page = at803x_read_page,
+ .write_page = at803x_write_page,
+ .get_features = at803x_get_features,
+ .read_status = at8031_read_status,
+ .config_intr = at8031_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8032 */
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+}, {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, };
+
+module_phy_driver(at803x_driver);
+
+static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+ { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
new file mode 100644
index 000000000000..672c6929119a
--- /dev/null
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ *
+ * Qualcomm QCA8072 and QCA8075 PHY driver
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/bitfield.h>
+#include <linux/gpio/driver.h>
+#include <linux/sfp.h>
+
+#include "qcom.h"
+
+#define QCA807X_CHIP_CONFIGURATION 0x1f
+#define QCA807X_BT_BX_REG_SEL BIT(15)
+#define QCA807X_BT_BX_REG_SEL_FIBER 0
+#define QCA807X_BT_BX_REG_SEL_COPPER 1
+#define QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK GENMASK(3, 0)
+#define QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII 4
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER 3
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER 0
+
+#define QCA807X_MEDIA_SELECT_STATUS 0x1a
+#define QCA807X_MEDIA_DETECTED_COPPER BIT(5)
+#define QCA807X_MEDIA_DETECTED_1000_BASE_X BIT(4)
+#define QCA807X_MEDIA_DETECTED_100_BASE_FX BIT(3)
+
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION 0x807e
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN BIT(0)
+
+#define QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH 0x801a
+#define QCA807X_CONTROL_DAC_MASK GENMASK(2, 0)
+/* List of tweaks enabled by this bit:
+ * - With both FULL amplitude and FULL bias current: bias current
+ * is set to half.
+ * - With only DSP amplitude: bias current is set to half and
+ * is set to 1/4 with cable < 10m.
+ * - With DSP bias current (included both DSP amplitude and
+ * DSP bias current): bias current is half the detected current
+ * with cable < 10m.
+ */
+#define QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK BIT(2)
+#define QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT BIT(1)
+#define QCA807X_CONTROL_DAC_DSP_AMPLITUDE BIT(0)
+
+#define QCA807X_MMD7_LED_100N_1 0x8074
+#define QCA807X_MMD7_LED_100N_2 0x8075
+#define QCA807X_MMD7_LED_1000N_1 0x8076
+#define QCA807X_MMD7_LED_1000N_2 0x8077
+
+#define QCA807X_MMD7_LED_CTRL(x) (0x8074 + ((x) * 2))
+#define QCA807X_MMD7_LED_FORCE_CTRL(x) (0x8075 + ((x) * 2))
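+/* e.g. QCA807X_MMD7_LED_CTRL(0) == QCA807X_MMD7_LED_100N_1 (0x8074) and
+ * QCA807X_MMD7_LED_FORCE_CTRL(1) == QCA807X_MMD7_LED_1000N_2 (0x8077).
+ */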
+
+/* LED hw control pattern for fiber port */
+#define QCA807X_LED_FIBER_PATTERN_MASK GENMASK(11, 1)
+#define QCA807X_LED_FIBER_TXACT_BLK_EN BIT(10)
+#define QCA807X_LED_FIBER_RXACT_BLK_EN BIT(9)
+#define QCA807X_LED_FIBER_FDX_ON_EN BIT(6)
+#define QCA807X_LED_FIBER_HDX_ON_EN BIT(5)
+#define QCA807X_LED_FIBER_1000BX_ON_EN BIT(2)
+#define QCA807X_LED_FIBER_100FX_ON_EN BIT(1)
+
+/* Some devices repurpose the LEDs as GPIO outputs */
+#define QCA807X_GPIO_FORCE_EN QCA808X_LED_FORCE_EN
+#define QCA807X_GPIO_FORCE_MODE_MASK QCA808X_LED_FORCE_MODE_MASK
+
+#define QCA807X_FUNCTION_CONTROL 0x10
+#define QCA807X_FC_MDI_CROSSOVER_MODE_MASK GENMASK(6, 5)
+#define QCA807X_FC_MDI_CROSSOVER_AUTO 3
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX 1
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI 0
+
+/* PQSGMII Analog PHY specific */
+#define PQSGMII_CTRL_REG 0x0
+#define PQSGMII_ANALOG_SW_RESET BIT(6)
+#define PQSGMII_DRIVE_CONTROL_1 0xb
+#define PQSGMII_TX_DRIVER_MASK GENMASK(7, 4)
+#define PQSGMII_TX_DRIVER_140MV 0x0
+#define PQSGMII_TX_DRIVER_160MV 0x1
+#define PQSGMII_TX_DRIVER_180MV 0x2
+#define PQSGMII_TX_DRIVER_200MV 0x3
+#define PQSGMII_TX_DRIVER_220MV 0x4
+#define PQSGMII_TX_DRIVER_240MV 0x5
+#define PQSGMII_TX_DRIVER_260MV 0x6
+#define PQSGMII_TX_DRIVER_280MV 0x7
+#define PQSGMII_TX_DRIVER_300MV 0x8
+#define PQSGMII_TX_DRIVER_320MV 0x9
+#define PQSGMII_TX_DRIVER_400MV 0xa
+#define PQSGMII_TX_DRIVER_500MV 0xb
+#define PQSGMII_TX_DRIVER_600MV 0xc
+#define PQSGMII_MODE_CTRL 0x6d
+#define PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK BIT(0)
+#define PQSGMII_MMD3_SERDES_CONTROL 0x805a
+
+#define PHY_ID_QCA8072 0x004dd0b2
+#define PHY_ID_QCA8075 0x004dd0b1
+
+#define QCA807X_COMBO_ADDR_OFFSET 4
+#define QCA807X_PQSGMII_ADDR_OFFSET 5
+#define SERDES_RESET_SLEEP 100
+
+enum qca807x_global_phy {
+ QCA807X_COMBO_ADDR = 4,
+ QCA807X_PQSGMII_ADDR = 5,
+};
+
+struct qca807x_shared_priv {
+ unsigned int package_mode;
+ u32 tx_drive_strength;
+};
+
+struct qca807x_gpio_priv {
+ struct phy_device *phy;
+};
+
+struct qca807x_priv {
+ bool dac_full_amplitude;
+ bool dac_full_bias_current;
+ bool dac_disable_bias_current_tweak;
+};
+
+static int qca807x_cable_test_start(struct phy_device *phydev)
+{
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int qca807x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+ break;
+ case PORT_FIBRE:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_TXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_RXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_100FX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_1000BX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_HDX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_FDX_ON_EN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int qca807x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca807x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 1)
+ return -EINVAL;
+
+ return qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca807x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, mask, offload_trigger = 0;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ ret = qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca807x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg, mask,
+ offload_trigger);
+}
+
+static bool qca807x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return false;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca807x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca807x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA807X_LED_FIBER_TXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA807X_LED_FIBER_RXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA807X_LED_FIBER_100FX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA807X_LED_FIBER_1000BX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA807X_LED_FIBER_HDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA807X_LED_FIBER_FDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca807x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg, mask;
+
+ if (index > 1)
+ return -EINVAL;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg, mask);
+}
+
+static int qca807x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* If we are switching the LED off, reset any hw control rule */
+ if (!value) {
+ ret = qca807x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca807x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+#ifdef CONFIG_GPIOLIB
+static int qca807x_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+
+ return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
+}
+
+static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+ val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
+ val |= QCA807X_GPIO_FORCE_EN;
+ val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
+
+ phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
+}
+
+static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ qca807x_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int qca807x_gpio(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca807x_gpio_priv *priv;
+ struct gpio_chip *gc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phy = phydev;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ gc->label = dev_name(dev);
+ gc->base = -1;
+ gc->ngpio = 2;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = true;
+ gc->get_direction = qca807x_gpio_get_direction;
+ gc->direction_output = qca807x_gpio_dir_out;
+ gc->get = qca807x_gpio_get;
+ gc->set = qca807x_gpio_set;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+#endif
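+
+/* Example (hypothetical) PHY node repurposing the two LED pins as GPIOs,
+ * assuming standard GPIO bindings:
+ *
+ *	ethernet-phy@4 {
+ *		gpio-controller;
+ *		#gpio-cells = <2>;
+ *	};
+ */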
+
+static int qca807x_read_fiber_status(struct phy_device *phydev)
+{
+ bool changed;
+ int ss, err;
+
+ err = genphy_c37_read_status(phydev, &changed);
+ if (err || !changed)
+ return err;
+
+ /* Read the QCA807x PHY-Specific Status register fiber page,
+ * which indicates the speed and duplex that the PHY is actually
+ * using, irrespective of whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ switch (FIELD_GET(AT803X_SS_SPEED_MASK, ss)) {
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ }
+
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int qca807x_read_status(struct phy_device *phydev)
+{
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ switch (phydev->port) {
+ case PORT_FIBRE:
+ return qca807x_read_fiber_status(phydev);
+ case PORT_TP:
+ return at803x_read_status(phydev);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return at803x_read_status(phydev);
+}
+
+static int qca807x_phy_package_probe_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ unsigned int tx_drive_strength;
+ const char *package_mode_name;
+
+ /* Default to 600mw if not defined */
+ if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt",
+ &tx_drive_strength))
+ tx_drive_strength = 600;
+
+ switch (tx_drive_strength) {
+ case 140:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_140MV;
+ break;
+ case 160:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_160MV;
+ break;
+ case 180:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_180MV;
+ break;
+ case 200:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_200MV;
+ break;
+ case 220:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_220MV;
+ break;
+ case 240:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_240MV;
+ break;
+ case 260:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_260MV;
+ break;
+ case 280:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_280MV;
+ break;
+ case 300:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_300MV;
+ break;
+ case 320:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_320MV;
+ break;
+ case 400:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_400MV;
+ break;
+ case 500:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_500MV;
+ break;
+ case 600:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_600MV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ priv->package_mode = PHY_INTERFACE_MODE_NA;
+ if (!of_property_read_string(shared->np, "qcom,package-mode",
+ &package_mode_name)) {
+ if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_PSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_PSGMII;
+ else if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_QSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_QSGMII;
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
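+/* Example (hypothetical) package node exercising the two properties parsed
+ * above:
+ *
+ *	ethernet-phy-package {
+ *		qcom,package-mode = "qsgmii";
+ *		qcom,tx-drive-strength-milliwatt = <300>;
+ *	};
+ */
+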
+static int qca807x_phy_package_config_init_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ int val, ret;
+
+ /* Make sure the PHY follows the PHY package mode if one is enforced */
+ if (priv->package_mode != PHY_INTERFACE_MODE_NA &&
+ phydev->interface != priv->package_mode)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* Set correct PHY package mode */
+ val = __phy_package_read(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION);
+ val &= ~QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK;
+ /* package_mode can be QSGMII or PSGMII and we validate
+ * this in probe_once.
+ * With package_mode set to NA, we default to PSGMII.
+ */
+ switch (priv->package_mode) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_PSGMII:
+ default:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER;
+ }
+ ret = __phy_package_write(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION, val);
+ if (ret)
+ goto exit;
+
+ /* After a mode change, a serdes reset is required */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val &= ~PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ msleep(SERDES_RESET_SLEEP);
+
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val |= PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ /* Workaround to enable AZ transmitting ability */
+ val = __phy_package_read_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL);
+ val &= ~PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK;
+ ret = __phy_package_write_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL, val);
+ if (ret)
+ goto exit;
+
+ /* Set PQSGMII TX AMP strength */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1);
+ val &= ~PQSGMII_TX_DRIVER_MASK;
+ val |= FIELD_PREP(PQSGMII_TX_DRIVER_MASK, priv->tx_drive_strength);
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1, val);
+ if (ret)
+ goto exit;
+
+ /* Prevent PSGMII going into hibernation via PSGMII self test */
+ val = __phy_package_read_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL);
+ val &= ~BIT(1);
+ ret = __phy_package_write_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL, val);
+
+exit:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ phy_interface_t iface;
+ int ret;
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+
+ sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
+ iface = sfp_select_interface(phydev->sfp_bus, support);
+
+ dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_100BASEX:
+ /* Set PHY mode to PSGMII combo (1/4 copper + combo ports) */
+ ret = phy_modify(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK,
+ QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER);
+ /* Enable fiber mode autodetection (1000Base-X or 100Base-FX) */
+ ret = phy_set_bits_mmd(phydev,
+ MDIO_MMD_AN,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN);
+ /* Select fiber page */
+ ret = phy_clear_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_FIBRE;
+ break;
+ default:
+ dev_err(&phydev->mdio.dev, "Incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void qca807x_sfp_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+
+ /* Select copper page */
+ phy_set_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_TP;
+}
+
+static const struct sfp_upstream_ops qca807x_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = qca807x_sfp_insert,
+ .module_remove = qca807x_sfp_remove,
+};
+
+static int qca807x_probe(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct qca807x_shared_priv *shared_priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct phy_package_shared *shared;
+ struct qca807x_priv *priv;
+ int ret;
+
+ ret = devm_of_phy_package_join(dev, phydev, sizeof(*shared_priv));
+ if (ret)
+ return ret;
+
+ if (phy_package_probe_once(phydev)) {
+ ret = qca807x_phy_package_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ shared = phydev->shared;
+ shared_priv = shared->priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dac_full_amplitude = of_property_read_bool(node, "qcom,dac-full-amplitude");
+ priv->dac_full_bias_current = of_property_read_bool(node, "qcom,dac-full-bias-current");
+ priv->dac_disable_bias_current_tweak = of_property_read_bool(node,
+ "qcom,dac-disable-bias-current-tweak");
+
+#if IS_ENABLED(CONFIG_GPIOLIB)
+ /* Make sure the leds node and the gpio-controller property are not
+ * both present, to prevent registering LEDs whose pins are also
+ * claimed for GPIO usage.
+ */
+ if (of_find_property(node, "leds", NULL) &&
+ of_find_property(node, "gpio-controller", NULL)) {
+ phydev_err(phydev, "Invalid property detected. LEDs and gpio-controller are mutually exclusive.");
+ return -EINVAL;
+ }
+
+ /* Do not register a GPIO controller unless flagged for it */
+ if (of_property_read_bool(node, "gpio-controller")) {
+ ret = qca807x_gpio(phydev);
+ if (ret)
+ return ret;
+ }
+#endif
+
+ /* Attach SFP bus on combo port */
+ if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
+ ret = phy_sfp_probe(phydev, &qca807x_sfp_ops);
+ if (ret)
+ return ret;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+ }
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca807x_config_init(struct phy_device *phydev)
+{
+ struct qca807x_priv *priv = phydev->priv;
+ u16 control_dac;
+ int ret;
+
+ if (phy_package_init_once(phydev)) {
+ ret = qca807x_phy_package_config_init_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ control_dac = phy_read_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH);
+ control_dac &= ~QCA807X_CONTROL_DAC_MASK;
+ if (!priv->dac_full_amplitude)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
+ if (!priv->dac_full_amplitude)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
+ if (!priv->dac_disable_bias_current_tweak)
+ control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
+ return phy_write_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH,
+ control_dac);
+}
+
+static struct phy_driver qca807x_drivers[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072),
+ .name = "Qualcomm QCA8072",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
+ .name = "Qualcomm QCA8075",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .led_brightness_set = qca807x_led_brightness_set,
+ .led_blink_set = qca807x_led_blink_set,
+ .led_hw_is_supported = qca807x_led_hw_is_supported,
+ .led_hw_control_set = qca807x_led_hw_control_set,
+ .led_hw_control_get = qca807x_led_hw_control_get,
+ },
+};
+module_phy_driver(qca807x_drivers);
+
+static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
+ { }
+};
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Qualcomm QCA807x PHY driver");
+MODULE_DEVICE_TABLE(mdio, qca807x_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
new file mode 100644
index 000000000000..5048304ccc9e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+/* ADC threshold */
+#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
+#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
+#define QCA808X_ADC_THRESHOLD_80MV 0
+#define QCA808X_ADC_THRESHOLD_100MV 0xf0
+#define QCA808X_ADC_THRESHOLD_200MV 0x0f
+#define QCA808X_ADC_THRESHOLD_300MV 0xff
+
+/* CLD control */
+#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
+#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
+#define QCA808X_8023AZ_AFE_EN 0x90
+
+/* AZ control */
+#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
+#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
+#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
+#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
+#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
+#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
+#define QCA808X_TOP_OPTION1_DATA 0x0
+
+#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
+#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
+#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
+#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
+#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
+#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
+#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
+#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
+#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
+#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
+#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
+#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
+
+/* master/slave seed config */
+#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
+#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
+#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
+#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
+
+/* Hibernation yields lower power consumption than normal operation mode.
+ * When the copper cable is unplugged, the PHY enters hibernation mode in about 10s.
+ */
+#define QCA808X_DBG_AN_TEST 0xb
+#define QCA808X_HIBERNATION_EN BIT(15)
+
+#define QCA808X_MMD7_LED2_CTRL 0x8074
+#define QCA808X_MMD7_LED2_FORCE_CTRL 0x8075
+#define QCA808X_MMD7_LED1_CTRL 0x8076
+#define QCA808X_MMD7_LED1_FORCE_CTRL 0x8077
+#define QCA808X_MMD7_LED0_CTRL 0x8078
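+/* LED ctrl registers are laid out in descending order: LED0 at
+ * 0x8078, with each following LED 2 registers below the previous.
+ */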
+#define QCA808X_MMD7_LED_CTRL(x) (0x8078 - ((x) * 2))
+
+#define QCA808X_MMD7_LED0_FORCE_CTRL 0x8079
+#define QCA808X_MMD7_LED_FORCE_CTRL(x) (0x8079 - ((x) * 2))
+
+#define QCA808X_MMD7_LED_POLARITY_CTRL 0x901a
+/* QSDK sets this reg to 0x46 by default, where BIT 6 sets the
+ * LEDs active high. It's not clear what BIT 3 and BIT 4 do.
+ */
+#define QCA808X_LED_ACTIVE_HIGH BIT(6)
+
+/* QCA808X 1G chip type */
+#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
+#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
+
+#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
+#define QCA8081_PHY_FIFO_RSTN BIT(11)
+
+#define QCA8081_PHY_ID 0x004dd101
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct qca808x_priv {
+ int led_polarity_mode;
+};
+
+static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable fast retrain */
+ ret = genphy_c45_fast_retrain(phydev, true);
+ if (ret)
+ return ret;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+ QCA808X_TOP_OPTION1_DATA);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+ QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+ QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+ QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+ QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+ QCA808X_MMD3_DEBUG_1_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+ QCA808X_MMD3_DEBUG_4_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+ QCA808X_MMD3_DEBUG_5_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+ QCA808X_MMD3_DEBUG_3_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+ QCA808X_MMD3_DEBUG_6_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+ QCA808X_MMD3_DEBUG_2_VALUE);
+
+ return 0;
+}
+
+static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+{
+ u16 seed_value;
+
+ if (!enable)
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
+
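+	/* Pick a random seed below the allowed range; a lower seed
+	 * makes the PHY more likely to resolve as slave.
+	 */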
+ seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
+ FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
+ QCA808X_MASTER_SLAVE_SEED_ENABLE);
+}
+
+static bool qca808x_is_prefer_master(struct phy_device *phydev)
+{
+ return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
+ (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
+}
+
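+/* Fast retrain and the slave seed config only apply to the 2.5G
+ * capable variant of the PHY.
+ */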
+static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+{
+ return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+}
+
+static bool qca808x_is_1g_only(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
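+	/* If the chip type can't be read, conservatively assume 1G only */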
+ if (ret < 0)
+ return true;
+
+ return !!(QCA808X_PHY_CHIP_TYPE_1G & ret);
+}
+
+static void qca808x_fill_possible_interfaces(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII, possible);
+
+ if (!qca808x_is_1g_only(phydev))
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, possible);
+}
+
+static int qca808x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca808x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+	/* Init LED polarity mode to -1 (no polarity requested yet) */
+ priv->led_polarity_mode = -1;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca808x_config_init(struct phy_device *phydev)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Default to LED Active High if active-low not in DT */
+ if (priv->led_polarity_mode == -1) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH);
+ if (ret)
+ return ret;
+ }
+
+	/* Activate ADC and VGA on 802.3az for the 1000M and 100M links */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+ QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ if (ret)
+ return ret;
+
+	/* Adjust the 802.3az threshold for the 1000M link */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+ QCA808X_MMD3_AZ_TRAINING_VAL);
+ if (ret)
+ return ret;
+
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+		/* Configure fast retrain for the 2500M link */
+ ret = qca808x_phy_fast_retrain_config(phydev);
+ if (ret)
+ return ret;
+
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (!qca808x_is_prefer_master(phydev)) {
+			/* Enable seed and configure a lower random seed to make
+			 * the PHY link up in slave mode.
+ */
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ qca808x_fill_possible_interfaces(phydev);
+
+	/* Configure the ADC threshold as 100mV for the 10M link */
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+ QCA808X_ADC_THRESHOLD_MASK,
+ QCA808X_ADC_THRESHOLD_100MV);
+}
+
+static int qca808x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (ret < 0)
+ return ret;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+ ret & MDIO_AN_10GBT_STAT_LP2_5G);
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+	/* qca8081 uses different bits for the speed value than at803x */
+ ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+ ret = at803x_read_specific_status(phydev, ss_mask);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+		/* Generate a lower random seed to make the PHY link up as
+		 * SLAVE easily, except when a master/slave configuration
+		 * fault is detected or master mode is preferred.
+		 *
+		 * The reason for not putting this code into link_change_notify
+		 * is the corner case where the link partner is also a qca8081
+		 * PHY and the seed is configured to the same value: the link
+		 * can't come up and no link change occurs.
+ */
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+ qca808x_is_prefer_master(phydev)) {
+ qca808x_phy_ms_seed_enable(phydev, false);
+ } else {
+ qca808x_phy_ms_seed_enable(phydev, true);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qca808x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
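+	/* The reset may clear the seed config, so re-arm it here */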
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev))
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+
+ return ret;
+}
+
+static int qca808x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+	/* Perform CDT with the following configs:
+	 * 1. disable hibernation.
+	 * 2. force PHY working in MDI mode.
+	 * 3. force PHY working in 1000BaseT.
+ * 4. configure the threshold.
+ */
+
+ ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_config_mdix(phydev, ETH_TP_MDI);
+ if (ret < 0)
+ return ret;
+
+	/* Forcing 1000base-T requires configuring both PMA/PMD and MII_BMCR */
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* configure the thresholds for open, short, pair ok test */
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
+
+ return 0;
+}
+
+static int qca808x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+	/* The autoneg ability is not advertised in bit3 of MMD7.1,
+ * but it is supported by qca808x PHY, so we add it here
+ * manually.
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+	/* On the qca8081 1G version chip, the 2500baseT ability is still
+	 * reported in bit0 of MMD1.21, so we need to remove it manually if
+	 * bit0 of MMD7.0x901d indicates the qca8081 1G chip.
+ */
+ if (qca808x_is_1g_only(phydev))
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int qca808x_config_aneg(struct phy_device *phydev)
+{
+ int phy_ctrl = 0;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+	/* The MII_BMCR reg also needs to be configured for force mode, so
+	 * the __genphy_config_aneg call below is still needed.
+ */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ genphy_c45_pma_setup_forced(phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+ phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
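+	/* phy_modify_mmd_changed() returns 1 if the advertisement changed;
+	 * pass this on so autoneg is restarted if needed.
+	 */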
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return __genphy_config_aneg(phydev, ret);
+}
+
+static void qca808x_link_change_notify(struct phy_device *phydev)
+{
+	/* Assert the interface SGMII FIFO on link down, deassert it on link
+	 * up. The interface device address is always the PHY address plus 1.
+ */
+ mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+ MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+ QCA8081_PHY_FIFO_RSTN,
+ phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+}
+
+static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED2500_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+	/* Enable BLINK_CHECK_BYPASS by default to make the LED blink
+	 * even when the duplex or speed condition doesn't match.
+ */
+ *offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 2)
+ return -EINVAL;
+
+ return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, offload_trigger = 0;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca808x_led_hw_control_enable(phydev, index);
+ if (ret)
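+	/* Same-pair and cross-pair short lengths are reported in
+	 * different halves of the diag register.
+	 */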
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK,
+ offload_trigger);
+}
+
+static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return false;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 2)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca808x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_SPEED2500_ON)
+ set_bit(TRIGGER_NETDEV_LINK_2500, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
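+	/* Start CDT and poll until the enable bit self-clears */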
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK);
+}
+
+static int qca808x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ if (!value) {
+ ret = qca808x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ bool active_low = false;
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ active_low = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* PHY polarity is global and can't be set per LED.
+	 * To detect this, check if the last requested polarity mode
+	 * matches the new one.
+ */
+ if (priv->led_polarity_mode >= 0 &&
+ priv->led_polarity_mode != active_low) {
+ phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
+ return -EINVAL;
+ }
+
+ /* Save the last PHY polarity mode */
+ priv->led_polarity_mode = active_low;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH,
+ active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+}
+
+static struct phy_driver qca808x_driver[] = {
+{
+ /* Qualcomm QCA8081 */
+ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = qca808x_probe,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .get_features = qca808x_get_features,
+ .config_aneg = qca808x_config_aneg,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = qca808x_read_status,
+ .config_init = qca808x_config_init,
+ .soft_reset = qca808x_soft_reset,
+ .cable_test_start = qca808x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .link_change_notify = qca808x_link_change_notify,
+ .led_brightness_set = qca808x_led_brightness_set,
+ .led_blink_set = qca808x_led_blink_set,
+ .led_hw_is_supported = qca808x_led_hw_is_supported,
+ .led_hw_control_set = qca808x_led_hw_control_set,
+ .led_hw_control_get = qca808x_led_hw_control_get,
+ .led_polarity_set = qca808x_led_polarity_set,
+}, };
+
+module_phy_driver(qca808x_driver);
+
+static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca808x_tbl);
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
new file mode 100644
index 000000000000..5d083ef0250e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+#define AT803X_DEBUG_REG_3C 0x3C
+
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
+
+#define MDIO_AZ_DEBUG 0x800D
+
+#define QCA8327_A_PHY_ID 0x004dd033
+#define QCA8327_B_PHY_ID 0x004dd034
+#define QCA8337_PHY_ID 0x004dd036
+#define QCA8K_PHY_ID_MASK 0xffffffff
+
+#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
+
+static struct at803x_hw_stat qca83xx_hw_stats[] = {
+ { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+ { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+};
+
+struct qca83xx_priv {
+ u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+};
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA83XX PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
+static int qca83xx_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(qca83xx_hw_stats);
+}
+
+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+{
+ struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+ struct qca83xx_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
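+	/* Accumulate the hw counter into the driver stats; report
+	 * U64_MAX if the read failed.
+	 */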
+ if (stat.access_type == MMD)
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & stat.mask;
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void qca83xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+ data[i] = qca83xx_get_stat(phydev, i);
+}
+
+static int qca83xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca83xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca83xx_config_init(struct phy_device *phydev)
+{
+ u8 switch_revision;
+
+ switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+
+ switch (switch_revision) {
+ case 1:
+ /* For 100M waveform */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
+ /* Turn on Gigabit clock */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
+ break;
+
+ case 2:
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+ fallthrough;
+ case 4:
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+ break;
+ }
+
+	/* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static int qca8327_config_init(struct phy_device *phydev)
+{
+	/* QCA8327 requires the 100m DAC amplitude adjustment set to +6%.
+	 * Disable it on init and enable it only at 100m speed, following
+	 * the original QCA source code.
+ */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ return qca83xx_config_init(phydev);
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+	/* Set DAC amplitude adjustment to +6% for 100m while the link is running */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+ /* Reinit the port, reset values set by suspend */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+	/* On resume from suspend the switch executes a reset and
+	 * restarts auto-negotiation. Wait for reset to complete.
+ */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
+ return 0;
+}
+
+static int qca8337_suspend(struct phy_device *phydev)
+{
+	/* Only QCA8337 supports actual suspend. */
+ genphy_suspend(phydev);
+
+ return qca83xx_suspend(phydev);
+}
+
+static int qca8327_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+	/* QCA8327 causes port unreliability when PHY suspend
+ * is set.
+ */
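+	/* Clear all MII_BMCR bits except speed/duplex selection rather
+	 * than setting the power-down bit.
+	 */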
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+
+ return qca83xx_suspend(phydev);
+}
+
+static struct phy_driver qca83xx_driver[] = {
+{
+ /* QCA8337 */
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8337 internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8337_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-A from switch QCA8327-AL1A */
+ .phy_id = QCA8327_A_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-A internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-B from switch QCA8327-BL1A */
+ .phy_id = QCA8327_B_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-B internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, };
+
+module_phy_driver(qca83xx_driver);
+
+static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca83xx_tbl);
diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c
new file mode 100644
index 000000000000..d28815ef56bb
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom-phy-lib.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "qcom.h"
+
+MODULE_DESCRIPTION("Qualcomm PHY driver Common Functions");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
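+/* Debug registers are accessed indirectly: the register number is
+ * written to AT803X_DEBUG_ADDR, then the value is read or written
+ * via AT803X_DEBUG_DATA.
+ */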
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_read(phydev, AT803X_DEBUG_DATA);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_read);
+
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set)
+{
+ u16 val;
+ int ret;
+
+ ret = at803x_debug_reg_read(phydev, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret & 0xffff;
+ val &= ~clear;
+ val |= set;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, val);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_mask);
+
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, data);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_write);
+
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret, irq_enabled;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ unsigned int i;
+ static const unsigned int offsets[] = {
+ AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+ AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+ AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+ };
+
+ if (!ndev)
+ return -ENODEV;
+
+ mac = (const u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
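+		/* Program the MAC address 16 bits per register, most
+		 * significant word first.
+		 */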
+ for (i = 0; i < 3; i++)
+ phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ if (ret)
+ return ret;
+ } else {
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear WOL status */
+ ret = phy_read(phydev, AT803X_INTR_STATUS);
+ if (ret < 0)
+ return ret;
+
+	/* Check if interrupts other than WOL were triggered while the PHY
+	 * is in interrupt mode; only the interrupts enabled by
+	 * AT803X_INTR_ENABLE can be passed up to the interrupt pin.
+ */
+ irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (irq_enabled < 0)
+ return irq_enabled;
+
+ irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
+ if (ret & irq_enabled && !phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_set_wol);
+
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int value;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (value < 0)
+ return;
+
+ if (value & AT803X_INTR_ENABLE_WOL)
+ wol->wolopts |= WAKE_MAGIC;
+}
+EXPORT_SYMBOL_GPL(at803x_get_wol);
+
+int at803x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, AT803X_INTR_STATUS);
+
+ return (err < 0) ? err : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_ack_interrupt);
+
+int at803x_config_intr(struct phy_device *phydev)
+{
+ int err;
+ int value;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
+ value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
+ value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
+ value |= AT803X_INTR_ENABLE_LINK_FAIL;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+
+ err = phy_write(phydev, AT803X_INTR_ENABLE, value);
+ } else {
+ err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(at803x_config_intr);
+
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, int_enabled;
+
+ irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (int_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* See if this was one of our enabled interrupts */
+ if (!(irq_status & int_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(at803x_handle_interrupt);
+
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask)
+{
+ int ss;
+
+ /* Read the AT8035 PHY-Specific Status register, which indicates the
+ * speed and duplex that the PHY is actually using, irrespective of
+ * whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ int sfc, speed;
+
+ sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
+ if (sfc < 0)
+ return sfc;
+
+ speed = ss & ss_mask.speed_mask;
+ speed >>= ss_mask.speed_shift;
+
+ switch (speed) {
+ case AT803X_SS_SPEED_10:
+ phydev->speed = SPEED_10;
+ break;
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case QCA808X_SS_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (ss & AT803X_SS_MDIX)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+ case AT803X_SFC_MANUAL_MDI:
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ break;
+ case AT803X_SFC_MANUAL_MDIX:
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case AT803X_SFC_AUTOMATIC_CROSSOVER:
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_specific_status);
+
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = AT803X_SFC_MANUAL_MDI;
+ break;
+ case ETH_TP_MDI_X:
+ val = AT803X_SFC_MANUAL_MDIX;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = AT803X_SFC_AUTOMATIC_CROSSOVER;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
+ AT803X_SFC_MDI_CROSSOVER_MODE_M,
+ FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+}
+EXPORT_SYMBOL_GPL(at803x_config_mdix);
+
+int at803x_prepare_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+
+	/* Changes to the mdix bits are disruptive to the normal operation;
+ * therefore any changes to these registers must be followed by a
+ * software reset to take effect.
+ */
+ if (ret == 1) {
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_prepare_config_aneg);
+
+int at803x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+ err = at803x_read_specific_status(phydev, ss_mask);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_status);
+
+static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+{
+ int val;
+
+ val = phy_read(phydev, AT803X_SMART_SPEED);
+ if (val < 0)
+ return val;
+
+ if (val & AT803X_SMART_SPEED_ENABLE)
+ *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+ else
+ *d = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
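+	/* The retry limit field encodes the downshift count minus 2 */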
+ switch (cnt) {
+ case DOWNSHIFT_DEV_DEFAULT_COUNT:
+ cnt = AT803X_DEFAULT_DOWNSHIFT;
+ fallthrough;
+ case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+ set = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER |
+ FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
+ mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+ break;
+ case DOWNSHIFT_DEV_DISABLE:
+ set = 0;
+ mask = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+
+ /* After changing the smart speed settings, we need to perform a
+	 * software reset; use phy_init_hw() to make sure we reapply any
+	 * values which might have been lost during the software reset.
+ */
+ if (ret == 1)
+ ret = phy_init_hw(phydev);
+
+ return ret;
+}
+
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_get_tunable);
+
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_set_tunable);
+
+int at803x_cdt_fault_length(int dt)
+{
+ /* According to the datasheet the distance to the fault is
+ * DELTA_TIME * 0.824 meters.
+ *
+	 * The author suspects the correct formula is:
+ *
+ * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+ *
+ * where c is the speed of light, VF is the velocity factor of
+	 * the twisted pair cable, 125MHz is the counter frequency and
+ * we need to divide by 2 because the hardware will measure the
+ * round trip time to the fault and back to the PHY.
+ *
+ * With a VF of 0.69 we get the factor 0.824 mentioned in the
+ * datasheet.
+ */
+ return (dt * 824) / 10;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_fault_length);
+
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start)
+{
+ return phy_write(phydev, AT803X_CDT, cdt_start);
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_start);
+
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en)
+{
+ int val, ret;
+
+ /* One test run takes about 25ms */
+ ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+ !(val & cdt_en),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_wait_for_completion);
+
+static bool qca808x_cdt_fault_length_valid(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int qca808x_cable_test_result_trans(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ case QCA808X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+ int result)
+{
+ int val;
+ u32 cdt_length_reg = 0;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+ if (val < 0)
+ return val;
+
+ if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+ else
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+
+ return at803x_cdt_fault_length(val);
+}
+
+static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ u16 status)
+{
+ int length, result;
+ u16 pair_code;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ result = qca808x_cable_test_result_trans(pair_code);
+ ethnl_cable_test_result(phydev, pair, result);
+
+ if (qca808x_cdt_fault_length_valid(pair_code)) {
+ length = qca808x_cdt_fault_length(phydev, pair, result);
+ ethnl_cable_test_fault_length(phydev, pair, length);
+ }
+
+ return 0;
+}
+
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ int ret, val;
+
+ *finished = false;
+
+ val = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ if (ret)
+ return ret;
+
+ *finished = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_cable_test_get_status);
+
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_enable);
+
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ return !(val & QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_status);
+
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | (value ? QCA808X_LED_FORCE_ON :
+ QCA808X_LED_FORCE_OFF));
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_brightness_set);
+
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int ret;
+
+ /* Set blink to 50% off, 50% on at 4Hz by default */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
+ QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
+ QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
+ if (ret)
+ return ret;
+
+ /* We use BLINK_1 for normal blinking */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
+ if (ret)
+ return ret;
+
+	/* We set blink to 4Hz, aka a 250ms period */
+ *delay_on = 250 / 2;
+ *delay_off = 250 / 2;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_blink_set);
diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h
new file mode 100644
index 000000000000..4bb541728846
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
+#define AT803X_SFC_ASSERT_CRS BIT(11)
+#define AT803X_SFC_FORCE_LINK BIT(10)
+#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
+#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
+#define AT803X_SFC_MANUAL_MDIX 0x1
+#define AT803X_SFC_MANUAL_MDI 0x0
+#define AT803X_SFC_SQE_TEST BIT(2)
+#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
+#define AT803X_SFC_DISABLE_JABBER BIT(0)
+
+#define AT803X_SPECIFIC_STATUS 0x11
+#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
+#define AT803X_SS_SPEED_1000 2
+#define AT803X_SS_SPEED_100 1
+#define AT803X_SS_SPEED_10 0
+#define AT803X_SS_DUPLEX BIT(13)
+#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
+#define AT803X_SS_MDIX BIT(6)
+
+#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
+#define QCA808X_SS_SPEED_2500 4
+
+#define AT803X_INTR_ENABLE 0x12
+#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
+#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
+#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
+#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
+#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
+#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
+#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
+#define AT803X_INTR_ENABLE_WOL BIT(0)
+
+#define AT803X_INTR_STATUS 0x13
+
+#define AT803X_SMART_SPEED 0x14
+#define AT803X_SMART_SPEED_ENABLE BIT(5)
+#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
+#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
+
+#define AT803X_CDT 0x16
+#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
+#define AT803X_CDT_ENABLE_TEST BIT(0)
+#define AT803X_CDT_STATUS 0x1c
+#define AT803X_CDT_STATUS_STAT_NORMAL 0
+#define AT803X_CDT_STATUS_STAT_SHORT 1
+#define AT803X_CDT_STATUS_STAT_OPEN 2
+#define AT803X_CDT_STATUS_STAT_FAIL 3
+#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
+#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
+
+#define QCA808X_CDT_ENABLE_TEST BIT(15)
+#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_STATUS BIT(11)
+#define QCA808X_CDT_LENGTH_UNIT BIT(10)
+
+#define QCA808X_MMD3_CDT_STATUS 0x8064
+#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
+#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
+#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
+#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
+#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
+#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
+
+#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
+#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
+#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
+#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
+
+#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+
+#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+
+/* NORMAL is MDI with the type field set to 0 */
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+
+/* Listed for reference; the busy state should already be handled by wait_for_completion */
+#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
+
+#define QCA808X_MMD7_LED_GLOBAL 0x8073
+#define QCA808X_LED_BLINK_1 GENMASK(11, 6)
+#define QCA808X_LED_BLINK_2 GENMASK(5, 0)
+/* Values are the same for both BLINK_1 and BLINK_2 */
+#define QCA808X_LED_BLINK_FREQ_MASK GENMASK(5, 3)
+#define QCA808X_LED_BLINK_FREQ_2HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
+#define QCA808X_LED_BLINK_FREQ_4HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
+#define QCA808X_LED_BLINK_FREQ_8HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
+#define QCA808X_LED_BLINK_FREQ_16HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
+#define QCA808X_LED_BLINK_FREQ_32HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
+#define QCA808X_LED_BLINK_FREQ_64HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
+#define QCA808X_LED_BLINK_FREQ_128HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
+#define QCA808X_LED_BLINK_FREQ_256HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
+#define QCA808X_LED_BLINK_DUTY_MASK GENMASK(2, 0)
+#define QCA808X_LED_BLINK_DUTY_50_50 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
+#define QCA808X_LED_BLINK_DUTY_75_25 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
+#define QCA808X_LED_BLINK_DUTY_25_75 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
+#define QCA808X_LED_BLINK_DUTY_33_67 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
+#define QCA808X_LED_BLINK_DUTY_67_33 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
+#define QCA808X_LED_BLINK_DUTY_17_83 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
+#define QCA808X_LED_BLINK_DUTY_83_17 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
+#define QCA808X_LED_BLINK_DUTY_8_92 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
+
+/* LED hw control pattern is the same for every LED */
+#define QCA808X_LED_PATTERN_MASK GENMASK(15, 0)
+#define QCA808X_LED_SPEED2500_ON BIT(15)
+#define QCA808X_LED_SPEED2500_BLINK BIT(14)
+/* Follow blink trigger even if duplex or speed condition doesn't match */
+#define QCA808X_LED_BLINK_CHECK_BYPASS BIT(13)
+#define QCA808X_LED_FULL_DUPLEX_ON BIT(12)
+#define QCA808X_LED_HALF_DUPLEX_ON BIT(11)
+#define QCA808X_LED_TX_BLINK BIT(10)
+#define QCA808X_LED_RX_BLINK BIT(9)
+#define QCA808X_LED_TX_ON_10MS BIT(8)
+#define QCA808X_LED_RX_ON_10MS BIT(7)
+#define QCA808X_LED_SPEED1000_ON BIT(6)
+#define QCA808X_LED_SPEED100_ON BIT(5)
+#define QCA808X_LED_SPEED10_ON BIT(4)
+#define QCA808X_LED_COLLISION_BLINK BIT(3)
+#define QCA808X_LED_SPEED1000_BLINK BIT(2)
+#define QCA808X_LED_SPEED100_BLINK BIT(1)
+#define QCA808X_LED_SPEED10_BLINK BIT(0)
+
+/* LED force ctrl is the same for every LED.
+ * No documentation exists for this, not even an internal one
+ * under NDA, as QCOM only provides info about configuring the
+ * hw control pattern rules and doesn't indicate any way
+ * to force the LED into a specific mode.
+ * These defines come from reverse engineering and testing, so
+ * some info may be missing or not entirely correct.
+ * For basic LED control and hw control these findings are
+ * enough to support LED control in all the required APIs.
+ *
+ * A comparison with the qca807x implementation showed a 1:1
+ * match, confirming the reverse engineering done. It also
+ * revealed further details of the force mode and the blink
+ * modes.
+ */
+#define QCA808X_LED_FORCE_EN BIT(15)
+#define QCA808X_LED_FORCE_MODE_MASK GENMASK(14, 13)
+#define QCA808X_LED_FORCE_BLINK_1 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
+#define QCA808X_LED_FORCE_BLINK_2 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
+#define QCA808X_LED_FORCE_ON FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
+#define QCA808X_LED_FORCE_OFF FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
+
+#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
+#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
+#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
+
+#define AT803X_DEBUG_ADDR 0x1D
+#define AT803X_DEBUG_DATA 0x1E
+
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
+#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
+
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
+#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
+
+#define AT803X_DEFAULT_DOWNSHIFT 5
+#define AT803X_MIN_DOWNSHIFT 2
+#define AT803X_MAX_DOWNSHIFT 9
+
+enum stat_access_type {
+ PHY,
+ MMD
+};
+
+struct at803x_hw_stat {
+ const char *string;
+ u8 reg;
+ u32 mask;
+ enum stat_access_type access_type;
+};
+
+struct at803x_ss_mask {
+ u16 speed_mask;
+ u8 speed_shift;
+};
+
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set);
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+int at803x_ack_interrupt(struct phy_device *phydev);
+int at803x_config_intr(struct phy_device *phydev);
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev);
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask);
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl);
+int at803x_prepare_config_aneg(struct phy_device *phydev);
+int at803x_read_status(struct phy_device *phydev);
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data);
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data);
+int at803x_cdt_fault_length(int dt);
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start);
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en);
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished);
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg);
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg);
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value);
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 894172a3e15f..1fa70427b2a2 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -57,14 +57,6 @@
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
-#define RTL_SUPPORTS_5000FULL BIT(14)
-#define RTL_SUPPORTS_2500FULL BIT(13)
-#define RTL_SUPPORTS_10000FULL BIT(0)
-#define RTL_ADV_2500FULL BIT(7)
-#define RTL_LPADV_10000FULL BIT(11)
-#define RTL_LPADV_5000FULL BIT(6)
-#define RTL_LPADV_2500FULL BIT(5)
-
#define RTL9000A_GINMR 0x14
#define RTL9000A_GINMR_LINK_STATUS BIT(4)
@@ -421,9 +413,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
ERR_PTR(ret));
return ret;
}
+
+ return genphy_soft_reset(phydev);
}
- return genphy_soft_reset(phydev);
+ return 0;
}
static int rtl821x_suspend(struct phy_device *phydev)
@@ -674,11 +668,11 @@ static int rtl822x_get_features(struct phy_device *phydev)
return val;
linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_2500FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_2_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_5000FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_10000FULL);
+ phydev->supported, val & MDIO_SPEED_10G);
return genphy_read_abilities(phydev);
}
@@ -688,14 +682,12 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
int ret = 0;
if (phydev->autoneg == AUTONEG_ENABLE) {
- u16 adv2500 = 0;
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->advertising))
- adv2500 = RTL_ADV_2500FULL;
+ u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
- RTL_ADV_2500FULL, adv2500);
+ MDIO_AN_10GBT_CTRL_ADV2_5G |
+ MDIO_AN_10GBT_CTRL_ADV5G,
+ adv);
if (ret < 0)
return ret;
}
@@ -713,12 +705,8 @@ static int rtl822x_read_status(struct phy_device *phydev)
if (lpadv < 0)
return lpadv;
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_10000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_5000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpadv);
}
ret = genphy_read_status(phydev);
@@ -736,7 +724,7 @@ static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
val = phy_read(phydev, 0x13);
phy_write(phydev, RTL821x_PAGE_SELECT, 0);
- return val >= 0 && val & RTL_SUPPORTS_2500FULL;
+ return val >= 0 && val & MDIO_PMA_SPEED_2_5G;
}
static int rtlgen_match_phy_device(struct phy_device *phydev)
@@ -1048,6 +1036,16 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc862),
+ .name = "RTL8251B 5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
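
The Realtek hunks above retire the driver-private RTL_SUPPORTS_*/RTL_ADV_*/RTL_LPADV_* macros in favour of the standard Clause 45 bit definitions and helpers from <linux/mdio.h>, whose layout the vendor registers happen to follow. For reference, mii_10gbt_stat_mod_linkmode_lpa_t() is roughly this (simplified from the kernel header):

	static inline void
	mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
	{
		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
				 advertising, lpa & MDIO_AN_10GBT_STAT_LP2_5G);
		linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
				 advertising, lpa & MDIO_AN_10GBT_STAT_LP5G);
		linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
				 advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
	}
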
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7fd9fe6a602b..7b1bc5fcef9b 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -22,7 +22,7 @@
struct gmii2rgmii {
struct phy_device *phy_dev;
- struct phy_driver *phy_drv;
+ const struct phy_driver *phy_drv;
struct phy_driver conv_phy_drv;
struct mdio_device *mdio;
};
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 40ce8abe6999..cc7d1113ece0 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1437,4 +1437,5 @@ static int __init plip_init (void)
module_init(plip_init);
module_exit(plip_cleanup_module);
+MODULE_DESCRIPTION("PLIP (parallel port) network module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index db0dc36d12e3..55954594e157 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -1166,5 +1166,6 @@ static void __exit bsdcomp_cleanup(void)
module_init(bsdcomp_init);
module_exit(bsdcomp_cleanup);
+MODULE_DESCRIPTION("PPP BSD-Compress compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 840da924708b..c33c3db3cc08 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -87,6 +87,7 @@ struct asyncppp {
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
+MODULE_DESCRIPTION("PPP async serial channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
@@ -460,6 +461,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, p))
break;
+ if (val > U16_MAX) {
+ err = -EINVAL;
+ break;
+ }
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index e6d48e5c65a3..4d2ff63f2ee2 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -630,6 +630,7 @@ static void __exit deflate_cleanup(void)
module_init(deflate_init);
module_exit(deflate_cleanup);
+MODULE_DESCRIPTION("PPP Deflate compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0193af2d31c9..fe380fe196e7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -295,7 +295,9 @@ static void ppp_setup(struct net_device *dev);
static const struct net_device_ops ppp_netdev_ops;
-static struct class *ppp_class;
+static const struct class ppp_class = {
+ .name = "ppp",
+};
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
@@ -1394,11 +1396,9 @@ static int __init ppp_init(void)
goto out_net;
}
- ppp_class = class_create("ppp");
- if (IS_ERR(ppp_class)) {
- err = PTR_ERR(ppp_class);
+ err = class_register(&ppp_class);
+ if (err)
goto out_chrdev;
- }
err = rtnl_link_register(&ppp_link_ops);
if (err) {
@@ -1407,12 +1407,12 @@ static int __init ppp_init(void)
}
/* not a big deal if we fail here :-) */
- device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+ device_create(&ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
return 0;
out_class:
- class_destroy(ppp_class);
+ class_unregister(&ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
@@ -1607,7 +1607,7 @@ static const struct net_device_ops ppp_netdev_ops = {
.ndo_fill_forward_path = ppp_fill_forward_path,
};
-static struct device_type ppp_type = {
+static const struct device_type ppp_type = {
.name = "ppp",
};
@@ -3549,8 +3549,8 @@ static void __exit ppp_cleanup(void)
pr_err("PPP: removing module but units remain!\n");
rtnl_link_unregister(&ppp_link_ops);
unregister_chrdev(PPP_MAJOR, "ppp");
- device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
- class_destroy(ppp_class);
+ device_destroy(&ppp_class, MKDEV(PPP_MAJOR, 0));
+ class_unregister(&ppp_class);
unregister_pernet_device(&ppp_net_ops);
}
@@ -3604,6 +3604,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+MODULE_DESCRIPTION("Generic PPP layer driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
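
The ppp_class conversion is an instance of the tree-wide move from class_create() to statically defined classes, which removes an allocation and one error path from module init. A minimal sketch of the pattern for a hypothetical character-device module (EXAMPLE_MAJOR and the "example" names are illustrative):

	#include <linux/device.h>
	#include <linux/module.h>

	#define EXAMPLE_MAJOR 240	/* hypothetical major number */

	static const struct class example_class = {
		.name = "example",
	};

	static int __init example_init(void)
	{
		int err;

		err = class_register(&example_class);
		if (err)
			return err;

		/* device node creation now takes the class by address */
		device_create(&example_class, NULL, MKDEV(EXAMPLE_MAJOR, 0),
			      NULL, "example0");
		return 0;
	}

	static void __exit example_exit(void)
	{
		device_destroy(&example_class, MKDEV(EXAMPLE_MAJOR, 0));
		class_unregister(&example_class);
	}
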
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 52d05ce4a281..45bf59ac8f57 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -724,5 +724,6 @@ ppp_sync_cleanup(void)
module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
+MODULE_DESCRIPTION("PPP synchronous TTY channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 8e7238e97d0a..2ea4f4890d23 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
struct sk_buff *skb;
int error = 0;
- if (sk->sk_state & PPPOX_BOUND) {
- error = -EIO;
- goto end;
- }
+ if (sk->sk_state & PPPOX_BOUND)
+ return -EIO;
skb = skb_recv_datagram(sk, flags, &error);
- if (error < 0)
- goto end;
+ if (!skb)
+ return error;
- if (skb) {
- total_len = min_t(size_t, total_len, skb->len);
- error = skb_copy_datagram_msg(skb, 0, m, total_len);
- if (error == 0) {
- consume_skb(skb);
- return total_len;
- }
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_msg(skb, 0, m, total_len);
+ if (error == 0) {
+ consume_skb(skb);
+ return total_len;
}
kfree_skb(skb);
-end:
return error;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f575f225d417..0a44bbdcfb7b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -25,7 +25,6 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
-#include <generated/utsrelease.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -2074,7 +2073,6 @@ static void team_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static int team_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afa5497f7c35..0b3f21cba552 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -54,6 +54,7 @@
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
+#include <linux/math.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
@@ -77,6 +78,7 @@
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
@@ -523,8 +525,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
struct tun_flow_entry *e;
- u32 txq = 0;
- u32 numqueues = 0;
+ u32 txq, numqueues;
numqueues = READ_ONCE(tun->numqueues);
@@ -534,8 +535,7 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
tun_flow_save_rps_rxhash(e, txq);
txq = e->queue_index;
} else {
- /* use multiply and shift instead of expensive divide */
- txq = ((u64)txq * numqueues) >> 32;
+ txq = reciprocal_scale(txq, numqueues);
}
return txq;
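
reciprocal_scale() is the long-standing kernel helper for mapping a 32-bit value into [0, ep_ro) without a division; the open-coded multiply-and-shift removed above is exactly its body. From <linux/kernel.h>, approximately:

	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
	{
		/* (val * ep_ro) >> 32, i.e. val scaled into [0, ep_ro) */
		return (u32)(((u64) val * ep_ro) >> 32);
	}
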
@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ ntfile->xdp_rxq.queue_index = index;
rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
NULL);
@@ -977,20 +978,15 @@ static int tun_net_init(struct net_device *dev)
struct ifreq *ifr = tun->ifr;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
spin_lock_init(&tun->lock);
err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0) {
- free_percpu(dev->tstats);
+ if (err < 0)
return err;
- }
tun_flow_init(tun);
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
@@ -1008,7 +1004,6 @@ static int tun_net_init(struct net_device *dev)
if (err < 0) {
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
- free_percpu(dev->tstats);
return err;
}
return 0;
@@ -1344,7 +1339,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_select_queue = tun_select_queue,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
.ndo_change_carrier = tun_net_change_carrier,
@@ -1630,13 +1624,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
switch (act) {
case XDP_REDIRECT:
err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
- if (err)
+ if (err) {
+ dev_core_stats_rx_dropped_inc(tun->dev);
return err;
+ }
+ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_TX:
err = tun_xdp_tx(tun->dev, xdp);
- if (err < 0)
+ if (err < 0) {
+ dev_core_stats_rx_dropped_inc(tun->dev);
return err;
+ }
+ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_PASS:
break;
@@ -1921,7 +1921,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
@@ -2311,7 +2311,6 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
- free_percpu(dev->tstats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2511,7 +2510,7 @@ build:
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
ret = 0;
goto out;
@@ -3638,12 +3637,22 @@ static int tun_set_coalesce(struct net_device *dev,
return 0;
}
+static void tun_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ channels->combined_count = tun->numqueues;
+ channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
+}
+
static const struct ethtool_ops tun_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_channels = tun_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = tun_get_coalesce,
.set_coalesce = tun_set_coalesce,
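
Several drivers in this series (tun here, usbnet and vsockmon below) stop open-coding per-CPU stats handling: setting dev->pcpu_stat_type before registration makes the core allocate and free dev->tstats and report it through the dev_get_tstats64() fallback, so the driver-side netdev_alloc_pcpu_stats()/free_percpu() calls and the explicit .ndo_get_stats64 hook can all go. A sketch for a hypothetical driver:

	static void example_ndev_setup(struct net_device *dev)
	{
		/* the core now owns allocation, freeing and readout of
		 * the per-CPU tstats for this device
		 */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	}
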
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3fd7dccf0f9c..3c360d4f0635 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -99,6 +99,7 @@ config USB_RTL8150
config USB_RTL8152
tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
+ select PHYLIB
select CRC32
select CRYPTO
select CRYPTO_HASH
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d837c1887416..88e084534853 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -667,7 +667,7 @@ static int ax88179_set_link_ksettings(struct net_device *net,
}
static int
-ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_keee *data)
{
int val;
@@ -676,29 +676,29 @@ ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
MDIO_MMD_PCS);
if (val < 0)
return val;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->supported, val);
/* Get advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->advertised, val);
/* Get LP advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->lp_advertised, val);
return 0;
}
static int
-ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_keee *data)
{
- u16 tmp16 = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ u16 tmp16 = linkmode_to_mii_eee_cap1_t(data->advertised);
return ax88179_phy_write_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, tmp16);
@@ -807,7 +807,7 @@ static void ax88179_enable_eee(struct usbnet *dev)
GMII_PHY_PAGE_SELECT, 2, &tmp16);
}
-static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -818,7 +818,7 @@ static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
return ax88179_ethtool_get_eee(dev, edata);
}
-static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -1587,7 +1587,7 @@ static int ax88179_reset(struct usbnet *dev)
u16 *tmp16;
u8 *tmp;
struct ax88179_data *ax179_data = dev->driver_priv;
- struct ethtool_eee eee_data;
+ struct ethtool_keee eee_data;
tmp16 = (u16 *)buf;
tmp = (u8 *)buf;
@@ -1663,7 +1663,7 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_disable_eee(dev);
ax88179_ethtool_get_eee(dev, &eee_data);
- eee_data.advertised = 0;
+ linkmode_zero(eee_data.advertised);
ax88179_ethtool_set_eee(dev, &eee_data);
/* Restart autoneg */
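
struct ethtool_keee replaces the u32 bitmasks of struct ethtool_eee with full link-mode bitmaps, so EEE register values are translated through the linkmode helpers seen above rather than the old *_to_ethtool_* converters. A small sketch of the round trip, assuming the helpers from <linux/mdio.h>:

	/* register bits -> linkmode bitmap -> register bits */
	static u32 eee_adv_roundtrip_sketch(u16 mdio_an_eee_adv)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};

		mii_eee_cap1_mod_linkmode_t(adv, mdio_an_eee_adv);
		return linkmode_to_mii_eee_cap1_t(adv);
	}
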
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index cd4083e0b3b9..e13e4920ee9b 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -339,7 +339,7 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
goto out;
- is_router = !!in6_dev->cnf.forwarding;
+ is_router = !!READ_ONCE(in6_dev->cnf.forwarding);
in6_dev_put(in6_dev);
/* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 99ec1d4a972d..8b6d6a1b3c2e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
err = dm_read_shared_word(dev, 1, loc, &res);
if (err < 0) {
netdev_err(dev->net, "MDIO read error: %d\n", err);
- return err;
+ return 0;
}
netdev_dbg(dev->net,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f088ea2ba6f3..1aeb36119d3f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2465,7 +2465,7 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
}
}
-static struct device_type hso_type = {
+static const struct device_type hso_type = {
.name = "wwan",
};
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a6d653ff552a..80ee4fcdfb36 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
lan78xx_rx_urb_submit_all(dev);
+ local_bh_disable();
napi_schedule(&dev->napi);
+ local_bh_enable();
}
return 0;
@@ -1673,7 +1675,7 @@ static int lan78xx_set_wol(struct net_device *netdev,
return ret;
}
-static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
@@ -1709,7 +1711,7 @@ exit:
return ret;
}
-static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
int ret;
@@ -3033,7 +3035,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+ dev->chipid == ID_REV_CHIP_ID_7850_) {
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
if (!ret && sig != EEPROM_INDICATOR) {
/* Implies there is no external eeprom. Set mac speed */
@@ -3132,7 +3135,8 @@ static int lan78xx_open(struct net_device *net)
done:
mutex_unlock(&dev->dev_mutex);
- usb_autopm_put_interface(dev->intf);
+ if (ret < 0)
+ usb_autopm_put_interface(dev->intf);
return ret;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0d0672d2a654..6d0dc2c967c4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/phy.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
@@ -891,8 +892,8 @@ struct r8152 {
void (*up)(struct r8152 *tp);
void (*down)(struct r8152 *tp);
void (*unload)(struct r8152 *tp);
- int (*eee_get)(struct r8152 *tp, struct ethtool_eee *eee);
- int (*eee_set)(struct r8152 *tp, struct ethtool_eee *eee);
+ int (*eee_get)(struct r8152 *tp, struct ethtool_keee *eee);
+ int (*eee_set)(struct r8152 *tp, struct ethtool_keee *eee);
bool (*in_nway)(struct r8152 *tp);
void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
@@ -8922,32 +8923,31 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
-static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_set_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
+ u16 val = linkmode_to_mii_eee_cap1_t(eee->advertised);
tp->eee_en = eee->eee_enabled;
tp->eee_adv = val;
@@ -8957,31 +8957,30 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
return 0;
}
-static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8153_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = ocp_reg_read(tp, OCP_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = ocp_reg_read(tp, OCP_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = ocp_reg_read(tp, OCP_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
static int
-rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
@@ -9008,7 +9007,7 @@ out:
}
static int
-rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index a530f20ee257..2fa46baa589e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2105,6 +2105,11 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long) &smsc95xx_info,
},
{
+ /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
+ USB_DEVICE(0x0878, 0x1400),
+ .driver_info = (unsigned long)&smsc95xx_info,
+ },
+ {
/* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
USB_DEVICE(0x184F, 0x0051),
.driver_info = (unsigned long)&smsc95xx_info,
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 143bd4ab160d..57947a5590cc 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = SR9800_EEPROM_LEN;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* LED Setting Rule :
* AABB:CCDD
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2d14b0d78541..e84efa661589 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1633,7 +1633,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
- free_percpu(net->tstats);
free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1645,7 +1644,6 @@ static const struct net_device_ops usbnet_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1654,11 +1652,11 @@ static const struct net_device_ops usbnet_netdev_ops = {
// precondition: never called in_interrupt
-static struct device_type wlan_type = {
+static const struct device_type wlan_type = {
.name = "wlan",
};
-static struct device_type wwan_type = {
+static const struct device_type wwan_type = {
.name = "wwan",
};
@@ -1710,10 +1708,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->rx_speed = SPEED_UNSET;
dev->tx_speed = SPEED_UNSET;
- net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!net->tstats)
- goto out0;
-
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
init_waitqueue_head(&dev->wait);
@@ -1743,6 +1737,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
net->ethtool_ops = &usbnet_ethtool_ops;
+ net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
// allow device-specific bind/init procedures
// NOTE net->name still not usable ...
@@ -1861,8 +1856,6 @@ out1:
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
- free_percpu(net->tstats);
-out0:
free_netdev(net);
out:
return status;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 578e36ea1589..13d902462d8e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -729,80 +729,10 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- u32 size, len, max_head_size, off, truesize, page_offset;
- struct sk_buff *nskb;
- struct page *page;
- int i, head_off;
- void *va;
-
- /* We need a private copy of the skb and data buffers since
- * the ebpf program can modify it. We segment the original skb
- * into order-0 pages without linearize it.
- *
- * Make sure we have enough space for linear and paged area
- */
- max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
- VETH_XDP_HEADROOM);
- if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
- goto drop;
-
- size = min_t(u32, skb->len, max_head_size);
- truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
-
- /* Allocate skb head */
- va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
- if (!va)
- goto drop;
-
- nskb = napi_build_skb(va, truesize);
- if (!nskb) {
- page_pool_free_va(rq->page_pool, va, true);
+ if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
goto drop;
- }
-
- skb_reserve(nskb, VETH_XDP_HEADROOM);
- skb_copy_header(nskb, skb);
- skb_mark_for_recycle(nskb);
-
- if (skb_copy_bits(skb, 0, nskb->data, size)) {
- consume_skb(nskb);
- goto drop;
- }
- skb_put(nskb, size);
- head_off = skb_headroom(nskb) - skb_headroom(skb);
- skb_headers_offset_update(nskb, head_off);
-
- /* Allocate paged area of new skb */
- off = size;
- len = skb->len - off;
-
- for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
- size = min_t(u32, len, PAGE_SIZE);
- truesize = size;
-
- page = page_pool_dev_alloc(rq->page_pool, &page_offset,
- &truesize);
- if (!page) {
- consume_skb(nskb);
- goto drop;
- }
-
- skb_add_rx_frag(nskb, i, page, page_offset, size,
- truesize);
- if (skb_copy_bits(skb, off,
- page_address(page) + page_offset,
- size)) {
- consume_skb(nskb);
- goto drop;
- }
-
- len -= size;
- off += size;
- }
-
- consume_skb(skb);
- skb = nskb;
+ skb = *pskb;
}
/* SKB "head" area always have tailroom for skb_shared_info */
@@ -1208,14 +1138,6 @@ static int veth_enable_xdp(struct net_device *dev)
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
return err;
}
-
- if (!veth_gro_requested(dev)) {
- /* user-space did not require GRO, but adding XDP
- * is supposed to get GRO working
- */
- dev->features |= NETIF_F_GRO;
- netdev_features_change(dev);
- }
}
}
@@ -1235,18 +1157,9 @@ static void veth_disable_xdp(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++)
rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
- if (!netif_running(dev) || !veth_gro_requested(dev)) {
+ if (!netif_running(dev) || !veth_gro_requested(dev))
veth_napi_del(dev);
- /* if user-space did not require GRO, since adding XDP
- * enabled it, clear it now
- */
- if (!veth_gro_requested(dev) && netif_running(dev)) {
- dev->features &= ~NETIF_F_GRO;
- netdev_features_change(dev);
- }
- }
-
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
@@ -1478,7 +1391,8 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
int i;
- priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
+ priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!priv->rq)
return -ENOMEM;
@@ -1494,11 +1408,12 @@ static void veth_free_queues(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
- kfree(priv->rq);
+ kvfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
return veth_alloc_queues(dev);
}
@@ -1530,7 +1445,7 @@ static int veth_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
@@ -1654,6 +1569,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
+ if (!veth_gro_requested(dev)) {
+ /* user-space did not require GRO, but adding
+ * XDP is supposed to get GRO working
+ */
+ dev->features |= NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@@ -1669,6 +1592,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
+ /* if user-space did not require GRO, since adding XDP
+ * enabled it, clear it now
+ */
+ if (!veth_gro_requested(dev)) {
+ dev->features &= ~NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index b1bb1b04b664..a1ba5169ed5d 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -13,19 +13,6 @@
#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
sizeof(struct af_vsockmon_hdr))
-static int vsockmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
- return 0;
-}
-
-static void vsockmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct vsockmon {
struct vsock_tap vt;
};
@@ -59,9 +46,6 @@ static void
vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
-
- stats->tx_packets = 0;
- stats->tx_bytes = 0;
}
static int vsockmon_is_valid_mtu(int new_mtu)
@@ -79,8 +63,6 @@ static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
}
static const struct net_device_ops vsockmon_ops = {
- .ndo_init = vsockmon_dev_init,
- .ndo_uninit = vsockmon_dev_uninit,
.ndo_open = vsockmon_open,
.ndo_stop = vsockmon_close,
.ndo_start_xmit = vsockmon_xmit,
@@ -112,6 +94,7 @@ static void vsockmon_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->mtu = DEFAULT_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
}
static struct rtnl_link_ops vsockmon_link_ops __read_mostly = {
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 16106e088c63..386cbe4d3327 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2855,6 +2855,7 @@ static int vxlan_init(struct net_device *dev)
if (err)
goto err_gro_cells_destroy;
+ netdev_lockdep_set_classes(dev);
return 0;
err_gro_cells_destroy:
@@ -3253,7 +3254,7 @@ static const struct net_device_ops vxlan_netdev_raw_ops = {
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type vxlan_type = {
+static const struct device_type vxlan_type = {
.name = "vxlan",
};
@@ -4826,55 +4827,43 @@ static __net_init int vxlan_init_net(struct net *net)
NULL);
}
-static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
+ struct list_head *dev_to_kill)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
- struct net_device *dev, *aux;
-
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &vxlan_link_ops)
- unregister_netdevice_queue(dev, head);
-
- list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
- /* If vxlan->dev is in the same netns, it has already been added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(vxlan->dev), net))
- unregister_netdevice_queue(vxlan->dev, head);
- }
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
+ vxlan_dellink(vxlan->dev, dev_to_kill);
}
-static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- unsigned int h;
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- }
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- vxlan_destroy_tunnels(net, &list);
+ __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ vxlan_destroy_tunnels(vn, dev_to_kill);
+ }
+}
- list_for_each_entry(net, net_list, exit_list) {
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ unsigned int h;
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
- }
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit_batch = vxlan_exit_batch_net,
+ .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+ .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
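
The new .exit_batch_rtnl pernet hook runs once under RTNL for the whole batch of dying namespaces; devices queued on dev_to_kill are then torn down by the core in a single unregister_netdevice_many() pass, which is what lets vxlan drop its private rtnl_lock()/unregister loop. A minimal sketch for a hypothetical tunnel driver (example_link_ops is illustrative):

	static struct rtnl_link_ops example_link_ops;

	static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
						       struct list_head *dev_to_kill)
	{
		struct net_device *dev, *aux;
		struct net *net;

		ASSERT_RTNL();
		list_for_each_entry(net, net_list, exit_list)
			for_each_netdev_safe(net, dev, aux)
				if (dev->rtnl_link_ops == &example_link_ops)
					unregister_netdevice_queue(dev, dev_to_kill);
	}

	static struct pernet_operations example_net_ops = {
		.exit_batch_rtnl = example_exit_batch_rtnl,
	};
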
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index c04dc88bda6c..f547c22e26ac 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -18,7 +18,12 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-static struct class *framer_class;
+static void framer_release(struct device *dev);
+static const struct class framer_class = {
+ .name = "framer",
+ .dev_release = framer_release,
+};
+
static DEFINE_MUTEX(framer_provider_mutex);
static LIST_HEAD(framer_provider_list);
static DEFINE_IDA(framer_ida);
@@ -384,7 +389,7 @@ static struct framer_provider *framer_provider_of_lookup(const struct device_nod
return ERR_PTR(-EPROBE_DEFER);
}
-static struct framer *framer_of_get_from_provider(struct of_phandle_args *args)
+static struct framer *framer_of_get_from_provider(const struct of_phandle_args *args)
{
struct framer_provider *framer_provider;
struct framer *framer;
@@ -627,7 +632,7 @@ struct framer *framer_create(struct device *dev, struct device_node *node,
INIT_DELAYED_WORK(&framer->polling_work, framer_polling_work);
BLOCKING_INIT_NOTIFIER_HEAD(&framer->notifier_list);
- framer->dev.class = framer_class;
+ framer->dev.class = &framer_class;
framer->dev.parent = dev;
framer->dev.of_node = node ? node : dev->of_node;
framer->id = id;
@@ -735,12 +740,13 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
* should provide a custom of_xlate function that reads the *args* and returns
* the appropriate framer.
*/
-struct framer *framer_provider_simple_of_xlate(struct device *dev, struct of_phandle_args *args)
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct class_dev_iter iter;
struct framer *framer;
- class_dev_iter_init(&iter, framer_class, NULL, NULL);
+ class_dev_iter_init(&iter, &framer_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
framer = dev_to_framer(dev);
if (args->np != framer->dev.of_node)
@@ -768,7 +774,7 @@ EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider *framer_provider;
@@ -830,7 +836,7 @@ static void devm_framer_provider_of_unregister(struct device *dev, void *res)
struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider **ptr, *framer_provider;
@@ -869,14 +875,6 @@ static void framer_release(struct device *dev)
static int __init framer_core_init(void)
{
- framer_class = class_create("framer");
- if (IS_ERR(framer_class)) {
- pr_err("failed to create framer class (%pe)\n", framer_class);
- return PTR_ERR(framer_class);
- }
-
- framer_class->dev_release = framer_release;
-
- return 0;
+ return class_register(&framer_class);
}
device_initcall(framer_core_init);
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index a176653c8861..df275b4fccb6 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -263,7 +263,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
* call skb_cow_data, so that there's no chance that data is removed
* from the skb, so that later we can extract the original endpoint.
*/
- offset = skb->data - skb_network_header(skb);
+ offset = -skb_network_offset(skb);
skb_push(skb, offset);
num_frags = skb_cow_data(skb, 0, &trailer);
offset += sizeof(struct message_data);
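
The wireguard hunk is a behavioural no-op: skb_network_offset() is the signed distance from skb->data to the network header, so negating it reproduces the old expression. From <linux/skbuff.h>, approximately:

	static inline int skb_network_offset(const struct sk_buff *skb)
	{
		return skb_network_header(skb) - skb->data;
	}
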
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 2fceea9f6550..e3fd48dd3909 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1759,6 +1759,10 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
}
static const struct ieee80211_ops adm8211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = adm8211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = adm8211_start,
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 43e0db78d42b..815f8f599f5d 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1358,6 +1358,10 @@ static void ar5523_configure_filter(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops ar5523_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = ar5523_start,
.stop = ar5523_stop,
.tx = ar5523_tx,
@@ -1803,5 +1807,6 @@ static struct usb_driver ar5523_driver = {
module_usb_driver(ar5523_driver);
+MODULE_DESCRIPTION("Atheros AR5523 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 0032f8aa892f..9ce6f49ab261 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -3613,7 +3613,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
goto err_free_mac;
}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index e5ef0352e319..8d274e0f374b 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _COREDUMP_H_
@@ -13,7 +13,11 @@
/**
* enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash dump data
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM crash dump data, contains multiple
+ * struct ath10k_dump_ram_data_hdr
+ * @ATH10K_FW_CRASH_DUMP_MAX: Maximum enumeration
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 907e1e13871a..dbaf262cd7c1 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -381,7 +382,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
htt->target_version_major != 3) {
ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 4a9270e2a4c8..603f6de62b0a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HTT_H_
@@ -906,7 +906,7 @@ struct htt_data_tx_completion_ext {
__le16 msdus_rssi[];
} __packed;
-/**
+/*
* @brief target -> host TX completion indication message definition
*
* @details
@@ -1474,15 +1474,19 @@ enum htt_q_depth_type {
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
/**
- * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
*
* Defines host q state format and behavior. See htt_q_state.
*
+ * @paddr: Queue physical address
+ * @num_peers: Number of supported peers
+ * @num_tids: Number of supported TIDs
* @record_size: Defines the size of each host q entry in bytes. In practice
* however firmware (at least 10.4.3-00191) ignores this host
* configuration value and uses a hardcoded value of 1.
* @record_multiplier: This is valid only when q depth type is MSDUs. It
* defines the exponent for the power of 2 multiplication.
+ * @pad: struct padding for 32-bit alignment
*/
struct htt_q_state_conf {
__le32 paddr;
@@ -1518,7 +1522,7 @@ struct htt_frag_desc_bank_cfg64 {
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
/**
- * htt_q_state - shared between host and firmware via DMA
+ * struct htt_q_state - shared between host and firmware via DMA
*
* This structure is used for the host to expose its software queue state to
* firmware so that its rate control can schedule fetch requests for optimized
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 090bcf148d0c..e322b528baaf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "mac.h"
@@ -2034,8 +2034,8 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (!arvif->is_up)
return;
- if (!ieee80211_beacon_cntdwn_is_complete(vif)) {
- ieee80211_beacon_update_cntdwn(vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_beacon_update_cntdwn(vif, 0);
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
@@ -2047,7 +2047,7 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
ret);
} else {
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
@@ -4056,7 +4056,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
!(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
WARN_ON_ONCE(1);
ieee80211_free_txskb(hw, skb);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -7065,7 +7065,7 @@ static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *st
if (sta) {
if (!sta->wme)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
arsta = (struct ath10k_sta *)sta->drv_priv;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3de2de6d44bc..5c34b156b4ff 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/pci.h>
@@ -889,7 +889,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
@@ -2668,7 +2668,7 @@ static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (!ar_pci->pci_soft_reset)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_soft_reset(ar);
}
@@ -2808,7 +2808,7 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON(!ar_pci->pci_hard_reset))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_hard_reset(ar);
}
@@ -3594,7 +3594,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
break;
default:
WARN_ON(1);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 6b6aa3c36744..aed97fd121ba 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "debug.h"
@@ -851,6 +852,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
}
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
arg->desc_id = ev->desc_id;
arg->status = ev->status;
@@ -1347,7 +1352,7 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
__le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
__le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
__le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
arg->min_tx_power = ev->hw_min_tx_power;
@@ -2119,9 +2124,9 @@ static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 83a8f07a687f..8a2f87d0a3a3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
@@ -2343,7 +2343,7 @@ struct wmi_tlv_adaptive_qcs {
} __packed;
/**
- * wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
*
* @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
* Only vdev_map is valid.
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 88befe92f95d..2e9661f4bea8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
@@ -3884,8 +3884,8 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* actual channel switch is done
*/
if (arvif->vif->bss_conf.csa_active &&
- ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
+ ieee80211_csa_finish(arvif->vif, 0);
continue;
}
@@ -6927,14 +6927,14 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
}
static void
-ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
const struct wmi_start_scan_arg *arg)
{
struct wmi_ie_data *ie;
struct wmi_chan_list *channels;
struct wmi_ssid_list *ssids;
struct wmi_bssid_list *bssids;
- void *ptr = tlvs->tlvs;
+ void *ptr = tlvs;
int i;
if (arg->n_channels) {
@@ -7012,7 +7012,7 @@ ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
cmd->burst_duration_ms = __cpu_to_le32(0);
@@ -7041,7 +7041,7 @@ ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
return skb;
@@ -8733,9 +8733,9 @@ int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
case WMI_VDEV_SUBTYPE_MESH_11S:
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8755,9 +8755,9 @@ static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8779,7 +8779,7 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
@@ -8918,8 +8918,6 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
if (!skb)
return ERR_PTR(-ENOMEM);
- memset(skb->data, 0, sizeof(*cmd));
-
cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9146df98fcee..2379501225a4 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_H_
@@ -3008,8 +3008,11 @@ enum wmi_coex_version {
* @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
- * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable
+ * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
* @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
+ * @WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT: Firmware supports Extended Peer
+ * TID configuration for QoS related settings
+ * @WMI_10_4_REPORT_AIRTIME: Firmware supports transmit airtime reporting
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
@@ -3069,7 +3072,10 @@ struct host_memory_chunk {
struct wmi_host_mem_chunks {
__le32 count;
/* some fw revisions require at least 1 chunk regardless of count */
- struct host_memory_chunk items[1];
+ union {
+ struct host_memory_chunk item;
+ DECLARE_FLEX_ARRAY(struct host_memory_chunk, items);
+ };
} __packed;
struct wmi_init_cmd {
@@ -3215,23 +3221,16 @@ struct wmi_start_scan_common {
__le32 scan_ctrl_flags;
} __packed;
-struct wmi_start_scan_tlvs {
- /* TLV parameters. These includes channel list, ssid list, bssid list,
- * extra ies.
- */
- u8 tlvs[0];
-} __packed;
-
struct wmi_start_scan_cmd {
struct wmi_start_scan_common common;
__le32 burst_duration_ms;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_10x_start_scan_cmd {
struct wmi_start_scan_common common;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
struct wmi_ssid_arg {
@@ -4260,13 +4259,6 @@ struct wmi_peer_sta_ps_state_chg_event {
__le32 peer_ps_state;
} __packed;
-struct wmi_pdev_chanlist_update_event {
- /* number of channels */
- __le32 num_chan;
- /* array of channels */
- struct wmi_channel channel_list[1];
-} __packed;
-
#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
struct wmi_debug_mesg_event {
@@ -5793,30 +5785,6 @@ struct wmi_bcn_prb_info {
/* app IE */
} __packed;
-struct wmi_bcn_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* TIM IE offset from the beginning of the template. */
- __le32 tim_ie_offset;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* variable length data */
- u8 data[1];
-} __packed;
-
-struct wmi_prb_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* Variable length data */
- u8 data[1];
-} __packed;
-
enum wmi_sta_ps_mode {
/* enable power save for the given STA VDEV */
WMI_STA_PS_MODE_DISABLED = 0,
@@ -7197,7 +7165,13 @@ struct wmi_tdls_peer_capabilities {
__le32 is_peer_responder;
__le32 pref_offchan_num;
__le32 pref_offchan_bw;
- struct wmi_channel peer_chan_list[1];
+ union {
+ /* to match legacy implementation allocate room for
+ * at least one record even if peer_chan_len is 0
+ */
+ struct wmi_channel peer_chan_min_allocation;
+ DECLARE_FLEX_ARRAY(struct wmi_channel, peer_chan_list);
+ };
} __packed;
struct wmi_10_4_tdls_peer_update_cmd {
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 0c6ecbb9a066..922e67f8e04f 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -122,6 +122,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -205,6 +206,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "qca6390 hw2.0",
@@ -255,7 +257,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
@@ -290,6 +292,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "qcn9074 hw1.0",
@@ -372,6 +375,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "wcn6855 hw2.0",
@@ -422,7 +426,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -457,6 +461,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6855 hw2.1",
@@ -505,7 +510,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -540,6 +545,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6750 hw1.0",
@@ -621,6 +627,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -702,6 +709,93 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
+ },
+ {
+ .name = "qca2066 hw2.1",
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ .fw = {
+ .dir = "QCA2066/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
};
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 7e3b6779f4e9..b3fb74a226fb 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -147,6 +147,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
+ ATH11K_HW_QCA2066_HW21,
};
enum ath11k_firmware_mode {
@@ -314,6 +315,43 @@ struct ath11k_rekey_data {
bool enable_offload;
};
+/**
+ * struct ath11k_chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20 MHz, it is 6135
+ * channel 37/40 MHz, it is 6125
+ * channel 37/80 MHz, it is 6145
+ * channel 37/160 MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct ath11k_chan_power_info {
+ u16 chan_cfreq;
+ s8 tx_power;
+};
+
+/**
+ * struct ath11k_reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @ap_power_type: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @ap_constraint_power: AP constraint power (dBm)
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to firmware
+ */
+struct ath11k_reg_tpc_power_info {
+ bool is_psd_power;
+ u8 eirp_power;
+ enum wmi_reg_6ghz_ap_type ap_power_type;
+ u8 num_pwr_levels;
+ u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL];
+ u8 ap_constraint_power;
+ s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL];
+ struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL];
+};
+
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -369,9 +407,7 @@ struct ath11k_vif {
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
-#ifdef CONFIG_ATH11K_DEBUGFS
- struct dentry *debugfs_twt;
-#endif /* CONFIG_ATH11K_DEBUGFS */
+ struct ath11k_reg_tpc_power_info reg_tpc_info;
};
struct ath11k_vif_iter {
@@ -739,6 +775,7 @@ struct ath11k {
/* protected by conf_mutex */
bool ps_state_enable;
bool ps_timekeeper_enable;
+ s8 max_allowed_tx_power;
};
struct ath11k_band_cap {
@@ -922,6 +959,7 @@ struct ath11k_base {
* This may or may not be used during the runtime
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct cur_regulatory_info *reg_info_store;
/* Current DFS Regulatory */
enum ath11k_dfs_region dfs_region;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index a847bc0d50c0..a48e737ef35d 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1894,35 +1894,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
.open = simple_open
};
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_base *ab = arvif->ar->ab;
+ struct dentry *debugfs_twt;
if (arvif->vif->type != NL80211_IFTYPE_AP &&
!(arvif->vif->type == NL80211_IFTYPE_STATION &&
test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
return;
- arvif->debugfs_twt = debugfs_create_dir("twt",
- arvif->vif->debugfs_dir);
- debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ debugfs_create_file("add_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_add_dialog);
- debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ debugfs_create_file("del_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_del_dialog);
- debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ debugfs_create_file("pause_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_pause_dialog);
- debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ debugfs_create_file("resume_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_resume_dialog);
}
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
- if (!arvif->debugfs_twt)
- return;
-
- debugfs_remove_recursive(arvif->debugfs_twt);
- arvif->debugfs_twt = NULL;
-}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 44d15845f39a..a39e458637b0 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -307,8 +307,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
@@ -387,14 +387,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
-static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
-{
-}
-
-static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-}
-
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8975dc57ad77..1a62407e5a9f 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -104,11 +104,14 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- if (ring->cached)
+ if (ring->cached) {
+ dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
+ DMA_FROM_DEVICE);
kfree(ring->vaddr_unaligned);
- else
+ } else {
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
+ }
ring->vaddr_unaligned = NULL;
}
@@ -249,7 +252,18 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
- ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->paddr_unaligned = dma_map_single(ab->dev,
+ ring->vaddr_unaligned,
+ ring->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
+ kfree(ring->vaddr_unaligned);
+ ring->vaddr_unaligned = NULL;
+ return -ENOMEM;
+ }
}
}
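
The old code fed virt_to_phys() of a kzalloc'd buffer to the hardware, bypassing the DMA API entirely (no IOMMU translation, no cache maintenance, nothing to unmap). The fix pairs dma_map_single() at setup with dma_unmap_single() at cleanup and checks dma_mapping_error(). A minimal sketch of that pairing, with "dev" and the helpers as illustrative stand-ins for the srng code above:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int ring_map(struct device *dev, void **vaddr, dma_addr_t *paddr,
		    size_t size)
{
	*vaddr = kzalloc(size, GFP_KERNEL);
	if (!*vaddr)
		return -ENOMEM;

	*paddr = dma_map_single(dev, *vaddr, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *paddr)) {
		kfree(*vaddr);
		*vaddr = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void ring_unmap(struct device *dev, void *vaddr, dma_addr_t paddr,
		       size_t size)
{
	/* unmap before freeing: the exact reverse of ring_map() */
	dma_unmap_single(dev, paddr, size, DMA_FROM_DEVICE);
	kfree(vaddr);
}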
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index c1072e66e3e8..272b1c35f98d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -103,7 +103,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control)))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
@@ -1018,7 +1018,7 @@ int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index c060c4b5c0cc..f3d04568c221 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -626,15 +626,30 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
return NULL;
}
+static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
+ struct hal_srng *srng, dma_addr_t *paddr)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
+ *paddr = srng->ring_base_paddr +
+ sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
+ return srng->ring_base_vaddr + srng->u.dst_ring.tp;
+ }
+
+ return NULL;
+}
+
static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
struct hal_srng *srng)
{
+ dma_addr_t desc_paddr;
u32 *desc;
/* prefetch only if desc is available */
- desc = ath11k_hal_srng_dst_peek(ab, srng);
+ desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
if (likely(desc)) {
- dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+ dma_sync_single_for_cpu(ab->dev, desc_paddr,
(srng->entry_size * sizeof(u32)),
DMA_FROM_DEVICE);
prefetch(desc);
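
Previously the sync used virt_to_phys(desc), which is not a valid DMA address on systems with an IOMMU. The new helper derives the descriptor's bus address from the ring's mapped base plus the tail-pointer offset. A standalone illustration of that arithmetic; the addresses are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ring_base_paddr = 0xfffff000; /* as returned by the DMA mapping */
	uint32_t tp = 6;                       /* tail pointer, in 32-bit ring words */

	/* desc paddr = base paddr + tp * sizeof(ring word) */
	uint64_t desc_paddr = ring_base_paddr + (uint64_t)tp * sizeof(uint32_t);
	printf("desc paddr: 0x%llx\n", (unsigned long long)desc_paddr);
	return 0;
}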
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 80447f488954..65e8f244ebb9 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
@@ -674,6 +674,7 @@ struct hal_srng_config {
* @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host
*/
enum hal_rx_buf_return_buf_manager {
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index e758ee8e17c9..8f7dd43dc1bd 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -246,7 +246,7 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath11k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath11k_warn(ab, "Unknown reo command %d\n", type);
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 77d8f9237680..caa6dc12a790 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -58,7 +58,7 @@ static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
- config->num_vdevs = 4;
+ config->num_vdevs = ab->hw_params.num_vdevs;
config->num_peers = 16;
config->num_tids = 32;
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 1b070747a5db..14ef4eb48f80 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -226,6 +226,7 @@ struct ath11k_hw_params {
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
+ bool support_dual_stations;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index db241589424d..a6a37d67a50a 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -255,9 +255,6 @@ static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
{
enum nl80211_he_ru_alloc ret;
@@ -1580,7 +1577,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
return;
if (vif->bss_conf.color_change_active &&
- ieee80211_beacon_cntdwn_is_complete(vif)) {
+ ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
@@ -1589,7 +1586,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
arvif->bcca_zero_sent = false;
if (vif->bss_conf.color_change_active)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, 0);
ath11k_mac_setup_bcn_tmpl(arvif);
}
@@ -2297,6 +2294,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ /* Initialize rx_mcs_160 to 9 which is an invalid value */
+ rx_mcs_160 = 9;
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
@@ -2308,6 +2307,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
}
}
+ /* Initialize rx_mcs_80 to 9 which is an invalid value */
+ rx_mcs_80 = 9;
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
@@ -3026,7 +3027,14 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
+ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+ arvif->vdev_id, bss_conf->bssid);
+ return;
+ }
+
peer_arg.is_assoc = true;
+
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -3049,12 +3057,6 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
- ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
- arvif->vdev_id, bss_conf->bssid);
- return;
- }
-
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
@@ -3397,6 +3399,18 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return 0;
}
+static bool ath11k_mac_supports_station_tpc(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ chandef->chan &&
+ chandef->chan->band == NL80211_BAND_6GHZ;
+}
+
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -3596,7 +3610,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n",
arvif->vdev_id, info->txpower);
-
arvif->txpower = info->txpower;
ath11k_mac_txpower_recalc(ar);
}
@@ -4000,7 +4013,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
req->ssids[i].ssid_len);
}
} else {
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
@@ -4906,100 +4919,6 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
ar->num_stations--;
}
-static int ath11k_mac_station_add(struct ath11k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct peer_create_params peer_param;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- ret = ath11k_mac_inc_num_stations(arvif, sta);
- if (ret) {
- ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
- ar->max_num_stations);
- goto exit;
- }
-
- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
- if (!arsta->rx_stats) {
- ret = -ENOMEM;
- goto dec_num_station;
- }
-
- peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
- peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
-
- ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
- if (ret) {
- ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- goto free_rx_stats;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
- arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
- if (!arsta->tx_stats) {
- ret = -ENOMEM;
- goto free_peer;
- }
- }
-
- if (ieee80211_vif_is_mesh(vif)) {
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_USE_4ADDR, 1);
- if (ret) {
- ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
- sta->addr, ret);
- goto free_tx_stats;
- }
- }
-
- ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
- if (ret) {
- ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
- goto free_tx_stats;
- }
-
- if (ab->hw_params.vdev_start_delay &&
- !arvif->is_started &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath11k_start_vdev_delay(ar->hw, vif);
- if (ret) {
- ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
- goto free_tx_stats;
- }
- }
-
- ewma_avg_rssi_init(&arsta->avg_rssi);
- return 0;
-
-free_tx_stats:
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-free_peer:
- ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
-free_rx_stats:
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
-dec_num_station:
- ath11k_mac_dec_num_stations(arvif, sta);
-exit:
- return ret;
-}
-
static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
struct ieee80211_sta *sta)
{
@@ -5028,140 +4947,6 @@ static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
return bw;
}
-static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
-{
- struct ath11k *ar = hw->priv;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct ath11k_peer *peer;
- int ret = 0;
-
- /* cancel must be done outside the mutex to avoid deadlock */
- if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- cancel_work_sync(&arsta->update_wk);
- cancel_work_sync(&arsta->set_4addr_wk);
- }
-
- mutex_lock(&ar->conf_mutex);
-
- if (old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- arsta->arvif = arvif;
- arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
- INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
- INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
-
- ret = ath11k_mac_station_add(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION;
-
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
- if (!skip_peer_delete) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab,
- "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab,
- ATH11K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- }
-
- ath11k_mac_dec_num_stations(arvif, sta);
- mutex_lock(&ar->ab->tbl_mtx_lock);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (skip_peer_delete && peer) {
- peer->sta = NULL;
- } else if (peer && peer->sta == sta) {
- ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- ath11k_peer_rhash_delete(ar->ab, peer);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
- mutex_unlock(&ar->ab->tbl_mtx_lock);
-
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
- } else if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_assoc(ar, vif, sta, false);
- if (ret)
- ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
-
- spin_lock_bh(&ar->data_lock);
- /* Set arsta bw and prev bw */
- arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
- arsta->bw_prev = arsta->bw;
- spin_unlock_bh(&ar->data_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
-
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
- } else if (old_state == IEEE80211_STA_AUTHORIZED &&
- new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
-
- spin_unlock_bh(&ar->ab->base_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_disassoc(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
- }
-
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -6756,13 +6541,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
- /* In the case of hardware recovery, debugfs files are
- * not deleted since ieee80211_ops.remove_interface() is
- * not invoked. In such cases, try to delete the files.
- * These will be re-created later.
- */
- ath11k_debugfs_remove_interface(arvif);
-
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
@@ -6939,8 +6717,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
- ath11k_debugfs_add_interface(arvif);
-
if (vif->type != NL80211_IFTYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_vdev_create(ar);
@@ -6949,6 +6725,14 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ret);
}
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar)) {
+ struct cur_regulatory_info *reg_info;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "interface added to change reg rules\n");
+ ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP);
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7056,8 +6840,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- ath11k_debugfs_remove_interface(arvif);
-
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@@ -7277,6 +7059,15 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
return ret;
}
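	/* TODO: For now we only set TPC power here. However, when the
	 * channel changes (e.g. on CSA), it should be updated again.
	 */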
+ if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) {
+ ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx);
+ ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+ &arvif->reg_tpc_info);
+ }
+
if (!restart)
ar->num_started_vdevs++;
@@ -7553,8 +7344,8 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
@@ -7600,6 +7391,501 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
return 0;
}
+static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ arvif->is_started = false;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
+{
+ switch (txpwr_intrprt) {
+ /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
+ * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
+ txpwr_cnt = txpwr_cnt + 1;
+ break;
+ /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
+ * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
+ txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
+ break;
+ }
+
+ return txpwr_cnt;
+}
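
Decoding follows the two 802.11ax tables cited above: for the EIRP interpretations the count subfield is clamped to 3 and incremented (1 to 4 power values), while for the PSD interpretations a non-zero count c means 2^(c-1) values and 0 means 1. A runnable reproduction of the mapping; the IEEE80211_TPE_* interpretation values are inlined as 0..3 here, matching their order in mac80211's enum:

#include <stdio.h>

static unsigned int tpe_count(unsigned int interpret, unsigned int cnt)
{
	if (interpret == 0 || interpret == 2)		/* EIRP variants */
		return (cnt <= 3 ? cnt : 3) + 1;	/* 1..4 levels */
	/* PSD variants: 0 -> 1 value, c -> 2^(c-1) values, capped at 8 */
	if (cnt > 4)
		cnt = 4;
	return cnt ? 1u << (cnt - 1) : 1;
}

int main(void)
{
	for (unsigned int c = 0; c <= 4; c++)
		printf("cnt=%u: EIRP -> %u, PSD -> %u\n",
		       c, tpe_count(0, c), c, tpe_count(1, c));
	return 0;
}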
+
+static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
+ if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 4;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 8;
+ default:
+ return 1;
+ }
+ } else {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ default:
+ return 1;
+ }
+ }
+}
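
The two switches encode different granularities: PSD limits apply per 20 MHz subchannel (1/2/4/8 levels for 20/40/80/160 MHz), whereas EIRP limits give one value per nested bandwidth (1/2/3/4). A small runnable check with the values transcribed from the switches above:

#include <stdio.h>

int main(void)
{
	static const int psd_levels[]  = { 1, 2, 4, 8 };	/* one per 20 MHz */
	static const int eirp_levels[] = { 1, 2, 3, 4 };	/* one per bandwidth */

	for (int i = 0; i < 4; i++)	/* index 0..3 -> 20/40/80/160 MHz */
		printf("%3d MHz: psd %d, eirp %d\n",
		       20 << i, psd_levels[i], eirp_levels[i]);
	return 0;
}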
+
+static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+ u16 diff_seq;
+
+	/* Return the center frequency of the lowest-numbered channel in the
+	 * chandef. For example, with lowest channel 1 (center 5955):
+	 * bandwidth 40 MHz, center frequency 5965, diff is 5965 - 5955 = 10;
+	 * bandwidth 80 MHz, center frequency 5985, diff is 5985 - 5955 = 30;
+	 * bandwidth 160 MHz, center frequency 6025, diff is 6025 - 5955 = 70.
+	 */
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_160:
+ diff_seq = 70;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ diff_seq = 30;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ diff_seq = 10;
+ break;
+ default:
+ diff_seq = 0;
+ }
+
+ return chan_def->center_freq1 - diff_seq;
+}
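
A runnable check of the start-frequency offsets documented above (center_freq1 minus 70/30/10/0 for 160/80/40/20 MHz); the 6 GHz channel-1 centers are the same examples used in the comment:

#include <stdio.h>

static unsigned int start_freq(unsigned int center_freq1, unsigned int bw_mhz)
{
	unsigned int diff = bw_mhz == 160 ? 70 :
			    bw_mhz == 80 ? 30 :
			    bw_mhz == 40 ? 10 : 0;
	return center_freq1 - diff;
}

int main(void)
{
	/* all four should print 5955, channel 1's own center */
	printf("%u %u %u %u\n", start_freq(5955, 20), start_freq(5965, 40),
	       start_freq(5985, 80), start_freq(6025, 160));
	return 0;
}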
+
+static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+ u16 start_seq, u8 seq)
+{
+ u16 seg_seq;
+
+	/* Return the segment center frequency for the given bandwidth.
+	 * start_seq is the lowest channel number's center frequency and
+	 * seq 0/1/2/3 selects 20 MHz/40 MHz/80 MHz/160 MHz (or 80P80).
+	 * For example, with lowest channel 1 (center 5955):
+	 * 20 MHz segment center is 5955, diff is 5955 - 5955 = 0;
+	 * 40 MHz segment center is 5965, diff is 5965 - 5955 = 10;
+	 * 80 MHz segment center is 5985, diff is 5985 - 5955 = 30;
+	 * 160 MHz segment center is 6025, diff is 6025 - 5955 = 70.
+	 */
+ if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3)
+ return chan_def->center_freq2;
+
+ seg_seq = 10 * (BIT(seq) - 1);
+ return seg_seq + start_seq;
+}
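
The closed form seg_seq = 10 * (BIT(seq) - 1) reproduces exactly those offsets. A runnable check:

#include <stdio.h>

int main(void)
{
	unsigned int start_seq = 5955;	/* 6 GHz channel 1 */

	for (unsigned int seq = 0; seq < 4; seq++)	/* 20/40/80/160 MHz */
		printf("seq %u -> %u MHz\n", seq,
		       start_seq + 10 * ((1u << seq) - 1));
	return 0;	/* prints 5955, 5965, 5985, 6025 */
}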
+
+static void ath11k_mac_get_psd_channel(struct ath11k *ar,
+ u16 step_freq,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ s8 *tx_power)
+{
+	/* Compute the center frequency of each 20 MHz subchannel.
+	 * For example, a 160 MHz chan with center frequency 6025 spans
+	 * eight 20 MHz channels, 1/5/9/13/17/21/25/29. Channel 1's center
+	 * frequency, 5955, is the parameter start_freq; i = 0..7 steps
+	 * through the eight channels, whose centers are
+	 * 5955/5975/5995/6015/6035/6055/6075/6095, i.e. start_freq plus
+	 * i times the 20 MHz gap given by step_freq.
+	 * From each center frequency the struct ieee80211_channel is
+	 * looked up to read its max_reg_power.
+	 */
+ *center_freq = *start_freq + i * step_freq;
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath11k_mac_get_eirp_power(struct ath11k *ar,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ struct cfg80211_chan_def *def,
+ s8 *tx_power)
+{
+	/* Compute the segment center frequency for 20 MHz/40 MHz/80 MHz/
+	 * 160 MHz (or 80P80) bandwidth, then add 10 to land on a channel
+	 * number's center frequency.
+	 * For example, with configured channel number 1:
+	 * 40 MHz center frequency is 5965; plus 10 gives 5975, channel 5;
+	 * 80 MHz center frequency is 5985; plus 10 gives 5995, channel 9;
+	 * 160 MHz center frequency is 6025; plus 10 gives 6035, channel 17.
+	 * From that center frequency the struct ieee80211_channel is
+	 * looked up to read its max_reg_power.
+	 */
+ *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i);
+
+	/* For 20 MHz the segment center frequency is the channel's own */
+ if (i != 0)
+ *center_freq += 10;
+
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_channel *chan, *temp_chan;
+ u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+ bool is_psd_power = false, is_tpe_present = false;
+ s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
+ psd_power, tx_power;
+ s8 eirp_power = 0;
+ u16 start_freq, center_freq;
+
+ chan = ctx->def.chan;
+ start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def);
+ pwr_reduction = bss_conf->pwr_reduction;
+
+ if (arvif->reg_tpc_info.num_pwr_levels) {
+ is_tpe_present = true;
+ num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+ } else {
+ num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def);
+ }
+
+ for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+		/* STA received TPE IE */
+ if (is_tpe_present) {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ psd_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ /* convert psd power to EIRP power based
+ * on channel width
+ */
+ tx_power =
+ min_t(s8, tx_power,
+ psd_power + 13 + pwr_lvl_idx * 3);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ /* local power is not PSD power */
+ } else {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ reg_tpc_info->tpe[pwr_lvl_idx];
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ }
+ /* STA not received TPE IE */
+ } else {
+			/* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] = psd_power;
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] = tx_power;
+ }
+ }
+
+ if (is_psd_power) {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ eirp_power = eirp_power - pwr_reduction;
+
+ /* If firmware updated max tx power is non zero, then take
+ * the min of firmware updated ap tx power
+ * and max power derived from above mentioned parameters.
+ */
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "eirp power : %d firmware report power : %d\n",
+ eirp_power, ar->max_allowed_tx_power);
+ /* Firmware reports lower max_allowed_tx_power during vdev
+ * start response. In case of 6 GHz, firmware is not aware
+ * of EIRP power unless driver sets EIRP power through WMI
+ * TPC command. So radio which does not support idle power
+ * save can set maximum calculated EIRP power directly to
+ * firmware through TPC command without min comparison with
+ * vdev start response's max_allowed_tx_power.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ eirp_power = min_t(s8,
+ eirp_power,
+ ar->max_allowed_tx_power);
+ } else {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ max_tx_power[pwr_lvl_idx] =
+ max_tx_power[pwr_lvl_idx] - pwr_reduction;
+ /* If firmware updated max tx power is non zero, then take
+ * the min of firmware updated ap tx power
+ * and max power derived from above mentioned parameters.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ max_tx_power[pwr_lvl_idx],
+ ar->max_allowed_tx_power);
+ }
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+ max_tx_power[pwr_lvl_idx];
+ }
+
+ reg_tpc_info->num_pwr_levels = num_pwr_levels;
+ reg_tpc_info->is_psd_power = is_psd_power;
+ reg_tpc_info->eirp_power = eirp_power;
+ reg_tpc_info->ap_power_type =
+ ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type);
+}
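
The "psd_power + 13 + pwr_lvl_idx * 3" term converts a PSD limit (dBm/MHz) into an EIRP limit (dBm) via EIRP = PSD + 10*log10(BW/1 MHz): 10*log10(20) is about 13 dB for a 20 MHz level, and each bandwidth doubling adds about 3 dB. A runnable check of that approximation, assuming pwr_lvl_idx 0..3 corresponds to 20/40/80/160 MHz as in the EIRP level counts above:

#include <math.h>
#include <stdio.h>

int main(void)	/* build with -lm */
{
	for (int idx = 0; idx < 4; idx++) {
		double bw = 20.0 * (1 << idx);	/* 20/40/80/160 MHz */
		printf("bw %3.0f MHz: exact %.2f dB, driver %d dB\n",
		       bw, 10.0 * log10(bw), 13 + 3 * idx);
	}
	return 0;	/* 13.01/16.02/19.03/22.04 vs 13/16/19/22 */
}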
+
+static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ieee80211_tx_pwr_env *single_tpe;
+ enum wmi_reg_6ghz_client_type client_type;
+ struct cur_regulatory_info *reg_info;
+ int i;
+ u8 pwr_count, pwr_interpret, pwr_category;
+ u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
+ bool use_local_tpe, non_psd_set = false, psd_set = false;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ client_type = reg_info->client_type;
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category == client_type) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
+ pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
+ local_tpe_count++;
+ else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
+ pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
+ reg_tpe_count++;
+ }
+ }
+
+ if (!reg_tpe_count && !local_tpe_count) {
+ ath11k_warn(ab,
+ "no transmit power envelope match client power type %d\n",
+ client_type);
+ return;
+ } else if (!reg_tpe_count) {
+ use_local_tpe = true;
+ } else {
+ use_local_tpe = false;
+ }
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category != client_type)
+ continue;
+
+ /* get local transmit power envelope */
+ if (use_local_tpe) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ /* get regulatory transmit power envelope */
+ } else {
+ if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ }
+ }
+
+ if (non_psd_set && !psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = false;
+ arvif->reg_tpc_info.eirp_power = 0;
+
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "non PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+
+ if (psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = true;
+
+ if (pwr_count == 0) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power : %d\n", single_tpe->tx_power[0]);
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&ctx->def);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
+ } else {
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+ }
+}
+
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -7610,7 +7896,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct peer_create_params param;
+ struct cur_regulatory_info *reg_info;
+ enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -7618,6 +7905,24 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath11k_reg_handle_chan_list(ab, reg_info, power_type);
+ arvif->chanctx = *ctx;
+ ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
+ }
+
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
@@ -7633,21 +7938,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath11k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath11k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_start(ar);
if (ret) {
@@ -7660,15 +7950,17 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = ath11k_mac_vdev_start(arvif, ctx);
- if (ret) {
- ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
- arvif->vdev_id, vif->addr,
- ctx->def.chan->center_freq, ret);
- goto out;
- }
+ if (!arvif->is_started) {
+ ret = ath11k_mac_vdev_start(arvif, ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
- arvif->is_started = true;
+ arvif->is_started = true;
+ }
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
@@ -7708,8 +8000,6 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
- WARN_ON(!arvif->is_started);
-
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
spin_lock_bh(&ab->base_lock);
@@ -7733,24 +8023,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
return;
}
- ret = ath11k_mac_vdev_stop(arvif);
- if (ret)
- ath11k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
- arvif->is_started = false;
-
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (arvif->is_started) {
+ ret = ath11k_mac_vdev_stop(arvif);
if (ret)
- ath11k_warn(ar->ab,
- "failed to delete peer %pM for vdev %d: %d\n",
- arvif->bssid, arvif->vdev_id, ret);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "removed peer %pM vdev %d after vdev stop\n",
- arvif->bssid, arvif->vdev_id);
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
}
if (ab->hw_params.vdev_start_delay &&
@@ -8973,8 +9252,8 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
arg->dwell_time_active = scan_time_msec;
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
- arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg->scan_f_passive = 1;
+ arg->scan_f_filter_prb_req = 1;
arg->burst_duration = duration;
ret = ath11k_start_scan(ar, arg);
@@ -9108,6 +9387,252 @@ err_fallback:
return 0;
}
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ewma_avg_rssi_init(&arsta->avg_rssi);
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_station_remove(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ int ret;
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_stop_vdev_early(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath11k_mac_station_remove(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+
+ spin_lock_bh(&ar->data_lock);
+ /* Set arsta bw and prev bw */
+ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -9153,6 +9678,7 @@ static const struct ieee80211_ops ath11k_ops = {
#endif
#ifdef CONFIG_ATH11K_DEBUGFS
+ .vif_add_debugfs = ath11k_debugfs_op_vif_add,
.sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif
@@ -9298,6 +9824,33 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return 0;
}
+static void ath11k_mac_setup_mac_address_list(struct ath11k *ar)
+{
+ struct mac_address *addresses;
+ u16 n_addresses;
+ int i;
+
+ if (!ar->ab->hw_params.support_dual_stations)
+ return;
+
+ n_addresses = ar->ab->hw_params.num_vdevs;
+ addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL);
+ if (!addresses)
+ return;
+
+ memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN);
+ for (i = 1; i < n_addresses; i++) {
+ memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN);
+		/* set the Locally Administered Address bit */
+ addresses[i].addr[0] |= 0x2;
+
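+		/* Step the upper nibble of the first octet so each derived
+		 * address stays unique while remaining locally administered.
+		 */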
+ addresses[i].addr[0] += (i - 1) << 4;
+ }
+
+ ar->hw->wiphy->addresses = addresses;
+ ar->hw->wiphy->n_addresses = n_addresses;
+}
+
static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -9317,28 +9870,46 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
- limits[0].max = 1;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
- limits[1].max = 16;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (ab->hw_params.support_dual_stations) {
+ limits[0].max = 2;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ limits[1].max = 1;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = n_limits;
- combinations[0].max_interfaces = 16;
- combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_80P80) |
- BIT(NL80211_CHAN_WIDTH_160);
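+		/* Dual-station hardware can run its two STA interfaces on
+		 * separate channels, hence two different channels here;
+		 * radar detect widths are presumably left unset because DFS
+		 * operation is not the target of this combination.
+		 */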
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[0].num_different_channels = 2;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ } else {
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+ }
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -9403,6 +9974,8 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
+ kfree(ar->hw->wiphy->addresses);
+
SET_IEEE80211_DEV(ar->hw, NULL);
}
@@ -9445,6 +10018,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ath11k_pdev_caps_update(ar);
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+ ath11k_mac_setup_mac_address_list(ar);
SET_IEEE80211_DEV(ar->hw, ab->dev);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0dfdeed5177b..f5800fbecff8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -176,4 +176,7 @@ int ath11k_mac_wait_tx_complete(struct ath11k *ar);
int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6835c14b82cc..b53659145fcf 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -423,7 +423,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
- mhi_ctrl->iova_stop = 0xFFFFFFFF;
+ mhi_ctrl->iova_stop = ab_pci->dma_mask;
}
mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
@@ -443,6 +443,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
+ case ATH11K_HW_QCA2066_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 09e65c5e55c4..be9d2c69cc41 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -18,7 +18,8 @@
#include "qmi.h"
#define ATH11K_PCI_BAR_NUM 0
-#define ATH11K_PCI_DMA_MASK 32
+#define ATH11K_PCI_DMA_MASK 36
+#define ATH11K_PCI_COHERENT_DMA_MASK 32
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
@@ -28,6 +29,8 @@
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
+#define TCSR_SOC_HW_SUB_VER 0x1910010
+
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
@@ -526,14 +529,24 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
goto disable_device;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ ret = dma_set_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
if (ret) {
ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
ATH11K_PCI_DMA_MASK, ret);
goto release_region;
}
+ ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);
+
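+	/* Streaming DMA may use the wider 36-bit mask; coherent allocations
+	 * are kept within 32 bits, presumably due to firmware/MHI
+	 * addressing limits.
+	 */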
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
+ ATH11K_PCI_COHERENT_DMA_MASK, ret);
+ goto release_region;
+ }
+
pci_set_master(pdev);
ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
@@ -731,8 +744,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
- const struct ath11k_pci_ops *pci_ops;
int ret;
+ u32 sub_version;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@@ -777,6 +790,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -790,13 +809,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- pci_ops = &ath11k_pci_ops_qcn9074;
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
@@ -809,7 +836,19 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
break;
case 0x10:
case 0x11:
- ab->hw_rev = ATH11K_HW_WCN6855_HW21;
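+			/* WCN6855 hw2.1 and QCA2066 report the same major/minor
+			 * version; the TCSR sub-version register tells them
+			 * apart.
+			 */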
+ sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
+ sub_version);
+ switch (sub_version) {
+ case 0x1019A0E1:
+ case 0x1019B0E1:
+ case 0x1019C0E1:
+ case 0x1019D0E1:
+ ab->hw_rev = ATH11K_HW_QCA2066_HW21;
+ break;
+ default:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ }
break;
default:
goto unsupported_wcn6855_soc;
@@ -823,7 +862,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -832,12 +870,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
- if (ret) {
- ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
- goto err_pci_free_region;
- }
-
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index e9a01f344ec6..6be73333d90b 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@@ -72,6 +72,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
+ u64 dma_mask;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 15e2ceb22a44..add4db4c50bc 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -115,6 +115,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b4fd4d2107c7..737fcd450d4b 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -425,6 +425,11 @@ static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
/* Use the flags of both the rules */
new_rule->flags = rule1->flags | rule2->flags;
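+	/* PSD is only meaningful when both rules advertise it: take the
+	 * stricter (lower) PSD limit, otherwise drop the flag.
+	 */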
+ if ((rule1->flags & NL80211_RRF_PSD) && (rule2->flags & NL80211_RRF_PSD))
+ new_rule->psd = min_t(s8, rule1->psd, rule2->psd);
+ else
+ new_rule->flags &= ~NL80211_RRF_PSD;
+
	/* To be safe, let's use the max cac timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
@@ -527,13 +532,14 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
static void
ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
- u32 reg_flags)
+ s8 psd, u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
@@ -563,7 +569,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -584,7 +590,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
end_freq, bw, reg_rule->ant_gain,
- reg_rule->reg_power, flags);
+ reg_rule->reg_power, reg_rule->psd_eirp, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
@@ -605,7 +611,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -618,25 +624,68 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
*rule_idx = i;
}
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STANDARD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VERY_LOW_POWER_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
+
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect)
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
- struct cur_reg_rule *reg_rule;
+ struct cur_reg_rule *reg_rule, *reg_rule_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
- u32 flags;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
char alpha2[3];
num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
- /* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
- * This can be updated after complete 6 GHz regulatory support is added.
- */
- if (reg_info->is_ext_reg_event)
- num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6ghz_ap_type ap_type;
+
+ ap_type = ath11k_reg_ap_pwr_convert(power_type);
+
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+
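+			/* No client rules for the requested power type; fall
+			 * back to the Indoor AP (LPI) rule set.
+			 */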
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6ghz_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6ghz_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
if (!num_rules)
goto ret;
@@ -683,14 +732,13 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
- } else if (reg_info->is_ext_reg_event &&
- reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
- (k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
- reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
- k++;
- max_bw = min_t(u16, reg_rule->max_bw,
- reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ k < reg_6ghz_number) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
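+			/* Propagate the firmware PSD indication so the rule
+			 * is treated as PSD-limited.
+			 */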
+ if (reg_rule->psd_flag)
+ flags |= NL80211_RRF_PSD;
} else {
break;
}
@@ -702,7 +750,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
@@ -758,6 +806,159 @@ ret:
return new_regd;
}
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
+}
+
+static enum wmi_vdev_type ath11k_reg_get_ar_vdev_type(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+	/* Currently each struct ath11k maps to one struct ieee80211_hw/wiphy
+	 * and one struct ieee80211_regdomain, so it can only store one group
+	 * of reg rules. This means multi-interface concurrency within the
+	 * same ath11k is not supported for the regdomain, so take the vdev
+	 * type of the first entry for now. Once the regdomain gains
+	 * concurrency support, this should change.
+	 */
+ arvif = list_first_entry_or_null(&ar->arvifs, struct ath11k_vif, list);
+ if (arvif)
+ return arvif->vdev_type;
+
+ return WMI_VDEV_TYPE_UNSPEC;
+}
+
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd;
+ bool intersect = false;
+ int pdev_idx;
+ struct ath11k *ar;
+ enum wmi_vdev_type vdev_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg handle chan list");
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+		/* If setting the requested country fails, fw retains the
+		 * current regd. Log the failure and return.
+		 */
+ ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ return -EINVAL;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+	/* Skip default reg rule updates sent during FW recovery if a
+	 * default regd is already available.
+	 */
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock_bh(&ab->base_lock);
+ goto retfail;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (pdev_idx >= ab->num_radios) {
+		/* Process the event for phy0 only if single_pdev_only
+		 * is true. If pdev_idx is valid but not 0, discard the
+		 * event. Otherwise, it goes to fallback. In either case
+		 * ath11k_reg_reset_info() needs to be called to avoid a
+		 * memory leak.
+		 */
+ ath11k_reg_reset_info(reg_info);
+
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ return 0;
+ goto fallback;
+ }
+
+	/* Avoid multiple overwrites of the default regd during core
+	 * stop-start after mac registration.
+	 */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto retfail;
+
+	/* Intersect new rules with default regd if a new country setting was
+	 * requested, i.e., a default regd was already set during
+	 * initialization and the regd coming from this event has valid
+	 * country info.
+	 */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ ar = ab->pdevs[pdev_idx].ar;
+ vdev_type = ath11k_reg_get_ar_vdev_type(ar);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi handle chan list power type %d vdev type %d intersect %d\n",
+ power_type, vdev_type, intersect);
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect, vdev_type, power_type);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
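+	/* Cache the firmware reg info so the regd can later be rebuilt for a
+	 * different 6 GHz power type without another firmware event.
+	 */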
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_reg_reset_info(&ab->reg_info_store[pdev_idx]);
+ ab->reg_info_store[pdev_idx] = *reg_info;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ if (ab->default_regd[pdev_idx]) {
+		/* The initial rules from FW after WMI Init are used to build
+		 * the default regd. From then on, any rules updated for
+		 * the pdev could be due to user reg changes.
+		 * Free the previously built regd before assigning the newly
+		 * generated regd to ar; kfree() handles a NULL pointer by
+		 * itself.
+		 */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+		/* This regd is applied during mac registration and is
+		 * held constant throughout for regd intersection purposes.
+		 */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+fallback:
+	/* Fall back to the older regd (by sending the previous country
+	 * setting again) if the fw has succeeded and we failed to process
+	 * the result here. The regdomain should be uniform across driver
+	 * and fw. Since the fw has processed the command and sent a success
+	 * status, we expect this function to succeed as well. If it
+	 * doesn't, the country needs to be reverted at the fw and the old
+	 * SCAN_CHAN_LIST cmd needs to be sent.
+	 */
+	/* TODO: This is rare, but it should still be handled */
+ WARN_ON(1);
+
+retfail:
+
+ return -EINVAL;
+}
+
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
@@ -781,10 +982,36 @@ void ath11k_reg_init(struct ath11k *ar)
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info)
+{
+ int i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2ghz_ptr);
+ kfree(reg_info->reg_rules_5ghz_ptr);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6ghz_client_ptr[i][j]);
+ }
+
+ memset(reg_info, 0, sizeof(*reg_info));
+}
+
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
+ for (i = 0; i < ab->num_radios; i++)
+ ath11k_reg_reset_info(&ab->reg_info_store[i]);
+
+ kfree(ab->reg_info_store);
+ ab->reg_info_store = NULL;
+
for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index f28902f85e41..64edb794260a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -30,11 +30,20 @@ enum ath11k_dfs_region {
/* ATH11K Regulatory API's */
void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect);
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 43bb23265d34..302d66092b97 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -198,7 +198,7 @@ static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
u16 length;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 8a65fa04b48d..90f1fdec9adc 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -238,8 +238,8 @@ static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
(void *)tb);
}
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -248,7 +248,7 @@ const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -2098,7 +2098,7 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
ar->ab->wmi_ab.svc_map))
@@ -2379,6 +2379,70 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_tpc_power_cmd *cmd;
+ struct wmi_vdev_ch_power_info *ch;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ u8 *ptr;
+ int i, ret, len, array_len;
+
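+	/* Total length: the fixed-param TLV followed by an array TLV holding
+	 * one wmi_vdev_ch_power_info per power level.
+	 */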
+ array_len = sizeof(*ch) * param->num_pwr_levels;
+ len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->psd_power = param->is_psd_power;
+ cmd->eirp_power = param->eirp_power;
+ cmd->power_type_6ghz = param->ap_power_type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
+ vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
+
+ ptr += sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, array_len);
+
+ ptr += TLV_HDR_SIZE;
+ ch = (struct wmi_vdev_ch_power_info *)ptr;
+
+ for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+ ch->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_CH_POWER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*ch) - TLV_HDR_SIZE);
+
+ ch->chan_cfreq = param->chan_power_info[i].chan_cfreq;
+ ch->tx_power = param->chan_power_info[i].tx_power;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n",
+ ch->chan_cfreq, ch->tx_power);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param)
{
@@ -3930,7 +3994,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
struct ath11k_vif *arvif;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -3956,8 +4020,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
- GFP_KERNEL);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -4749,6 +4812,14 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
soc->pdevs[0].pdev_id = 0;
}
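+	/* Allocated once per SoC; GFP_ATOMIC because this parser can run in
+	 * atomic WMI event context.
+	 */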
+ if (!soc->reg_info_store) {
+ soc->reg_info_store = kcalloc(soc->num_radios,
+ sizeof(*soc->reg_info_store),
+ GFP_ATOMIC);
+ if (!soc->reg_info_store)
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -5003,7 +5074,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5028,6 +5099,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
vdev_rsp->mac_id = ev->mac_id;
vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+ vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power;
kfree(tb);
return 0;
@@ -5102,7 +5174,7 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5278,7 +5350,7 @@ static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5634,7 +5706,7 @@ static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5666,7 +5738,7 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5686,15 +5758,15 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
return 0;
}
-static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5722,7 +5794,7 @@ static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5876,7 +5948,7 @@ static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6052,7 +6124,7 @@ static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6085,7 +6157,7 @@ static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6112,7 +6184,7 @@ static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6153,14 +6225,14 @@ exit:
return idx;
}
-static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6199,7 +6271,7 @@ ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6239,7 +6311,7 @@ ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6270,7 +6342,7 @@ static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6995,7 +7067,7 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
const void **tb;
int ret, i;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7060,32 +7132,15 @@ static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
wake_up(&wmi->tx_ce_desc_wq);
}
-static bool ath11k_reg_is_world_alpha(char *alpha)
-{
- if (alpha[0] == '0' && alpha[1] == '0')
- return true;
-
- if (alpha[0] == 'n' && alpha[1] == 'a')
- return true;
-
- return false;
-}
-
-static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
- struct sk_buff *skb,
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb,
enum wmi_reg_chan_list_cmd_type id)
{
- struct cur_regulatory_info *reg_info = NULL;
- struct ieee80211_regdomain *regd = NULL;
- bool intersect = false;
- int ret = 0, pdev_idx, i, j;
- struct ath11k *ar;
+ struct cur_regulatory_info *reg_info;
+ int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
- if (!reg_info) {
- ret = -ENOMEM;
- goto fallback;
- }
+ if (!reg_info)
+ return -ENOMEM;
if (id == WMI_REG_CHAN_LIST_CC_ID)
ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
@@ -7093,118 +7148,22 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
- ath11k_warn(ab, "failed to extract regulatory info from received event\n");
- goto fallback;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id);
-
- if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
- /* In case of failure to set the requested ctry,
- * fw retains the current regd. We print a failure info
- * and return from here.
- */
- ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
- goto mem_free;
- }
-
- pdev_idx = reg_info->phy_id;
-
- /* Avoid default reg rule updates sent during FW recovery if
- * it is already available
- */
- spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
- ab->default_regd[pdev_idx]) {
- spin_unlock(&ab->base_lock);
+ ath11k_warn(ab, "failed to extract regulatory info\n");
goto mem_free;
}
- spin_unlock(&ab->base_lock);
- if (pdev_idx >= ab->num_radios) {
- /* Process the event for phy0 only if single_pdev_only
- * is true. If pdev_idx is valid but not 0, discard the
- * event. Otherwise, it goes to fallback.
- */
- if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
- goto mem_free;
- else
- goto fallback;
- }
-
- /* Avoid multiple overwrites to default regd, during core
- * stop-start after mac registration.
- */
- if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
- !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
- (char *)reg_info->alpha2, 2))
+ ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath11k_warn(ab, "failed to process regulatory info %d\n", ret);
goto mem_free;
-
- /* Intersect new rules with default regd if a new country setting was
- * requested, i.e a default regd was already set during initialization
- * and the regd coming from this event has a valid country info.
- */
- if (ab->default_regd[pdev_idx] &&
- !ath11k_reg_is_world_alpha((char *)
- ab->default_regd[pdev_idx]->alpha2) &&
- !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
- intersect = true;
-
- regd = ath11k_reg_build_regd(ab, reg_info, intersect);
- if (!regd) {
- ath11k_warn(ab, "failed to build regd from reg_info\n");
- goto fallback;
- }
-
- spin_lock(&ab->base_lock);
- if (ab->default_regd[pdev_idx]) {
- /* The initial rules from FW after WMI Init is to build
- * the default regd. From then on, any rules updated for
- * the pdev could be due to user reg changes.
- * Free previously built regd before assigning the newly
- * generated regd to ar. NULL pointer handling will be
- * taken care by kfree itself.
- */
- ar = ab->pdevs[pdev_idx].ar;
- kfree(ab->new_regd[pdev_idx]);
- ab->new_regd[pdev_idx] = regd;
- queue_work(ab->workqueue, &ar->regd_update_work);
- } else {
- /* This regd would be applied during mac registration and is
- * held constant throughout for regd intersection purpose
- */
- ab->default_regd[pdev_idx] = regd;
}
- ab->dfs_region = reg_info->dfs_region;
- spin_unlock(&ab->base_lock);
- goto mem_free;
+ kfree(reg_info);
+ return 0;
-fallback:
- /* Fallback to older reg (by sending previous country setting
- * again if fw has succeeded and we failed to process here.
- * The Regdomain should be uniform across driver and fw. Since the
- * FW has processed the command and sent a success status, we expect
- * this function to succeed as well. If it doesn't, CTRY needs to be
- * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
- */
- /* TODO: This is rare, but still should also be handled */
- WARN_ON(1);
mem_free:
- if (reg_info) {
- kfree(reg_info->reg_rules_2ghz_ptr);
- kfree(reg_info->reg_rules_5ghz_ptr);
- if (reg_info->is_ext_reg_event) {
- for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
-
- for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
- for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
- }
- kfree(reg_info);
- }
+ ath11k_reg_reset_info(reg_info);
+ kfree(reg_info);
return ret;
}
@@ -7362,7 +7321,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff
}
ar->last_wmi_vdev_start_status = 0;
-
+ ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power;
status = vdev_start_resp.status;
if (WARN_ON_ONCE(status)) {
@@ -7384,8 +7343,7 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
- if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -7416,7 +7374,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7884,7 +7842,7 @@ static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath11k_warn(ab, "failed to extract chan info event");
return;
}
@@ -8216,7 +8174,7 @@ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8267,7 +8225,7 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -8281,7 +8239,7 @@ ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8315,7 +8273,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
struct ath11k *ar;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8369,7 +8327,7 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
const struct wmi_pdev_temperature_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8409,7 +8367,7 @@ static void ath11k_fils_discovery_event(struct ath11k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8441,7 +8399,7 @@ static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8567,7 +8525,7 @@ static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
const struct wmi_twt_add_dialog_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8604,7 +8562,7 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
u64 replay_ctr;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -9793,3 +9751,9 @@ int ath11k_wmi_sta_keepalive(struct ath11k *ar,
return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
+
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar)
+{
+ return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
+}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index ff0a9a92beeb..bb419e3abb00 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -15,6 +15,7 @@ struct ath11k;
struct ath11k_fw_stats;
struct ath11k_fw_dbglog;
struct ath11k_vif;
+struct ath11k_reg_tpc_power_info;
#define PSOC_HOST_MAX_NUM_SS (8)
@@ -327,6 +328,22 @@ enum wmi_tlv_cmd_id {
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_VDEV_SET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_TX_POWER_CMDID,
+ WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+ WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+ WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+ WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+ WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+ WMI_VDEV_DELETE_ALL_PEER_CMDID,
+ WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+ WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+ WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+ WMI_VDEV_SET_PCL_CMDID,
+ WMI_VDEV_GET_BIG_DATA_CMDID,
+ WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+ WMI_VDEV_SET_TPC_POWER_CMDID,
WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
WMI_PEER_DELETE_CMDID,
WMI_PEER_FLUSH_TIDS_CMDID,
@@ -1880,6 +1897,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+ WMI_TAG_VDEV_CH_POWER_INFO,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
WMI_TAG_MAX
@@ -2114,6 +2133,7 @@ enum wmi_tlv_service {
/* The second 128 bits */
WMI_MAX_EXT_SERVICE = 256,
WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265,
+ WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357,
@@ -3168,6 +3188,41 @@ struct wlan_ssid {
u8 ssid[WLAN_SSID_MAX_LEN];
};
+struct wmi_vdev_ch_power_info {
+ u32 tlv_header;
+
+ /* Channel center frequency (MHz) */
+ u32 chan_cfreq;
+
+	/* Unit: dBm. Either the PSD/EIRP power for this frequency, or an
+	 * incremental power for non-PSD bandwidths.
+	 */
+ u32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+
+ /* Value: 0 or 1, is PSD power or not */
+ u32 psd_power;
+
+ /* Maximum EIRP power (dBm units), valid only if power is PSD */
+ u32 eirp_power;
+
+ /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+ u32 power_type_6ghz;
+
+	/* This fixed_param TLV is followed by num_pwr_levels of
+	 * wmi_vdev_ch_power_info TLVs.
+	 * For PSD power, each entry is the PSD/EIRP power of one 20 MHz
+	 * chunk of the frequency range.
+	 * For non-PSD power, the values cover 20 MHz, 40 MHz, and so on up
+	 * to the BSS bandwidth.
+	 * Software derives num_pwr_levels from the number of elements in
+	 * the variable-length array.
+	 */
+} __packed;
+
#define WMI_IE_BITMAP_SIZE 8
/* prefix used by scan requestor ids on the host */
@@ -3308,24 +3363,19 @@ struct scan_req_params {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 scan_ctrl_flags_ext;
u32 dwell_time_active;
u32 dwell_time_active_2g;
@@ -3339,36 +3389,31 @@ struct scan_req_params {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@@ -4119,6 +4164,7 @@ struct wmi_vdev_start_resp_event {
};
u32 cfgd_tx_streams;
u32 cfgd_rx_streams;
+ s32 max_allowed_tx_power;
} __packed;
/* VDEV start response status codes */
@@ -4951,6 +4997,7 @@ struct ath11k_targ_cap {
};
enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -6295,8 +6342,8 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp);
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
@@ -6479,5 +6526,9 @@ int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_va
int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
int ath11k_wmi_sta_keepalive(struct ath11k *ar,
const struct wmi_sta_keepalive_arg *arg);
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar);
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 62c52e733b5e..71669f94ff75 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -19,7 +19,9 @@ ath12k-y += core.o \
hw.o \
mhi.o \
pci.o \
- dp_mon.o
+ dp_mon.o \
+ fw.o \
+ p2p.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 6c01b282fcd3..391b6fb2bd42 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -14,6 +14,7 @@
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
+#include "fw.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -104,27 +105,66 @@ int ath12k_core_resume(struct ath12k_base *ab)
return 0;
}
-static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
- size_t name_len)
+static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len, bool with_variant,
+ bool bus_type_mode)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
- if (ab->qmi.target.bdf_ext[0] != '\0')
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
- scnprintf(name, name_len,
- "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
- ath12k_bus_str(ab->hif.bus),
- ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ switch (ab->id.bdf_search) {
+ case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
+ if (bus_type_mode)
+ scnprintf(name, name_len,
+ "bus=%s",
+ ath12k_bus_str(ab->hif.bus));
+ else
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->id.vendor, ab->id.device,
+ ab->id.subsystem_vendor,
+ ab->id.subsystem_device,
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id,
+ variant);
+ break;
+ default:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+ break;
+ }
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
return 0;
}
+static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+}
+
+static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+}
+
+static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+}
+
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *file)
{
@@ -159,7 +199,9 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
struct ath12k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
- int bd_ie_type)
+ int ie_id,
+ int name_id,
+ int data_id)
{
const struct ath12k_fw_ie *hdr;
bool name_match_found;
@@ -169,7 +211,7 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
name_match_found = false;
- /* go through ATH12K_BD_IE_BOARD_ elements */
+ /* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath12k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
@@ -180,48 +222,50 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
- ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+ ath12k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath12k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
- switch (board_ie_id) {
- case ATH12K_BD_IE_BOARD_NAME:
+ if (board_ie_id == name_id) {
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
- break;
+ goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
- break;
+ goto next;
name_match_found = true;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found match for name '%s'",
+ "boot found match %s for name '%s'",
+ ath12k_bd_ie_type_str(ie_id),
boardname);
- break;
- case ATH12K_BD_IE_BOARD_DATA:
+ } else if (board_ie_id == data_id) {
if (!name_match_found)
/* no match found */
- break;
+ goto next;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found board data for '%s'", boardname);
+ "boot found %s for '%s'",
+ ath12k_bd_ie_type_str(ie_id),
+ boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
- default:
- ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+ } else {
+ ath12k_warn(ab, "unknown %s id found: %d\n",
+ ath12k_bd_ie_type_str(ie_id),
board_ie_id);
- break;
}
-
+next:
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
@@ -238,7 +282,10 @@ out:
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
struct ath12k_board_data *bd,
- const char *boardname)
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
{
size_t len, magic_len;
const u8 *data;
@@ -303,22 +350,23 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
goto err;
}
- switch (ie_id) {
- case ATH12K_BD_IE_BOARD:
+ if (ie_id == ie_id_match) {
ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
- ATH12K_BD_IE_BOARD);
+ ie_id_match,
+ name_id,
+ data_id);
if (ret == -ENOENT)
/* no match found, continue */
- break;
+ goto next;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
-
+next:
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
@@ -328,8 +376,9 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
out:
if (!bd->data || !bd->len) {
- ath12k_err(ab,
- "failed to fetch board data for %s from %s\n",
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath12k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA;
goto err;
@@ -356,28 +405,56 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
return 0;
}
-#define BOARD_NAME_SIZE 100
+#define BOARD_NAME_SIZE 200
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
- char boardname[BOARD_NAME_SIZE];
+ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+ char *filename, filepath[100];
int bd_api;
int ret;
- ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ filename = ATH12K_BOARD_API2_FILE;
+
+ ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) {
ath12k_err(ab, "failed to create board name: %d", ret);
return ret;
}
bd_api = 2;
- ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
+ sizeof(fallback_boardname));
+ if (ret) {
+ ath12k_err(ab, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
bd_api = 1;
ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
if (ret) {
- ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ath12k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath12k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params->fw.dir);
return ret;
}
@@ -387,6 +464,79 @@ success:
return 0;
}
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create default board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
+ if (ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
+ ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
+
+exit:
+ if (!ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
+
+ return ret;
+}
+
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_STATIONS_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_STATIONS_SINGLE;
+}
+
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_PEERS_PDEV_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_PEERS_PDEV_SINGLE;
+}
+
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_TIDS(DBS);
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_TIDS(DBS_SBS);
+ return TARGET_NUM_TIDS(SINGLE);
+}
+
static void ath12k_core_stop(struct ath12k_base *ab)
{
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
@@ -592,14 +742,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
ath12k_dp_cc_config(ab);
- ath12k_dp_pdev_pre_alloc(ab);
-
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
}
+ ath12k_dp_hal_rx_desc_init(ab);
+
ret = ath12k_wmi_cmd_init(ab);
if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
@@ -759,20 +909,30 @@ static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
struct ath12k *ar;
+ struct ath12k_hw *ah;
+ struct ieee80211_hw *hw;
bool rfkill_radio_on;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- if (!ar)
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ if (!ah)
continue;
- ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
- wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+ if (!ar)
+ continue;
+
+ ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
+ }
+
+ hw = ah->hw;
+ wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
}
}
@@ -801,6 +961,7 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
spin_lock_bh(&ab->base_lock);
@@ -810,16 +971,24 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ah = ab->ah[i];
+ ieee80211_stop_queues(ah->hw);
+ }
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH12K_STATE_OFF)
continue;
- ieee80211_stop_queues(ar->hw);
ath12k_mac_drain_tx(ar);
complete(&ar->scan.started);
complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
@@ -856,7 +1025,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
case ATH12K_STATE_ON:
ar->state = ATH12K_STATE_RESTARTING;
ath12k_core_halt(ar);
- ieee80211_restart_hw(ar->hw);
+ ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
case ATH12K_STATE_OFF:
ath12k_warn(ab,
@@ -979,6 +1148,8 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return ret;
}
+ ath12k_fw_map(ab);
+
return 0;
}
@@ -1007,6 +1178,7 @@ void ath12k_core_deinit(struct ath12k_base *ab)
ath12k_hif_power_down(ab);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
+ ath12k_fw_unmap(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1054,6 +1226,8 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
+ ab->qmi.num_radios = U8_MAX;
+ ab->slo_capable = true;
return ab;
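
To make the new board-name fallback concrete, here is a minimal user-space sketch (not driver code) of the three formats __ath12k_core_create_board_name() can emit; the bus string, PCI IDs, QMI ids and variant below are placeholder values.

```c
/* Illustration only: reproduces the three board-name formats built by
 * __ath12k_core_create_board_name(). All IDs below are placeholders.
 */
#include <stdio.h>

int main(void)
{
	char name[200];	/* matches the new BOARD_NAME_SIZE */

	/* ATH12K_BDF_SEARCH_BUS_AND_BOARD, bus_type_mode == false */
	snprintf(name, sizeof(name),
		 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
		 "pci", 0x17cb, 0x1107, 0x17cb, 0x0108, 2, 255, ",variant=ABC");
	printf("%s\n", name);

	/* ATH12K_BDF_SEARCH_BUS_AND_BOARD, bus_type_mode == true (regdb fallback) */
	snprintf(name, sizeof(name), "bus=%s", "pci");
	printf("%s\n", name);

	/* default search mode: QMI chip/board id only */
	snprintf(name, sizeof(name), "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
		 "pci", 2, 255, "");
	printf("%s\n", name);

	return 0;
}
```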
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 8458dc292821..97e5a0ccd233 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/firmware.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -24,6 +25,7 @@
#include "hal_rx.h"
#include "reg.h"
#include "dbring.h"
+#include "fw.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -55,6 +57,11 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+enum ath12k_bdf_search {
+ ATH12K_BDF_SEARCH_DEFAULT,
+ ATH12K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
enum wme_ac {
WME_AC_BE,
WME_AC_BK,
@@ -259,6 +266,7 @@ struct ath12k_vif {
u8 tx_encap_type;
u8 vdev_stats_id;
u32 punct_bitmap;
+ bool ps;
};
struct ath12k_vif_iter {
@@ -420,7 +428,7 @@ struct ath12k_sta {
};
#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5945
+#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
#define ATH12K_NUM_CHANS 100
#define ATH12K_MAX_5G_CHAN 173
@@ -468,7 +476,7 @@ struct ath12k_per_peer_tx_stats {
struct ath12k {
struct ath12k_base *ab;
struct ath12k_pdev *pdev;
- struct ieee80211_hw *hw;
+ struct ath12k_hw *ah;
struct ath12k_wmi_pdev *wmi;
struct ath12k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
@@ -532,6 +540,7 @@ struct ath12k {
/* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
u8 pdev_idx;
u8 lmac_id;
+ u8 hw_link_id;
struct completion peer_assoc_done;
struct completion peer_delete_done;
@@ -591,6 +600,13 @@ struct ath12k {
int monitor_vdev_id;
};
+struct ath12k_hw {
+ struct ieee80211_hw *hw;
+
+ u8 num_radio;
+ struct ath12k radio[] __aligned(sizeof(void *));
+};
+
struct ath12k_band_cap {
u32 phy_id;
u32 max_bw_supported;
@@ -724,6 +740,16 @@ struct ath12k_base {
u8 fw_pdev_count;
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
+
+	/* Holds information about wiphy (hw) registration.
+	 *
+	 * In the Multi/Single Link Operation case, all pdevs are registered
+	 * as a single wiphy. In other (legacy/non-MLO) cases, each pdev is
+	 * registered as a separate wiphy.
+ */
+ struct ath12k_hw *ah[MAX_RADIOS];
+ u8 num_hw;
+
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -793,10 +819,44 @@ struct ath12k_base {
/* true means radio is on */
bool rfkill_radio_on;
+ struct {
+ enum ath12k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
+ struct {
+ u32 api_version;
+
+ const struct firmware *fw;
+ const u8 *amss_data;
+ size_t amss_len;
+ const u8 *amss_dualmac_data;
+ size_t amss_dualmac_len;
+ const u8 *m3_data;
+ size_t m3_len;
+
+ DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ } fw;
+
+ const struct hal_rx_ops *hal_rx_ops;
+
+	/* slo_capable denotes whether single/multi link operation
+	 * is supported within the same chip (SoC).
+ */
+ bool slo_capable;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
+struct ath12k_pdev_map {
+ struct ath12k_base *ab;
+ u8 pdev_idx;
+};
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
@@ -810,6 +870,7 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
int ath12k_core_fetch_bdf(struct ath12k_base *ath12k,
struct ath12k_board_data *bd);
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd);
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd);
int ath12k_core_check_dt(struct ath12k_base *ath12k);
int ath12k_core_check_smbios(struct ath12k_base *ab);
void ath12k_core_halt(struct ath12k *ar);
@@ -818,6 +879,9 @@ int ath12k_core_suspend(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -882,4 +946,18 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
return "unknown";
}
+static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw)
+{
+ return hw->priv;
+}
+
+static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah)
+{
+ return ah->radio;
+}
+
+static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
+{
+ return ar->ah->hw;
+}
#endif /* _CORE_H_ */
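
As a side note on the new struct ath12k_hw, the trailing radio[] flexible array means one allocation covers the group and all of its radios. Below is a minimal user-space sketch of that sizing arithmetic, with stand-in types invented for illustration; the kernel side would use kzalloc(struct_size(ah, radio, num_radio), GFP_KERNEL).

```c
/* Sketch (user space, not driver code): sizing a struct with a trailing
 * flexible array, as struct ath12k_hw does for its per-radio array.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct radio { int dummy[8]; };		/* stand-in for struct ath12k */

struct hw_group {			/* stand-in for struct ath12k_hw */
	void *hw;			/* stand-in for ieee80211_hw */
	unsigned char num_radio;
	struct radio radio[];		/* flexible array member */
};

int main(void)
{
	unsigned char num_radio = 2;	/* e.g. a DBS chip */
	size_t sz = offsetof(struct hw_group, radio) +
		    num_radio * sizeof(struct radio);
	struct hw_group *ah = calloc(1, sz);

	if (!ah)
		return 1;

	ah->num_radio = num_radio;
	printf("allocated %zu bytes for %u radios\n", sz, ah->num_radio);
	free(ah);
	return 0;
}
```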
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index a6f81f2f97ef..c8e1b244b69e 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -997,6 +997,29 @@ void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
}
}
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
+{
+ if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
+ ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
+ return true;
+ }
+ return false;
+}
+
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
+{
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+		/* RX TLV compaction is supported, hence switch hal_rx_ops
+		 * to the compact variant.
+ */
+ ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
+ }
+ ab->hal.hal_desc_sz =
+ ab->hal_rx_ops->rx_desc_get_desc_size();
+}
+
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 1df3cdd46140..eb2dd408e081 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@@ -150,7 +150,7 @@ struct ath12k_pdev_dp {
#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
-#define DP_BA_WIN_SZ_MAX 256
+#define DP_BA_WIN_SZ_MAX 1024
#define DP_TCL_NUM_RING_MAX 4
@@ -170,6 +170,7 @@ struct ath12k_pdev_dp {
#define DP_REO_CMD_RING_SIZE 128
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RX_MAC_BUF_RING_SIZE 2048
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
@@ -765,6 +766,11 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0)
+
enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
@@ -1088,6 +1094,11 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 rx_mpdu_offset;
__le32 rx_msdu_offset;
__le32 rx_attn_offset;
+ __le32 info2;
+ __le32 reserved[2];
+ __le32 rx_mpdu_start_end_mask;
+ __le32 rx_msdu_end_word_mask;
+ __le32 info3;
} __packed;
struct htt_rx_ring_tlv_filter {
@@ -1104,6 +1115,9 @@ struct htt_rx_ring_tlv_filter {
u16 rx_msdu_end_offset;
u16 rx_msdu_start_offset;
u16 rx_attn_offset;
+ u16 rx_mpdu_start_wmask;
+ u16 rx_mpdu_end_wmask;
+ u32 rx_msdu_end_wmask;
};
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
@@ -1820,4 +1834,6 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie);
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 desc_id);
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab);
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index be4b39f5fa80..2d56913a75d0 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -864,7 +864,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff
{
u32 rx_pkt_offset, l2_hdr_offset;
- rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
+ rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
@@ -917,7 +917,8 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
- hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
+	hdr_desc = ab->hal_rx_ops->rx_desc_get_msdu_payload(rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
@@ -1130,7 +1131,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 1ee83f765929..ca76c018dd0c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -23,34 +23,34 @@
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
+ if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
- return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}
static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}
static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
@@ -58,7 +58,7 @@ static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
@@ -67,156 +67,156 @@ static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
+ return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}
static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
+ return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
+ return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}
static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}
static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}
static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}
static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}
static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}
static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
+ return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}
static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}
static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
+ return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}
static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}
static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}
static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
- ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+ ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}
static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
- ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
+ ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
- ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
+ ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}
static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
+ return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}
static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
+ return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}
static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
- ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
+ ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}
static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
@@ -224,13 +224,19 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
- ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+ ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
+}
+
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
@@ -1761,7 +1767,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@@ -2458,7 +2464,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
@@ -2473,7 +2479,7 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
u8 l3_pad_bytes;
u16 msdu_len;
int ret;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
@@ -2804,7 +2810,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
- u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
u8 *key, *data;
u8 key_idx;
@@ -2844,7 +2850,7 @@ mic_fail:
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
- ieee80211_rx(ar->hw, msdu);
+ ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
return -EINVAL;
}
@@ -2854,7 +2860,7 @@ static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
if (!flags)
return;
@@ -2892,7 +2898,7 @@ static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
- u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
@@ -2968,7 +2974,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct ath12k_rx_desc_info *desc_info;
u8 dst_ind;
- hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ hal_rx_desc_sz = ab->hal.hal_desc_sz;
link_desc_banks = dp->link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
@@ -3122,7 +3128,7 @@ static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
@@ -3305,7 +3311,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
struct ath12k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
struct ath12k_rx_desc_info *desc_info;
u64 desc_va;
@@ -3486,7 +3492,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
- (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
+ (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH12K_SKB_RXCB(skb);
@@ -3510,7 +3516,7 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
@@ -3607,7 +3613,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
@@ -3695,16 +3701,15 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- int mac_id;
+ u8 mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
- int ret, i;
+ int ret, pdev_id;
- for (i = 0; i < ab->num_radios; i++)
- __skb_queue_head_init(&msdu_list[i]);
+ __skb_queue_head_init(&msdu_list);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
@@ -3737,11 +3742,6 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
}
- /* FIXME: Extract mac id correctly. Since descs are not tied
- * to mac, we can extract from vdev id in ring desc.
- */
- mac_id = 0;
-
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
@@ -3771,7 +3771,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
- __skb_queue_tail(&msdu_list[mac_id], msdu);
+
+ __skb_queue_tail(&msdu_list, msdu);
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
@@ -3788,21 +3789,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
rcu_read_lock();
- for (i = 0; i < ab->num_radios; i++) {
- if (!rcu_dereference(ab->pdevs_active[i])) {
- __skb_queue_purge(&msdu_list[i]);
+ while ((msdu = __skb_dequeue(&msdu_list))) {
+ mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
+ (struct hal_rx_desc *)msdu->data);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ ar = ab->pdevs[pdev_id].ar;
+
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
continue;
}
- ar = ab->pdevs[i].ar;
-
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- __skb_queue_purge(&msdu_list[i]);
+ dev_kfree_skb_any(msdu);
continue;
}
-
- while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
- ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
done:
@@ -3922,7 +3924,7 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3935,14 +3937,20 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
tlv_filter.rx_packet_offset = hal_rx_desc_sz;
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
+
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+ tlv_filter.rx_mpdu_start_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
+ tlv_filter.rx_msdu_end_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
+ tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
+ }
- /* TODO: Selectively subscribe to required qwords within msdu_end
- * and mpdu_start and setup the mask in below msg
- * and modify the rx_desc struct
- */
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
@@ -3957,7 +3965,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3973,9 +3981,9 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
/* TODO: Selectively subscribe to required qwords within msdu_end
* and mpdu_start and setup the mask in below msg
@@ -4086,7 +4094,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
- i, 1024);
+ i, DP_RX_MAC_BUF_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
i);
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 62f9cdbb811c..572b87153647 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -151,7 +151,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
@@ -401,7 +401,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
}
}
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
}
static void
@@ -498,7 +498,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
rcu_read_unlock();
@@ -837,7 +837,7 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -964,6 +964,26 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
}
+ if (tlv_filter->rx_mpdu_start_wmask > 0 &&
+ tlv_filter->rx_msdu_end_wmask > 0) {
+ cmd->info2 |=
+ le32_encode_bits(true,
+ HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
+ cmd->rx_mpdu_start_end_mask =
+ le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
+		/* mpdu_end is not used by any hardware so far;
+		 * assign it via hal ops in the future if a chip
+		 * starts using it.
+ */
+ cmd->rx_mpdu_start_end_mask |=
+ le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
+ cmd->rx_msdu_end_word_mask =
+ le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
+ }
+
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
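
The packing above is plain FIELD_PREP-style bit placement into the new command words. A small user-space sketch follows, assuming placeholder wmask values (the real QCN9274_MPDU_START_WMASK/QCN9274_MSDU_END_WMASK constants live in hal.h and are not shown in this hunk):

```c
/* Sketch (user space): how the compact-TLV word masks are packed into the
 * HTT rx ring selection command, mirroring the GENMASK layouts from dp.h.
 */
#include <stdio.h>
#include <stdint.h>

/* user-space stand-in for the kernel's FIELD_PREP() */
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define CFG_RX_MPDU_START_MASK	0x0000ffffu	/* GENMASK(15, 0) */
#define CFG_RX_MPDU_END_MASK	0x00070000u	/* GENMASK(18, 16) */
#define CFG_RX_MSDU_END_MASK	0x0001ffffu	/* GENMASK(16, 0) */

int main(void)
{
	uint16_t mpdu_start_wmask = 0x07ff;	/* placeholder wmask */
	uint16_t mpdu_end_wmask = 0;		/* unused by current chips */
	uint32_t msdu_end_wmask = 0x13fff;	/* placeholder wmask */

	uint32_t mpdu_start_end =
		FIELD_PREP(CFG_RX_MPDU_START_MASK, mpdu_start_wmask) |
		FIELD_PREP(CFG_RX_MPDU_END_MASK, mpdu_end_wmask);
	uint32_t msdu_end = FIELD_PREP(CFG_RX_MSDU_END_MASK, msdu_end_wmask);

	printf("rx_mpdu_start_end_mask = 0x%08x\n", mpdu_start_end);
	printf("rx_msdu_end_word_mask  = 0x%08x\n", msdu_end);
	return 0;
}
```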
diff --git a/drivers/net/wireless/ath/ath12k/fw.c b/drivers/net/wireless/ath/ath12k/fw.c
new file mode 100644
index 000000000000..5be4b2d4a19d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+
+#include "debug.h"
+
+static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
+ const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath12k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ ab->fw.fw = ath12k_core_firmware_request(ab, name);
+ if (IS_ERR(ab->fw.fw)) {
+ ret = PTR_ERR(ab->fw.fw);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to load %s: %d\n", name, ret);
+ ab->fw.fw = NULL;
+ return ret;
+ }
+
+ data = ab->fw.fw->data;
+ len = ab->fw.fw->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH12K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath12k_err(ab, "firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH12K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath12k_err(ab, "Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ /* make sure there's space for padding */
+ if (magic_len > len) {
+ ath12k_err(ab, "No space for padding after magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath12k_fw_ie)) {
+ hdr = (struct ath12k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath12k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH12K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH12K_FW_IE_FEATURES:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH12K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ab->fw.fw_features);
+ }
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "",
+ ab->fw.fw_features,
+ sizeof(ab->fw.fw_features));
+ break;
+ case ATH12K_FW_IE_AMSS_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.amss_data = data;
+ ab->fw.amss_len = ie_len;
+ break;
+ case ATH12K_FW_IE_M3_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found m3 image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.m3_data = data;
+ ab->fw.m3_len = ie_len;
+ break;
+ case ATH12K_FW_IE_AMSS_DUALMAC_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found dualmac fw image ie (%zd B)\n",
+ ie_len);
+ ab->fw.amss_dualmac_data = data;
+ ab->fw.amss_dualmac_len = ie_len;
+ break;
+ default:
+ ath12k_warn(ab, "Unknown FW IE: %u\n", ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ /* make sure there's space for padding */
+ if (ie_len > len)
+ break;
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ return 0;
+
+err:
+ release_firmware(ab->fw.fw);
+ ab->fw.fw = NULL;
+ return ret;
+}
+
+void ath12k_fw_map(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_fw_request_firmware_api_n(ab, ATH12K_FW_API2_FILE);
+ if (ret == 0)
+ ab->fw.api_version = 2;
+ else
+ ab->fw.api_version = 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "using fw api %d\n",
+ ab->fw.api_version);
+}
+
+void ath12k_fw_unmap(struct ath12k_base *ab)
+{
+ release_firmware(ab->fw.fw);
+ memset(&ab->fw, 0, sizeof(ab->fw));
+}
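
The parser above implies a simple container layout for firmware-2.bin: a NUL-terminated magic string padded to 4 bytes, followed by (id, len, payload) IEs with each payload padded to 4. Below is a user-space sketch that builds such a skeleton image, using the TIMESTAMP and FEATURES IE ids from fw.h and placeholder payload values:

```c
/* Sketch (user space): the firmware-2.bin container layout implied by
 * ath12k_fw_request_firmware_api_n(). Payload values are placeholders.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

static size_t put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
	return 4;
}

int main(void)
{
	static const char magic[] = "QCOM-ATH12K-FW";
	static const uint8_t features[] = { 0x01 };	/* bit 0: MULTI_QRTR_ID */
	uint8_t image[256] = { 0 };
	size_t off;

	memcpy(image, magic, sizeof(magic));		/* includes the NUL */
	off = ALIGN4(sizeof(magic));			/* pad magic to 4 */

	off += put_le32(image + off, 0);		/* ATH12K_FW_IE_TIMESTAMP */
	off += put_le32(image + off, 4);		/* ie_len */
	off += put_le32(image + off, 1700000000u);	/* placeholder timestamp */

	off += put_le32(image + off, 1);		/* ATH12K_FW_IE_FEATURES */
	off += put_le32(image + off, sizeof(features));
	memcpy(image + off, features, sizeof(features));
	off += ALIGN4(sizeof(features));		/* pad payload to 4 */

	printf("built %zu-byte firmware-2.bin skeleton\n", off);
	return 0;
}
```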
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
new file mode 100644
index 000000000000..3ff041f15fa0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_FW_H
+#define ATH12K_FW_H
+
+#define ATH12K_FW_API2_FILE "firmware-2.bin"
+#define ATH12K_FIRMWARE_MAGIC "QCOM-ATH12K-FW"
+
+enum ath12k_fw_ie_type {
+ ATH12K_FW_IE_TIMESTAMP = 0,
+ ATH12K_FW_IE_FEATURES = 1,
+ ATH12K_FW_IE_AMSS_IMAGE = 2,
+ ATH12K_FW_IE_M3_IMAGE = 3,
+ ATH12K_FW_IE_AMSS_DUALMAC_IMAGE = 4,
+};
+
+enum ath12k_fw_features {
+ /* The firmware supports setting the QRTR id via register
+ * PCIE_LOCAL_REG_QRTR_NODE_ID
+ */
+ ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
+
+ /* keep last */
+ ATH12K_FW_FEATURE_COUNT,
+};
+
+void ath12k_fw_map(struct ath12k_base *ab);
+void ath12k_fw_unmap(struct ath12k_base *ab);
+
+#endif /* ATH12K_FW_H */
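
For reference, the FEATURES IE payload is a byte array of feature bits, decoded in fw.c as index = i / 8, bit = i % 8. A tiny user-space sketch of that decoding, with a made-up one-byte payload:

```c
/* Sketch (user space): mapping FEATURES IE bytes onto feature bits, as in
 * ath12k_fw_request_firmware_api_n().
 */
#include <stdio.h>

int main(void)
{
	const unsigned char ie_data[] = { 0x01 };	/* feature bit 0 set */
	int ie_len = sizeof(ie_data);
	int count = 1;			/* stand-in for ATH12K_FW_FEATURE_COUNT */

	for (int i = 0; i < count; i++) {
		int index = i / 8;
		int bit = i % 8;

		if (index == ie_len)
			break;		/* IE shorter than the feature bitmap */

		if (ie_data[index] & (1 << bit))
			printf("firmware feature %d supported\n", i);
	}
	return 0;
}
```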
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index a489369d8068..78310da8cfe8 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
- RX_MSDU_END_INFO5_DA_IS_MCBC;
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -626,6 +626,21 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
return 0;
}
+static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void)
+{
+ return QCN9274_MPDU_START_WMASK;
+}
+
+static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void)
+{
+ return QCN9274_MSDU_END_WMASK;
+}
+
+static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void)
+{
+ return &hal_rx_qcn9274_compact_ops;
+}
+
static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
@@ -680,7 +695,17 @@ static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_qcn9274_ops = {
+static u32 ath12k_hw_qcn9274_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_ops = {
.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
@@ -712,13 +737,367 @@ const struct hal_ops hal_qcn9274_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
- .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
+};
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+	return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_QCN9274_INFO5_TID);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end;
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id);
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10);
+
+ info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+ desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9274_compact.msdu_payload[0];
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9274_compact.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl;
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5,
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[5] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[6] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+ crypto_hdr[7] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274_compact);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag,
+ RX_MSDU_END_64_TLV_SRC_LINK_ID);
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
+ .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link,
+};
+
+const struct hal_ops hal_qcn9274_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+ .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
+ .rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_qcn9274_rx_mpdu_start_wmask_get,
+ .rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get,
+ .get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops,
};
static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
@@ -1134,7 +1513,17 @@ static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_wcn7850_ops = {
+static u32 ath12k_hw_wcn7850_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_wcn7850);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
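+ /* WCN7850 rx descriptors carry no source link id; report link 0 */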
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_wcn7850_ops = {
.rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
@@ -1167,13 +1556,21 @@ const struct hal_ops hal_wcn7850_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
- .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link,
+};
+
+const struct hal_ops hal_wcn7850_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
+ .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
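+ /* WCN7850 does not use compact rx descriptors, so the write-mask
+ * and compact-ops hooks stay NULL.
+ */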
+ .rxdma_ring_wmask_rx_mpdu_start = NULL,
+ .rxdma_ring_wmask_rx_msdu_end = NULL,
+ .get_hal_rx_compact_ops = NULL,
};
static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index fc47e7e6b498..107927d64bbb 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
@@ -1023,6 +1023,8 @@ struct ath12k_hal {
/* shadow register configuration */
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
+
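+ /* size of the rx descriptor layout currently in use */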
+ u32 hal_desc_sz;
};
/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
@@ -1031,7 +1033,7 @@ struct ath12k_hal_tcl_to_wbm_rbm_map {
u8 rbm_id;
};
-struct hal_ops {
+struct hal_rx_ops {
bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
@@ -1070,18 +1072,30 @@ struct hal_ops {
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
- int (*create_srng_config)(struct ath12k_base *ab);
bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_desc_size)(void);
+ u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
+};
+
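+/* Operations that depend on the rx descriptor layout live in hal_rx_ops,
+ * so QCN9274 can switch between the full and compact descriptors at
+ * runtime; hal_ops keeps the per-chip hooks that never change.
+ */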
+struct hal_ops {
+ int (*create_srng_config)(struct ath12k_base *ab);
+ u16 (*rxdma_ring_wmask_rx_mpdu_start)(void);
+ u32 (*rxdma_ring_wmask_rx_msdu_end)(void);
+ const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void);
const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
extern const struct hal_ops hal_qcn9274_ops;
extern const struct hal_ops hal_wcn7850_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_compact_ops;
+extern const struct hal_rx_ops hal_rx_wcn7850_ops;
+
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
int tid, u32 ba_window_size,
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 6c17adc6d60b..63340256d3f6 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -2500,13 +2500,13 @@ struct hal_rx_reo_queue {
#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(9, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(11, 10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(24, 13)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(27)
struct hal_reo_update_rx_queue {
struct hal_reo_cmd_hdr cmd;
@@ -2517,6 +2517,12 @@ struct hal_reo_update_rx_queue {
__le32 pn[4];
} __packed;
+struct hal_rx_reo_queue_1k {
+ struct hal_desc_header desc_hdr;
+ __le32 rx_bitmap_1023_288[23];
+ __le32 reserved[8];
+} __packed;
+
#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index 4f25eb9f7745..f7c1aaa3b5d4 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -247,7 +247,7 @@ int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath12k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath12k_warn(ab, "Unknown reo command %d\n", type);
@@ -688,23 +688,28 @@ void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
- u32 num_ext_desc;
+ u32 num_ext_desc, num_1k_desc = 0;
if (ba_window_size <= 1) {
if (tid != HAL_DESC_REO_NON_QOS_TID)
num_ext_desc = 1;
else
num_ext_desc = 0;
+
} else if (ba_window_size <= 105) {
num_ext_desc = 1;
} else if (ba_window_size <= 210) {
num_ext_desc = 2;
- } else {
+ } else if (ba_window_size <= 256) {
num_ext_desc = 3;
+ } else {
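+ /* BA window sizes above 256 (up to 1024) need the large bitmap:
+ * ten extension descriptors plus one 1k descriptor.
+ */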
+ num_ext_desc = 10;
+ num_1k_desc = 1;
}
return sizeof(struct hal_rx_reo_queue) +
- (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)) +
+ (num_1k_desc * sizeof(struct hal_rx_reo_queue_1k));
}
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index de60d988d860..0b17dfd47856 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -897,7 +897,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -914,6 +913,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
{
.name = "wcn7850 hw2.0",
@@ -950,7 +956,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.vdev_start_delay = true,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.idle_ps = true,
@@ -960,7 +969,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = true,
- .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
.num_tcl_banks = 7,
.max_tx_ring = 3,
@@ -978,6 +986,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 1,
.rddm_size = 0x780000,
+
+ .def_num_link = 2,
+ .max_mlo_peer = 32,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = true,
},
{
.name = "qcn9274 hw2.0",
@@ -987,7 +1002,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
- .max_radios = 1,
+ .max_radios = 2,
.single_pdev_only = false,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
.internal_sleep_clock = false,
@@ -1023,7 +1038,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -1040,6 +1054,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index d2622bfef942..87965980b938 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
@@ -17,19 +17,30 @@
/* Num VDEVS per radio */
#define TARGET_NUM_VDEVS (16 + 1)
-#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_SINGLE (TARGET_NUM_STATIONS_SINGLE + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS (TARGET_NUM_STATIONS_DBS + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS_SBS (TARGET_NUM_STATIONS_DBS_SBS + \
+ TARGET_NUM_VDEVS)
/* Num of peers for Single Radio mode */
-#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV_SINGLE)
/* Num of peers for DBS */
-#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV_DBS)
/* Num of peers for DBS_SBS */
-#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV_DBS_SBS)
-/* Max num of stations (per radio) */
-#define TARGET_NUM_STATIONS 512
+/* Max num of stations for Single Radio mode */
+#define TARGET_NUM_STATIONS_SINGLE 512
+
+/* Max num of stations for DBS */
+#define TARGET_NUM_STATIONS_DBS 128
+
+/* Max num of stations for DBS_SBS */
+#define TARGET_NUM_STATIONS_DBS_SBS 128
#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
#define TARGET_NUM_PEER_KEYS 2
@@ -66,6 +77,8 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
+#define TARGET_RX_PEER_METADATA_VER_V1A 2
+#define TARGET_RX_PEER_METADATA_VER_V1B 3
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@@ -174,7 +187,6 @@ struct ath12k_hw_params {
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
- u32 hal_desc_sz;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -192,6 +204,13 @@ struct ath12k_hw_params {
u32 rfkill_on_level;
u32 rddm_size;
+
+ u8 def_num_link;
+ u16 max_mlo_peer;
+
+ u32 otp_board_id_register;
+
+ bool supports_sta_ps;
};
struct ath12k_hw_ops {
@@ -242,10 +261,16 @@ enum ath12k_bd_ie_board_type {
ATH12K_BD_IE_BOARD_DATA = 1,
};
+enum ath12k_bd_ie_regdb_type {
+ ATH12K_BD_IE_REGDB_NAME = 0,
+ ATH12K_BD_IE_REGDB_DATA = 1,
+};
+
enum ath12k_bd_ie_type {
/* contains sub IEs of enum ath12k_bd_ie_board_type */
ATH12K_BD_IE_BOARD = 0,
- ATH12K_BD_IE_BOARD_EXT = 1,
+ /* contains sub IEs of enum ath12k_bd_ie_regdb_type */
+ ATH12K_BD_IE_REGDB = 1,
};
struct ath12k_hw_regs {
@@ -315,6 +340,18 @@ struct ath12k_hw_regs {
u32 hal_reo_status_ring_base;
};
+static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH12K_BD_IE_BOARD:
+ return "board data";
+ case ATH12K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
+
int ath12k_hw_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 88cec54c6c2e..52a5fb8b03e9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -241,8 +241,8 @@ static const u32 ath12k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
@@ -542,7 +542,7 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
arvif_iter.vdev_id = vdev_id;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
- ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
flags,
ath12k_get_arvif_iter,
&arvif_iter);
@@ -563,7 +563,8 @@ struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar) {
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@@ -1040,7 +1041,7 @@ static int ath12k_mac_monitor_start(struct ath12k *ar)
if (ar->monitor_started)
return 0;
- ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
ath12k_mac_get_any_chandef_iter,
&chandef);
if (!chandef)
@@ -1083,9 +1084,49 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath12k_mac_config(struct ath12k *ar, u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
@@ -1122,11 +1163,84 @@ err_mon_del:
return ret;
}
+static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_mac_config(ar, changed);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
+ ar->pdev_idx, ret);
+
+ return ret;
+}
+
+static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
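+ /* Locate the P2P IE in the beacon template and hand it to firmware,
+ * which inserts and maintains it in transmitted beacons.
+ */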
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie) {
+ ath12k_warn(ar->ab, "no P2P ie found in beacon\n");
+ return -ENOENT;
+ }
+
+ ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ const u8 *next, *end;
+ size_t len;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
@@ -1154,14 +1268,37 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
+ ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
- kfree_skb(bcn);
+ /* The P2P IE is inserted by firmware automatically (as
+ * configured above), so remove it from the base beacon
+ * template to avoid duplicate P2P IEs in beacon frames.
+ */
+ ret = ath12k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+ if (ret) {
+ ath12k_warn(ab, "failed to remove P2P vendor ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
+ }
+
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
ret);
+free_bcn_skb:
+ kfree_skb(bcn);
return ret;
}
@@ -1214,6 +1351,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
u32 aid;
lockdep_assert_held(&ar->conf_mutex);
@@ -1228,7 +1366,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
- arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
arg->peer_caps = vif->bss_conf.assoc_capability;
}
@@ -1242,6 +1380,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
@@ -1250,7 +1389,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
- bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (arvif->rsnie_present || arvif->wpaie_present) {
@@ -1270,7 +1409,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
ies->data,
ies->len);
rcu_read_unlock();
- cfg80211_put_bss(ar->hw->wiphy, bss);
+ cfg80211_put_bss(hw->wiphy, bss);
}
/* FIXME: base on RSN IE/WPA IE is a correct idea? */
@@ -1304,6 +1443,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
enum nl80211_band band;
u32 ratemask;
u8 rate;
@@ -1315,7 +1455,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
return;
band = def.chan->band;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -2266,12 +2406,11 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
ath12k_smps_map[smps]);
}
-static void ath12k_bss_assoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static void ath12k_bss_assoc(struct ath12k *ar,
+ struct ath12k_vif *arvif,
struct ieee80211_bss_conf *bss_conf)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
@@ -2361,11 +2500,9 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
-static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_bss_disassoc(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2413,6 +2550,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct cfg80211_chan_def *def)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
u8 basic_rate_idx;
int hw_rate_code;
@@ -2422,7 +2560,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
lockdep_assert_held(&ar->conf_mutex);
- sband = ar->hw->wiphy->bands[def->chan->band];
+ sband = hw->wiphy->bands[def->chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
@@ -2449,6 +2587,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct sk_buff *tmpl;
int ret;
u32 interval;
@@ -2457,7 +2596,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
- tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif);
if (tmpl)
ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
@@ -2465,7 +2604,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
unsol_bcast_probe_resp_enabled = 1;
interval = info->unsol_bcast_probe_resp_interval;
- tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw,
arvif->vif);
if (tmpl)
ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
@@ -2491,13 +2630,60 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
return ret;
}
-static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ enable_ps = arvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+}
+
+static void ath12k_mac_bss_info_changed(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -2510,7 +2696,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
u8 rateidx;
u32 rate;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
@@ -2666,9 +2852,9 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc)
- ath12k_bss_assoc(hw, vif, info);
+ ath12k_bss_assoc(ar, arvif, info);
else
- ath12k_bss_disassoc(hw, vif);
+ ath12k_bss_disassoc(ar, arvif);
}
if (changed & BSS_CHANGED_TXPOWER) {
@@ -2768,14 +2954,35 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath12k_mac_fils_discovery(arvif, info);
- if (changed & BSS_CHANGED_EHT_PUNCTURING)
- arvif->punct_bitmap = info->eht_puncturing;
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params->supports_sta_ps) {
+ arvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
+ }
+}
+
+static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_mac_bss_info_changed(ar, arvif, info, changed);
mutex_unlock(&ar->conf_mutex);
}
void __ath12k_mac_scan_finish(struct ath12k *ar)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -2784,7 +2991,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
if (ar->scan.is_roc && ar->scan.roc_notify)
- ieee80211_remain_on_channel_expired(ar->hw);
+ ieee80211_remain_on_channel_expired(hw);
fallthrough;
case ATH12K_SCAN_STARTING:
if (!ar->scan.is_roc) {
@@ -2795,7 +3002,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
ATH12K_SCAN_STARTING)),
};
- ieee80211_scan_completed(ar->hw, &info);
+ ieee80211_scan_completed(hw, &info);
}
ar->scan.state = ATH12K_SCAN_IDLE;
@@ -2940,13 +3147,16 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct ath12k_wmi_scan_req_arg arg = {};
int ret;
int i;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -2988,7 +3198,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
for (i = 0; i < arg.num_ssids; i++)
arg.ssid[i] = req->ssids[i];
} else {
- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_f_passive = 1;
}
if (req->n_channels) {
@@ -3014,7 +3224,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
}
/* Add a margin to account for event/command processing */
- ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg.max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
@@ -3025,13 +3235,17 @@ exit:
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
@@ -3159,8 +3373,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
struct ath12k_sta *arsta;
@@ -3175,6 +3390,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 1;
@@ -3696,7 +3914,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
if (ab->hw_params->vdev_start_delay &&
!arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath12k_start_vdev_delay(ar->hw, vif);
+ ret = ath12k_start_vdev_delay(ar, arvif);
if (ret) {
ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
goto free_peer;
@@ -3750,7 +3968,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_peer *peer;
@@ -3761,6 +3980,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST))
cancel_work_sync(&arsta->update_wk);
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -3775,6 +3996,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
@@ -3856,6 +4084,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
}
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -3863,7 +4092,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
s16 txpwr;
@@ -3879,6 +4109,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
return -EINVAL;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
@@ -3899,12 +4131,15 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
u32 bw, smps;
+ ar = ath12k_ah_to_ar(ah);
+
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
@@ -3964,10 +4199,10 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &arsta->update_wk);
}
-static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
+static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif,
u16 ac, bool enable)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
u32 value;
int ret;
@@ -4021,17 +4256,16 @@ exit:
return ret;
}
-static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- unsigned int link_id, u16 ac,
- const struct ieee80211_tx_queue_params *params)
+static int ath12k_mac_conf_tx(struct ath12k_vif *arvif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct wmi_wmm_params_arg *p = NULL;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (ac) {
case IEEE80211_AC_VO:
@@ -4061,17 +4295,36 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
&arvif->wmm_params);
if (ret) {
- ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n",
+ ar->pdev_idx, ret);
goto exit;
}
- ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
-
+ ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd);
if (ret)
- ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n",
+ ar->pdev_idx, ret);
exit:
+ return ret;
+}
+
+static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_conf_tx(arvif, link_id, ac, params);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -4782,7 +5035,7 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
- ieee80211_free_txskb(ar->hw, skb);
+ ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4914,8 +5167,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath12k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -4959,20 +5212,41 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
return 0;
}
+static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
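+ /* Firmware inserts NoA into beacons on its own; the host only
+ * appends the stored NoA attribute to probe responses.
+ */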
+ if (likely(!is_prb_rsp))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->u.ap.noa_data &&
+ !pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ skb_put_data(skb, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct ath12k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
u32 info_flags = info->flags;
@@ -4987,10 +5261,11 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
- is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
@@ -5000,6 +5275,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
+ /* This case applies only to P2P GO */
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
+ ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
+
ret = ath12k_dp_tx(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
@@ -5018,7 +5297,7 @@ void ath12k_mac_drain_tx(struct ath12k *ar)
static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* TODO: Need to support new monitor mode */
}
@@ -5044,14 +5323,12 @@ static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
ATH12K_RECONFIGURE_TIMEOUT_HZ);
}
-static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+static int ath12k_mac_start(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- ath12k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -5074,14 +5351,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ ath12k_err(ab, "failed to enable PMF QOS: (%d\n", ret);
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret);
goto err;
}
@@ -5111,7 +5388,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ ath12k_err(ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
goto err;
}
@@ -5130,14 +5407,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
* such as rssi, rx_duration.
*/
ret = ath12k_mac_config_mon_status_default(ar, true);
- if (ret && (ret != -ENOTSUPP)) {
+ if (ret && (ret != -EOPNOTSUPP)) {
ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
ret);
goto err;
}
- if (ret == -ENOTSUPP)
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ if (ret == -EOPNOTSUPP)
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
"monitor status config is not yet supported");
/* Configure the hash seed for hash based reo dest ring selection */
@@ -5159,7 +5436,6 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
&ab->pdevs[ar->pdev_idx]);
return 0;
-
err:
ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
@@ -5167,6 +5443,25 @@ err:
return ret;
}
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+
+ ret = ath12k_mac_start(ar);
+ if (ret) {
+ ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath12k_mac_rfkill_config(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -5224,17 +5519,14 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
return 0;
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_stop(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
- ath12k_mac_drain_tx(ar);
-
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
- if (ret && (ret != -ENOTSUPP))
+ if (ret && (ret != -EOPNOTSUPP))
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
@@ -5260,6 +5552,16 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ ath12k_mac_drain_tx(ar);
+
+ ath12k_mac_stop(ar);
+}
+
static u8
ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
{
@@ -5269,7 +5571,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
do {
if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
vdev_stats_id++;
- if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+ if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) {
vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
break;
}
@@ -5376,12 +5678,11 @@ static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
return ret;
}
-static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
u32 param_id, param_value;
int ret;
@@ -5423,11 +5724,20 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
}
}
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ath12k_mac_update_vif_offload(arvif);
+}
+
static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param;
@@ -5439,6 +5749,9 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_AP &&
@@ -5483,17 +5796,29 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
default:
WARN_ON(1);
break;
@@ -5526,7 +5851,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
- ath12k_mac_op_update_vif_offload(hw, vif);
+ ath12k_mac_update_vif_offload(arvif);
nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -5685,12 +6010,16 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif
static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_base *ab;
unsigned long time_left;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
@@ -5766,19 +6095,15 @@ err_vdev_del:
FIF_PROBE_REQ | \
FIF_FCSFAIL)
-static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+static void ath12k_mac_configure_filter(struct ath12k *ar,
+ unsigned int total_flags)
{
- struct ath12k *ar = hw->priv;
bool reset_flag;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
- *total_flags &= SUPPORTED_FILTERS;
- ar->filter_flags = *total_flags;
+ ar->filter_flags = total_flags;
/* For monitor mode */
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
@@ -5793,16 +6118,36 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
}
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
- *total_flags, reset_flag);
+ total_flags, reset_flag);
+}
+
+static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ *total_flags &= SUPPORTED_FILTERS;
+ ath12k_mac_configure_filter(ar, *total_flags);
mutex_unlock(&ar->conf_mutex);
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -5816,9 +6161,12 @@ static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *
static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
mutex_unlock(&ar->conf_mutex);
@@ -5826,14 +6174,13 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
return ret;
}
-static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif,
+ struct ieee80211_ampdu_params *params)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k *ar = arvif->ar;
int ret = -EINVAL;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
@@ -5854,16 +6201,40 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
break;
}
+ return ret;
+}
+
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret = -EINVAL;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_ampdu_action(arvif, params);
mutex_unlock(&ar->conf_mutex);
+ if (ret)
+ ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
+ ar->pdev_idx, params->action, ret);
+
return ret;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx add freq %u width %d ptr %pK\n",
@@ -5886,8 +6257,12 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx remove freq %u width %d ptr %pK\n",
@@ -5995,6 +6370,11 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
+ /* Mark the AP as non-MBSSID by default; the corresponding flags
+ * will be updated once MBSSID support is added.
+ */
+ arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
@@ -6071,46 +6451,6 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
return 0;
}
-static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- ret = ath12k_mac_vdev_setup_sync(ar);
- if (ret) {
- ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- WARN_ON(ar->num_started_vdevs == 0);
-
- ar->num_started_vdevs--;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
- arvif->vif->addr, arvif->vdev_id);
-
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
- arvif->vdev_id);
- }
-
- return 0;
-err:
- return ret;
-}
-
static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
@@ -6215,6 +6555,8 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
+ arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
+
/* Firmware expects vdev_restart only if the vdev is up.
* If the vdev is down it expects vdev_stop->vdev_start.
*/
@@ -6266,7 +6608,7 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
lockdep_assert_held(&ar->conf_mutex);
@@ -6295,8 +6637,12 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
mutex_lock(&ar->conf_mutex);
@@ -6311,7 +6657,8 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
- changed & IEEE80211_CHANCTX_CHANGE_RADAR)
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR ||
+ changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
ath12k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@@ -6320,12 +6667,11 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
int ret;
if (WARN_ON(arvif->is_started))
@@ -6359,19 +6705,23 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
struct ath12k_wmi_peer_create_arg param;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
- arvif->punct_bitmap = link_conf->eht_puncturing;
+ arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
if (ab->hw_params->vdev_start_delay &&
@@ -6438,11 +6788,15 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -6466,11 +6820,13 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
arvif->is_started = false;
if (ab->hw_params->vdev_start_delay &&
@@ -6490,7 +6846,10 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -6532,10 +6891,15 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
*/
static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath12k *ar = hw->priv;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
- return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ return ret;
}
static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -6553,15 +6917,10 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+static void ath12k_mac_flush(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
long time_left;
- if (drop)
- return;
-
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
@@ -6576,6 +6935,18 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
time_left);
}
+static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ if (drop)
+ return;
+
+ ath12k_mac_flush(ar);
+}
+
static int
ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
enum nl80211_band band,
@@ -6778,7 +7149,7 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
- ieee80211_queue_work(ar->hw, &arsta->update_wk);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk);
}
static void ath12k_mac_disable_peer_fixed_rate(void *data,
@@ -6826,8 +7197,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
- if (sgi == NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
+ if (sgi == NL80211_TXRATE_FORCE_LGI) {
+ ret = -EINVAL;
+ goto out;
+ }
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
* requires passing at least one of used basic rates along with them.
@@ -6843,7 +7216,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
if (ret) {
ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
arvif->vdev_id, ret);
- return ret;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
ath12k_mac_disable_peer_fixed_rate,
@@ -6888,7 +7261,8 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
*/
ath12k_warn(ar->ab,
"Setting more than one MCS Value in bitrate mask not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
@@ -6915,6 +7289,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
+out:
return ret;
}
@@ -6922,14 +7297,18 @@ static void
ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif;
int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH12K_STATE_RESTARTED) {
@@ -7013,7 +7392,8 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
int ret = 0;
@@ -7021,6 +7401,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
+ ar = ath12k_ah_to_ar(ah);
+
ar_survey = &ar->survey[idx];
mutex_lock(&ar->conf_mutex);
@@ -7052,6 +7434,7 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
exit:
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -7089,6 +7472,125 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
+static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_wmi_scan_req_arg arg;
+ struct ath12k *ar;
+ u32 scan_time_msec;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH12K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath12k_wmi_start_scan_init(ar, &arg);
+ arg.num_chan = 1;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH12K_SCAN_ID;
+ arg.chan_list[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_f_passive = 1;
+ arg.burst_duration = duration;
+
+ ret = ath12k_start_scan(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg.chan_list);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -7123,6 +7625,8 @@ static const struct ieee80211_ops ath12k_ops = {
.get_survey = ath12k_mac_op_get_survey,
.flush = ath12k_mac_op_flush,
.sta_statistics = ath12k_mac_op_sta_statistics,
+ .remain_on_channel = ath12k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -7158,9 +7662,9 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
}
static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
- u32 supported_bands)
+ u32 supported_bands,
+ struct ieee80211_supported_band *bands[])
{
- struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
void *channels;
@@ -7186,7 +7690,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_g_rates_size;
band->bitrates = ath12k_g_rates;
- hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ bands[NL80211_BAND_2GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
@@ -7198,7 +7702,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
- if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
@@ -7213,7 +7717,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+ bands[NL80211_BAND_6GHZ] = band;
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
@@ -7235,7 +7739,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ bands[NL80211_BAND_5GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
@@ -7251,28 +7755,59 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
return 0;
}
-static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
+static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes = U16_MAX;
+
+ interface_modes &= ar->ab->hw_params->interface_modes;
+
+ return interface_modes == U16_MAX ? 0 : interface_modes;
+}
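+
+ /* The U16_MAX seed makes the helper above an intersection across the
+ * hw's radios: each radio's supported modes are ANDed in, and a result
+ * left at U16_MAX (no radio contributed) reports no modes. With a
+ * single radio per hw this reduces to that radio's interface_modes.
+ */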
+
+static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
+ enum nl80211_iftype type)
+{
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes, mode;
+ bool is_enable = true;
+
+ mode = BIT(type);
+
+ interface_modes = ar->ab->hw_params->interface_modes;
+ if (!(interface_modes & mode))
+ is_enable = false;
+
+ return is_enable;
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct wiphy *wiphy = ah->hw->wiphy;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
- bool ap, mesh;
+ bool ap, mesh, p2p;
- ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
+ ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+ p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
+ ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
- if (ap || mesh) {
+ if ((ap || mesh) && !p2p) {
n_limits = 2;
max_interfaces = 16;
+ } else if (p2p) {
+ n_limits = 3;
+ if (ap || mesh)
+ max_interfaces = 16;
+ else
+ max_interfaces = 3;
} else {
n_limits = 1;
max_interfaces = 1;
@@ -7287,14 +7822,22 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (ap) {
+ if (ap || mesh || p2p)
limits[1].max = max_interfaces;
+
+ if (ap)
limits[1].types |= BIT(NL80211_IFTYPE_AP);
- }
if (mesh)
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ if (p2p) {
+ limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ limits[2].max = 1;
+ limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ }
+
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = max_interfaces;
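
/* For illustration: with both AP and P2P enabled this yields one
 * combination with three limits: 1 x STA, up to 16 x AP/P2P_CLIENT/P2P_GO
 * (plus MESH_POINT when enabled), and 1 x P2P_DEVICE, with
 * max_interfaces = 16.
 */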
@@ -7349,21 +7892,27 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
},
};
-static void __ath12k_mac_unregister(struct ath12k *ar)
+static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
{
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
-
- cancel_work_sync(&ar->regd_update_work);
-
- ieee80211_unregister_hw(hw);
-
idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+}
+
+static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
+{
+ struct ieee80211_hw *hw = ah->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(hw);
+
+ ath12k_mac_cleanup_unregister(ar);
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
@@ -7371,28 +7920,42 @@ static void __ath12k_mac_unregister(struct ath12k *ar)
SET_IEEE80211_DEV(hw, NULL);
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+static int ath12k_mac_setup_register(struct ath12k *ar,
+ u32 *ht_cap,
+ struct ieee80211_supported_band *bands[])
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ int ret;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
- continue;
+ init_waitqueue_head(&ar->txmgmt_empty_waitq);
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
- __ath12k_mac_unregister(ar);
- }
+ ath12k_pdev_caps_update(ar);
+
+ ret = ath12k_mac_setup_channels_rates(ar,
+ cap->supported_bands,
+ bands);
+ if (ret)
+ return ret;
+
+ ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap);
+ ath12k_mac_setup_sband_iftype_data(ar, cap);
+
+ ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab);
+ ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab);
+
+ return 0;
}
-static int __ath12k_mac_register(struct ath12k *ar)
+static int ath12k_mac_hw_register(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ struct ath12k_pdev_cap *cap;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
@@ -7407,30 +7970,34 @@ static int __ath12k_mac_register(struct ath12k *ar)
int ret;
u32 ht_cap = 0;
- ath12k_pdev_caps_update(ar);
+ pdev = ar->pdev;
- SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
-
- SET_IEEE80211_DEV(hw, ab->dev);
+ if (ab->pdevs_macaddr_valid)
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ret = ath12k_mac_setup_channels_rates(ar,
- cap->supported_bands);
+ ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands);
if (ret)
- goto err;
+ goto out;
- ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
- ath12k_mac_setup_sband_iftype_data(ar, cap);
+ wiphy->max_ap_assoc_sta = ar->max_num_stations;
- ret = ath12k_mac_setup_iface_combinations(ar);
- if (ret) {
- ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
- goto err_free_channels;
- }
+ cap = &pdev->cap;
wiphy->available_antennas_rx = cap->rx_chain_mask;
wiphy->available_antennas_tx = cap->tx_chain_mask;
- wiphy->interface_modes = ab->hw_params->interface_modes;
+ SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
+ SET_IEEE80211_DEV(hw, ab->dev);
+
+ ret = ath12k_mac_setup_iface_combinations(ah);
+ if (ret) {
+ ath12k_err(ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_cleanup_unregister;
+ }
+
+ wiphy->interface_modes = ath12k_mac_get_ifmodes(ah);
if (wiphy->bands[NL80211_BAND_2GHZ] &&
wiphy->bands[NL80211_BAND_5GHZ] &&
@@ -7483,15 +8050,10 @@ static int __ath12k_mac_register(struct ath12k *ar)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
- ar->max_num_stations = TARGET_NUM_STATIONS;
- ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
-
- wiphy->max_ap_assoc_sta = ar->max_num_stations;
-
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT;
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
@@ -7524,7 +8086,7 @@ static int __ath12k_mac_register(struct ath12k *ar)
ret = ieee80211_register_hw(hw);
if (ret) {
- ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
}
@@ -7552,142 +8114,213 @@ err_free_if_combs:
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
-err_free_channels:
- kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+err_cleanup_unregister:
+ ath12k_mac_cleanup_unregister(ar);
-err:
+out:
SET_IEEE80211_DEV(hw, NULL);
+
return ret;
}
+static void ath12k_mac_setup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev = ar->pdev;
+ u8 pdev_idx = ar->pdev_idx;
+
+ ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);
+
+ ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
+ /* FIXME: wmi[0] is already initialized during attach;
+ * should we do this again?
+ */
+ ath12k_wmi_pdev_attach(ab, pdev_idx);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+}
+
int ath12k_mac_register(struct ath12k_base *ab)
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
int ret;
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (ab->pdevs_macaddr_valid) {
- ether_addr_copy(ar->mac_addr, pdev->mac_addr);
- } else {
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ar->mac_addr[4] += i;
- }
-
- ret = __ath12k_mac_register(ar);
- if (ret)
- goto err_cleanup;
-
- init_waitqueue_head(&ar->txmgmt_empty_waitq);
- idr_init(&ar->txmgmt_idr);
- spin_lock_init(&ar->txmgmt_idr_lock);
- }
-
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = 320000;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+
+ ret = ath12k_mac_hw_register(ah);
+ if (ret)
+ goto err;
+ }
+
return 0;
-err_cleanup:
+err:
for (i = i - 1; i >= 0; i--) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- __ath12k_mac_unregister(ar);
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
}
return ret;
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ int i;
+
+ for (i = ab->num_hw - 1; i >= 0; i--) {
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
+ }
+}
+
+static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
+{
+ ieee80211_free_hw(ah->hw);
+}
+
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+ struct ath12k_pdev_map *pdev_map,
+ u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
struct ath12k_pdev *pdev;
- int ret;
+ struct ath12k_hw *ah;
int i;
+ u8 pdev_idx;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map),
+ &ath12k_ops);
+ if (!hw)
+ return NULL;
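+
+ /* struct_size() sizes the mac80211 private area as one ath12k_hw
+ * followed by num_pdev_map entries of its trailing radio[] flexible
+ * array, one struct ath12k per mapped pdev.
+ */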
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
- if (!hw) {
- ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
- ret = -ENOMEM;
- goto err_free_mac;
- }
+ ah = ath12k_hw_to_ah(hw);
+ ah->hw = hw;
+ ah->num_radio = num_pdev_map;
+
+ for (i = 0; i < num_pdev_map; i++) {
+ ab = pdev_map[i].ab;
+ pdev_idx = pdev_map[i].pdev_idx;
+ pdev = &ab->pdevs[pdev_idx];
- ar = hw->priv;
- ar->hw = hw;
+ ar = ath12k_ah_to_ar(ah);
+ ar->ah = ah;
ar->ab = ab;
+ ar->hw_link_id = i;
ar->pdev = pdev;
- ar->pdev_idx = i;
- ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
-
- ar->wmi = &ab->wmi_ab.wmi[i];
- /* FIXME: wmi[0] is already initialized during attach,
- * Should we do this again?
- */
- ath12k_wmi_pdev_attach(ab, i);
-
- ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
- ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
- ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
- ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
-
+ ar->pdev_idx = pdev_idx;
pdev->ar = ar;
- spin_lock_init(&ar->data_lock);
- INIT_LIST_HEAD(&ar->arvifs);
- INIT_LIST_HEAD(&ar->ppdu_stats_info);
- mutex_init(&ar->conf_mutex);
- init_completion(&ar->vdev_setup_done);
- init_completion(&ar->vdev_delete_done);
- init_completion(&ar->peer_assoc_done);
- init_completion(&ar->peer_delete_done);
- init_completion(&ar->install_key_done);
- init_completion(&ar->bss_survey_done);
- init_completion(&ar->scan.started);
- init_completion(&ar->scan.completed);
-
- INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
- INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
-
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- }
- return 0;
-
-err_free_mac:
- ath12k_mac_destroy(ab);
+ ath12k_mac_setup(ar);
+ }
- return ret;
+ return ah;
}
void ath12k_mac_destroy(struct ath12k_base *ab)
{
- struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
+ if (!pdev->ar)
continue;
- ieee80211_free_hw(ar->hw);
pdev->ar = NULL;
}
+
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+}
+
+int ath12k_mac_allocate(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ struct ath12k_pdev_map pdev_map[MAX_RADIOS];
+ int ret, i, j;
+ u8 radio_per_hw;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ ab->num_hw = ab->num_radios;
+ radio_per_hw = 1;
+
+ for (i = 0; i < ab->num_hw; i++) {
+ for (j = 0; j < radio_per_hw; j++) {
+ pdev_map[j].ab = ab;
+ pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ }
+
+ ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+ if (!ah) {
+ ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
+ i);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ab->ah[i] = ah;
+ }
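+
+ /* With radio_per_hw fixed at 1 this maps pdev i to hw i: for example,
+ * num_radios == 2 creates two ath12k_hw instances, hw 0 backed by
+ * pdev 0 and hw 1 by pdev 1.
+ */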
+
+ ath12k_dp_pdev_pre_alloc(ab);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+
+ return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 7c63bb628adc..3f5e1be0dff9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@@ -12,6 +12,8 @@
struct ath12k;
struct ath12k_base;
+struct ath12k_hw;
+struct ath12k_pdev_map;
struct ath12k_generic_iter {
struct ath12k *ar;
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index d5441ddb374b..adb8c3ec1950 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
#include "core.h"
#include "debug.h"
@@ -13,6 +14,8 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define OTP_INVALID_BOARD_ID 0xFFFF
+#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
@@ -358,23 +361,60 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
+ unsigned int board_id;
int ret;
+ bool dualmac = false;
mhi_ctrl = mhi_alloc_controller();
if (!mhi_ctrl)
return -ENOMEM;
- ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
- ab_pci->amss_path,
- sizeof(ab_pci->amss_path));
-
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
- mhi_ctrl->fw_image = ab_pci->amss_path;
mhi_ctrl->regs = ab->mem;
mhi_ctrl->reg_len = ab->mem_len;
mhi_ctrl->rddm_size = ab->hw_params->rddm_size;
+ if (ab->hw_params->otp_board_id_register) {
+ board_id =
+ ath12k_pci_read32(ab, ab->hw_params->otp_board_id_register);
+ board_id = u32_get_bits(board_id, OTP_BOARD_ID_MASK);
+
+ if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to read board id\n");
+ } else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
+ dualmac = true;
+ ab->slo_capable = false;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "dualmac fw selected for board id: %x\n", board_id);
+ }
+ }
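+
+ /* For illustration: an OTP value of 0x1001 has bit 12 set and selects
+ * the dualmac AMSS image below, while 0x0 and 0xFFFF are treated as
+ * unprogrammed/invalid board IDs.
+ */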
+
+ if (dualmac) {
+ if (ab->fw.amss_dualmac_data && ab->fw.amss_dualmac_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_dualmac_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_dualmac_len;
+ } else {
+ ath12k_warn(ab, "dualmac firmware IE not present in firmware-N.bin\n");
+ ret = -ENOENT;
+ goto free_controller;
+ }
+ } else {
+ if (ab->fw.amss_data && ab->fw.amss_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_len;
+ } else {
+ /* use the old separate mhi.bin MHI firmware file */
+ ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ }
+ }
+
ret = ath12k_mhi_get_msi(ab_pci);
if (ret) {
ath12k_err(ab, "failed to get msi for mhi\n");
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
new file mode 100644
index 000000000000..61e919b23b16
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath12k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+ bool oppps = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_INDEX);
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)) &&
+ !(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM) *
+ sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
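+ /* For illustration, assuming mac80211's packed 13-byte
+ * struct ieee80211_p2p_noa_desc: with one NoA descriptor the IE spans
+ * 6 (EID + len + 4-byte OUI/type) + 3 (attr ID + attr len) +
+ * 2 (index + oppps_ctwindow) + 13 = 24 bytes, so data[1] = 22 and the
+ * attribute length written by ath12k_p2p_noa_ie_fill() is 2 + 13 = 15.
+ */
+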
+static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath12k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath12k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath12k_p2p_noa_ie_fill(ie, len, noa);
+ ath12k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath12k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath12k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h
new file mode 100644
index 000000000000..b6f177fe4ff4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_P2P_H
+#define ATH12K_P2P_H
+
+#include "wmi.h"
+
+struct ath12k_wmi_p2p_noa_info;
+
+struct ath12k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+};
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index f0d2e2d8719c..14954bc05144 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -39,6 +39,10 @@
#define QCN9274_DEVICE_ID 0x1109
#define WCN7850_DEVICE_ID 0x1107
+#define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164
+#define DOMAIN_NUMBER_MASK GENMASK(7, 4)
+#define BUS_NUMBER_MASK GENMASK(3, 0)
+
static const struct pci_device_id ath12k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
@@ -201,18 +205,17 @@ static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
window_start = 2 * WINDOW_START;
- /* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
- * use 0th window
- */
- else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
- !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
- window_start = 0;
else
window_start = WINDOW_START;
return window_start;
}
+static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
+{
+ return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
+}
+
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
u32 val, delay;
@@ -682,12 +685,22 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct pci_bus *bus = ab_pci->pdev->bus;
+
cfg->tgt_ce = ab->hw_params->target_ce_config;
cfg->tgt_ce_len = ab->hw_params->target_ce_count;
cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
+ ab_pci->qmi_instance =
+ u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
+ u32_encode_bits(bus->number, BUS_NUMBER_MASK);
+ ab->qmi.service_ins_id += ab_pci->qmi_instance;
+ }
}
static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
@@ -901,6 +914,26 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
+static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 reg;
+
+ /* On platforms with two or more identical MHI devices, the QMI services
+ * run with identical qrtr node IDs. Because of these identical IDs,
+ * qrtr-lookup cannot register more than one QMI service per node ID.
+ *
+ * This generates a unique instance ID from the PCIe domain number and
+ * bus number and writes it to the given register; it is available to
+ * the firmware when the QMI service is spawned.
+ */
+ reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
+ ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
+ reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
+}
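+
+ /* For illustration: a device on PCIe domain 2, bus 3 gets
+ * qmi_instance = u32_encode_bits(2, GENMASK(7, 4)) |
+ * u32_encode_bits(3, GENMASK(3, 0)) = 0x23, so its QMI service
+ * instance ID becomes the hardware default plus 0x23.
+ */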
+
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
@@ -1138,15 +1171,17 @@ u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ val = ioread32(ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
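
/* Reads that land inside the MHI register region are rebased to
 * PCI_MHIREGLEN_REG and issued without the window offset; everything
 * else still goes through the selected window.
 */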
@@ -1183,15 +1218,17 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ iowrite32(value, ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@@ -1219,6 +1256,9 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
ath12k_pci_msi_enable(ab_pci);
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
+ ath12k_pci_update_qrtr_node_id(ab);
+
ret = ath12k_mhi_start(ab_pci);
if (ret) {
ath12k_err(ab, "failed to start mhi: %d\n", ret);
@@ -1310,11 +1350,21 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
goto err_free_core;
}
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ab->id.vendor = pdev->vendor;
+ ab->id.device = pdev->device;
+ ab->id.subsystem_vendor = pdev->subsystem_vendor;
+ ab->id.subsystem_device = pdev->subsystem_device;
+
switch (pci_dev->device) {
case QCN9274_DEVICE_ID:
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = true;
ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
+ ab->hal_rx_ops = &hal_rx_qcn9274_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -1333,9 +1383,11 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
}
break;
case WCN7850_DEVICE_ID:
+ ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = false;
ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
+ ab->hal_rx_ops = &hal_rx_wcn7850_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index b2edf32ada20..ca93693ba4e9 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
@@ -53,6 +53,9 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+#define QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB 0x1E20338
+#define OTP_BOARD_ID_MASK GENMASK(15, 0)
+
#define PCI_BAR_WINDOW0_BASE 0x1E00000
#define PCI_BAR_WINDOW0_END 0x1E7FFFC
#define PCI_SOC_RANGE_MASK 0x3FFF
@@ -111,6 +114,7 @@ struct ath12k_pci {
u16 link_ctl;
unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
+ u32 qmi_instance;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 77a132f6bbd1..92845ffff44a 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -17,7 +17,7 @@
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152
-static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -61,7 +61,7 @@ static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -511,7 +511,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -528,7 +528,68 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -753,7 +814,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -789,7 +850,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -821,7 +882,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -863,7 +924,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -890,7 +951,7 @@ static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -930,7 +991,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -957,7 +1018,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -975,7 +1036,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@@ -983,7 +1044,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1009,7 +1070,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1026,7 +1087,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1042,7 +1103,7 @@ static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1068,7 +1129,7 @@ static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1094,7 +1155,7 @@ static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1348,7 +1409,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -1483,7 +1544,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1501,7 +1562,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1525,7 +1586,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1542,7 +1603,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1595,7 +1656,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1630,7 +1691,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@@ -1654,7 +1715,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1671,7 +1732,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1706,7 +1767,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1724,7 +1785,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1862,7 +1923,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1879,22 +1940,78 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+ struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
+ struct wlfw_host_mlo_chip_info_s_v01 *info;
+ u8 hw_link_id = 0;
+ int i;
+
+ if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "skip QMI MLO cap due to invalid num_radio %d\n",
+ ab->qmi.num_radios);
+ return;
+ }
+
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
@@ -1905,28 +2022,31 @@ static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *re
/* Max peer number generally won't change for the same device
* but needs to be kept in sync with the host driver.
*/
- req->max_mlo_peer = 32;
+ req->max_mlo_peer = ab->hw_params->max_mlo_peer;
req->mlo_num_chips_valid = 1;
req->mlo_num_chips = 1;
+
+ info = &req->mlo_chip_info[0];
+ info->chip_id = 0;
+ info->num_local_links = ab->qmi.num_radios;
+
+ for (i = 0; i < info->num_local_links; i++) {
+ info->hw_link_id[i] = hw_link_id;
+ info->valid_mlo_link_id[i] = 1;
+
+ hw_link_id++;
+ }
+
req->mlo_chip_info_valid = 1;
- req->mlo_chip_info[0].chip_id = 0;
- req->mlo_chip_info[0].num_local_links = 2;
- req->mlo_chip_info[0].hw_link_id[0] = 0;
- req->mlo_chip_info[0].hw_link_id[1] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
}
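
/* For illustration: with ab->qmi.num_radios == 2 the loop above produces
 * the same values the old hardcoded block set: chip_id 0, num_local_links 2,
 * hw_link_id {0, 1}, valid_mlo_link_id {1, 1}.
 */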
static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
- struct qmi_wlanfw_host_cap_req_msg_v01 req;
- struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.num_clients_valid = 1;
req.num_clients = 1;
req.mem_cfg_mode = ab->qmi.target_mem_mode;
@@ -1963,10 +2083,10 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
*/
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
-
- ath12k_host_cap_parse_mlo(&req);
}
+ ath12k_host_cap_parse_mlo(ab, &req);
+
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -1977,6 +2097,7 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
goto out;
}
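The qmi_txn_cancel() calls added in this and the later hunks all apply the same rule: once qmi_txn_init() has registered a transaction, a failed qmi_send_request() must cancel it, or the transaction lingers in the handle's ID table until it times out. The shape of the pattern (a sketch, not a new helper in the driver):

    ret = qmi_txn_init(&ab->qmi.handle, &txn, resp_ei, &resp);
    if (ret < 0)
    	return ret;			/* nothing registered yet */

    ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
    		       msg_id, max_len, req_ei, &req);
    if (ret < 0) {
    	qmi_txn_cancel(&txn);		/* undo qmi_txn_init() */
    	return ret;
    }

    /* qmi_txn_wait() releases the transaction itself, on success or timeout */
    ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));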
@@ -1996,6 +2117,62 @@ out:
return ret;
}
+static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_phy_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_phy_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ int ret;
+
+ if (!ab->slo_capable)
+ goto out;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_PHY_CAP_REQ_V01,
+ QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_phy_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send phy capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!resp.num_phy_valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ab->qmi.num_radios = resp.num_phy;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ resp.num_phy_valid, resp.num_phy,
+ resp.board_id_valid, resp.board_id);
+
+ return;
+
+out:
+	/* If the PHY capability is not advertised then rely on the default
+	 * number of links.
+	 */
+	ab->qmi.num_radios = ab->hw_params->def_num_link;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI,
+		   "no valid PHY capability response, using default num_phy %d\n",
+		   ab->qmi.num_radios);
+}
+
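Whatever value this function settles on, whether taken from the response or from hw_params->def_num_link, is what ath12k_host_cap_parse_mlo() consumes later in the bring-up sequence, which is why the fallback path must always leave a sane num_radios behind. In outline:

    /* Sketch of the server-arrive ordering introduced by this patch: */
    ath12k_qmi_phy_cap_send(ab);	/* sets ab->qmi.num_radios, or the default */

    ret = ath12k_qmi_fw_ind_register_send(ab);
    /* ... the host cap request follows later in the sequence, and
     * ath12k_host_cap_parse_mlo() reads ab->qmi.num_radios to size the
     * per-chip MLO link arrays.
     */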
static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_ind_register_req_msg_v01 *req;
@@ -2040,6 +2217,7 @@ static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
ret);
goto out;
@@ -2068,8 +2246,8 @@ resp_out:
static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
- struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0, i;
bool delayed;
@@ -2077,8 +2255,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
/* Some targets by default request a block of big contiguous
* DMA memory, it's hard to allocate from kernel. So host returns
* failure to firmware and firmware then request multiple blocks of
@@ -2088,7 +2264,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
delayed = true;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
- memset(req, 0, sizeof(*req));
} else {
delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
@@ -2114,6 +2289,7 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
ret);
goto out;
@@ -2208,17 +2384,14 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
- struct qmi_wlanfw_cap_req_msg_v01 req;
- struct qmi_wlanfw_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
int ret = 0;
int r;
int i;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -2229,6 +2402,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
ret);
goto out;
@@ -2310,8 +2484,8 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
- struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
const u8 *temp = data;
int ret;
u32 remaining = len;
@@ -2319,7 +2493,6 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
while (remaining) {
req->valid = 1;
@@ -2423,8 +2596,7 @@ static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
break;
case ATH12K_QMI_BDF_TYPE_REGDB:
- ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
- ATH12K_REGDB_FILE_NAME);
+ ret = ath12k_core_fetch_regdb(ab, &bd);
if (ret) {
ath12k_warn(ab, "qmi failed to load regdb bin:\n");
goto out;
@@ -2497,37 +2669,56 @@ out:
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
+ const void *m3_data;
char path[100];
+ size_t m3_len;
int ret;
- if (m3_mem->vaddr || m3_mem->size)
+ if (m3_mem->vaddr)
+ /* m3 firmware buffer is already available in the DMA buffer */
return 0;
- fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
- if (IS_ERR(fw)) {
- ret = PTR_ERR(fw);
- ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
- path, sizeof(path));
- ath12k_err(ab, "failed to load %s: %d\n", path, ret);
- return ret;
+ if (ab->fw.m3_data && ab->fw.m3_len > 0) {
+		/* firmware-N.bin had an m3 firmware file so use that */
+ m3_data = ab->fw.m3_data;
+ m3_len = ab->fw.m3_len;
+ } else {
+		/* No m3 file in firmware-N.bin, so fall back to requesting
+		 * the older standalone m3.bin.
+		 */
+ fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
+ path, sizeof(path));
+ ath12k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_data = fw->data;
+ m3_len = fw->size;
}
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
- fw->size, &m3_mem->paddr,
+ m3_len, &m3_mem->paddr,
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
- release_firmware(fw);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- memcpy(m3_mem->vaddr, fw->data, fw->size);
- m3_mem->size = fw->size;
+ memcpy(m3_mem->vaddr, m3_data, m3_len);
+ m3_mem->size = m3_len;
+
+ ret = 0;
+
+out:
release_firmware(fw);
- return 0;
+ return ret;
}
static void ath12k_qmi_m3_free(struct ath12k_base *ab)
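A detail of the reworked exit path above: on the embedded-m3 branch fw is never assigned, so the shared out: label depends on release_firmware() being a no-op for NULL, which is why fw is now initialized to NULL at declaration. The source selection, in outline (error paths elided):

    const struct firmware *fw = NULL;	/* stays NULL on the embedded path */
    const void *m3_data;
    size_t m3_len;

    if (ab->fw.m3_data && ab->fw.m3_len > 0) {
    	m3_data = ab->fw.m3_data;	/* m3 image embedded in firmware-N.bin */
    	m3_len = ab->fw.m3_len;
    } else {
    	fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
    	m3_data = fw->data;		/* legacy standalone m3.bin */
    	m3_len = fw->size;
    }

    /* ... dma_alloc_coherent() and memcpy() of m3_data/m3_len ... */

    release_firmware(fw);	/* no-op when fw is NULL */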
@@ -2546,14 +2737,11 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab)
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- struct qmi_wlanfw_m3_info_req_msg_v01 req;
- struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = ath12k_qmi_m3_load(ab);
if (ret) {
ath12k_err(ab, "failed to load m3 firmware: %d", ret);
@@ -2573,6 +2761,7 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
ret);
goto out;
@@ -2597,14 +2786,11 @@ out:
static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
u32 mode)
{
- struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
- struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.mode = mode;
req.hw_debug_valid = 1;
req.hw_debug = 0;
@@ -2619,6 +2805,7 @@ static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
mode, ret);
goto out;
@@ -2649,10 +2836,10 @@ out:
static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
- struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp = {};
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
@@ -2662,8 +2849,6 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
req->host_version_valid = 1;
strscpy(req->host_version, ATH12K_HOST_VERSION_STRING,
sizeof(req->host_version));
@@ -2710,6 +2895,7 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
ret);
goto out;
@@ -2733,6 +2919,49 @@ out:
return ret;
}
+static int ath12k_qmi_wlanfw_wlan_ini_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
+ struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ req.enable_fwlog_valid = true;
+ req.enable_fwlog = 1;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01,
+ QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send QMI wlan ini request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to receive QMI wlan ini request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "QMI wlan ini response failure: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
@@ -2749,6 +2978,12 @@ int ath12k_qmi_firmware_start(struct ath12k_base *ab,
{
int ret;
+ ret = ath12k_qmi_wlanfw_wlan_ini_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan fw ini: %d\n", ret);
+ return ret;
+ }
+
ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
@@ -2792,6 +3027,8 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
struct ath12k_base *ab = qmi->ab;
int ret;
+ ath12k_qmi_phy_cap_send(ab);
+
ret = ath12k_qmi_fw_ind_register_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index e25bbaa125e8..6ee33c9851c6 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_QMI_H
@@ -15,7 +15,6 @@
#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000
#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
-#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07
#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
@@ -141,6 +140,7 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+ u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
unsigned int service_ins_id;
@@ -251,6 +251,22 @@ struct qmi_wlanfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_PHY_CAP_REQ_V01 0x0057
+#define QMI_WLANFW_PHY_CAP_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_PHY_CAP_RESP_V01 0x0057
+
+struct qmi_wlanfw_phy_cap_req_msg_v01 {
+};
+
+struct qmi_wlanfw_phy_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 num_phy_valid;
+ u8 num_phy;
+ u8 board_id_valid;
+ u32 board_id;
+};
+
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
@@ -559,6 +575,21 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01 0x002F
+#define ATH12K_QMI_WLANFW_WLAN_INI_RESP_V01 0x002F
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_INI_RESP_MSG_V01_MAX_LEN 7
+
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+ /* Must be set to true if enable_fwlog is being passed */
+ u8 enable_fwlog_valid;
+ u8 enable_fwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index f924bc13ccff..f308e9a6ed55 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
@@ -48,7 +48,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
int ret;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
@@ -95,7 +96,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
struct ieee80211_supported_band **bands;
struct ath12k_wmi_scan_chan_list_arg *arg;
struct ieee80211_channel *channel;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
@@ -103,7 +104,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -129,7 +130,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
ch = arg->channel;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -199,7 +200,7 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index 55f20c446ca9..a0db6702a189 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@@ -147,6 +147,61 @@ struct rx_mpdu_start_qcn9274 {
__le32 res1;
} __packed;
+#define QCN9274_MPDU_START_SELECT_MPDU_START_TAG BIT(0)
+#define QCN9274_MPDU_START_SELECT_INFO0_REO_QUEUE_DESC_LO BIT(1)
+#define QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 BIT(2)
+#define QCN9274_MPDU_START_SELECT_PN_95_32 BIT(3)
+#define QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 BIT(4)
+#define QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID BIT(5)
+#define QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 BIT(6)
+#define QCN9274_MPDU_START_SELECT_INFO5_INFO6 BIT(7)
+#define QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 BIT(8)
+#define QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 BIT(9)
+#define QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL BIT(10)
+#define QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL BIT(11)
+#define QCN9274_MPDU_START_SELECT_HT_CTRL_INFO7 BIT(12)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR1_47_0_ML_ADDR2_15_0 BIT(13)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR2_47_16_INFO8 BIT(14)
+#define QCN9274_MPDU_START_SELECT_RES_0_RES_1 BIT(15)
+
+#define QCN9274_MPDU_START_WMASK (QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 | \
+ QCN9274_MPDU_START_SELECT_PN_95_32 | \
+ QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 | \
+ QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID | \
+ QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 | \
+ QCN9274_MPDU_START_SELECT_INFO5_INFO6 | \
+ QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 | \
+ QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 | \
+ QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL | \
+ QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL)
+
+/* The rx_mpdu_start_qcn9274_compact structure below is tied to the mask
+ * value QCN9274_MPDU_START_WMASK. If the mask value changes, the structure
+ * must change with it.
+ */
+
+struct rx_mpdu_start_qcn9274_compact {
+ __le32 info1;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info3;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed;
+
/* rx_mpdu_start
*
* reo_destination_indication
@@ -608,6 +663,8 @@ enum rx_msdu_start_reception_type {
RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
};
+#define RX_MSDU_END_64_TLV_SRC_LINK_ID GENMASK(24, 22)
+
#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
@@ -786,6 +843,52 @@ struct rx_msdu_end_qcn9274 {
__le32 info14;
} __packed;
+#define QCN9274_MSDU_END_SELECT_MSDU_END_TAG BIT(0)
+#define QCN9274_MSDU_END_SELECT_INFO0_PHY_PPDUID_IP_HDR_CSUM_INFO1 BIT(1)
+#define QCN9274_MSDU_END_SELECT_INFO2_CUMULATIVE_CSUM_RULE_IND_0 BIT(2)
+#define QCN9274_MSDU_END_SELECT_IPV6_OP_CRC_INFO3_TYPE13 BIT(3)
+#define QCN9274_MSDU_END_SELECT_RULE_IND_1_TCP_SEQ_NUM BIT(4)
+#define QCN9274_MSDU_END_SELECT_TCP_ACK_NUM_INFO4_WINDOW_SIZE BIT(5)
+#define QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID BIT(6)
+#define QCN9274_MSDU_END_SELECT_INFO6_FSE_METADATA BIT(7)
+#define QCN9274_MSDU_END_SELECT_CCE_MDATA_TCP_UDP_CSUM_INFO7_IP_LEN BIT(8)
+#define QCN9274_MSDU_END_SELECT_INFO8_INFO9 BIT(9)
+#define QCN9274_MSDU_END_SELECT_INFO10_INFO11 BIT(10)
+#define QCN9274_MSDU_END_SELECT_VLAN_CTAG_STAG_CI_PEER_MDATA BIT(11)
+#define QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ BIT(12)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA BIT(13)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_31_0_TOEPLITZ_HASH_2_4 BIT(14)
+#define QCN9274_MSDU_END_SELECT_RES0_SA_47_0 BIT(15)
+#define QCN9274_MSDU_END_SELECT_INFO13_INFO14 BIT(16)
+
+#define QCN9274_MSDU_END_WMASK (QCN9274_MSDU_END_SELECT_MSDU_END_TAG | \
+ QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID | \
+ QCN9274_MSDU_END_SELECT_INFO10_INFO11 | \
+ QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ | \
+ QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA | \
+ QCN9274_MSDU_END_SELECT_INFO13_INFO14)
+
+/* The rx_msdu_end_qcn9274_compact structure below is tied to the mask value
+ * QCN9274_MSDU_END_WMASK. If the mask value changes, the structure must
+ * change with it.
+ */
+
+struct rx_msdu_end_qcn9274_compact {
+ __le64 msdu_end_tag;
+ __le16 sa_sw_peer_id;
+ __le16 info5;
+ __le16 sa_idx;
+ __le16 da_idx_or_sw_peer_id;
+ __le32 info10;
+ __le32 info11;
+ __le32 info12;
+ __le32 flow_id_toeplitz;
+ __le32 ppdu_start_timestamp_63_32;
+ __le32 phy_meta_data;
+ __le32 info13;
+ __le32 info14;
+} __packed;
+
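The gain from the compact layouts is size: the full descriptors carry every word the hardware can emit, while the compact variants keep only the words subscribed via the WMASK values, shrinking each RX TLV accordingly. A build-time check makes the relationship visible (a sketch, assuming both definitions are in scope):

    /* The compact structs must be regenerated whenever the corresponding
     * WMASK changes; these assertions only illustrate the size saving.
     */
    static_assert(sizeof(struct rx_mpdu_start_qcn9274_compact) <
    	      sizeof(struct rx_mpdu_start_qcn9274));
    static_assert(sizeof(struct rx_msdu_end_qcn9274_compact) <
    	      sizeof(struct rx_msdu_end_qcn9274));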
/* These macro definitions are only used for WCN7850 */
#define RX_MSDU_END_WCN7850_INFO2_KEY_ID	GENMASK(7, 0)
@@ -1450,16 +1553,18 @@ struct rx_msdu_end_wcn7850 {
*
*/
-/* TODO: Move to compact TLV approach
- * By default these tlv's are not aligned to 128b boundary
- * Need to remove unused qwords and make them compact/aligned
- */
struct hal_rx_desc_qcn9274 {
struct rx_msdu_end_qcn9274 msdu_end;
struct rx_mpdu_start_qcn9274 mpdu_start;
u8 msdu_payload[];
} __packed;
+struct hal_rx_desc_qcn9274_compact {
+ struct rx_msdu_end_qcn9274_compact msdu_end;
+ struct rx_mpdu_start_qcn9274_compact mpdu_start;
+ u8 msdu_payload[];
+} __packed;
+
#define RX_BE_PADDING0_BYTES 8
#define RX_BE_PADDING1_BYTES 8
@@ -1484,6 +1589,7 @@ struct hal_rx_desc_wcn7850 {
struct hal_rx_desc {
union {
struct hal_rx_desc_qcn9274 qcn9274;
+ struct hal_rx_desc_qcn9274_compact qcn9274_compact;
struct hal_rx_desc_wcn7850 wcn7850;
} u;
} __packed;
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
index f72096684b74..240737e1542d 100644
--- a/drivers/net/wireless/ath/ath12k/trace.h
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -140,6 +140,33 @@ TRACE_EVENT(ath12k_htt_rxdesc,
)
);
+TRACE_EVENT(ath12k_wmi_diag,
+ TP_PROTO(struct ath12k_base *ab, const void *data, size_t len),
+
+ TP_ARGS(ab, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
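The TRACE_EVENT() above generates a trace_ath12k_wmi_diag() call; the __dynamic_array lets the full, variable-length diag payload be recorded without a fixed-size buffer. The WMI RX path invokes it for WMI_DIAG_EVENTID, as wired up in wmi.c below:

    /* Call site added later in this patch (wmi.c): */
    static void
    ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
    {
    	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
    }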
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 11cc3005c0f9..838161dd9098 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -19,6 +19,7 @@
#include "mac.h"
#include "hw.h"
#include "peer.h"
+#include "p2p.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
@@ -162,6 +163,14 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_enable_event) },
+ [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_disable_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
};
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -179,18 +188,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
-
- if (ab->num_radios == 2) {
- config->num_peers = TARGET_NUM_PEERS(DBS);
- config->num_tids = TARGET_NUM_TIDS(DBS);
- } else if (ab->num_radios == 3) {
- config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
- } else {
- /* Control should not reach here */
- config->num_peers = TARGET_NUM_PEERS(SINGLE);
- config->num_tids = TARGET_NUM_TIDS(SINGLE);
- }
+ config->num_peers = ab->num_radios *
+ ath12k_core_get_max_peers_per_radio(ab);
+ config->num_tids = ath12k_core_get_max_num_tids(ab);
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
@@ -228,6 +228,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+ config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -359,8 +362,8 @@ static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
}
static const void **
-ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -369,7 +372,7 @@ ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -493,13 +496,13 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
- pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
- fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
@@ -727,6 +730,20 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk
return 0;
}
+static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath12k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params->single_pdev_only &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
struct sk_buff *skb;
@@ -752,6 +769,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
@@ -770,7 +788,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->desc_id = cpu_to_le32(buf_id);
- cmd->chanfreq = 0;
+ cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->frame_len = cpu_to_le32(frame->len);
@@ -826,6 +844,9 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+ if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
+ cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
+
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
@@ -1024,6 +1045,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->regdomain = cpu_to_le32(arg->regdomain);
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
+ cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
if (!restart) {
if (arg->ssid) {
@@ -1051,7 +1073,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
/* Note: This is a nested TLV containing:
- * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
@@ -1710,6 +1732,48 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, sizeof(u32));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
+
+ ptr += sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
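The command assembled above is one fixed header followed by a single byte-array TLV holding the P2P IE, zero-padded to a 4-byte boundary; p2p_ie[1] is the IE's length octet, so p2p_ie_len covers the two-byte IE header plus the body. The resulting buffer, sketched:

    /*
     * WMI_P2P_GO_SET_BEACON_IE buffer layout (sketch):
     *
     *  +-------------------------------------+  <- skb->data
     *  | wmi_p2p_go_set_beacon_ie_cmd        |  tlv_header, vdev_id, ie_buf_len
     *  +-------------------------------------+
     *  | wmi_tlv header (WMI_TAG_ARRAY_BYTE) |  length = aligned_len
     *  +-------------------------------------+
     *  | P2P IE bytes (p2p_ie_len = ie[1]+2) |
     *  | zero padding up to aligned_len      |  roundup(p2p_ie_len, sizeof(u32))
     *  +-------------------------------------+
     */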
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn)
@@ -2130,7 +2194,7 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
arg->num_bssid = 1;
/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@@ -3265,6 +3329,9 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
+
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}
@@ -4214,7 +4281,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
- if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
+ if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
bands = fw_pdev->supported_bands;
break;
@@ -4271,7 +4338,8 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
- if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
+ if (ab->pdevs[i].pdev_id ==
+ ath12k_wmi_caps_ext_get_pdev_id(caps))
break;
}
@@ -4374,7 +4442,7 @@ static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4452,7 +4520,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4738,7 +4806,7 @@ static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4770,7 +4838,7 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4790,15 +4858,15 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
return 0;
}
-static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4826,7 +4894,7 @@ static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4948,7 +5016,7 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status_irqsafe(ar->hw, msdu);
+ ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4970,7 +5038,7 @@ static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5006,6 +5074,10 @@ static void ath12k_wmi_event_scan_started(struct ath12k *ar)
break;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
+
complete(&ar->scan.started);
break;
}
@@ -5076,6 +5148,8 @@ static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -5087,7 +5161,11 @@ static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
- ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+
break;
}
}
@@ -5141,7 +5219,7 @@ static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5174,7 +5252,7 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5201,7 +5279,7 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5226,13 +5304,14 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
static int freq_to_idx(struct ath12k *ar, int freq)
{
struct ieee80211_supported_band *sband;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
int band, ch, idx = 0;
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
if (!ar->mac.sbands[band].channels)
continue;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
if (!sband)
continue;
@@ -5245,14 +5324,14 @@ exit:
return idx;
}
-static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5291,7 +5370,7 @@ ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5331,7 +5410,7 @@ ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5362,7 +5441,7 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5384,13 +5463,13 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
}
static int
-ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, const struct wmi_pdev_temperature_event *ev)
+ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ const struct wmi_pdev_temperature_event *ev)
{
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5725,8 +5804,7 @@ static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *s
{
u32 vdev_id, tx_status;
- if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath12k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -5864,7 +5942,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- ieee80211_rx_ni(ar->hw, skb);
+ ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
exit:
rcu_read_unlock();
@@ -6037,7 +6115,7 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
- sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arg.mac_addr, NULL);
if (!sta) {
ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
@@ -6110,7 +6188,7 @@ static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath12k_warn(ab, "failed to extract chan info event");
return;
}
@@ -6395,7 +6473,7 @@ static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6446,7 +6524,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -6460,7 +6538,7 @@ ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6494,7 +6572,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
struct ath12k *ar;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6531,7 +6609,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar));
exit:
rcu_read_unlock();
@@ -6546,7 +6624,7 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct ath12k *ar;
struct wmi_pdev_temperature_event ev = {0};
- if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
ath12k_warn(ab, "failed to extract pdev temperature event");
return;
}
@@ -6573,7 +6651,7 @@ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6603,7 +6681,7 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6628,6 +6706,53 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
kfree(tb);
}
+static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+ struct ath12k *ar;
+ int ret, vdev_id;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
+
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+ ret = 0;
+
+out:
+ kfree(tb);
+ return ret;
+}
+
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -6635,7 +6760,7 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6662,6 +6787,70 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
kfree(tb);
}
+static void
+ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ trace_ath12k_wmi_diag(ab, skb->data, skb->len);
+}
+
+static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_enable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_disable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -6757,11 +6946,18 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_RFKILL_STATE_CHANGE_EVENTID:
ath12k_rfkill_state_change_event(ab, skb);
break;
+ case WMI_TWT_ENABLE_EVENTID:
+ ath12k_wmi_twt_enable_event(ab, skb);
+ break;
+ case WMI_TWT_DISABLE_EVENTID:
+ ath12k_wmi_twt_disable_event(ab, skb);
+ break;
+ case WMI_P2P_NOA_EVENTID:
+ ath12k_wmi_p2p_noa_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
- case WMI_TWT_ENABLE_EVENTID:
- case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
ath12k_dbg(ab, ATH12K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
@@ -6772,6 +6968,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_VDEV_DELETE_RESP_EVENTID:
ath12k_vdev_delete_resp_event(ab, skb);
break;
+ case WMI_DIAG_EVENTID:
+ ath12k_wmi_diag_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 06e5b9b4049b..103462feb935 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@@ -168,10 +168,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_PARAMS_MAX_SSID 16
-#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
-
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@@ -2163,6 +2159,10 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_11BE = 289,
+ WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS = 361,
+
+ WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365,
+
WMI_MAX_EXT2_SERVICE,
};
@@ -2350,6 +2350,7 @@ struct ath12k_wmi_resource_config_arg {
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
bool is_reg_cc_ext_event_supported;
+ u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2402,6 +2403,7 @@ struct wmi_init_cmd {
} __packed;
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2542,9 +2544,17 @@ struct ath12k_wmi_hw_mode_cap_params {
#define WMI_MAX_HECAP_PHY_SIZE (3)
+/* pdev_id is present in lower 16 bits of pdev_and_hw_link_ids in
+ * ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params.
+ *
+ * hw_link_id is present in higher 16 bits of pdev_and_hw_link_ids.
+ */
+#define WMI_CAPS_PARAMS_PDEV_ID GENMASK(15, 0)
+#define WMI_CAPS_PARAMS_HW_LINK_ID GENMASK(31, 16)
+
struct ath12k_wmi_mac_phy_caps_params {
__le32 hw_mode_id;
- __le32 pdev_id;
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 supported_flags;
__le32 supported_bands;
@@ -2636,13 +2646,7 @@ struct wmi_service_ready_ext2_event {
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
- union {
- struct {
- __le16 pdev_id;
- __le16 hw_link_id;
- } __packed ath12k_wmi_pdev_to_link_map;
- __le32 pdev_id;
- };
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 wireless_modes_ext;
__le32 eht_cap_mac_info_2ghz[WMI_MAX_EHTCAP_MAC_SIZE];
@@ -2716,6 +2720,9 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_mac_addr_params vdev_macaddr;
__le32 num_cfg_txrx_streams;
__le32 pdev_id;
+ __le32 mbssid_flags;
+ __le32 mbssid_tx_vdev_id;
+ __le32 vdev_stats_id_valid;
__le32 vdev_stats_id;
} __packed;
@@ -2764,6 +2771,10 @@ struct ath12k_wmi_ssid_params {
#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+enum wmi_vdev_mbssid_flags {
+ WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+};
+
struct wmi_vdev_start_request_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -2782,7 +2793,7 @@ struct wmi_vdev_start_request_cmd {
__le32 cac_duration_ms;
__le32 regdomain;
__le32 min_data_rate;
- __le32 mbssid_flags;
+ __le32 mbssid_flags; /* uses enum wmi_vdev_mbssid_flags */
__le32 mbssid_tx_vdev_id;
__le32 eht_ops;
__le32 punct_bitmap;
@@ -3146,7 +3157,7 @@ struct ath12k_wmi_element_info_arg {
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
/* Values lower than this may be refused by some firmware revisions with a scan
* completion with a timedout reason.
@@ -3270,24 +3281,19 @@ struct ath12k_wmi_scan_req_arg {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 dwell_time_active;
u32 dwell_time_active_2g;
u32 dwell_time_passive;
@@ -3300,36 +3306,31 @@ struct ath12k_wmi_scan_req_arg {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@@ -3489,6 +3490,37 @@ struct wmi_get_pdev_temperature_cmd {
__le32 pdev_id;
} __packed;
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+
+struct wmi_p2p_noa_event {
+ __le32 vdev_id;
+} __packed;
+
+struct ath12k_wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+	__le32 duration; /* Absent period duration in microseconds */
+	__le32 interval; /* Absent period interval in microseconds */
+	__le32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath12k_wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ * Bits 15-8 - Index (identifies the instance of NOA sub element)
+ * Bit 16 - Opp PS state of the AP
+ * Bits 23-17 - Ctwindow in TUs
+ * Bits 31-24 - Number of NOA descriptors
+ */
+ __le32 noa_attr;
+ struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
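Consumers unpack noa_attr with the masks above; the event handler added in wmi.c reads the descriptor count this way before handing the structure to ath12k_p2p_noa_update_by_vdev_id(). A fuller decode would look roughly like this (sketch):

    u32 attr = le32_to_cpu(noa->noa_attr);
    bool changed = attr & WMI_P2P_NOA_INFO_CHANGED_FLAG;
    u8 index = u32_get_bits(attr, WMI_P2P_NOA_INFO_INDEX);
    bool opp_ps = attr & WMI_P2P_NOA_INFO_OPP_PS;
    u8 ctwindow = u32_get_bits(attr, WMI_P2P_NOA_INFO_CTWIN_TU);
    u8 num_desc = u32_get_bits(attr, WMI_P2P_NOA_INFO_DESC_NUM);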
#define WMI_BEACON_TX_BUFFER_SIZE 512
struct wmi_bcn_tmpl_cmd {
@@ -3503,6 +3535,12 @@ struct wmi_bcn_tmpl_cmd {
__le32 esp_ie_offset;
} __packed;
+struct wmi_p2p_go_set_beacon_ie_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 ie_buf_len;
+} __packed;
+
struct wmi_vdev_install_key_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -4797,6 +4835,16 @@ struct wmi_rfkill_state_change_event {
__le32 radio_state;
} __packed;
+struct wmi_twt_enable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
+struct wmi_twt_disable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4806,6 +4854,8 @@ int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn);
@@ -4917,4 +4967,30 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
+static inline u32
+ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_caps_ext_get_hw_link_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_pdev_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
#endif
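
The WMI_P2P_NOA_INFO_* masks above pack the whole NoA attribute into one
little-endian word. A minimal sketch (not part of the patch) of unpacking it
with le32_get_bits() from <linux/bitfield.h>; the function and variable names
are illustrative only:

	static void example_parse_noa(const struct ath12k_wmi_p2p_noa_info *noa)
	{
		u32 desc_num = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM);
		u32 ctwin = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
		bool changed = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CHANGED_FLAG);

		/* the descriptor array is bounded by WMI_P2P_MAX_NOA_DESCRIPTORS */
		if (changed && desc_num <= WMI_P2P_MAX_NOA_DESCRIPTORS)
			pr_debug("NoA update: %u descriptors, ctwindow %u TU\n",
				 desc_num, ctwin);
	}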
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index c630343ca4f9..eea4bda77608 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -779,6 +779,10 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
const struct ieee80211_ops ath5k_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath5k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath5k_start,
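
This hunk is the first of several below that wire the same four callbacks into
drivers lacking native channel-context support, so mac80211 can rely on the
chanctx ops always being populated. The recurring pattern, shown for a
hypothetical driver:

	static const struct ieee80211_ops example_ops = {
		/* mac80211-provided emulation for non-chanctx drivers */
		.add_chanctx = ieee80211_emulate_add_chanctx,
		.remove_chanctx = ieee80211_emulate_remove_chanctx,
		.change_chanctx = ieee80211_emulate_change_chanctx,
		.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
		/* driver-specific callbacks follow unchanged */
	};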
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index e37db4af33de..61b2e3f15f0e 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
wiphy_lock(vif->ar->wiphy);
- cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
wiphy_unlock(vif->ar->wiphy);
}
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 9bfaadfa6c00..1a6697b6e3b4 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -144,7 +144,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return ret;
}
-static int ath_ahb_remove(struct platform_device *pdev)
+static void ath_ahb_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
@@ -155,13 +155,11 @@ static int ath_ahb_remove(struct platform_device *pdev)
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
}
-
- return 0;
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
- .remove = ath_ahb_remove,
+ .remove_new = ath_ahb_remove,
.driver = {
.name = "ath9k",
},
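
The .remove_new callback returns void: the platform core ignored the old int
return value anyway, so device teardown must not fail. A minimal sketch of the
conversion, with hypothetical example_* helpers:

	static void example_remove(struct platform_device *pdev)
	{
		struct example_priv *priv = platform_get_drvdata(pdev);

		example_teardown(priv);	/* hypothetical cleanup helper */
		/* nothing to return; errors cannot be reported here */
	}

	static struct platform_driver example_driver = {
		.probe = example_probe,		/* hypothetical */
		.remove_new = example_remove,
		.driver = { .name = "example" },
	};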
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 988222cea9df..acc84e6711b0 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
} else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
+ antcomb->rssi_lna2) {
/* set to A-B */
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 57e2b4c89125..ad72a30b67c3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -851,8 +851,6 @@
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
-#define AR_PHY_POWER_TX_RATE1 0x9934
-#define AR_PHY_POWER_TX_RATE2 0x9938
#define AR_PHY_POWER_TX_RATE_MAX 0x993c
#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
#define PHY_AGC_CLR 0x10000000
@@ -1041,13 +1039,6 @@
#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
-/*
- * AGC 3 Register Map
- */
-#define AR_AGC3_BASE 0xce00
-
-#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
-
/* GLB Registers */
#define AR_GLB_BASE 0x20000
#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index ee72faac2f1d..b399a7926ef5 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -365,10 +365,10 @@ bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
return true;
}
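
Both countdown helpers now take a link ID; 0 selects the default (non-MLO)
link. A sketch of the updated calling convention for a hypothetical driver:

	static bool example_csa_finish(struct ieee80211_vif *vif)
	{
		if (!vif->bss_conf.csa_active)
			return false;

		/* link_id 0: the sole link on a non-MLO interface */
		if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
			return false;

		ieee80211_csa_finish(vif, 0);
		return true;
	}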
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 237f4ec2cffd..6c33e898b300 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
- bool initialized;
};
struct ath9k_htc_tx_ctl {
@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
unsigned long ps_usecount;
bool ps_enabled;
bool ps_idle;
+ bool initialized;
#ifdef CONFIG_MAC80211_LEDS
enum led_brightness brightness;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 533471e69400..547634f82183 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -514,10 +514,10 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
priv->csa_vif = NULL;
return true;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0aa5bdeb44a1..3633f9eb2c55 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
htc_handle->drv_priv = priv;
+ /* Allow ath9k_wmi_event_tasklet() to operate. */
+ smp_wmb();
+ priv->initialized = true;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a9b5212051a..b389e19381c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1868,6 +1868,10 @@ static void ath9k_htc_channel_switch_beacon(struct ieee80211_hw *hw,
}
struct ieee80211_ops ath9k_htc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_htc_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath9k_htc_start,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index efcaeccb055a..ce9c04e418b8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
- /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
- smp_wmb();
- priv->tx.initialized = true;
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c48ff0ffbfef..a2943aaecb20 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2786,6 +2786,10 @@ static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
struct ieee80211_ops ath9k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_tx,
.start = ath9k_start,
.stop = ath9k_stop,
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
index 955147ab48a2..f50994910eae 100644
--- a/drivers/net/wireless/ath/ath9k/reg_aic.h
+++ b/drivers/net/wireless/ath/ath9k/reg_aic.h
@@ -17,10 +17,6 @@
#ifndef REG_AIC_H
#define REG_AIC_H
-#define AR_SM_BASE 0xa200
-#define AR_SM1_BASE 0xb200
-#define AR_AGC_BASE 0x9e00
-
#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 1476b42b52a9..805ad31edba2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
}
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ /* Check if ath9k_htc_probe_device() completed. */
+ if (!data_race(priv->initialized)) {
+ kfree_skb(skb);
+ continue;
+ }
+
hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_id = be16_to_cpu(hdr->command_id);
wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
- /* Check if ath9k_tx_init() completed. */
- if (!data_race(priv->tx.initialized))
- break;
-
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);
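
The initialized flag moves from the TX path to the whole device: the probe
path publishes its state with smp_wmb() before setting the flag, and the
tasklet reads it with data_race() and drops events that arrive before probe
completes. A minimal sketch of the publish/consume pairing, with hypothetical
names:

	static void example_probe_publish(struct example_priv *priv)
	{
		priv->state = example_alloc_state();	/* hypothetical setup */
		/* order all init stores before the flag becomes visible */
		smp_wmb();
		priv->initialized = true;
	}

	static void example_event_tasklet(struct example_priv *priv)
	{
		/* drop work until the probe path has published its state */
		if (!data_race(priv->initialized))
			return;
		/* ... process the event ... */
	}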
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f15684379b03..d519b676a109 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -369,12 +369,11 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
- int ret;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((ret = ath_tid_dequeue(tid, &skb)) == 0) {
+ while (ath_tid_dequeue(tid, &skb) == 0) {
fi = get_frame_info(skb);
bf = fi->bf;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 524327d24964..7e7797bf44b7 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1712,6 +1712,10 @@ static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops carl9170_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = carl9170_op_start,
.stop = carl9170_op_stop,
.tx = carl9170_op_tx,
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6bb9aa2bfe65..e902ca80eba7 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -189,7 +189,7 @@ static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
- struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct _carl9170_tx_superframe *super;
unsigned int chunks;
int cookie = -1;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 41119fb177e3..bfbd3c7a70b3 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1347,6 +1347,10 @@ static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif
}
static const struct ieee80211_ops wcn36xx_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wcn36xx_start,
.stop = wcn36xx_stop,
.add_interface = wcn36xx_add_interface,
@@ -1685,6 +1689,7 @@ static struct platform_driver wcn36xx_driver = {
module_platform_driver(wcn36xx_driver);
+MODULE_DESCRIPTION("Qualcomm Atheros WCN3660/3680 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 447b51cff8f9..0b55a272bfd6 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2178,6 +2178,10 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
static const struct ieee80211_ops at76_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = at76_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = at76_add_interface,
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 67b4bac048e5..c0d8fc0b22fb 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
return dev->__using_pio_transfers;
}
+static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_wake_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_wake_queue(dev->wl->hw, 0);
+}
+
+static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_stop_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_stop_queue(dev->wl->hw, 0);
+}
+
/* Message printing */
__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
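
These helpers exist because the device is registered with hw->queues == 1 when
QoS is disabled (see the main.c hunk below), so passing a priority index above
0 to mac80211 would address a queue that does not exist. Callers simply pass
the skb mapping:

	unsigned int q = skb_get_queue_mapping(skb);

	b43_stop_queue(dev, q);	/* clamps to queue 0 when QoS is off */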
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 760d1a28edc6..6ac7dcebfff9 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
should_inject_overflow(ring)) {
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ b43_stop_queue(dev, skb_mapping);
dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
+ b43_wake_queue(dev, ring->queue_prio);
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 92ca0b2ca286..badb2f494035 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
start_ieee80211:
wl->hw->queues = B43_QOS_QUEUE_NUM;
- if (!modparam_qos || dev->fw.opensource)
+ if (!modparam_qos || dev->fw.opensource ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
wl->hw->queues = 1;
err = ieee80211_register_hw(wl->hw);
@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
wl->tx_queue_stopped[queue_num] = true;
- ieee80211_stop_queue(wl->hw, queue_num);
+ b43_stop_queue(dev, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
}
@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
+ u16 skb_queue_mapping;
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb_queue_mapping])
ieee80211_queue_work(wl->hw, &wl->tx_work);
- } else {
- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
- }
+ else
+ b43_stop_queue(wl->current_dev, skb_queue_mapping);
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -5170,6 +5172,10 @@ static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 0cf70fdb60a6..e41f2f5b4c26 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (total_len > (q->buffer_size - q->buffer_used)) {
/* Not enough memory on the queue. */
err = -EBUSY;
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
goto out;
}
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
(q->free_packet_slots == 0)) {
/* The queue is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
list_add(&pack->list, &q->packets_list);
if (q->stopped) {
- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
+ b43_wake_queue(dev, q->queue_prio);
q->stopped = false;
}
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 760136638a95..18eb610f600a 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3531,6 +3531,10 @@ static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43legacy_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43legacy_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43legacy_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
index ac3a36fa3640..f471c962104a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
@@ -7,21 +7,33 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <feature.h>
#include "vops.h"
-static int brcmf_bca_attach(struct brcmf_pub *drvr)
+#define BRCMF_BCA_E_LAST 212
+
+static void brcmf_bca_feat_attach(struct brcmf_if *ifp)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+	/* SAE support is not confirmed, so disable it for now */
+ ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_SAE);
}
-static void brcmf_bca_detach(struct brcmf_pub *drvr)
+static int brcmf_bca_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_BCA_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_BCA_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_bca_ops = {
- .attach = brcmf_bca_attach,
- .detach = brcmf_bca_detach,
+ .feat_attach = brcmf_bca_feat_attach,
+ .alloc_fweh_info = brcmf_bca_alloc_fweh_info,
};
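
struct_size() computes sizeof(*fweh) plus space for the requested number of
evt_handler[] entries without integer overflow, and pairs with the
__counted_by(num_event_codes) annotation on the flexible array (added in
fweh.h below) so fortified builds can bounds-check indexing. The allocation
idiom in isolation:

	fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_BCA_E_LAST),
		       GFP_KERNEL);
	if (!fweh)
		return -ENOMEM;
	fweh->num_event_codes = BRCMF_BCA_E_LAST;	/* bound for __counted_by */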
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
index d55f3271d619..4f0c1e1a8e60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_bca_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_BCA, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom AP chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 133c5ea6429c..adf8a14feb49 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -32,6 +32,7 @@
#include "vendor.h"
#include "bus.h"
#include "common.h"
+#include "fwvid.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
@@ -1179,8 +1180,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
scan_request = cfg->scan_request;
cfg->scan_request = NULL;
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
+ timer_delete_sync(&cfg->escan_timeout);
if (fw_abort) {
/* Do a scan abort to stop the driver's scan engine */
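
timer_delete_sync() is a no-op when the timer is not pending, so the
timer_pending() guard removed above was redundant; the unconditional call is
both simpler and free of the race between the check and the delete. The
resulting idiom:

	/* safe whether or not the timer is currently pending */
	timer_delete_sync(&cfg->escan_timeout);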
@@ -1687,52 +1687,39 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
return reason;
}
-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
int err;
+ if (key_len > sizeof(pmk.key)) {
+ bphy_err(drvr, "key must be less than %zu bytes\n",
+ sizeof(pmk.key));
+ return -EINVAL;
+ }
+
memset(&pmk, 0, sizeof(pmk));
- /* pass pmk directly */
- pmk.key_len = cpu_to_le16(pmk_len);
- pmk.flags = cpu_to_le16(0);
- memcpy(pmk.key, pmk_data, pmk_len);
+ /* pass key material directly */
+ pmk.key_len = cpu_to_le16(key_len);
+ pmk.flags = cpu_to_le16(flags);
+ memcpy(pmk.key, key, key_len);
- /* store psk in firmware */
+ /* store key material in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
&pmk, sizeof(pmk));
if (err < 0)
bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
- pmk_len);
+ key_len);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec);
-static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
- u16 pwd_len)
+static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
- struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_wsec_sae_pwd_le sae_pwd;
- int err;
-
- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
- bphy_err(drvr, "sae_password must be less than %d\n",
- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
- return -EINVAL;
- }
-
- sae_pwd.key_len = cpu_to_le16(pwd_len);
- memcpy(sae_pwd.key, pwd_data, pwd_len);
-
- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
- sizeof(sae_pwd));
- if (err < 0)
- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
- pwd_len);
-
- return err;
+ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
}
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
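
brcmf_set_wsec() generalizes the old PMK setter with a flags word so vendor
modules can reuse it; passing 0 preserves the legacy PMK behaviour, as the
brcmf_set_pmk() wrapper above shows. The flag values themselves are
firmware-defined and not introduced by this patch:

	/* legacy PMK: key material with no flags */
	err = brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);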
@@ -2503,8 +2490,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
}
- err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
- sme->crypto.sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
if (!err && sme->crypto.psk)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
@@ -3081,7 +3067,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
struct brcmf_scb_val_le scbval;
struct brcmf_pktcnt_le pktcnt;
s32 err;
- u32 rate;
+ u32 rate = 0;
u32 rssi;
/* Get the current tx rate */
@@ -3779,8 +3765,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
if (req->channels[i] == chan)
break;
}
- if (i == req->n_channels)
- req->channels[req->n_channels++] = chan;
+ if (i == req->n_channels) {
+ req->n_channels++;
+ req->channels[i] = chan;
+ }
for (i = 0; i < req->n_ssids; i++) {
if (req->ssids[i].ssid_len == ssid_len &&
@@ -5252,8 +5240,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if (crypto->sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
- err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
- crypto->sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, crypto);
if (err < 0)
goto exit;
}
@@ -5287,7 +5274,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
settings->hidden_ssid);
if (err) {
bphy_err(drvr, "%s closednet error (%d)\n",
- settings->hidden_ssid ?
+ (settings->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) ?
"enabled" : "disabled",
err);
goto exit;
@@ -5360,10 +5347,12 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
msleep(400);
if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ struct cfg80211_crypto_settings crypto = {};
+
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
brcmf_set_pmk(ifp, NULL, 0);
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
- brcmf_set_sae_password(ifp, NULL, 0);
+ brcmf_fwvid_set_sae_password(ifp, &crypto);
profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
}
@@ -7269,7 +7258,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
u32 nmode = 0;
u32 vhtmode = 0;
u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
- u32 rxchain;
+ u32 rxchain = 0;
u32 nchain;
int err;
s32 i;
@@ -8435,6 +8424,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
wl_deinit_priv(cfg);
+ cancel_work_sync(&cfg->escan_timeout_work);
brcmf_free_wiphy(cfg->wiphy);
kfree(cfg);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 0e1fa3f0dea2..dc3a6a537507 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -468,4 +468,6 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
+
#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index b6d458e022fa..b24faae35873 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -266,7 +266,7 @@ static int brcmf_c_process_cal_blob(struct brcmf_if *ifp)
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ struct brcmf_fweh_info *fweh = drvr->fweh;
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_bus *bus;
struct brcmf_rev_info_le revinfo;
@@ -413,15 +413,21 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
brcmf_c_set_joinpref_default(ifp);
/* Setup event_msgs, enable E_IF */
- err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Get event_msgs error (%d)\n", err);
goto done;
}
- setbit(eventmask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ /*
+ * BRCMF_E_IF can safely be used to set the appropriate bit
+ * in the event_mask as the firmware event code is guaranteed
+	 * to match the value of BRCMF_E_IF, a legacy event code that
+	 * is identical across all vendors.
+ */
+ setbit(fweh->event_mask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Set event_msgs error (%d)\n", err);
goto done;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index f599d5f896e8..bf91b1e1368f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- u32 monitor;
+ u32 monitor = 0;
int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -1348,13 +1348,17 @@ int brcmf_attach(struct device *dev)
goto fail;
}
+ /* attach firmware event handler */
+ ret = brcmf_fweh_attach(drvr);
+ if (ret != 0) {
+ bphy_err(drvr, "brcmf_fweh_attach failed\n");
+ goto fail;
+ }
+
/* Attach to events important for core code */
brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
brcmf_psm_watchdog_notify);
- /* attach firmware event handler */
- brcmf_fweh_attach(drvr);
-
ret = brcmf_bus_started(drvr, drvr->ops);
if (ret != 0) {
bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index e4f911dd414b..ea76b8d33401 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -122,7 +122,7 @@ struct brcmf_pub {
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- struct brcmf_fweh_info fweh;
+ struct brcmf_fweh_info *fweh;
struct brcmf_ampdu_rx_reorder
*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index b75652ba9359..9a4837881486 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -7,21 +7,53 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <fwil.h>
#include "vops.h"
-static int brcmf_cyw_attach(struct brcmf_pub *drvr)
+#define BRCMF_CYW_E_LAST 197
+
+static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ u16 pwd_len = crypto->sae_pwd_len;
+ int err;
+
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, crypto->sae_pwd, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
}
-static void brcmf_cyw_detach(struct brcmf_pub *drvr)
+static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_CYW_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_CYW_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_cyw_ops = {
- .attach = brcmf_cyw_attach,
- .detach = brcmf_cyw_detach,
+ .set_sae_password = brcmf_cyw_set_sae_pwd,
+ .alloc_fweh_info = brcmf_cyw_alloc_fweh_info,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
index f82fbbe3ecef..90d06cda03a2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_cyw_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_CYW, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Cypress/Infineon chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 86ff174936a9..c3a602197662 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -83,6 +83,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 6d10c9efbe93..f23310a77a5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -13,6 +13,7 @@
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
+#include "fwvid.h"
#include "feature.h"
#include "common.h"
@@ -183,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
enum brcmf_feat_id id, char *name)
{
- u32 data;
+ u32 data = 0;
int err;
/* we need to know firmware error */
@@ -339,6 +340,11 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
+ brcmf_feat_wlc_version_overrides(drvr);
+ brcmf_feat_firmware_overrides(drvr);
+
+ brcmf_fwvid_feat_attach(ifp);
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
@@ -346,9 +352,6 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
- brcmf_feat_wlc_version_overrides(drvr);
- brcmf_feat_firmware_overrides(drvr);
-
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 68960ae98987..0774f6c59226 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -14,7 +14,8 @@
#include "fweh.h"
#include "fwil.h"
#include "proto.h"
-
+#include "bus.h"
+#include "fwvid.h"
/**
* struct brcmf_fweh_queue_item - event item on event queue.
*
@@ -28,7 +29,7 @@
*/
struct brcmf_fweh_queue_item {
struct list_head q;
- enum brcmf_fweh_event_code code;
+ u32 code;
u8 ifidx;
u8 ifaddr[ETH_ALEN];
struct brcmf_event_msg_be emsg;
@@ -94,7 +95,7 @@ static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
struct brcmf_if *ifp,
- enum brcmf_fweh_event_code code,
+ u32 fwcode,
struct brcmf_event_msg *emsg,
void *data)
{
@@ -102,13 +103,13 @@ static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
int err = -EINVAL;
if (ifp) {
- fweh = &ifp->drvr->fweh;
+ fweh = ifp->drvr->fweh;
/* handle the event if valid interface and handler */
- if (fweh->evt_handler[code])
- err = fweh->evt_handler[code](ifp, emsg, data);
+ if (fweh->evt_handler[fwcode])
+ err = fweh->evt_handler[fwcode](ifp, emsg, data);
else
- bphy_err(drvr, "unhandled event %d ignored\n", code);
+ bphy_err(drvr, "unhandled fwevt %d ignored\n", fwcode);
} else {
bphy_err(drvr, "no interface object\n");
}
@@ -142,7 +143,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
is_p2pdev = ((ifevent->flags & BRCMF_E_IF_FLAG_NOIF) &&
(ifevent->role == BRCMF_E_IF_ROLE_P2P_CLIENT ||
((ifevent->role == BRCMF_E_IF_ROLE_STA) &&
- (drvr->fweh.p2pdev_setup_ongoing))));
+ (drvr->fweh->p2pdev_setup_ongoing))));
if (!is_p2pdev && (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
brcmf_dbg(EVENT, "event can be ignored\n");
return;
@@ -163,7 +164,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
if (!is_p2pdev)
brcmf_proto_add_if(drvr, ifp);
- if (!drvr->fweh.evt_handler[BRCMF_E_IF])
+ if (!drvr->fweh->evt_handler[BRCMF_E_IF])
if (brcmf_net_attach(ifp, false) < 0)
return;
}
@@ -183,6 +184,45 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
}
}
+static void brcmf_fweh_map_event_code(struct brcmf_fweh_info *fweh,
+ enum brcmf_fweh_event_code code,
+ u32 *fw_code)
+{
+ int i;
+
+ if (WARN_ON(!fw_code))
+ return;
+
+ *fw_code = code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].code == code) {
+ *fw_code = fweh->event_map->items[i].fwevt_code;
+ break;
+ }
+ }
+ }
+}
+
+static void brcmf_fweh_map_fwevt_code(struct brcmf_fweh_info *fweh, u32 fw_code,
+ enum brcmf_fweh_event_code *code)
+{
+ int i;
+
+ if (WARN_ON(!code))
+ return;
+
+ *code = fw_code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].fwevt_code == fw_code) {
+ *code = fweh->event_map->items[i].code;
+ break;
+ }
+ }
+ }
+}
+
/**
* brcmf_fweh_dequeue_event() - get event from the queue.
*
@@ -221,15 +261,19 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
struct brcmf_event_msg emsg;
fweh = container_of(work, struct brcmf_fweh_info, event_work);
- drvr = container_of(fweh, struct brcmf_pub, fweh);
+ drvr = fweh->drvr;
while ((event = brcmf_fweh_dequeue_event(fweh))) {
- brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
- brcmf_fweh_event_name(event->code), event->code,
+ enum brcmf_fweh_event_code code;
+
+ brcmf_fweh_map_fwevt_code(fweh, event->code, &code);
+ brcmf_dbg(EVENT, "event %s (%u:%u) ifidx %u bsscfg %u addr %pM\n",
+ brcmf_fweh_event_name(code), code, event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
- bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ bphy_err(drvr, "invalid bsscfg index: %u\n",
+ event->emsg.bsscfgidx);
goto event_free;
}
@@ -237,7 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
emsg_be = &event->emsg;
emsg.version = be16_to_cpu(emsg_be->version);
emsg.flags = be16_to_cpu(emsg_be->flags);
- emsg.event_code = event->code;
+ emsg.event_code = code;
emsg.status = be32_to_cpu(emsg_be->status);
emsg.reason = be32_to_cpu(emsg_be->reason);
emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
@@ -283,7 +327,7 @@ event_free:
*/
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
{
- ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing;
+ ifp->drvr->fweh->p2pdev_setup_ongoing = ongoing;
}
/**
@@ -291,12 +335,27 @@ void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
*
* @drvr: driver information object.
*/
-void brcmf_fweh_attach(struct brcmf_pub *drvr)
+int brcmf_fweh_attach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh;
+ int err;
+
+ err = brcmf_fwvid_alloc_fweh_info(drvr);
+ if (err < 0)
+ return err;
+
+ fweh = drvr->fweh;
+ fweh->drvr = drvr;
+
+ fweh->event_mask_len = DIV_ROUND_UP(fweh->num_event_codes, 8);
+ fweh->event_mask = kzalloc(fweh->event_mask_len, GFP_KERNEL);
+ if (!fweh->event_mask)
+ return -ENOMEM;
+
INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
spin_lock_init(&fweh->evt_q_lock);
INIT_LIST_HEAD(&fweh->event_q);
+ return 0;
}
/**
@@ -306,14 +365,19 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr)
*/
void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+
+ if (!fweh)
+ return;
/* cancel the worker if initialized */
if (fweh->event_work.func) {
cancel_work_sync(&fweh->event_work);
WARN_ON(!list_empty(&fweh->event_q));
- memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
}
+ drvr->fweh = NULL;
+ kfree(fweh->event_mask);
+ kfree(fweh);
}
/**
@@ -326,11 +390,17 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
brcmf_fweh_handler_t handler)
{
- if (drvr->fweh.evt_handler[code]) {
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+ u32 evt_handler_idx;
+
+ brcmf_fweh_map_event_code(fweh, code, &evt_handler_idx);
+
+ if (fweh->evt_handler[evt_handler_idx]) {
bphy_err(drvr, "event code %d already registered\n", code);
return -ENOSPC;
}
- drvr->fweh.evt_handler[code] = handler;
+
+ fweh->evt_handler[evt_handler_idx] = handler;
brcmf_dbg(TRACE, "event handler registered for %s\n",
brcmf_fweh_event_name(code));
return 0;
@@ -345,9 +415,12 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
void brcmf_fweh_unregister(struct brcmf_pub *drvr,
enum brcmf_fweh_event_code code)
{
+ u32 evt_handler_idx;
+
brcmf_dbg(TRACE, "event handler cleared for %s\n",
brcmf_fweh_event_name(code));
- drvr->fweh.evt_handler[code] = NULL;
+ brcmf_fweh_map_event_code(drvr->fweh, code, &evt_handler_idx);
+ drvr->fweh->evt_handler[evt_handler_idx] = NULL;
}
/**
@@ -357,27 +430,28 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
*/
int brcmf_fweh_activate_events(struct brcmf_if *ifp)
{
- struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_fweh_info *fweh = ifp->drvr->fweh;
+ enum brcmf_fweh_event_code code;
int i, err;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- memset(eventmask, 0, sizeof(eventmask));
- for (i = 0; i < BRCMF_E_LAST; i++) {
- if (ifp->drvr->fweh.evt_handler[i]) {
+ memset(fweh->event_mask, 0, fweh->event_mask_len);
+ for (i = 0; i < fweh->num_event_codes; i++) {
+ if (fweh->evt_handler[i]) {
+ brcmf_fweh_map_fwevt_code(fweh, i, &code);
brcmf_dbg(EVENT, "enable event %s\n",
- brcmf_fweh_event_name(i));
- setbit(eventmask, i);
+ brcmf_fweh_event_name(code));
+ setbit(fweh->event_mask, i);
}
}
/* want to handle IF event as well */
brcmf_dbg(EVENT, "enable event IF\n");
- setbit(eventmask, BRCMF_E_IF);
+ setbit(fweh->event_mask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
- eventmask, BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err)
- bphy_err(drvr, "Set event_msgs error (%d)\n", err);
+ bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err);
return err;
}
@@ -397,21 +471,21 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
u32 packet_len, gfp_t gfp)
{
- enum brcmf_fweh_event_code code;
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ u32 fwevt_idx;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
struct brcmf_fweh_queue_item *event;
void *data;
u32 datalen;
/* get event info */
- code = get_unaligned_be32(&event_packet->msg.event_type);
+ fwevt_idx = get_unaligned_be32(&event_packet->msg.event_type);
datalen = get_unaligned_be32(&event_packet->msg.datalen);
data = &event_packet[1];
- if (code >= BRCMF_E_LAST)
+ if (fwevt_idx >= fweh->num_event_codes)
return;
- if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+ if (fwevt_idx != BRCMF_E_IF && !fweh->evt_handler[fwevt_idx])
return;
if (datalen > BRCMF_DCMD_MAXLEN ||
@@ -422,13 +496,13 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (!event)
return;
- event->datalen = datalen;
- event->code = code;
+ event->code = fwevt_idx;
event->ifidx = event_packet->msg.ifidx;
/* use memcpy to get aligned event message */
memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg));
memcpy(event->data, data, datalen);
+ event->datalen = datalen;
memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN);
brcmf_fweh_queue_event(fweh, event);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 48414e8b9389..9ca1b2aadcb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -17,6 +17,10 @@ struct brcmf_pub;
struct brcmf_if;
struct brcmf_cfg80211_info;
+#define BRCMF_ABSTRACT_EVENT_BIT BIT(31)
+#define BRCMF_ABSTRACT_ENUM_DEF(_id, _val) \
+ BRCMF_ENUM_DEF(_id, (BRCMF_ABSTRACT_EVENT_BIT | (_val)))
+
/* list of firmware events */
#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
BRCMF_ENUM_DEF(SET_SSID, 0) \
@@ -98,16 +102,9 @@ struct brcmf_cfg80211_info;
/* firmware event codes sent by the dongle */
enum brcmf_fweh_event_code {
BRCMF_FWEH_EVENT_ENUM_DEFLIST
- /* this determines event mask length which must match
- * minimum length check in device firmware so it is
- * hard-coded here.
- */
- BRCMF_E_LAST = 139
};
#undef BRCMF_ENUM_DEF
-#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
-
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
@@ -288,27 +285,66 @@ typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
void *data);
/**
+ * struct brcmf_fweh_event_map_item - fweh event and firmware event pair.
+ *
+ * @code: fweh event code as used by higher layers.
+ * @fwevt_code: firmware event code as used by firmware.
+ *
+ * This mapping is needed when a functionally identical event has a
+ * different numerical definition between vendors. When such a mapping
+ * is needed, the higher-layer event code must not collide with any
+ * firmware event code.
+ */
+struct brcmf_fweh_event_map_item {
+ enum brcmf_fweh_event_code code;
+ u32 fwevt_code;
+};
+
+/**
+ * struct brcmf_fweh_event_map - mapping between firmware event and fweh event.
+ *
+ * @n_items: number of mapping items.
+ * @items: array of fweh event and firmware event pairs.
+ */
+struct brcmf_fweh_event_map {
+ u32 n_items;
+ const struct brcmf_fweh_event_map_item items[] __counted_by(n_items);
+};
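
A vendor module would supply such a map as a static table. Statically
initializing a flexible array member is a GNU C extension the kernel build
permits; the event name and firmware code below are hypothetical placeholders,
not values from this patch:

	static const struct brcmf_fweh_event_map example_event_map = {
		.n_items = 1,
		.items = {
			/* BRCMF_E_EXAMPLE is a hypothetical abstract code */
			{ .code = BRCMF_E_EXAMPLE, .fwevt_code = 200 },
		},
	};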
+
+/**
* struct brcmf_fweh_info - firmware event handling information.
*
* @p2pdev_setup_ongoing: P2P device creation in progress.
* @event_work: event worker.
* @evt_q_lock: lock for event queue protection.
* @event_q: event queue.
- * @evt_handler: registered event handlers.
+ * @event_mask_len: length of @event_mask used to enable firmware events.
+ * @event_mask: byte array used in 'event_msgs' iovar command.
+ * @event_map: optional mapping between fweh event codes and firmware
+ *	event codes, provided by the vendor-specific module for events
+ *	whose numerical definition differs between vendors.
+ * @num_event_codes: number of firmware event codes supported by the
+ *	firmware, which performs a minimum-length check on @event_mask.
+ *	The vendor-specific module provides this value; it determines
+ *	@event_mask_len and thus the allocation size of @event_mask.
+ * @evt_handler: event handler registry indexed by firmware event code.
*/
struct brcmf_fweh_info {
+ struct brcmf_pub *drvr;
bool p2pdev_setup_ongoing;
struct work_struct event_work;
spinlock_t evt_q_lock;
struct list_head event_q;
- int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
- const struct brcmf_event_msg *evtmsg,
- void *data);
+ uint event_mask_len;
+ u8 *event_mask;
+ struct brcmf_fweh_event_map *event_map;
+ uint num_event_codes;
+ brcmf_fweh_handler_t evt_handler[] __counted_by(num_event_codes);
};
const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code);
-void brcmf_fweh_attach(struct brcmf_pub *drvr);
+int brcmf_fweh_attach(struct brcmf_pub *drvr);
void brcmf_fweh_detach(struct brcmf_pub *drvr);
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
int (*handler)(struct brcmf_if *ifp,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index 72fe8bce6eaf..6385a7db7f7d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -142,6 +142,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_set);
s32
brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
@@ -160,36 +161,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
-
-
-s32
-brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(data);
-
- mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
- mutex_unlock(&ifp->drvr->proto_block);
-
- return err;
-}
-
-s32
-brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(*data);
-
- mutex_lock(&ifp->drvr->proto_block);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
- mutex_unlock(&ifp->drvr->proto_block);
- *data = le32_to_cpu(data_le);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
-
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_get);
static u32
brcmf_create_iovar(const char *name, const char *data, u32 datalen,
@@ -239,6 +211,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *dat
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_set);
s32
brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
@@ -270,26 +243,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
-}
-
-s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_get);
static u32
brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
@@ -364,6 +318,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_set);
s32
brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
@@ -394,28 +349,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
- sizeof(data_le));
-}
-
-s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
- sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_get);
static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
@@ -465,6 +399,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_set);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
@@ -494,39 +429,4 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
- sizeof(data_le));
-}
-
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
-
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
-{
- return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
-}
-
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
-{
- __le16 data_le = cpu_to_le16(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le16_to_cpu(data_le);
- return err;
-}
-
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_get);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index bc693157c4b1..a315a7fac6a0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -81,29 +81,122 @@
s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
-s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
-s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+static inline
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
- u32 len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
+ err = brcmf_fil_cmd_data_set(ifp, cmd, &data_le, sizeof(data_le));
+
+ return err;
+}
+static inline
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(*data);
+
+ err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
+
+ return err;
+}
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
+ const void *data, u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+static inline
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+static inline
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+ sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
+static inline
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+static inline
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u8 *data)
+{
+ return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
+}
+static inline
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u16 *data)
+{
+ __le16 data_le = cpu_to_le16(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le16_to_cpu(data_le);
+ return err;
+}
#endif /* _fwil_h_ */
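The 32-bit wrappers above now live in fwil.h as static inlines, so every caller (including the vendor-specific modules) gets the byte-order handling without the driver core exporting one symbol per width. A minimal usage sketch, assuming a firmware iovar named "mpc"; the iovar name and caller are illustrative only:

static int example_toggle_mpc(struct brcmf_if *ifp, bool on)
{
	u32 val = on ? 1 : 0;
	s32 err;

	/* cpu_to_le32()/le32_to_cpu() happen inside the inline helpers */
	err = brcmf_fil_iovar_int_set(ifp, "mpc", val);
	if (err)
		return err;

	/* *data is used as both input and output buffer on the get path */
	return brcmf_fil_iovar_int_get(ifp, "mpc", &val);
}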
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 9d248ba1c0b2..e74a23e11830 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -584,7 +584,7 @@ struct brcmf_wsec_key_le {
struct brcmf_wsec_pmk_le {
__le16 key_len;
__le16 flags;
- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
index 86eafdb40541..41eafcda77f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
@@ -90,7 +90,7 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *vmod,
return -ERANGE;
if (WARN_ON(!vmod) || WARN_ON(!vops) ||
- WARN_ON(!vops->attach) || WARN_ON(!vops->detach))
+ WARN_ON(!vops->alloc_fweh_info))
return -EINVAL;
if (WARN_ON(fwvid_list[fwvid].vmod))
@@ -150,7 +150,7 @@ static inline int brcmf_fwvid_request_module(enum brcmf_fwvendor fwvid)
}
#endif
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
+int brcmf_fwvid_attach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
int ret;
@@ -175,7 +175,7 @@ int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
return ret;
}
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
+void brcmf_fwvid_detach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
@@ -187,9 +187,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
mutex_lock(&fwvid_list_lock);
- drvr->vops = NULL;
- list_del(&drvr->bus_if->list);
-
+ if (drvr->vops) {
+ drvr->vops = NULL;
+ list_del(&drvr->bus_if->list);
+ }
mutex_unlock(&fwvid_list_lock);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
index 43df58bb70ad..e6ac9fc341bc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
@@ -6,12 +6,15 @@
#define FWVID_H_
#include "firmware.h"
+#include "cfg80211.h"
struct brcmf_pub;
+struct brcmf_if;
struct brcmf_fwvid_ops {
- int (*attach)(struct brcmf_pub *drvr);
- void (*detach)(struct brcmf_pub *drvr);
+ void (*feat_attach)(struct brcmf_if *ifp);
+ int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
+ int (*alloc_fweh_info)(struct brcmf_pub *drvr);
};
/* exported functions */
@@ -20,28 +23,37 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *mod,
int brcmf_fwvid_unregister_vendor(enum brcmf_fwvendor fwvid, struct module *mod);
/* core driver functions */
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr);
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr);
+int brcmf_fwvid_attach(struct brcmf_pub *drvr);
+void brcmf_fwvid_detach(struct brcmf_pub *drvr);
const char *brcmf_fwvid_vendor_name(struct brcmf_pub *drvr);
-static inline int brcmf_fwvid_attach(struct brcmf_pub *drvr)
+static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp)
{
- int ret;
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
- ret = brcmf_fwvid_attach_ops(drvr);
- if (ret)
- return ret;
+ if (!vops->feat_attach)
+ return;
- return drvr->vops->attach(drvr);
+ vops->feat_attach(ifp);
}
-static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr)
+static inline int brcmf_fwvid_set_sae_password(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
+{
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+ if (!vops || !vops->set_sae_password)
+ return -EOPNOTSUPP;
+
+ return vops->set_sae_password(ifp, crypto);
+}
+
+static inline int brcmf_fwvid_alloc_fweh_info(struct brcmf_pub *drvr)
{
if (!drvr->vops)
- return;
+ return -EIO;
- drvr->vops->detach(drvr);
- brcmf_fwvid_detach_ops(drvr);
+ return drvr->vops->alloc_fweh_info(drvr);
}
#endif /* FWVID_H_ */
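Both new hooks are optional from the dispatcher's point of view: brcmf_fwvid_feat_attach() silently skips a missing callback, while brcmf_fwvid_set_sae_password() reports -EOPNOTSUPP so callers can fall back. A sketch of a caller under that contract; the function and its fallback policy are hypothetical:

static int example_apply_sae(struct brcmf_if *ifp,
			     struct cfg80211_crypto_settings *crypto)
{
	int err = brcmf_fwvid_set_sae_password(ifp, crypto);

	/* No vendor module, or one without the hook: use a generic path
	 * instead of failing the connect attempt (hypothetical policy).
	 */
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}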
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
index 5573a47766ad..05d7c2a4fba5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
@@ -7,21 +7,34 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <cfg80211.h>
#include "vops.h"
-static int brcmf_wcc_attach(struct brcmf_pub *drvr)
+#define BRCMF_WCC_E_LAST 213
+
+static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_debug("%s: executing\n", __func__);
- return 0;
+ return brcmf_set_wsec(ifp, crypto->sae_pwd, crypto->sae_pwd_len,
+ BRCMF_WSEC_PASSPHRASE);
}
-static void brcmf_wcc_detach(struct brcmf_pub *drvr)
+static int brcmf_wcc_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_debug("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_WCC_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_WCC_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_wcc_ops = {
- .attach = brcmf_wcc_attach,
- .detach = brcmf_wcc_detach,
+ .set_sae_password = brcmf_wcc_set_sae_pwd,
+ .alloc_fweh_info = brcmf_wcc_alloc_fweh_info,
};
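brcmf_wcc_alloc_fweh_info() sizes its allocation with struct_size(), the overflow-safe idiom for a struct ending in a flexible array member: the multiply-and-add saturates, so a bogus count makes kzalloc() fail rather than under-allocate. A self-contained sketch of the same idiom, with example types, assuming <linux/slab.h> and <linux/overflow.h>:

struct example_info {
	unsigned int num;
	void *handler[];	/* flexible array member */
};

static struct example_info *example_alloc(unsigned int n)
{
	struct example_info *info;

	info = kzalloc(struct_size(info, handler, n), GFP_KERNEL);
	if (!info)
		return NULL;
	info->num = n;
	return info;
}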
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
index 02918d434556..b66135e3cff4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_wcc_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_WCC, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom mobility chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
index 89c8829528c2..9540a05247c2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <net/mac80211.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 543e93ec49d2..92860dc0a92e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -959,6 +959,10 @@ static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops brcms_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = brcms_ops_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = brcms_ops_start,
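mac80211 now expects channel-context callbacks in every ieee80211_ops table; drivers that still drive a single channel through the legacy config path plug in the ieee80211_emulate_*() helpers, which translate chanctx operations back onto that path. The same four-line block recurs in each ops table this series touches; a minimal sketch of the pattern:

static const struct ieee80211_ops example_ops = {
	/* emulation helpers provided by mac80211 for non-chanctx drivers */
	.add_chanctx = ieee80211_emulate_add_chanctx,
	.remove_chanctx = ieee80211_emulate_remove_chanctx,
	.change_chanctx = ieee80211_emulate_change_chanctx,
	.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
	/* driver-specific callbacks follow */
};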
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index ccc621b8ed9f..a27d6f0b8819 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
return sh;
}
-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
+static void wlc_phy_timercb_phycal(void *ptr)
{
+ struct brcms_phy *pi = ptr;
uint delay = 5;
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
@@ -551,8 +552,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
if (!pi->phycal_timer)
goto err;
- if (!wlc_phy_attach_nphy(pi))
- goto err;
+ wlc_phy_attach_nphy(pi);
} else if (ISLCNPHY(pi)) {
if (!wlc_phy_attach_lcnphy(pi))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
index 8668fa5558a2..70a9ec050717 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
@@ -941,7 +941,7 @@ void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+void wlc_phy_attach_nphy(struct brcms_phy *pi);
bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 8580a2754789..cd9b502a6a9f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14546,7 +14546,7 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
wlc_phy_txpwr_apply_nphy(pi);
}
-static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
+static void wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
{
struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
@@ -14595,11 +14595,9 @@ static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
pi->phycal_tempdelta = 0;
wlc_phy_txpwr_srom_read_ppr_nphy(pi);
-
- return true;
}
-bool wlc_phy_attach_nphy(struct brcms_phy *pi)
+void wlc_phy_attach_nphy(struct brcms_phy *pi)
{
uint i;
@@ -14645,10 +14643,7 @@ bool wlc_phy_attach_nphy(struct brcms_phy *pi)
pi->pi_fptr.chanset = wlc_phy_chanspec_set_nphy;
pi->pi_fptr.txpwrrecalc = wlc_phy_txpower_recalc_target_nphy;
- if (!wlc_phy_txpwr_srom_read_nphy(pi))
- return false;
-
- return true;
+ wlc_phy_txpwr_srom_read_nphy(pi);
}
static s32 get_rf_pwr_offset(struct brcms_phy *pi, s16 pga_gn, s16 pad_gn)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index a0de5db0cd64..b72381791536 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
}
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name)
{
return (struct wlapi_timer *)
- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
- arg, name);
+ brcms_init_timer(physhim->wl, fn, arg, name);
}
void wlapi_free_timer(struct wlapi_timer *t)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
index dd8774717ade..27d0934e600e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name);
void wlapi_free_timer(struct wlapi_timer *t);
void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
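Narrowing wlapi_init_timer() to a void (*)(void *) callback removes the function-pointer cast that the brcms_init_timer() call previously needed; calling through a pointer of a mismatched function type is undefined behavior and is rejected by kernel CFI. The cast now happens inside the callback, as wlc_phy_timercb_phycal() shows above. A minimal sketch of the pattern:

static void example_timer_cb(void *ptr)
{
	struct brcms_phy *pi = ptr;	/* callee recovers its typed context */

	/* periodic work using pi goes here */
}

/* registration now passes the callback without a cast, e.g.:
 * t = wlapi_init_timer(physhim, example_timer_cb, pi, "phycal");
 */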
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 9eaf5ec133f9..075b705a8d7b 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3432,6 +3432,10 @@ static const struct attribute_group il3945_attribute_group = {
};
static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il3945_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il3945_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 70e420df1643..4beb7be6d51d 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6301,6 +6301,10 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
}
static const struct ieee80211_ops il4965_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il4965_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il4965_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 17570d62c896..9d33a66a49b5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -3438,9 +3438,7 @@ il_init_geos(struct il_priv *il)
if (!channels)
return -ENOMEM;
- rates =
- kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
- GFP_KERNEL);
+ rates = kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL);
if (!rates) {
kfree(channels);
return -ENOMEM;
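kcalloc(n, size, flags) is preferred over kzalloc(n * size, flags) because it fails the allocation when n * size would overflow instead of silently wrapping to a small buffer, and sizeof(*rates) keeps the element size tied to the pointer's type. The two forms side by side, as a sketch:

/* open-coded: the multiplication can overflow unnoticed */
rates = kzalloc(sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY,
		GFP_KERNEL);

/* checked: returns NULL if RATE_COUNT_LEGACY * sizeof(*rates) overflows */
rates = kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL);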
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 20971304fdef..4b04865fc2c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -46,6 +46,15 @@ config IWLWIFI
if IWLWIFI
+config IWLWIFI_KUNIT_TESTS
+ tristate
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option for iwlwifi kunit tests.
+
+ If unsure, say N.
+
config IWLWIFI_LEDS
bool
depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index b983982aee45..8bb94a4c12cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -18,6 +18,7 @@ iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
+iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
@@ -33,4 +34,6 @@ obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
+
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 134635c70ce8..2e530fc928a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_AX210_UCODE_API_MAX 86
+#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_AX210_UCODE_API_MIN 59
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 82da957adcf6..18b1f2cf08cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 86
+#define IWL_BZ_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
@@ -129,10 +129,6 @@ static const struct iwl_base_params iwl_bz_base_params = {
IWL_DEVICE_BZ_COMMON, \
.ht_params = &iwl_22000_ht_params
-#define IWL_DEVICE_GL_A \
- IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_gl_a_ht_params
-
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
* A-MPDU, with additional overhead to account for processing time.
@@ -153,6 +149,7 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const struct iwl_cfg iwl_cfg_bz = {
.fw_name_mac = "bz",
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 80eb9b499538..9b79279fd76c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 86
+#define IWL_SC_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
@@ -33,6 +33,10 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0"
+#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
+#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0"
+#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
+#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
@@ -48,6 +52,14 @@
IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -124,6 +136,9 @@ static const struct iwl_base_params iwl_sc_base_params = {
#define IWL_DEVICE_SC \
IWL_DEVICE_BZ_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_SC_EHT, \
.ht_params = &iwl_22000_ht_params
/*
@@ -149,10 +164,21 @@ const char iwl_sc_name[] = "Intel(R) TBD Sc device";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
- .uhb_supported = true,
IWL_DEVICE_SC,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_SC_EHT,
+};
+
+const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
+
+const struct iwl_cfg iwl_cfg_sc2 = {
+ .fw_name_mac = "sc2",
+ IWL_DEVICE_SC,
+};
+
+const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
+
+const struct iwl_cfg iwl_cfg_sc2f = {
+ .fw_name_mac = "sc2f",
+ IWL_DEVICE_SC,
};
MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
@@ -162,3 +188,7 @@ MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 5f3d5b15f727..52b008ce53bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1570,6 +1570,10 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
}
const struct ieee80211_ops iwlagn_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = iwlagn_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = iwlagn_mac_start,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index b96f30d11644..4caf2e25a297 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -4,7 +4,6 @@
* Copyright (C) 2019-2023 Intel Corporation
*/
#include <linux/uuid.h>
-#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -13,68 +12,21 @@
const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
-IWL_EXPORT_SYMBOL(iwl_guid);
-const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
- 0x81, 0x4F, 0x75, 0xE4,
- 0xDD, 0x26, 0xB5, 0xFD);
-IWL_EXPORT_SYMBOL(iwl_rfi_guid);
-
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "GOOGLE-ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- },
- },
- { .ident = "RAZER",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
- },
- },
- {}
+static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
+ [DSM_FUNC_QUERY] = sizeof(u32),
+ [DSM_FUNC_DISABLE_SRD] = sizeof(u8),
+ [DSM_FUNC_ENABLE_INDONESIA_5G2] = sizeof(u8),
+ [DSM_FUNC_ENABLE_6E] = sizeof(u32),
+ [DSM_FUNC_REGULATORY_CONFIG] = sizeof(u32),
+ /* Not supported in driver */
+ [5] = (size_t)0,
+ [DSM_FUNC_11AX_ENABLEMENT] = sizeof(u32),
+ [DSM_FUNC_ENABLE_UNII4_CHAN] = sizeof(u32),
+ [DSM_FUNC_ACTIVATE_CHANNEL] = sizeof(u32),
+ [DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32),
+ [DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
+ [DSM_FUNC_RFI_CONFIG] = sizeof(u32),
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
@@ -200,46 +152,41 @@ out:
}
/*
- * Evaluate a DSM with no arguments and a u8 return value,
+ * This function receives a DSM function number, calculates its expected size
+ * according to the Intel BIOS spec, and fills in the value as a 32-bit field.
+ * If the expected size is smaller than 32 bits, the value is zero-extended.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ size_t expected_size;
+ u64 tmp;
int ret;
- u64 val;
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u8));
+ BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS);
- if (ret < 0)
- return ret;
+ if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size)))
+ return -EINVAL;
- /* cast val (u64) to be u8 */
- *value = (u8)val;
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+ expected_size = acpi_dsm_size[func];
-/*
- * Evaluate a DSM with no arguments and a u32 return value,
- */
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- int ret;
- u64 val;
-
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u32));
+ /* Currently all ACPI DSMs are either 8-bit or 32-bit */
+ if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
+ return -EOPNOTSUPP;
- if (ret < 0)
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
+ &iwl_guid, &tmp, expected_size);
+ if (ret)
return ret;
- /* cast val (u64) to be u32 */
- *value = (u32)val;
+ if ((expected_size == sizeof(u8) && tmp != (u8)tmp) ||
+ (expected_size == sizeof(u32) && tmp != (u32)tmp))
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM value overflows the expected size, truncating\n");
+ *value = (u32)tmp;
+
return 0;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
static union acpi_object *
iwl_acpi_get_wifi_pkg_range(struct device *dev,
@@ -307,9 +254,8 @@ iwl_acpi_get_wifi_pkg(struct device *dev,
tbl_rev);
}
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -331,22 +277,9 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
ACPI_TYPE_INTEGER) {
u32 tas_selection =
(u32)wifi_pkg->package.elements[1].integer.value;
- u16 override_iec =
- (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
- u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
- ACPI_WTAS_ENABLE_IEC_POS;
- u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
-
- enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- if (fw_ver <= 3) {
- cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
- cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
- } else {
- cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
- cmd->v4.override_tas_iec = (u8)override_iec;
- cmd->v4.enable_tas_iec = (u8)enabled_iec;
- }
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ tas_selection);
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -365,22 +298,16 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
- APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->v4.block_list_size = cpu_to_le32(block_list_size);
+ tas_data->block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
- if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
- IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
- block_list_size);
- ret = -EINVAL;
- goto out_free;
- }
for (i = 0; i < block_list_size; i++) {
u32 country;
@@ -394,7 +321,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->v4.block_list_array[i] = cpu_to_le32(country);
+ tas_data->block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -403,19 +330,19 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
-int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
union acpi_object *wifi_pkg, *data;
u32 mcc_val;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -439,46 +366,42 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev)
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit)
{
union acpi_object *data, *wifi_pkg;
- u64 dflt_pwr_limit;
- int tbl_rev;
+ int tbl_rev, ret = -EINVAL;
- data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
- if (IS_ERR(data)) {
- dflt_pwr_limit = 0;
+ *dflt_pwr_limit = 0;
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data))
goto out;
- }
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
- wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
- dflt_pwr_limit = 0;
+ wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER)
goto out_free;
- }
- dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ *dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ ret = 0;
out_free:
kfree(data);
out:
- return dflt_pwr_limit;
+ return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_ECKV_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -499,11 +422,11 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
-static int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled, u8 num_chains, u8 num_sub_bands)
+static int iwl_acpi_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled, u8 num_chains,
+ u8 num_sub_bands)
{
int i, j, idx = 0;
@@ -511,8 +434,8 @@ static int iwl_sar_set_profile(union acpi_object *table,
* The table from ACPI is flat, but we store it in a
* structured array.
*/
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
+ for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) {
+ for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) {
/* if we don't have the values, use the default */
if (i >= num_chains || j >= num_sub_bands) {
profile->chains[i].subbands[j] = 0;
@@ -535,73 +458,7 @@ static int iwl_sar_set_profile(union acpi_object *table,
return 0;
}
-static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
- int i, j;
-
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
- struct iwl_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &fwrt->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
- profs[i]);
- /*
- * if one of the profiles is disabled, we
- * ignore all of them and return 1 to
- * differentiate disabled from other failures.
- */
- return 1;
- }
-
- IWL_DEBUG_INFO(fwrt,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
- for (j = 0; j < n_subbands; j++) {
- per_chain[i * n_subbands + j] =
- cpu_to_le16(prof->chains[i].subbands[j]);
- IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
- j, prof->chains[i].subbands[j]);
- }
- }
-
- return 0;
-}
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int i, ret = 0;
-
- for (i = 0; i < n_tables; i++) {
- ret = iwl_sar_fill_table(fwrt,
- &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
- n_subbands, prof_a, prof_b);
- if (ret)
- break;
- }
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
-
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
int ret, tbl_rev;
@@ -618,7 +475,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -634,7 +491,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -650,7 +507,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -680,16 +537,15 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
- flags & IWL_SAR_ENABLE_MSK,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
+ num_chains, num_sub_bands);
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
@@ -707,7 +563,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -723,7 +579,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -739,7 +595,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -767,7 +623,7 @@ read_table:
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILES_NUM - 1.
*/
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ if (n_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -776,13 +632,15 @@ read_table:
pos = 3;
for (i = 0; i < n_profiles; i++) {
+ union acpi_object *table = &wifi_pkg->package.elements[pos];
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
- ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
- &fwrt->sar_profiles[i + 1], enabled,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table,
+ &fwrt->sar_profiles[i + 1],
+ enabled, num_chains,
+ num_sub_bands);
if (ret < 0)
break;
@@ -794,9 +652,8 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
int i, j, k, ret, tbl_rev;
@@ -811,7 +668,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
.revisions = BIT(3),
.bands = ACPI_GEO_NUM_BANDS_REV2,
.profiles = ACPI_NUM_GEO_PROFILES_REV3,
- .min_profiles = 3,
+ .min_profiles = BIOS_GEO_MIN_PROFILE_NUM,
},
{
.revisions = BIT(2),
@@ -897,7 +754,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
read_table:
fwrt->geo_rev = tbl_rev;
for (i = 0; i < num_profiles; i++) {
- for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
+ for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) {
union acpi_object *entry;
/*
@@ -921,7 +778,7 @@ read_table:
entry->integer.value;
}
- for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
+ for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) {
/* same here as above */
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].chains[k] =
@@ -949,151 +806,26 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
-
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- /*
- * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
- * earlier firmware versions. Unfortunately, we don't have a
- * TLV API flag to rely on, so rely on the major version which
- * is in the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
- fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles)
-{
- int i, j;
-
- if (!fwrt->geo_enabled)
- return -ENODATA;
-
- if (!iwl_sar_geo_support(fwrt))
- return -EOPNOTSUPP;
-
- for (i = 0; i < n_profiles; i++) {
- for (j = 0; j < n_bands; j++) {
- struct iwl_per_chain_offset *chain =
- &table[i * n_bands + j];
-
- chain->max_tx_power =
- cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
- chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
- chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
- IWL_DEBUG_RADIO(fwrt,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j,
- fwrt->geo_profiles[i].bands[j].chains[0],
- fwrt->geo_profiles[i].bands[j].chains[1],
- fwrt->geo_profiles[i].bands[j].max);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- int ret;
- u8 value;
- u32 val;
- __le32 config_bitmap = 0;
-
- /*
- * Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'.
- * Setting config_bitmap Indonesia bit is valid only for HR/JF.
- */
- switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
- case IWL_CFG_RF_TYPE_HR1:
- case IWL_CFG_RF_TYPE_HR2:
- case IWL_CFG_RF_TYPE_JF1:
- case IWL_CFG_RF_TYPE_JF2:
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2,
- &iwl_guid, &value);
-
- if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
- break;
- default:
- break;
- }
-
- /*
- ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
- */
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_DISABLE_SRD,
- &iwl_guid, &value);
- if (!ret) {
- if (value == DSM_VALUE_SRD_PASSIVE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
- else if (value == DSM_VALUE_SRD_DISABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
- }
-
- if (fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
- /*
- ** Evaluate func 'DSM_FUNC_REGULATORY_CONFIG'
- */
- ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0,
- DSM_FUNC_REGULATORY_CONFIG,
- &iwl_guid, &val);
- /*
- * China 2022 enable if the BIOS object does not exist or
- * if it is enabled in BIOS.
- */
- if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
- }
-
- return config_bitmap;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands = 0;
int idx = 2;
- u8 cmd_ver;
-
- fwrt->ppag_flags = 0;
- fwrt->ppag_table_valid = false;
data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
+ if (tbl_rev >= 1 && tbl_rev <= 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
+ "Reading PPAG table (tbl_rev=%d)\n",
tbl_rev);
goto read_table;
} else {
@@ -1116,6 +848,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto read_table;
}
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
read_table:
fwrt->ppag_ver = tbl_rev;
flags = &wifi_pkg->package.elements[1];
@@ -1125,19 +860,8 @@ read_table:
goto out_free;
}
- fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) {
- ret = -EINVAL;
- goto out_free;
- }
- if (!fwrt->ppag_flags && cmd_ver <= 3) {
- ret = 0;
- goto out_free;
- }
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value,
+ fwrt->ppag_ver);
/*
* read, verify gain values and save them into the PPAG table.
@@ -1155,132 +879,15 @@ read_table:
}
fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
- /* from ver 4 the fw deals with out of range values */
- if (cmd_ver >= 4)
- continue;
- if ((j == 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- ret = -EINVAL;
- goto out_free;
- }
}
}
- fwrt->ppag_table_valid = true;
ret = 0;
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
-
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size)
-{
- u8 cmd_ver;
- int i, j, num_sub_bands;
- s8 *gain;
-
- /* many firmware images for JF lie about this */
- if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
- return -EOPNOTSUPP;
-
- if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(fwrt,
- "PPAG capability not supported by FW, command not sent.\n");
- return -EINVAL;
- }
-
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) {
- IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
- return -EINVAL;
- }
-
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
-
- IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd->v1.gain[0];
- *cmd_size = sizeof(cmd->v1);
- if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
- /* in this case FW supports revision 0 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is %d, send truncated table\n",
- fwrt->ppag_ver);
- }
- } else if (cmd_ver >= 2 && cmd_ver <= 4) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- if (fwrt->ppag_ver == 0) {
- /* in this case FW supports revisions 1 or 2 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is 0, send padded table\n");
- }
- } else {
- IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
- return -EINVAL;
- }
-
- /* ppag mode */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits were read from bios: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
- if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_ver == 2)) {
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
- } else {
- IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
- }
-
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits going to be sent: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
-
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- fwrt->ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
-{
-
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(fwrt,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- fwrt->ppag_flags = 0;
- return false;
- }
-
- return true;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters)
@@ -1293,7 +900,6 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (IS_ERR(data))
return;
- /* try to read wtas table revision 1 or revision 0*/
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
@@ -1303,13 +909,14 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (tbl_rev != 0)
goto out_free;
- BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
- if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER)
- return;
+ if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
tmp.filter_cfg_chains[i] =
- cpu_to_le32(wifi_pkg->package.elements[i].integer.value);
+ cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
@@ -1318,3 +925,38 @@ out_free:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters);
+
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_GLAI_METHOD);
+ if (IS_ERR(data))
+ return;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_GLAI_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != 0) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid GLAI revision: %d\n", tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[1].integer.value > ACPI_GLAI_MAX_STATUS)
+ goto out_free;
+
+ fwrt->uefi_tables_lock_status =
+ wifi_pkg->package.elements[1].integer.value;
+
+ IWL_DEBUG_RADIO(fwrt,
+ "Loaded UEFI WIFI GUID lock status: %d from ACPI\n",
+ fwrt->uefi_tables_lock_status);
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status);
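With the acpi_dsm_size[] table, callers no longer choose between u8 and u32 accessors: they name the DSM function and always receive the result widened into a u32. A usage sketch; the caller and its fallback policy are hypothetical:

static u32 example_read_enable_6e(struct iwl_fw_runtime *fwrt)
{
	u32 val;

	/* expected size is looked up from acpi_dsm_size[DSM_FUNC_ENABLE_6E] */
	if (iwl_acpi_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &val))
		return 0;	/* hypothetical: missing DSM means disabled */

	return val;
}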
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index e9277f6f3582..1d32b82f73db 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -7,6 +7,7 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/regulatory.h"
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
@@ -25,6 +26,7 @@
#define ACPI_PPAG_METHOD "PPAG"
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WPFC_METHOD "WPFC"
+#define ACPI_GLAI_METHOD "GLAI"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -56,187 +58,90 @@
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
-#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */
+#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
/* revision 0 and 1 are identical, except for the semantics in the FW */
#define ACPI_GEO_NUM_BANDS_REV0 2
#define ACPI_GEO_NUM_BANDS_REV2 3
-#define ACPI_GEO_NUM_CHAINS 2
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
-
+/*
+ * One element for domain type,
+ * and one for the status
+ */
+#define ACPI_GLAI_WIFI_DATA_SIZE 2
+#define ACPI_GLAI_MAX_STATUS 2
/*
 * TAS size: 1 element for type,
* 1 element for enabled field,
* 1 element for block list size,
* 16 elements for block list array
*/
-#define APCI_WTAS_BLACK_LIST_MAX 16
-#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
-#define ACPI_WTAS_ENABLED_MSK 0x1
-#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
-#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
-#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
-#define ACPI_WTAS_ENABLE_IEC_POS 0x2
-#define ACPI_WTAS_USA_UHB_MSK BIT(16)
-#define ACPI_WTAS_USA_UHB_POS 16
-
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX)
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 2)
-/* PPAG gain value bounds in 1/8 dBm */
-#define ACPI_PPAG_MIN_LB -16
-#define ACPI_PPAG_MAX_LB 24
-#define ACPI_PPAG_MIN_HB -16
-#define ACPI_PPAG_MAX_HB 40
-#define ACPI_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_SAR_ENABLE_MSK BIT(0)
#define IWL_REDUCE_POWER_FLAGS_POS 1
-/*
- * The profile for revision 2 is a superset of revision 1, which is in
- * turn a superset of revision 0. So we can store all revisions
- * inside revision 2, which is what we represent here.
- */
-struct iwl_sar_profile_chain {
- u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-struct iwl_sar_profile {
- bool enabled;
- struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_geo_profile_band {
- u8 max;
- u8 chains[ACPI_GEO_NUM_CHAINS];
-};
-
-struct iwl_geo_profile {
- struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_ppag_chain {
- s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-enum iwl_dsm_funcs_rev_0 {
- DSM_FUNC_QUERY = 0,
- DSM_FUNC_DISABLE_SRD = 1,
- DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
- DSM_FUNC_ENABLE_6E = 3,
- DSM_FUNC_REGULATORY_CONFIG = 4,
- DSM_FUNC_11AX_ENABLEMENT = 6,
- DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8,
- DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
-};
-
-enum iwl_dsm_values_srd {
- DSM_VALUE_SRD_ACTIVE,
- DSM_VALUE_SRD_PASSIVE,
- DSM_VALUE_SRD_DISABLE,
- DSM_VALUE_SRD_MAX
-};
-
-enum iwl_dsm_values_indonesia {
- DSM_VALUE_INDONESIA_DISABLE,
- DSM_VALUE_INDONESIA_ENABLE,
- DSM_VALUE_INDONESIA_RESERVED,
- DSM_VALUE_INDONESIA_MAX
-};
-
-/* DSM RFI uses a different GUID, so need separate definitions */
-
-#define DSM_RFI_FUNC_ENABLE 3
-
-enum iwl_dsm_values_rfi {
- DSM_VALUE_RFI_ENABLE,
- DSM_VALUE_RFI_DISABLE,
- DSM_VALUE_RFI_MAX
-};
-
-enum iwl_dsm_masks_reg {
- DSM_MASK_CHINA_22_REG = BIT(2)
-};
+/* The indicator of whether the UEFI WiFi GUID tables are locked is read from ACPI */
+#define UEFI_WIFI_GUID_UNLOCKED 0
+
+#define ACPI_DSM_REV 0
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
extern const guid_t iwl_guid;
-extern const guid_t iwl_rfi_guid;
-
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value);
-
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value);
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @mcc: output buffer (3 bytes) that will get the MCC
*
* This function tries to read the current MCC from ACPI if available.
*/
-int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev);
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit);
/*
* iwl_acpi_get_eckv - read external clock validation from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @extl_clk: output var (2 bytes) that will get the clk indication.
*
* This function tries to read the external clock indication
* from ACPI if available.
*/
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b);
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt);
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles);
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
-
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters);
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
+
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
@@ -245,92 +150,61 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+static inline int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
return -ENOENT;
}
-static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+static inline int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
{
+ *dflt_pwr_limit = 0;
return 0;
}
-static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+static inline int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
return -ENOENT;
}
-static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
+static inline int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
-{
- return -ENOENT;
-}
-
-static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
return 1;
}
-static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- return false;
-}
-
-static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+static inline int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
{
return -ENOENT;
}
-static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- return 0;
-}
-
static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
- union iwl_ppag_table_cmd *cmd, int *cmd_size)
-{
- return -ENOENT;
-}
+/* macro since the type of the second argument doesn't always exist */
+#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0)
-static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
{
- return false;
}
-static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters)
+static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ return -ENOENT;
}
-
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index 3e81e9369224..bc27e15488f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2023 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@@ -170,7 +171,11 @@ enum iwl_bt_ci_compliance {
* @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
* @ttc_status: is TTC enabled - one bit per PHY
* @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
+ * The following fields are only for version 5, and are reserved in version 4:
+ * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is
+ * utilizing) when the RSSI is low (<= -65 dBm)
+ * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
+ * BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
@@ -182,7 +187,10 @@ struct iwl_bt_coex_profile_notif {
__le32 bt_activity_grading;
u8 ttc_status;
u8 rrc_status;
- __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
+ u8 wifi_loss_low_rssi;
+ u8 wifi_loss_mid_high_rssi;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4
+ * BT_COEX_PROFILE_NTFY_API_S_VER_5
+ */
#endif /* __iwl_fw_api_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index ea99d41040d2..d2a74beed3a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -324,7 +324,7 @@ struct iwl_wowlan_patterns_cmd {
u8 n_patterns;
/**
- * @n_patterns: sta_id
+ * @sta_id: sta_id
*/
u8 sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 751b596ea1a5..0f7903c5a4df 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -101,7 +101,7 @@ enum iwl_data_path_subcmd_ids {
RX_NO_DATA_NOTIF = 0xF5,
/**
- * @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode,
+ * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
* &struct iwl_thermal_dual_chain_request
*/
THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 394747deb269..47c914de2992 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -319,7 +319,7 @@ struct iwl_fw_ini_conf_set_tlv {
* @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration
- * @IWL_FW_INI_ALLOCATION_NUM: max number of configuration supported
+ * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configurations supported
*/
enum iwl_fw_ini_config_set_type {
@@ -360,6 +360,7 @@ enum iwl_fw_ini_allocation_id {
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
* @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
+ * @IWL_FW_INI_LOCATION_NUM: number of valid locations
*/
enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_INVALID,
@@ -439,6 +440,7 @@ enum iwl_fw_ini_region_device_memory_subtype {
* Hard coded time points in which the driver can send hcmd or perform dump
* collection
*
+ * @IWL_FW_INI_TIME_POINT_INVALID: invalid timepoint
* @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW
* @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif
* @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence
@@ -553,7 +555,7 @@ enum iwl_fw_ini_dump_policy {
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
*
* @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions
- * @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
* @IWL_FW_INI_DUMP_VERBOSE : dump all regions
*/
enum iwl_fw_ini_dump_type {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 798731ecbefd..b31ae6889bd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -394,7 +394,7 @@ struct iwl_buf_alloc_cmd {
*
* @first_word: magic word value
* @second_word: magic word value
- * @framfrags: DRAM fragmentaion detail
+ * @dram_frags: DRAM fragmentation detail
*/
struct iwl_dram_info {
__le32 first_word;
@@ -537,7 +537,7 @@ enum iwl_fw_dbg_config_cmd_type {
}; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */
/* this token disables debug asserts in the firmware */
-#define IWL_FW_DBG_CONFIG_TOKEN 0x00011301
+#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001
/**
* struct iwl_fw_dbg_config_cmd - configure FW debug
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index b044990c7b87..25530a29317e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -630,6 +630,7 @@ enum iwl_location_frame_format {
* @IWL_LOCATION_BW_20MHZ: 20MHz
* @IWL_LOCATION_BW_40MHZ: 40MHz
* @IWL_LOCATION_BW_80MHZ: 80MHz
+ * @IWL_LOCATION_BW_160MHZ: 160MHz
*/
enum iwl_location_bw {
IWL_LOCATION_BW_20MHZ,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index f15e6d64c298..c6d1f5644638 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -242,9 +242,9 @@ struct iwl_mac_low_latency_cmd {
* @esr_transition_timeout: the timeout required by the AP for the
* eSR transition.
* Available only from version 2 of the command.
- * This values comes from the EMLSR transition delay in the EML
+ * This value comes from the EMLSR transition delay in the EML
* Capabilities subfield.
- * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j.
+ * @medium_sync_delay: the value as it appears in P802.11be_D2.2 Figure 9-1002j.
* @assoc_id: unique ID assigned by the AP during association
* @reserved1: alignment
* @data_policy: see &enum iwl_mac_data_policy
@@ -317,7 +317,6 @@ enum iwl_mac_config_filter_flags {
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
* len delim to determine if AGG or single.
* @client: client mac data
- * @go_ibss: mac data for go or ibss
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
@@ -374,7 +373,7 @@ struct iwl_mac_config_cmd {
* iwl_link_ctx_cfg_cmd::bss_color_disable
* @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask.
* This flag can be set only if the MAC that this link relates to has
- * eht_support set to true.
+ * eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -447,6 +446,7 @@ enum iwl_link_ctx_flags {
* @listen_lmac: indicates whether the link should be allocated on the Listen
* Lmac or on the Main Lmac. Cannot be changed on an active Link.
* Relevant only for eSR.
+ * @reserved1: in version 2, listen_lmac became reserved
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
* @cck_short_preamble: 1 for enabling short preamble, 0 otherwise
@@ -462,7 +462,7 @@ enum iwl_link_ctx_flags {
* @bi: beacon interval in TU, applicable only when associated
* @dtim_interval: DTIM interval in TU.
* Relevant only for GO, otherwise this is offloaded.
- * @puncture_mask: puncture mask for EHT
+ * @puncture_mask: puncture mask for EHT (removed in VER_3)
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @flags: a combination from &enum iwl_link_ctx_flags
* @flags_mask: what of %flags have changed. Also &enum iwl_link_ctx_flags
@@ -472,10 +472,10 @@ enum iwl_link_ctx_flags {
* @bssid_index: index of the associated VAP
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @spec_link_id: link_id as the AP knows it
- * @reserved: alignment
+ * @reserved2: alignment
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
- * @reserved1: reserved for future use
+ * @reserved3: reserved for future use
*/
struct iwl_link_config_cmd {
__le32 action;
@@ -486,7 +486,10 @@ struct iwl_link_config_cmd {
__le16 reserved_for_local_link_addr;
__le32 modify_mask;
__le32 active;
- __le32 listen_lmac;
+ union {
+ __le32 listen_lmac;
+ __le32 reserved1;
+ };
__le32 cck_rates;
__le32 ofdm_rates;
__le32 cck_short_preamble;
@@ -502,7 +505,7 @@ struct iwl_link_config_cmd {
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
__le32 bi;
__le32 dtim_interval;
- __le16 puncture_mask;
+ __le16 puncture_mask; /* removed in _VER_3 */
__le16 frame_time_rts_th;
__le32 flags;
__le32 flags_mask;
@@ -512,11 +515,11 @@ struct iwl_link_config_cmd {
u8 bssid_index;
u8 bss_color;
u8 spec_link_id;
- u8 reserved;
+ u8 reserved2;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
- __le32 reserved1[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */
+ __le32 reserved3[8];
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 55882190251c..545826973a80 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -431,8 +431,8 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
-#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
+#define MAX_CHANNEL_BW_INDX_API_D_VER_1 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 5
/**
* struct iwl_he_pkt_ext_v1 - QAM thresholds
@@ -455,7 +455,7 @@ enum iwl_he_pkt_ext_constellations {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v1 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_1][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
/**
@@ -480,7 +480,7 @@ struct iwl_he_pkt_ext_v1 {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v2 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 7ec959244ffc..58034dfa7e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_nvm_reg_h__
#define __iwl_fw_api_nvm_reg_h__
+#include "fw/regulatory.h"
/**
* enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
*/
@@ -438,36 +439,30 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
-#define IWL_TAS_BLOCK_LIST_MAX 16
/**
- * struct iwl_tas_config_cmd_v2 - configures the TAS
+ * struct iwl_tas_config_cmd_common - configures the TAS.
+ * This is also the v2 structure.
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
*/
-struct iwl_tas_config_cmd_v2 {
+struct iwl_tas_config_cmd_common {
__le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
*/
struct iwl_tas_config_cmd_v3 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
__le16 override_tas_iec;
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
- * struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
@@ -475,19 +470,20 @@ struct iwl_tas_config_cmd_v3 {
* @reserved: reserved
*/
struct iwl_tas_config_cmd_v4 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
u8 reserved;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
-union iwl_tas_config_cmd {
- struct iwl_tas_config_cmd_v2 v2;
- struct iwl_tas_config_cmd_v3 v3;
- struct iwl_tas_config_cmd_v4 v4;
+struct iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_common common;
+ union {
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+ };
};
+
/**
* enum iwl_lari_config_masks - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 306ed88de463..08a2c416ce60 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -142,6 +142,8 @@ struct iwl_phy_context_cmd_v1 {
* @lmac_id: the lmac id the phy context belongs to
* @ci: channel info
* @rxchain_info: ???
+ * @sbb_bandwidth: 0 - disabled, 1 - 40 MHz ... 4 - 320 MHz
+ * @sbb_ctrl_channel_loc: location of the control channel
* @dsp_cfg_flags: set to 0
* @reserved: reserved to align to 64 bit
*/
@@ -152,9 +154,20 @@ struct iwl_phy_context_cmd {
/* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
struct iwl_fw_channel_info ci;
__le32 lmac_id;
- __le32 rxchain_info; /* reserved in _VER_4 */
+ union {
+ __le32 rxchain_info; /* reserved in _VER_4 */
+ struct { /* used for _VER_5/_VER_6 */
+ u8 sbb_bandwidth;
+ u8 sbb_ctrl_channel_loc;
+ __le16 puncture_mask; /* added in VER_6 */
+ };
+ };
__le32 dsp_cfg_flags;
__le32 reserved;
-} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3,
+ * PHY_CONTEXT_CMD_API_VER_4,
+ * PHY_CONTEXT_CMD_API_VER_5,
+ * PHY_CONTEXT_CMD_API_VER_6
+ */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 040d83fa5424..0bf38243f88a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -506,13 +506,40 @@ struct iwl_geo_tx_power_profiles_resp {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
/**
+ * enum iwl_ppag_flags - PPAG enable masks
+ * @IWL_PPAG_ETSI_MASK: enable PPAG in ETSI
+ * @IWL_PPAG_CHINA_MASK: enable PPAG in China
+ * @IWL_PPAG_ETSI_LPI_UHB_MASK: enable LPI in ETSI for UHB
+ * @IWL_PPAG_ETSI_VLP_UHB_MASK: enable VLP in ETSI for UHB
+ * @IWL_PPAG_ETSI_SP_UHB_MASK: enable SP in ETSI for UHB
+ * @IWL_PPAG_USA_LPI_UHB_MASK: enable LPI in USA for UHB
+ * @IWL_PPAG_USA_VLP_UHB_MASK: enable VLP in USA for UHB
+ * @IWL_PPAG_USA_SP_UHB_MASK: enable SP in USA for UHB
+ * @IWL_PPAG_CANADA_LPI_UHB_MASK: enable LPI in CANADA for UHB
+ * @IWL_PPAG_CANADA_VLP_UHB_MASK: enable VLP in CANADA for UHB
+ * @IWL_PPAG_CANADA_SP_UHB_MASK: enable SP in CANADA for UHB
+ */
+enum iwl_ppag_flags {
+ IWL_PPAG_ETSI_MASK = BIT(0),
+ IWL_PPAG_CHINA_MASK = BIT(1),
+ IWL_PPAG_ETSI_LPI_UHB_MASK = BIT(2),
+ IWL_PPAG_ETSI_VLP_UHB_MASK = BIT(3),
+ IWL_PPAG_ETSI_SP_UHB_MASK = BIT(4),
+ IWL_PPAG_USA_LPI_UHB_MASK = BIT(5),
+ IWL_PPAG_USA_VLP_UHB_MASK = BIT(6),
+ IWL_PPAG_USA_SP_UHB_MASK = BIT(7),
+ IWL_PPAG_CANADA_LPI_UHB_MASK = BIT(8),
+ IWL_PPAG_CANADA_VLP_UHB_MASK = BIT(9),
+ IWL_PPAG_CANADA_SP_UHB_MASK = BIT(10),
+};
+
+/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: version 1
* @v2: version 2
- *
- * @flags: bit 0 - indicates enablement of PPAG for ETSI
- * bit 1 - indicates enablement of PPAG for CHINA BIOS
- * bit 1 can be used only in v3 (identical to v2)
+ * versions 3, 4 and 5 use the same structure as v2,
+ * but with a different format of the flags bitmap
+ * @flags: values from &enum iwl_ppag_flags
* @gain: table of antenna gain values per chain and sub-band
* @reserved: reserved
*/
@@ -529,6 +556,11 @@ union iwl_ppag_table_cmd {
} v2;
} __packed;
+#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+ IWL_PPAG_ETSI_LPI_UHB_MASK | \
+ IWL_PPAG_USA_LPI_UHB_MASK)
+
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
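For concreteness, the two command masks defined in this hunk evaluate as follows (derived from the enum iwl_ppag_flags bit assignments above; shown here only as a worked example):

/* IWL_PPAG_CMD_V4_MASK = BIT(0) | BIT(1)         = 0x003
 * IWL_PPAG_CMD_V5_MASK = 0x003 | BIT(2) | BIT(5) = 0x027
 * i.e. v5 additionally keeps the ETSI and USA LPI UHB bits.
 */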
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index d62fed543276..d7f8a276b683 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021, 2023 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,6 +109,7 @@ enum iwl_sta_flags {
* @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_AMSDU_SPP: SPP (signaling and payload protected) A-MSDU
* @STA_KEY_FLG_KEYID_MSK: the index of the key
* @STA_KEY_FLG_KEYID_POS: key index bit position
* @STA_KEY_NOT_VALID: key is invalid
@@ -129,6 +130,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_EN_MSK = (7 << 0),
STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_AMSDU_SPP = BIT(7),
STA_KEY_FLG_KEYID_POS = 8,
STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
STA_KEY_NOT_VALID = BIT(11),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 842360b1e995..d9e4c75403b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -76,6 +76,8 @@ enum iwl_tx_flags {
* to a secured STA
* @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
* selection, retry limits and BT kill
+ * @IWL_TX_FLAGS_RTS: firmware used an RTS
+ * @IWL_TX_FLAGS_CTS: firmware used CTS-to-self
*/
enum iwl_tx_cmd_flags {
IWL_TX_FLAGS_CMD_RATE = BIT(0),
@@ -884,6 +886,7 @@ struct iwl_tx_path_flush_cmd {
/**
* struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the tid to flush
* @queue_num: virtual queue id
* @read_before_flush: read pointer before flush
* @read_after_flush: read pointer after flush
@@ -897,6 +900,7 @@ struct iwl_flush_queue_info {
/**
* struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: the station for which the queue was flushed
* @num_flushed_queues: number of queues in queues array
* @queues: all flushed queues
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 9c69d3674384..e6c0f928a6bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo {
IWL_GEN2_TRIG_TX_FIFO_VO,
};
+enum iwl_bz_tx_fifo {
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+};
/**
* enum iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index e27774e7ed74..db6d7013df66 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -19,7 +19,6 @@
* @fwrt_ptr: pointer to the buffer coming from fwrt
* @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
* transport's data.
- * @trans_len: length of the valid data in trans_ptr
* @fwrt_len: length of the valid data in fwrt_ptr
*/
struct iwl_fw_dump_ptrs {
@@ -1728,10 +1727,12 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
/**
* mask_apply_and_normalize - applies mask on val and normalize the result
*
- * The normalization is based on the first set bit in the mask
- *
* @val: value
* @mask: mask to apply and to normalize with
+ *
+ * The normalization is based on the first set bit in the mask
+ *
+ * Returns: the extracted value
*/
static u32 mask_apply_and_normalize(u32 val, u32 mask)
{
@@ -2200,15 +2201,16 @@ struct iwl_dump_ini_mem_ops {
};
/**
- * iwl_dump_ini_mem
- *
- * Creates a dump tlv and copy a memory region into it.
- * Returns the size of the current dump tlv or 0 if failed
+ * iwl_dump_ini_mem - dump memory region
*
* @fwrt: fw runtime struct
* @list: list to add the dump tlv to
* @reg_data: memory region
* @ops: memory dump operations
+ *
+ * Creates a dump tlv and copy a memory region into it.
+ *
+ * Returns: the size of the current dump tlv or 0 if failed
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_dump_ini_region_data *reg_data,
@@ -2427,9 +2429,12 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_debug_info_tlv *debug_info =
(void *)node->tlv.data;
+ BUILD_BUG_ON(sizeof(cfg_name->cfg_name) !=
+ sizeof(debug_info->debug_cfg_name));
+
cfg_name->image_type = debug_info->image_type;
cfg_name->cfg_name_len =
- cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ cpu_to_le32(sizeof(cfg_name->cfg_name));
memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
sizeof(cfg_name->cfg_name));
cfg_name++;
@@ -2873,7 +2878,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
- schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq, &wk_data->wk,
+ usecs_to_jiffies(delay));
return 0;
}
@@ -3175,7 +3181,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
else
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq,
+ &fwrt->dump.wks[idx].wk,
+ usecs_to_jiffies(delay));
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index eb38c686b5cb..98d56e778d99 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -306,8 +306,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
_iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
}
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
-
static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
struct iwl_lmac_alive *lmac,
struct iwl_umac_alive *umac)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 06d6f7f66430..5c76e3b94968 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2014, 2018-2024 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,7 +16,7 @@
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
- * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents
* @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
* &struct iwl_fw_error_dump_txcmd packets
* @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info
@@ -24,21 +24,24 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
* @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
* &struct iwl_fw_error_dump_rb
- * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
* paged to the DRAM.
* @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_INTERNAL_TXF: internal TX FIFO data
* @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
* for that reason is not in use in any other place in the Linux Wi-Fi
* stack.
* @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem,
* which we get from the fw after ALIVE. The content is structured as
* &struct iwl_fw_error_dump_smem_cfg.
+ * @IWL_FW_ERROR_DUMP_D3_DEBUG_DATA: D3 debug data
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -59,8 +62,6 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MEM_CFG = 16,
IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
-
- IWL_FW_ERROR_DUMP_MAX,
};
/**
@@ -442,7 +443,7 @@ struct iwl_fw_ini_err_table_dump {
* struct iwl_fw_error_dump_rb - content of an Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
- * @reserved:
+ * @reserved: reserved
* @data: the content of the Receive Buffer
*/
struct iwl_fw_error_dump_rb {
@@ -488,7 +489,7 @@ struct iwl_fw_ini_special_device_memory {
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
- * @reserved:
+ * @reserved: reserved
* @data: the content of the page block
*/
struct iwl_fw_error_dump_paging {
@@ -511,6 +512,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
/**
* enum iwl_fw_dbg_trigger - triggers available
*
+ * @FW_DBG_TRIGGER_INVALID: invalid trigger value
* @FW_DBG_TRIGGER_USER: trigger log collection by user
* This should not be defined as a trigger to the driver, but a value the
* driver should set to indicate that the trigger was initiated by the
@@ -530,14 +532,15 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
* @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
- * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
- * threshold.
- * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency
+ * goes above a threshold.
+ * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
* @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts
* @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
* in the driver.
+ * @FW_DBG_TRIGGER_MAX: not a trigger itself, used for sizing arrays etc.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index bfc39bd5bbc6..f69d29e531c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2008-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -216,6 +216,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* ADD_MODIFY_STA_KEY_API_S_VER_2.
* @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement.
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
+ * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL: support for adaptive dwell in scanning
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
@@ -239,14 +240,21 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
* @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
* STA_CONTEXT_DOT11AX_API_S
+ * @IWL_UCODE_TLV_API_FTM_RTT_ACCURACY: version 7 of the range response API
+ * is supported by FW, this indicates the RTT confidence value
* @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar
* version tables.
* @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
- * SCAN_CONFIG_DB_CMD_API_S.
+ * SCAN_CONFIG_DB_CMD_API_S.
+ * @IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP: support for setting adaptive dwell
+ * number of APs in the 5 GHz band
+ * @IWL_UCODE_TLV_API_BAND_IN_RX_DATA: FW reports band number in RX notification
* @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: Firmware offloaded the station disable tx
* logic.
* @IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR: Firmware supports clearing the debug
* internal buffer
+ * @IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD: Firmware doesn't need the host to
+ * configure the smart fifo
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -287,6 +295,7 @@ enum iwl_ucode_tlv_api {
/* API Set 2 */
IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = (__force iwl_ucode_tlv_api_t)66,
IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = (__force iwl_ucode_tlv_api_t)67,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD = (__force iwl_ucode_tlv_api_t)68,
NUM_IWL_UCODE_TLV_API
/*
@@ -383,6 +392,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* channels even when these are not enabled.
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
* complete to FW.
+ * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload
+ * protected) A-MSDU.
+ * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -468,6 +480,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = (__force iwl_ucode_tlv_capa_t)103,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106,
@@ -480,7 +493,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114,
IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116,
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
-
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
@@ -566,6 +579,7 @@ enum iwl_fw_dbg_reg_operator {
* struct iwl_fw_dbg_reg_op - an operation on a register
*
* @op: &enum iwl_fw_dbg_reg_operator
+ * @reserved: reserved
* @addr: offset of the register
* @val: value
*/
@@ -612,6 +626,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
* @size_power: buffer size will be 2^(size_power + 11)
+ * @reserved: reserved
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -722,6 +737,8 @@ enum iwl_fw_dbg_trigger_vif_type {
* @trig_dis_ms: the time, in milliseconds, after an occurrence of this
* trigger in which another occurrence should be ignored.
* @flags: &enum iwl_fw_dbg_trigger_flags
+ * @reserved: reserved (for alignment)
+ * @data: trigger data
*/
struct iwl_fw_dbg_trigger_tlv {
__le32 id;
@@ -762,7 +779,7 @@ struct iwl_fw_dbg_trigger_missed_bcon {
/**
* struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
- * cmds: the list of commands to trigger the collection on
+ * @cmds: the list of commands to trigger the collection on
*/
struct iwl_fw_dbg_trigger_cmd {
struct cmd {
@@ -772,7 +789,7 @@ struct iwl_fw_dbg_trigger_cmd {
} __packed;
/**
- * iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
* @stop_offset: the offset of the value to be monitored
* @stop_threshold: the threshold above which to collect
* @start_offset: the offset of the value to be monitored
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 650e4bde9c17..1195e708caa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -12,6 +12,8 @@
#include "fw/api/alive.h"
#include "fw/uefi.h"
+#define IWL_PNVM_REDUCED_CAP_BIT BIT(25)
+
struct iwl_pnvm_section {
__le32 offset;
const u8 data[];
@@ -173,6 +175,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;
+ u32 rf_type;
len -= sizeof(*tlv);
tlv = (const void *)data;
@@ -201,6 +204,16 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
+ trans->reduced_cap_sku = false;
+ rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
+ if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) &&
+ rf_type == IWL_CFG_RF_TYPE_FM)
+ trans->reduced_cap_sku = true;
+
+ IWL_DEBUG_FW(trans,
+ "Reduced SKU device %d\n",
+ trans->reduced_cap_sku);
+
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
@@ -239,7 +252,7 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
}
new_len = pnvm->size;
- *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
if (!*data)
@@ -255,21 +268,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
struct pnvm_sku_package *package;
u8 *image = NULL;
- /* First attempt to get the PNVM from BIOS */
- package = iwl_uefi_get_pnvm(trans_p, len);
- if (!IS_ERR_OR_NULL(package)) {
- if (*len >= sizeof(*package)) {
- /* we need only the data */
- *len -= sizeof(*package);
- image = kmemdup(package->data, *len, GFP_KERNEL);
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (trans_p->sku_id[2]) {
+ package = iwl_uefi_get_pnvm(trans_p, len);
+ if (!IS_ERR_OR_NULL(package)) {
+ if (*len >= sizeof(*package)) {
+ /* we need only the data */
+ *len -= sizeof(*package);
+ image = kvmemdup(package->data,
+ *len, GFP_KERNEL);
+ }
+ /*
+			 * free package regardless of whether kvmemdup
+ * succeeded
+ */
+ kfree(package);
+ if (image)
+ return image;
}
- /* free package regardless of whether kmemdup succeeded */
- kfree(package);
- if (image)
- return image;
}
- /* If it's not available, try from the filesystem */
+ /* If it's not available, or for Intel SKU, try from the filesystem */
if (iwl_pnvm_get_from_fs(trans_p, &image, len))
return NULL;
return image;
@@ -314,7 +333,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
set:
iwl_trans_set_pnvm(trans, capa);
free:
- kfree(data);
+ kvfree(data);
kfree(pnvm_data);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
new file mode 100644
index 000000000000..36d506463e0e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/dmi.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "regulatory.h"
+#include "fw/runtime.h"
+#include "fw/uefi.h"
+
+#define GET_BIOS_TABLE(__name, ...) \
+do { \
+ int ret = -ENOENT; \
+ if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \
+ ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \
+ if (ret < 0) \
+ ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \
+ return ret; \
+} while (0)
+
+#define IWL_BIOS_TABLE_LOADER(__name) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \
+{GET_BIOS_TABLE(__name, fwrt); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \
+ data_type * data) \
+{GET_BIOS_TABLE(__name, fwrt, data); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+IWL_BIOS_TABLE_LOADER(wrds_table);
+IWL_BIOS_TABLE_LOADER(ewrd_table);
+IWL_BIOS_TABLE_LOADER(wgds_table);
+IWL_BIOS_TABLE_LOADER(ppag_table);
+IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
+IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
+IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
+IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
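To make the macro plumbing above concrete, this is approximately what one of the instantiations expands to after preprocessing (a sketch, not literal preprocessor output): the UEFI variable is consulted first, but only when the WIFI GUID tables are locked, and the ACPI table is the fallback.

int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
	do {
		int ret = -ENOENT;

		/* UEFI variables are only trusted when locked */
		if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)
			ret = iwl_uefi_get_wrds_table(fwrt);
		/* fall back to ACPI on any UEFI failure */
		if (ret < 0)
			ret = iwl_acpi_get_wrds_table(fwrt);
		return ret;
	} while (0);
}
IWL_EXPORT_SYMBOL(iwl_bios_get_wrds_table);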
+
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "GOOGLE-ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ { .ident = "RAZER",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ {}
+};
+
+static const struct dmi_system_id dmi_tas_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "LENOVO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "MSI",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+ * earlier firmware versions. Unfortunately, we don't have a
+ * TLV API flag to rely on, so rely on the major version which
+ * is in the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles)
+{
+ int i, j;
+
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < n_profiles; i++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
+
+ chain->max_tx_power =
+ cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
+ chain->chain_a =
+ fwrt->geo_profiles[i].bands[j].chains[0];
+ chain->chain_b =
+ fwrt->geo_profiles[i].bands[j].chains[1];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j,
+ fwrt->geo_profiles[i].bands[j].chains[0],
+ fwrt->geo_profiles[i].bands[j].chains[1],
+ fwrt->geo_profiles[i].bands[j].max);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);
+
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
+ int i, j;
+
+ for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
+ if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /*
+ * if one of the profiles is disabled, we
+ * ignore all of them and return 1 to
+ * differentiate disabled from other failures.
+ */
+ return 1;
+ }
+
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < n_subbands; j++) {
+ per_chain[i * n_subbands + j] =
+ cpu_to_le16(prof->chains[i].subbands[j]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->chains[i].subbands[j]);
+ }
+ }
+
+ return 0;
+}
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
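The flat per_chain buffer filled by iwl_sar_fill_profile() is laid out table-major, then chain, then subband. A minimal sketch of the equivalent index computation (iwl_sar_idx is a hypothetical helper for illustration, not part of the driver):

/* entry for (table t, chain c, subband s) in a
 * [n_tables][BIOS_SAR_NUM_CHAINS][n_subbands] layout
 */
static inline int iwl_sar_idx(u32 n_subbands, int t, int c, int s)
{
	return t * BIOS_SAR_NUM_CHAINS * n_subbands + c * n_subbands + s;
}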
+
+static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
+ int subband)
+{
+ s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];
+
+ if ((subband == 0 &&
+ (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
+ (subband != 0 &&
+ (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
+ return false;
+ }
+ return true;
+}
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+ bool send_ppag_always;
+
+ /* many firmware images for JF lie about this */
+ if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+ return -EOPNOTSUPP;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD), 1);
+ /*
+	 * Starting from ver 4, the driver needs to send the PPAG CMD
+	 * regardless of whether PPAG is enabled/disabled or valid/invalid.
+ */
+ send_ppag_always = cmd_ver > 3;
+
+ /* Don't send PPAG if it is disabled */
+ if (!send_ppag_always && !fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+
+ IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver >= 1) {
+ /* in this case FW supports revision 0 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d, send truncated table\n",
+ fwrt->ppag_ver);
+ }
+ } else if (cmd_ver >= 2 && cmd_ver <= 5) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ /* in this case FW supports revisions 1,2 or 3 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is 0, send padded table\n");
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ /* ppag mode */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits were read from bios: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ if (cmd_ver == 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
+ else if (cmd_ver < 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
+
+ if ((cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
+ (cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
+ }
+
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ if (!send_ppag_always &&
+ !iwl_ppag_value_valid(fwrt, i, j))
+ return -EINVAL;
+
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);
+
+bool iwl_is_tas_approved(void)
+{
+ return dmi_check_system(dmi_tas_approved_list);
+}
+IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection)
+{
+ u8 override_iec = u32_get_bits(tas_selection,
+ IWL_WTAS_OVERRIDE_IEC_MSK);
+ u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
+ u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
+ int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ tas_selection);
+
+ tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
+ tas_data->override_tas_iec = override_iec;
+ tas_data->enable_tas_iec = enabled_iec;
+
+ return enabled;
+}
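A worked example of the bit layout parsed above, using an illustrative BIOS value:

/* tas_selection = 0x10005 (bits 0, 2 and 16 set):
 *   IWL_WTAS_ENABLED_MSK      (0x1)     -> enabled = 1
 *   IWL_WTAS_OVERRIDE_IEC_MSK (0x2)     -> override_tas_iec = 0
 *   IWL_WTAS_ENABLE_IEC_MSK   (0x4)     -> enable_tas_iec = 1
 *   IWL_WTAS_USA_UHB_MSK      (BIT(16)) -> usa_tas_uhb_allowed = 1
 */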
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u32 val;
+ __le32 config_bitmap = 0;
+
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_HR1:
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &val);
+
+ if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ break;
+ default:
+ break;
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
+ if (!ret) {
+ if (val == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (val == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
+ &val);
+ /*
+		 * Enable China 2022 regulatory support if the BIOS object
+		 * does not exist or if it is enabled in the BIOS.
+ */
+ if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
+ }
+
+ return config_bitmap;
+}
+IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ GET_BIOS_TABLE(dsm, fwrt, func, value);
+}
+IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
new file mode 100644
index 000000000000..28e774766847
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef __fw_regulatory_h__
+#define __fw_regulatory_h__
+
+#include "fw/img.h"
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
+#define BIOS_SAR_MAX_PROFILE_NUM 4
+/*
+ * Each SAR profile has up to 4 chains (depending on the table revision):
+ * chain A, chain B, chain A when in CDB, chain B when in CDB
+ */
+#define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4
+#define BIOS_SAR_NUM_CHAINS 2
+#define BIOS_SAR_MAX_SUB_BANDS_NUM 11
+
+#define BIOS_GEO_NUM_CHAINS 2
+#define BIOS_GEO_MAX_NUM_BANDS 3
+#define BIOS_GEO_MAX_PROFILE_NUM 8
+#define BIOS_GEO_MIN_PROFILE_NUM 3
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+
+/* PPAG gain value bounds in 1/8 dBm */
+#define IWL_PPAG_MIN_LB -16
+#define IWL_PPAG_MAX_LB 24
+#define IWL_PPAG_MIN_HB -16
+#define IWL_PPAG_MAX_HB 40
+
+#define IWL_PPAG_ETSI_CHINA_MASK 3
+#define IWL_PPAG_REV3_MASK 0x7FF
+
+#define IWL_WTAS_BLACK_LIST_MAX 16
+#define IWL_WTAS_ENABLED_MSK 0x1
+#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
+#define IWL_WTAS_ENABLE_IEC_MSK 0x4
+#define IWL_WTAS_USA_UHB_MSK BIT(16)
+
+/*
+ * The profile for revision 2 is a superset of revision 1, which is in
+ * turn a superset of revision 0. So we can store all revisions
+ * inside revision 2, which is what we represent here.
+ */
+
+/*
+ * struct iwl_sar_profile_chain - per-chain values of a SAR profile
+ * @subbands: the SAR value for each subband
+ */
+struct iwl_sar_profile_chain {
+ u8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+/*
+ * struct iwl_sar_profile - SAR profile from SAR tables
+ * @enabled: whether the profile is enabled or not
+ * @chains: per-chain SAR values
+ */
+struct iwl_sar_profile {
+ bool enabled;
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+
+/*
+ * struct iwl_geo_profile_band - per-band geo SAR offsets
+ * @max: the max tx power allowed for the band
+ * @chains: SAR offsets values for each chain
+ */
+struct iwl_geo_profile_band {
+ u8 max;
+ u8 chains[BIOS_GEO_NUM_CHAINS];
+};
+
+/*
+ * struct iwl_geo_profile - geo profile
+ * @bands: per-band table of the SAR offsets
+ */
+struct iwl_geo_profile {
+ struct iwl_geo_profile_band bands[BIOS_GEO_MAX_NUM_BANDS];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+ s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+struct iwl_tas_data {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+};
+
+/* For DSM revision 0 and 4 */
+enum iwl_dsm_funcs {
+ DSM_FUNC_QUERY = 0,
+ DSM_FUNC_DISABLE_SRD = 1,
+ DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_ENABLE_6E = 3,
+ DSM_FUNC_REGULATORY_CONFIG = 4,
+ DSM_FUNC_11AX_ENABLEMENT = 6,
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7,
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
+ DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
+ DSM_FUNC_RFI_CONFIG = 11,
+ DSM_FUNC_NUM_FUNCS = 12,
+};
+
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
+enum iwl_dsm_values_rfi {
+ DSM_VALUE_RFI_DLVR_DISABLE = BIT(0),
+ DSM_VALUE_RFI_DDR_DISABLE = BIT(1),
+};
+
+#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\
+ DSM_VALUE_RFI_DDR_DISABLE)
+
+enum iwl_dsm_masks_reg {
+ DSM_MASK_CHINA_22_REG = BIT(2)
+};
+
+struct iwl_fw_runtime;
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles);
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b);
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
+bool iwl_is_tas_approved(void);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection);
+
+int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+
+int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+
+int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+
+static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
+ const u8 ppag_ver)
+{
+ return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
+ IWL_PPAG_REV3_MASK);
+}
+#endif /* __fw_regulatory_h__ */
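
iwl_bios_get_ppag_flags() above keys the valid-bits mask off the PPAG table revision: revisions below 3 only carry the ETSI and China enable bits (IWL_PPAG_ETSI_CHINA_MASK, 0x3), while revision 3 widens the field to eleven valid bits (IWL_PPAG_REV3_MASK, 0x7FF). A worked example with a hypothetical raw ppag_modes value:

	u32 modes = 0x7AB;	/* hypothetical raw ppag_modes from BIOS */

	u32 v2 = iwl_bios_get_ppag_flags(modes, 2);	/* 0x7AB & 0x3   == 0x3 */
	u32 v3 = iwl_bios_get_ppag_flags(modes, 3);	/* 0x7AB & 0x7FF == 0x7AB */
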
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 357727774db9..b2bc4fd37abf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -14,6 +14,7 @@
#include "fw/api/power.h"
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
+#include "fw/regulatory.h"
struct iwl_fw_runtime_ops {
void (*dump_start)(void *ctx);
@@ -100,6 +101,11 @@ struct iwl_txf_iter_data {
* @dump: debug dump data
* @uats_enabled: VLP or AFC AP is enabled
* @uats_table: AP type table
+ * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
+ * 0: Unlocked, 1 and 2: Locked.
+ * Only read the UEFI variables if locked.
+ * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
+ * @geo_profiles: geographic profiles as read from WGDS BIOS table
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -158,24 +164,22 @@ struct iwl_fw_runtime {
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-#ifdef CONFIG_ACPI
- struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM];
u8 sar_chain_a_profile;
u8 sar_chain_b_profile;
- struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
+ u8 reduced_power_flags;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
u32 geo_rev;
u32 geo_num_profiles;
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
- u32 ppag_ver;
- bool ppag_table_valid;
+ u8 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
- u8 reduced_power_flags;
- bool uats_enabled;
struct iwl_uats_table_cmd uats_table;
-#endif
+ u8 uefi_tables_lock_status;
+ bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 2964c5fb11e9..e81fc0129b9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -76,6 +76,42 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
return data;
}
+static
+void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
+{
+ void *var;
+ unsigned long var_size;
+
+ var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
+ &var_size);
+
+ if (IS_ERR(var)) {
+ IWL_DEBUG_RADIO(trans,
+ "%s UEFI variable not found 0x%lx\n", var_name,
+ PTR_ERR(var));
+ return var;
+ }
+
+ if (var_size < expected_size) {
+ IWL_DEBUG_RADIO(trans,
+ "Invalid %s UEFI variable len (%lu)\n",
+ var_name, var_size);
+ kfree(var);
+ return ERR_PTR(-EINVAL);
+ }
+
+ IWL_DEBUG_RADIO(trans, "%s from UEFI with size %lu\n", var_name,
+ var_size);
+
+ if (size)
+ *size = var_size;
+ return var;
+}
+
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
{
@@ -230,26 +266,13 @@ u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
unsigned long package_size;
u8 *data;
- package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME,
- &IWL_EFI_VAR_GUID, &package_size);
-
- if (IS_ERR(package)) {
- IWL_DEBUG_FW(trans,
- "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
- PTR_ERR(package), package_size);
+ package = iwl_uefi_get_verified_variable(trans,
+ IWL_UEFI_REDUCED_POWER_NAME,
+ "Reduced Power",
+ sizeof(*package),
+ &package_size);
+ if (IS_ERR(package))
return ERR_CAST(package);
- }
-
- if (package_size < sizeof(*package)) {
- IWL_DEBUG_FW(trans,
- "Invalid Reduced Power UEFI variable len (%lu)\n",
- package_size);
- kfree(package);
- return ERR_PTR(-EINVAL);
- }
-
- IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
- package_size);
IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
package->rev, package->total_size, package->n_skus);
@@ -283,32 +306,15 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat
void iwl_uefi_get_step_table(struct iwl_trans *trans)
{
struct uefi_cnv_common_step_data *data;
- unsigned long package_size;
int ret;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
-
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "STEP UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
+ "STEP", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid STEP table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return;
- }
-
- IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_step_parse(data, trans);
if (ret < 0)
@@ -318,7 +324,6 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table);
-#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
{
@@ -355,31 +360,15 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_sgom_data *data;
- unsigned long package_size;
int ret;
if (!fwrt->geo_enabled)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "SGOM UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
- return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid SGOM table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_SGOM_NAME,
+ "SGOM", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_sgom_parse(data, fwrt);
if (ret < 0)
@@ -404,28 +393,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_uats_data *data;
- unsigned long package_size;
int ret;
- data = iwl_uefi_get_variable(IWL_UEFI_UATS_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "UATS UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME,
+ "UATS", sizeof(*data), NULL);
+ if (IS_ERR(data))
return -EINVAL;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid UATS table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return -EINVAL;
- }
-
- IWL_DEBUG_FW(trans, "Read UATS from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_uats_parse(data, fwrt);
if (ret < 0) {
@@ -438,4 +411,298 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
return 0;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
-#endif /* CONFIG_ACPI */
+
+static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
+ struct uefi_sar_profile *uefi_sar_prof,
+ u8 prof_index, bool enabled)
+{
+ memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof,
+ sizeof(struct uefi_sar_profile));
+
+ fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK;
+}
+
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wrds *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME,
+ "WRDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode);
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ewrd *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME,
+ "EWRD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_EWRD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < data->num_profiles; i++)
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1,
+ data->mode);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wgds *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME,
+ "WGDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WGDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles < BIOS_GEO_MIN_PROFILE_NUM ||
+ data->num_profiles > BIOS_GEO_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Invalid number of profiles in WGDS: %d\n",
+ data->num_profiles);
+ goto out;
+ }
+
+ fwrt->geo_rev = data->revision;
+ for (i = 0; i < data->num_profiles; i++)
+ memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i],
+ sizeof(struct iwl_geo_profile));
+
+ fwrt->geo_num_profiles = data->num_profiles;
+ fwrt->geo_enabled = true;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ppag *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME,
+ "PPAG", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision < IWL_UEFI_MIN_PPAG_REV ||
+ data->revision > IWL_UEFI_MAX_PPAG_REV) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ fwrt->ppag_ver = data->revision;
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
+ fwrt->ppag_ver);
+
+ BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
+ memcpy(&fwrt->ppag_chains, &data->ppag_chains,
+ sizeof(data->ppag_chains));
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
+{
+ struct uefi_cnv_var_wtas *uefi_tas;
+ int ret = 0, enabled, i;
+
+ uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME,
+ "WTAS", sizeof(*uefi_tas), NULL);
+ if (IS_ERR(uefi_tas))
+ return -EINVAL;
+
+ if (uefi_tas->revision != IWL_UEFI_WTAS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n",
+ uefi_tas->revision);
+ goto out;
+ }
+
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ uefi_tas->tas_selection);
+ if (!enabled) {
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n",
+ uefi_tas->revision);
+ if (uefi_tas->black_list_size > IWL_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %d\n",
+ uefi_tas->black_list_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ tas_data->block_list_size = cpu_to_le32(uefi_tas->black_list_size);
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size);
+
+ for (i = 0; i < uefi_tas->black_list_size; i++) {
+ tas_data->block_list_array[i] =
+ cpu_to_le32(uefi_tas->black_list[i]);
+ IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n",
+ uefi_tas->black_list[i]);
+ }
+out:
+ kfree(uefi_tas);
+ return ret;
+}
+
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ struct uefi_cnv_var_splc *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_SPLC_NAME,
+ "SPLC", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_SPLC_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI SPLC revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *dflt_pwr_limit = data->default_pwr_limit;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ struct uefi_cnv_var_wrdd *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDD_NAME,
+ "WRDD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->mcc != UEFI_MCC_CHINA) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n");
+ goto out;
+ }
+
+ mcc[0] = (data->mcc >> 8) & 0xff;
+ mcc[1] = data->mcc & 0xff;
+ mcc[2] = '\0';
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ struct uefi_cnv_var_eckv *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_ECKV_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *extl_clk = data->ext_clock_valid;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ struct uefi_cnv_var_general_cfg *data;
+ int ret = -EINVAL;
+
+ /* unsupported function index (5 is unused in enum iwl_dsm_funcs) */
+ if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
+ return -EOPNOTSUPP;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_DSM_NAME,
+ "DSM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_DSM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
+ goto out;
+ }
+
+ *value = data->functions[func];
+ ret = 0;
+out:
+ kfree(data);
+ return ret;
+}
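
iwl_uefi_get_mcc() above accepts only the China code and unpacks the 16-bit value into a NUL-terminated two-character string: the high byte is the first letter, the low byte the second. For UEFI_MCC_CHINA (0x434e) this yields "CN":

	u32 raw = UEFI_MCC_CHINA;	/* 0x434e */
	char mcc[3];

	mcc[0] = (raw >> 8) & 0xff;	/* 0x43 == 'C' */
	mcc[1] = raw & 0xff;		/* 0x4e == 'N' */
	mcc[2] = '\0';			/* mcc is now "CN" */
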
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index bf61a8df1225..303cc299d1bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -5,15 +5,38 @@
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
+#include "fw/regulatory.h"
+
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP"
#define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS"
+#define IWL_UEFI_WRDS_NAME L"UefiCnvWlanWRDS"
+#define IWL_UEFI_EWRD_NAME L"UefiCnvWlanEWRD"
+#define IWL_UEFI_WGDS_NAME L"UefiCnvWlanWGDS"
+#define IWL_UEFI_PPAG_NAME L"UefiCnvWlanPPAG"
+#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
+#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
+#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
+
#define IWL_SGOM_MAP_SIZE 339
#define IWL_UATS_MAP_SIZE 339
+#define IWL_UEFI_WRDS_REVISION 2
+#define IWL_UEFI_EWRD_REVISION 2
+#define IWL_UEFI_WGDS_REVISION 3
+#define IWL_UEFI_MIN_PPAG_REV 1
+#define IWL_UEFI_MAX_PPAG_REV 3
+#define IWL_UEFI_WTAS_REVISION 1
+#define IWL_UEFI_SPLC_REVISION 0
+#define IWL_UEFI_WRDD_REVISION 0
+#define IWL_UEFI_ECKV_REVISION 0
+#define IWL_UEFI_DSM_REVISION 4
+
struct pnvm_sku_package {
u8 rev;
u32 total_size;
@@ -42,6 +65,120 @@ struct uefi_cnv_common_step_data {
} __packed;
/*
+ * struct uefi_sar_profile - a SAR profile as defined in UEFI
+ *
+ * @chains: a per-chain table of SAR values
+ */
+struct uefi_sar_profile {
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wrds - WRDS table as defined in UEFI
+ *
+ * @revision: the revision of the table
+ * @mode: is WRDS enabled/disabled
+ * @sar_profile: sar profile #1
+ */
+struct uefi_cnv_var_wrds {
+ u8 revision;
+ u32 mode;
+ struct uefi_sar_profile sar_profile;
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mode: is EWRD enabled/disabled
+ * @num_profiles: how many additional profiles we have in this table (0-3)
+ * @sar_profiles: the additional SAR profiles (#2-#4)
+ */
+struct uefi_cnv_var_ewrd {
+ u8 revision;
+ u32 mode;
+ u32 num_profiles;
+ struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wgds - WGDS table as defined in UEFI
+ * @revision: the revision of the table
+ * @num_profiles: the number of geo profiles in the table.
+ * The first 3 are mandatory, and the table can hold up to 8.
+ * @geo_profiles: a per-profile table of the offsets to add to SAR values.
+ */
+struct uefi_cnv_var_wgds {
+ u8 revision;
+ u8 num_profiles;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ppag - PPAG table as defined in UEFI
+ * @revision: the revision of the table
+ * @ppag_modes: values from &enum iwl_ppag_flags
+ * @ppag_chains: the PPAG values per chain and band
+ */
+struct uefi_cnv_var_ppag {
+ u8 revision;
+ u32 ppag_modes;
+ struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+} __packed;
+
+/* struct uefi_cnv_var_wtas - WTAS table as defined in UEFI
+ * @revision: the revision of the table
+ * @tas_selection: the TAS enablement options
+ * @black_list_size: the number of defined entries in the black list
+ * @black_list: a list of countries that are not allowed to use the TAS feature
+ */
+struct uefi_cnv_var_wtas {
+ u8 revision;
+ u32 tas_selection;
+ u8 black_list_size;
+ u16 black_list[IWL_WTAS_BLACK_LIST_MAX];
+} __packed;
+
+/* struct uefi_cnv_var_splc - SPLC table as defined in UEFI
+ * @revision: the revision of the table
+ * @default_pwr_limit: The default maximum power per device
+ */
+struct uefi_cnv_var_splc {
+ u8 revision;
+ u32 default_pwr_limit;
+} __packed;
+
+#define UEFI_MCC_CHINA 0x434e
+
+/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mcc: country identifier as defined by the ISO 3166-1 alpha-2 code
+ */
+struct uefi_cnv_var_wrdd {
+ u8 revision;
+ u32 mcc;
+} __packed;
+
+/* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI
+ * @revision: the revision of the table
+ * @ext_clock_valid: indicates if the external 32 kHz clock is valid
+ */
+struct uefi_cnv_var_eckv {
+ u8 revision;
+ u32 ext_clock_valid;
+} __packed;
+
+#define UEFI_MAX_DSM_FUNCS 32
+
+/* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI
+ * @revision: the revision of the table
+ * @functions: payload of the different DSM functions
+ */
+struct uefi_cnv_var_general_cfg {
+ u8 revision;
+ u32 functions[UEFI_MAX_DSM_FUNCS];
+} __packed;
+
+/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
* disable the feature in old kernels.
@@ -55,6 +192,21 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
void iwl_uefi_get_step_table(struct iwl_trans *trans);
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data);
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -85,13 +237,56 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
{
return 0;
}
-#endif /* CONFIG_EFI */
-#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
-void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt);
-#else
+static inline int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ *dflt_pwr_limit = 0;
+ return 0;
+}
+
+static inline int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
@@ -103,6 +298,5 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
{
return 0;
}
-
-#endif
+#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
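
With this change the !CONFIG_EFI branch mirrors every UEFI getter with a static inline stub, so callers need no ifdefs: the table getters report -ENOENT (nothing found) while the power-limit stub succeeds with a zero default. A hypothetical caller-side sketch, assuming the common pattern of falling back to ACPI when UEFI has no data (iwl_acpi_get_wrds_table() is named here only for illustration):

	int ret;

	/* try UEFI first; with CONFIG_EFI disabled the stub gives -ENOENT */
	ret = iwl_uefi_get_wrds_table(fwrt);
	if (ret < 0)
		ret = iwl_acpi_get_wrds_table(fwrt);	/* hypothetical fallback */
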
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index ae6f1cd4d660..6aa4f7f9c708 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -12,6 +12,7 @@
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
#include "iwl-csr.h"
+#include "iwl-drv.h"
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@@ -418,6 +419,8 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BZ 0x46
#define IWL_CFG_MAC_TYPE_GL 0x47
#define IWL_CFG_MAC_TYPE_SC 0x48
+#define IWL_CFG_MAC_TYPE_SC2 0x49
+#define IWL_CFG_MAC_TYPE_SC2F 0x4A
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -442,6 +445,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_160 0x1
#define IWL_CFG_160 0x0
+#define IWL_CFG_NO_320 0x1
+#define IWL_CFG_320 0x0
+
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -471,6 +477,15 @@ struct iwl_dev_info {
const char *name;
};
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_dev_info iwl_dev_info_table[];
+extern const unsigned int iwl_dev_info_table_size;
+const struct iwl_dev_info *
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
+ u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
+ u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+#endif
+
/*
* This list declares the config structures for all devices.
*/
@@ -526,7 +541,10 @@ extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
+extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
+extern const char iwl_sc2_name[];
+extern const char iwl_sc2f_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -632,6 +650,8 @@ extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
+extern const struct iwl_cfg iwl_cfg_sc2;
+extern const struct iwl_cfg iwl_cfg_sc2f;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 3b14f6476743..561d0c261123 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -64,21 +64,22 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
-static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
- struct list_head *list)
+/* add a new TLV node, returning it so it can be modified */
+static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
+ struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
- node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
if (!node)
- return -ENOMEM;
+ return NULL;
memcpy(&node->tlv, tlv, sizeof(node->tlv));
memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
- return 0;
+ return &node->tlv;
}
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
@@ -103,10 +104,18 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
+ /* we use this as a string, ensure input was NUL terminated */
+ if (strnlen(debug_info->debug_cfg_name,
+ sizeof(debug_info->debug_cfg_name)) ==
+ sizeof(debug_info->debug_cfg_name))
+ return -EINVAL;
+
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
debug_info->debug_cfg_name);
- return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
@@ -175,7 +184,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
@@ -246,11 +257,9 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
- struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
u32 rf = le32_to_cpu(trig->reset_fw);
- struct iwl_ucode_tlv *dup = NULL;
- int ret;
+ struct iwl_ucode_tlv *new_tlv;
if (le32_to_cpu(tlv->length) < sizeof(*trig))
return -EINVAL;
@@ -267,20 +276,18 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
"WRT: time point %u for trigger TLV with reset_fw %u\n",
tp, rf);
trans->dbg.last_tp_resetfw = 0xFF;
+
+ new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+ if (!new_tlv)
+ return -ENOMEM;
+
if (!le32_to_cpu(trig->occurrences)) {
- dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
- GFP_KERNEL);
- if (!dup)
- return -ENOMEM;
- dup_trig = (void *)dup->data;
- dup_trig->occurrences = cpu_to_le32(-1);
- tlv = dup;
- }
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data;
- ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
- kfree(dup);
+ new_trig->occurrences = cpu_to_le32(-1);
+ }
- return ret;
+ return 0;
}
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
@@ -304,7 +311,9 @@ static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list))
+ return -ENOMEM;
+ return 0;
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
@@ -1096,7 +1105,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
node_trig = (void *)node_tlv->data;
}
- memcpy(node_trig->data + offset, trig->data, trig_data_len);
+ memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
node_tlv->length = cpu_to_le32(size);
if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
@@ -1148,7 +1157,9 @@ iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
if (!match) {
IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
le32_to_cpu(trig->time_point));
- return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ if (!iwl_dbg_tlv_add(trig_tlv, trig_list))
+ return -ENOMEM;
+ return 0;
}
return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
@@ -1234,7 +1245,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
}
}
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
tp, dump_data.trig->reset_fw);
IWL_DEBUG_FW(fwrt,
@@ -1244,22 +1255,22 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
fwrt->trans->dbg.last_tp_resetfw ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
IWL_DEBUG_FW(fwrt,
"WRT: stop only and no reload firmware\n");
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
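
The reworked iwl_dbg_tlv_add() replaces the open-coded sizeof(*node) + len allocation with struct_size(), which computes the size of a structure ending in a flexible array while checking for overflow, and it now returns the newly copied TLV so callers such as the trigger allocator can patch the copy in place instead of duplicating the TLV first. A self-contained sketch of that allocation pattern, with an illustrative node layout:

	#include <linux/list.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* illustrative layout: a list node wrapping a TLV with trailing data */
	struct demo_tlv {
		__le32 type;
		__le32 length;
		u8 data[];
	};

	struct demo_node {
		struct list_head list;
		struct demo_tlv tlv;
	};

	static struct demo_tlv *demo_tlv_add(const struct demo_tlv *tlv,
					     struct list_head *list)
	{
		u32 len = le32_to_cpu(tlv->length);
		struct demo_node *node;

		/* struct_size() == sizeof(*node) + len, with overflow checking */
		node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
		if (!node)
			return NULL;

		memcpy(&node->tlv, tlv, sizeof(node->tlv));	/* TLV header */
		memcpy(node->tlv.data, tlv->data, len);		/* TLV payload */
		list_add_tail(&node->list, list);

		/* returning the copy lets the caller modify it in place */
		return &node->tlv;
	}
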
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ffe2670720c9..4696d73c8971 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.ucode_capa.cmd_versions);
kfree(drv->fw.phy_integration_ver);
kfree(drv->trans->dbg.pc_data);
+ drv->trans->dbg.pc_data = NULL;
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -186,6 +187,7 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
rf = "hr";
+ rf_step = 'b';
break;
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
@@ -1423,35 +1425,25 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
- int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
/* also protects start/stop from racing against each other */
lockdep_assert_held(&iwlwifi_opmode_table_mtx);
- for (retry = 0; retry <= max_retry; retry++) {
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg,
- &drv->fw, dbgfs_dir);
-
- if (op_mode)
- return op_mode;
-
- if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
- break;
-
- IWL_ERR(drv, "retry init count %d\n", retry);
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+ if (op_mode)
+ return op_mode;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- debugfs_remove_recursive(drv->dbgfs_op_mode);
- drv->dbgfs_op_mode = NULL;
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
#endif
- }
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 3d1a27ba35c6..1549ff429549 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -6,6 +6,7 @@
#ifndef __iwl_drv_h__
#define __iwl_drv_h__
#include <linux/export.h>
+#include <kunit/visibility.h>
/* for all modules */
#define DRV_NAME "iwlwifi"
@@ -89,8 +90,13 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
-/* max retry for init flow */
-#define IWL_MAX_INIT_RETRY 2
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT
+#else
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT static
+#endif
#define FW_NAME_PRE_BUFSIZE 64
struct iwl_trans;
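
The new EXPORT_SYMBOL_IF_IWLWIFI_KUNIT()/VISIBLE_IF_IWLWIFI_KUNIT pair (built on kunit/visibility.h) lets a symbol stay static in production builds yet become non-static and exported when CONFIG_IWLWIFI_KUNIT_TESTS is enabled, matching the conditional iwl_pci_find_dev_info() declaration added to iwl-config.h. A definition-side sketch (the body is elided):

	VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
	iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
			      u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
			      u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
	{
		/* ... walk iwl_dev_info_table[] for the best match ... */
		return NULL;
	}
	EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
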
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 5aab64c63a13..2b290fab1ef2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -270,7 +270,7 @@ enum iwl_eeprom_enhanced_txpwr_flags {
};
/**
- * struct iwl_eeprom_enhanced_txpwr
+ * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limits
* @flags: entry flags
* @channel: channel number
* @chain_a_max: chain a max power in 1/2 dBm
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index e0400ba2ab74..6ba374efaacb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -570,18 +570,19 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * @closed_rb_num: [0:11] Indicates the index of the RB which was closed
+ * @closed_fr_num: [0:11] Indicates the index of the RX Frame which was closed
+ * @finished_rb_num: [0:11] Indicates the index of the current RB
* in which the last frame was written to
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * @finished_fr_num: [0:11] Indicates the index of the RX Frame
* which was transferred
+ * @__spare: reserved
*/
struct iwl_rb_status {
__le16 closed_rb_num;
__le16 closed_fr_num;
__le16 finished_rb_num;
- __le16 finished_fr_nam;
+ __le16 finished_fr_num;
__le32 __spare;
} __packed;
@@ -651,15 +652,15 @@ struct iwl_tfd_tb {
*
* This structure contains dma address and length of transmission address
*
- * @tb_len length of the tx buffer
- * @addr 64 bits dma address
+ * @tb_len: length of the tx buffer
+ * @addr: 64 bits dma address
*/
struct iwl_tfh_tb {
__le16 tb_len;
__le64 addr;
} __packed;
-/**
+/*
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs.
@@ -698,10 +699,11 @@ struct iwl_tfd {
/**
* struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
- * @ num_tbs 0-4 number of active tbs
- * 5 -15 reserved
- * @ tbs[25] transmit frame buffer descriptors
- * @ __pad padding
+ * @num_tbs:
+ * 0-4 number of active tbs
+ * 5-15 reserved
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
*/
struct iwl_tfh_tfd {
__le16 num_tbs;
@@ -718,10 +720,12 @@ struct iwl_tfh_tfd {
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * @tfd_offset:
+ * For devices up to 22000:
+ * 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * For 22000:
+ * 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
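
The @tfd_offset kernel-doc above packs two fields into each 16-bit entry; on devices up to 22000 the tx command byte count occupies the low 12 bits with the station index in the bits above it. A hedged extraction sketch (bc_tbl and idx are illustrative, not driver symbols):

	#include <linux/bitfield.h>

	u16 entry = le16_to_cpu(bc_tbl->tfd_offset[idx]);
	u16 byte_cnt = FIELD_GET(GENMASK(11, 0), entry);	/* tx byte count */
	u8 sta_id = FIELD_GET(GENMASK(15, 12), entry);		/* station index */
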
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 402896988686..baa39a18087a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -156,6 +156,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_80MHZ: 80 MHz channel okay
* @NVM_CHANNEL_160MHZ: 160 MHz channel okay
* @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client support for connecting to a UHB VLP AP
+ * @NVM_CHANNEL_AFC: client support for connecting to a UHB AFC AP
*/
enum iwl_nvm_channel_flags {
NVM_CHANNEL_VALID = BIT(0),
@@ -170,6 +172,8 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_80MHZ = BIT(10),
NVM_CHANNEL_160MHZ = BIT(11),
NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
};
/**
@@ -309,7 +313,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
/* Note: already can print up to 101 characters, 110 is the limit! */
IWL_DEBUG_DEV(dev, level,
- "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
chan, flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
@@ -322,7 +326,9 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
CHECK_AND_PRINT_I(40MHZ),
CHECK_AND_PRINT_I(80MHZ),
CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH));
+ CHECK_AND_PRINT_I(DC_HIGH),
+ CHECK_AND_PRINT_I(VLP),
+ CHECK_AND_PRINT_I(AFC));
#undef CHECK_AND_PRINT_I
}
@@ -366,6 +372,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
(flags & IEEE80211_CHAN_NO_IR))
flags |= IEEE80211_CHAN_IR_CONCURRENT;
+ /* Set the AP type for the UHB case. */
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
+
return flags;
}
@@ -668,7 +680,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 |
@@ -696,10 +707,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI,
.phy_cap_info[5] =
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US) |
IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP,
.phy_cap_info[6] =
IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
@@ -733,6 +745,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -745,7 +760,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
@@ -793,7 +807,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
@@ -801,7 +814,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI,
.phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US),
},
/* For all MCS and bandwidth, set 2 NSS for both Tx and
@@ -829,6 +843,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -892,8 +909,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
bool no_320;
- no_320 = !trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB;
+ no_320 = (!trans->trans_cfg->integrated &&
+ trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) ||
+ trans->reduced_cap_sku;
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -1020,8 +1038,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iftype_data->eht_cap.has_eht) {
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
- ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
- IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
@@ -1059,6 +1076,26 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &=
~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
}
+
+ if (trans->step_urm) {
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs11_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
+ }
+
+ if (trans->no_160)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ if (trans->reduced_cap_sku) {
+ memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
+ sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
+ ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
}
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
@@ -1575,7 +1612,8 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg)
+ const struct iwl_cfg *cfg,
+ bool uats_enabled)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1620,6 +1658,16 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags &= ~NL80211_RRF_NO_IR;
}
}
+
+ /* Set the AP type for the UHB case. */
+ if (uats_enabled) {
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
+ }
+
/*
* reg_capa is per regulatory domain so apply it for every channel
*/
@@ -1674,7 +1722,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver)
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled)
{
int ch_idx;
u16 ch_flags;
@@ -1740,7 +1788,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, reg_capa,
- cfg);
+ cfg, uats_enabled);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -2100,7 +2148,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
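
The new VLP/AFC handling is applied at two parallel layers with an inverted mapping: an NVM bit advertises capability, and its absence sets a "no client connection" restriction. Per-channel flags get IEEE80211_CHAN_NO_6GHZ_*_CLIENT unconditionally, while the regulatory rules get NL80211_RRF_NO_6GHZ_*_CLIENT only when @uats_enabled. Condensed sketch of both layers (variable names shortened for illustration):

	/* channel flags (iwl_get_channel_flags) - always applied */
	if (!(nvm_flags & NVM_CHANNEL_VLP))
		flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;

	/* regdomain rules (iwl_nvm_get_regdom_bw_flags) - gated on UATS */
	if (uats_enabled && !(nvm_flags & NVM_CHANNEL_AFC))
		reg_flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
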
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 651ed25b683b..fd9c3bed9407 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver);
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 3dc618a7c70f..1ca82f3e4ebf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -68,9 +68,11 @@ struct iwl_cfg;
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
- * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
+ * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Return %true if the device should be stopped by
* the transport immediately after the call. May sleep.
+ * Note that this must not return %true for newer devices using gen2 PCIe
+ * transport.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index dd32c287b983..a7d44df06eab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -368,12 +368,19 @@ enum {
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
};
-#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930
+
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
+#define CNVI_PMU_STEP_FLOW 0xA2D588
+#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2)
+
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
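
The new CNVI_AUX_MISC_CHIP accessor macros carve two fields out of one register read: the MAC step from bits 24-27 and the product type from the low 12 bits. A worked example with a hypothetical register value:

	u32 val = 0x5000930;	/* hypothetical CNVI_AUX_MISC_CHIP read */

	/* (0x5000930 & 0xf000000) >> 24 == 0x5 */
	u32 step = CNVI_AUX_MISC_CHIP_MAC_STEP(val);
	/* 0x5000930 & 0xfff == 0x930 == CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U */
	u32 prod = CNVI_AUX_MISC_CHIP_PROD_TYPE(val);
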
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 5789a8735976..b93cef7b2330 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -519,6 +519,7 @@ struct iwl_pnvm_image {
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
+ * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
* @txq_enable: setup a queue. To setup an AC queue, use the
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
@@ -528,6 +529,8 @@ struct iwl_pnvm_image {
* hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
+ * @txq_alloc: Allocate a new TX queue, may sleep.
+ * @txq_free: Free a previously allocated TX queue.
* @txq_set_shared_mode: change Tx queue shared/unshared marking
* @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
* @wait_txq_empty: wait until specific tx queue is empty. May sleep.
@@ -547,23 +550,27 @@ struct iwl_pnvm_image {
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
+ * @sw_reset: trigger software reset of the NIC
* @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
* Sleeping is not allowed between grab_nic_access and
* release_nic_access.
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
- * @set_bits_mask - set SRAM register according to value and mask.
+ * @set_bits_mask: set SRAM register according to value and mask.
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
+ * @sync_nmi: trigger a firmware NMI and wait for it to complete
* @load_pnvm: save the pnvm data in DRAM
* @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
* context info.
* @load_reduce_power: copy reduce power table to the corresponding DRAM memory
* @set_reduce_power: set reduce power table addresses in the sratch buffer
* @interrupts: disable/enable interrupts to transport
+ * @imr_dma_data: set up IMR DMA
+ * @rxq_dma_data: retrieve RX queue DMA data, see &struct iwl_trans_rxq_dma_data
*/
struct iwl_trans_ops {
@@ -775,7 +782,7 @@ struct iwl_self_init_dram {
* @imr_size: imr dram size received from fw
* @sram_addr: sram address from debug tlv
* @sram_size: sram size from debug tlv
- * @imr2sram_remainbyte`: size remained after each dma transfer
+ * @imr2sram_remainbyte: remaining size after each DMA transfer
* @imr_curr_addr: current dst address used during dma transfer
* @imr_base_addr: imr address received from fw
*/
@@ -822,12 +829,16 @@ struct iwl_pc_data {
* @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @unsupported_region_msk: unsupported regions out of active_regions
* @active_regions: active regions
* @debug_info_tlv_list: list of debug info TLVs
* @time_point: array of debug time points
* @periodic_trig_list: periodic triggers list
* @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
* @ucode_preset: preset based on ucode
+ * @restart_required: indicates debug restart is required
+ * @last_tp_resetfw: last handling of reset during debug timepoint
+ * @imr_data: IMR debug data allocation
* @dump_file_name_ext: dump file name extension
* @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
@@ -930,6 +941,7 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @block: queue is blocked
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
@@ -938,6 +950,8 @@ struct iwl_pcie_first_tb_buf {
* @id: queue id
* @low_mark: low watermark, resume queue if free space more than this
* @high_mark: high watermark, stop queue if free space less than this
+ * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
+ * @overflow_tx: need to transmit from overflow
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -990,10 +1004,19 @@ struct iwl_txq {
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @page_offs: offset from skb->cb to mac header page pointer
* @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
- * @queue_used - bit mask of used queues
- * @queue_stopped - bit mask of stopped queues
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
* @queue_alloc_cmd_ver: queue allocation command version
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -1026,27 +1049,35 @@ struct iwl_trans_txqs {
/**
* struct iwl_trans - transport common data
*
- * @csme_own - true if we couldn't get ownership on the device
- * @ops - pointer to iwl_trans_ops
- * @op_mode - pointer to the op_mode
+ * @csme_own: true if we couldn't get ownership on the device
+ * @ops: pointer to iwl_trans_ops
+ * @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
- * @cfg - pointer to the configuration
- * @drv - pointer to iwl_drv
+ * @cfg: pointer to the configuration
+ * @drv: pointer to iwl_drv
+ * @state: current device state
* @status: a bit-mask of transport status flags
- * @dev - pointer to struct device * that represents the device
+ * @dev: pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
- * @hw_rf_id a u32 with the device RF ID
- * @hw_crf_id a u32 with the device CRF ID
- * @hw_wfpm_id a u32 with the device wfpm ID
+ * @hw_rf_id: a u32 with the device RF ID
+ * @hw_cnv_id: a u32 with the device CNV ID
+ * @hw_crf_id: a u32 with the device CRF ID
+ * @hw_wfpm_id: a u32 with the device wfpm ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @sku_id: the SKU identifier (for PNVM matching)
+ * @pnvm_loaded: indicates PNVM was loaded
+ * @hw_rev: the revision data of the HW
* @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
+ * @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
+ * @command_groups: pointer to command group name list array
+ * @command_groups_size: array size of @command_groups
* @wide_cmd_header: true when ucode supports wide command header format
* @wait_command_queue: wait queue for sync commands
* @num_rx_queues: number of RX queues allocated by the transport;
@@ -1055,19 +1086,29 @@ struct iwl_trans_txqs {
* @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @dbgfs_dir: iwlwifi debugfs base dir for this device
+ * @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dbg: additional debug data, see &struct iwl_trans_debug
+ * @init_dram: FW initialization DMA data
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
+ * @name: the device name
* @txqs: transport tx queues data.
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
* only valid for discrete (not integrated) NICs
* @invalid_tx_cmd: invalid TX command buffer
+ * @reduced_cap_sku: reduced capability supported SKU
+ * @no_160: device not supporting 160 MHz
+ * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
+ * @trans_specific: data for the specific transport this is allocated for/with
*/
struct iwl_trans {
bool csme_own;
@@ -1090,6 +1131,8 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
u32 sku_id[3];
+ bool reduced_cap_sku;
+ u8 no_160:1, step_urm:1;
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
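[Annotation] The new no_160 and step_urm members are packed as one-bit fields sharing a single byte. A tiny standalone example of such packing (bit-field layout is implementation-defined, so this is illustrative only; the struct name is invented):

#include <stdio.h>

struct caps {
	unsigned char no_160:1, step_urm:1;	/* two flags, one byte */
};

int main(void)
{
	struct caps c = { .no_160 = 1 };

	printf("sizeof(struct caps)=%zu no_160=%u step_urm=%u\n",
	       sizeof(c), (unsigned)c.no_160, (unsigned)c.step_urm);
	return 0;
}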
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 9fe1761691ec..535edb51d1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -181,6 +181,9 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
u32 value;
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
return 0;
@@ -252,6 +255,124 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
+static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ /* Done already */
+ if (mvmvif->bt_coex_esr_disabled == !enable)
+ return;
+
+ mvmvif->bt_coex_esr_disabled = !enable;
+
+ /* Nothing to do */
+ if (mvmvif->esr_active == enable)
+ return;
+
+ if (enable) {
+ /* Try to re-enable eSR */
+ iwl_mvm_mld_select_links(mvm, vif, false);
+ return;
+ }
+
+ /*
+ * Find the primary link, as we want to switch to it and drop the
+ * secondary one.
+ */
+ link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
+ WARN_ON(link_id < 0);
+
+ ieee80211_set_active_links_async(vif,
+ vif->active_links & BIT(link_id));
+}
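[Annotation] Dropping back to a single link is a plain bitmask operation: keep only BIT(link_id) of the active-links map, with the lowest set bit as a fallback (as __iwl_mvm_suspend() does further down with __ffs()). Illustrative plain-C stand-ins, not the mac80211 API; __builtin_ctz() is the GCC analogue of __ffs():

#include <stdio.h>

static unsigned int keep_only(unsigned int active_links, int link_id)
{
	return active_links & (1u << link_id);	/* single-link bitmap */
}

static int first_link(unsigned int active_links)
{
	return __builtin_ctz(active_links);	/* requires active_links != 0 */
}

int main(void)
{
	unsigned int links = 0x5;	/* links 0 and 2 active */

	printf("keep link 2 -> 0x%x\n", keep_only(links, 2));	/* 0x4 */
	printf("first link  -> %d\n", first_link(links));	/* 0 */
	return 0;
}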
+
+/*
+ * This function receives the LB link id and checks if eSR should be
+ * enabled or disabled (due to BT coex)
+ */
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ bool have_wifi_loss_rate =
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ BT_PROFILE_NOTIFICATION, 0) > 4;
+ s8 link_rssi = 0;
+ u8 wifi_loss_rate;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
+ return true;
+
+ /* If LB link is the primary one we should always disable eSR */
+ if (link_id == primary_link)
+ return false;
+
+ /* The feature is not supported */
+ if (!have_wifi_loss_rate)
+ return true;
+
+ /*
+ * We might not have a link_info when checking whether we can
+ * (re)enable eSR - the LB link might not exist yet
+ */
+ if (link_info)
+ link_rssi = (s8)link_info->beacon_stats.avg_signal;
+
+ /*
+ * If we don't know the RSSI, take the lower wifi loss rate so we
+ * are more likely to enter eSR; if the RSSI then turns out to be
+ * low, we will get an update on it and exit eSR.
+ */
+ if (!link_rssi)
+ wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ else if (!mvmvif->bt_coex_esr_disabled)
+ /* RSSI needs to get really low to disable eSR... */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ else
+ /* ...And really high before we enable it back */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
+}
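[Annotation] The function above implements hysteresis between the two RSSI thresholds added to constants.h later in this patch (69 to disable eSR, 63 to re-enable it): the link must recover well past the point where eSR was dropped before it comes back, which avoids flapping. A self-contained C sketch of that state-dependent threshold selection; only the two threshold constants mirror the driver, the loss-table values are invented:

#include <stdbool.h>
#include <stdio.h>

#define DISABLE_ESR_THRESH 69	/* mirrors IWL_MVM_BT_COEX_DISABLE_ESR_THRESH */
#define ENABLE_ESR_THRESH  63	/* mirrors IWL_MVM_BT_COEX_ENABLE_ESR_THRESH */

static unsigned int pick_wifi_loss(int rssi, bool esr_disabled,
				   unsigned int loss_low_rssi,
				   unsigned int loss_mid_high_rssi)
{
	/* while eSR is off, the easier-to-reach enable threshold applies */
	int thresh = esr_disabled ? ENABLE_ESR_THRESH : DISABLE_ESR_THRESH;

	return rssi <= -thresh ? loss_low_rssi : loss_mid_high_rssi;
}

int main(void)
{
	/* eSR on: -65 dBm has not dropped to the -69 dBm disable point */
	printf("%u\n", pick_wifi_loss(-65, false, 9, 1));	/* 1: keep eSR */
	/* eSR off: -65 dBm has not yet recovered above -63 dBm */
	printf("%u\n", pick_wifi_loss(-65, true, 9, 1));	/* 9: stay off */
	return 0;
}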
+
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ usable_links);
+ bool enable;
+
+ /* Not assoc, not MLD vif or only one usable link */
+ if (primary_link < 0)
+ return;
+
+ enable = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, enable);
+}
+
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -297,6 +418,8 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
+ iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
+
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -432,6 +555,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
+ /* When BT is off this will be 0 */
+ if (data->notif->wifi_loss_low_rssi == BT_OFF)
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, true);
+
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
@@ -454,6 +581,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ rcu_read_unlock();
+ return;
+ }
+
iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
if (data.primary) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index c832068b5718..f5122c4678a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -11,6 +11,9 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
+#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
+#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4582afb149d7..70e03a9a937e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -450,9 +450,9 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
}
static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -461,16 +461,14 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct wowlan_key_rsc_v5_data data = {};
int i;
- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
if (!data.rsc)
return -ENOMEM;
- memset(data.rsc, 0xff, sizeof(*data.rsc));
-
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
data.rsc->mcast_key_id_map[i] =
IWL_MCAST_KEY_MAP_INVALID;
- data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ data.rsc->sta_id = cpu_to_le32(mvm_link->ap_sta_id);
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_wowlan_get_rsc_v5_data,
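[Annotation] The hunk above swaps kmalloc()+memset() for kzalloc(): the buffer now starts zero-filled and only the fields that must be non-zero (the "invalid" multicast key map) are set explicitly. A hedged userspace analogue with calloc(); the struct layout and constant are invented for illustration:

#include <stdlib.h>

#define MCAST_KEY_MAP_INVALID 0xff	/* invented stand-in */

struct rsc {
	unsigned char mcast_key_id_map[8];
	unsigned int sta_id;
};

static struct rsc *alloc_rsc(void)
{
	struct rsc *r = calloc(1, sizeof(*r));	/* kzalloc() analogue */

	if (!r)
		return NULL;
	/* only the map needs a non-zero initial value */
	for (size_t i = 0; i < sizeof(r->mcast_key_id_map); i++)
		r->mcast_key_id_map[i] = MCAST_KEY_MAP_INVALID;
	return r;
}

int main(void)
{
	struct rsc *r = alloc_rsc();

	free(r);
	return 0;
}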
@@ -494,7 +492,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
if (ver == 4) {
size = sizeof(*data.rsc_tsc);
data.rsc_tsc->sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else {
/* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */
size = sizeof(data.rsc_tsc->params);
@@ -668,10 +666,9 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct cfg80211_wowlan *wowlan)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
@@ -693,7 +690,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
pattern_cmd->n_patterns = wowlan->n_patterns;
if (ver >= 3)
- pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id;
+ pattern_cmd->sta_id = mvm_link->ap_sta_id;
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -723,14 +720,15 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_chanctx_conf *ctx;
u8 chains_static, chains_dynamic;
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef, ap_def;
int ret, i;
struct iwl_binding_cmd_v1 binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
struct iwl_time_quota_data *quota;
u32 status;
- if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) ||
+ ieee80211_vif_is_mld(vif)))
return -EINVAL;
/* add back the PHY */
@@ -744,12 +742,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EINVAL;
}
chandef = ctx->def;
+ ap_def = ctx->ap;
chains_static = ctx->rx_chains_static;
chains_dynamic = ctx->rx_chains_dynamic;
rcu_read_unlock();
ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef,
- chains_static, chains_dynamic);
+ &ap_def, chains_static, chains_dynamic);
if (ret)
return ret;
@@ -927,6 +926,9 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -987,7 +989,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
}
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1016,7 +1019,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
return -EIO;
}
- ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
+ ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1030,7 +1033,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (ver == 2) {
size = sizeof(tkip_data.tkip);
tkip_data.tkip.sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
} else {
@@ -1079,7 +1082,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
- kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ kek_kck_cmd.sta_id = cpu_to_le32(mvm_link->ap_sta_id);
if (cmd_ver == 4) {
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
@@ -1112,6 +1115,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct ieee80211_sta *ap_sta)
{
int ret;
@@ -1130,7 +1134,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
- ret = iwl_mvm_wowlan_config_key_params(mvm, vif);
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1142,7 +1146,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
- ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
+ ret = iwl_mvm_send_patterns(mvm, mvm_link, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
@@ -1223,6 +1227,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_mvm_vif_link_info *mvm_link;
struct iwl_d3_manager_config d3_cfg_cmd_data = {
/*
* Program the minimum sleep time to 10 seconds, as many
@@ -1237,7 +1242,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
- int ret;
+ int ret, primary_link;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1251,21 +1256,46 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
return -EINVAL;
}
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ return 1;
+
+ if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {
+ /*
+ * Select the 'best' link. May need to revisit: it seems
+ * better not to optimize for throughput but rather for range,
+ * reliability and power here - and select 2.4 GHz ...
+ */
+ primary_link =
+ iwl_mvm_mld_get_primary_link(mvm, vif,
+ vif->active_links);
+
+ if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
+ vif->active_links))
+ primary_link = __ffs(vif->active_links);
+
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+ } else {
+ primary_link = 0;
+ }
+
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
synchronize_net();
- vif = iwl_mvm_get_bss_vif(mvm);
- if (IS_ERR_OR_NULL(vif)) {
- ret = 1;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvm_link = mvmvif->link[primary_link];
+ if (WARN_ON_ONCE(!mvm_link)) {
+ ret = -EINVAL;
goto out_noreset;
}
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) {
+ if (mvm_link->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -1279,24 +1309,31 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = 0,
+ };
- wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
+ wowlan_config_cmd.sta_id = mvm_link->ap_sta_id;
ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id],
+ mvm->fw_id_to_mac_id[mvm_link->ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta)) {
ret = -EINVAL;
goto out_noreset;
}
+ ret = iwl_mvm_sta_ensure_queue(
+ mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ goto out_noreset;
+
ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
vif, mvmvif, ap_sta);
if (ret)
goto out_noreset;
ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
- vif, mvmvif, ap_sta);
+ vif, mvmvif, mvm_link, ap_sta);
if (ret)
goto out;
@@ -1462,7 +1499,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
wakeup.disconnect = true;
if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
@@ -1486,6 +1524,9 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
@@ -1839,9 +1880,12 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn));
break;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn));
memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn));
break;
+ default:
+ WARN_ON(1);
}
}
@@ -1931,7 +1975,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
struct iwl_mvm *mvm, u32 gtk_cipher)
{
- int i;
+ int i, j;
struct ieee80211_key_conf *key;
struct {
struct ieee80211_key_conf conf;
@@ -1975,7 +2019,15 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
key = ieee80211_gtk_rekey_add(vif, &conf.conf);
if (IS_ERR(key))
return false;
- iwl_mvm_set_key_rx_seq_idx(key, status, i);
+
+ for (j = 0; j < ARRAY_SIZE(status->gtk_seq); j++) {
+ if (!status->gtk_seq[j].valid ||
+ status->gtk_seq[j].key_id != key->keyidx)
+ continue;
+ iwl_mvm_set_key_rx_seq_idx(key, status, j);
+ break;
+ }
+ WARN_ON(j == ARRAY_SIZE(status->gtk_seq));
}
return true;
@@ -2065,7 +2117,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
.status = status,
};
int i;
-
u32 disconnection_reasons =
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
@@ -2073,9 +2124,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
- if (status->wakeup_reasons & disconnection_reasons)
- return false;
-
if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
WOWLAN_INFO_NOTIFICATION,
@@ -2136,6 +2184,9 @@ out:
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
}
+ if (status->wakeup_reasons & disconnection_reasons)
+ return false;
+
return true;
}
@@ -2193,7 +2244,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_igtk_status *data)
{
+ int i;
+
BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
if (!data->key_len)
return;
@@ -2205,7 +2259,10 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ WOWLAN_IGTK_MIN_INDEX;
memcpy(status->igtk.key, data->key, sizeof(data->key));
- memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn));
+
+ /* mac80211 expects big endian for memcmp() to work, convert */
+ for (i = 0; i < sizeof(data->ipn); i++)
+ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
}
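[Annotation] The added loop reverses the IPN byte order so that mac80211's memcmp()-based replay check compares big-endian values. A minimal userspace sketch of the same reversal (plain C, not driver code; the example PN is invented):

#include <stdio.h>
#include <string.h>

static void reverse_bytes(unsigned char *dst, const unsigned char *src, size_t n)
{
	for (size_t i = 0; i < n; i++)
		dst[i] = src[n - i - 1];
}

int main(void)
{
	/* PN = 1 in little-endian order, as delivered by the firmware */
	unsigned char le_ipn[6] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
	unsigned char be_ipn[6];

	reverse_bytes(be_ipn, le_ipn, sizeof(le_ipn));
	/* big-endian form 00 00 00 00 00 01: memcmp() now orders correctly */
	for (size_t i = 0; i < sizeof(be_ipn); i++)
		printf("%02x ", be_ipn[i]);
	printf("\n");
	return 0;
}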
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
@@ -2839,6 +2896,9 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA :
mvmvif->deflink.ap_sta_id;
+ /* getting here is a bug - FW with MLO support sends the status notification */
+ WARN_ON(ieee80211_vif_is_mld(vif));
+
d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
@@ -2947,7 +3007,7 @@ static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
if (results->matched_profiles) {
memcpy(results->matches, notif->matches, matches_len);
- d3_data->nd_results_valid = TRUE;
+ d3_data->nd_results_valid = true;
}
/* no scan should be active at this point */
@@ -3345,6 +3405,7 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ unsigned long end = jiffies + 60 * HZ;
u32 pme_asserted;
while (true) {
@@ -3358,6 +3419,12 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
if (msleep_interruptible(100))
break;
+
+ if (time_is_before_jiffies(end)) {
+ IWL_ERR(mvm,
+ "ending pseudo-D3 with timeout after ~60 seconds\n");
+ return -ETIMEDOUT;
+ }
}
return 0;
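[Annotation] The added deadline check bounds the pseudo-D3 poll loop to roughly one minute instead of letting it spin forever. A hedged userspace analogue of the same compute-deadline-then-poll pattern, using CLOCK_MONOTONIC in place of jiffies and time_is_before_jiffies():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool deadline_passed(const struct timespec *end)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > end->tv_sec ||
	       (now.tv_sec == end->tv_sec && now.tv_nsec >= end->tv_nsec);
}

int main(void)
{
	struct timespec end;

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_sec += 60;	/* ~60 second budget, like the loop above */

	while (true) {
		/* ... poll for the wakeup condition here ... */
		usleep(100 * 1000);	/* msleep(100) analogue */
		if (deadline_passed(&end)) {
			fprintf(stderr, "ending pseudo-D3 with timeout\n");
			return 1;	/* -ETIMEDOUT analogue */
		}
	}
	return 0;
}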
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index e8b881596baf..51b01f7528be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -381,9 +381,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -578,34 +578,47 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
+ struct ieee80211_bss_conf *link_conf;
u16 value;
- int ret;
+ int link_id, ret = -EINVAL;
ret = kstrtou16(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&mvm->mutex);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
- /* make sure the channel context is assigned */
- if (!chanctx_conf) {
+ mvm->dbgfs_rx_phyinfo = value;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct cfg80211_chan_def min_def, ap_def;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 chains_static, chains_dynamic;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ continue;
+ }
+ /* A command can't be sent while the RCU lock is held, so copy
+ * everything here and use it after unlocking
+ */
+ min_def = chanctx_conf->min_def;
+ ap_def = chanctx_conf->ap;
+ chains_static = chanctx_conf->rx_chains_static;
+ chains_dynamic = chanctx_conf->rx_chains_dynamic;
rcu_read_unlock();
- mutex_unlock(&mvm->mutex);
- return -EINVAL;
- }
- phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
- rcu_read_unlock();
+ phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
+ if (!phy_ctxt)
+ continue;
- mvm->dbgfs_rx_phyinfo = value;
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def,
+ chains_static, chains_dynamic);
+ }
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
mutex_unlock(&mvm->mutex);
return ret ?: count;
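[Annotation] The rewritten handler snapshots the channel-context fields under the RCU read lock and sends the command only after unlocking, since blocking work is not allowed inside the critical section. A hedged pthread analogue of that snapshot-then-act pattern (invented names, not the driver's structures):

#include <pthread.h>
#include <stdio.h>

struct chan { int width, chains; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct chan shared = { 80, 2 };

static void reconfigure(void)
{
	struct chan snapshot;

	pthread_rwlock_rdlock(&lock);
	snapshot = shared;		/* copy everything we need */
	pthread_rwlock_unlock(&lock);

	/* the blocking "send command" step happens outside the lock */
	printf("configuring width=%d chains=%d\n",
	       snapshot.width, snapshot.chains);
}

int main(void)
{
	reconfigure();
	return 0;
}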
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index edc8204f7c0e..79f4ac8cbc72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -391,9 +391,7 @@ static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file,
char buf[12];
u32 value;
- err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ err = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (err)
return err;
@@ -877,14 +875,14 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
le16_to_cpu(rsp->curr_mcc));
pos += scnprintf(pos, endpos - pos, "Block list entries:");
- for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++)
+ for (i = 0; i < IWL_WTAS_BLACK_LIST_MAX; i++)
pos += scnprintf(pos, endpos - pos, " 0x%x",
le16_to_cpu(rsp->block_list[i]));
pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n",
- iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO");
+ iwl_is_tas_approved() ? "YES" : "NO");
pos += scnprintf(pos, endpos - pos,
"\tDo TAS Support Dual Radio?: %s\n",
rsp->in_dual_radio ? "TRUE" : "FALSE");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 233ae81884a0..4863a3c74640 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -821,9 +821,10 @@ iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* If secure LTF is turned off, replace the flag with PMF only
*/
flags = le32_to_cpu(target->initiator_ap_flags);
- if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
- !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
- flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
flags |= IWL_INITIATOR_AP_FLAGS_PMF;
target->initiator_ap_flags = cpu_to_le32(flags);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 8f10590f9cdd..8e760300a1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -12,6 +12,9 @@ struct iwl_mvm_pasn_sta {
struct list_head list;
struct iwl_mvm_int_sta int_sta;
u8 addr[ETH_ALEN];
+
+ /* must be last, as it is followed by the buffer holding the key */
+ struct ieee80211_key_conf keyconf;
};
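[Annotation] The keyconf member must stay last because the allocation later in this patch, kzalloc(sizeof(*sta) + tk_len), places the key bytes immediately after the struct, where the key_conf's trailing key storage expects them. A standalone analogue using a flexible array member (illustrative names, not the mac80211 types):

#include <stdlib.h>
#include <string.h>

struct keyconf {
	unsigned int keylen;
	unsigned char key[];	/* flexible array: storage comes from the
				 * over-allocation below */
};

static struct keyconf *alloc_keyconf(const unsigned char *tk, size_t tk_len)
{
	struct keyconf *kc = calloc(1, sizeof(*kc) + tk_len);

	if (!kc)
		return NULL;
	kc->keylen = tk_len;
	memcpy(kc->key, tk, tk_len);
	return kc;
}

int main(void)
{
	unsigned char tk[16] = { 0xaa };
	struct keyconf *kc = alloc_keyconf(tk, sizeof(tk));

	free(kc);
	return 0;
}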
struct iwl_mvm_pasn_hltk_data {
@@ -303,6 +306,10 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
{
list_del(&sta->list);
+ if (sta->keyconf.keylen)
+ iwl_mvm_sec_key_del_pasn(mvm, vif, BIT(sta->int_sta.sta_id),
+ &sta->keyconf);
+
if (iwl_mvm_has_mld_api(mvm->fw))
iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
else
@@ -342,6 +349,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (hltk && hltk_len) {
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) {
+ IWL_ERR(mvm, "No support for secure LTF measurement\n");
+ return -EINVAL;
+ }
+
hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
@@ -352,12 +365,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (tk && tk_len) {
- sta = kzalloc(sizeof(*sta), GFP_KERNEL);
+ sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL);
if (!sta)
return -ENOBUFS;
ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
- cipher, tk, tk_len);
+ cipher, tk, tk_len, &sta->keyconf);
if (ret) {
kfree(sta);
return ret;
@@ -425,7 +438,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_unlock();
phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, &ctx.ap,
ctx.rx_chains_static,
ctx.rx_chains_dynamic);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1252084662c6..e1c2b7fc92ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,6 +16,7 @@
#include "fw/acpi.h"
#include "fw/pnvm.h"
#include "fw/uefi.h"
+#include "fw/regulatory.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -487,7 +488,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
#endif /* CONFIG_ACPI */
}
-#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
u8 cmd_ver;
@@ -567,17 +567,6 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return ret;
}
-#else
-
-static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
-{
-}
-#endif
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
@@ -677,6 +666,11 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ)
+ mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+
/* Send init config command to mark that we are sending NVM access
* commands
*/
@@ -890,7 +884,6 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
-#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
@@ -931,9 +924,9 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
- IWL_NUM_CHAIN_TABLES,
- n_subbands, prof_a, prof_b);
+ ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
@@ -989,7 +982,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
resp = (void *)cmd.resp_pkt->data;
ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
+ if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
ret = -EIO;
iwl_free_resp(&cmd);
@@ -1003,7 +996,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u16 len;
u32 n_bands;
u32 n_profiles;
- u32 sk = 0;
+ __le32 sk = cpu_to_le32(0);
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
@@ -1020,27 +1013,35 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
/* the ops field is at the same spot for all versions, so set in v1 */
cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+ /* Only set to South Korea if the table revision is 1 */
+ if (mvm->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
if (cmd_ver == 5) {
len = sizeof(cmd.v5);
n_bands = ARRAY_SIZE(cmd.v5.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v5.table_revision = sk;
} else if (cmd_ver == 4) {
len = sizeof(cmd.v4);
n_bands = ARRAY_SIZE(cmd.v4.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v4.table_revision = sk;
} else if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v3.table_revision = sk;
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v2.table_revision = sk;
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
}
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
@@ -1052,8 +1053,8 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
/* the table is at the same position for all versions, so set use v1 */
- ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
- n_bands, n_profiles);
+ ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
+ n_bands, n_profiles);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
@@ -1062,27 +1063,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
- /* Only set to South Korea if the table revision is 1 */
- if (mvm->fwrt.geo_rev == 1)
- sk = 1;
-
- /*
- * Set the table_revision to South Korea (1) or not (0). The
- * element name is misleading, as it doesn't contain the table
- * revision number, but whether the South Korea variation
- * should be used.
- * This must be done after calling iwl_sar_geo_init().
- */
- if (cmd_ver == 5)
- cmd.v5.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 4)
- cmd.v4.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 3)
- cmd.v3.table_revision = cpu_to_le32(sk);
- else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER))
- cmd.v2.table_revision = cpu_to_le32(sk);
-
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
@@ -1091,7 +1071,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
union iwl_ppag_table_cmd cmd;
int ret, cmd_size;
- ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
/* Not supporting PPAG table is a valid scenario */
if (ret < 0)
return 0;
@@ -1110,80 +1090,19 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
+ if (!(iwl_is_ppag_approved(&mvm->fwrt)))
return 0;
return iwl_mvm_ppag_send_cmd(mvm);
}
-static const struct dmi_system_id dmi_tas_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "LENOVO",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "Acer",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "MSI",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
- },
- },
- { .ident = "Honor",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
- },
- },
- /* keep last */
- {}
-};
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return dmi_check_system(dmi_tas_approved_list);
-}
-
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
int i;
u32 size = le32_to_cpu(*le_size);
/* Verify that there is room for another country */
- if (size >= IWL_TAS_BLOCK_LIST_MAX)
+ if (size >= IWL_WTAS_BLACK_LIST_MAX)
return false;
for (i = 0; i < size; i++) {
@@ -1200,21 +1119,21 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- union iwl_tas_config_cmd cmd = {};
+ struct iwl_tas_data data = {};
+ struct iwl_tas_config_cmd cmd = {};
int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
- APCI_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
return;
}
- fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
+ ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1225,16 +1144,16 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
if (ret == 0)
return;
- if (!iwl_mvm_is_vendor_in_approved_list()) {
+ if (!iwl_is_tas_approved()) {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_US)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_CANADA))) {
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
@@ -1242,41 +1161,64 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
} else {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is in the approved list.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
}
- /* v4 is the same size as v3, so no need to differentiate here */
- cmd_size = fw_ver < 3 ?
- sizeof(struct iwl_tas_config_cmd_v2) :
- sizeof(struct iwl_tas_config_cmd_v3);
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
+
+ /* Set the v3- or v4-specific parts; truncated for fw_ver < 3 */
+ if (fw_ver == 4) {
+ cmd.v4.override_tas_iec = data.override_tas_iec;
+ cmd.v4.enable_tas_iec = data.enable_tas_iec;
+ cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
+ } else {
+ cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
+ cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
+ }
+
+ cmd_size = sizeof(struct iwl_tas_config_cmd_common);
+ if (fw_ver >= 3)
+ /* v4 is the same size as v3 */
+ cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
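[Annotation] The rework above builds the TAS command as a common prefix plus a versioned tail, truncating the tail for firmware older than v3 (v4 being the same size as v3). A hedged sketch of that size computation with invented field names and sizes:

#include <stdint.h>
#include <stdio.h>

struct cmd_common {
	uint32_t block_list_size;
	uint32_t block_list[16];
};

struct cmd_v3 {
	uint16_t override_iec, enable_iec;
};

struct cmd {
	struct cmd_common common;
	union {
		struct cmd_v3 v3;
		/* a same-sized v4 variant would live here too */
	};
};

static size_t cmd_size(int fw_ver)
{
	size_t len = sizeof(struct cmd_common);

	if (fw_ver >= 3)
		len += sizeof(struct cmd_v3);	/* v4 is the same size as v3 */
	return len;
}

int main(void)
{
	printf("fw v2: %zu bytes, fw v3: %zu bytes\n",
	       cmd_size(2), cmd_size(3));
	return 0;
}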
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
+static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
- u8 value;
- int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
- &iwl_rfi_guid, &value);
+ u32 value = 0;
+ /* default behaviour is disabled */
+ bool bios_enable_rfi = false;
+ int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
+
if (ret < 0) {
IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
+ return bios_enable_rfi;
+ }
- } else if (value >= DSM_VALUE_RFI_MAX) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n",
- value);
-
- } else if (value == DSM_VALUE_RFI_ENABLE) {
+ value &= DSM_VALUE_RFI_DISABLE;
+ /* The RFI BIOS CONFIG value can only be 0 or 3:
+ * 0 means DDR and DLVR are enabled, 3 means both are disabled.
+ * 1 and 2 are invalid BIOS configurations, so it's not possible to
+ * disable DDR/DLVR separately.
+ */
+ if (!value) {
IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
- return DSM_VALUE_RFI_ENABLE;
+ bios_enable_rfi = true;
+ } else if (value == DSM_VALUE_RFI_DISABLE) {
+ IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
+ } else {
+ IWL_DEBUG_RADIO(mvm,
+ "DSM RFI got invalid value, value=%d\n", value);
}
- IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
-
- /* default behaviour is disabled */
- return DSM_VALUE_RFI_DISABLE;
+ return bios_enable_rfi;
}
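[Annotation] After masking with DSM_VALUE_RFI_DISABLE, only 0 (DDR and DLVR RFI enabled) and 3 (both disabled) are valid BIOS settings; 1 and 2 fall through to the invalid branch and leave RFI off. A standalone sketch of that decoding; the constant value 0x3 is an assumption made for illustration:

#include <stdbool.h>
#include <stdio.h>

#define DSM_VALUE_RFI_DISABLE 0x3	/* assumed DDR|DLVR disable bits */

static bool rfi_enabled(unsigned int value)
{
	value &= DSM_VALUE_RFI_DISABLE;
	if (value == 0)
		return true;	/* DDR and DLVR RFI enabled */
	if (value == DSM_VALUE_RFI_DISABLE)
		return false;	/* both disabled */
	fprintf(stderr, "invalid BIOS RFI value %u\n", value);
	return false;		/* 1 and 2 are invalid: default to off */
}

int main(void)
{
	for (unsigned int v = 0; v < 4; v++)
		printf("value %u -> RFI %s\n", v,
		       rfi_enabled(v) ? "on" : "off");
	return 0;
}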
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
@@ -1288,43 +1230,34 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
- cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
+ cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
if (!ret)
cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_UNII4_CHAN,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
if (!ret)
cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ACTIVATE_CHANNEL,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
cmd.chan_state_active_bitmap = cpu_to_le32(value);
}
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_FORCE_DISABLE_CHANNELS,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS,
+ &value);
if (!ret)
cmd.force_disable_channels_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
if (!ret)
cmd.edt_bitmap = cpu_to_le32(value);
@@ -1390,15 +1323,17 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
- mvm->fwrt.uats_enabled = TRUE;
+ mvm->fwrt.uats_enabled = true;
}
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
{
int ret;
+ iwl_acpi_get_guid_lock_status(&mvm->fwrt);
+
/* read PPAG table */
- ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
+ ret = iwl_bios_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1406,7 +1341,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
/* read SAR tables */
- ret = iwl_sar_get_wrds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1415,7 +1350,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
* If not available, don't fail and don't bother with EWRD and
* WGDS */
- if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
+ if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
@@ -1426,7 +1361,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
} else {
- ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
+ ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use
* WRDS, so don't fail */
if (ret < 0)
@@ -1436,7 +1371,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
/* read geo SAR table */
if (iwl_sar_geo_support(&mvm->fwrt)) {
- ret = iwl_sar_get_wgds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wgds_table(&mvm->fwrt);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1446,59 +1381,18 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
-}
-#else /* CONFIG_ACPI */
-inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
- int prof_a, int prof_b)
-{
- return 1;
+ if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
+ IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
}
-inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- return -ENOENT;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
}
-static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
-static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
-{
-}
-
-static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
-{
-}
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return false;
-}
-
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
-{
- return DSM_VALUE_RFI_DISABLE;
-}
-
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
-{
-}
-
-#endif /* CONFIG_ACPI */
-
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
@@ -1543,10 +1437,15 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
if (flags & ERROR_RECOVERY_UPDATE_DB) {
resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
- if (resp)
+ if (resp) {
IWL_ERR(mvm,
"Failed to send recovery cmd blob was invalid %d\n",
resp);
+
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_disconnect_iterator,
+ mvm);
+ }
}
}
@@ -1781,9 +1680,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!mvm->ptp_data.ptp_clock)
iwl_mvm_ptp_init(mvm);
- if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
- IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
-
ret = iwl_mvm_ppag_init(mvm);
if (ret)
goto error;
@@ -1803,7 +1699,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_uats_init(mvm);
if (iwl_rfi_supported(mvm)) {
- if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
+ if (iwl_mvm_eval_dsm_rfi(mvm))
iwl_rfi_send_config_cmd(mvm, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index be48b0fc9cb6..f13f13e6b71a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
#include "time-event.h"
@@ -53,6 +53,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int link_id = link_conf->link_id;
struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
struct iwl_link_config_cmd cmd = {};
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info))
return -EINVAL;
@@ -84,7 +86,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
@@ -100,6 +103,8 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
u32 ht_flag, flags = 0, flags_mask = 0;
int ret;
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info ||
link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
@@ -190,12 +195,21 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
+ struct ieee80211_chanctx_conf *ctx;
+ struct cfg80211_chan_def *def = NULL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(link_conf->chanctx_conf);
+ if (ctx)
+ def = iwl_mvm_chanctx_def(mvm, ctx);
+
if (iwlwifi_mod_params.disable_11be ||
- !link_conf->eht_support)
+ !link_conf->eht_support || !def ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) >= 6)
changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
else
- cmd.puncture_mask =
- cpu_to_le16(link_conf->eht_puncturing);
+ cmd.puncture_mask = cpu_to_le16(def->punctured);
+ rcu_read_unlock();
}
cmd.bss_color = link_conf->he_bss_color.color;
@@ -224,7 +238,8 @@ send_cmd:
cmd.flags = cpu_to_le32(flags);
cmd.flags_mask = cpu_to_le32(flags_mask);
cmd.spec_link_id = link_conf->link_id;
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY);
if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index c4f96125cf33..228ede7b8957 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_TRIG_TX_FIFO_BK,
};
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
struct iwl_mvm_mac_iface_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
@@ -455,7 +466,7 @@ void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
/* Protect when channel wider than 20MHz */
- if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20)
+ if (link_conf->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
*protection_flags |= cpu_to_le32(ht_flag);
break;
default:
@@ -494,7 +505,7 @@ void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (link_conf->qos)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ if (link_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
}
@@ -910,8 +921,8 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
link_conf = rcu_dereference(vif->link_conf[link_id]);
if (link_conf) {
basic = link_conf->basic_rates;
- if (link_conf->chandef.chan)
- band = link_conf->chandef.chan->band;
+ if (link_conf->chanreq.oper.chan)
+ band = link_conf->chanreq.oper.chan->band;
}
rcu_read_unlock();
}
@@ -1466,8 +1477,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
- int c = ieee80211_beacon_update_cntdwn(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif, 0);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif,
&csa_vif->bss_conf);
@@ -1486,7 +1497,7 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
}
} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
/* we don't have CSA NoA scheduled yet, switch now */
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
RCU_INIT_POINTER(mvm->csa_vif, NULL);
}
}
@@ -1626,10 +1637,22 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
- if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
- iwl_mvm_connection_loss(mvm, vif, "missed beacons");
- else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
- ieee80211_beacon_loss(vif);
+ if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (rx_missed_bcon_since_rx >= IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD) {
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ } else {
+ IWL_WARN(mvm,
+ "missed beacons exceeds threshold, but receiving data. Stay connected, Expect bugs.\n");
+ IWL_WARN(mvm,
+ "missed_beacons:%d, missed_beacons_since_rx:%d\n",
+ rx_missed_bcon, rx_missed_bcon_since_rx);
+ }
+ } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ ieee80211_beacon_loss(vif);
+ else
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+ }
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
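[Annotation] The rework above distinguishes a long miss streak with nothing received since the last beacon (disconnect) from one where data is still flowing (warn only, stay connected), and a shorter streak that merely raises a beacon-loss notification. A sketch of that decision table; all three threshold values are assumptions, not the driver's:

#include <stdio.h>

#define THOLD_LONG	16	/* assumed long-miss threshold */
#define THOLD_SINCE_RX	 8	/* assumed no-RX threshold */
#define THOLD_SHORT	 4	/* assumed notification threshold */

static const char *beacon_loss_action(int missed, int missed_since_rx)
{
	if (missed >= THOLD_LONG)
		return missed_since_rx >= THOLD_SINCE_RX ?
			"connection loss" : "warn, stay connected";
	if (missed_since_rx > THOLD_SHORT)
		return "beacon-loss notification";
	return "no action";
}

int main(void)
{
	printf("%s\n", beacon_loss_action(20, 10));	/* connection loss */
	printf("%s\n", beacon_loss_action(20, 2));	/* warn, stay connected */
	printf("%s\n", beacon_loss_action(6, 6));	/* beacon-loss notification */
	return 0;
}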
@@ -1832,7 +1855,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
csa_vif->bss_conf.beacon_int));
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 7f13dff04b26..69f6a96b0cfb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -138,7 +138,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- le32_to_cpu(resp->cap), resp_ver);
+ le32_to_cpu(resp->cap), resp_ver,
+ mvm->fwrt.uats_enabled);
/* Store the return source id */
src_id = resp->source_id;
if (IS_ERR_OR_NULL(regd)) {
@@ -263,6 +264,9 @@ static const u8 tm_if_types_ext_capa_sta[] = {
__bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME)
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
{
@@ -272,6 +276,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
{
.iftype = NL80211_IFTYPE_STATION,
@@ -280,6 +285,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
};
@@ -490,6 +496,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
hw->wiphy->hw_timestamp_max_peers = 1;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -695,6 +706,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
}
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN) >= 11) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SECURE_LTF);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -1195,14 +1218,12 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
- max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures
@@ -1210,13 +1231,7 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
- for (retry = 0; retry <= max_retry; retry++) {
- ret = __iwl_mvm_mac_start(mvm);
- if (!ret || mvm->pldr_sync)
- break;
-
- IWL_ERR(mvm, "mac start retry %d\n", retry);
- }
+ ret = __iwl_mvm_mac_start(mvm);
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1350,6 +1365,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
* discover that its list is now empty.
*/
cancel_work_sync(&mvm->async_handlers_wk);
+ wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
}
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
@@ -1433,7 +1449,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
goto out_unlock;
@@ -1600,7 +1616,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
*/
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC) {
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
ret = 0;
goto out;
}
@@ -1616,7 +1633,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_remove_mac;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -1627,6 +1644,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
@@ -1637,10 +1657,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
mvm->monitor_p80 =
- iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef);
+ iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
}
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -2558,7 +2579,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
}
if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
@@ -2579,7 +2600,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
/* FIXME: need to update per link when FW API will
* support it
*/
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
IWL_ERR(mvm,
"failed to update CQM thresholds\n");
@@ -2606,9 +2627,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be))
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
@@ -2617,10 +2636,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
vif->cfg.assoc &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
/*
@@ -3416,16 +3432,16 @@ iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (WARN_ON_ONCE(!link_conf->chandef.chan ||
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan ||
!mvmvif->link[link_id]))
return;
- if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) {
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
mvmvif->link[link_id]->he_ru_2mhz_block = false;
return;
}
- cfg80211_bss_iter(hw->wiphy, &link_conf->chandef,
+ cfg80211_bss_iter(hw->wiphy, &link_conf->chanreq.oper,
iwl_mvm_check_he_obss_narrow_bw_ru_iter,
&iter_data);
@@ -3485,10 +3501,10 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
/* FIXME: MEI needs to be updated for MLO */
- if (!vif->bss_conf.chandef.chan)
+ if (!vif->bss_conf.chanreq.oper.chan)
return;
- conn_info.channel = vif->bss_conf.chandef.chan->hw_value;
+ conn_info.channel = vif->bss_conf.chanreq.oper.chan->hw_value;
switch (mvm_sta->pairwise_cipher) {
case WLAN_CIPHER_SUITE_TKIP:
@@ -3685,6 +3701,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
NL80211_TDLS_SETUP);
}
+ if (ret)
+ return ret;
+
for_each_sta_active_link(vif, sta, link_sta, i)
link_sta->agg.max_rc_amsdu_len = 1;
@@ -3693,6 +3712,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mvmvif->ap_sta = sta;
+ /*
+	 * Initialize the rates here already - this really only tells
+	 * the firmware what the supported legacy rates may be, since
+	 * they are already initialized from what the
+ * AP advertised in the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
return 0;
}
@@ -3719,10 +3751,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
* the default bss_conf
*/
if (!mvm->mld_api_is_used &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
iwl_mvm_vif_set_he_support(hw, vif, sta, true);
@@ -3774,7 +3804,7 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
NL80211_TDLS_ENABLE_LINK);
} else {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
mvmvif->authorized = 1;
@@ -3791,13 +3821,17 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvm_sta->authorized = true;
- iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
-
/* MFP is set by default before the station is authorized.
* Clear it here in case it's not used.
*/
- if (!sta->mfp)
- return callbacks->update_sta(mvm, vif, sta);
+ if (!sta->mfp) {
+ int ret = callbacks->update_sta(mvm, vif, sta);
+
+ if (ret)
+ return ret;
+ }
+
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
return 0;
}
@@ -3828,7 +3862,7 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
mvmvif->authorized = 0;
/* disable beacon filtering */
- iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ iwl_mvm_disable_beacon_filter(mvm, vif);
}
return 0;
@@ -4407,44 +4441,6 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
return true;
}
-#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
-#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
-#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
-#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
-#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
-
-static void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
- u32 duration_ms,
- u32 *duration_tu,
- u32 *delay)
-{
- u32 dtim_interval = vif->bss_conf.dtim_period *
- vif->bss_conf.beacon_int;
-
- *delay = AUX_ROC_MIN_DELAY;
- *duration_tu = MSEC_TO_TU(duration_ms);
-
- /*
- * If we are associated we want the delay time to be at least one
- * dtim interval so that the FW can wait until after the DTIM and
- * then start the time event, this will potentially allow us to
- * remain off-channel for the max duration.
- * Since we want to use almost a whole dtim interval we would also
- * like the delay to be for 2-3 dtim intervals, in case there are
- * other time events with higher priority.
- */
- if (vif->cfg.assoc) {
- *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
- /* We cannot remain off-channel longer than the DTIM interval */
- if (dtim_interval <= *duration_tu) {
- *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
- if (*duration_tu <= AUX_ROC_MIN_DURATION)
- *duration_tu = dtim_interval -
- AUX_ROC_MIN_SAFETY_BUFFER;
- }
- }
-}
-
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
@@ -4542,48 +4538,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
return res;
}
-static int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
- struct ieee80211_channel *channel,
- struct ieee80211_vif *vif,
- int duration, u32 activity)
-{
- int res;
- u32 duration_tu, delay;
- struct iwl_roc_req roc_req = {
- .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
- .activity = cpu_to_le32(activity),
- .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Set the channel info data */
- iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
- channel->hw_value,
- iwl_mvm_phy_band_from_nl80211(channel->band),
- IWL_PHY_CHANNEL_MODE20, 0);
-
- iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
- &delay);
- roc_req.duration = cpu_to_le32(duration_tu);
- roc_req.max_delay = cpu_to_le32(delay);
-
- IWL_DEBUG_TE(mvm,
- "\t(requested = %ums, max_delay = %ums)\n",
- duration, delay);
- IWL_DEBUG_TE(mvm,
- "Requesting to remain on channel %u for %utu\n",
- channel->hw_value, duration_tu);
-
- /* Set the node address */
- memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
-
- res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_req), &roc_req);
-
- return res;
-}
-
static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
{
int ret = 0;
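(iwl_mvm_roc_duration_and_delay() and iwl_mvm_roc_add_cmd() move out of this file; they are now declared in mvm.h, see below.) As a worked example of the delay/duration policy the first helper implements, assume an associated vif with dtim_period = 2, beacon_int = 100 TU, and a 300 ms request:

    dtim_interval = 2 * 100                              = 200 TU
    *delay        = min(3 * 200, AUX_ROC_MAX_DELAY = 585) = 585 TU
    *duration_tu  = MSEC_TO_TU(300)                      = 292 TU
    /* dtim_interval (200) <= duration (292), so clamp below one DTIM: */
    *duration_tu  = 200 - AUX_ROC_SAFETY_BUFFER (19)     = 181 TU
    /* 181 > AUX_ROC_MIN_DURATION (97), so this value stands */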
@@ -4700,7 +4654,7 @@ static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt,
- &chandef, 1, 1);
+ &chandef, NULL, 1, 1);
}
/* Execute the common part for MLD and non-MLD modes */
@@ -4788,8 +4742,8 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
data->responder = true;
}
-static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx)
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm_ftm_responder_iter_data data = {
.responder = false,
@@ -4808,9 +4762,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt;
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -4823,7 +4775,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
goto out;
}
- ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def,
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -4876,9 +4828,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
@@ -4903,7 +4853,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_bt_coex_vif_change(mvm);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
@@ -5356,8 +5306,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
- return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
}
return -EOPNOTSUPP;
@@ -5441,7 +5391,7 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
iwl_mvm_csa_client_absent(mvm, vif);
if (mvmvif->bf_data.bf_enabled) {
- int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
return ret;
@@ -6103,6 +6053,7 @@ void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
enum iwl_mvm_rxq_notif_type type,
bool sync,
@@ -6151,11 +6102,12 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
READ_ONCE(mvm->queue_sync_state) == 0 ||
- iwl_mvm_is_radio_killed(mvm),
- HZ);
- WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
- "queue sync: failed to sync, state is 0x%lx\n",
- mvm->queue_sync_state);
+ iwl_mvm_is_radio_hw_killed(mvm),
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm),
+ "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ mvm->queue_sync_state,
+ mvm->queue_sync_cookie);
}
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index ea3e9e9c6e26..8a38fc4b0b0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <net/mac80211.h>
@@ -62,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
u32 flags = 0;
lockdep_assert_held(&mvm->mutex);
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!pairwise)
flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
switch (keyconf->cipher) {
@@ -96,14 +98,19 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
if (!sta && vif->type == NL80211_IFTYPE_STATION)
sta = mvmvif->ap_sta;
- /* Set the MFP flag also for an AP interface where the key is an IGTK
- * key as in such a case the station would always be NULL
+ /*
+ * If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * Same goes if we are installing a pairwise key for an MFP station.
+	 * A group key that is not an iGTK (i.e. a GTK) will not be
+	 * used for MGMT frames.
*/
- if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
- (vif->type == NL80211_IFTYPE_AP &&
- (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
flags |= IWL_SEC_KEY_FLAG_MFP;
+ if (keyconf->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
return flags;
}
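A compact restatement of the new MFP rule above (a sketch, not driver code):

    static bool key_needs_mfp(bool pairwise, bool sta_mfp, bool igtk)
    {
    	/* iGTKs always protect MGMT frames; pairwise keys do only for MFP peers */
    	return igtk || (pairwise && sta_mfp);
    }

In other words, group keys that are not iGTKs (plain GTKs) never get IWL_SEC_KEY_FLAG_MFP.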
@@ -335,6 +342,21 @@ static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
+ IWL_SEC_KEY_FLAG_MFP;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ return __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ 0);
+}
+
int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index f313a8d771e4..bb7851042177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
@@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
- MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
MAC_CFG_FILTER_ACCEPT_BEACON |
MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
MAC_CFG_FILTER_ACCEPT_GRP);
@@ -205,8 +205,11 @@ static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
cmd.p2p_dev.is_disc_extended =
iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif);
- /* Override the filter flags to accept only probe requests */
- cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+	 * P2P service discovery using action frames.
+ */
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 61170173f917..084314bf6f36 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
@@ -47,7 +47,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -81,7 +81,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
}
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -253,9 +254,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
if (!rcu_access_pointer(link_conf->chanctx_conf))
n_active++;
- if (n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
@@ -437,6 +435,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
mvmvif->ap_ibss_active = false;
}
+ iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE, false);
+
if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
@@ -448,9 +449,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_MONITOR)
iwl_mvm_mld_rm_snif_sta(mvm, vif);
- iwl_mvm_link_changed(mvm, vif, link_conf,
- LINK_CONTEXT_MODIFY_ACTIVE, false);
-
if (switching_chanctx)
return;
mvmvif->link[link_id]->phy_ctxt = NULL;
@@ -606,6 +604,7 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
struct iwl_mvm_link_sel_data {
u8 link_id;
enum nl80211_band band;
+ enum nl80211_chan_width width;
bool active;
};
@@ -657,7 +656,8 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
continue;
data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chandef.chan->band;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
data[n_data].active = vif->active_links & BIT(link_id);
n_data++;
}
@@ -752,8 +752,8 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- /* Update EHT Puncturing info */
- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
+ /* if associated, maybe puncturing changed - we'll check later */
+ if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
if (link_changes) {
@@ -1121,17 +1121,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {};
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 removed = old_links & ~new_links;
u16 added = new_links & ~old_links;
int err, i;
- if (hweight16(new_links) > 1 &&
- n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
int r;
@@ -1223,6 +1218,146 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
return ret;
}
+/*
+ * This function receives a subset of the usable links bitmap and
+ * returns the primary link id, or -1 if no such link exists
+ * (e.g. non-MLO connection) or wasn't found.
+ */
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 link_id, n_data = 0;
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc)
+ return -1;
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
+ data[n_data].active = true;
+ n_data++;
+ }
+
+ if (n_data <= 1)
+ return -1;
+
+ /* The logic should be modified to handle more than 2 links */
+ WARN_ON_ONCE(n_data > 2);
+
+ /* Primary link is the link with the wider bandwidth or higher band */
+ if (data[0].width > data[1].width)
+ return data[0].link_id;
+ if (data[0].width < data[1].width)
+ return data[1].link_id;
+ if (data[0].band >= data[1].band)
+ return data[0].link_id;
+
+ return data[1].link_id;
+}
+
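/*
 * Illustrative sketch (not part of the patch): the selection above reduced
 * to a two-link comparison - the wider channel wins, and ties go to the
 * higher band (enum nl80211_band orders 2.4 < 5 < 6 GHz):
 *
 *	static u8 pick_primary(const struct iwl_mvm_link_sel_data *a,
 *			       const struct iwl_mvm_link_sel_data *b)
 *	{
 *		if (a->width != b->width)
 *			return a->width > b->width ? a->link_id : b->link_id;
 *		return a->band >= b->band ? a->link_id : b->link_id;
 *	}
 */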
+/*
+ * This function receives a bitmap of usable links and checks whether we
+ * can enter eSR on those links.
+ */
+static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long desired_links)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ desired_links);
+ const struct wiphy_iftype_ext_capab *ext_capa;
+ bool ret = true;
+ int link_id;
+
+ if (primary_link < 0)
+ return false;
+
+ if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
+ ieee80211_vif_type_p2p(vif));
+ if (!ext_capa ||
+ !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+		/* BT coex affects eSR mode only if one of the links is on LB */
+ if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
+ continue;
+
+ ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+		/* Mark eSR as disabled for the next time */
+ if (!ret)
+ mvmvif->bt_coex_esr_disabled = true;
+ break;
+ }
+
+ return ret;
+}
+
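/*
 * Illustrative summary of the eSR entry conditions checked above:
 *  1. an MLO association with a resolvable primary link
 *  2. the peer advertises EMLSR support in vif->cfg.eml_cap
 *  3. our own iftype extended capabilities advertise EMLSR support
 *  4. if any candidate link is on 2.4 GHz, BT coex must permit eSR
 *     against the primary link
 */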
+static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int n_links = hweight16(desired_links);
+ bool ret = true;
+
+ if (n_links <= 1)
+ return true;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Check if HW supports the wanted number of links */
+ if (n_links > iwl_mvm_max_active_links(mvm, vif)) {
+ ret = false;
+ goto unlock;
+ }
+
+ /* If it is an eSR device, check that we can enter eSR */
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans))
+ ret = iwl_mvm_can_enter_esr(mvm, vif, desired_links);
+unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static enum ieee80211_neg_ttlm_res
+iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u16 map;
+ u8 i;
+
+ /* Verify all TIDs are mapped to the same links set */
+ map = neg_ttlm->downlink[0];
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->uplink[i] != map)
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
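/*
 * Illustrative example for the TTLM check above, with hypothetical 16-bit
 * link bitmaps: downlink[i] == uplink[i] == 0x0003 for all TIDs (every TID
 * on links 0+1) is accepted; if any single entry differs, say
 * uplink[3] == 0x0001, the whole mapping is rejected.
 */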
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1317,4 +1452,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.change_vif_links = iwl_mvm_mld_change_vif_links,
.change_sta_links = iwl_mvm_mld_change_sta_links,
+ .can_activate_links = iwl_mvm_mld_can_activate_links,
+ .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 40627961b834..5c316909cab2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -40,8 +40,9 @@
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD 4
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
-#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 19
/* A TimeUnit is 1024 microseconds */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
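Worked example of the macro above: MSEC_TO_TU(100) = 100 * 1000 / 1024 = 97 TU; the integer division truncates, so each conversion can lose up to one TU.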
@@ -105,6 +106,7 @@ struct iwl_mvm_phy_ctxt {
/* track for RLC config command */
u32 center_freq1;
bool rlc_disabled;
+ u32 channel_load_by_us;
};
struct iwl_mvm_time_event_data {
@@ -121,7 +123,7 @@ struct iwl_mvm_time_event_data {
* if the te is in the time event list or not (when id == TE_MAX)
*/
u32 id;
- u8 link_id;
+ s8 link_id;
};
/* Power management */
@@ -359,6 +361,7 @@ struct iwl_mvm_vif_link_info {
* @pm_enabled - indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
+ * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
@@ -389,6 +392,7 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
+ bool bt_coex_esr_disabled;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -537,8 +541,8 @@ struct iwl_mvm_tt_mgmt {
#ifdef CONFIG_THERMAL
/**
- *struct iwl_mvm_thermal_device - thermal zone related data
- * @temp_trips: temperature thresholds for report
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @trips: temperature thresholds for report
* @fw_trips_index: keep indexes to original array - temp_trips
* @tzone: thermal zone device data
*/
@@ -848,6 +852,9 @@ struct iwl_mvm {
spinlock_t async_handlers_lock;
struct work_struct async_handlers_wk;
+ /* For async rx handlers that require the wiphy lock */
+ struct wiphy_work async_handlers_wiphy_wk;
+
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1215,7 +1222,6 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
- * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
@@ -1230,7 +1236,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
- IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
IWL_MVM_STATUS_STARTING,
@@ -1567,13 +1572,17 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_trans *trans = mvm->fwrt.trans;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
- if (iwl_mvm_is_esr_supported(trans) ||
- (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
+ if ((iwl_mvm_is_esr_supported(trans) &&
+ !mvmvif->bt_coex_esr_disabled) ||
+ ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1581,12 +1590,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- return iwl_mvm_has_new_tx_api(mvm) ?
- iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_ac_to_bz_tx_fifo[ac];
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+ return iwl_mvm_ac_to_tx_fifo[ac];
}
struct iwl_rate_info {
@@ -1801,18 +1814,20 @@ void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
/* MVM PHY */
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm);
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef);
int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
u8 chains_static, u8 chains_dynamic);
@@ -2116,6 +2131,12 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
+bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link);
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2129,11 +2150,9 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
{}
#endif
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
@@ -2366,7 +2385,7 @@ u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time);
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm);
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2387,6 +2406,10 @@ int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf);
void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_vif_link_info *link,
@@ -2511,7 +2534,7 @@ static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
static inline void
iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
- struct cfg80211_chan_def *chandef)
+ const struct cfg80211_chan_def *chandef)
{
enum nl80211_band band = chandef->chan->band;
@@ -2601,7 +2624,6 @@ static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
-bool iwl_mvm_is_vendor_in_approved_list(void);
/* Callbacks for ieee80211_ops */
void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -2730,4 +2752,28 @@ bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool valid_links_changed);
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links);
+
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx);
+
+static inline struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
+{
+ bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
+ iwl_mvm_enable_fils(mvm, ctx);
+
+ return use_def ? &ctx->def : &ctx->min_def;
+}
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay);
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index c0dd441e800e..ae8177222881 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -590,7 +590,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+ !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index adbbe19aeae5..a93981cb9714 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -161,9 +161,9 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.chandef.chan ||
- vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
- vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+ if (!vif->bss_conf.chanreq.oper.chan ||
+ vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
return;
if (!vif->cfg.assoc)
@@ -219,7 +219,7 @@ void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
return;
if (mvm->fw_static_smps_request &&
- link_conf->chandef.width == NL80211_CHAN_WIDTH_160 &&
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
link_conf->he_support)
mode = IEEE80211_SMPS_STATIC;
@@ -259,7 +259,7 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
}
/**
- * enum iwl_rx_handler_context context for Rx handler
+ * enum iwl_rx_handler_context: context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
* which can't acquire mvm->mutex.
* @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
@@ -267,15 +267,19 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
* it will be called from a worker with mvm->mutex held.
* @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
* mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ * and mvm->mutex. Will be handled with the wiphy_work queue
+ * infrastructure instead of the regular work queue.
*/
enum iwl_rx_handler_context {
RX_HANDLER_SYNC,
RX_HANDLER_ASYNC_LOCKED,
RX_HANDLER_ASYNC_UNLOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
};
/**
- * struct iwl_rx_handlers handler for FW notification
+ * struct iwl_rx_handlers: handler for FW notification
* @cmd_id: command id
* @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
@@ -316,7 +320,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -324,7 +329,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
iwl_mvm_handle_rx_system_oper_stats,
- RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_system_statistics_notif_oper),
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
iwl_mvm_handle_rx_system_oper_part1_stats,
@@ -673,6 +678,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *work);
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
@@ -682,7 +689,7 @@ static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
if (!backoff)
return 0;
- dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
while (backoff->pwr) {
if (dflt_pwr_limit >= backoff->pwr)
@@ -1194,7 +1201,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
- iwl_mvm_get_acpi_tables(mvm);
+ iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
@@ -1265,6 +1272,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->add_stream_lock);
+ wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+ iwl_mvm_async_handlers_wiphy_wk);
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1551,35 +1560,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
spin_unlock_bh(&mvm->async_handlers_lock);
}
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) and runs only the handlers that belong to them
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+ u8 contexts)
{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
LIST_HEAD(local_list);
- /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
/*
- * Sync with Rx path with a lock. Remove all the entries from this list,
- * add them to a local one (lock free), and then handle them.
+ * Sync with Rx path with a lock. Remove all the entries of the
+ * wanted contexts from this list, add them to a local one (lock free),
+ * and then handle them.
*/
spin_lock_bh(&mvm->async_handlers_lock);
- list_splice_init(&mvm->async_handlers_list, &local_list);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ if (!(BIT(entry->context) & contexts))
+ continue;
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &local_list);
+ }
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_lock(&mvm->mutex);
entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_unlock(&mvm->mutex);
kfree(entry);
}
}
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+ BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
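/*
 * Illustrative summary (not part of the patch) of how each context is
 * now served by the two workers above:
 *   RX_HANDLER_SYNC               - called inline from the Rx path
 *   RX_HANDLER_ASYNC_LOCKED       - async_handlers_wk, under mvm->mutex
 *   RX_HANDLER_ASYNC_UNLOCKED     - async_handlers_wk, no mvm->mutex
 *   RX_HANDLER_ASYNC_LOCKED_WIPHY - async_handlers_wiphy_wk, wiphy lock
 *                                   held by the infrastructure, plus
 *                                   mvm->mutex
 */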
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1659,7 +1695,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
spin_lock(&mvm->async_handlers_lock);
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
- schedule_work(&mvm->async_handlers_wk);
+ if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+ wiphy_work_queue(mvm->hw->wiphy,
+ &mvm->async_handlers_wiphy_wk);
+ else
+ schedule_work(&mvm->async_handlers_wk);
break;
}
}
@@ -1788,12 +1828,8 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
- bool state = iwl_mvm_is_radio_killed(mvm);
-
- if (state)
- wake_up(&mvm->rx_sync_waitq);
-
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
+ iwl_mvm_is_radio_killed(mvm));
}
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1818,10 +1854,12 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
bool unified = iwl_mvm_has_unified_ucode(mvm);
- if (state)
+ if (state) {
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- else
+ wake_up(&mvm->rx_sync_waitq);
+ } else {
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ }
iwl_mvm_set_rfkill_state(mvm);
@@ -1955,7 +1993,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
- mvm->fwrt.trans->dbg.restart_required = FALSE;
+ mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 334d1f59f6e4..ce264b386029 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -9,7 +9,7 @@
#include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -33,7 +33,7 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values
*/
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
int offs = chandef->chan->center_freq - chandef->center_freq1;
int abs_offs = abs(offs);
@@ -116,7 +116,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd_v1 *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
struct iwl_phy_context_cmd_tail *tail =
@@ -137,7 +137,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm,
@@ -197,14 +197,18 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
*/
static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic,
u32 action)
{
int ret;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
- if (ver == 3 || ver == 4) {
+ if (ver < 5 || !ap || !ap->chan)
+ ap = NULL;
+
+ if (ver >= 3 && ver <= 6) {
struct iwl_phy_context_cmd cmd = {};
/* Set the command header fields */
@@ -215,6 +219,14 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
chains_static,
chains_dynamic);
+ if (ap) {
+ cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap);
+ cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap);
+ }
+
+ if (ver == 6)
+ cmd.puncture_mask = cpu_to_le16(chandef->punctured);
+
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
0, sizeof(cmd), &cmd);
} else if (ver < 3) {
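The version handling in iwl_mvm_phy_ctxt_apply() now breaks down as follows (a summary of the hunks above):

    ver < 3    legacy v1 command path (unchanged)
    ver 3-4    struct iwl_phy_context_cmd, ap forced to NULL
    ver 5+     also fills sbb_bandwidth/sbb_ctrl_channel_loc from the AP's
               chandef, when one is provided
    ver 6      additionally sets puncture_mask from chandef->punctured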
@@ -254,7 +266,8 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
* Send a command to add a PHY context based on the current HW configuration.
*/
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
int ret;
@@ -267,7 +280,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
FW_CTXT_ACTION_ADD);
@@ -300,7 +313,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* changed.
*/
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
@@ -324,7 +338,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
int ret;
/* ... remove it here ...*/
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE);
if (ret)
@@ -338,7 +352,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
action);
}
@@ -358,7 +372,7 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT);
- iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1,
+ iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1,
FW_CTXT_ACTION_REMOVE);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 1b9b06e0443f..41e68aa6bec8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -20,8 +20,7 @@
static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd,
- u32 flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
u16 len;
@@ -62,7 +61,7 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
len = offsetof(struct iwl_beacon_filter_cmd,
bf_threshold_absolute_low);
- return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, 0,
len, cmd);
}
@@ -813,8 +812,7 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_beacon_filter_cmd *cmd,
- u32 cmd_flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -825,7 +823,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = true;
@@ -834,20 +832,18 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -856,7 +852,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -865,10 +861,9 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
- return _iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return _iwl_mvm_disable_beacon_filter(mvm, vif);
}
static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
@@ -919,7 +914,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
!vif->cfg.ps ||
iwl_mvm_vif_low_latency(mvmvif));
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 6cba8a353b53..00860feefa7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -479,9 +479,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
+ u32 enabled = le32_to_cpu(notif->amsdu_enabled);
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
if (link_sta->agg.max_amsdu_len < size) {
/*
* In debug link_sta->agg.max_amsdu_len < size
@@ -492,7 +498,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
goto out;
}
- mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+ mvmsta->amsdu_enabled = enabled;
mvmsta->max_amsdu_len = size;
link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
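Effect of the new clamp: a firmware notification reporting an amsdu_size below 2000 bytes (say 1500) now zeroes both size and enabled, so A-MSDU is disabled outright rather than configured with an unusably small limit.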
@@ -525,10 +531,10 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
- if (WARN_ON_ONCE(!link_conf->chandef.chan))
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
switch (le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
@@ -538,7 +544,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
}
- } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ &&
+ } else if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ &&
eht_cap->has_eht) {
switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 481d68cbbbd8..a8c4e354e2ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4161,6 +4161,8 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
* @mvm: The mvm component
* @mvmsta: The station
* @enable: Enable Tx protection?
+ *
+ * Returns: 0 on success, or a negative error code on failure
*/
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8caa971770c6..b1add7942c5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -752,6 +752,19 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
spin_unlock(&mvm->tcm.lock);
}
+static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ int i;
+
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ if (!mvm->phy_ctxts[i].ref)
+ continue;
+ mvm->phy_ctxts[i].channel_load_by_us =
+ le32_to_cpu(per_phy[i].channel_load_by_us);
+ }
+}
+
static void
iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
struct iwl_statistics_operational_ntfy *stats)
@@ -766,6 +779,7 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator_all_macs,
&data);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
static void
@@ -841,6 +855,7 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
struct iwl_stats_ntfy_per_link *link_stats;
struct ieee80211_bss_conf *bss_conf;
struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif_link_info *link_info;
int link_id;
int sig;
@@ -857,20 +872,26 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
continue;
mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif);
- if (!mvmvif || !mvmvif->link[link_id])
+ link_info = mvmvif->link[link_id];
+ if (!link_info)
continue;
link_stats = &per_link[fw_link_id];
- mvmvif->link[link_id]->beacon_stats.num_beacons =
+ link_info->beacon_stats.num_beacons =
le32_to_cpu(link_stats->beacon_counter);
/* we basically just use the u8 to store 8 bits and then treat
* it as a s8 whenever we take it out to a different type.
*/
- mvmvif->link[link_id]->beacon_stats.avg_signal =
+ link_info->beacon_stats.avg_signal =
-le32_to_cpu(link_stats->beacon_average_energy);
+ if (link_info->phy_ctxt &&
+ link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
+ iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
+ link_id);
+
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
@@ -935,6 +956,7 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 886d00098528..1484eaedf452 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -282,6 +282,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
u32 status,
struct ieee80211_rx_status *stats)
{
+ struct wireless_dev *wdev;
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
u8 keyid;
@@ -303,9 +304,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
- return -1;
+ goto report;
/* good cases */
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
@@ -314,13 +321,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
- if (!sta)
- return -1;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
/*
* both keys will have the same cipher and MIC length, use
* whichever one is available
@@ -329,11 +329,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!key) {
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
if (!key)
- return -1;
+ goto report;
}
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
- return -1;
+ goto report;
/* get the real key ID */
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
@@ -347,7 +347,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return -1;
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
if (!key)
- return -1;
+ goto report;
}
/* Report status to mac80211 */
@@ -355,6 +355,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
ieee80211_key_mic_failure(key);
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
return -1;
}
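
The restructuring above funnels most beacon-protection failures through a single exit that now notifies userspace; a condensed, illustrative view of the new control flow (details such as the key-ID checks are omitted):

/* Condensed control flow for protected beacons (illustrative):
 *
 *   !sta                          -> return -1 (nothing to report)
 *   key not valid / key missing  -> goto report
 *   MIC ok and no replay          -> return 0 (accept)
 *   frame too short               -> goto report
 *
 * report:
 *   cfg80211_rx_unprot_mlme_mgmt() forwards the frame to userspace
 *   (e.g. so wpa_supplicant can act on beacon-protection failures),
 *   then the frame is dropped with -1.
 */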
@@ -397,8 +401,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
case IWL_RX_MPDU_STATUS_SEC_GCM:
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
/* alg is CCM: check MIC only */
- if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mvm,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
return -1;
+ }
stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
*crypt_len = IEEE80211_CCMP_HDR_LEN;
@@ -505,6 +512,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mvm_sta->dup_data))
+ return false;
+
dup_data = &mvm_sta->dup_data[queue];
/*
@@ -512,11 +523,9 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
*/
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1)) {
- rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
return false;
- }
if (ieee80211_is_data_qos(hdr->frame_control)) {
/* frame has qos control */
@@ -642,10 +651,8 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (!ba_data) {
- WARN(true, "BAID %d not found in map\n", baid);
+ if (WARN(!ba_data, "BAID %d not found in map\n", baid))
goto out;
- }
/* pick any STA ID to find the pointer */
sta_id = ffs(ba_data->sta_mask) - 1;
@@ -681,11 +688,11 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
len -= sizeof(*notif) + sizeof(*internal_notif);
- if (internal_notif->sync &&
- mvm->queue_sync_cookie != internal_notif->cookie) {
- WARN_ONCE(1, "Received expired RX queue sync message\n");
+ if (WARN_ONCE(internal_notif->sync &&
+ mvm->queue_sync_cookie != internal_notif->cookie,
+ "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
+ internal_notif->cookie, mvm->queue_sync_cookie, queue))
return;
- }
switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 7b6f1cdca067..f3e3986b4c72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -241,13 +241,11 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
return IWL_SCAN_TYPE_FRAGMENTED;
/*
- * in case of DCM with GO where BSS DTIM interval < 220msec
- * set all scan requests as fast-balance scan
+ * in case of DCM with P2P GO set all scan requests as
+ * fast-balance scan
*/
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- data.is_dcm_with_p2p_go &&
- ((vif->bss_conf.beacon_int *
- vif->bss_conf.dtim_period) < 220))
+ data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 30d4233595e8..16285ae7cae9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2019, 2022-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#include "mvm.h"
@@ -232,6 +232,9 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
};
struct ieee80211_sta *sta = NULL;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD))
+ return 0;
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 2a3ca9785974..491c449fd431 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -71,7 +71,7 @@ u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
mpdu_dens = link_sta->ht_cap.ampdu_density;
}
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
/* overwrite HT values on 6 GHz */
mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -208,7 +208,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (sta->deflink.ht_cap.ht_supported ||
- mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
add_sta_cmd.station_flags_msk |=
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
@@ -1502,6 +1502,34 @@ out_err:
return ret;
}
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ return 0;
+ }
+
+ if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ ret = 0;
+ }
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ if (!list_empty(&mvmtxq->list))
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+ local_bh_enable();
+
+ return ret;
+}
+
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
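
The new iwl_mvm_sta_ensure_queue() helper lets callers guarantee that a TXQ is allocated and marked ready before relying on it. A minimal usage sketch, assuming a hypothetical caller that already holds mvm->mutex (as the lockdep assertion requires); the function name and call site here are illustrative only:

static int example_prepare_sta_txq(struct iwl_mvm *mvm,
				   struct ieee80211_txq *txq)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocates the queue if needed and removes it from the
	 * add-stream worklist so the worker won't race with us.
	 */
	ret = iwl_mvm_sta_ensure_queue(mvm, txq);
	if (ret)
		IWL_ERR(mvm, "failed to ensure TXQ: %d\n", ret);

	return ret;
}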
@@ -2989,16 +3017,6 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
-
- /*
- * After we've deleted it, do another queue sync
- * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
- * running it won't find a new session in the old
- * BAID. It can find the NULL pointer for the BAID,
- * but we must not have it find a different session.
- */
- iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
- true, NULL, 0);
}
return 0;
@@ -3559,6 +3577,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
@@ -4298,12 +4319,12 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len)
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *keyconf)
{
int ret;
u16 queue;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_key_conf *keyconf;
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
bool mld = iwl_mvm_has_mld_api(mvm->fw);
@@ -4328,12 +4349,6 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
goto out;
- keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
- if (!keyconf) {
- ret = -ENOBUFS;
- goto out;
- }
-
keyconf->cipher = cipher;
memcpy(keyconf->key, key, key_len);
keyconf->keylen = key_len;
@@ -4354,10 +4369,9 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
0, NULL, 0, 0, true);
}
- kfree(keyconf);
- return 0;
out:
- iwl_mvm_dealloc_int_sta(mvm, sta);
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
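
With this change, ownership of the key_conf moves to the caller: iwl_mvm_add_pasn_sta() no longer allocates or frees it, and only tears down the internal station on failure. A hedged sketch of the new calling convention (variable names illustrative):

/* Caller allocates the key_conf with room for the key material and
 * keeps ownership across the call; on error it must free it itself.
 */
keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
if (!keyconf)
	return -ENOBUFS;

ret = iwl_mvm_add_pasn_sta(mvm, vif, int_sta, addr,
			   cipher, key, key_len, keyconf);
if (ret)
	kfree(keyconf);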
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b33a0ce096d4..b3450569864e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -571,10 +571,12 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len);
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *key_conf_out);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 218fdf1ed530..a59d264a11c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -45,32 +45,24 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
te_data->link_id = -1;
}
-void iwl_mvm_roc_done_wk(struct work_struct *wk)
+static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
-
/*
* Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
- clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
-
- synchronize_net();
-
- /*
- * Flush the offchannel queue -- this is called when the time
+ * in the case that the time event actually completed in the firmware.
+ *
+ * Also flush the offchannel queue -- this is called when the time
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
-
- mutex_lock(&mvm->mutex);
- if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
+ synchronize_net();
+
/*
* NB: access to this pointer would be racy, but the flush bit
* can only be set when we had a P2P-Device VIF, and we have a
@@ -105,21 +97,16 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
}
}
- /*
- * Clear the ROC_AUX_RUNNING status bit.
- * This will cause the TX path to drop offchannel transmissions.
- * That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
+ /* Do the same for AUX ROC */
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- /* do the same in case of hot spot 2.0 */
+ synchronize_net();
+
iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
mvm->aux_sta.tfd_queue_msk);
if (mvm->mld_api_is_used) {
iwl_mvm_mld_rm_aux_sta(mvm);
- goto out_unlock;
+ return;
}
/* In newer version of this command an aux station is added only
@@ -128,8 +115,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
+}
-out_unlock:
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_cleanup_roc(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -163,12 +156,12 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
@@ -294,18 +287,6 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
}
}
-static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
-{
- /*
- * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
- * roc_done_wk is already scheduled or running, so don't schedule it
- * again to avoid a race where the roc_done_wk clears this bit after
- * it is set here, affecting the next run of the roc_done_wk.
- */
- if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
- iwl_mvm_roc_finished(mvm);
-}
-
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -357,7 +338,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -692,7 +673,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
/* Determine whether mac or link id should be used, and validate the link id */
static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 link_id)
+ s8 link_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
@@ -706,8 +687,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
"Invalid link ID for session protection: %u\n", link_id))
return -EINVAL;
- if (WARN(ieee80211_vif_is_mld(vif) &&
- !(vif->active_links & BIT(link_id)),
+ if (WARN(!mvmvif->link[link_id]->active,
"Session Protection on an inactive link: %u\n", link_id))
return -EINVAL;
@@ -716,7 +696,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 id, u32 link_id)
+ u32 id, s8 link_id)
{
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
struct iwl_mvm_session_prot_cmd cmd = {
@@ -745,7 +725,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif = te_data->vif;
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
- unsigned int link_id;
+ s8 link_id;
if (!vif)
return false;
@@ -783,7 +763,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_cancel_session_protection(mvm, vif, id,
link_id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
}
}
return false;
@@ -929,7 +909,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
mvmvif->time_event_data.link_id != notif_link_id,
- "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
+ "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
notif_link_id, mvmvif->time_event_data.link_id))
goto out_unlock;
@@ -972,7 +952,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
- iwl_mvm_p2p_roc_finished(mvm);
+ mvmvif->time_event_data.link_id = -1;
+ iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
if (WARN_ON(mvmvif->time_event_data.id !=
@@ -986,6 +967,86 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay)
+{
+ u32 dtim_interval = vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int;
+
+ *delay = AUX_ROC_MIN_DELAY;
+ *duration_tu = MSEC_TO_TU(duration_ms);
+
+ /*
+ * If we are associated, we want the delay to be at least one
+ * DTIM interval so that the FW can wait until after the DTIM
+ * and then start the time event; this potentially allows us to
+ * remain off-channel for the maximum duration.
+ * Since we want to use almost a whole DTIM interval, we would
+ * also like the delay to span 2-3 DTIM intervals, in case there
+ * are other time events with higher priority.
+ */
+ if (vif->cfg.assoc) {
+ *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+ /* We cannot remain off-channel longer than the DTIM interval */
+ if (dtim_interval <= *duration_tu) {
+ *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+ if (*duration_tu <= AUX_ROC_MIN_DURATION)
+ *duration_tu = dtim_interval -
+ AUX_ROC_MIN_SAFETY_BUFFER;
+ }
+ }
+}
+
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity)
+{
+ int res;
+ u32 duration_tu, delay;
+ struct iwl_roc_req roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .activity = cpu_to_le32(activity),
+ .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
+ channel->hw_value,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
+ IWL_PHY_CHANNEL_MODE20, 0);
+
+ iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
+ &delay);
+ roc_req.duration = cpu_to_le32(duration_tu);
+ roc_req.max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums)\n",
+ duration, delay);
+ IWL_DEBUG_TE(mvm,
+ "Requesting to remain on channel %u for %utu\n",
+ channel->hw_value, duration_tu);
+
+ /* Set the node address */
+ memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
+
+ res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ 0, sizeof(roc_req), &roc_req);
+
+ return res;
+}
+
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
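
To make the delay/duration clamping in iwl_mvm_roc_duration_and_delay() concrete, here is a worked example, assuming MSEC_TO_TU() converts milliseconds to 1024-usec time units (roughly x * 1000 / 1024):

/*
 * Worked example: associated vif, beacon_int = 100 TU,
 * dtim_period = 3, requested duration_ms = 500.
 *
 *   dtim_interval = 3 * 100            = 300 TU
 *   duration_tu   = MSEC_TO_TU(500)   ~= 488 TU
 *   delay         = min(3 * 300, ~585) = ~585 TU (AUX_ROC_MAX_DELAY)
 *
 * Since dtim_interval (300) <= duration_tu (488), the duration is
 * clamped to 300 - ~19 (AUX_ROC_SAFETY_BUFFER) = ~281 TU, which is
 * still above AUX_ROC_MIN_DURATION (~97 TU), so it is kept.
 */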
@@ -1163,18 +1224,22 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ te_data = &mvmvif->time_event_data;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
+ IWL_DEBUG_TE(mvm,
+ "No remain on channel event\n");
+ return;
+ }
+
iwl_mvm_cancel_session_protection(mvm, vif,
- mvmvif->time_event_data.id,
- mvmvif->time_event_data.link_id);
- iwl_mvm_p2p_roc_finished(mvm);
+ te_data->id,
+ te_data->link_id);
} else {
iwl_mvm_roc_station_remove(mvm, mvmvif);
- iwl_mvm_roc_finished(mvm);
}
-
- return;
+ goto cleanup_roc;
}
te_data = iwl_mvm_get_roc_te(mvm);
@@ -1185,13 +1250,21 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
- iwl_mvm_p2p_roc_finished(mvm);
- } else {
+ else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
- iwl_mvm_roc_finished(mvm);
- }
+
+cleanup_roc:
+ /*
+ * In case we get here before the ROC event started (so the
+ * status bit isn't set yet), set it here so that
+ * iwl_mvm_cleanup_roc() will clean things up properly.
+ */
+ set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
+ IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ &mvm->status);
+ iwl_mvm_cleanup_roc(mvm);
}
void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
@@ -1296,7 +1369,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
- int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index db986bfc4dc3..782ddc8c296b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -520,13 +520,49 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
}
}
+static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info)
+{
+ if (unlikely(!mvmsta))
+ return true;
+
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return true;
+
+ if (likely(ieee80211_is_data(hdr->frame_control) &&
+ mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
+ return false;
+
+ /*
+ * Not a data frame: use the host rate on old devices that
+ * can't possibly be doing MLO, since their firmware may select
+ * a bad rate. On devices that might be doing MLO we must let
+ * the FW pick (since we don't necessarily know the link), and
+ * there FW rate selection was fixed anyway.
+ */
+ return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+}
+
+static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *out_hdr = cmd;
+
+ memcpy(cmd, hdr, hdrlen);
+ if (addr3_override)
+ memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
+}
+
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
- struct ieee80211_sta *sta, u8 sta_id)
+ struct ieee80211_sta *sta, u8 sta_id,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_tx_cmd *dev_cmd;
@@ -556,12 +592,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/*
- * For data and mgmt packets rate info comes from the fw. Only
+ * For data and mgmt packets rate info comes from the fw (for
+ * new devices, older FW is somewhat broken for this). Only
* set rate/antenna for injected frames with fixed rate, or
- * when no sta is given.
+ * when no sta is given, or with older firmware.
*/
- if (unlikely(!sta ||
- info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
@@ -584,7 +620,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le16(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -599,7 +635,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le32(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -617,7 +653,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
out:
return dev_cmd;
@@ -820,7 +856,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
+ NULL);
if (!dev_cmd)
return -1;
@@ -869,10 +906,10 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
if (WARN_ON(!link_conf))
band = NL80211_BAND_2GHZ;
else
- band = link_conf->chandef.chan->band;
+ band = link_conf->chanreq.oper.chan->band;
rcu_read_unlock();
} else {
- band = mvmsta->vif->bss_conf.chandef.chan->band;
+ band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
}
lmac = iwl_mvm_get_lmac_id(mvm, band);
@@ -914,9 +951,15 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
- if (WARN_ON_ONCE(IS_ERR(next)))
- return -EINVAL;
- else if (next)
+
+ if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
+ return -ENOMEM;
+
+ if (WARN_ONCE(IS_ERR(next),
+ "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
+ return PTR_ERR(next);
+
+ if (next)
consume_skb(skb);
skb_list_walk_safe(next, tmp, next) {
@@ -972,8 +1015,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 tid;
- snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
- tcp_hdrlen(skb);
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
@@ -1140,7 +1182,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
*/
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
@@ -1172,7 +1215,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_probe_resp_set_noa(mvm, skb);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
- sta, mvmsta->deflink.sta_id);
+ sta, mvmsta->deflink.sta_id,
+ addr3_override);
if (!dev_cmd)
goto drop;
@@ -1294,9 +1338,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
+ struct ieee80211_vif *vif;
unsigned int payload_len;
int ret;
struct sk_buff *orig_skb = skb;
+ const u8 *addr3;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1307,26 +1353,59 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
memcpy(&info, skb->cb, sizeof(info));
if (!skb_is_gso(skb))
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
if (payload_len <= skb_shinfo(skb)->gso_size)
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
__skb_queue_head_init(&mpdus_skbs);
+ vif = info.control.vif;
+ if (!vif)
+ return -1;
+
ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
if (ret)
return ret;
WARN_ON(skb_queue_empty(&mpdus_skbs));
+ /*
+ * As described in IEEE Std 802.11-2020, table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ * Pass address 3 down to iwl_mvm_tx_mpdu() so it can be set
+ * in the command header, but preserve the original address 3
+ * in the skb header so that all the A-MSDU subframe headers
+ * can still be built correctly from it.
+ */
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ addr3 = vif->cfg.ap_addr;
+ break;
+ case NL80211_IFTYPE_AP:
+ addr3 = vif->addr;
+ break;
+ default:
+ addr3 = NULL;
+ break;
+ }
+
while (!skb_queue_empty(&mpdus_skbs)) {
+ struct ieee80211_hdr *hdr;
+ bool amsdu;
+
skb = __skb_dequeue(&mpdus_skbs);
+ hdr = (void *)skb->data;
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
+ amsdu ? addr3 : NULL);
if (ret) {
/* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
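
For illustration, the effect on a single A-MSDU MPDU in the STA case (addresses hypothetical):

/*
 * STA-mode A-MSDU, illustrative addresses:
 *
 *   skb header kept by driver : addr1 = AP, addr2 = STA, addr3 = DA
 *   TX cmd header sent to FW  : addr1 = AP, addr2 = STA, addr3 = BSSID
 *                               (vif->cfg.ap_addr via addr3_override)
 *
 * The skb keeps the original address 3 so the per-subframe DA/SA
 * headers can still be derived from it, while the MPDU on the air
 * carries the BSSID in address 3 as table 9-30 requires.
 */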
@@ -1587,12 +1666,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
+ *
+ * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
- return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
- tx_resp->frame_count) & 0xfff;
+ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count);
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return val & 0xFFFF;
+ return val & 0xFFF;
}
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
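
A quick illustration of the masking difference (the raw value is hypothetical):

/*
 * Example: raw SCD word 0x0001ABCD
 *   AX210 and later        : ssn = 0x0001ABCD & 0xFFFF = 0xABCD
 *   22000-series and lower : ssn = 0x0001ABCD & 0x0FFF = 0x0BCD
 */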
@@ -2125,6 +2210,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
tfd_cnt, pkt_len))
return;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
@@ -2160,12 +2251,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
-
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
- sta_id, le32_to_cpu(ba_res->flags),
- le16_to_cpu(ba_res->txed),
- le16_to_cpu(ba_res->done));
return;
}
@@ -2197,9 +2282,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
rcu_read_unlock();
- iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
- tid_data->rate_n_flags, false);
-
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
ba_notif->sta_addr, ba_notif->sta_id);
@@ -2212,6 +2294,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags, false);
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 91286018a69d..ab56ff87c6f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -249,6 +249,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
* This is the special case in which init is set and we call a callback in
* this case to clear the state indicating that station creation is in
* progress.
+ *
+ * Returns: 0 on success, or a negative error code on failure
*/
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index fa4a14546860..c8fc8b4fd85c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -119,7 +119,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 5f55efe64bf5..0fa92704cd14 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2c9b98c8184b..4a657036b9d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -502,12 +502,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -526,7 +530,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, _cfg, _name)
-static const struct iwl_dev_info iwl_dev_info_table[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
/* 9000 */
IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
@@ -1008,8 +1012,13 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_mtp_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1115,8 +1124,24 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_sc_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sc2_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sc2f_name),
#endif /* CONFIG_IWLMVM */
};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
+#endif
/*
* Read rf id and cdb info from prph register and store it
@@ -1143,6 +1168,20 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_trans->hw_cnv_id =
iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ /* In BZ, the MAC step must be read from the CNVI aux register */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+ u8 step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+
+ /* For BZ-U, take B step also when A step is indicated */
+ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
+ CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
+ step == SILICON_A_STEP)
+ step = SILICON_B_STEP;
+
+ iwl_trans->hw_rev_step = step;
+ iwl_trans->hw_rev |= step;
+ }
+
/* Read cdb info (also contains the jacket info if needed in the future */
iwl_trans->hw_wfpm_id =
iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
@@ -1236,7 +1275,7 @@ out:
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-static const struct iwl_dev_info *
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
@@ -1299,6 +1338,7 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
return NULL;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1382,6 +1422,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
+ iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
}
#if IS_ENABLED(CONFIG_IWLMVM)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 63e13577aff8..6c76b2dd6878 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1484,12 +1484,9 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->trans_cfg->gen2)
- _iwl_trans_pcie_gen2_stop_device(trans);
- else
- _iwl_trans_pcie_stop_device(trans, from_irq);
- }
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
+ !WARN_ON(trans->trans_cfg->gen2))
+ _iwl_trans_pcie_stop_device(trans, from_irq);
}
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
@@ -1718,6 +1715,7 @@ enable_msi:
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
+#if defined(CONFIG_SMP)
int iter_rx_q, i, ret, cpu, offset;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1738,6 +1736,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
"Failed to set affinity mask for IRQ %d\n",
trans_pcie->msix_entries[i].vector);
}
+#endif
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 6c2b37e56c78..fa8eba47dc4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1331,7 +1331,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
amsdu_pad = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index ca74b1b63cac..33973a60d0bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -271,9 +271,10 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
meta = NULL;
goto unmap;
}
- IWL_WARN(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys, (unsigned long long)phys);
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
ret = 0;
unmap:
@@ -352,7 +353,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
@@ -1601,8 +1602,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (read_ptr == tfd_num)
goto out;
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->read_ptr, tfd_num, ssn);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
@@ -1630,7 +1631,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
struct sk_buff *skb = txq->entries[read_ptr].skb;
- if (WARN_ON_ONCE(!skb))
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq->read_ptr, txq_id))
continue;
iwl_txq_free_tso_page(trans, skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
new file mode 100644
index 000000000000..5658471bdf0a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+iwlwifi-tests-y += module.o devinfo.o
+
+ccflags-y += -I$(srctree)/$(src)/../
+
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
new file mode 100644
index 000000000000..7aa47fce6e2d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the iwlwifi device info table
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "iwl-drv.h"
+#include "iwl-config.h"
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
+{
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
+ pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
+ di->cores);
+}
+
+static void devinfo_table_order(struct kunit *test)
+{
+ int idx;
+
+ for (idx = 0; idx < iwl_dev_info_table_size; idx++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[idx];
+ const struct iwl_dev_info *ret;
+
+ ret = iwl_pci_find_dev_info(di->device, di->subdevice,
+ di->mac_type, di->mac_step,
+ di->rf_type, di->cdb,
+ di->jacket, di->rf_id,
+ di->no_160, di->cores, di->rf_step);
+ if (ret != di) {
+ iwl_pci_print_dev_info("searched: ", di);
+ iwl_pci_print_dev_info("found: ", ret);
+ KUNIT_FAIL(test,
+ "unusable entry at index %d (found index %d instead)\n",
+ idx, (int)(ret - iwl_dev_info_table));
+ }
+ }
+}
+
+static struct kunit_case devinfo_test_cases[] = {
+ KUNIT_CASE(devinfo_table_order),
+ {}
+};
+
+static struct kunit_suite iwlwifi_devinfo = {
+ .name = "iwlwifi-devinfo",
+ .test_cases = devinfo_test_cases,
+};
+
+kunit_test_suite(iwlwifi_devinfo);
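
Because the device info table, its size, and iwl_pci_find_dev_info() are exported for KUnit above, additional cases can be dropped into the same suite. A minimal sketch of such an extra case (hypothetical, not part of this patch):

static void devinfo_table_nonempty(struct kunit *test)
{
	/* The ordering test above is only meaningful if the exported
	 * table actually has entries.
	 */
	KUNIT_EXPECT_GT(test, iwl_dev_info_table_size, 0U);
}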
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/module.c b/drivers/net/wireless/intel/iwlwifi/tests/module.c
new file mode 100644
index 000000000000..0c54f818e5a7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/module.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Module boilerplate for the iwlwifi kunit module.
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlwifi");
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index b52cce38115d..c4fe70e05b9b 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
"FW rev %s - Softmac protocol %x.%x\n",
fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
- "%s - %x.%x", fw_version,
+ "%.19s - %x.%x", fw_version,
priv->fw_var >> 8, priv->fw_var & 0xff);
}
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index c6084683aedd..687841b2fa2a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -704,6 +704,10 @@ static void p54_set_coverage_class(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops p54_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = p54_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = p54_start,
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ce0179b8ab36..0073b5e0f9c9 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -700,6 +700,7 @@ static struct spi_driver p54spi_driver = {
module_spi_driver(p54spi_driver);
+MODULE_DESCRIPTION("Prism54 SPI wireless driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 104d2b6dc9af..5a525da434c2 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
if (!cmdarray[i].cmdbuf) {
lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
ret = -1;
- goto done;
+ goto free_cmd_array;
}
}
@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- ret = 0;
+ return 0;
+free_cmd_array:
+ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+ if (cmdarray[i].cmdbuf) {
+ kfree(cmdarray[i].cmdbuf);
+ cmdarray[i].cmdbuf = NULL;
+ }
+ }
+ kfree(priv->cmd_array);
+ priv->cmd_array = NULL;
done:
return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 199d33ed3bb9..9cca69fe04d7 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -473,6 +473,10 @@ static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops lbtf_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = lbtf_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = lbtf_op_start,
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index da211372a481..b90f922f1cdc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -288,6 +288,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
wiphy_lock(priv->wdev.wiphy);
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
wiphy_unlock(priv->wdev.wiphy);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 90e401100898..c0c635e74bc5 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -392,12 +392,10 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
chan_list =
(struct mwifiex_ie_types_chan_list_param_set *) *buffer;
- memset(chan_list, 0,
- sizeof(struct mwifiex_ie_types_chan_list_param_set));
+ memset(chan_list, 0, struct_size(chan_list, chan_scan_param, 1));
chan_list->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
- chan_list->header.len = cpu_to_le16(
- sizeof(struct mwifiex_ie_types_chan_list_param_set) -
- sizeof(struct mwifiex_ie_types_header));
+ chan_list->header.len =
+ cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
chan_list->chan_scan_param[0].chan_number =
bss_desc->bcn_ht_oper->primary_chan;
chan_list->chan_scan_param[0].radio_type =
@@ -411,8 +409,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
- *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
- ret_len += sizeof(struct mwifiex_ie_types_chan_list_param_set);
+ *buffer += struct_size(chan_list, chan_scan_param, 1);
+ ret_len += struct_size(chan_list, chan_scan_param, 1);
}
if (bss_desc->bcn_bss_co_2040) {
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 3604abcbcff9..b909a7665e9c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -3359,7 +3359,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
}
if (!wowlan->patterns[i].pkt_offset) {
- if (!(byte_seq[0] & 0x01) &&
+ if (is_unicast_ether_addr(byte_seq) &&
(byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
continue;
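is_unicast_ether_addr() replaces the open-coded I/G-bit test here, and the same series converts an `ra[0] & 0x01` check in wmm.c below to is_multicast_ether_addr(). The helpers in <linux/etherdevice.h> reduce to that single bit test (simplified sketch):

static inline bool sketch_is_multicast_ether_addr(const u8 *addr)
{
	return addr[0] & 0x01;	/* I/G bit: group address when set */
}

static inline bool sketch_is_unicast_ether_addr(const u8 *addr)
{
	return !sketch_is_multicast_ether_addr(addr);
}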
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index f9c9fec7c792..9deaf59dcb62 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -566,14 +566,8 @@ mwifiex_verext_write(struct file *file, const char __user *ubuf,
int ret;
u32 versionstrsel;
struct mwifiex_private *priv = (void *)file->private_data;
- char buf[16];
- memset(buf, 0, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- ret = kstrtou32(buf, 10, &versionstrsel);
+ ret = kstrtou32_from_user(ubuf, count, 10, &versionstrsel);
if (ret)
return ret;
@@ -874,19 +868,14 @@ mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
{
bool timeshare_coex;
struct mwifiex_private *priv = file->private_data;
- char kbuf[16];
int ret;
if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
return -EOPNOTSUPP;
- memset(kbuf, 0, sizeof(kbuf));
-
- if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
- return -EFAULT;
-
- if (kstrtobool(kbuf, &timeshare_coex))
- return -EINVAL;
+ ret = kstrtobool_from_user(ubuf, count, &timeshare_coex);
+ if (ret)
+ return ret;
ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
@@ -970,9 +959,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
mwifiex_dfs_dir);
- if (!priv->dfs_dev_dir)
- return;
-
MWIFIEX_DFS_ADD_FILE(info);
MWIFIEX_DFS_ADD_FILE(debug);
MWIFIEX_DFS_ADD_FILE(getlog);
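Both debugfs write handlers above now use the *_from_user parsing helpers instead of a local bounce buffer, and the debugfs_create_dir() return check is dropped because debugfs errors are meant to be ignored by callers. What kstrtou32_from_user() roughly does internally, as a sketch (the real implementation lives in lib/kstrtox.c):

static int sketch_kstrtou32_from_user(const char __user *ubuf, size_t count,
				      unsigned int base, u32 *res)
{
	char buf[16];

	count = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;
	buf[count] = '\0';
	return kstrtou32(buf, base, res);
}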
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 62f3c9a52a1d..3adc447b715f 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -770,7 +770,7 @@ struct mwifiex_chan_scan_param_set {
struct mwifiex_ie_types_chan_list_param_set {
struct mwifiex_ie_types_header header;
- struct mwifiex_chan_scan_param_set chan_scan_param[1];
+ struct mwifiex_chan_scan_param_set chan_scan_param[];
} __packed;
struct mwifiex_ie_types_rxba_sync {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 318b42b1896f..175882485a19 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -28,11 +28,9 @@
#include <linux/inetdevice.h>
#include <linux/devcoredump.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index a2ddac363b10..0326b121747c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -664,15 +664,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Copy the current channel TLV to the command being
prepared */
- memcpy(chan_tlv_out->chan_scan_param + tlv_idx,
+ memcpy(&chan_tlv_out->chan_scan_param[tlv_idx],
tmp_chan_list,
- sizeof(chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/* Increment the TLV header length by the size
appended */
le16_unaligned_add_cpu(&chan_tlv_out->header.len,
- sizeof(
- chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/*
* The tlv buffer length is set to the number of bytes
@@ -2369,12 +2368,11 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX &&
bgscan_cfg_in->chan_list[chan_idx].chan_number;
chan_idx++) {
- temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
+ temp_chan = &chan_list_tlv->chan_scan_param[chan_idx];
/* Increment the TLV header length by size appended */
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
- sizeof(
- chan_list_tlv->chan_scan_param));
+ sizeof(*chan_list_tlv->chan_scan_param));
temp_chan->chan_number =
bgscan_cfg_in->chan_list[chan_idx].chan_number;
@@ -2413,7 +2411,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_scan_param);
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
chan_num *
- sizeof(chan_list_tlv->chan_scan_param[0]));
+ sizeof(*chan_list_tlv->chan_scan_param));
}
tlv_pos += (sizeof(chan_list_tlv->header)
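These sizeof() adjustments are required by the fw.h change: once chan_scan_param becomes a flexible array member, sizeof on the member itself no longer compiles, whereas with the old one-element array it happened to equal one element's size. sizeof(*chan_list_tlv->chan_scan_param) preserves that value explicitly. Sketch of the constraint:

struct hdr { unsigned short type, len; };
struct elem { unsigned char chan; };

struct list_tlv {
	struct hdr header;
	struct elem items[];	/* sizeof(((struct list_tlv *)0)->items)
				 * is now a build error */
};

/* size of one element, valid either way: */
size_t one = sizeof(*((struct list_tlv *)0)->items);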
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 00a5679b5c51..8558995e8fc7 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -871,7 +871,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
}
} else {
memcpy(ra, skb->data, ETH_ALEN);
- if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
+ if (is_multicast_ether_addr(ra) || mwifiex_is_skb_mgmt_frame(skb))
eth_broadcast_addr(ra);
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
}
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 13bcb123d122..ce8fea76dbb2 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5610,6 +5610,10 @@ static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops mwl8k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mwl8k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mwl8k_start,
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8a3a90d1bfac..758e380fdf1d 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -1613,8 +1613,8 @@ EXPORT_SYMBOL_GPL(mt76_get_sar_power);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
+ ieee80211_csa_finish(vif, 0);
}
void mt76_csa_finish(struct mt76_dev *dev)
@@ -1638,7 +1638,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active)
return;
- dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
void mt76_csa_check(struct mt76_dev *dev)
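ieee80211_csa_finish() and ieee80211_beacon_cntdwn_is_complete() now take a link ID for MLO; drivers that are not MLO-aware, like mt76 here, pass 0 for the default link. A hypothetical MLO-aware driver would thread the real link through instead:

/* sketch; link_id would come from the link whose countdown fired */
static void example_csa_done(struct ieee80211_vif *vif, unsigned int link_id)
{
	if (ieee80211_beacon_cntdwn_is_complete(vif, link_id))
		ieee80211_csa_finish(vif, link_id);
}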
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 89d738deea62..9b49267b1eab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -701,6 +701,10 @@ static void mt7603_tx(struct ieee80211_hw *hw,
}
const struct ieee80211_ops mt7603_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7603_tx,
.start = mt7603_start,
.stop = mt7603_stop,
@@ -728,6 +732,7 @@ const struct ieee80211_ops mt7603_ops = {
.set_sar_specs = mt7603_set_sar_specs,
};
+MODULE_DESCRIPTION("MediaTek MT7603E and MT76x8 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static int __init mt7603_init(void)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index dab16b5fc386..0971c164b57e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1375,4 +1375,5 @@ const struct ieee80211_ops mt7615_ops = {
};
EXPORT_SYMBOL_GPL(mt7615_ops);
+MODULE_DESCRIPTION("MediaTek MT7615E and MT7663E wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index ae34d019e588..c807bd8d928d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -353,7 +353,7 @@ static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
if (vif->bss_conf.csa_active)
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index ac036a072439..87a956ea3ad7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -270,4 +270,5 @@ static void __exit mt7615_exit(void)
module_init(mt7615_init);
module_exit(mt7615_exit);
+MODULE_DESCRIPTION("MediaTek MT7615E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 67cedd2555f9..9692890ba51b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -253,4 +253,5 @@ module_sdio_driver(mt7663s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663S (SDIO) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 04963b9f7498..df737e1ff27b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -281,4 +281,5 @@ module_usb_driver(mt7663u_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 0052d103e276..820b39590027 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -349,4 +349,5 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7663 SDIO/USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 96494ba2fdf7..3a20ba0d2492 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -3160,4 +3160,5 @@ exit:
EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x connac layer helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index c3a392a1a659..bcd24c9072ec 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -342,4 +342,5 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
return 0;
}
+MODULE_DESCRIPTION("MediaTek MT76x EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 9277ff38b7a2..79b7996ad1a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -59,6 +59,10 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static const struct ieee80211_ops mt76x0e_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
@@ -302,6 +306,7 @@ static const struct pci_device_id mt76x0e_device_table[] = {
MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7650E_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76x0e_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0422c332354a..bba44f289b4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -118,6 +118,10 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops mt76x0u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0u_start,
.stop = mt76x0u_stop,
@@ -336,6 +340,7 @@ err:
MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7610U_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver");
MODULE_LICENSE("GPL");
static struct usb_driver mt76x0_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 02da543dfc5c..b2cc44914294 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -293,4 +293,5 @@ void mt76x02u_init_mcu(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x02 MCU helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 8a0e8124b894..8020446be37b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -696,4 +696,5 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
+MODULE_DESCRIPTION("MediaTek MT76x02 helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 8c01855885ce..1fe5f5a02f93 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -506,4 +506,5 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+MODULE_DESCRIPTION("MediaTek MT76x2 EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index df85ebc6e1df..30959746e924 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -165,6 +165,7 @@ mt76x2e_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
MODULE_FIRMWARE(MT7662_FIRMWARE);
MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_DESCRIPTION("MediaTek MT76x2E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76pci_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index b38bb7a2362b..bfc8c69f43fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -132,6 +132,10 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
}
const struct ieee80211_ops mt76x2_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2_start,
.stop = mt76x2_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 55068f3252ef..ca78e14251c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -147,4 +147,5 @@ static struct usb_driver mt76x2u_driver = {
module_usb_driver(mt76x2u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x2U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ac07ed1f63a3..9fe390fdd730 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -103,6 +103,10 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
const struct ieee80211_ops mt76x2u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index c67c4f6ca2aa..d90f98c50039 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -228,7 +228,7 @@ mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -463,10 +463,10 @@ static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return false;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
mt7915_check_he_obss_narrow_bw_ru_iter,
&iter_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index aff4f21e843d..3039f53e2245 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -958,4 +958,5 @@ static void __exit mt7915_exit(void)
module_init(mt7915_init);
module_exit(mt7915_exit);
+MODULE_DESCRIPTION("MediaTek MT7915E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 0645417e0582..0d5adc5ddae3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -1418,5 +1418,6 @@ const struct ieee80211_ops mt7921_ops = {
};
EXPORT_SYMBOL_GPL(mt7921_ops);
+MODULE_DESCRIPTION("MediaTek MT7921 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 57903c6e4f11..dde26f327478 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -544,4 +544,5 @@ MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
MODULE_FIRMWARE(MT7922_ROM_PATCH);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 7591e54d2897..a9ce1e746b95 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -323,5 +323,6 @@ static struct sdio_driver mt7921s_driver = {
.drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
+MODULE_DESCRIPTION("MediaTek MT7921S (SDIO) wireless driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index e5258c74fc07..8b7c03c47598 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -336,5 +336,6 @@ static struct usb_driver mt7921u_driver = {
};
module_usb_driver(mt7921u_driver);
+MODULE_DESCRIPTION("MediaTek MT7921U (USB) wireless driver");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 8f1075da4903..125a1be3cb64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -1450,4 +1450,5 @@ const struct ieee80211_ops mt7925_ops = {
EXPORT_SYMBOL_GPL(mt7925_ops);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7925 core driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 734f31ee40d3..1fd99a856541 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -583,4 +583,5 @@ MODULE_FIRMWARE(MT7925_FIRMWARE_WM);
MODULE_FIRMWARE(MT7925_ROM_PATCH);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 9b885c5b3ed5..1e0f094fc905 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -329,4 +329,5 @@ static struct usb_driver mt7925u_driver = {
module_usb_driver(mt7925u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 502be22dbe36..7872fbae7252 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -684,9 +684,10 @@ mt792x_get_mac80211_ops(struct device *dev,
if (!(*fw_features & MT792x_FW_CAP_CNM)) {
ops->remain_on_channel = NULL;
ops->cancel_remain_on_channel = NULL;
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->mgd_prepare_tx = NULL;
@@ -862,5 +863,6 @@ int mt792x_load_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792x_load_firmware);
+MODULE_DESCRIPTION("MediaTek MT792x core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index 2dd283caed36..589a3efb9f8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -314,5 +314,6 @@ void mt792xu_disconnect(struct usb_interface *usb_intf)
}
EXPORT_SYMBOL_GPL(mt792xu_disconnect);
+MODULE_DESCRIPTION("MediaTek MT792x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 51deea84b642..234e6495871b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -1450,6 +1450,10 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7996_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7996_tx,
.start = mt7996_start,
.stop = mt7996_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 3c729b563edc..3ec813077dc2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -341,7 +341,7 @@ mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -4477,7 +4477,8 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
skb_put_data(skb, &req, sizeof(req));
/* cck and ofdm */
- skb_put_data(skb, &la.cck, sizeof(la.cck) + sizeof(la.ofdm));
+ skb_put_data(skb, &la.cck, sizeof(la.cck));
+ skb_put_data(skb, &la.ofdm, sizeof(la.ofdm));
/* ht20 */
skb_put_data(skb, &la.mcs[0], 8);
/* ht40 */
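The split of the cck/ofdm copy avoids reading sizeof(la.cck) + sizeof(la.ofdm) bytes out of the la.cck member alone, which overruns that field even though la.ofdm happens to follow it in memory; FORTIFY_SOURCE flags exactly this kind of cross-member copy. Sketch with a hypothetical layout:

struct limits {
	u8 cck[4];
	u8 ofdm[8];
};

static void fill(struct sk_buff *skb, const struct limits *la)
{
	skb_put_data(skb, la->cck, sizeof(la->cck));	/* each copy stays */
	skb_put_data(skb, la->ofdm, sizeof(la->ofdm));	/* within its field */
}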
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index c50d89a445e9..9f2abfa273c9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -650,4 +650,5 @@ static void __exit mt7996_exit(void)
module_init(mt7996_init);
module_exit(mt7996_exit);
+MODULE_DESCRIPTION("MediaTek MT7996 MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index c52d550f0c32..3e88798df017 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -672,4 +672,5 @@ EXPORT_SYMBOL_GPL(mt76s_init);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 1584665fe3cb..5a0bcb5071bd 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1128,4 +1128,5 @@ int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index fc76c66ff1a5..d6c01a2dd198 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -138,4 +138,5 @@ int __mt76_worker_fn(void *ptr)
}
EXPORT_SYMBOL_GPL(__mt76_worker_fn);
+MODULE_DESCRIPTION("MediaTek MT76x helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index c8d332456a6b..a7330576486b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -405,6 +405,10 @@ out:
}
const struct ieee80211_ops mt7601u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7601u_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mt7601u_start,
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index ad2509d8c99a..089102ed9ae5 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -356,7 +356,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
memcpy(vif->auth.ssid.ssid, sme->ssid, sme->ssid_len);
vif->auth.ssid.ssid_len = sme->ssid_len;
}
- vif->auth.key_mgmt_suite = cpu_to_be32(sme->crypto.akm_suites[0]);
+ vif->auth.key_mgmt_suite = sme->crypto.akm_suites[0];
ether_addr_copy(vif->auth.bssid, sme->bssid);
break;
@@ -1518,7 +1518,7 @@ static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type)
{
struct wilc_vif *vif;
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->iftype == type)
return vif;
}
@@ -1609,7 +1609,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cfg80211_unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- wilc_set_operation_mode(vif, 0, 0, 0);
mutex_lock(&wl->vif_mutex);
list_del_rcu(&vif->list);
wl->vif_num--;
@@ -1804,15 +1803,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
INIT_LIST_HEAD(&wl->rxq_head.list);
INIT_LIST_HEAD(&wl->vif_list);
+ wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ wiphy_name(wl->wiphy));
+ if (!wl->hif_workqueue) {
+ ret = -ENOMEM;
+ goto free_cfg;
+ }
vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto free_cfg;
+ goto free_hq;
}
return 0;
+free_hq:
+ destroy_workqueue(wl->hif_workqueue);
+
free_cfg:
wilc_wlan_cfg_deinit(wl);
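Moving the hif workqueue allocation from wilc_netdev_ifc_init() up into wilc_cfg80211_init() gives it a single owner and a strict reverse-order unwind: free_hq destroys the queue before free_cfg tears down the config layer. The call shape, for reference (the wiphy name replaces the old netdev-derived name):

wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
					    wiphy_name(wl->wiphy));
if (!wl->hif_workqueue)
	return -ENOMEM;	/* the real code jumps to free_cfg instead */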
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 839f142663e8..f1085ccb7eed 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -107,7 +107,7 @@ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC)
return NULL;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->idx == index)
return vif;
}
@@ -377,38 +377,49 @@ struct wilc_join_bss_param *
wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto)
{
- struct wilc_join_bss_param *param;
- struct ieee80211_p2p_noa_attr noa_attr;
- u8 rates_len = 0;
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ const struct cfg80211_bss_ies *ies;
+ struct wilc_join_bss_param *param;
+ u8 rates_len = 0;
+ int ies_len;
int ret;
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
+ if (!ies_data) {
+ rcu_read_unlock();
+ kfree(param);
+ return NULL;
+ }
+ ies_len = ies->len;
+ rcu_read_unlock();
+
param->beacon_period = cpu_to_le16(bss->beacon_interval);
param->cap_info = cpu_to_le16(bss->capability);
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
ether_addr_copy(param->bssid, bss->bssid);
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
if (ssid_elm) {
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
}
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
if (tim_elm && tim_elm[1] >= 2)
param->dtim_period = tim_elm[3];
memset(param->p_suites, 0xFF, 3);
memset(param->akm_suites, 0xFF, 3);
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
if (rates_ie) {
rates_len = rates_ie[1];
if (rates_len > WILC_MAX_RATES_SUPPORTED)
@@ -419,7 +430,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
- ies->data, ies->len);
+ ies_data, ies_len);
if (supp_rates_ie) {
u8 ext_rates = supp_rates_ie[1];
@@ -434,11 +445,11 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
}
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
if (ht_ie)
param->ht_capable = true;
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
@@ -462,7 +473,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wmm_ie) {
struct ieee80211_wmm_param_ie *ie;
@@ -477,13 +488,13 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wpa_ie) {
param->mode_802_11i = 1;
param->rsn_found = true;
}
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
if (rsn_ie) {
int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
int offset = 8;
@@ -517,6 +528,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
}
+ kfree(ies_data);
return (void *)param;
}
@@ -1555,26 +1567,28 @@ int wilc_deinit(struct wilc_vif *vif)
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
+ hif_drv = vif->hif_drv;
if (!hif_drv) {
netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_ntwrk_info, false);
if (IS_ERR(msg))
- return;
+ goto out;
msg->body.net_info.frame_len = get_unaligned_le16(&buffer[6]) - 1;
msg->body.net_info.rssi = buffer[8];
@@ -1583,7 +1597,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
GFP_KERNEL);
if (!msg->body.net_info.mgmt) {
kfree(msg);
- return;
+ goto out;
}
result = wilc_enqueue_work(msg);
@@ -1592,43 +1606,41 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg->body.net_info.mgmt);
kfree(msg);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
mutex_lock(&wilc->deinit_lock);
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
- if (!vif) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (!vif)
+ goto out;
hif_drv = vif->hif_drv;
if (!hif_drv) {
- mutex_unlock(&wilc->deinit_lock);
- return;
+ goto out;
}
if (!hif_drv->conn_info.conn_result) {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
- mutex_unlock(&wilc->deinit_lock);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_gnrl_async_info, false);
- if (IS_ERR(msg)) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (IS_ERR(msg))
+ goto out;
msg->body.mac_info.status = buffer[7];
result = wilc_enqueue_work(msg);
@@ -1636,32 +1648,36 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
kfree(msg);
}
-
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
mutex_unlock(&wilc->deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- int id;
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
- if (!hif_drv)
- return;
+ hif_drv = vif->hif_drv;
+ if (!hif_drv)
+ goto out;
if (hif_drv->usr_scan_req.scan_result) {
struct host_if_msg *msg;
msg = wilc_alloc_work(vif, handle_scan_complete, false);
if (IS_ERR(msg))
- return;
+ goto out;
result = wilc_enqueue_work(msg);
if (result) {
@@ -1670,6 +1686,8 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
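All three event handlers above now hold the SRCU read lock across the vif lookup and funnel every exit through the `out` label, so the lock is always dropped exactly once. The canonical read-side shape they follow:

int srcu_idx;

srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
	goto out;
/* ... vif stays valid here; list updates wait in synchronize_srcu() ... */
out:
srcu_read_unlock(&wilc->srcu, srcu_idx);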
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 91d71e0f7ef2..710e29bea560 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -96,7 +96,7 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
struct wilc_vif *vif;
struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->iftype == WILC_STATION_MODE)
if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
@@ -132,7 +132,7 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (!is_zero_ether_addr(vif->bssid))
ret_val++;
}
@@ -140,6 +140,19 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
+static void wilc_wake_tx_queues(struct wilc *wl)
+{
+ int srcu_idx;
+ struct wilc_vif *ifc;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ wilc_for_each_vif(wl, ifc) {
+ if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
+ netif_wake_queue(ifc->ndev);
+ }
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+}
+
static int wilc_txq_task(void *vp)
{
int ret;
@@ -160,17 +173,7 @@ static int wilc_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(wl, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- int srcu_idx;
- struct wilc_vif *ifc;
-
- srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(ifc, &wl->vif_list,
- list) {
- if (ifc->mac_opened &&
- netif_queue_stopped(ifc->ndev))
- netif_wake_queue(ifc->ndev);
- }
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ wilc_wake_tx_queues(wl);
}
if (ret != WILC_VMM_ENTRY_FULL_RETRY)
break;
@@ -284,7 +287,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, &b, 1, 0, 0))
goto fail;
- b = WILC_FW_PREAMBLE_SHORT;
+ b = WILC_FW_PREAMBLE_AUTO;
if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, &b, 1, 0, 0))
goto fail;
@@ -416,7 +419,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
b = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, &b, 1,
- 1, 1))
+ 1, 0))
goto fail;
return 0;
@@ -665,7 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
/* Verify MAC Address is not already in use: */
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, tmp_vif) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
@@ -768,7 +771,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->mac_opened)
netif_stop_queue(vif->ndev);
}
@@ -811,19 +814,21 @@ static int wilc_mac_close(struct net_device *ndev)
void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
u32 pkt_offset)
{
- unsigned int frame_len = 0;
- int stats;
unsigned char *buff_to_send = NULL;
- struct sk_buff *skb;
struct net_device *wilc_netdev;
+ unsigned int frame_len = 0;
struct wilc_vif *vif;
+ struct sk_buff *skb;
+ int srcu_idx;
+ int stats;
if (!wilc)
return;
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
- return;
+ goto out;
buff += pkt_offset;
vif = netdev_priv(wilc_netdev);
@@ -834,7 +839,7 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
skb = dev_alloc_skb(frame_len);
if (!skb)
- return;
+ goto out;
skb->dev = wilc_netdev;
@@ -847,6 +852,8 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
stats = netif_rx(skb);
netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
@@ -855,7 +862,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
u32 type_bit = BIT(type >> 4);
@@ -890,8 +897,7 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- struct wilc_vif *vif;
- int srcu_idx, ifc_cnt = 0;
+ struct wilc_vif *vif, *vif_tmp;
if (!wilc)
return;
@@ -901,32 +907,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
+ mutex_lock(&wilc->vif_mutex);
+ list_del_rcu(&vif->list);
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
if (vif->ndev)
unregister_netdev(vif->ndev);
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
destroy_workqueue(wilc->hif_workqueue);
- while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
- mutex_lock(&wilc->vif_mutex);
- if (wilc->vif_num <= 0) {
- mutex_unlock(&wilc->vif_mutex);
- break;
- }
- vif = wilc_get_wl_to_vif(wilc);
- if (!IS_ERR(vif))
- list_del_rcu(&vif->list);
-
- wilc->vif_num--;
- mutex_unlock(&wilc->vif_mutex);
- synchronize_srcu(&wilc->srcu);
- ifc_cnt++;
- }
-
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
wiphy_unregister(wilc->wiphy);
@@ -941,7 +934,7 @@ static u8 wilc_get_available_idx(struct wilc *wl)
int srcu_idx;
srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->idx == 0)
idx = 1;
else
@@ -989,13 +982,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
goto error;
}
- wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
- ndev->name);
- if (!wl->hif_workqueue) {
- ret = -ENOMEM;
- goto unregister_netdev;
- }
-
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
vif->idx = wilc_get_available_idx(wl);
@@ -1008,15 +994,15 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
return vif;
-unregister_netdev:
+error:
if (rtnl_locked)
cfg80211_unregister_netdevice(ndev);
else
unregister_netdev(ndev);
- error:
free_netdev(ndev);
return ERR_PTR(ret);
}
+MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index aafe3dc44ac6..5937d6d45695 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -13,6 +13,7 @@
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
+#include <linux/rculist.h>
#include "hif.h"
#include "wlan.h"
@@ -29,6 +30,11 @@
#define TX_BACKOFF_WEIGHT_MS 1
+#define wilc_for_each_vif(w, v) \
+ struct wilc *_w = w; \
+ list_for_each_entry_srcu(v, &_w->vif_list, list, \
+ srcu_read_lock_held(&_w->srcu))
+
struct wilc_wfi_stats {
unsigned long rx_packets;
unsigned long tx_packets;
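wilc_for_each_vif() wraps list_for_each_entry_srcu() with a lockdep expression asserting that the caller is inside an SRCU read-side section. Note the macro also declares a local `_w`, so it must start a statement and cannot appear twice in the same scope. Usage sketch:

int srcu_idx = srcu_read_lock(&wl->srcu);
struct wilc_vif *vif;

wilc_for_each_vif(wl, vif) {
	/* vif is protected by SRCU here */
}
srcu_read_unlock(&wl->srcu, srcu_idx);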
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 0d13e3e46e98..d6d394693090 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -984,4 +984,5 @@ static struct sdio_driver wilc_sdio_driver = {
module_driver(wilc_sdio_driver,
sdio_register_driver,
sdio_unregister_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 77b4cdff73c3..61c3572ce321 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(enable_crc16,
#define WILC_SPI_RSP_HDR_EXTRA_DATA 8
struct wilc_spi {
- bool isinit; /* true if SPI protocol has been configured */
+ bool isinit; /* true if wilc_spi_init was successful */
bool probing_crc; /* true if we're probing chip's CRC config */
bool crc7_enabled; /* true if crc7 is currently enabled */
bool crc16_enabled; /* true if crc16 is currently enabled */
@@ -55,6 +55,8 @@ struct wilc_spi {
static const struct wilc_hif_func wilc_hif_spi;
static int wilc_spi_reset(struct wilc *wilc);
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc);
+static int wilc_validate_chipid(struct wilc *wilc);
/********************************************
*
@@ -192,11 +194,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
/* assert ENABLE: */
gpiod_set_value(gpios->enable, 1);
mdelay(5);
- /* assert RESET: */
- gpiod_set_value(gpios->reset, 1);
- } else {
/* deassert RESET: */
gpiod_set_value(gpios->reset, 0);
+ } else {
+ /* assert RESET: */
+ gpiod_set_value(gpios->reset, 1);
/* deassert ENABLE: */
gpiod_set_value(gpios->enable, 0);
}
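The assert/deassert swap reads correctly once gpiod logical polarity is taken into account: gpiod_set_value() takes logical levels, and with the reset line flagged GPIO_ACTIVE_LOW in the device tree (an assumption about the board description), value 1 means "assert reset", i.e. drive the pin low:

/* sketch, assuming an active-low RESET annotation in DT */
gpiod_set_value(gpios->reset, 1);	/* assert: physical pin low */
gpiod_set_value(gpios->reset, 0);	/* deassert: physical pin high */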
@@ -232,8 +234,27 @@ static int wilc_bus_probe(struct spi_device *spi)
}
clk_prepare_enable(wilc->rtc_clk);
+ dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
+ enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
+
+ /* we need power to configure the bus protocol and to read the chip id: */
+
+ wilc_wlan_power(wilc, true);
+
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret)
+ goto power_down;
+
+ ret = wilc_validate_chipid(wilc);
+ if (ret)
+ goto power_down;
+
+ wilc_wlan_power(wilc, false);
return 0;
+power_down:
+ clk_disable_unprepare(wilc->rtc_clk);
+ wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
free:
@@ -273,6 +294,7 @@ static struct spi_driver wilc_spi_driver = {
.remove = wilc_bus_remove,
};
module_spi_driver(wilc_spi_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SPI wireless driver");
MODULE_LICENSE("GPL");
static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
@@ -300,7 +322,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -343,7 +364,6 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -381,8 +401,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
-
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
if (ret < 0)
@@ -476,7 +494,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
********************************************/
static u8 wilc_get_crc7(u8 *buffer, u32 len)
{
- return crc7_be(0xfe, buffer, len);
+ return crc7_be(0xfe, buffer, len) | 0x01;
}
static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
@@ -1105,26 +1123,34 @@ static int wilc_spi_deinit(struct wilc *wilc)
static int wilc_spi_init(struct wilc *wilc, bool resume)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
- u32 reg;
- u32 chipid;
- int ret, i;
+ int ret;
if (spi_priv->isinit) {
/* Confirm we can read chipid register without error: */
- ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
- if (ret == 0)
+ if (wilc_validate_chipid(wilc) == 0)
return 0;
-
- dev_err(&spi->dev, "Fail cmd read chip id...\n");
}
wilc_wlan_power(wilc, true);
- /*
- * configure protocol
- */
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret) {
+ wilc_wlan_power(wilc, false);
+ return ret;
+ }
+
+ spi_priv->isinit = true;
+
+ return 0;
+}
+
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u32 reg;
+ int ret, i;
/*
* Infer the CRC settings that are currently in effect. This
@@ -1176,6 +1202,15 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
spi_priv->probing_crc = false;
+ return 0;
+}
+
+static int wilc_validate_chipid(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ u32 chipid;
+ int ret;
+
/*
* make sure can read chip id without protocol error
*/
@@ -1184,9 +1219,10 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
return ret;
}
-
- spi_priv->isinit = true;
-
+ if (!is_wilc1000(chipid)) {
+ dev_err(&spi->dev, "Unknown chip id 0x%x\n", chipid);
+ return -ENODEV;
+ }
return 0;
}
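ORing in 0x01 matters because crc7_be() leaves the 7-bit CRC in bits 7..1 of the byte; the low bit is an end bit the SPI command framing expects to be 1, the same convention SD/MMC commands use (compare the similar `crc7_be(...) | 0x01` in the MMC-over-SPI code). Shape of the resulting byte:

/* bits 7..1: CRC7 of the command buffer, bit 0: end bit (always 1) */
u8 crc = crc7_be(0xfe, buffer, len) | 0x01;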
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 9eb115c79c90..a9e872a7b2c3 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -12,11 +12,6 @@
#define WAKE_UP_TRIAL_RETRY 10000
-static inline bool is_wilc1000(u32 id)
-{
- return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
-}
-
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
{
mutex_lock(&wilc->hif_cs);
@@ -730,7 +725,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
mutex_lock(&wilc->txq_add_to_head_cs);
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list)
+ wilc_for_each_vif(wilc, vif)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
srcu_read_unlock(&wilc->srcu, srcu_idx);
@@ -1198,27 +1193,32 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (ret) {
- netdev_err(vif->ndev, "Error while reading reg\n");
+ ret = wilc->hif_func->hif_read_reg(wilc, GLOBAL_MODE_CONTROL, &reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
- (reg | WILC_ABORT_REQ_BIT));
- if (ret) {
- netdev_err(vif->ndev, "Error while writing reg\n");
+ reg &= ~WILC_GLOBAL_MODE_ENABLE_WIFI;
+ ret = wilc->hif_func->hif_write_reg(wilc, GLOBAL_MODE_CONTROL, reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc, PWR_SEQ_MISC_CTRL, &reg);
+ if (ret)
+ goto release;
+
+ reg &= ~WILC_PWR_SEQ_ENABLE_WIFI_SLEEP;
+ ret = wilc->hif_func->hif_write_reg(wilc, PWR_SEQ_MISC_CTRL, reg);
+ if (ret)
+ goto release;
+
+ ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
goto release;
}
- reg = BIT(0);
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
+ (reg | WILC_ABORT_REQ_BIT));
if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
goto release;
@@ -1410,7 +1410,7 @@ static int init_chip(struct net_device *dev)
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
chipid = wilc_get_chipid(wilc, true);
@@ -1440,7 +1440,7 @@ static int init_chip(struct net_device *dev)
}
release:
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
return ret;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index a72cd5cac81d..54643d8fef04 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -156,6 +156,12 @@
#define WILC_GP_REG_0 0x149c
#define WILC_GP_REG_1 0x14a0
+#define GLOBAL_MODE_CONTROL 0x1614
+#define PWR_SEQ_MISC_CTRL 0x3008
+
+#define WILC_GLOBAL_MODE_ENABLE_WIFI BIT(0)
+#define WILC_PWR_SEQ_ENABLE_WIFI_SLEEP BIT(28)
+
#define WILC_HAVE_SDIO_IRQ_GPIO BIT(0)
#define WILC_HAVE_USE_PMU BIT(1)
#define WILC_HAVE_SLEEP_CLK_SRC_RTC BIT(2)
@@ -403,6 +409,11 @@ struct wilc_cfg_rsp {
struct wilc_vif;
+static inline bool is_wilc1000(u32 id)
+{
+ return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
+}
+
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size);
int wilc_wlan_start(struct wilc *wilc);
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 506d2f31efb5..641f847d47ab 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <net/ieee80211_radiotap.h>
@@ -685,6 +684,10 @@ static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
}
static const struct ieee80211_ops plfxlc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = plfxlc_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = plfxlc_op_start,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 3b283e93a13e..76b07db284f8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -478,7 +478,7 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
wiphy_lock(priv_to_wiphy(vif->mac));
- cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
wiphy_unlock(priv_to_wiphy(vif->mac));
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 13dd672b825e..42e21e9f303b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1705,6 +1705,10 @@ static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2400pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index ecddda4c471e..36ddc5a69fa4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -2003,6 +2003,10 @@ static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2500pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index 13fdcff0ad66..09923765e2db 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -1794,6 +1794,10 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2500usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index dcb56f708a5f..14c45aba836f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -287,6 +287,10 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 7118d4f9038d..701ba54bf3e5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -132,6 +132,10 @@ static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
}
static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index b2a8e75a901b..160bef79acdb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -629,6 +629,10 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
index ad95f9eba301..1000fbfb94b8 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
@@ -197,10 +197,7 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
transfer += header_length;
} else {
skb_push(skb, iv_len + align);
- if (align < icv_len)
- skb_put(skb, icv_len - align);
- else if (align > icv_len)
- skb_trim(skb, rxdesc->size + iv_len + icv_len);
+ skb_put(skb, icv_len - align);
/* Move ieee80211 header */
memmove(skb->data + transfer,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 483723bf514b..d1cd5694e3c7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2872,6 +2872,10 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt61pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index dfa9d5213898..b79dda952a33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2291,6 +2291,10 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt73usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index f6c25a52b69a..77b6cb7e1f6b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1607,6 +1607,10 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops rtl8180_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8180_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8180_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 04945f905d6d..78d99afa373d 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1377,6 +1377,10 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev,
static const struct ieee80211_ops rtl8187_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8187_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8187_start,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 4695fb4e2d2d..fd92d23c43d9 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -6,6 +6,7 @@
*/
#include <asm/byteorder.h>
+#include <linux/average.h>
#define RTL8XXXU_DEBUG_REG_WRITE 0x01
#define RTL8XXXU_DEBUG_REG_READ 0x02
@@ -498,6 +499,7 @@ struct rtl8xxxu_txdesc40 {
#define DESC_RATE_ID_SHIFT 16
#define DESC_RATE_ID_MASK 0xf
#define TXDESC_NAVUSEHDR BIT(20)
+#define TXDESC_EN_DESC_ID BIT(21)
#define TXDESC_SEC_RC4 0x00400000
#define TXDESC_SEC_AES 0x00c00000
#define TXDESC_PKT_OFFSET_SHIFT 26
@@ -1774,6 +1776,8 @@ struct rtl8xxxu_cfo_tracking {
#define RTL8XXXU_HW_LED_CONTROL 2
#define RTL8XXXU_MAX_MAC_ID_NUM 128
#define RTL8XXXU_BC_MC_MACID 0
+#define RTL8XXXU_BC_MC_MACID1 1
+#define RTL8XXXU_MAX_SEC_CAM_NUM 64
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
@@ -1855,6 +1859,8 @@ struct rtl8xxxu_priv {
int next_mbox;
int nr_out_eps;
+ /* Ensure no stations are added or deleted while iterating */
+ struct mutex sta_mutex;
struct mutex h2c_mutex;
/* Protect the indirect register accesses of RTL8710BU. */
struct mutex syson_indirect_access_mutex;
@@ -1889,18 +1895,14 @@ struct rtl8xxxu_priv {
u8 pi_enabled:1;
u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
- u8 rssi_level;
DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS);
DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS);
- /*
- * Only one virtual interface permitted because only STA mode
- * is supported and no iface_combinations are provided.
- */
- struct ieee80211_vif *vif;
+
+ struct ieee80211_vif *vifs[2];
struct delayed_work ra_watchdog;
struct work_struct c2hcmd_work;
struct sk_buff_head c2hcmd_queue;
- struct work_struct update_beacon_work;
+ struct delayed_work update_beacon_work;
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
struct rtl8xxxu_cfo_tracking cfo_tracking;
@@ -1910,13 +1912,23 @@ struct rtl8xxxu_priv {
char led_name[32];
struct led_classdev led_cdev;
DECLARE_BITMAP(mac_id_map, RTL8XXXU_MAX_MAC_ID_NUM);
+ DECLARE_BITMAP(cam_map, RTL8XXXU_MAX_SEC_CAM_NUM);
};
+DECLARE_EWMA(rssi, 10, 16);
+
struct rtl8xxxu_sta_info {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
u8 macid;
+ struct ewma_rssi avg_rssi;
+ u8 rssi_level;
+};
+
+struct rtl8xxxu_vif {
+ int port_num;
+ u8 hw_key_idx;
};
struct rtl8xxxu_rx_urb {
@@ -1986,11 +1998,13 @@ struct rtl8xxxu_fileops {
u8 init_reg_rxfltmap:1;
u8 init_reg_pkt_life_time:1;
u8 init_reg_hmtfr:1;
+ u8 supports_concurrent:1;
u8 ampdu_max_time;
u8 ustime_tsf_edca;
u16 max_aggr_num;
u8 supports_ap:1;
u16 max_macid_num;
+ u16 max_sec_cam_num;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
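The header now instantiates DECLARE_EWMA(rssi, 10, 16), which generates struct ewma_rssi plus ewma_rssi_init/add/read for a fixed-point exponentially weighted moving average (10 fractional bits, weight reciprocal 16), and moves RSSI state from the single priv-wide rssi_level into per-station avg_rssi/rssi_level fields. A userspace approximation of the update rule, assuming the usual shift-based kernel formulation from linux/average.h:

#include <stdio.h>

#define EWMA_PRECISION    10 /* fractional bits */
#define EWMA_WEIGHT_SHIFT  4 /* weight reciprocal 16 = 1 << 4 */

struct ewma {
	unsigned long internal;
};

static void ewma_init(struct ewma *e) { e->internal = 0; }

static void ewma_add(struct ewma *e, unsigned long val)
{
	unsigned long in = e->internal;

	/* avg += (val - avg) / 16, carried in fixed point */
	e->internal = in ? ((in << EWMA_WEIGHT_SHIFT) - in +
			    (val << EWMA_PRECISION)) >> EWMA_WEIGHT_SHIFT
			 : (val << EWMA_PRECISION);
}

static unsigned long ewma_read(const struct ewma *e)
{
	return e->internal >> EWMA_PRECISION;
}

int main(void)
{
	struct ewma e;

	ewma_init(&e);
	for (int i = 0; i < 32; i++)
		ewma_add(&e, 40);
	printf("avg = %lu\n", ewma_read(&e)); /* steady state: 40 */
	return 0;
}

Because the average is unsigned, the driver feeds it -rx_status->signal (the RSSI magnitude) on receive and negates ewma_rssi_read() back when reporting, as the core.c hunks below show.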
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
index 6d0f975f891b..afe9cc1b49dc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
@@ -1699,7 +1699,7 @@ void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *s
/* We only use macid 0, so only the first item is relevant.
* AP mode will use more of them if it's ever implemented.
*/
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION)
+ if (!priv->vifs[0] || priv->vifs[0]->type == NL80211_IFTYPE_STATION)
items = 1;
for (macid = 0; macid < items; macid++) {
@@ -1882,6 +1882,7 @@ struct rtl8xxxu_fileops rtl8188eu_fops = {
.has_tx_report = 1,
.init_reg_pkt_life_time = 1,
.gen2_thermal_meter = 1,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
/*
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
index 1e1c8fa194cb..464216d007ce 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -1751,6 +1751,8 @@ struct rtl8xxxu_fileops rtl8188fu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 16,
+ .supports_concurrent = 1,
.adda_1t_init = 0x03c00014,
.adda_1t_path_on = 0x03c00014,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index b30a9a513cb8..3ee7d8f87da6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -613,6 +613,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 47bcaec6f2db..63b73ace27ec 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1769,6 +1769,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.needs_full_init = 1,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
index 28e93835e05a..21e4204769d0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
@@ -2014,26 +2014,40 @@ static int rtl8192fu_led_brightness_set(struct led_classdev *led_cdev,
struct rtl8xxxu_priv *priv = container_of(led_cdev,
struct rtl8xxxu_priv,
led_cdev);
- u16 ledcfg;
+ u32 ledcfg;
/* Values obtained by observing the USB traffic from the Windows driver. */
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_0, 0x20080);
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_1, 0x1b0000);
- ledcfg = rtl8xxxu_read16(priv, REG_LEDCFG0);
+ ledcfg = rtl8xxxu_read32(priv, REG_LEDCFG0);
+
+ /* Comfast CF-826F uses LED1. Asus USB-N13 C1 uses LED0. Set both. */
+
+ u32p_replace_bits(&ledcfg, LED_GPIO_ENABLE, LEDCFG0_LED2EN);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED0_IO_MODE);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED1_IO_MODE);
if (brightness == LED_OFF) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
} else if (brightness == LED_ON) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7) | BIT(11);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED1SV);
} else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
- /* Value obtained by brute force. */
- ledcfg = BIT(8) | BIT(9);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
}
- rtl8xxxu_write16(priv, REG_LEDCFG0, ledcfg);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, ledcfg);
return 0;
}
@@ -2081,6 +2095,7 @@ struct rtl8xxxu_fileops rtl8192fu_fops = {
.max_aggr_num = 0x1f1f,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.trxff_boundary = 0x3f3f,
.pbp_rx = PBP_PAGE_SIZE_256,
.pbp_tx = PBP_PAGE_SIZE_256,
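The LED rework above replaces the magic BIT() constants with named GENMASK fields updated through u32p_replace_bits(), which clears one field of a 32-bit word and writes a new value into it without disturbing neighbouring bits. A self-contained sketch of the replace-bits arithmetic; the field layout is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 3-bit mode field at bits 10:8, like LEDCFG0_LED1CM. */
#define LED1_MODE_MASK 0x00000700u

static void u32_replace_bits(uint32_t *word, uint32_t val, uint32_t mask)
{
	/* Shift distance = position of the mask's lowest set bit. */
	unsigned int shift = __builtin_ctz(mask);

	*word = (*word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t ledcfg = 0xffffffffu;

	u32_replace_bits(&ledcfg, 0x3, LED1_MODE_MASK); /* TX/RX-event mode */
	printf("0x%08x\n", ledcfg); /* 0xfffffbff */
	return 0;
}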
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
index 871b8cca8a18..46d57510e9fc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
@@ -1877,6 +1877,7 @@ struct rtl8xxxu_fileops rtl8710bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x03c00016,
.adda_1t_path_on = 0x03c00016,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 15a30e496221..ad1bb9377ca2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -510,6 +510,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 954369ed6226..9640c841d20a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1744,6 +1744,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 180907319e8c..efddbe402f0d 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1633,33 +1633,41 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
- enum nl80211_iftype linktype)
+ enum nl80211_iftype linktype, int port_num)
{
- u8 val8;
-
- val8 = rtl8xxxu_read8(priv, REG_MSR);
- val8 &= ~MSR_LINKTYPE_MASK;
+ u8 val8, type;
switch (linktype) {
case NL80211_IFTYPE_UNSPECIFIED:
- val8 |= MSR_LINKTYPE_NONE;
+ type = MSR_LINKTYPE_NONE;
break;
case NL80211_IFTYPE_ADHOC:
- val8 |= MSR_LINKTYPE_ADHOC;
+ type = MSR_LINKTYPE_ADHOC;
break;
case NL80211_IFTYPE_STATION:
- val8 |= MSR_LINKTYPE_STATION;
+ type = MSR_LINKTYPE_STATION;
break;
case NL80211_IFTYPE_AP:
- val8 |= MSR_LINKTYPE_AP;
+ type = MSR_LINKTYPE_AP;
break;
default:
- goto out;
+ return;
+ }
+
+ switch (port_num) {
+ case 0:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x0c;
+ val8 |= type;
+ break;
+ case 1:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x03;
+ val8 |= type << 2;
+ break;
+ default:
+ return;
}
rtl8xxxu_write8(priv, REG_MSR, val8);
-out:
- return;
}
static void
@@ -3572,27 +3580,47 @@ void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
-static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv, int port_num)
{
int i;
u16 reg;
- reg = REG_MACID;
+ switch (port_num) {
+ case 0:
+ reg = REG_MACID;
+ break;
+ case 1:
+ reg = REG_MACID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
- rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+ rtl8xxxu_write8(priv, reg + i, priv->vifs[port_num]->addr[i]);
return 0;
}
-static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid, int port_num)
{
int i;
u16 reg;
dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
- reg = REG_BSSID;
+ switch (port_num) {
+ case 0:
+ reg = REG_BSSID;
+ break;
+ case 1:
+ reg = REG_BSSID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
rtl8xxxu_write8(priv, reg + i, bssid[i]);
@@ -4025,10 +4053,13 @@ static inline u8 rtl8xxxu_get_macid(struct rtl8xxxu_priv *priv,
{
struct rtl8xxxu_sta_info *sta_info;
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION || !sta)
+ if (!sta)
return 0;
sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ if (!sta_info)
+ return 0;
+
return sta_info->macid;
}
@@ -4235,9 +4266,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
}
- rtl8xxxu_set_mac(priv);
- rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
-
/*
* Configure initial WMAC settings
*/
@@ -4511,6 +4539,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8188e_ra_info_init_all(&priv->ra_info);
set_bit(RTL8XXXU_BC_MC_MACID, priv->mac_id_map);
+ set_bit(RTL8XXXU_BC_MC_MACID1, priv->mac_id_map);
exit:
return ret;
@@ -4530,8 +4559,10 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
* This is a bit of a hack - the lower bits of the cipher
* suite selector happens to match the cipher index in the CAM
*/
- addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+ addr = key->hw_key_idx << CAM_CMD_KEY_SHIFT;
ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ctrl |= BIT(6);
for (j = 5; j >= 0; j--) {
switch (j) {
@@ -4574,7 +4605,7 @@ static int rtl8xxxu_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct rtl8xxxu_priv *priv = hw->priv;
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
return 0;
}
@@ -4839,10 +4870,9 @@ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__, rate_cfg);
- while (rate_cfg) {
- rate_cfg = (rate_cfg >> 1);
- rate_idx++;
- }
+ if (rate_cfg)
+ rate_idx = __fls(rate_cfg);
+
rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
}
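The removed loop counts shifts until rate_cfg is empty, so with rate_idx starting at zero it yields one more than the index of the highest set bit; __fls() returns that index directly and is undefined for a zero argument, hence the if (rate_cfg) guard. A self-contained comparison of the two forms:

#include <stdio.h>

/* Index of the most significant set bit; undefined for v == 0.
 * Stands in for the kernel's __fls().
 */
static int my_fls(unsigned int v)
{
	return 31 - __builtin_clz(v);
}

int main(void)
{
	unsigned int rate_cfg = 0x150; /* arbitrary basic-rate bitmap */
	unsigned int v = rate_cfg;
	int rate_idx = 0;

	while (v) { /* removed loop: counts shifts until empty */
		v >>= 1;
		rate_idx++;
	}
	/* the loop yields the highest bit index plus one */
	printf("loop=%d __fls=%d\n", rate_idx, my_fls(rate_cfg)); /* 9 8 */
	return 0;
}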
@@ -4888,14 +4918,20 @@ static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
u8 aifs, aifsn, sifs;
int i;
- if (priv->vif) {
+ for (i = 0; i < ARRAY_SIZE(priv->vifs); i++) {
+ if (!priv->vifs[i])
+ continue;
+
struct ieee80211_sta *sta;
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ sta = ieee80211_find_sta(priv->vifs[i], priv->vifs[i]->bss_conf.bssid);
if (sta)
wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
rcu_read_unlock();
+
+ if (wireless_mode)
+ break;
}
if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
@@ -4952,19 +4988,21 @@ static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changed)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_sta_info *sta_info;
struct ieee80211_sta *sta;
struct rtl8xxxu_ra_report *rarpt;
+ u8 val8, macid;
u32 val32;
- u8 val8;
rarpt = &priv->ra_report;
if (changed & BSS_CHANGED_ASSOC) {
dev_dbg(dev, "Changed ASSOC: %i!\n", vif->cfg.assoc);
- rtl8xxxu_set_linktype(priv, vif->type);
+ rtl8xxxu_set_linktype(priv, vif->type, rtlvif->port_num);
if (vif->cfg.assoc) {
u32 ramask;
@@ -4980,6 +5018,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rcu_read_unlock();
goto error;
}
+ macid = rtl8xxxu_get_macid(priv, sta);
if (sta->deflink.ht_cap.ht_supported)
dev_info(dev, "%s: HT supported\n", __func__);
@@ -5000,19 +5039,20 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bw = RATE_INFO_BW_40;
else
bw = RATE_INFO_BW_20;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
rcu_read_unlock();
rtl8xxxu_update_ra_report(rarpt, highest_rate, sgi, bw);
- priv->vif = vif;
- priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
-
priv->fops->update_rate_mask(priv, ramask, 0, sgi,
- bw == RATE_INFO_BW_40, 0);
+ bw == RATE_INFO_BW_40, macid);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
- rtl8xxxu_stop_tx_beacon(priv);
+ if (rtlvif->port_num == 0)
+ rtl8xxxu_stop_tx_beacon(priv);
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
@@ -5054,7 +5094,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BSSID) {
dev_dbg(dev, "Changed BSSID!\n");
- rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+ rtl8xxxu_set_bssid(priv, bss_conf->bssid, rtlvif->port_num);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -5070,7 +5110,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changed & BSS_CHANGED_BEACON)
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
error:
return;
@@ -5079,11 +5119,12 @@ error:
static int rtl8xxxu_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "Start AP mode\n");
- rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid);
+ rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid, rtlvif->port_num);
rtl8xxxu_write16(priv, REG_BCN_INTERVAL, vif->bss_conf.beacon_int);
priv->fops->report_connect(priv, RTL8XXXU_BC_MC_MACID, 0, true);
@@ -5509,13 +5550,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
+ struct rtl8xxxu_vif *rtlvif = vif ? (struct rtl8xxxu_vif *)vif->drv_priv : NULL;
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
int tx_desc_size = priv->fops->tx_desc_size;
u8 macid;
int ret;
- bool ampdu_enable, sgi = false, short_preamble = false;
+ bool ampdu_enable, sgi = false, short_preamble = false, bmc = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -5557,10 +5599,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
tx_desc->txdw0 =
TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
- is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+ bmc = true;
+ }
+
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+ macid = rtl8xxxu_get_macid(priv, sta);
if (tx_info->control.hw_key) {
switch (tx_info->control.hw_key->cipher) {
@@ -5575,6 +5621,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
default:
break;
}
+ if (bmc && rtlvif && rtlvif->hw_key_idx != 0xff) {
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_EN_DESC_ID);
+ macid = rtlvif->hw_key_idx;
+ }
}
/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
@@ -5618,7 +5668,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
else
rts_rate = 0;
- macid = rtl8xxxu_get_macid(priv, sta);
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate, macid);
@@ -5680,18 +5729,44 @@ static void rtl8xxxu_send_beacon_frame(struct ieee80211_hw *hw,
static void rtl8xxxu_update_beacon_work_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv =
- container_of(work, struct rtl8xxxu_priv, update_beacon_work);
+ container_of(work, struct rtl8xxxu_priv, update_beacon_work.work);
struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_vif *vif = priv->vif;
+ struct ieee80211_vif *vif = priv->vifs[0];
if (!vif) {
WARN_ONCE(true, "no vif to update beacon\n");
return;
}
+ if (vif->bss_conf.csa_active) {
+ if (ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_csa_finish(vif, 0);
+ return;
+ }
+ schedule_delayed_work(&priv->update_beacon_work,
+ msecs_to_jiffies(vif->bss_conf.beacon_int));
+ }
rtl8xxxu_send_beacon_frame(hw, vif);
}
+static inline bool rtl8xxxu_is_packet_match_bssid(struct rtl8xxxu_priv *priv,
+ struct ieee80211_hdr *hdr,
+ int port_num)
+{
+ return priv->vifs[port_num] &&
+ priv->vifs[port_num]->type == NL80211_IFTYPE_STATION &&
+ priv->vifs[port_num]->cfg.assoc &&
+ ether_addr_equal(priv->vifs[port_num]->bss_conf.bssid, hdr->addr2);
+}
+
+static inline bool rtl8xxxu_is_sta_sta(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc &&
+ priv->vifs[0]->type == NL80211_IFTYPE_STATION) &&
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc &&
+ priv->vifs[1]->type == NL80211_IFTYPE_STATION);
+}
+
void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
struct ieee80211_rx_status *rx_status,
struct rtl8723au_phy_stats *phy_stats,
@@ -5708,12 +5783,11 @@ void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
rx_status->signal = priv->fops->cck_rssi(priv, phy_stats);
} else {
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
if (parse_cfo) {
priv->cfo_tracking.cfo_tail[0] = phy_stats->path_cfotail[0];
@@ -5748,12 +5822,11 @@ static void jaguar2_rx_parse_phystats_type1(struct rtl8xxxu_priv *priv,
bool crc_icv_err)
{
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
u8 pwdb_max = 0;
int rx_path;
@@ -6029,18 +6102,20 @@ void rtl8723bu_update_bt_link_info(struct rtl8xxxu_priv *priv, u8 bt_info)
btcoex->bt_busy = false;
}
+static inline bool rtl8xxxu_is_assoc(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc) ||
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc);
+}
+
static
void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (!wifi_connected) {
+ if (!rtl8xxxu_is_assoc(priv)) {
rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
rtl8723bu_set_coex_with_type(priv, 0);
} else if (btcoex->has_sco || btcoex->has_hid || btcoex->has_a2dp) {
@@ -6058,15 +6133,11 @@ void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
static
void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (wifi_connected) {
+ if (rtl8xxxu_is_assoc(priv)) {
u32 val32 = 0;
u32 high_prio_tx = 0, high_prio_rx = 0;
@@ -6249,6 +6320,76 @@ static void rtl8188e_c2hcmd_callback(struct work_struct *work)
}
}
+#define rtl8xxxu_iterate_vifs_atomic(priv, iterator, data) \
+ ieee80211_iterate_active_interfaces_atomic((priv)->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+
+struct rtl8xxxu_rx_update_rssi_data {
+ struct rtl8xxxu_priv *priv;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ u8 *bssid;
+};
+
+static void rtl8xxxu_rx_update_rssi_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_rx_update_rssi_data *iter_data = data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = iter_data->hdr;
+ struct rtl8xxxu_priv *priv = iter_data->priv;
+ struct rtl8xxxu_sta_info *sta_info;
+ struct ieee80211_rx_status *rx_status = iter_data->rx_status;
+ u8 *bssid = iter_data->bssid;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
+ return;
+
+ if (!(ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return;
+
+ sta = ieee80211_find_sta_by_ifaddr(priv->hw, hdr->addr2,
+ vif->addr);
+ if (!sta)
+ return;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ ewma_rssi_add(&sta_info->avg_rssi, -rx_status->signal);
+}
+
+static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+{
+ __le16 fc = hdr->frame_control;
+ u8 *bssid;
+
+ if (ieee80211_has_tods(fc))
+ bssid = hdr->addr1;
+ else if (ieee80211_has_fromds(fc))
+ bssid = hdr->addr2;
+ else
+ bssid = hdr->addr3;
+
+ return bssid;
+}
+
+static void rtl8xxxu_rx_update_rssi(struct rtl8xxxu_priv *priv,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtl8xxxu_rx_update_rssi_data data = {};
+
+ if (ieee80211_is_ctl(hdr->frame_control))
+ return;
+
+ data.priv = priv;
+ data.hdr = hdr;
+ data.rx_status = rx_status;
+ data.bssid = get_hdr_bssid(hdr);
+
+ rtl8xxxu_iterate_vifs_atomic(priv, rtl8xxxu_rx_update_rssi_iter, &data);
+}
+
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hw *hw = priv->hw;
@@ -6308,18 +6449,26 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb_queue_tail(&priv->c2hcmd_queue, skb);
schedule_work(&priv->c2hcmd_work);
} else {
+ struct ieee80211_hdr *hdr;
+
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
skb_pull(skb, drvinfo_sz + desc_shift);
skb_trim(skb, pkt_len);
- if (rx_desc->phy_stats)
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(
priv, rx_status, phy_stats,
rx_desc->rxmcs,
- (struct ieee80211_hdr *)skb->data,
+ hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6416,10 +6565,15 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (rx_desc->phy_stats)
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs, hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6563,29 +6717,123 @@ error:
return ret;
}
+static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
+{
+ u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN];
+ u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2];
+ struct rtl8xxxu_vif *rtlvif;
+ struct ieee80211_vif *vif;
+ u8 tsftr[8], tsftr_1[8];
+ int i;
+
+ msr = rtl8xxxu_read8(priv, REG_MSR);
+ bcn_ctrl = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ bcn_ctrl_1 = rtl8xxxu_read8(priv, REG_BEACON_CTRL_1);
+
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ atimwnd[i] = rtl8xxxu_read8(priv, REG_ATIMWND + i);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ atimwnd_1[i] = rtl8xxxu_read8(priv, REG_ATIMWND_1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr[i] = rtl8xxxu_read8(priv, REG_TSFTR + i);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr_1[i] = rtl8xxxu_read8(priv, REG_TSFTR1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ macid[i] = rtl8xxxu_read8(priv, REG_MACID + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ bssid[i] = rtl8xxxu_read8(priv, REG_BSSID + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ macid_1[i] = rtl8xxxu_read8(priv, REG_MACID1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ bssid_1[i] = rtl8xxxu_read8(priv, REG_BSSID1 + i);
+
+ /* disable beacon function, disable TSF update */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, (bcn_ctrl &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, (bcn_ctrl_1 &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+
+ /* switch msr */
+ msr = (msr & 0xf0) | ((msr & 0x03) << 2) | ((msr & 0x0c) >> 2);
+ rtl8xxxu_write8(priv, REG_MSR, msr);
+
+ /* write port0 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1 & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND + i, atimwnd_1[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr_1); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR + i, tsftr_1[i]);
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ rtl8xxxu_write8(priv, REG_MACID + i, macid_1[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ rtl8xxxu_write8(priv, REG_BSSID + i, bssid_1[i]);
+
+ /* write port1 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND_1 + i, atimwnd[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR1 + i, tsftr[i]);
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ rtl8xxxu_write8(priv, REG_MACID1 + i, macid[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ rtl8xxxu_write8(priv, REG_BSSID1 + i, bssid[i]);
+
+ /* write bcn ctl */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl);
+
+ vif = priv->vifs[0];
+ priv->vifs[0] = priv->vifs[1];
+ priv->vifs[1] = vif;
+
+ /* priv->vifs[0] is NULL here, based on how this function is currently
+ * called from rtl8xxxu_add_interface().
+ * If this function is used for a different scenario in the future,
+ * check whether vifs[0] or vifs[1] can be NULL and, if necessary, add
+ * code to set port_num = 1.
+ */
+ rtlvif = (struct rtl8xxxu_vif *)priv->vifs[1]->drv_priv;
+ rtlvif->port_num = 1;
+}
+
static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
- int ret;
+ int port_num;
u8 val8;
- if (!priv->vif)
- priv->vif = vif;
+ if (!priv->vifs[0])
+ port_num = 0;
+ else if (!priv->vifs[1])
+ port_num = 1;
else
return -EOPNOTSUPP;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- rtl8xxxu_stop_tx_beacon(priv);
+ if (port_num == 0) {
+ rtl8xxxu_stop_tx_beacon(priv);
- val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
- val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
- BEACON_DISABLE_TSF_UPDATE;
- rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
- ret = 0;
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+ BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+ }
break;
case NL80211_IFTYPE_AP:
+ if (port_num == 1) {
+ rtl8xxxu_switch_ports(priv);
+ port_num = 0;
+ }
+
rtl8xxxu_write8(priv, REG_BEACON_CTRL,
BEACON_DISABLE_TSF_UPDATE | BEACON_CTRL_MBSSID);
rtl8xxxu_write8(priv, REG_ATIMWND, 0x0c); /* 12ms */
@@ -6602,29 +6850,31 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
val8 = rtl8xxxu_read8(priv, REG_CCK_CHECK);
val8 &= ~BIT_BCN_PORT_SEL;
rtl8xxxu_write8(priv, REG_CCK_CHECK, val8);
-
- ret = 0;
break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- rtl8xxxu_set_linktype(priv, vif->type);
+ priv->vifs[port_num] = vif;
+ rtlvif->port_num = port_num;
+ rtlvif->hw_key_idx = 0xff;
+
+ rtl8xxxu_set_linktype(priv, vif->type, port_num);
ether_addr_copy(priv->mac_addr, vif->addr);
- rtl8xxxu_set_mac(priv);
+ rtl8xxxu_set_mac(priv, port_num);
- return ret;
+ return 0;
}
static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
dev_dbg(&priv->udev->dev, "%s\n", __func__);
- if (priv->vif)
- priv->vif = NULL;
+ priv->vifs[rtlvif->port_num] = NULL;
}
static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
@@ -6746,8 +6996,8 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
else
rcr |= RCR_CHECK_BSSID_BEACON | RCR_CHECK_BSSID_MATCH;
- if (priv->vif && priv->vif->type == NL80211_IFTYPE_AP)
- rcr &= ~RCR_CHECK_BSSID_MATCH;
+ if (priv->vifs[0] && priv->vifs[0]->type == NL80211_IFTYPE_AP)
+ rcr &= ~(RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON);
if (*total_flags & FIF_CONTROL)
rcr |= RCR_ACCEPT_CTRL_FRAME;
@@ -6784,11 +7034,19 @@ static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
return 0;
}
+static int rtl8xxxu_get_free_sec_cam(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ return find_first_zero_bit(priv->cam_map, priv->fops->max_sec_cam_num);
+}
+
static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u8 mac_addr[ETH_ALEN];
@@ -6800,9 +7058,6 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
__func__, cmd, key->cipher, key->keyidx);
- if (vif->type != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
if (key->keyidx > 3)
return -EOPNOTSUPP;
@@ -6826,7 +7081,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ether_addr_copy(mac_addr, sta->addr);
} else {
dev_dbg(dev, "%s: group key\n", __func__);
- eth_broadcast_addr(mac_addr);
+ ether_addr_copy(mac_addr, vif->bss_conf.bssid);
}
val16 = rtl8xxxu_read16(priv, REG_CR);
@@ -6840,16 +7095,28 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- key->hw_key_idx = key->keyidx;
+
+ retval = rtl8xxxu_get_free_sec_cam(hw);
+ if (retval < 0)
+ return -EOPNOTSUPP;
+
+ key->hw_key_idx = retval;
+
+ if (vif->type == NL80211_IFTYPE_AP && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ rtlvif->hw_key_idx = key->hw_key_idx;
+
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
rtl8xxxu_cam_write(priv, key, mac_addr);
+ set_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
case DISABLE_KEY:
rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
- key->keyidx << CAM_CMD_KEY_SHIFT;
+ key->hw_key_idx << CAM_CMD_KEY_SHIFT;
rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+ rtlvif->hw_key_idx = 0xff;
+ clear_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
default:
@@ -6930,6 +7197,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
int signal, struct ieee80211_sta *sta,
bool force)
{
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct ieee80211_hw *hw = priv->hw;
u16 wireless_mode;
u8 rssi_level, ratr_idx;
@@ -6938,7 +7206,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
u8 go_up_gap = 5;
u8 macid = rtl8xxxu_get_macid(priv, sta);
- rssi_level = priv->rssi_level;
+ rssi_level = sta_info->rssi_level;
snr = rtl8xxxu_signal_to_snr(signal);
snr_thresh_high = RTL8XXXU_SNR_THRESH_HIGH;
snr_thresh_low = RTL8XXXU_SNR_THRESH_LOW;
@@ -6963,18 +7231,16 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
else
rssi_level = RTL8XXXU_RATR_STA_LOW;
- if (rssi_level != priv->rssi_level || force) {
+ if (rssi_level != sta_info->rssi_level || force) {
int sgi = 0;
u32 rate_bitmap = 0;
- rcu_read_lock();
rate_bitmap = (sta->deflink.supp_rates[0] & 0xfff) |
(sta->deflink.ht_cap.mcs.rx_mask[0] << 12) |
(sta->deflink.ht_cap.mcs.rx_mask[1] << 20);
if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
- rcu_read_unlock();
wireless_mode = rtl8xxxu_wireless_mode(hw, sta);
switch (wireless_mode) {
@@ -7055,7 +7321,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
break;
}
- priv->rssi_level = rssi_level;
+ sta_info->rssi_level = rssi_level;
priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz, macid);
}
}
@@ -7085,7 +7351,7 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
int cfo_khz_a, cfo_khz_b, cfo_average;
int crystal_cap;
- if (!priv->vif || !priv->vif->cfg.assoc) {
+ if (!rtl8xxxu_is_assoc(priv)) {
/* Reset */
cfo->adjust = true;
@@ -7148,41 +7414,64 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
rtl8xxxu_set_atc_status(priv, abs(cfo_average) >= CFO_TH_ATC);
}
-static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+static void rtl8xxxu_ra_iter(void *data, struct ieee80211_sta *sta)
{
- struct ieee80211_vif *vif;
- struct rtl8xxxu_priv *priv;
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_priv *priv = data;
+ int signal = -ewma_rssi_read(&sta_info->avg_rssi);
- priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
- vif = priv->vif;
+ priv->fops->report_rssi(priv, rtl8xxxu_get_macid(priv, sta),
+ rtl8xxxu_signal_to_snr(signal));
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+}
- if (vif && vif->type == NL80211_IFTYPE_STATION) {
- int signal;
- struct ieee80211_sta *sta;
+struct rtl8xxxu_stas_entry {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+};
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- struct device *dev = &priv->udev->dev;
+struct rtl8xxxu_iter_stas_data {
+ struct rtl8xxxu_priv *priv;
+ struct list_head list;
+};
- dev_dbg(dev, "%s: no sta found\n", __func__);
- rcu_read_unlock();
- goto out;
- }
- rcu_read_unlock();
+static void rtl8xxxu_collect_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtl8xxxu_iter_stas_data *iter_stas = data;
+ struct rtl8xxxu_stas_entry *stas_entry;
- signal = ieee80211_ave_rssi(vif);
+ stas_entry = kmalloc(sizeof(*stas_entry), GFP_ATOMIC);
+ if (!stas_entry)
+ return;
- priv->fops->report_rssi(priv, 0,
- rtl8xxxu_signal_to_snr(signal));
+ stas_entry->sta = sta;
+ list_add_tail(&stas_entry->list, &iter_stas->list);
+}
- if (priv->fops->set_crystal_cap)
- rtl8xxxu_track_cfo(priv);
+static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+{
- rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+ struct rtl8xxxu_iter_stas_data iter_data;
+ struct rtl8xxxu_stas_entry *sta_entry, *tmp;
+ struct rtl8xxxu_priv *priv;
+
+ priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
+ iter_data.priv = priv;
+ INIT_LIST_HEAD(&iter_data.list);
+
+ mutex_lock(&priv->sta_mutex);
+ ieee80211_iterate_stations_atomic(priv->hw, rtl8xxxu_collect_sta_iter,
+ &iter_data);
+ list_for_each_entry_safe(sta_entry, tmp, &iter_data.list, list) {
+ list_del_init(&sta_entry->list);
+ rtl8xxxu_ra_iter(priv, sta_entry->sta);
+ kfree(sta_entry);
}
+ mutex_unlock(&priv->sta_mutex);
+
+ if (priv->fops->set_crystal_cap)
+ rtl8xxxu_track_cfo(priv);
-out:
schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
}
@@ -7304,7 +7593,9 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_work_sync(&priv->c2hcmd_work);
cancel_delayed_work_sync(&priv->ra_watchdog);
+ cancel_delayed_work_sync(&priv->update_beacon_work);
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
@@ -7315,16 +7606,34 @@ static int rtl8xxxu_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
+ ewma_rssi_init(&sta_info->avg_rssi);
if (vif->type == NL80211_IFTYPE_AP) {
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
sta_info->macid = rtl8xxxu_acquire_macid(priv);
- if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM)
+ if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM) {
+ mutex_unlock(&priv->sta_mutex);
return -ENOSPC;
+ }
rtl8xxxu_refresh_rate_mask(priv, 0, sta, true);
priv->fops->report_connect(priv, sta_info->macid, H2C_MACID_ROLE_STA, true);
+ } else {
+ switch (rtlvif->port_num) {
+ case 0:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID;
+ break;
+ case 1:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID1;
+ break;
+ default:
+ break;
+ }
}
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
@@ -7336,13 +7645,19 @@ static int rtl8xxxu_sta_remove(struct ieee80211_hw *hw,
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
if (vif->type == NL80211_IFTYPE_AP)
rtl8xxxu_release_macid(priv, sta_info->macid);
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
static const struct ieee80211_ops rtl8xxxu_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8xxxu_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = rtl8xxxu_add_interface,
@@ -7476,6 +7791,20 @@ static void rtl8xxxu_deinit_led(struct rtl8xxxu_priv *priv)
led_classdev_unregister(led);
}
+static const struct ieee80211_iface_limit rtl8xxxu_limits[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION), },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+};
+
+static const struct ieee80211_iface_combination rtl8xxxu_combinations[] = {
+ {
+ .limits = rtl8xxxu_limits,
+ .n_limits = ARRAY_SIZE(rtl8xxxu_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+ },
+};
+
static int rtl8xxxu_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -7522,7 +7851,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
untested = 0;
break;
case 0x2357:
- if (id->idProduct == 0x0109)
+ if (id->idProduct == 0x0109 || id->idProduct == 0x0135)
untested = 0;
break;
case 0x0b05:
@@ -7555,13 +7884,14 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
mutex_init(&priv->usb_buf_mutex);
mutex_init(&priv->syson_indirect_access_mutex);
mutex_init(&priv->h2c_mutex);
+ mutex_init(&priv->sta_mutex);
INIT_LIST_HEAD(&priv->tx_urb_free_list);
spin_lock_init(&priv->tx_urb_lock);
INIT_LIST_HEAD(&priv->rx_urb_pending_list);
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
- INIT_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
+ INIT_DELAYED_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -7611,6 +7941,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret)
goto err_set_intfdata;
+ hw->vif_data_size = sizeof(struct rtl8xxxu_vif);
+
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
if (priv->fops->max_macid_num)
@@ -7620,6 +7952,13 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
hw->queues = 4;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ if (priv->fops->supports_concurrent) {
+ hw->wiphy->iface_combinations = rtl8xxxu_combinations;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtl8xxxu_combinations);
+ }
+
sband = &rtl8xxxu_supported_band;
sband->ht_cap.ht_supported = true;
sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
@@ -7806,6 +8145,9 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192fu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x318b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192fu_fops},
+/* TP-Link TL-WN823N V2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0135, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192fu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
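The key-management rework in core.c (hw_key_idx taken from cam_map via rtl8xxxu_get_free_sec_cam() on SET_KEY, released with clear_bit() on DISABLE_KEY) is a plain first-fit bitmap allocator, the same pattern already used for mac_id_map. A userspace sketch of the allocate/free cycle, standing in for find_first_zero_bit/set_bit/clear_bit:

#include <stdint.h>
#include <stdio.h>

#define CAM_SLOTS 64

static uint64_t cam_map; /* one bit per security CAM entry */

static int cam_alloc(void)
{
	for (int i = 0; i < CAM_SLOTS; i++) {
		if (!(cam_map & (1ull << i))) {
			cam_map |= 1ull << i; /* set_bit */
			return i;
		}
	}
	return -1; /* no free CAM entry; the driver returns -EOPNOTSUPP */
}

static void cam_free(int idx)
{
	cam_map &= ~(1ull << idx); /* clear_bit */
}

int main(void)
{
	int a = cam_alloc(), b = cam_alloc();

	cam_free(a);
	printf("%d %d %d\n", a, b, cam_alloc()); /* 0 1 0: slot 0 reused */
	return 0;
}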
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 920ee50e2115..61c0c0ec07b3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -146,6 +146,21 @@
#define GPIO_INTM_EDGE_TRIG_IRQ BIT(9)
#define REG_LEDCFG0 0x004c
+#define LEDCFG0_LED0CM GENMASK(2, 0)
+#define LEDCFG0_LED1CM GENMASK(10, 8)
+#define LED_MODE_SW_CTRL 0x0
+#define LED_MODE_TX_OR_RX_EVENTS 0x3
+#define LEDCFG0_LED0SV BIT(3)
+#define LEDCFG0_LED1SV BIT(11)
+#define LED_SW_OFF 0x0
+#define LED_SW_ON 0x1
+#define LEDCFG0_LED0_IO_MODE BIT(7)
+#define LEDCFG0_LED1_IO_MODE BIT(15)
+#define LED_IO_MODE_OUTPUT 0x0
+#define LED_IO_MODE_INPUT 0x1
+#define LEDCFG0_LED2EN BIT(21)
+#define LED_GPIO_DISABLE 0x0
+#define LED_GPIO_ENABLE 0x1
#define LEDCFG0_DPDT_SELECT BIT(23)
#define REG_LEDCFG1 0x004d
#define LEDCFG1_HW_LED_CONTROL BIT(1)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 69e97647e3d6..2e60a6991ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1903,6 +1903,10 @@ void rtl_init_sw_leds(struct ieee80211_hw *hw)
EXPORT_SYMBOL(rtl_init_sw_leds);
const struct ieee80211_ops rtl_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = rtl_op_start,
.stop = rtl_op_stop,
.tx = rtl_op_tx,
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 2e945554ed6d..c1fbc29d5ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -1287,18 +1287,44 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
}
EXPORT_SYMBOL_GPL(rtl_get_hwinfo);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size)
+static void _rtl_fw_block_write_usb(struct ieee80211_hw *hw, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 start = START_ADDRESS;
+ u32 n;
+
+ while (size > 0) {
+ if (size >= 64)
+ n = 64;
+ else if (size >= 8)
+ n = 8;
+ else
+ n = 1;
+
+ rtl_write_chunk(rtlpriv, start, n, buffer);
+
+ start += n;
+ buffer += n;
+ size -= n;
+ }
+}
+
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *pu4byteptr = (u8 *)buffer;
u32 i;
- for (i = 0; i < size; i++)
- rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i));
+ if (rtlpriv->rtlhal.interface == INTF_PCI) {
+ for (i = 0; i < size; i++)
+ rtl_write_byte(rtlpriv, (START_ADDRESS + i),
+ *(buffer + i));
+ } else if (rtlpriv->rtlhal.interface == INTF_USB) {
+ _rtl_fw_block_write_usb(hw, buffer, size);
+ }
}
EXPORT_SYMBOL_GPL(rtl_fw_block_write);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
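_rtl_fw_block_write_usb() replaces the byte-at-a-time firmware download with descending chunk sizes: 64-byte writes while at least 64 bytes remain, then 8-byte writes, then single bytes, cutting the number of USB transfers per image. The same greedy loop in self-contained form, with write_chunk() standing in for rtl_write_chunk():

#include <stdio.h>

static void write_chunk(unsigned int addr, unsigned int n)
{
	printf("write %u bytes at 0x%x\n", n, addr);
}

static void block_write(unsigned int start, unsigned int size)
{
	while (size > 0) {
		/* Largest chunk the remaining length still allows. */
		unsigned int n = size >= 64 ? 64 : size >= 8 ? 8 : 1;

		write_chunk(start, n);
		start += n;
		size -= n;
	}
}

int main(void)
{
	block_write(0x1000, 77); /* 64 + 8 + 5x1 -> 7 transfers, not 77 */
	return 0;
}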
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index 1ec59f439382..4821625ad1e5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -91,8 +91,8 @@ void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
int max_size, u8 *hwinfo, int *params);
void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size);
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size);
void rtl_efuse_ops_init(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 96ce05bcf0b3..d059cfe5a2a9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -378,13 +378,13 @@ static void _rtl_pci_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
- rtlpriv->io.write8_async = pci_write8_async;
- rtlpriv->io.write16_async = pci_write16_async;
- rtlpriv->io.write32_async = pci_write32_async;
+ rtlpriv->io.write8 = pci_write8_async;
+ rtlpriv->io.write16 = pci_write16_async;
+ rtlpriv->io.write32 = pci_write32_async;
- rtlpriv->io.read8_sync = pci_read8_sync;
- rtlpriv->io.read16_sync = pci_read16_sync;
- rtlpriv->io.read32_sync = pci_read32_sync;
+ rtlpriv->io.read8 = pci_read8_sync;
+ rtlpriv->io.read16 = pci_read16_sync;
+ rtlpriv->io.read32 = pci_read32_sync;
}
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 50e139186a93..ed151754fc6e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -350,7 +350,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
__le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
@@ -503,9 +502,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(pdesc, 1);
set_tx_desc_pkt_id(pdesc, 8);
-
- if (!defaultadapter)
- set_tx_desc_qos(pdesc, 1);
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
index 91e4427ab022..4757f93b84e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
@@ -11,7 +11,7 @@
#define CHIP_VENDOR_UMC_B_CUT BIT(6)
#define IS_92C_1T2R(version) \
- (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+ (((version) & CHIP_92C_1T2R) == CHIP_92C_1T2R)
#define IS_VENDOR_UMC(version) \
(((version) & CHIP_VENDOR_UMC) ? true : false)
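The IS_92C_1T2R fix matters when CHIP_92C_1T2R is a multi-bit value: ANDing with a composite mask is nonzero if any one of its bits matches, so the old form could claim 1T2R for a chip that only sets the overlapping 92C bit, while requiring (version & mask) == mask tests every bit of the mask. A sketch with hypothetical bit values:

#include <stdio.h>

#define CHIP_92C      0x01          /* hypothetical values */
#define CHIP_92C_1T2R (0x01 | 0x02) /* composite: 92C plus the 1T2R bit */

int main(void)
{
	unsigned int v = CHIP_92C; /* plain 92C, 1T2R bit clear */

	/* Old test: true because the masks partially overlap. */
	printf("old: %d\n", (v & CHIP_92C) && (v & CHIP_92C_1T2R)); /* 1 */

	/* Fixed test: every bit of the composite mask must be set. */
	printf("new: %d\n", (v & CHIP_92C_1T2R) == CHIP_92C_1T2R);  /* 0 */
	return 0;
}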
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 5ec0eb8773a5..4217c9a08d01 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -622,6 +622,9 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
u16 valuelow;
switch (queue_sel) {
+ default:
+ WARN_ON(1);
+ fallthrough;
case (TX_SELE_HQ | TX_SELE_LQ):
valuehi = QUEUE_HIGH;
valuelow = QUEUE_LOW;
@@ -634,9 +637,6 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
valuehi = QUEUE_HIGH;
valuelow = QUEUE_NORMAL;
break;
- default:
- WARN_ON(1);
- break;
}
if (!wmm_enable) {
beq = valuelow;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 4ff0d4118193..a76f2dc8a977 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -101,7 +101,8 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
rtlphy->rf_type = RF_1T1R;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" :
+ rtlphy->rf_type == RF_1T2R ? "RF_1T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 20b4aac69642..48be7e346efc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -40,7 +40,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.thermalvalue = 0;
/* for firmware buf */
- rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+ rtlpriv->rtlhal.pfirmware = kmalloc(0x4000, GFP_KERNEL);
if (!rtlpriv->rtlhal.pfirmware) {
pr_err("Can't alloc buffer for fw\n");
return 1;
@@ -61,7 +61,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
if (err) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
return err;
@@ -72,7 +72,7 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->rtlhal.pfirmware) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
}
@@ -145,7 +145,6 @@ MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
/* rx */
- .in_ep_num = RTL92C_USB_BULK_IN_NUM,
.rx_urb_num = RTL92C_NUM_RX_URBS,
.rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
.usb_rx_hdl = rtl8192cu_rx_hdl,
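The firmware buffer moves from vzalloc() to kmalloc() because the synchronous write path introduced below hands pieces of this buffer straight to usb_control_msg(), and USB transfer buffers must be physically contiguous heap memory rather than vmalloc mappings; the matching frees become kfree() accordingly. A sketch of the constraint, with the 0x4000 size taken from the hunk:

    /* sketch: buffers handed to the USB core may be DMA-mapped */
    u8 *ok  = kmalloc(0x4000, GFP_KERNEL); /* contiguous: safe for USB  */
    u8 *bad = vmalloc(0x4000);             /* page mappings only: must
                                            * not be used as a transfer
                                            * buffer                    */
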
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 2f44c8aa6066..aa702ba7c9f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -79,68 +79,75 @@ static int configvernoutep(struct ieee80211_hw *hw)
static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
bool bwificfg, struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB Chip-B & WMM Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB typical Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 3;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for WMM.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for typical.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 5;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void oneoutepmapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
{
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 2;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
static int _out_ep_mapping(struct ieee80211_hw *hw)
@@ -475,9 +482,9 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ struct rtl_sta_info *sta_entry;
+ u8 agg_state = RTL_AGG_STOP;
+ u8 ampdu_density = 0;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 rate_flag = info->control.rates[0].flags;
@@ -486,6 +493,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
skb_get_queue_mapping(skb));
u8 *txdesc8;
__le32 *txdesc;
+ u8 tid;
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
@@ -499,10 +507,21 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate);
if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
set_tx_desc_data_shortgi(txdesc, 1);
- if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
- info->flags & IEEE80211_TX_CTL_AMPDU) {
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid = ieee80211_get_tid(hdr);
+ agg_state = sta_entry->tids[tid].agg.agg_state;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ }
+
+ if (agg_state == RTL_AGG_OPERATIONAL &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
set_tx_desc_agg_enable(txdesc, 1);
set_tx_desc_max_agg_num(txdesc, 0x14);
+ set_tx_desc_ampdu_density(txdesc, ampdu_density);
+ tcb_desc->rts_enable = 1;
+ tcb_desc->rts_rate = DESC_RATE24M;
} else {
set_tx_desc_agg_break(txdesc, 1);
}
@@ -537,14 +556,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_data_bw(txdesc, 0);
set_tx_desc_data_sc(txdesc, 0);
}
- rcu_read_lock();
- sta = ieee80211_find_sta(mac->vif, mac->bssid);
- if (sta) {
- u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
-
- set_tx_desc_ampdu_density(txdesc, ampdu_density);
- }
- rcu_read_unlock();
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -587,8 +598,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(txdesc, 1);
set_tx_desc_pkt_id(txdesc, 8);
- if (!defaultadapter)
- set_tx_desc_qos(txdesc, 1);
}
if (ieee80211_has_morefrags(fc))
set_tx_desc_more_frag(txdesc, 1);
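Two independent fixes land in this file. First, the queue-to-endpoint tables stop hardcoding endpoint numbers 2, 3 and 5 (a fixed layout some dongles do not have) and instead index the bulk-out endpoints discovered at probe time into rtlusb->out_eps[] (populated in the usb.c hunk further down): slot 0 carries the high-priority queues (VO/MGT/BCN/HI), later slots the bulk data queues, with the exact split depending on WMM. Second, rtl92cu_tx_fill_desc() now reads aggregation state per station via ieee80211_get_tid() and the rtl_sta_info in sta->drv_priv instead of the removed mac->tids[] array, which also lets it drop the per-packet RCU station lookup since the sta pointer is already passed in. A hypothetical helper makes the slot convention explicit (the driver indexes out_eps[] inline):

    /* hypothetical helper, not in the patch */
    static u8 rtl92cu_ep_for_slot(struct rtl_usb *rtlusb, unsigned int slot)
    {
        /* slot 0: VO/MGT/BCN/HI, higher slots: bulk data queues */
        return rtlusb->out_eps[slot];
    }
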
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index 5f81cab205cc..09e61dc0f317 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -4,15 +4,12 @@
#ifndef __RTL92CU_TRX_H__
#define __RTL92CU_TRX_H__
-#define RTL92C_USB_BULK_IN_NUM 1
#define RTL92C_NUM_RX_URBS 8
#define RTL92C_NUM_TX_URBS 32
#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
#define RX_DRV_INFO_SIZE_UNIT 8
-#define RTL_AGG_ON 1
-
enum usb_rx_agg_mode {
USB_RX_AGG_DISABLE,
USB_RX_AGG_DMA,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 02ac69c08ed3..192982ec8152 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -42,6 +42,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
struct phy_sts_cck_8192d *cck_buf;
s8 rx_pwr_all, rx_pwr[4];
@@ -62,9 +63,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
if (ppsc->rfpwr_state == ERFON)
- cck_highpwr = (u8) rtl_get_bbreg(hw,
- RFPGA0_XA_HSSIPARAMETER2,
- BIT(9));
+ cck_highpwr = rtlphy->cck_high_power;
else
cck_highpwr = false;
if (!cck_highpwr) {
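_rtl92de_query_rxphystatus() runs for every received frame, so the rtl_get_bbreg() read of RFPGA0_XA_HSSIPARAMETER2 BIT(9) on each CCK packet was a bus access in the RX hot path; the bit is now served from the cached rtlphy->cck_high_power. A sketch of the cache-once pattern, assuming the field is filled where the BB register is programmed during PHY init:

    /* at PHY init (sketch): sample the BB bit once, cache it */
    rtlphy->cck_high_power =
        (bool)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BIT(9));

    /* the per-packet RX path then reads the cached copy, as above */
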
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index d9823ddab7be..65bfc14702f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -349,7 +349,6 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool b_defaultadapter = true;
/* bool b_trigger_ac = false; */
u8 *pdesc8 = (u8 *)pdesc_tx;
__le32 *pdesc = (__le32 *)pdesc8;
@@ -503,10 +502,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_hwseq_en_8723(pdesc, 1);
/* set_tx_desc_hwseq_en(pdesc, 1); */
/* set_tx_desc_pkt_id(pdesc, 8); */
-
- if (!b_defaultadapter)
- set_tx_desc_hwseq_sel_8723(pdesc, 1);
- /* set_tx_desc_qos(pdesc, 1); */
+ /* set_tx_desc_qos(pdesc, 1); */
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 30bf2775a335..6e8c87a2fae4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -23,86 +23,23 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define MAX_USBCTRL_VENDORREQ_TIMES 10
-static void usbctrl_async_callback(struct urb *urb)
-{
- if (urb) {
- /* free dr */
- kfree(urb->setup_packet);
- /* free databuf */
- kfree(urb->transfer_buffer);
- }
-}
-
-static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
-{
- int rc;
- unsigned int pipe;
- u8 reqtype;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
- u8 *databuf;
-
- if (WARN_ON_ONCE(len > databuf_maxlen))
- len = databuf_maxlen;
-
- pipe = usb_sndctrlpipe(udev, 0); /* write_out */
- reqtype = REALTEK_USB_VENQT_WRITE;
-
- dr = kzalloc(sizeof(*dr), GFP_ATOMIC);
- if (!dr)
- return -ENOMEM;
-
- databuf = kzalloc(databuf_maxlen, GFP_ATOMIC);
- if (!databuf) {
- kfree(dr);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- kfree(databuf);
- kfree(dr);
- return -ENOMEM;
- }
-
- dr->bRequestType = reqtype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16(value);
- dr->wIndex = cpu_to_le16(index);
- dr->wLength = cpu_to_le16(len);
- /* data are already in little-endian order */
- memcpy(databuf, pdata, len);
- usb_fill_control_urb(urb, udev, pipe,
- (unsigned char *)dr, databuf, len,
- usbctrl_async_callback, NULL);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc < 0) {
- kfree(databuf);
- kfree(dr);
- }
- usb_free_urb(urb);
- return rc;
-}
-
-static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
+static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype,
+ u16 value, void *pdata, u16 len)
{
unsigned int pipe;
int status;
- u8 reqtype;
int vendorreq_times = 0;
static int count;
- pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
- reqtype = REALTEK_USB_VENQT_READ;
+ if (reqtype == REALTEK_USB_VENQT_READ)
+ pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+ else
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
do {
- status = usb_control_msg(udev, pipe, request, reqtype, value,
- index, pdata, len, 1000);
+ status = usb_control_msg(udev, pipe, REALTEK_USB_VENQT_CMD_REQ,
+ reqtype, value, REALTEK_USB_VENQT_CMD_IDX,
+ pdata, len, 1000);
if (status < 0) {
/* firmware download is checksummed, don't retry */
if ((value >= FW_8192C_START_ADDRESS &&
@@ -114,18 +51,15 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
} while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
if (status < 0 && count++ < 4)
- pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
- return status;
+ dev_err(&udev->dev, "reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x reqtype=0x%x\n",
+ value, status, *(u32 *)pdata, reqtype);
}
static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
{
struct device *dev = rtlpriv->io.dev;
struct usb_device *udev = to_usb_device(dev);
- u8 request;
u16 wvalue;
- u16 index;
__le32 *data;
unsigned long flags;
@@ -134,14 +68,33 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
rtlpriv->usb_data_index = 0;
data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)addr;
- _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_READ, wvalue, data, len);
return le32_to_cpu(*data);
}
+
+static void _usb_write_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val, u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ unsigned long flags;
+ __le32 *data;
+ u16 wvalue;
+
+ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
+ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+ rtlpriv->usb_data_index = 0;
+ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+
+ wvalue = (u16)(addr & 0x0000ffff);
+ *data = cpu_to_le32(val);
+
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, wvalue, data, len);
+}
+
static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
return (u8)_usb_read_sync(rtlpriv, addr, 1);
@@ -157,45 +110,27 @@ static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
return _usb_read_sync(rtlpriv, addr, 4);
}
-static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
- u16 len)
+static void _usb_write8_sync(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
- u8 request;
- u16 wvalue;
- u16 index;
- __le32 data;
- int ret;
-
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
- wvalue = (u16)(addr&0x0000ffff);
- data = cpu_to_le32(val);
-
- ret = _usbctrl_vendorreq_async_write(udev, request, wvalue,
- index, &data, len);
- if (ret < 0)
- dev_err(&udev->dev, "error %d writing at 0x%x\n", ret, addr);
+ _usb_write_sync(rtlpriv, addr, val, 1);
}
-static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+static void _usb_write16_sync(struct rtl_priv *rtlpriv, u32 addr, u16 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 1);
+ _usb_write_sync(rtlpriv, addr, val, 2);
}
-static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+static void _usb_write32_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 2);
+ _usb_write_sync(rtlpriv, addr, val, 4);
}
-static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+static void _usb_write_chunk_sync(struct rtl_priv *rtlpriv, u32 addr,
+ u32 length, u8 *data)
{
- struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(rtlpriv->io.dev);
- _usb_write_async(to_usb_device(dev), addr, val, 4);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, addr, data, length);
}
static void _rtl_usb_io_handler_init(struct device *dev,
@@ -205,12 +140,13 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
mutex_init(&rtlpriv->io.bb_mutex);
- rtlpriv->io.write8_async = _usb_write8_async;
- rtlpriv->io.write16_async = _usb_write16_async;
- rtlpriv->io.write32_async = _usb_write32_async;
- rtlpriv->io.read8_sync = _usb_read8_sync;
- rtlpriv->io.read16_sync = _usb_read16_sync;
- rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.write8 = _usb_write8_sync;
+ rtlpriv->io.write16 = _usb_write16_sync;
+ rtlpriv->io.write32 = _usb_write32_sync;
+ rtlpriv->io.write_chunk = _usb_write_chunk_sync;
+ rtlpriv->io.read8 = _usb_read8_sync;
+ rtlpriv->io.read16 = _usb_read16_sync;
+ rtlpriv->io.read32 = _usb_read32_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
@@ -280,7 +216,6 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
- rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
rtlusb->usb_rx_segregate_hdl =
rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
@@ -312,20 +247,38 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
- if (usb_endpoint_dir_in(pep_desc))
+ if (usb_endpoint_dir_in(pep_desc)) {
+ if (usb_endpoint_xfer_bulk(pep_desc)) {
+ /* The vendor drivers assume there is only one
+ * bulk in ep and that it's the first in ep.
+ */
+ if (rtlusb->in_ep_nums == 0)
+ rtlusb->in_ep = usb_endpoint_num(pep_desc);
+ else
+ pr_warn("%s: bulk in endpoint is not the first in endpoint\n",
+ __func__);
+ }
+
rtlusb->in_ep_nums++;
- else if (usb_endpoint_dir_out(pep_desc))
+ } else if (usb_endpoint_dir_out(pep_desc)) {
+ if (rtlusb->out_ep_nums < RTL_USB_MAX_BULKOUT_NUM) {
+ if (usb_endpoint_xfer_bulk(pep_desc))
+ rtlusb->out_eps[rtlusb->out_ep_nums] =
+ usb_endpoint_num(pep_desc);
+ } else {
+ pr_warn("%s: found more bulk out endpoints than the expected %d\n",
+ __func__, RTL_USB_MAX_BULKOUT_NUM);
+ }
+
rtlusb->out_ep_nums++;
+ }
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
pep_desc->bInterval);
}
- if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
- pr_err("Too few input end points found\n");
- return -EINVAL;
- }
+
if (rtlusb->out_ep_nums == 0) {
pr_err("No output end points found\n");
return -EINVAL;
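The asynchronous write path — a usb_alloc_urb() plus kzalloc'd setup packet and bounce buffer per register write, freed in the completion callback — collapses into the existing synchronous helper: _usbctrl_vendorreq_sync() now takes the request type and derives the control pipe direction from it, and reads and writes share the same bounded retry loop around usb_control_msg(). Writes reuse the rtlpriv->usb_data ring under locks.usb_lock so the payload lives in heap memory for the duration of the transfer. Since usb_control_msg() can sleep, every register write becomes a blocking call. _rtl_usb_init() additionally records each discovered bulk-out endpoint number into out_eps[] and warns if the first IN endpoint is not the bulk-in one, replacing the fixed in_ep_num configuration. A sketch of the unified call pattern:

    /* sketch: one helper, direction selected by the request type */
    _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_READ,  reg, data, 4);
    _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, reg, data, 4);
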
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index 3bf85b23eec1..12529afc0510 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -19,6 +19,7 @@
#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
+#define RTL_USB_MAX_BULKOUT_NUM 4
#define RTL_USB_MAX_TX_URBS_NUM 8
enum rtl_txq {
@@ -94,6 +95,7 @@ struct rtl_usb {
/* Tx */
u8 out_ep_nums ;
+ u8 out_eps[RTL_USB_MAX_BULKOUT_NUM];
u8 out_queue_sel;
struct rtl_ep_map ep_map;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index d87cd2252eac..f388d13e2ba8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1397,8 +1397,6 @@ struct rtl_phy {
#define RTL_AGG_PROGRESS 1
#define RTL_AGG_START 2
#define RTL_AGG_OPERATIONAL 3
-#define RTL_AGG_OFF 0
-#define RTL_AGG_ON 1
#define RTL_RX_AGG_START 1
#define RTL_RX_AGG_STOP 0
#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
@@ -1447,13 +1445,15 @@ struct rtl_io {
/*PCI IO map */
unsigned long pci_base_addr; /*device I/O address */
- void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
- void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write8)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write_chunk)(struct rtl_priv *rtlpriv, u32 addr, u32 length,
+ u8 *data);
- u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u8 (*read8)(struct rtl_priv *rtlpriv, u32 addr);
+ u16 (*read16)(struct rtl_priv *rtlpriv, u32 addr);
+ u32 (*read32)(struct rtl_priv *rtlpriv, u32 addr);
};
@@ -1471,7 +1471,6 @@ struct rtl_mac {
enum nl80211_iftype opmode;
/*Probe Beacon management */
- struct rtl_tid_data tids[MAX_TID_COUNT];
enum rtl_link_state link_state;
int n_channels;
@@ -2354,7 +2353,6 @@ struct rtl_mod_params {
struct rtl_hal_usbint_cfg {
/* data - rx */
- u32 in_ep_num;
u32 rx_urb_num;
u32 rx_max_size;
@@ -2916,25 +2914,25 @@ extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M];
static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read8_sync(rtlpriv, addr);
+ return rtlpriv->io.read8(rtlpriv, addr);
}
static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read16_sync(rtlpriv, addr);
+ return rtlpriv->io.read16(rtlpriv, addr);
}
static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read32_sync(rtlpriv, addr);
+ return rtlpriv->io.read32(rtlpriv, addr);
}
static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
{
- rtlpriv->io.write8_async(rtlpriv, addr, val8);
+ rtlpriv->io.write8(rtlpriv, addr, val8);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read8_sync(rtlpriv, addr);
+ rtlpriv->io.read8(rtlpriv, addr);
}
static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
@@ -2947,19 +2945,25 @@ static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
{
- rtlpriv->io.write16_async(rtlpriv, addr, val16);
+ rtlpriv->io.write16(rtlpriv, addr, val16);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read16_sync(rtlpriv, addr);
+ rtlpriv->io.read16(rtlpriv, addr);
}
static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
u32 addr, u32 val32)
{
- rtlpriv->io.write32_async(rtlpriv, addr, val32);
+ rtlpriv->io.write32(rtlpriv, addr, val32);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read32_sync(rtlpriv, addr);
+ rtlpriv->io.read32(rtlpriv, addr);
+}
+
+static inline void rtl_write_chunk(struct rtl_priv *rtlpriv,
+ u32 addr, u32 length, u8 *data)
+{
+ rtlpriv->io.write_chunk(rtlpriv, addr, length, data);
}
static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
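rtl_write_chunk() exposes the new write_chunk op so firmware download can push a whole block per vendor request instead of one dword at a time. Only the USB io handler in this patch populates write_chunk (the PCI hunk earlier does not), so callers must stay on USB-only paths. A hedged sketch of a block writer; the 254-byte cap mirrors REALTEK_USB_VENQT_MAX_BUF_SIZE and the start address is illustrative:

    /* sketch: chunked firmware write via the new op */
    static void fw_block_write(struct rtl_priv *rtlpriv, u8 *buf, u32 size)
    {
        u32 addr = FW_8192C_START_ADDRESS;      /* illustrative */

        while (size) {
            u32 n = min_t(u32, size, 254);      /* vendor-req max (assumed) */

            rtl_write_chunk(rtlpriv, addr, n, buf);
            addr += n;
            buf  += n;
            size -= n;
        }
    }
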
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 1b2ad81838be..5b2036798159 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -316,23 +316,13 @@ static ssize_t rtw_debugfs_set_single_input(struct file *filp,
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
- struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
u32 input;
- int num;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou32_from_user(buffer, count, 0, &input);
if (ret)
return ret;
- num = kstrtoint(tmp, 0, &input);
-
- if (num) {
- rtw_warn(rtwdev, "kstrtoint failed\n");
- return num;
- }
-
debugfs_priv->cb_data = input;
return count;
@@ -485,19 +475,12 @@ static ssize_t rtw_debugfs_set_fix_rate(struct file *filp,
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 fix_rate;
- char tmp[32 + 1];
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou8_from_user(buffer, count, 0, &fix_rate);
if (ret)
return ret;
- ret = kstrtou8(tmp, 0, &fix_rate);
- if (ret) {
- rtw_warn(rtwdev, "invalid args, [rate]\n");
- return ret;
- }
-
dm_info->fix_rate = fix_rate;
return count;
@@ -879,20 +862,13 @@ static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_coex *coex = &rtwdev->coex;
- char tmp[32 + 1];
bool enable;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &enable);
if (ret)
return ret;
- ret = kstrtobool(tmp, &enable);
- if (ret) {
- rtw_warn(rtwdev, "invalid arguments\n");
- return ret;
- }
-
mutex_lock(&rtwdev->mutex);
coex->manual_control = !enable;
mutex_unlock(&rtwdev->mutex);
@@ -951,18 +927,13 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
bool input;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &input);
if (ret)
return ret;
- ret = kstrtobool(tmp, &input);
- if (ret)
- return -EINVAL;
-
if (!input)
return -EINVAL;
@@ -1030,11 +1001,12 @@ static ssize_t rtw_debugfs_set_dm_cap(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- int bit;
+ int ret, bit;
bool en;
- if (kstrtoint_from_user(buffer, count, 10, &bit))
- return -EINVAL;
+ ret = kstrtoint_from_user(buffer, count, 10, &bit);
+ if (ret)
+ return ret;
en = bit > 0;
bit = abs(bit);
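Each of these debugfs write handlers open-coded a copy into a 33-byte stack buffer via rtw_debugfs_copy_from_user() followed by a kstrto*() parse; the kstrto*_from_user() helpers do both steps and propagate the real parse error, which also fixes rtw_debugfs_set_dm_cap() returning a blanket -EINVAL. The resulting handler shape:

    /* sketch of the simplified debugfs write handler */
    static ssize_t set_bool_param(struct file *filp, const char __user *buffer,
                                  size_t count, loff_t *loff)
    {
        bool enable;
        int ret;

        ret = kstrtobool_from_user(buffer, count, &enable);
        if (ret)
            return ret;

        /* ... apply 'enable' under the relevant lock ... */
        return count;
    }
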
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index d8d68f16014e..7af5bf7fe5b6 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -927,6 +927,10 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
}
const struct ieee80211_ops rtw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
.start = rtw_ops_start,
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 2bfc0e822b8d..9986a4cb37eb 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1450,6 +1450,7 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
@@ -1498,6 +1499,9 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
ret);
}
+
+ if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
+ rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
}
static int __maybe_unused rtw_pci_suspend(struct device *dev)
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 1634f03784f1..b122f226924b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -557,6 +557,9 @@
#define REG_RFE_INV16 0x0cbe
#define BIT_RFE_BUF_EN BIT(3)
+#define REG_ANAPARSW_MAC_0 0x1010
+#define BIT_CF_L_V2 GENMASK(29, 28)
+
#define REG_ANAPAR_XTAL_0 0x1040
#define BIT_XCAP_0 GENMASK(23, 10)
#define REG_CPU_DMEM_CON 0x1080
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 914c94988b2f..11fbdd142162 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -777,3 +777,64 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
}
+
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c)
+{
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
+ DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0],
+ DCTLINFO_V2_W4_SEC_ENT0_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[1],
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[2],
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[3],
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[4],
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[5],
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[6],
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_SEC_ENT0_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+
+ h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0],
+ DCTLINFO_V2_W5_SEC_ENT_VALID_V1) |
+ le32_encode_bits(addr_cam->sec_ent[0],
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_SEC_ENT_VALID_V1 |
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+
+ h2c->w6 = le32_encode_bits(addr_cam->sec_ent[1],
+ DCTLINFO_V2_W6_SEC_ENT1_V1) |
+ le32_encode_bits(addr_cam->sec_ent[2],
+ DCTLINFO_V2_W6_SEC_ENT2_V1) |
+ le32_encode_bits(addr_cam->sec_ent[3],
+ DCTLINFO_V2_W6_SEC_ENT3_V1) |
+ le32_encode_bits(addr_cam->sec_ent[4],
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_SEC_ENT1_V1 |
+ DCTLINFO_V2_W6_SEC_ENT2_V1 |
+ DCTLINFO_V2_W6_SEC_ENT3_V1 |
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+
+ h2c->w7 = le32_encode_bits(addr_cam->sec_ent[5],
+ DCTLINFO_V2_W7_SEC_ENT5_V1) |
+ le32_encode_bits(addr_cam->sec_ent[6],
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_SEC_ENT5_V1 |
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+}
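The v2 D-CTRL H2C carries a value word (wN) and a matching mask word (mN) for each 32-bit field group: le32_encode_bits() packs each field at its GENMASK position into the value word, and the mask word tells firmware which bit-fields are meaningful, so a partial update does not clobber unrelated fields in the firmware's copy. A reduced sketch of the value/mask pairing, following the w4/m4 pair above:

    /* sketch: value word plus mask word per field group */
    h2c->w4 = le32_encode_bits(keyid0, DCTLINFO_V2_W4_SEC_ENT0_KEYID) |
              le32_encode_bits(keyid1, DCTLINFO_V2_W4_SEC_ENT1_KEYID);
    h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_SEC_ENT0_KEYID |
                          DCTLINFO_V2_W4_SEC_ENT1_KEYID);
    /* fields whose mask bits are clear are ignored by firmware */
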
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 83c160a614e6..fa09d11c345c 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -352,6 +352,111 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24));
}
+struct rtw89_h2c_dctlinfo_ud_v2 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define DCTLINFO_V2_C0_MACID GENMASK(6, 0)
+#define DCTLINFO_V2_C0_OP BIT(7)
+
+#define DCTLINFO_V2_W0_QOS_FIELD_H GENMASK(7, 0)
+#define DCTLINFO_V2_W0_HW_EXSEQ_MACID GENMASK(14, 8)
+#define DCTLINFO_V2_W0_QOS_DATA BIT(15)
+#define DCTLINFO_V2_W0_AES_IV_L GENMASK(31, 16)
+#define DCTLINFO_V2_W0_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W1_AES_IV_H GENMASK(31, 0)
+#define DCTLINFO_V2_W1_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W2_SEQ0 GENMASK(11, 0)
+#define DCTLINFO_V2_W2_SEQ1 GENMASK(23, 12)
+#define DCTLINFO_V2_W2_AMSDU_MAX_LEN GENMASK(26, 24)
+#define DCTLINFO_V2_W2_STA_AMSDU_EN BIT(27)
+#define DCTLINFO_V2_W2_CHKSUM_OFLD_EN BIT(28)
+#define DCTLINFO_V2_W2_WITH_LLC BIT(29)
+#define DCTLINFO_V2_W2_NAT25_EN BIT(30)
+#define DCTLINFO_V2_W2_IS_MLD BIT(31)
+#define DCTLINFO_V2_W2_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W3_SEQ2 GENMASK(11, 0)
+#define DCTLINFO_V2_W3_SEQ3 GENMASK(23, 12)
+#define DCTLINFO_V2_W3_TGT_IND GENMASK(27, 24)
+#define DCTLINFO_V2_W3_TGT_IND_EN BIT(28)
+#define DCTLINFO_V2_W3_HTC_LB GENMASK(31, 29)
+#define DCTLINFO_V2_W3_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W4_VLAN_TAG_SEL GENMASK(7, 5)
+#define DCTLINFO_V2_W4_HTC_ORDER BIT(8)
+#define DCTLINFO_V2_W4_SEC_KEY_ID GENMASK(10, 9)
+#define DCTLINFO_V2_W4_VLAN_RX_DYNAMIC_PCP_EN BIT(11)
+#define DCTLINFO_V2_W4_VLAN_RX_PKT_DROP BIT(12)
+#define DCTLINFO_V2_W4_VLAN_RX_VALID BIT(13)
+#define DCTLINFO_V2_W4_VLAN_TX_VALID BIT(14)
+#define DCTLINFO_V2_W4_WAPI BIT(15)
+#define DCTLINFO_V2_W4_SEC_ENT_MODE GENMASK(17, 16)
+#define DCTLINFO_V2_W4_SEC_ENT0_KEYID GENMASK(19, 18)
+#define DCTLINFO_V2_W4_SEC_ENT1_KEYID GENMASK(21, 20)
+#define DCTLINFO_V2_W4_SEC_ENT2_KEYID GENMASK(23, 22)
+#define DCTLINFO_V2_W4_SEC_ENT3_KEYID GENMASK(25, 24)
+#define DCTLINFO_V2_W4_SEC_ENT4_KEYID GENMASK(27, 26)
+#define DCTLINFO_V2_W4_SEC_ENT5_KEYID GENMASK(29, 28)
+#define DCTLINFO_V2_W4_SEC_ENT6_KEYID GENMASK(31, 30)
+#define DCTLINFO_V2_W4_ALL GENMASK(31, 5)
+#define DCTLINFO_V2_W5_SEC_ENT7_KEYID GENMASK(1, 0)
+#define DCTLINFO_V2_W5_SEC_ENT8_KEYID GENMASK(3, 2)
+#define DCTLINFO_V2_W5_SEC_ENT_VALID_V1 GENMASK(23, 8)
+#define DCTLINFO_V2_W5_SEC_ENT0_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W5_ALL (GENMASK(31, 8) | GENMASK(3, 0))
+#define DCTLINFO_V2_W6_SEC_ENT1_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W6_SEC_ENT2_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W6_SEC_ENT3_V1 GENMASK(23, 16)
+#define DCTLINFO_V2_W6_SEC_ENT4_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W6_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W7_SEC_ENT5_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W7_SEC_ENT6_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W7_SEC_ENT7 GENMASK(23, 16)
+#define DCTLINFO_V2_W7_SEC_ENT8 GENMASK(31, 24)
+#define DCTLINFO_V2_W7_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W8_MLD_SMA_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W8_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W9_MLD_SMA_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W9_MLD_TMA_L_V1 GENMASK(31, 16)
+#define DCTLINFO_V2_W9_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W10_MLD_TMA_H_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W10_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W11_MLD_TA_BSSID_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W11_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W12_MLD_TA_BSSID_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W12_ALL GENMASK(15, 0)
+
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
@@ -373,6 +478,10 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
u8 *cmd);
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, u8 *cmd);
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index cbf6821af6b8..051a3cad6101 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -212,33 +212,68 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
rtw89_config_default_chandef(rtwdev);
}
+static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
+ struct rtw89_entity_weight *w)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chanctx_cfg *cfg;
+ struct rtw89_vif *rtwvif;
+ int idx;
+
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ cfg = hal->sub[idx].cfg;
+ if (!cfg) {
+ /* doesn't run with chanctx ops; one channel at most */
+ w->active_chanctxs = 1;
+ break;
+ }
+
+ if (cfg->ref_count > 0)
+ w->active_chanctxs++;
+ }
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (rtwvif->chanctx_assigned)
+ w->active_roles++;
+ }
+}
+
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
{
+ DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_SUB_ENTITY) = {};
struct rtw89_hal *hal = &rtwdev->hal;
const struct cfg80211_chan_def *chandef;
+ struct rtw89_entity_weight w = {};
enum rtw89_entity_mode mode;
struct rtw89_chan chan;
- u8 weight;
- u8 last;
u8 idx;
lockdep_assert_held(&rtwdev->mutex);
- weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
- switch (weight) {
+ bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ switch (w.active_chanctxs) {
default:
- rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
- bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ rtw89_warn(rtwdev, "unknown ent chanctxs weight: %d\n",
+ w.active_chanctxs);
+ bitmap_zero(recalc_map, NUM_OF_RTW89_SUB_ENTITY);
fallthrough;
case 0:
rtw89_config_default_chandef(rtwdev);
+ set_bit(RTW89_SUB_ENTITY_0, recalc_map);
fallthrough;
case 1:
- last = RTW89_SUB_ENTITY_0;
mode = RTW89_ENTITY_MODE_SCC;
break;
- case 2:
- last = RTW89_SUB_ENTITY_1;
+ case 2 ... NUM_OF_RTW89_SUB_ENTITY:
+ if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "unhandled ent: %d chanctxs %d roles\n",
+ w.active_chanctxs, w.active_roles);
+ return RTW89_ENTITY_MODE_UNHANDLED;
+ }
+
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
break;
@@ -247,7 +282,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
break;
}
- for (idx = 0; idx <= last; idx++) {
+ for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_SUB_ENTITY) {
chandef = rtw89_chandef_get(rtwdev, idx);
rtw89_get_channel_params(chandef, &chan);
if (chan.channel == 0) {
@@ -287,6 +322,13 @@ static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev,
}
}
+static bool rtw89_concurrent_via_mrc(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+
+ return chip_gen == RTW89_CHIP_BE;
+}
+
/* This function centrally manages how MCC roles are sorted and iterated.
* And, it guarantees that ordered_idx is less than NUM_OF_RTW89_MCC_ROLES.
* So, if data needs to pass an array for ordered_idx, the array can declare
@@ -320,19 +362,12 @@ int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev,
return 0;
}
-/* For now, IEEE80211_HW_TIMING_BEACON_ONLY can make things simple to ensure
- * correctness of MCC calculation logic below. We have noticed that once driver
- * declares WIPHY_FLAG_SUPPORTS_MLO, the use of IEEE80211_HW_TIMING_BEACON_ONLY
- * will be restricted. We will make an alternative in driver when it is ready
- * for MLO.
- */
static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *role, u64 tsf)
{
struct rtw89_vif *rtwvif = role->rtwvif;
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval);
- u64 sync_tsf = vif->bss_conf.sync_tsf;
+ u64 sync_tsf = READ_ONCE(rtwvif->sync_bcn_tsf);
u32 remainder;
if (tsf < sync_tsf) {
@@ -346,16 +381,13 @@ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
return remainder;
}
-static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+static int __mcc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mac_mcc_tsf_rpt rpt = {};
struct rtw89_fw_mcc_tsf_req req = {};
- u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
- u32 tbtt_ofst_ref, tbtt_ofst_aux;
- u64 tsf_ref, tsf_aux;
int ret;
req.group = mcc->group;
@@ -365,11 +397,63 @@ static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC h2c failed to request tsf: %d\n", ret);
- return RTW89_MCC_DFLT_BCN_OFST_TIME;
+ return ret;
+ }
+
+ *tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
+ *tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+
+ return 0;
+}
+
+static int __mrc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_req_tsf_arg arg = {};
+ struct rtw89_mac_mrc_tsf_rpt rpt = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_REQ_TSF_NUM < NUM_OF_RTW89_MCC_ROLES);
+
+ arg.num = 2;
+ arg.infos[0].band = ref->rtwvif->mac_idx;
+ arg.infos[0].port = ref->rtwvif->port;
+ arg.infos[1].band = aux->rtwvif->mac_idx;
+ arg.infos[1].port = aux->rtwvif->port;
+
+ ret = rtw89_fw_h2c_mrc_req_tsf(rtwdev, &arg, &rpt);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to request tsf: %d\n", ret);
+ return ret;
}
- tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
- tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+ *tsf_ref = rpt.tsfs[0];
+ *tsf_aux = rpt.tsfs[1];
+
+ return 0;
+}
+
+static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
+ u32 tbtt_ofst_ref, tbtt_ofst_aux;
+ u64 tsf_ref, tsf_aux;
+ int ret;
+
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+ else
+ ret = __mcc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+
+ if (ret)
+ return RTW89_MCC_DFLT_BCN_OFST_TIME;
+
tbtt_ofst_ref = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf_ref);
tbtt_ofst_aux = rtw89_mcc_get_tbtt_ofst(rtwdev, aux, tsf_aux);
@@ -392,6 +476,28 @@ void rtw89_mcc_role_fw_macid_bitmap_set_bit(struct rtw89_mcc_role *mcc_role,
mcc_role->macid_bitmap[idx] |= BIT(pos);
}
+static
+u32 rtw89_mcc_role_fw_macid_bitmap_to_u32(struct rtw89_mcc_role *mcc_role)
+{
+ unsigned int macid;
+ unsigned int i, j;
+ u32 bitmap = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mcc_role->macid_bitmap); i++) {
+ for (j = 0; j < 8; j++) {
+ macid = i * 8 + j;
+ if (macid >= 32)
+ goto out;
+
+ if (mcc_role->macid_bitmap[i] & BIT(j))
+ bitmap |= BIT(macid);
+ }
+ }
+
+out:
+ return bitmap;
+}
+
static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -588,6 +694,9 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
int ret;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+
if (sel.bind_vif[rtwvif->sub_entity_idx]) {
rtw89_warn(rtwdev,
"MCC skip extra vif <macid %d> on chanctx[%d]\n",
@@ -1150,7 +1259,11 @@ static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
tsf_ofst_tgt = bcn_intvl_src_us - remainder;
config->sync.macid_tgt = tgt->rtwvif->mac_id;
+ config->sync.band_tgt = tgt->rtwvif->mac_idx;
+ config->sync.port_tgt = tgt->rtwvif->port;
config->sync.macid_src = src->rtwvif->mac_id;
+ config->sync.band_src = src->rtwvif->mac_idx;
+ config->sync.port_src = src->rtwvif->port;
config->sync.offset = tsf_ofst_tgt / 1024;
config->sync.enable = true;
@@ -1297,6 +1410,37 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
return 0;
}
+static
+void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_policy *policy = &role->policy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg;
+ const struct rtw89_chan *chan;
+
+ slot_arg = &arg->slots[slot_idx];
+ role->slot_idx = slot_idx;
+
+ slot_arg->duration = role->duration;
+ slot_arg->role_num = 1;
+
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI;
+ slot_arg->roles[0].is_master = role == ref;
+ slot_arg->roles[0].band = chan->band_type;
+ slot_arg->roles[0].bw = chan->band_width;
+ slot_arg->roles[0].central_ch = chan->channel;
+ slot_arg->roles[0].primary_ch = chan->primary_channel;
+ slot_arg->roles[0].en_tx_null = !policy->dis_tx_null;
+ slot_arg->roles[0].null_early = policy->tx_null_early;
+ slot_arg->roles[0].macid = role->rtwvif->mac_id;
+ slot_arg->roles[0].macid_main_bitmap =
+ rtw89_mcc_role_fw_macid_bitmap_to_u32(role);
+}
+
static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1318,6 +1462,20 @@ static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
return 0;
}
+static
+void __mrc_fw_add_bt_role(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg = &arg->slots[slot_idx];
+
+ slot_arg->duration = bt_role->duration;
+ slot_arg->role_num = 1;
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_BT;
+}
+
static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1403,6 +1561,130 @@ static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
return 0;
}
+static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg_src;
+ u8 slot_idx_tgt;
+
+ if (!courtesy->enable)
+ return;
+
+ if (courtesy->macid_src == ref->rtwvif->mac_id) {
+ slot_arg_src = &arg->slots[ref->slot_idx];
+ slot_idx_tgt = aux->slot_idx;
+ } else {
+ slot_arg_src = &arg->slots[aux->slot_idx];
+ slot_idx_tgt = ref->slot_idx;
+ }
+
+ slot_arg_src->courtesy_target = slot_idx_tgt;
+ slot_arg_src->courtesy_period = courtesy->slot_num;
+ slot_arg_src->courtesy_en = true;
+}
+
+static int __mrc_fw_start(struct rtw89_dev *rtwdev, bool replace)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_fw_mrc_start_arg start_arg = {};
+ struct rtw89_fw_mrc_add_arg add_arg = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_ADD_SLOT_NUM <
+ NUM_OF_RTW89_MCC_ROLES + 1 /* bt role */);
+
+ if (replace) {
+ start_arg.old_sch_idx = mcc->group;
+ start_arg.action = RTW89_H2C_MRC_START_ACTION_REPLACE_OLD;
+ mcc->group = RTW89_MCC_NEXT_GROUP(mcc->group);
+ }
+
+ add_arg.sch_idx = mcc->group;
+ add_arg.sch_type = RTW89_H2C_MRC_SCH_BAND0_ONLY;
+
+ switch (pattern->plan) {
+ case RTW89_MCC_PLAN_TAIL_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_MID_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 1);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_NO_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+
+ add_arg.slot_num = 2;
+ add_arg.btc_in_sch = false;
+ break;
+ default:
+ rtw89_warn(rtwdev, "MCC unknown plan: %d\n", pattern->plan);
+ return -EFAULT;
+ }
+
+ __mrc_fw_add_courtesy(rtwdev, &add_arg);
+
+ ret = rtw89_fw_h2c_mrc_add(rtwdev, &add_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger add: %d\n", ret);
+ return ret;
+ }
+
+ if (sync->enable) {
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+ };
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+ }
+
+ start_arg.sch_idx = mcc->group;
+ start_arg.start_tsf = config->start_tsf;
+
+ ret = rtw89_fw_h2c_mrc_start(rtwdev, &start_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger start: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1444,6 +1726,60 @@ static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_chang
return 0;
}
+static int __mrc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_upd_duration_arg dur_arg = {
+ .sch_idx = mcc->group,
+ .start_tsf = config->start_tsf,
+ .slot_num = 2,
+ .slots[0] = {
+ .slot_idx = ref->slot_idx,
+ .duration = ref->duration,
+ },
+ .slots[1] = {
+ .slot_idx = aux->slot_idx,
+ .duration = aux->duration,
+ },
+ };
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+
+ };
+ int ret;
+
+ ret = rtw89_fw_h2c_mrc_upd_duration(rtwdev, &dur_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update duration: %d\n", ret);
+ return ret;
+ }
+
+ if (!sync->enable || !sync_changed)
+ return 0;
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1494,7 +1830,7 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
if (!rtwvif_go->chanctx_assigned)
return;
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif_go);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_go);
}
static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev)
@@ -1562,7 +1898,11 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- ret = __mcc_fw_start(rtwdev, false);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, false);
+ else
+ ret = __mcc_fw_start(rtwdev, false);
+
if (ret)
return ret;
@@ -1580,16 +1920,23 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
- ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
- ref->rtwvif->mac_id, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to trigger stop: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev)) {
+ ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger del: %d\n", ret);
+ } else {
+ ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
+ ref->rtwvif->mac_id, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to trigger stop: %d\n", ret);
- ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to delete group: %d\n", ret);
+ ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to delete group: %d\n", ret);
+ }
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
@@ -1615,7 +1962,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
- ret = __mcc_fw_start(rtwdev, true);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, true);
+ else
+ ret = __mcc_fw_start(rtwdev, true);
+
if (ret)
return ret;
} else {
@@ -1624,7 +1975,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
else
sync_changed = true;
- ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ else
+ ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+
if (ret)
return ret;
}
@@ -1666,12 +2021,75 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BCN_OFFSET_CHANGE);
}
+static int __mcc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ int ret;
+
+ ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
+ upd->rtwvif->mac_id,
+ upd->macid_bitmap);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to update macid bitmap: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __mrc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *cur,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_fw_mrc_upd_bitmap_arg arg = {};
+ u32 old = rtw89_mcc_role_fw_macid_bitmap_to_u32(cur);
+ u32 new = rtw89_mcc_role_fw_macid_bitmap_to_u32(upd);
+ u32 add = new & ~old;
+ u32 del = old & ~new;
+ int ret;
+ int i;
+
+ arg.sch_idx = mcc->group;
+ arg.macid = upd->rtwvif->mac_id;
+
+ for (i = 0; i < 32; i++) {
+ if (add & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (del & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update bitmap: %d\n", ret);
+ return ret;
+}
+
static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *mcc_role,
unsigned int ordered_idx,
void *data)
{
- struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role upd = {
.rtwvif = mcc_role->rtwvif,
};
@@ -1685,14 +2103,13 @@ static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
sizeof(mcc_role->macid_bitmap)) == 0)
return 0;
- ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
- upd.rtwvif->mac_id,
- upd.macid_bitmap);
- if (ret) {
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to update macid bitmap: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_upd_macid_bitmap(rtwdev, mcc_role, &upd);
+ else
+ ret = __mcc_fw_upd_macid_bitmap(rtwdev, &upd);
+
+ if (ret)
return ret;
- }
memcpy(mcc_role->macid_bitmap, upd.macid_bitmap,
sizeof(mcc_role->macid_bitmap));
@@ -1900,6 +2317,41 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_work(rtwdev);
}
+static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx1,
+ enum rtw89_sub_entity_idx idx2)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_sub_entity tmp;
+ struct rtw89_vif *rtwvif;
+ u8 cur;
+
+ if (idx1 == idx2)
+ return;
+
+ hal->sub[idx1].cfg->idx = idx2;
+ hal->sub[idx2].cfg->idx = idx1;
+
+ tmp = hal->sub[idx1];
+ hal->sub[idx1] = hal->sub[idx2];
+ hal->sub[idx2] = tmp;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+ if (rtwvif->sub_entity_idx == idx1)
+ rtwvif->sub_entity_idx = idx2;
+ else if (rtwvif->sub_entity_idx == idx2)
+ rtwvif->sub_entity_idx = idx1;
+ }
+
+ cur = atomic_read(&hal->roc_entity_idx);
+ if (cur == idx1)
+ atomic_set(&hal->roc_entity_idx, idx2);
+ else if (cur == idx2)
+ atomic_set(&hal->roc_entity_idx, idx1);
+}
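rtw89_swap_sub_entity() has to keep three views consistent at once: each cfg's back-pointer index, the hal->sub[] slots themselves, and every external index (vif, RoC) that names a slot. The invariant in isolation (hypothetical minimal types, userspace sketch):

	struct cfg { int idx; };
	struct slot { struct cfg *cfg; int payload; };

	static void swap_slots(struct slot *sub, int *ext_idx, int a, int b)
	{
		struct slot tmp;

		if (a == b)
			return;

		/* fix back-pointers first, then swap the slots */
		sub[a].cfg->idx = b;
		sub[b].cfg->idx = a;
		tmp = sub[a];
		sub[a] = sub[b];
		sub[b] = tmp;

		/* remap any external index naming a swapped slot */
		if (*ext_idx == a)
			*ext_idx = b;
		else if (*ext_idx == b)
			*ext_idx = a;
	}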
+
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
@@ -1913,8 +2365,8 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
return -ENOENT;
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
- rtw89_set_channel(rtwdev);
cfg->idx = idx;
+ cfg->ref_count = 0;
hal->sub[idx].cfg = cfg;
return 0;
}
@@ -1924,47 +2376,8 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
- enum rtw89_entity_mode mode;
- struct rtw89_vif *rtwvif;
- u8 drop, roll;
-
- drop = cfg->idx;
- if (drop != RTW89_SUB_ENTITY_0)
- goto out;
- roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY, drop + 1);
-
- /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
- if (roll == NUM_OF_RTW89_SUB_ENTITY)
- goto out;
-
- /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
- * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
- */
- hal->sub[roll].cfg->idx = RTW89_SUB_ENTITY_0;
- hal->sub[RTW89_SUB_ENTITY_0] = hal->sub[roll];
-
- rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- if (rtwvif->sub_entity_idx == roll)
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
- }
-
- atomic_cmpxchg(&hal->roc_entity_idx, roll, RTW89_SUB_ENTITY_0);
-
- drop = roll;
-
-out:
- mode = rtw89_get_entity_mode(rtwdev);
- switch (mode) {
- case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
- break;
- default:
- break;
- }
-
- clear_bit(drop, hal->entity_map);
- rtw89_set_channel(rtwdev);
+ clear_bit(cfg->idx, hal->entity_map);
}
void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
@@ -1985,16 +2398,73 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_entity_weight w = {};
rtwvif->sub_entity_idx = cfg->idx;
rtwvif->chanctx_assigned = true;
- return 0;
+ cfg->ref_count++;
+
+ if (cfg->idx == RTW89_SUB_ENTITY_0)
+ goto out;
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ if (w.active_chanctxs != 1)
+ goto out;
+
+ /* put the first active chanctx at RTW89_SUB_ENTITY_0 */
+ rtw89_swap_sub_entity(rtwdev, cfg->idx, RTW89_SUB_ENTITY_0);
+
+out:
+ return rtw89_set_channel(rtwdev);
}
void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct ieee80211_chanctx_conf *ctx)
{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_entity_weight w = {};
+ enum rtw89_sub_entity_idx roll;
+ enum rtw89_entity_mode cur;
+
rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
rtwvif->chanctx_assigned = false;
+ cfg->ref_count--;
+
+ if (cfg->ref_count != 0)
+ goto out;
+
+ if (cfg->idx != RTW89_SUB_ENTITY_0)
+ goto out;
+
+ roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY,
+ cfg->idx + 1);
+ /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
+ if (roll == NUM_OF_RTW89_SUB_ENTITY)
+ goto out;
+
+ /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
+ * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
+ */
+ rtw89_swap_sub_entity(rtwdev, cfg->idx, roll);
+
+out:
+ rtw89_entity_calculate_weight(rtwdev, &w);
+
+ cur = rtw89_get_entity_mode(rtwdev);
+ switch (cur) {
+ case RTW89_ENTITY_MODE_MCC:
+ /* If still multi-roles, re-plan MCC for chanctx changes.
+ * Otherwise, just stop MCC.
+ */
+ rtw89_mcc_stop(rtwdev);
+ if (w.active_roles == NUM_OF_RTW89_MCC_ROLES)
+ rtw89_mcc_start(rtwdev);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_set_channel(rtwdev);
}
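The assign/unassign pair above turns chanctx placement into reference counting: assigning bumps cfg->ref_count and promotes the first (and only) active chanctx into RTW89_SUB_ENTITY_0; unassigning only considers a roll-down or an MCC re-plan once the last user is gone. Reduced to the counting core (single-chanctx view; the slot-0 promotion and MCC re-plan details above are elided):

	struct chanctx { int idx; int ref_count; };

	static void ctx_assign(struct chanctx *c, int *nr_active)
	{
		if (c->ref_count++ == 0)
			(*nr_active)++;
		if (*nr_active == 1)
			c->idx = 0;	/* sole active chanctx owns slot 0 */
	}

	static int ctx_unassign(struct chanctx *c, int *nr_active)
	{
		if (--c->ref_count)
			return 0;	/* still referenced: nothing to do */
		(*nr_active)--;
		return 1;		/* last user gone: roll-down/re-plan */
	}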
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 9b98d8f4ee9d..ffa412f281f3 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -38,6 +38,11 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_entity_weight {
+ unsigned int active_chanctxs;
+ unsigned int active_roles;
+};
+
static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 46e25c6f88a6..08121fd899e6 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -23,6 +23,7 @@ enum btc_wl_rfk_type {
BTC_WRFKT_DACK = 4,
BTC_WRFKT_RXDCK = 5,
BTC_WRFKT_TSSI = 6,
+ BTC_WRFKT_CHLK = 7,
};
#define NM_EXEC false
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index fd527a249996..f697e3d898e6 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -372,7 +372,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
-void rtw89_set_channel(struct rtw89_dev *rtwdev)
+int rtw89_set_channel(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -399,7 +399,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
- return;
+ return -EINVAL;
}
roc_idx = atomic_read(&hal->roc_entity_idx);
@@ -426,6 +426,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
}
rtw89_set_entity_state(rtwdev, true);
+ return 0;
}
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1176,7 +1177,8 @@ static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
@@ -1329,7 +1331,8 @@ static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
@@ -1866,6 +1869,17 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
}
+static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif *rtwvif,
+ struct ieee80211_hdr *hdr, size_t len)
+{
+ struct ieee80211_mgmt *mgmt = (typeof(mgmt))hdr;
+
+ if (len < offsetof(typeof(*mgmt), u.beacon.variable))
+ return;
+
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
+}
+
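The offsetof() guard above is what makes the TSF read safe: the timestamp lives in the beacon's fixed fields, so any frame shorter than the fixed-field area is dropped before touching it. The generic shape of the check (hypothetical layout, userspace sketch; the driver additionally converts from little-endian):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct beacon {
		uint8_t hdr[24];
		uint64_t timestamp;	/* little-endian on air */
		uint16_t interval;
		uint16_t capab;
		uint8_t variable[];	/* IEs start here */
	};

	static int read_tsf(const void *frame, size_t len, uint64_t *tsf)
	{
		if (len < offsetof(struct beacon, variable))
			return -1;	/* frame too short: no fixed fields */
		memcpy(tsf, (const uint8_t *)frame + offsetof(struct beacon, timestamp),
		       sizeof(*tsf));	/* memcpy avoids unaligned access */
		return 0;
	}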
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -1896,8 +1910,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
return;
if (ieee80211_is_beacon(hdr->frame_control)) {
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
+ }
pkt_stat->beacon_nr++;
}
@@ -3345,6 +3361,14 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
return ret;
}
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
}
@@ -3393,7 +3417,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3442,7 +3466,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
}
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3485,6 +3509,8 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_warn(rtwdev, "failed to send h2c general packet\n");
return ret;
}
+
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
}
return ret;
@@ -3611,7 +3637,8 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
};
const struct rtw89_chip_info *chip = rtwdev->chip;
- const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
+ const __le16 *highest = chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160) ?
+ highest_bw160 : highest_bw80;
struct rtw89_hal *hal = &rtwdev->hal;
u16 tx_mcs_map = 0, rx_mcs_map = 0;
u8 sts_cap = 3;
@@ -3640,34 +3667,34 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
- if (chip->support_bw160)
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
IEEE80211_VHT_CAP_SHORT_GI_160;
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1];
-}
-#define RTW89_SBAND_IFTYPES_NR 2
+ if (ieee80211_hw_check(rtwdev->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+}
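This hunk is part of a wider change replacing the boolean support_bw160 with a support_bandwidths bitmap keyed by NL80211_CHAN_WIDTH_*, so 160 MHz and 320 MHz (and any later width) are tested uniformly. The query pattern, reduced to its essence (the enum values below are stand-ins):

	#include <stdbool.h>
	#include <stdint.h>

	enum chan_width { W20, W40, W80, W160, W320 };	/* stand-in for NL80211_CHAN_WIDTH_* */

	static bool supports_bw(uint16_t support_bandwidths, enum chan_width w)
	{
		return support_bandwidths & (1u << w);
	}

	/* e.g. a chip capped at 160 MHz:
	 *   caps = (1u << W20) | (1u << W40) | (1u << W80) | (1u << W160);
	 *   supports_bw(caps, W160) -> true, supports_bw(caps, W320) -> false
	 */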
static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
enum nl80211_band band,
- struct ieee80211_supported_band *sband)
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
- struct ieee80211_sband_iftype_data *iftype_data;
bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) ||
(chip->chip_id == RTL8852B && hal->cv == CHIP_CAV);
+ struct ieee80211_sta_he_cap *he_cap;
+ int nss = hal->rx_nss;
+ u8 *mac_cap_info;
+ u8 *phy_cap_info;
u16 mcs_map = 0;
int i;
- int nss = hal->rx_nss;
- int idx = 0;
-
- iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
- if (!iftype_data)
- return;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -3676,12 +3703,198 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
}
- for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
- struct ieee80211_sta_he_cap *he_cap;
- u8 *mac_cap_info;
- u8 *phy_cap_info;
+ he_cap = &iftype_data->he_cap;
+ mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
+ phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
+
+ he_cap->has_he = true;
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+ mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
+ IEEE80211_HE_MAC_CAP2_BSR;
+ mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
+ mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ if (band == NL80211_BAND_2GHZ) {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ } else {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
+ phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
+ phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
+ phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ phy_cap_info[5] = no_ng16 ? 0 :
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
+ phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+ phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) {
+ he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ __le16 capa;
- switch (i) {
+ capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ iftype_data->he_6ghz_capa.capa = capa;
+ }
+}
+
+static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct ieee80211_eht_cap_elem_fixed *eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss;
+ struct ieee80211_sta_eht_cap *eht_cap;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool support_320mhz = false;
+ int sts = 3;
+ u8 val;
+
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ return;
+
+ if (band == NL80211_BAND_6GHZ &&
+ chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320))
+ support_320mhz = true;
+
+ eht_cap = &iftype_data->eht_cap;
+ eht_cap_elem = &eht_cap->eht_cap_elem;
+ eht_nss = &eht_cap->eht_mcs_nss_supp;
+
+ eht_cap->has_eht = true;
+
+ eht_cap_elem->mac_cap_info[0] =
+ u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991,
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ eht_cap_elem->mac_cap_info[1] = 0;
+
+ eht_cap_elem->phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[0] |=
+ u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+ eht_cap_elem->phy_cap_info[1] =
+ u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] = 0;
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ u8_encode_bits(1, IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ eht_cap_elem->phy_cap_info[6] = 0;
+ eht_cap_elem->phy_cap_info[7] = 0;
+ eht_cap_elem->phy_cap_info[8] = 0;
+
+ val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ eht_nss->bw._80.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
+ if (support_320mhz) {
+ eht_nss->bw._320.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs13_max_nss = val;
+ }
+}
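Each eht_nss->bw._NN entry above packs the maximum RX NSS into the low nibble and TX NSS into the high nibble of one byte, via u8_encode_bits() with the IEEE80211_EHT_MCS_NSS_{RX,TX} masks. The same packing without the kernel helpers (the nibble layout is the assumption here):

	#include <stdint.h>

	static uint8_t pack_eht_nss(uint8_t rx_nss, uint8_t tx_nss)
	{
		return (uint8_t)((rx_nss & 0x0f) | ((tx_nss & 0x0f) << 4));
	}

	/* pack_eht_nss(2, 2) == 0x22: 2x2 streams for every MCS range */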
+
+#define RTW89_SBAND_IFTYPES_NR 2
+
+static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_sband_iftype_data *iftype_data;
+ enum nl80211_iftype iftype;
+ int idx = 0;
+
+ iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
+ if (!iftype_data)
+ return;
+
+ for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+ switch (iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
break;
@@ -3694,92 +3907,10 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
break;
}
- iftype_data[idx].types_mask = BIT(i);
- he_cap = &iftype_data[idx].he_cap;
- mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
- phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
-
- he_cap->has_he = true;
- mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
- mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
- IEEE80211_HE_MAC_CAP2_BSR;
- mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
- if (i == NL80211_IFTYPE_AP)
- mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
- mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
- IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
- if (band == NL80211_BAND_2GHZ) {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- } else {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
- if (chip->support_bw160)
- phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- }
- phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
- phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
- phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
- if (i == NL80211_IFTYPE_AP)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
- phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
- if (chip->support_bw160)
- phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
- phy_cap_info[5] = no_ng16 ? 0 :
- IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
- IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
- phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
- IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
- phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_1;
- phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
- if (chip->support_bw160)
- phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
- phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
- he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
- if (chip->support_bw160) {
- he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
- }
-
- if (band == NL80211_BAND_6GHZ) {
- __le16 capa;
+ iftype_data[idx].types_mask = BIT(iftype);
- capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
- IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
- le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
- le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
- IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
- iftype_data[idx].he_6ghz_capa.capa = capa;
- }
+ rtw89_init_he_cap(rtwdev, band, iftype, &iftype_data[idx]);
+ rtw89_init_eht_cap(rtwdev, band, iftype, &iftype_data[idx]);
idx++;
}
@@ -3800,7 +3931,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
if (!sband_2ghz)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
}
@@ -3810,7 +3941,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
}
@@ -3818,7 +3949,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
if (!sband_6ghz)
goto err;
- rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
}
@@ -3879,7 +4010,7 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
rtwdev = rtwvif->rtwdev;
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
mutex_unlock(&rtwdev->mutex);
}
@@ -3944,7 +4075,6 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
- rtwdev->mac.qta_mode = RTW89_QTA_SCC;
ret = rtw89_mac_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@@ -3961,6 +4091,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
return ret;
rtw89_phy_init_bb_reg(rtwdev);
+ rtw89_chip_bb_postinit(rtwdev);
rtw89_phy_init_rf_reg(rtwdev, false);
rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);
@@ -3983,6 +4114,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+ rtw89_chip_rfk_init_late(rtwdev);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
rtw89_fw_h2c_init_ba_cam(rtwdev);
@@ -4078,6 +4210,15 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
+ rtwdev->dbcc_en = false;
+ rtwdev->mlo_dbcc_mode = MLO_DBCC_NOT_SUPPORT;
+ rtwdev->mac.qta_mode = RTW89_QTA_SCC;
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ rtwdev->dbcc_en = true;
+ rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
+ rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
+ }
INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
@@ -4085,6 +4226,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
init_completion(&rtwdev->fw.req.completion);
+ init_completion(&rtwdev->rfk_wait.completion);
schedule_work(&rtwdev->load_firmware_work);
@@ -4290,6 +4432,7 @@ EXPORT_SYMBOL(rtw89_chip_info_setup);
static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_hal *hal = &rtwdev->hal;
@@ -4324,8 +4467,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- /* ref: description of rtw89_mcc_get_tbtt_ofst() in chan.c */
- ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -4453,9 +4596,10 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &early_fw);
if (no_chanctx) {
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->remain_on_channel = NULL;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index ea6df859ba15..d62d23015c48 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -17,6 +17,7 @@ struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
struct rtw89_efuse_block_cfg;
+struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
@@ -32,6 +33,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define MASKDWORD 0xffffffff
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
+#define BYPASS_CR_DATA 0xbabecafe
#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
@@ -878,7 +880,7 @@ enum rtw89_ps_mode {
#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
-#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
@@ -956,6 +958,9 @@ struct rtw89_port_reg {
u32 mbssid;
u32 mbssid_drop;
u32 tsf_sync;
+ u32 ptcl_dbg;
+ u32 ptcl_dbg_info;
+ u32 bcn_drop_all;
u32 hiq_win[RTW89_PORT_NUM];
};
@@ -2875,7 +2880,7 @@ struct rtw89_ba_cam_entry {
#define RTW89_MAX_ADDR_CAM_NUM 128
#define RTW89_MAX_BSSID_CAM_NUM 20
#define RTW89_MAX_SEC_CAM_NUM 128
-#define RTW89_MAX_BA_CAM_NUM 8
+#define RTW89_MAX_BA_CAM_NUM 24
#define RTW89_SEC_CAM_IN_ADDR_CAM 7
struct rtw89_addr_cam_entry {
@@ -2932,6 +2937,7 @@ struct rtw89_sta {
struct ewma_evm evm_min[RF_PATH_MAX];
struct ewma_evm evm_max[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
__le32 htc_template;
@@ -3041,6 +3047,7 @@ struct rtw89_vif {
u8 bcn_hit_cond;
u8 hit_rule;
u8 last_noa_nr;
+ u64 sync_bcn_tsf;
bool offchan;
bool trigger;
bool lsig_txop;
@@ -3131,6 +3138,7 @@ struct rtw89_chip_ops {
int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
int (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_preinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*bb_postinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -3152,7 +3160,9 @@ struct rtw89_chip_ops {
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfe_gpio)(struct rtw89_dev *rtwdev);
+ void (*rfk_hw_init)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
+ void (*rfk_init_late)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
@@ -3196,6 +3206,22 @@ struct rtw89_chip_ops {
int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+ int (*h2c_default_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_assoc_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_update_beacon)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
+ int (*h2c_ba_cam)(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -3225,8 +3251,62 @@ enum rtw89_dma_ch {
RTW89_DMA_CH_NUM = 13
};
+#define MLO_MODE_FOR_BB0_BB1_RF(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))
+
+enum rtw89_mlo_dbcc_mode {
+ MLO_DBCC_NOT_SUPPORT = 1,
+ MLO_0_PLUS_2_1RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 1),
+ MLO_0_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 2),
+ MLO_1_PLUS_1_1RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 1),
+ MLO_1_PLUS_1_2RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 2),
+ MLO_2_PLUS_0_1RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 1),
+ MLO_2_PLUS_0_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 2),
+ MLO_2_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 2, 2),
+ DBCC_LEGACY = 0xffffffff,
+};
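MLO_MODE_FOR_BB0_BB1_RF() makes each enumerator self-describing: the BB0 and BB1 link counts sit in the two low nibbles and the RF count at bit 12. Decoding one value by hand:

	#include <stdio.h>

	#define MLO_MODE(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))

	int main(void)
	{
		unsigned int m = MLO_MODE(2, 0, 1);	/* MLO_2_PLUS_0_1RF == 0x1002 */

		printf("bb0=%u bb1=%u rf=%u\n",
		       m & 0xf, (m >> 4) & 0xf, (m >> 12) & 0xf);
		return 0;
	}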
+
+enum rtw89_scan_be_operation {
+ RTW89_SCAN_OP_STOP,
+ RTW89_SCAN_OP_START,
+ RTW89_SCAN_OP_SETPARM,
+ RTW89_SCAN_OP_GETRPT,
+ RTW89_SCAN_OP_NUM
+};
+
+enum rtw89_scan_be_mode {
+ RTW89_SCAN_MODE_SA,
+ RTW89_SCAN_MODE_MACC,
+ RTW89_SCAN_MODE_NUM
+};
+
+enum rtw89_scan_be_opmode {
+ RTW89_SCAN_OPMODE_NONE,
+ RTW89_SCAN_OPMODE_TBTT,
+ RTW89_SCAN_OPMODE_INTV,
+ RTW89_SCAN_OPMODE_CNT,
+ RTW89_SCAN_OPMODE_NUM,
+};
+
+struct rtw89_scan_option {
+ bool enable;
+ bool target_ch_mode;
+ u8 num_macc_role;
+ u8 num_opch;
+ u8 repeat;
+ u16 norm_pd;
+ u16 slow_pd;
+ u16 norm_cy;
+ u8 opch_end;
+ u64 prohib_chan;
+ enum rtw89_phy_idx band;
+ enum rtw89_scan_be_operation operation;
+ enum rtw89_scan_be_mode scan_mode;
+ enum rtw89_mlo_dbcc_mode mlo_mode;
+};
+
enum rtw89_qta_mode {
RTW89_QTA_SCC,
+ RTW89_QTA_DBCC,
RTW89_QTA_DLFW,
RTW89_QTA_WOW,
@@ -3713,7 +3793,7 @@ struct rtw89_chip_info {
u32 rf_base_addr[2];
u8 support_chanctx_num;
u8 support_bands;
- bool support_bw160;
+ u16 support_bandwidths;
bool support_unii4;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
@@ -3838,7 +3918,7 @@ enum rtw89_host_rpr_mode {
RTW89_RPR_MODE_STF
};
-#define RTW89_COMPLETION_BUF_SIZE 24
+#define RTW89_COMPLETION_BUF_SIZE 40
#define RTW89_WAIT_COND_IDLE UINT_MAX
struct rtw89_completion_data {
@@ -3897,6 +3977,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_DEEP_PS,
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
+ RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
};
struct rtw89_fw_suit {
@@ -3957,6 +4038,19 @@ struct rtw89_fw_elm_info {
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
};
+enum rtw89_fw_mss_dev_type {
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_INV = 0xFF,
+};
+
+struct rtw89_fw_secure {
+ bool secure_boot;
+ u32 sb_sel_mgn;
+ u8 mss_dev_type;
+ u8 mss_cust_idx;
+ u8 mss_key_num;
+};
+
struct rtw89_fw_info {
struct rtw89_fw_req_info req;
int fw_format;
@@ -3971,6 +4065,7 @@ struct rtw89_fw_info {
struct rtw89_fw_log log;
u32 feature_map;
struct rtw89_fw_elm_info elm_info;
+ struct rtw89_fw_secure sec;
};
#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
@@ -4045,6 +4140,7 @@ struct rtw89_tas_info {
struct rtw89_chanctx_cfg {
enum rtw89_sub_entity_idx idx;
+ int ref_count;
};
enum rtw89_chanctx_changes {
@@ -4064,13 +4160,16 @@ enum rtw89_entity_mode {
RTW89_ENTITY_MODE_MCC,
NUM_OF_RTW89_ENTITY_MODE,
- RTW89_ENTITY_MODE_INVALID = NUM_OF_RTW89_ENTITY_MODE,
+ RTW89_ENTITY_MODE_INVALID = -EINVAL,
+ RTW89_ENTITY_MODE_UNHANDLED = -ESRCH,
};
struct rtw89_sub_entity {
struct cfg80211_chan_def chandef;
struct rtw89_chan chan;
struct rtw89_chan_rcd rcd;
+
+ /* only assigned when running with chanctx_ops */
struct rtw89_chanctx_cfg *cfg;
};
@@ -4123,6 +4222,7 @@ enum rtw89_flags {
RTW89_FLAG_CMAC1_FUNC,
RTW89_FLAG_FW_RDY,
RTW89_FLAG_RUNNING,
+ RTW89_FLAG_PROBE_DONE,
RTW89_FLAG_BFEE_MON,
RTW89_FLAG_BFEE_EN,
RTW89_FLAG_BFEE_TIMER_KEEP,
@@ -4179,6 +4279,21 @@ struct rtw89_phy_stat {
struct rtw89_pkt_stat last_pkt_stat;
};
+enum rtw89_rfk_report_state {
+ RTW89_RFK_STATE_START = 0x0,
+ RTW89_RFK_STATE_OK = 0x1,
+ RTW89_RFK_STATE_FAIL = 0x2,
+ RTW89_RFK_STATE_TIMEOUT = 0x3,
+ RTW89_RFK_STATE_H2C_CMD_ERR = 0x4,
+};
+
+struct rtw89_rfk_wait_info {
+ struct completion completion;
+ ktime_t start_time;
+ enum rtw89_rfk_report_state state;
+ u8 version;
+};
+
#define RTW89_DACK_PATH_NR 2
#define RTW89_DACK_IDX_NR 2
#define RTW89_DACK_MSBK_NR 16
@@ -4194,15 +4309,18 @@ struct rtw89_dack_info {
bool msbk_timeout[RTW89_DACK_PATH_NR];
};
-#define RTW89_IQK_CHS_NR 2
-#define RTW89_IQK_PATH_NR 4
+#define RTW89_RFK_CHS_NR 3
struct rtw89_rfk_mcc_info {
- u8 ch[RTW89_IQK_CHS_NR];
- u8 band[RTW89_IQK_CHS_NR];
+ u8 ch[RTW89_RFK_CHS_NR];
+ u8 band[RTW89_RFK_CHS_NR];
+ u8 bw[RTW89_RFK_CHS_NR];
u8 table_idx;
};
+#define RTW89_IQK_CHS_NR 2
+#define RTW89_IQK_PATH_NR 4
+
struct rtw89_lck_info {
u8 thermal[RF_PATH_MAX];
};
@@ -4380,6 +4498,11 @@ struct rtw89_cfo_tracking_info {
u8 lock_cnt;
};
+enum rtw89_tssi_mode {
+ RTW89_TSSI_NORMAL = 0,
+ RTW89_TSSI_SCAN = 1,
+};
+
enum rtw89_tssi_alimk_band {
TSSI_ALIMK_2G = 0,
TSSI_ALIMK_5GL,
@@ -4589,6 +4712,7 @@ struct rtw89_hw_scan_info {
struct ieee80211_vif *scanning_vif;
struct list_head pkt_list[NUM_NL80211_BANDS];
struct rtw89_chan op_chan;
+ bool abort;
u32 last_chan_idx;
};
@@ -4605,6 +4729,48 @@ enum rtw89_phy_bb_gain_band {
RTW89_BB_GAIN_BAND_NR,
};
+enum rtw89_phy_gain_band_be {
+ RTW89_BB_GAIN_BAND_2G_BE = 0,
+ RTW89_BB_GAIN_BAND_5G_L_BE = 1,
+ RTW89_BB_GAIN_BAND_5G_M_BE = 2,
+ RTW89_BB_GAIN_BAND_5G_H_BE = 3,
+ RTW89_BB_GAIN_BAND_6G_L0_BE = 4,
+ RTW89_BB_GAIN_BAND_6G_L1_BE = 5,
+ RTW89_BB_GAIN_BAND_6G_M0_BE = 6,
+ RTW89_BB_GAIN_BAND_6G_M1_BE = 7,
+ RTW89_BB_GAIN_BAND_6G_H0_BE = 8,
+ RTW89_BB_GAIN_BAND_6G_H1_BE = 9,
+ RTW89_BB_GAIN_BAND_6G_UH0_BE = 10,
+ RTW89_BB_GAIN_BAND_6G_UH1_BE = 11,
+
+ RTW89_BB_GAIN_BAND_NR_BE,
+};
+
+enum rtw89_phy_bb_bw_be {
+ RTW89_BB_BW_20_40 = 0,
+ RTW89_BB_BW_80_160_320 = 1,
+
+ RTW89_BB_BW_NR_BE,
+};
+
+enum rtw89_bw20_sc {
+ RTW89_BW20_SC_20M = 1,
+ RTW89_BW20_SC_40M = 2,
+ RTW89_BW20_SC_80M = 4,
+ RTW89_BW20_SC_160M = 8,
+ RTW89_BW20_SC_320M = 16,
+};
+
+enum rtw89_cmac_table_bw {
+ RTW89_CMAC_BW_20M = 0,
+ RTW89_CMAC_BW_40M = 1,
+ RTW89_CMAC_BW_80M = 2,
+ RTW89_CMAC_BW_160M = 3,
+ RTW89_CMAC_BW_320M = 4,
+
+ RTW89_CMAC_BW_NR,
+};
+
enum rtw89_phy_bb_rxsc_num {
RTW89_BB_RXSC_NUM_40 = 9, /* SC: 0, 1~8 */
RTW89_BB_RXSC_NUM_80 = 13, /* SC: 0, 1~8, 9~12 */
@@ -4627,6 +4793,27 @@ struct rtw89_phy_bb_gain_info {
[RTW89_BB_RXSC_NUM_160];
};
+struct rtw89_phy_bb_gain_info_be {
+ s8 lna_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [LNA_GAIN_NUM];
+ s8 tia_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [TIA_GAIN_NUM];
+ s8 lna_gain_bypass[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM + 1];
+ s8 rpl_ofst_20[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_20M];
+ s8 rpl_ofst_40[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_40M];
+ s8 rpl_ofst_80[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_80M];
+ s8 rpl_ofst_160[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_160M];
+};
+
struct rtw89_phy_efuse_gain {
bool offset_valid;
bool comp_valid;
@@ -4681,6 +4868,9 @@ struct rtw89_mcc_role {
struct rtw89_mcc_policy policy;
struct rtw89_mcc_limit limit;
+ /* only valid when running with FW MRC mechanism */
+ u8 slot_idx;
+
/* byte-array in LE order for FW */
u8 macid_bitmap[BITS_TO_BYTES(RTW89_MAX_MAC_ID_NUM)];
@@ -4724,7 +4914,11 @@ struct rtw89_mcc_sync {
bool enable;
u16 offset; /* TU */
u8 macid_src;
+ u8 band_src;
+ u8 port_src;
u8 macid_tgt;
+ u8 band_tgt;
+ u8 port_tgt;
};
struct rtw89_mcc_config {
@@ -4757,6 +4951,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
const struct rtw89_pci_info *pci_info;
@@ -4806,6 +5001,7 @@ struct rtw89_dev {
DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
struct rtw89_phy_stat phystat;
+ struct rtw89_rfk_wait_info rfk_wait;
struct rtw89_dack_info dack;
struct rtw89_iqk_info iqk;
struct rtw89_dpk_info dpk;
@@ -4824,7 +5020,10 @@ struct rtw89_dev {
struct rtw89_env_monitor_info env_monitor;
struct rtw89_dig_info dig;
struct rtw89_phy_ch_info ch_info;
- struct rtw89_phy_bb_gain_info bb_gain;
+ union {
+ struct rtw89_phy_bb_gain_info ax;
+ struct rtw89_phy_bb_gain_info_be be;
+ } bb_gain;
struct rtw89_phy_efuse_gain efuse_gain;
struct rtw89_phy_ul_tb_info ul_tb_info;
struct rtw89_antdiv_info antdiv;
@@ -5437,6 +5636,14 @@ static inline void rtw89_chip_rfe_gpio(struct rtw89_dev *rtwdev)
chip->ops->rfe_gpio(rtwdev);
}
+static inline void rtw89_chip_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_hw_init)
+ chip->ops->rfk_hw_init(rtwdev);
+}
+
static inline
void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -5446,6 +5653,20 @@ void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
chip->ops->bb_preinit(rtwdev, phy_idx);
}
+static inline
+void rtw89_chip_bb_postinit(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->bb_postinit)
+ return;
+
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_0);
+
+ if (rtwdev->dbcc_en)
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_1);
+}
+
static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5462,6 +5683,14 @@ static inline void rtw89_chip_rfk_init(struct rtw89_dev *rtwdev)
chip->ops->rfk_init(rtwdev);
}
+static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_init_late)
+ chip->ops->rfk_init_late(rtwdev);
+}
+
static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5750,6 +5979,18 @@ out:
rcu_read_unlock();
}
+static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
+{
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ case MLO_1_PLUS_1_2RF:
+ case DBCC_LEGACY:
+ return true;
+ default:
+ return false;
+ }
+}
+
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -5815,7 +6056,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);
-void rtw89_set_channel(struct rtw89_dev *rtwdev);
+int rtw89_set_channel(struct rtw89_dev *rtwdev);
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_chan *chan);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h
index 5c6787179bad..72416f56a071 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.h
+++ b/drivers/net/wireless/realtek/rtw89/efuse.h
@@ -23,5 +23,6 @@ int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev);
int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev);
int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle);
int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *efv);
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/efuse_be.c b/drivers/net/wireless/realtek/rtw89/efuse_be.c
index 8e8b7cd315f7..0be26d5fdf7c 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse_be.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse_be.c
@@ -7,6 +7,31 @@
#include "mac.h"
#include "reg.h"
+#define EFUSE_EXTERNALPN_ADDR_BE 0x1580
+#define EFUSE_B1_MSSDEVTYPE_MASK GENMASK(3, 0)
+#define EFUSE_B1_MSSCUSTIDX0_MASK GENMASK(7, 4)
+#define EFUSE_SERIALNUM_ADDR_BE 0x1581
+#define EFUSE_B2_MSSKEYNUM_MASK GENMASK(3, 0)
+#define EFUSE_B2_MSSCUSTIDX1_MASK BIT(6)
+#define EFUSE_SB_CRYP_SEL_ADDR 0x1582
+#define EFUSE_SB_CRYP_SEL_SIZE 2
+#define EFUSE_SB_CRYP_SEL_DEFAULT 0xFFFF
+#define SB_SEL_MGN_MAX_SIZE 2
+#define EFUSE_SEC_BE_START 0x1580
+#define EFUSE_SEC_BE_SIZE 4
+
+enum rtw89_efuse_mss_dev_type {
+ MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ MSS_DEV_TYPE_FWSEC_WINLIN_INBOX = 0xC,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB = 0xA,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB = 0x9,
+ MSS_DEV_TYPE_FWSEC_NONWIN_INBOX = 0x6,
+};
+
+static const u32 sb_sel_mgn[SB_SEL_MGN_MAX_SIZE] = {
+ 0x8000100, 0xC000180
+};
+
static void rtw89_enable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -418,3 +443,120 @@ out_free:
return ret;
}
+
+static u16 get_sb_cryp_sel_idx(u16 sb_cryp_sel)
+{
+ u8 low_bit, high_bit, cnt_zero = 0;
+ u8 idx, sel_form_v, sel_idx_v;
+ u16 sb_cryp_sel_v = 0x0;
+
+ sel_form_v = u16_get_bits(sb_cryp_sel, MASKBYTE0);
+ sel_idx_v = u16_get_bits(sb_cryp_sel, MASKBYTE1);
+
+ for (idx = 0; idx < 4; idx++) {
+ low_bit = !!(sel_form_v & BIT(idx));
+ high_bit = !!(sel_form_v & BIT(7 - idx));
+ if (low_bit != high_bit)
+ return U16_MAX;
+ if (low_bit)
+ continue;
+
+ cnt_zero++;
+ if (cnt_zero == 1)
+ sb_cryp_sel_v = idx * 16;
+ else if (cnt_zero > 1)
+ return U16_MAX;
+ }
+
+ low_bit = u8_get_bits(sel_idx_v, 0x0F);
+ high_bit = u8_get_bits(sel_idx_v, 0xF0);
+
+ if ((low_bit ^ high_bit) != 0xF)
+ return U16_MAX;
+
+ return sb_cryp_sel_v + low_bit;
+}
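get_sb_cryp_sel_idx() reads the low byte as a mirrored block selector (bit i must equal bit 7-i, and exactly one mirrored pair may be zero, choosing a 16-entry block) and the high byte as a nibble-complement-protected offset; any redundancy failure yields U16_MAX. A userspace re-derivation of that reading (an interpretation of the code above, not a documented format):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t decode(uint16_t sb_cryp_sel)
	{
		uint8_t form = sb_cryp_sel & 0xff, sel = sb_cryp_sel >> 8;
		uint16_t base = 0;
		int i, zeros = 0;

		for (i = 0; i < 4; i++) {
			int lo = !!(form & (1u << i));
			int hi = !!(form & (1u << (7 - i)));

			if (lo != hi)
				return UINT16_MAX;	/* mirror check failed */
			if (!lo && ++zeros > 1)
				return UINT16_MAX;	/* more than one zero pair */
			if (!lo)
				base = i * 16;		/* zero pair selects the block */
		}
		if (((sel & 0xf) ^ (sel >> 4)) != 0xf)
			return UINT16_MAX;		/* nibbles must be complements */
		return base + (sel & 0xf);
	}

	int main(void)
	{
		/* form 0x7e: zero pair at i=0; sel 0xe1: offset 1 -> index 1 */
		printf("%u\n", decode(0xe17e));
		return 0;
	}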
+
+static u8 get_mss_dev_type_idx(struct rtw89_dev *rtwdev, u8 mss_dev_type)
+{
+ switch (mss_dev_type) {
+ case MSS_DEV_TYPE_FWSEC_WINLIN_INBOX:
+ mss_dev_type = 0x0;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB:
+ mss_dev_type = 0x1;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB:
+ mss_dev_type = 0x2;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONWIN_INBOX:
+ mss_dev_type = 0x3;
+ break;
+ case MSS_DEV_TYPE_FWSEC_DEF:
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF;
+ break;
+ default:
+ rtw89_warn(rtwdev, "unknown mss_dev_type %d\n", mss_dev_type);
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_INV;
+ break;
+ }
+
+ return mss_dev_type;
+}
+
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sec_addr = EFUSE_SEC_BE_START;
+ u32 sec_size = EFUSE_SEC_BE_SIZE;
+ u16 sb_cryp_sel, sb_cryp_sel_idx;
+ u8 sec_map[EFUSE_SEC_BE_SIZE];
+ u8 mss_dev_type;
+ u8 b1, b2;
+ int ret;
+
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, sec_map,
+ sec_addr, sec_size, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump secsel map\n");
+ return ret;
+ }
+
+ sb_cryp_sel = sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr] |
+ sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr + 1] << 8;
+ if (sb_cryp_sel == EFUSE_SB_CRYP_SEL_DEFAULT)
+ goto out;
+
+ sb_cryp_sel_idx = get_sb_cryp_sel_idx(sb_cryp_sel);
+ if (sb_cryp_sel_idx >= SB_SEL_MGN_MAX_SIZE) {
+ rtw89_warn(rtwdev, "invalid SB cryp sel idx %d\n", sb_cryp_sel_idx);
+ goto out;
+ }
+
+ sec->sb_sel_mgn = sb_sel_mgn[sb_cryp_sel_idx];
+
+ b1 = sec_map[EFUSE_EXTERNALPN_ADDR_BE - sec_addr];
+ b2 = sec_map[EFUSE_SERIALNUM_ADDR_BE - sec_addr];
+
+ mss_dev_type = u8_get_bits(b1, EFUSE_B1_MSSDEVTYPE_MASK);
+ sec->mss_cust_idx = 0x1F - (u8_get_bits(b1, EFUSE_B1_MSSCUSTIDX0_MASK) |
+ u8_get_bits(b2, EFUSE_B2_MSSCUSTIDX1_MASK) << 4);
+ sec->mss_key_num = 0xF - u8_get_bits(b2, EFUSE_B2_MSSKEYNUM_MASK);
+
+ sec->mss_dev_type = get_mss_dev_type_idx(rtwdev, mss_dev_type);
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_INV) {
+ rtw89_warn(rtwdev, "invalid mss_dev_type %d\n", mss_dev_type);
+ goto out;
+ }
+
+ sec->secure_boot = true;
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "MSS secure_boot=%d dev_type=%d cust_idx=%d key_num=%d\n",
+ sec->secure_boot, sec->mss_dev_type, sec->mss_cust_idx,
+ sec->mss_key_num);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_efuse_read_fw_secure_be);
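Note the complemented read-back in the secure fields above: blank eFuse bits read as 1, so a programmed value is stored inverted within its field and recovered by subtracting the raw bits from the field's all-ones value (the 0x1F - ... and 0xF - ... arithmetic). In isolation:

	#include <stdint.h>

	/* Assumption drawn from the arithmetic above: fields are stored
	 * complemented because unprogrammed eFuse bits read back as 1.
	 */
	static uint8_t mss_cust_idx(uint8_t b1, uint8_t b2)
	{
		uint8_t raw = ((b1 >> 4) & 0x0f) | (((b2 >> 6) & 0x01) << 4);

		return 0x1f - raw;	/* 5-bit field: all-ones minus raw */
	}

	static uint8_t mss_key_num(uint8_t b2)
	{
		return 0xf - (b2 & 0x0f);	/* 4-bit field */
	}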
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 09684cea9731..63897351ca15 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -13,6 +13,8 @@
#include "reg.h"
#include "util.h"
+static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+
union rtw89_fw_element_arg {
size_t offset;
enum rtw89_rf_path rf_path;
@@ -163,6 +165,161 @@ static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
return 0;
}
+static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr,
+ u32 rmp_tbl_size, u32 *key_idx)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sel_byte_idx;
+ u32 mss_sel_idx;
+ u8 sel_bit_idx;
+ int i;
+
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
+ if (!mss_hdr->defen)
+ return -ENOENT;
+
+ mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ } else {
+ if (mss_hdr->defen)
+ mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
+ else
+ mss_sel_idx = 0;
+ mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) +
+ sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ }
+
+ sel_byte_idx = mss_sel_idx >> 3;
+ sel_bit_idx = mss_sel_idx & 0x7;
+
+ if (sel_byte_idx >= rmp_tbl_size)
+ return -EFAULT;
+
+ if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
+ return -ENOENT;
+
+ *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
+
+ for (i = 0; i < sel_byte_idx; i++)
+ *key_idx += hweight8(mss_hdr->rmp_tbl[i]);
+
+ return 0;
+}
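The remap table is a bitmap over every possible key slot, but the firmware file stores keys only for set bits; the selected key's position in the file is therefore the rank of its bit, i.e. the count of set bits before it. Same computation with builtins instead of hweight8():

	#include <stdint.h>

	static int key_rank(const uint8_t *rmp_tbl, uint32_t tbl_size, uint32_t sel)
	{
		uint32_t byte = sel >> 3, bit = sel & 7;
		uint32_t i;
		int rank;

		if (byte >= tbl_size || !(rmp_tbl[byte] & (1u << bit)))
			return -1;	/* selected key not present in the pool */

		/* set bits before `bit` in the same byte ... */
		rank = __builtin_popcount(rmp_tbl[byte] & ((1u << bit) - 1));
		/* ... plus all set bits in earlier bytes */
		for (i = 0; i < byte; i++)
			rank += __builtin_popcount(rmp_tbl[i]);
		return rank;
	}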
+
+static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
+ const union rtw89_fw_section_mssc_content *section_content = content;
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 rmp_tbl_size;
+ u32 key_sign_len;
+ u32 real_key_idx;
+ u32 sb_sel_ver;
+ int ret;
+
+ if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
+ rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
+ return -ENOENT;
+ }
+
+ if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
+ rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) *
+ mss_hdr->mssdev_max) >> 3;
+ if (mss_hdr->defen)
+ rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
+ } else {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
+ mss_hdr->rmpfmt);
+ return -EINVAL;
+ }
+
+ if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
+ rmp_tbl_size, (int)sizeof(*mss_hdr),
+ le32_to_cpu(mss_hdr->key_raw_offset));
+ return -EINVAL;
+ }
+
+ key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
+ if (!key_sign_len)
+ key_sign_len = 512;
+
+ if (info->dsp_checksum)
+ key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
+
+ *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
+ le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
+
+ if (!sec->secure_boot)
+ goto out;
+
+ sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
+ if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
+ goto ignore;
+
+ ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
+ if (ret)
+ goto ignore;
+
+ section_info->key_addr = content + section_info->len +
+ le32_to_cpu(mss_hdr->key_raw_offset) +
+ key_sign_len * real_key_idx;
+ section_info->key_len = key_sign_len;
+ section_info->key_idx = real_key_idx;
+
+out:
+ if (info->secure_section_exist) {
+ section_info->ignore = true;
+ return 0;
+ }
+
+ info->secure_section_exist = true;
+
+ return 0;
+
+ignore:
+ section_info->ignore = true;
+
+ return 0;
+}
+
+static int __parse_security_section(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ int ret;
+
+ section_info->mssc =
+ le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
+
+ if (section_info->mssc == FORMATTED_MSSC) {
+ ret = __parse_formatted_mssc(rtwdev, info, section_info,
+ section, content, mssc_len);
+ if (ret)
+ return -EINVAL;
+ } else {
+ *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
+ if (info->dsp_checksum)
+ *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
+
+ info->secure_section_exist = true;
+ }
+
+ return 0;
+}
+
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
struct rtw89_fw_bin_info *info)
{
@@ -173,10 +330,12 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
const u8 *fw_end = fw + len;
const u8 *bin;
u32 base_hdr_len;
- u32 mssc_len = 0;
+ u32 mssc_len;
+ int ret;
u32 i;
info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
+ info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
@@ -199,16 +358,9 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info = info->section_info;
for (i = 0; i < info->section_num; i++) {
section = &fw_hdr->sections[i];
+
section_info->type =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
- if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
- section_info->mssc =
- le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
- mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
- } else {
- section_info->mssc = 0;
- }
-
section_info->len =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
@@ -217,15 +369,40 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info->dladdr =
le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
section_info->addr = bin;
- bin += section_info->len;
+
+ if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
+ ret = __parse_security_section(rtwdev, info, section_info,
+ section, bin, &mssc_len);
+ if (ret)
+ return ret;
+ } else {
+ section_info->mssc = 0;
+ mssc_len = 0;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
+ i, section_info->type, section_info->len,
+ section_info->mssc, mssc_len, bin - fw);
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
+ section_info->ignore, section_info->key_addr,
+ section_info->key_addr ?
+ section_info->key_addr - section_info->addr : 0,
+ section_info->key_len, section_info->key_idx);
+
+ bin += section_info->len + mssc_len;
section_info++;
}
- if (fw_end != bin + mssc_len) {
+ if (fw_end != bin) {
rtw89_err(rtwdev, "[ERR]fw bin size\n");
return -EINVAL;
}
+ if (!info->secure_section_exist)
+ rtw89_warn(rtwdev, "no firmware secure section\n");
+
return 0;
}
@@ -458,6 +635,8 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -919,9 +1098,56 @@ static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
len + H2C_HEADER_LEN));
}
-static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr *fw_hdr)
{
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_W7_PART_SIZE);
+
+ return 0;
+}
+
+static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_v1 *fw_hdr)
+{
+ struct rtw89_fw_hdr_section_info *section_info;
+ struct rtw89_fw_hdr_section_v1 *section;
+ u8 dst_sec_idx = 0;
+ u8 sec_idx;
+
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_V1_W7_PART_SIZE);
+
+ for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
+ section_info = &info->section_info[sec_idx];
+ section = &fw_hdr->sections[sec_idx];
+
+ if (section_info->ignore)
+ continue;
+
+ if (dst_sec_idx != sec_idx)
+ fw_hdr->sections[dst_sec_idx] = *section;
+
+ dst_sec_idx++;
+ }
+
+ le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
+
+ return (info->section_num - dst_sec_idx) * sizeof(*section);
+}
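The v1 tweak above is a stable in-place filter over the header's section array: sections flagged ignore are squeezed out, the surviving count is written back to W6, and the returned byte count lets the caller trim the h2c buffer. The filter itself, generically:

	#include <stddef.h>

	/* keep[i] marks survivors; returns how many entries were removed,
	 * so the caller can trim removed * sizeof(entry) tail bytes.
	 */
	static size_t compact(int *arr, const int *keep, size_t n)
	{
		size_t src, dst = 0;

		for (src = 0; src < n; src++) {
			if (!keep[src])
				continue;
			if (dst != src)
				arr[dst] = arr[src];
			dst++;
		}
		return n - dst;
	}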
+
+static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
+{
+ u32 len = info->hdr_len - info->dynamic_hdr_len;
+ struct rtw89_fw_hdr_v1 *fw_hdr_v1;
+ const u8 *fw = fw_suit->data;
+ struct rtw89_fw_hdr *fw_hdr;
struct sk_buff *skb;
+ u32 truncated;
u32 ret = 0;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
@@ -931,7 +1157,26 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
}
skb_put_data(skb, fw, len);
- SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
+
+ switch (fw_suit->hdr_ver) {
+ case 0:
+ fw_hdr = (struct rtw89_fw_hdr *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
+ break;
+ case 1:
+ fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto fail;
+ }
+
+ if (truncated) {
+ len -= truncated;
+ skb_trim(skb, len);
+ }
+
rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FWDL,
H2C_FUNC_MAC_FWHDR_DL, len);
@@ -950,12 +1195,14 @@ fail:
return ret;
}
-static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
- ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
+ ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
if (ret) {
rtw89_err(rtwdev, "[ERR]FW header download\n");
return ret;
@@ -979,9 +1226,21 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
const u8 *section = info->addr;
u32 residue_len = info->len;
+ bool copy_key = false;
u32 pkt_len;
int ret;
+ if (info->ignore)
+ return 0;
+
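+ /* The security key blob overwrites the tail of the section's first
+ * download packet, so it must fit inside one packet and inside the
+ * section itself; otherwise the copy is skipped with a warning.
+ */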
+ if (info->key_addr && info->key_len) {
+ if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
+ rtw89_warn(rtwdev, "ignore to copy key data because of len %d, %d, %d\n",
+ info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len);
+ else
+ copy_key = true;
+ }
+
while (residue_len) {
if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
pkt_len = FWDL_SECTION_PER_PKT_LEN;
@@ -995,6 +1254,10 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
}
skb_put_data(skb, section, pkt_len);
+ if (copy_key)
+ memcpy(skb->data + pkt_len - info->key_len,
+ info->key_addr, info->key_len);
+
ret = rtw89_h2c_tx(rtwdev, skb, true);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
@@ -1101,7 +1364,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
struct rtw89_fw_suit *fw_suit)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_fw_bin_info info;
+ struct rtw89_fw_bin_info info = {};
int ret;
ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
@@ -1120,8 +1383,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
- info.dynamic_hdr_len);
+ ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
if (ret)
return ret;
@@ -1485,13 +1747,108 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
-#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
+
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
+ h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
+ h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
+ h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
+ h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
+ h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
+ h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
+ h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
+
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam *h2c;
u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
u8 entry_idx;
int ret;
@@ -1509,32 +1866,34 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
- SET_BA_CAM_MACID(skb->data, macid);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
+
+ h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
else
- SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
+ h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
if (!valid)
goto end;
- SET_BA_CAM_VALID(skb->data, valid);
- SET_BA_CAM_TID(skb->data, params->tid);
+ h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
if (params->buf_size > 64)
- SET_BA_CAM_BMAP_SIZE(skb->data, 4);
+ h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
else
- SET_BA_CAM_BMAP_SIZE(skb->data, 0);
+ h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 1);
- SET_BA_CAM_SSN(skb->data, params->ssn);
+ h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
- SET_BA_CAM_STD_EN(skb->data, 1);
- SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
+ h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND);
}
end:
@@ -1542,7 +1901,7 @@ end:
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1556,31 +1915,35 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
u8 entry_idx, u8 uid)
{
+ struct rtw89_h2c_ba_cam *h2c;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
- SET_BA_CAM_VALID(skb->data, 1);
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
- SET_BA_CAM_UID(skb->data, uid);
- SET_BA_CAM_BAND(skb->data, 0);
- SET_BA_CAM_STD_EN(skb->data, 0);
+ h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
+ le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1609,14 +1972,132 @@ void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
}
}
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam_v1 *h2c;
+ u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 entry_idx;
+ u8 bmap_size;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
+ if (ret) {
+ /* it still works even if we don't have a static BA CAM, because
+ * the hardware can create dynamic BA CAM entries automatically.
+ */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
+
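+ /* Encode the BA window into the firmware bitmap-size field: 0 for
+ * windows up to 64 frames, then 4, 8 and 10 for windows up to 256,
+ * 512 and 1024 frames respectively.
+ */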
+ if (params->buf_size > 512)
+ bmap_size = 10;
+ else if (params->buf_size > 256)
+ bmap_size = 8;
+ else if (params->buf_size > 64)
+ bmap_size = 4;
+ else
+ bmap_size = 0;
+
+ h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
+ le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
+ le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
+
+ entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
+ le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
+
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx)
+{
+ struct rtw89_h2c_ba_cam_init *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
+
+ h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
+ le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
+ le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
struct sk_buff *skb;
- u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
- BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+ u32 comp = 0;
int ret;
+ if (enable)
+ comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
+ BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
+ BIT(RTW89_FW_LOG_COMP_SCAN);
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
@@ -1815,6 +2296,50 @@ fail:
return ret;
}
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ rtwvif->sub_entity_idx);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_h2c_lps_ch_info *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
+
+ h2c->info[0].central_ch = chan->channel;
+ h2c->info[0].pri_ch = chan->primary_channel;
+ h2c->info[0].band = chan->band_type;
+ h2c->info[0].bw = chan->band_width;
+ h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_p2p_noa_desc *desc,
@@ -1892,11 +2417,12 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
struct sk_buff *skb;
- u8 macid = rtwvif->mac_id;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
@@ -1937,6 +2463,91 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
+
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
+
+ h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
+ le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
+ le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
+
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
+
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
+
+ h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
+
+ h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
+
+ h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
+
+ h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
+ le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
+ le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
+ h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
+
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
+
+ h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
+ h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
+
+ h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
+ h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta, u8 *pads)
@@ -1950,9 +2561,6 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
u16 ppe;
int i;
- if (!sta->deflink.he_cap.has_he)
- return;
-
ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
if (!ppe_th) {
@@ -2011,7 +2619,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
int ret;
memset(pads, 0, sizeof(pads));
- if (sta)
+ if (sta && sta->deflink.he_cap.has_he)
__get_sta_he_pkt_padding(rtwdev, sta, pads);
if (vif->p2p)
@@ -2073,6 +2681,246 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
+
+static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, u8 *pads)
+{
+ u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
+ u16 ppe_thres_hdr;
+ u8 ppe16, ppe8;
+ u8 n, idx, sh;
+ u8 ru_bitmap;
+ bool ppe_th;
+ u16 ppe;
+ int i;
+
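+ /* Without a PPE thresholds field, every bandwidth takes the common
+ * nominal packet padding; otherwise walk the per-RU PPET16/PPET8
+ * pairs and derive a padding value per bandwidth index.
+ */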
+ ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
+ if (!ppe_th) {
+ u8 pad;
+
+ pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++)
+ pads[i] = pad;
+
+ return;
+ }
+
+ ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres);
+ ru_bitmap = u16_get_bits(ppe_thres_hdr,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n = hweight8(ru_bitmap);
+ n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
+ (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
+ if (!(ru_bitmap & BIT(i))) {
+ pads[i] = 1;
+ continue;
+ }
+
+ idx = n >> 3;
+ sh = n & 7;
+ n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
+
+ ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx);
+ ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+ sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
+ ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+
+ if (ppe16 != 7 && ppe8 == 7)
+ pads[i] = 2;
+ else if (ppe8 != 7)
+ pads[i] = 1;
+ else
+ pads[i] = 0;
+ }
+}
+
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u8 pads[RTW89_PPE_BW_NUM];
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 lowest_rate;
+ int ret;
+
+ memset(pads, 0, sizeof(pads));
+ if (sta) {
+ if (sta->deflink.eht_cap.has_eht)
+ __get_sta_eht_pkt_padding(rtwdev, sta, pads);
+ else if (sta->deflink.he_cap.has_he)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ }
+
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
+ le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
+ CCTLINFO_G7_W0_DISDATAFB);
+
+ h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+
+ h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+
+ h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+
+ h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
+ }
+
+ if (vif->bss_conf.eht_support) {
+ u16 punct = vif->bss_conf.chanreq.oper.punctured;
+
+ h2c->w4 |= le32_encode_bits(~punct,
+ CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ }
+
+ h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+
+ h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ CCTLINFO_G7_W6_ULDL);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+
+ if (sta) {
+ h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he,
+ CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
+
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 agg_num = 0;
+ u8 ba_bmap = 0;
+ int ret;
+ u8 tid;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
+ if (agg_num == 0)
+ agg_num = rtwsta->ampdu_params[tid].agg_num;
+ else
+ agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
+ }
+
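+ /* Pick the BA bitmap size from the smallest A-MPDU aggregation
+ * number negotiated across active TIDs; note the encoding is not
+ * monotonic (3 selects the smallest bitmap, 0 the 64-frame default).
+ */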
+ if (agg_num <= 0x20)
+ ba_bmap = 3;
+ else if (agg_num > 0x20 && agg_num <= 0x40)
+ ba_bmap = 0;
+ else if (agg_num > 0x40 && agg_num <= 0x80)
+ ba_bmap = 1;
+ else if (agg_num > 0x80 && agg_num <= 0x100)
+ ba_bmap = 2;
+ else if (agg_num > 0x100 && agg_num <= 0x200)
+ ba_bmap = 4;
+ else if (agg_num > 0x200 && agg_num <= 0x400)
+ ba_bmap = 5;
+
+ h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
@@ -2155,18 +3003,20 @@ fail:
return ret;
}
-#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif->sub_entity_idx);
- struct sk_buff *skb;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd *h2c;
struct sk_buff *skb_beacon;
- u16 tim_offset;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
int bcn_total_len;
u16 beacon_rate;
+ u16 tim_offset;
void *noa_data;
u8 noa_len;
int ret;
@@ -2192,23 +3042,27 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
skb_put_data(skb_beacon, noa_data, noa_len);
}
- bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
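+ /* The group-IE (TIM) offset handed to firmware is relative to the
+ * frame body, hence the header-length rebase below; BIT(7) OR'd into
+ * the offset field is assumed to flag the offset as valid.
+ */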
+ hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
+
+ bcn_total_len = len + skb_beacon->len;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
dev_kfree_skb_any(skb_beacon);
return -ENOMEM;
}
- skb_put(skb, H2C_BCN_BASE_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
- SET_BCN_UPD_PORT(skb->data, rtwvif->port);
- SET_BCN_UPD_MBSSID(skb->data, 0);
- SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
- SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
- SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
- SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
- SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
- SET_BCN_UPD_RATE(skb->data, beacon_rate);
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
skb_put_data(skb, skb_beacon->data, skb_beacon->len);
dev_kfree_skb_any(skb_beacon);
@@ -2227,6 +3081,90 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
return 0;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
+
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd_be *h2c;
+ struct sk_buff *skb_beacon;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int bcn_total_len;
+ u16 beacon_rate;
+ u16 tim_offset;
+ void *noa_data;
+ u8 noa_len;
+ int ret;
+
+ if (vif->p2p)
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ beacon_rate = RTW89_HW_RATE_CCK1;
+ else
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
+ NULL, 0);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
+
+ noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
+ if (noa_len &&
+ (noa_len <= skb_tailroom(skb_beacon) ||
+ pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
+ skb_put_data(skb_beacon, noa_data, noa_len);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
+
+ bcn_total_len = len + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
+ bcn_total_len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -2277,45 +3215,93 @@ fail:
return ret;
}
-#define H2C_JOIN_INFO_LEN 4
+static enum rtw89_fw_sta_type
+rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (!sta)
+ goto by_vif;
+
+ if (sta->deflink.eht_cap.has_eht)
+ return RTW89_FW_BE_STA;
+ else if (sta->deflink.he_cap.has_he)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+
+by_vif:
+ if (vif->bss_conf.eht_support)
+ return RTW89_FW_BE_STA;
+ else if (vif->bss_conf.he_support)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+}
+
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role = rtwvif->self_role;
+ enum rtw89_fw_sta_type sta_type;
u8 net_type = rtwvif->net_type;
+ struct rtw89_h2c_join_v1 *h2c_v1;
+ struct rtw89_h2c_join *h2c;
+ u32 len = sizeof(*h2c);
+ bool format_v1 = false;
int ret;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ len = sizeof(*h2c_v1);
+ format_v1 = true;
+ }
+
if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
self_role = RTW89_SELF_ROLE_AP_CLIENT;
net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, mac_id);
- SET_JOININFO_OP(skb->data, dis_conn);
- SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
- SET_JOININFO_WMM(skb->data, rtwvif->wmm);
- SET_JOININFO_TGR(skb->data, rtwvif->trigger);
- SET_JOININFO_ISHESTA(skb->data, 0);
- SET_JOININFO_DLBW(skb->data, 0);
- SET_JOININFO_TF_MAC_PAD(skb->data, 0);
- SET_JOININFO_DL_T_PE(skb->data, 0);
- SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, net_type);
- SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, self_role);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_join *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
+ le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
+ le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) |
+ le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
+ le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
+ le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
+ le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
+
+ if (!format_v1)
+ goto done;
+
+ h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
+ sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta);
+
+ h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
+ h2c_v1->w2 = 0;
+
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_JOININFO, 0, 1,
- H2C_JOIN_INFO_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -2368,24 +3354,49 @@ fail:
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause)
{
- struct rtw89_fw_macid_pause_grp h2c = {{0}};
- u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
+ struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
+ struct rtw89_fw_macid_pause_grp *h2c;
+ __le32 set = cpu_to_le32(BIT(sh));
+ u8 h2c_macid_pause_id;
struct sk_buff *skb;
+ u32 len;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
+ len = sizeof(*h2c_new);
+ } else {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
+ len = sizeof(*h2c);
+ }
+
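+ /* The PAUSE_SLEEP variant drives both the pause and sleep bitmaps
+ * in one command, while the legacy format carries pause masks only.
+ */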
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
return -ENOMEM;
}
- h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
- if (pause)
- h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
- skb_put_data(skb, &h2c, len);
+ skb_put(skb, len);
+
+ if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
+ h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
+
+ h2c_new->n[0].pause_mask_grp[grp] = set;
+ h2c_new->n[0].sleep_mask_grp[grp] = set;
+ if (pause) {
+ h2c_new->n[0].pause_grp[grp] = set;
+ h2c_new->n[0].sleep_grp[grp] = set;
+ }
+ } else {
+ h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
+
+ h2c->mask_grp[grp] = set;
+ if (pause)
+ h2c->pause_grp[grp] = set;
+ }
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
- H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
+ h2c_macid_pause_id, 1, 0,
len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
@@ -2516,6 +3527,8 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
+ s32 thold = RTW89_DEFAULT_CQM_THOLD;
+ u32 hyst = RTW89_DEFAULT_CQM_HYST;
struct rtw89_h2c_bcnfltr *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -2536,14 +3549,19 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
+ if (bss_conf->cqm_rssi_hyst)
+ hyst = bss_conf->cqm_rssi_hyst;
+ if (bss_conf->cqm_rssi_thold)
+ thold = bss_conf->cqm_rssi_thold;
+
h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
RTW89_H2C_BCNFLTR_W0_MODE) |
le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
- le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
- le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
+ le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
+ le32_encode_bits(thold + MAX_RSSI,
RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
@@ -3296,62 +4314,163 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
return 0;
}
-#define H2C_LEN_SCAN_LIST_OFFLOAD 4
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem *elem;
struct rtw89_mac_chinfo *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
struct sk_buff *skb;
- int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
unsigned int cond;
- u8 *cmd;
+ int skb_len;
int ret;
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+
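+ /* struct_size() accounts for the fixed header plus ch_num
+ * flexible-array elements; the static_assert above pins the element
+ * layout to the firmware's expected RTW89_MAC_CHINFO_SIZE.
+ */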
+ skb_len = struct_size(h2c, elem, ch_num);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
- cmd = skb->data;
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
+
+ h2c->ch_num = ch_num;
+ h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
+
+ list_for_each_entry(ch_info, chan_list, list) {
+ elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
+ le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
+ le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
+ le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
+ le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
+ le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
+
+ elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem_be *elem;
+ struct rtw89_mac_chinfo_be *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
+ struct sk_buff *skb;
+ unsigned int cond;
+ int skb_len;
+ int ret;
+
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
- RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
- /* in unit of 4 bytes */
- RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+ skb_len = struct_size(h2c, elem, ch_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
+
+ h2c->ch_num = ch_num;
+ h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
+ h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
list_for_each_entry(ch_info, chan_list, list) {
- cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
-
- RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
- RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
- RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
- RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
- RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
- RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
- RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
- RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
- RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
- RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
- RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
- RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
- RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
- RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
- RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
- RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
- RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
- RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
- RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
- RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
- RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
- RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+ elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch,
+ RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
+ le32_encode_bits(ch_info->pause_data,
+ RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num,
+ RTW89_H2C_CHINFO_BE_W1_RANDOM) |
+ le32_encode_bits(ch_info->notify_action,
+ RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
+ le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
+ RTW89_H2C_CHINFO_BE_W1_PROBE) |
+ le32_encode_bits(ch_info->leave_crit,
+ RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
+ le32_encode_bits(ch_info->chkpt_timer,
+ RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
+
+ elem->w2 = le32_encode_bits(ch_info->leave_time,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
+ le32_encode_bits(ch_info->leave_th,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
+ le32_encode_bits(ch_info->tx_pkt_ctrl,
+ RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
+
+ elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
+
+ elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
+ le32_encode_bits(ch_info->fw_probe0_ssids,
+ RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
+
+ elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
+ le32_encode_bits(ch_info->fw_probe0_bssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3410,7 +4529,10 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
H2C_FUNC_SCANOFLD, 1, 1,
len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
+ if (option->enable)
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3421,6 +4543,169 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ u8 i, idx;
+
+ sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband) /* assumed guard: no 6 GHz band registered */
+ return;
+
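+ /* 6 GHz channel numbers advance in steps of 4 (1, 5, 9, ...), so
+ * each prohibit bit covers one 20 MHz channel.
+ */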
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ if (chan->flags & IEEE80211_CHAN_DISABLED) {
+ idx = (chan->hw_value - 1) / 4;
+ option->prohib_chan |= BIT(idx);
+ }
+ }
+}
+
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_scanofld_be_macc_role *macc_role;
+ struct rtw89_chan *op = &scan_info->op_chan;
+ struct rtw89_h2c_scanofld_be_opch *opch;
+ struct rtw89_h2c_scanofld_be *h2c;
+ struct sk_buff *skb;
+ u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
+ u8 opch_size = sizeof(*opch) * option->num_opch;
+ u8 probe_id[NUM_NL80211_BANDS];
+ unsigned int cond;
+ void *ptr;
+ int ret;
+ u32 len;
+ u8 i;
+
+ rtw89_scan_get_6g_disabled_chan(rtwdev, option);
+
+ len = sizeof(*h2c) + macc_role_size + opch_size;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
+ ptr = skb->data;
+
+ /* probe_id feeds w3/w4 below but is never filled in this path;
+ * initialize it to the "no packet" sentinel (assumed to be
+ * RTW89_SCANOFLD_PKT_NONE) so it does not carry stack garbage.
+ */
+ memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+
+ h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
+ le32_encode_bits(option->scan_mode,
+ RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
+ le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
+ le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
+
+ h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
+ le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
+ le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
+
+ h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
+ le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
+ le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
+
+ h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
+ le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
+
+ h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
+ le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
+
+ h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
+
+ h2c->w6 = le32_encode_bits(option->prohib_chan,
+ RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
+ h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
+ RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
+ ptr += sizeof(*h2c);
+
+ for (i = 0; i < option->num_macc_role; i++) {
+ macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i];
+ macc_role->w0 =
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
+ ptr += sizeof(*macc_role);
+ }
+
+ for (i = 0; i < option->num_opch; i++) {
+ opch = ptr;
+ opch->w0 = le32_encode_bits(rtwvif->mac_id,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
+ le32_encode_bits(option->band,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
+ le32_encode_bits(rtwvif->port,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
+ le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
+ le32_encode_bits(true,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
+ le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
+
+ opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
+ le32_encode_bits(op->band_type,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
+ le32_encode_bits(op->band_width,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
+ le32_encode_bits(0x3,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
+ le32_encode_bits(op->primary_channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
+ le32_encode_bits(op->channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
+
+ opch->w2 = le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
+ le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
+ le32_encode_bits(2,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
+
+ opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
+ ptr += sizeof(*opch);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_SCANOFLD_BE, 1, 1,
+ len);
+
+ if (option->enable)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to trigger BE scan offload\n");
+ return ret;
+ }
+
+ return 0;
+}
+
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page)
@@ -3497,6 +4782,328 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info *h2c;
+ u8 tbl_sel = rfk_mcc->table_idx;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 tbl, path;
+ u32 val32;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
+
+ h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
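+ /* Mirror the driver's RFK MCC channel/band tables into per-path
+ * slots so firmware sees a consistent view for both RF paths.
+ */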
+ BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
+
+ for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]);
+ h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]);
+ }
+ }
+
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+ h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ }
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
+ h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel0 = cpu_to_le32(val32);
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel1 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod0 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod1 = cpu_to_le32(val32);
+
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c->mlo_1_1 = cpu_to_le32(1);
+
+ h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_tssi *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
+
+ h2c->len = cpu_to_le16(len);
+ h2c->phy = phy_idx;
+ h2c->ch = chan->channel;
+ h2c->bw = chan->band_width;
+ h2c->band = chan->band_type;
+ h2c->hwtx_en = true;
+ h2c->cv = hal->cv;
+ h2c->tssi_mode = tssi_mode;
+
+ rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
+ rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_iqk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_dpk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->dpk_enable = true;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_txgapk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
+
+ h2c->len = len;
+ h2c->ktype = 2;
+ h2c->phy = phy_idx;
+ h2c->kpath = RF_AB;
+ h2c->band = chan->band_type;
+ h2c->bw = chan->band_width;
+ h2c->ch = chan->channel;
+ h2c->cv = hal->cv;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_dack *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dack *)skb->data;
+
+ h2c->len = cpu_to_le32(len);
+ h2c->phy = cpu_to_le32(phy_idx);
+ h2c->type = cpu_to_le32(0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_rxdck *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->is_afe = false;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
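For reference, the six RF-calibration senders above (TSSI, IQK, DPK, TXGAPK, DACK, RX DCK) all follow one skeleton: allocate an skb with header room, reserve the payload with skb_put(), fill the typed payload, stamp the H2C header, and transmit, freeing the skb themselves only on the error path. A condensed sketch of that shared skeleton; rtw89_h2c_rf_xxx and H2C_FUNC_RFK_XXX_OFFLOAD are hypothetical placeholders, not symbols from the patch:

static int rtw89_fw_h2c_rf_skeleton(struct rtw89_dev *rtwdev)
{
	struct rtw89_h2c_rf_xxx *h2c;	/* placeholder payload type */
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb)
		return -ENOMEM;	/* helper reserves room for the H2C header */
	skb_put(skb, len);	/* extend the data area to hold the payload */
	h2c = (struct rtw89_h2c_rf_xxx *)skb->data;

	/* ... fill calibration-specific payload fields here ... */

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_XXX_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret)
		dev_kfree_skb_any(skb);	/* on tx failure the skb is still ours */

	return ret;
}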
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@@ -3600,7 +5207,7 @@ static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
default:
return false;
case RTW89_C2H_CAT_MAC:
- return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
+ return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
case RTW89_C2H_CAT_OUTSRC:
return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
}
@@ -4050,8 +5657,66 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
-static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected)
+static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_be *ch_info)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_pktofld_info *info;
+ u8 band, probe_count = 0, i;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
+ if (ssid_num) {
+ band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
+
+ list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+ if (info->channel_6ghz &&
+ ch_info->pri_ch != info->channel_6ghz)
+ continue;
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ }
+
+ if (ch_info->ch_band == RTW89_BAND_6G) {
+ if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
+ !ch_info->is_psc) {
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+ if (!req->duration_mandatory)
+ ch_info->period -= RTW89_DWELL_TIME_6G;
+ }
+ }
+
+ for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
+ ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ if (ch_info->ch_band != RTW89_BAND_6G)
+ ch_info->period =
+ max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_warn(rtwdev, "Channel type out of bounds\n");
+ break;
+ }
+}
+
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo *ch_info, *tmp;
@@ -4127,9 +5792,69 @@ out:
return ret;
}
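+/* Offload channels in batches of at most RTW89_SCAN_LIST_LIMIT entries;
+ * scan_info.last_chan_idx records where the next batch resumes.
+ */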
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ enum rtw89_chan_type type;
+ int list_len, ret;
+ bool random_seq;
+ u32 idx;
+
+ random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
+ INIT_LIST_HEAD(&chan_list);
+
+ for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = req->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (req->duration_mandatory)
+ ch_info->period = req->duration;
+ else if (channel->band == NL80211_BAND_6GHZ)
+ ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
+ else
+ ch_info->period = RTW89_CHANNEL_TIME;
+
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->rand_seq_num = random_seq;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+ rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
+
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+
+ rtwdev->scan_info.last_chan_idx = idx;
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
@@ -4137,7 +5862,7 @@ static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "Update probe request failed\n");
goto out;
}
- ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
+ ret = mac->add_chan_list(rtwdev, rtwvif, connected);
out:
return ret;
}
@@ -4154,9 +5879,11 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
rtwdev->scan_info.scanning_vif = vif;
rtwdev->scan_info.last_chan_idx = 0;
+ rtwdev->scan_info.abort = false;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
@@ -4181,10 +5908,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct cfg80211_scan_info info = {
.aborted = aborted,
};
- struct rtw89_vif *rtwvif;
if (!vif)
return;
@@ -4197,22 +5924,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_core_scan_complete(rtwdev, vif, true);
ieee80211_scan_completed(rtwdev->hw, &info);
ieee80211_wake_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
rtw89_release_pkt_list(rtwdev);
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
scan_info->last_chan_idx = 0;
scan_info->scanning_vif = NULL;
+ scan_info->abort = false;
rtw89_chanctx_proceed(rtwdev);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
- rtw89_hw_scan_offload(rtwdev, vif, false);
- rtw89_hw_scan_complete(rtwdev, vif, true);
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ int ret;
+
+ scan_info->abort = true;
+
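+ /* Ask firmware to stop scanning; complete the scan locally only if
+ * the stop request itself fails.
+ */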
+ ret = rtw89_hw_scan_offload(rtwdev, vif, false);
+ if (ret)
+ rtw89_hw_scan_complete(rtwdev, vif, true);
}
static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
@@ -4231,6 +5965,7 @@ static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_scan_option opt = {0};
struct rtw89_vif *rtwvif;
bool connected;
@@ -4248,7 +5983,18 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (ret)
goto out;
}
- ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
+ opt.scan_mode = RTW89_SCAN_MODE_SA;
+ opt.band = RTW89_PHY_0;
+ opt.num_macc_role = 0;
+ opt.mlo_mode = rtwdev->mlo_dbcc_mode;
+ opt.num_opch = connected ? 1 : 0;
+ opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
+ }
+
+ ret = mac->scan_offload(rtwdev, &opt, rtwvif);
out:
return ret;
}
@@ -4922,6 +6668,372 @@ int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
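+/* Serialize one MRC slot into @slot_h2c. A NULL @slot_h2c skips the fill,
+ * so callers can make a first pass to learn the encoded length and a
+ * second pass to fill the buffer.
+ */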
+static
+u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
+ struct rtw89_h2c_mrc_add_slot *slot_h2c)
+{
+ bool fill_h2c = !!slot_h2c;
+ unsigned int i;
+
+ if (!fill_h2c)
+ goto calc_len;
+
+ slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
+ RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
+ le32_encode_bits(slot_arg->courtesy_en,
+ RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
+ le32_encode_bits(slot_arg->role_num,
+ RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
+ slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
+ le32_encode_bits(slot_arg->courtesy_target,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
+
+ for (i = 0; i < slot_arg->role_num; i++) {
+ slot_h2c->roles[i].w0 =
+ le32_encode_bits(slot_arg->roles[i].macid,
+ RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
+ le32_encode_bits(slot_arg->roles[i].role_type,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].is_master,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
+ le32_encode_bits(slot_arg->roles[i].en_tx_null,
+ RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
+ slot_h2c->roles[i].w1 =
+ le32_encode_bits(slot_arg->roles[i].central_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
+ le32_encode_bits(slot_arg->roles[i].primary_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
+ le32_encode_bits(slot_arg->roles[i].bw,
+ RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
+ le32_encode_bits(slot_arg->roles[i].band,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].null_early,
+ RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
+ le32_encode_bits(true,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
+ slot_h2c->roles[i].macid_main_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
+ slot_h2c->roles[i].macid_paired_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
+ }
+
+calc_len:
+ return struct_size(slot_h2c, roles, slot_arg->role_num);
+}
+
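As an aside, a minimal userspace sketch of the size-then-fill idiom this helper implements: call once with a NULL destination to learn the encoded length of a structure ending in a flexible array, allocate, then call again to fill. All names here (slot_blob, encode_slot) are hypothetical and only illustrate the pattern:

#include <stdio.h>
#include <stdlib.h>

struct slot_blob {
	unsigned int duration;
	unsigned int role_num;
	unsigned int roles[];	/* flexible array, like roles[] in the H2C */
};

/* Returns the encoded size; fills *blob only when it is non-NULL. */
static size_t encode_slot(const unsigned int *roles, unsigned int role_num,
			  unsigned int duration, struct slot_blob *blob)
{
	unsigned int i;

	if (!blob)
		goto calc_len;

	blob->duration = duration;
	blob->role_num = role_num;
	for (i = 0; i < role_num; i++)
		blob->roles[i] = roles[i];

calc_len:
	/* the kernel spells this struct_size(blob, roles, role_num) */
	return sizeof(struct slot_blob) + role_num * sizeof(unsigned int);
}

int main(void)
{
	unsigned int roles[] = { 3, 7 };
	size_t len = encode_slot(roles, 2, 100, NULL);	/* pass 1: size only */
	struct slot_blob *blob = malloc(len);

	if (!blob)
		return 1;
	encode_slot(roles, 2, 100, blob);		/* pass 2: fill */
	printf("len=%zu duration=%u role0=%u\n", len,
	       blob->duration, blob->roles[0]);
	free(blob);
	return 0;
}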
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_h2c_mrc_add *h2c_head;
+ struct sk_buff *skb;
+ unsigned int i;
+ void *tmp;
+ u32 len;
+ int ret;
+
+ len = sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ tmp = skb->data;
+
+ h2c_head = tmp;
+ h2c_head->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
+ le32_encode_bits(arg->sch_type,
+ RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
+ le32_encode_bits(arg->btc_in_sch,
+ RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
+
+ tmp += sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_ADD_MRC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_start *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_start *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_START_W0_SCH_IDX) |
+ le32_encode_bits(arg->old_sch_idx,
+ RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_START_W0_ACTION);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_START_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_del *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_del *)skb->data;
+
+ h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_DEL_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_req_tsf *h2c;
+ struct rtw89_mac_mrc_tsf_rpt *tmp;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, infos, arg->num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
+
+ h2c->req_tsf_num = arg->num;
+ for (i = 0; i < arg->num; i++)
+ h2c->infos[i] =
+ u8_encode_bits(arg->infos[i].band,
+ RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
+ u8_encode_bits(arg->infos[i].port,
+ RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_REQ_TSF, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
+ if (ret)
+ return ret;
+
+ tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
+ *rpt = *tmp;
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_bitmap *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
+ le32_encode_bits(arg->macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
+ h2c->w1 = le32_encode_bits(arg->client_macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg)
+{
+ struct rtw89_h2c_mrc_sync *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
+
+ h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
+ le32_encode_bits(arg->src.port,
+ RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
+ le32_encode_bits(arg->src.band,
+ RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
+ le32_encode_bits(arg->dest.port,
+ RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
+ le32_encode_bits(arg->dest.band,
+ RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
+ h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_SYNC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_duration *h2c;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, slots, arg->slot_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ for (i = 0; i < arg->slot_num; i++) {
+ h2c->slots[i] =
+ le32_encode_bits(arg->slots[i].slot_idx,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
+ le32_encode_bits(arg->slots[i].duration,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_DURATION, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 01016588b1fc..9c5464dcc081 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -149,6 +149,7 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_TWT,
RTW89_FW_LOG_COMP_RF,
RTW89_FW_LOG_COMP_MCC = 20,
+ RTW89_FW_LOG_COMP_SCAN = 28,
};
enum rtw89_pkt_offload_op {
@@ -169,6 +170,16 @@ enum rtw89_scanofld_notify_reason {
RTW89_SCAN_ENTER_CH_NOTIFY,
RTW89_SCAN_LEAVE_CH_NOTIFY,
RTW89_SCAN_END_SCAN_NOTIFY,
+ RTW89_SCAN_REPORT_NOTIFY,
+ RTW89_SCAN_CHKPT_NOTIFY,
+ RTW89_SCAN_ENTER_OP_NOTIFY,
+ RTW89_SCAN_LEAVE_OP_NOTIFY,
+};
+
+enum rtw89_scanofld_status {
+ RTW89_SCAN_STATUS_NOTIFY,
+ RTW89_SCAN_STATUS_SUCCESS,
+ RTW89_SCAN_STATUS_FAIL,
};
enum rtw89_chan_type {
@@ -184,6 +195,9 @@ enum rtw89_p2pps_action {
RTW89_P2P_ACT_TERMINATE = 3,
};
+#define RTW89_DEFAULT_CQM_HYST 4
+#define RTW89_DEFAULT_CQM_THOLD -70
+
enum rtw89_bcn_fltr_offload_mode {
RTW89_BCN_FLTR_OFFLOAD_MODE_0 = 0,
RTW89_BCN_FLTR_OFFLOAD_MODE_1,
@@ -216,6 +230,10 @@ struct rtw89_fw_hdr_section_info {
u32 dladdr;
u32 mssc;
u8 type;
+ bool ignore;
+ const u8 *key_addr;
+ u32 key_len;
+ u32 key_idx;
};
struct rtw89_fw_bin_info {
@@ -223,6 +241,8 @@ struct rtw89_fw_bin_info {
u32 hdr_len;
bool dynamic_hdr_en;
u32 dynamic_hdr_len;
+ bool dsp_checksum;
+ bool secure_section_exist;
struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM];
};
@@ -231,6 +251,15 @@ struct rtw89_fw_macid_pause_grp {
__le32 mask_grp[4];
} __packed;
+struct rtw89_fw_macid_pause_sleep_grp {
+ struct {
+ __le32 pause_grp[4];
+ __le32 pause_mask_grp[4];
+ __le32 sleep_grp[4];
+ __le32 sleep_mask_grp[4];
+ } __packed n[4];
+} __packed;
+
#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
#define RTW89_CHANNEL_TIME_6G 20
@@ -243,6 +272,7 @@ struct rtw89_fw_macid_pause_grp {
#define RTW89_SCANOFLD_MAX_IE_LEN 512
#define RTW89_SCANOFLD_PKT_NONE 0xFF
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
#define RTW89_SCAN_LIST_GUARD 4
#define RTW89_SCAN_LIST_LIMIT \
@@ -274,9 +304,32 @@ struct rtw89_mac_chinfo {
bool is_psc;
};
-struct rtw89_scan_option {
- bool enable;
- bool target_ch_mode;
+struct rtw89_mac_chinfo_be {
+ u8 period;
+ u8 dwell_time;
+ u8 central_ch;
+ u8 pri_ch;
+ u8 bw:3;
+ u8 ch_band:2;
+ u8 dfs_ch:1;
+ u8 pause_data:1;
+ u8 tx_null:1;
+ u8 rand_seq_num:1;
+ u8 notify_action:5;
+ u8 probe_id;
+ u8 leave_crit;
+ u8 chkpt_timer;
+ u8 leave_time;
+ u8 leave_th;
+ u16 tx_pkt_ctrl;
+ u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+ u8 sw_def;
+ u16 fw_probe0_ssids;
+ u16 fw_probe0_shortssids;
+ u16 fw_probe0_bssids;
+
+ struct list_head list;
+ bool is_psc;
};
struct rtw89_pktofld_info {
@@ -419,6 +472,7 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
#define FWDL_SECURITY_SECTION_TYPE 9
#define FWDL_SECURITY_SIGLEN 512
+#define FWDL_SECURITY_CHKSUM_LEN 8
struct rtw89_fw_dynhdr_sec {
__le32 w0;
@@ -472,6 +526,7 @@ struct rtw89_fw_hdr {
#define FW_HDR_W4_MIN GENMASK(31, 24)
#define FW_HDR_W5_YEAR GENMASK(31, 0)
#define FW_HDR_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_W7_DYN_HDR BIT(16)
#define FW_HDR_W7_CMD_VERSERION GENMASK(31, 24)
@@ -489,6 +544,7 @@ struct rtw89_fw_hdr_section_v1 {
#define FWSECTION_HDR_V1_W1_CHECKSUM BIT(28)
#define FWSECTION_HDR_V1_W1_REDL BIT(29)
#define FWSECTION_HDR_V1_W2_MSSC GENMASK(7, 0)
+#define FORMATTED_MSSC 0xFF
#define FWSECTION_HDR_V1_W2_BBMCU_IDX GENMASK(27, 24)
struct rtw89_fw_hdr_v1 {
@@ -521,12 +577,42 @@ struct rtw89_fw_hdr_v1 {
#define FW_HDR_V1_W5_YEAR GENMASK(15, 0)
#define FW_HDR_V1_W5_HDR_SIZE GENMASK(31, 16)
#define FW_HDR_V1_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_V1_W6_DSP_CHKSUM BIT(24)
+#define FW_HDR_V1_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_V1_W7_DYN_HDR BIT(16)
-static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
-{
- le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
-}
+enum rtw89_fw_mss_pool_rmp_tbl_type {
+ MSS_POOL_RMP_TBL_BITMASK = 0x0,
+ MSS_POOL_RMP_TBL_RECORD = 0x1,
+};
+
+#define FWDL_MSS_POOL_DEFKEYSETS_SIZE 8
+
+struct rtw89_fw_mss_pool_hdr {
+ u8 signature[8]; /* equal to mss_signature[] */
+ __le32 rmp_tbl_offset;
+ __le32 key_raw_offset;
+ u8 defen;
+ u8 rsvd[3];
+ u8 rmpfmt; /* enum rtw89_fw_mss_pool_rmp_tbl_type */
+ u8 mssdev_max;
+ __le16 keypair_num;
+ __le16 msscust_max;
+ __le16 msskey_num_max;
+ __le32 rsvd3;
+ u8 rmp_tbl[];
+} __packed;
+
+union rtw89_fw_section_mssc_content {
+ struct {
+ u8 pad[58];
+ __le32 v;
+ } __packed sb_sel_ver;
+ struct {
+ u8 pad[60];
+ __le16 v;
+ } __packed key_sign_len;
+} __packed;
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
{
@@ -1198,6 +1284,149 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
+struct rtw89_h2c_cctlinfo_ud_g7 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define CCTLINFO_G7_C0_MACID GENMASK(6, 0)
+#define CCTLINFO_G7_C0_OP BIT(7)
+
+#define CCTLINFO_G7_W0_DATARATE GENMASK(11, 0)
+#define CCTLINFO_G7_W0_DATA_GI_LTF GENMASK(14, 12)
+#define CCTLINFO_G7_W0_TRYRATE BIT(15)
+#define CCTLINFO_G7_W0_ARFR_CTRL GENMASK(17, 16)
+#define CCTLINFO_G7_W0_DIS_HE1SS_STBC BIT(18)
+#define CCTLINFO_G7_W0_ACQ_RPT_EN BIT(20)
+#define CCTLINFO_G7_W0_MGQ_RPT_EN BIT(21)
+#define CCTLINFO_G7_W0_ULQ_RPT_EN BIT(22)
+#define CCTLINFO_G7_W0_TWTQ_RPT_EN BIT(23)
+#define CCTLINFO_G7_W0_FORCE_TXOP BIT(24)
+#define CCTLINFO_G7_W0_DISRTSFB BIT(25)
+#define CCTLINFO_G7_W0_DISDATAFB BIT(26)
+#define CCTLINFO_G7_W0_NSTR_EN BIT(27)
+#define CCTLINFO_G7_W0_AMPDU_DENSITY GENMASK(31, 28)
+#define CCTLINFO_G7_W0_ALL (GENMASK(31, 20) | GENMASK(18, 0))
+#define CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W1_RTS_TXCNT_LMT GENMASK(15, 12)
+#define CCTLINFO_G7_W1_RTSRATE GENMASK(27, 16)
+#define CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE GENMASK(31, 28)
+#define CCTLINFO_G7_W1_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W2_DATA_TX_CNT_LMT GENMASK(5, 0)
+#define CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL BIT(6)
+#define CCTLINFO_G7_W2_MAX_AGG_NUM_SEL BIT(7)
+#define CCTLINFO_G7_W2_RTS_EN BIT(8)
+#define CCTLINFO_G7_W2_CTS2SELF_EN BIT(9)
+#define CCTLINFO_G7_W2_CCA_RTS GENMASK(11, 10)
+#define CCTLINFO_G7_W2_HW_RTS_EN BIT(12)
+#define CCTLINFO_G7_W2_RTS_DROP_DATA_MODE GENMASK(14, 13)
+#define CCTLINFO_G7_W2_PRELD_EN BIT(15)
+#define CCTLINFO_G7_W2_AMPDU_MAX_LEN GENMASK(26, 16)
+#define CCTLINFO_G7_W2_UL_MU_DIS BIT(27)
+#define CCTLINFO_G7_W2_AMPDU_MAX_TIME GENMASK(31, 28)
+#define CCTLINFO_G7_W2_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W3_MAX_AGG_NUM GENMASK(7, 0)
+#define CCTLINFO_G7_W3_DATA_BW GENMASK(10, 8)
+#define CCTLINFO_G7_W3_DATA_BW_ER BIT(11)
+#define CCTLINFO_G7_W3_BA_BMAP GENMASK(14, 12)
+#define CCTLINFO_G7_W3_VCS_STBC BIT(15)
+#define CCTLINFO_G7_W3_VO_LFTIME_SEL GENMASK(18, 16)
+#define CCTLINFO_G7_W3_VI_LFTIME_SEL GENMASK(21, 19)
+#define CCTLINFO_G7_W3_BE_LFTIME_SEL GENMASK(24, 22)
+#define CCTLINFO_G7_W3_BK_LFTIME_SEL GENMASK(27, 25)
+#define CCTLINFO_G7_W3_AMPDU_TIME_SEL BIT(28)
+#define CCTLINFO_G7_W3_AMPDU_LEN_SEL BIT(29)
+#define CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL BIT(30)
+#define CCTLINFO_G7_W3_LSIG_TXOP_EN BIT(31)
+#define CCTLINFO_G7_W3_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W4_MULTI_PORT_ID GENMASK(2, 0)
+#define CCTLINFO_G7_W4_BYPASS_PUNC BIT(3)
+#define CCTLINFO_G7_W4_MBSSID GENMASK(7, 4)
+#define CCTLINFO_G7_W4_DATA_DCM BIT(8)
+#define CCTLINFO_G7_W4_DATA_ER BIT(9)
+#define CCTLINFO_G7_W4_DATA_LDPC BIT(10)
+#define CCTLINFO_G7_W4_DATA_STBC BIT(11)
+#define CCTLINFO_G7_W4_A_CTRL_BQR BIT(12)
+#define CCTLINFO_G7_W4_A_CTRL_BSR BIT(14)
+#define CCTLINFO_G7_W4_A_CTRL_CAS BIT(15)
+#define CCTLINFO_G7_W4_ACT_SUBCH_CBW GENMASK(31, 16)
+#define CCTLINFO_G7_W4_ALL (GENMASK(31, 14) | GENMASK(12, 0))
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 GENMASK(1, 0)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 GENMASK(3, 2)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 GENMASK(5, 4)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 GENMASK(7, 6)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4 GENMASK(9, 8)
+#define CCTLINFO_G7_W5_SR_RATE GENMASK(14, 10)
+#define CCTLINFO_G7_W5_TID_DISABLE GENMASK(23, 16)
+#define CCTLINFO_G7_W5_ADDR_CAM_INDEX GENMASK(31, 24)
+#define CCTLINFO_G7_W5_ALL (GENMASK(31, 16) | GENMASK(14, 0))
+#define CCTLINFO_G7_W6_AID12_PAID GENMASK(11, 0)
+#define CCTLINFO_G7_W6_RESP_REF_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W6_ULDL BIT(31)
+#define CCTLINFO_G7_W6_ALL (BIT(31) | GENMASK(23, 0))
+#define CCTLINFO_G7_W7_NC GENMASK(2, 0)
+#define CCTLINFO_G7_W7_NR GENMASK(5, 3)
+#define CCTLINFO_G7_W7_NG GENMASK(7, 6)
+#define CCTLINFO_G7_W7_CB GENMASK(9, 8)
+#define CCTLINFO_G7_W7_CS GENMASK(11, 10)
+#define CCTLINFO_G7_W7_CSI_STBC_EN BIT(13)
+#define CCTLINFO_G7_W7_CSI_LDPC_EN BIT(14)
+#define CCTLINFO_G7_W7_CSI_PARA_EN BIT(15)
+#define CCTLINFO_G7_W7_CSI_FIX_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W7_CSI_BW GENMASK(31, 29)
+#define CCTLINFO_G7_W7_ALL (GENMASK(31, 29) | GENMASK(27, 13) | GENMASK(11, 0))
+#define CCTLINFO_G7_W8_ALL_ACK_SUPPORT BIT(0)
+#define CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT BIT(1)
+#define CCTLINFO_G7_W8_BSR_OM_UPD_EN BIT(2)
+#define CCTLINFO_G7_W8_MACID_FWD_IDC BIT(3)
+#define CCTLINFO_G7_W8_AZ_SEC_EN BIT(4)
+#define CCTLINFO_G7_W8_CSI_SEC_EN BIT(5)
+#define CCTLINFO_G7_W8_FIX_UL_ADDRCAM_IDX BIT(6)
+#define CCTLINFO_G7_W8_CTRL_CNT_VLD BIT(7)
+#define CCTLINFO_G7_W8_CTRL_CNT GENMASK(11, 8)
+#define CCTLINFO_G7_W8_RESP_SEC_TYPE GENMASK(15, 12)
+#define CCTLINFO_G7_W8_ALL GENMASK(15, 0)
+/* W9~13 are reserved */
+#define CCTLINFO_G7_W14_VO_CURR_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W14_VI_CURR_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W14_BE_CURR_RATE_L GENMASK(31, 24)
+#define CCTLINFO_G7_W14_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W15_BE_CURR_RATE_H GENMASK(3, 0)
+#define CCTLINFO_G7_W15_BK_CURR_RATE GENMASK(15, 4)
+#define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W15_ALL GENMASK(27, 0)
+
static inline void SET_DCTL_MACID_V1(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
@@ -1500,105 +1729,98 @@ static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
GENMASK(31, 24));
}
-static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
-}
-
-static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
-}
-
-static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
-}
-
-static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
-}
-
-static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
-}
-
-static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
-}
-
-static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
-}
-
-static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
-}
+struct rtw89_h2c_bcn_upd {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
-}
+#define RTW89_H2C_BCN_UPD_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_W2_CSA_OFST GENMASK(31, 17)
+
+struct rtw89_h2c_bcn_upd_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 w16;
+ __le32 w17;
+ __le32 w18;
+ __le32 w19;
+ __le32 w20;
+ __le32 w21;
+ __le32 w22;
+ __le32 w23;
+ __le32 w24;
+ __le32 w25;
+ __le32 w26;
+ __le32 w27;
+ __le32 w28;
+ __le32 w29;
+} __packed;
-static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
-}
+#define RTW89_H2C_BCN_UPD_BE_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_BE_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_BE_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_BE_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID_EXT GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_BE_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_BE_W2_CSA_OFST GENMASK(31, 17)
+#define RTW89_H2C_BCN_UPD_BE_W3_MLIE_CSA_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W3_CRITICAL_UPD_FLAG_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP1_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP2_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP3_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP4_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP5_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP6_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_VAP7_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
@@ -1620,70 +1842,46 @@ static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13));
}
-static inline void SET_JOININFO_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_JOININFO_OP(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(8));
-}
-
-static inline void SET_JOININFO_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(9));
-}
-
-static inline void SET_JOININFO_WMM(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10));
-}
-
-static inline void SET_JOININFO_TGR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(12));
-}
-
-static inline void SET_JOININFO_ISHESTA(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(13));
-}
-
-static inline void SET_JOININFO_DLBW(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14));
-}
-
-static inline void SET_JOININFO_TF_MAC_PAD(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16));
-}
-
-static inline void SET_JOININFO_DL_T_PE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18));
-}
-
-static inline void SET_JOININFO_PORT_ID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21));
-}
+enum rtw89_fw_sta_type { /* value of RTW89_H2C_JOININFO_W1_STA_TYPE */
+ RTW89_FW_N_AC_STA = 0,
+ RTW89_FW_AX_STA = 1,
+ RTW89_FW_BE_STA = 2,
+};
-static inline void SET_JOININFO_NET_TYPE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24));
-}
+struct rtw89_h2c_join {
+ __le32 w0;
+} __packed;
-static inline void SET_JOININFO_WIFI_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26));
-}
+struct rtw89_h2c_join_v1 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_JOININFO_SELF_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30));
-}
+#define RTW89_H2C_JOININFO_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W0_OP BIT(8)
+#define RTW89_H2C_JOININFO_W0_BAND BIT(9)
+#define RTW89_H2C_JOININFO_W0_WMM GENMASK(11, 10)
+#define RTW89_H2C_JOININFO_W0_TGR BIT(12)
+#define RTW89_H2C_JOININFO_W0_ISHESTA BIT(13)
+#define RTW89_H2C_JOININFO_W0_DLBW GENMASK(15, 14)
+#define RTW89_H2C_JOININFO_W0_TF_MAC_PAD GENMASK(17, 16)
+#define RTW89_H2C_JOININFO_W0_DL_T_PE GENMASK(20, 18)
+#define RTW89_H2C_JOININFO_W0_PORT_ID GENMASK(23, 21)
+#define RTW89_H2C_JOININFO_W0_NET_TYPE GENMASK(25, 24)
+#define RTW89_H2C_JOININFO_W0_WIFI_ROLE GENMASK(29, 26)
+#define RTW89_H2C_JOININFO_W0_SELF_ROLE GENMASK(31, 30)
+#define RTW89_H2C_JOININFO_W1_STA_TYPE GENMASK(2, 0)
+#define RTW89_H2C_JOININFO_W1_IS_MLD BIT(3)
+#define RTW89_H2C_JOININFO_W1_MAIN_MACID GENMASK(11, 4)
+#define RTW89_H2C_JOININFO_W1_MLO_MODE BIT(12)
+#define RTW89_H2C_JOININFO_W1_EMLSR_CAB BIT(13)
+#define RTW89_H2C_JOININFO_W1_NSTR_EN BIT(14)
+#define RTW89_H2C_JOININFO_W1_INIT_PWR_STATE BIT(15)
+#define RTW89_H2C_JOININFO_W1_EMLSR_PADDING GENMASK(18, 16)
+#define RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY GENMASK(21, 19)
+#define RTW89_H2C_JOININFO_W2_MACID_EXT GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT GENMASK(15, 8)
struct rtw89_h2c_notify_dbcc {
__le32 w0;
@@ -1741,60 +1939,47 @@ static inline void SET_LOG_CFG_COMP_EXT(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0));
}
-static inline void SET_BA_CAM_VALID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(0));
-}
-
-static inline void SET_BA_CAM_INIT_REQ(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(1));
-}
-
-static inline void SET_BA_CAM_ENTRY_IDX(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2));
-}
-
-static inline void SET_BA_CAM_TID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4));
-}
-
-static inline void SET_BA_CAM_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BA_CAM_BMAP_SIZE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16));
-}
-
-static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
-}
-
-static inline void SET_BA_CAM_UID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
-}
+struct rtw89_h2c_ba_cam {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
-}
+#define RTW89_H2C_BA_CAM_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_W0_ENTRY_IDX GENMASK(3, 2)
+#define RTW89_H2C_BA_CAM_W0_TID GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_W0_MACID GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_W0_BMAP_SIZE GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_W0_SSN GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_W1_UID GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_W1_STD_EN BIT(8)
+#define RTW89_H2C_BA_CAM_W1_BAND BIT(9)
+#define RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1 GENMASK(31, 28)
+
+struct rtw89_h2c_ba_cam_v1 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
-}
+#define RTW89_H2C_BA_CAM_V1_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_V1_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_V1_W0_TID_MASK GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_V1_W0_MACID_MASK GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_V1_W0_SSN_MASK GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_V1_W1_UID_VALUE_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN BIT(8)
+#define RTW89_H2C_BA_CAM_V1_W1_BAND_SEL BIT(9)
+#define RTW89_H2C_BA_CAM_V1_W1_MLD_EN BIT(10)
+#define RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK GENMASK(31, 24)
+
+struct rtw89_h2c_ba_cam_init {
+ __le32 w0;
+} __packed;
-static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
-}
+#define RTW89_H2C_BA_CAM_INIT_USERS_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_INIT_OFFSET_MASK GENMASK(19, 12)
+#define RTW89_H2C_BA_CAM_INIT_BAND_SEL BIT(24)
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
@@ -1846,6 +2031,17 @@ static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
}
+struct rtw89_h2c_lps_ch_info {
+ struct {
+ u8 pri_ch;
+ u8 central_ch;
+ u8 bw;
+ u8 band;
+ } __packed info[2];
+
+ __le32 mlo_dbcc_mode_lps;
+} __packed;
+
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@@ -2569,135 +2765,91 @@ static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
}
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
-}
+struct rtw89_h2c_chinfo_elem {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
-}
+#define RTW89_H2C_CHINFO_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_W1_ACTION GENMASK(7, 3)
+#define RTW89_H2C_CHINFO_W1_NUM_PKT GENMASK(11, 8)
+#define RTW89_H2C_CHINFO_W1_TX BIT(12)
+#define RTW89_H2C_CHINFO_W1_PAUSE_DATA BIT(13)
+#define RTW89_H2C_CHINFO_W1_BAND GENMASK(15, 14)
+#define RTW89_H2C_CHINFO_W1_PKT_ID GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W1_DFS BIT(24)
+#define RTW89_H2C_CHINFO_W1_TX_NULL BIT(25)
+#define RTW89_H2C_CHINFO_W1_RANDOM BIT(26)
+#define RTW89_H2C_CHINFO_W1_CFG_TX BIT(27)
+#define RTW89_H2C_CHINFO_W2_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W2_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W2_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W2_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W3_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W3_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W3_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W3_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W4_POWER_IDX GENMASK(15, 0)
+
+struct rtw89_h2c_chinfo_elem_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
-}
+#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_BE_W1_CH_BAND GENMASK(4, 3)
+#define RTW89_H2C_CHINFO_BE_W1_DFS BIT(5)
+#define RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA BIT(6)
+#define RTW89_H2C_CHINFO_BE_W1_TX_NULL BIT(7)
+#define RTW89_H2C_CHINFO_BE_W1_RANDOM BIT(8)
+#define RTW89_H2C_CHINFO_BE_W1_NOTIFY GENMASK(13, 9)
+#define RTW89_H2C_CHINFO_BE_W1_PROBE BIT(14)
+#define RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT GENMASK(17, 15)
+#define RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W4_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W4_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W4_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W4_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W5_SW_DEF GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
+
+struct rtw89_h2c_chinfo {
+ u8 ch_num;
+ u8 elem_size;
+ u8 arg;
+ u8 rsvd0;
+ struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
-}
+#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
+#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
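Since struct rtw89_h2c_chinfo above marks elem[] as __counted_by(ch_num), hardened builds bound-check accesses to elem[] against ch_num, so the counter must be assigned before the array is indexed. A hypothetical caller fragment, assuming the usual struct_size()/kzalloc() allocation pattern:

	struct rtw89_h2c_chinfo *chinfo;
	u8 ch_num = 8;	/* hypothetical channel count */

	chinfo = kzalloc(struct_size(chinfo, elem, ch_num), GFP_KERNEL);
	if (!chinfo)
		return -ENOMEM;

	chinfo->ch_num = ch_num;	/* set the counter before touching elem[] */
	chinfo->elem[0].w0 = le32_encode_bits(RTW89_CHANNEL_TIME,
					      RTW89_H2C_CHINFO_W0_PERIOD);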
struct rtw89_h2c_scanofld {
__le32 w0;
@@ -2726,6 +2878,79 @@ struct rtw89_h2c_scanofld {
#define RTW89_H2C_SCANOFLD_W2_NORM_PD GENMASK(15, 0)
#define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16)
+struct rtw89_h2c_scanofld_be_macc_role {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT GENMASK(4, 2)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be_opch {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND GENMASK(17, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT GENMASK(20, 18)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY GENMASK(22, 21)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL BIT(23)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND GENMASK(9, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW GENMASK(12, 10)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY GENMASK(14, 13)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS GENMASK(18, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ struct rtw89_h2c_scanofld_be_macc_role role[];
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_W0_OP GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE GENMASK(3, 2)
+#define RTW89_H2C_SCANOFLD_BE_W0_REPEAT GENMASK(5, 4)
+#define RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END BIT(6)
+#define RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH BIT(7)
+#define RTW89_H2C_SCANOFLD_BE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_W0_PORT GENMASK(26, 24)
+#define RTW89_H2C_SCANOFLD_BE_W0_BAND GENMASK(28, 27)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_OP GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W1_NORM_PD GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_W2_NORM_CY GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_OPCH_END GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W3_PROBEID GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W4_DELAY_START GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH GENMASK(31, 0)
+
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
@@ -3160,6 +3385,225 @@ inline void RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(void *cmd, u32 val)
le32p_replace_bits((__le32 *)cmd + 4, val, GENMASK(31, 0));
}
+enum rtw89_h2c_mrc_sch_types {
+ RTW89_H2C_MRC_SCH_BAND0_ONLY = 0,
+ RTW89_H2C_MRC_SCH_BAND1_ONLY = 1,
+ RTW89_H2C_MRC_SCH_DUAL_BAND = 2,
+};
+
+enum rtw89_h2c_mrc_role_types {
+ RTW89_H2C_MRC_ROLE_WIFI = 0,
+ RTW89_H2C_MRC_ROLE_BT = 1,
+ RTW89_H2C_MRC_ROLE_EMPTY = 2,
+};
+
+#define RTW89_MAC_MRC_MAX_ADD_SLOT_NUM 3
+#define RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT 1 /* before MLO */
+
+struct rtw89_fw_mrc_add_slot_arg {
+ u16 duration; /* unit: TU */
+ bool courtesy_en;
+ u8 courtesy_period;
+ u8 courtesy_target; /* slot idx */
+
+ unsigned int role_num;
+ struct {
+ enum rtw89_h2c_mrc_role_types role_type;
+ bool is_master;
+ bool en_tx_null;
+ enum rtw89_band band;
+ enum rtw89_bandwidth bw;
+ u8 macid;
+ u8 central_ch;
+ u8 primary_ch;
+ u8 null_early; /* unit: TU */
+
+ /* If MLD, covers macids in [0, chip::support_mld_num);
+ * otherwise, macids in [0, 32).
+ */
+ u32 macid_main_bitmap;
+ /* For MLD, bit X maps to macid X + chip::support_mld_num. */
+ u32 macid_paired_bitmap;
+ } roles[RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT];
+};
+
+struct rtw89_fw_mrc_add_arg {
+ u8 sch_idx;
+ enum rtw89_h2c_mrc_sch_types sch_type;
+ bool btc_in_sch;
+
+ unsigned int slot_num;
+ struct rtw89_fw_mrc_add_slot_arg slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_add_role {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 macid_main_bitmap;
+ __le32 macid_paired_bitmap;
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_ROLE_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE GENMASK(23, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER BIT(24)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE BIT(25)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN BIT(26)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN BIT(27)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_BW GENMASK(19, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE GENMASK(21, 20)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS BIT(22)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC BIT(23)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_TYPE GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_MACID GENMASK(23, 16)
+
+struct rtw89_h2c_mrc_add_slot {
+ __le32 w0;
+ __le32 w1;
+ struct rtw89_h2c_mrc_add_role roles[];
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_SLOT_W0_DURATION GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN BIT(17)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET GENMASK(15, 8)
+
+struct rtw89_h2c_mrc_add {
+ __le32 w0;
+ /* Logically, flexible struct rtw89_h2c_mrc_add_slot entries follow here,
+ * but each of them carries another flexible array inside, so they cannot
+ * be accessed correctly through this struct. To avoid misuse, we don't
+ * declare them here.
+ */
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_ADD_W0_SCH_TYPE GENMASK(7, 4)
+#define RTW89_H2C_MRC_ADD_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH BIT(16)
+
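+/* A minimal illustrative sketch of walking the slots that logically follow
+ * struct rtw89_h2c_mrc_add: each slot ends in its own flexible role array,
+ * so the cursor must advance by struct_size() from <linux/overflow.h> per
+ * slot. The helper name is hypothetical.
+ */
+static inline struct rtw89_h2c_mrc_add_slot *
+rtw89_h2c_mrc_next_slot(struct rtw89_h2c_mrc_add_slot *slot,
+ unsigned int role_num)
+{
+ /* sizeof(*slot) plus role_num trailing struct rtw89_h2c_mrc_add_role */
+ return (void *)slot + struct_size(slot, roles, role_num);
+}
+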
+enum rtw89_h2c_mrc_start_actions {
+ RTW89_H2C_MRC_START_ACTION_START_NEW = 0,
+ RTW89_H2C_MRC_START_ACTION_REPLACE_OLD = 1,
+};
+
+struct rtw89_fw_mrc_start_arg {
+ u8 sch_idx;
+ u8 old_sch_idx;
+ u64 start_tsf;
+ enum rtw89_h2c_mrc_start_actions action;
+};
+
+struct rtw89_h2c_mrc_start {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+} __packed;
+
+#define RTW89_H2C_MRC_START_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_START_W0_OLD_SCH_IDX GENMASK(7, 4)
+#define RTW89_H2C_MRC_START_W0_ACTION GENMASK(15, 8)
+
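+/* A minimal illustrative sketch of splitting the u64 start_tsf from struct
+ * rtw89_fw_mrc_start_arg into the two command words, assuming
+ * lower_32_bits()/upper_32_bits(); the helper name is hypothetical.
+ */
+static inline void
+rtw89_h2c_mrc_fill_start_tsf(struct rtw89_h2c_mrc_start *h2c, u64 start_tsf)
+{
+ h2c->start_tsf_low = cpu_to_le32(lower_32_bits(start_tsf));
+ h2c->start_tsf_high = cpu_to_le32(upper_32_bits(start_tsf));
+}
+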
+struct rtw89_h2c_mrc_del {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_MRC_DEL_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_DEL_W0_DEL_ALL BIT(4)
+#define RTW89_H2C_MRC_DEL_W0_STOP_ONLY BIT(5)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_EN BIT(6)
+#define RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX GENMASK(15, 8)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_MACID GENMASK(31, 16)
+
+#define RTW89_MAC_MRC_MAX_REQ_TSF_NUM 2
+
+struct rtw89_fw_mrc_req_tsf_arg {
+ unsigned int num;
+ struct {
+ u8 band;
+ u8 port;
+ } infos[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+struct rtw89_h2c_mrc_req_tsf {
+ u8 req_tsf_num;
+ u8 infos[] __counted_by(req_tsf_num);
+} __packed;
+
+#define RTW89_H2C_MRC_REQ_TSF_INFO_BAND GENMASK(3, 0)
+#define RTW89_H2C_MRC_REQ_TSF_INFO_PORT GENMASK(7, 4)
+
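+/* A minimal illustrative sketch of filling the request, assuming
+ * u8_encode_bits() from <linux/bitfield.h>; the helper name is hypothetical.
+ * req_tsf_num is written before infos[] so __counted_by() bounds checking
+ * sees the final length.
+ */
+static inline void
+rtw89_h2c_mrc_fill_req_tsf(struct rtw89_h2c_mrc_req_tsf *h2c,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg)
+{
+ unsigned int i;
+
+ h2c->req_tsf_num = arg->num;
+ for (i = 0; i < arg->num; i++)
+ h2c->infos[i] = u8_encode_bits(arg->infos[i].band,
+ RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
+ u8_encode_bits(arg->infos[i].port,
+ RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
+}
+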
+enum rtw89_h2c_mrc_upd_bitmap_actions {
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL = 0,
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD = 1,
+};
+
+struct rtw89_fw_mrc_upd_bitmap_arg {
+ u8 sch_idx;
+ u8 macid;
+ u8 client_macid;
+ enum rtw89_h2c_mrc_upd_bitmap_actions action;
+};
+
+struct rtw89_h2c_mrc_upd_bitmap {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION BIT(4)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_MACID GENMASK(31, 16)
+#define RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID GENMASK(15, 0)
+
+struct rtw89_fw_mrc_sync_arg {
+ u8 offset; /* unit: TU */
+ struct {
+ u8 band;
+ u8 port;
+ } src, dest;
+};
+
+struct rtw89_h2c_mrc_sync {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_SYNC_W0_SYNC_EN BIT(0)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_PORT GENMASK(11, 8)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_BAND GENMASK(15, 12)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_PORT GENMASK(19, 16)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_BAND GENMASK(23, 20)
+#define RTW89_H2C_MRC_SYNC_W1_OFFSET GENMASK(15, 0)
+
+struct rtw89_fw_mrc_upd_duration_arg {
+ u8 sch_idx;
+ u64 start_tsf;
+
+ unsigned int slot_num;
+ struct {
+ u8 slot_idx;
+ u16 duration; /* unit: TU */
+ } slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_upd_duration {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+ __le32 slots[];
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH BIT(16)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX GENMASK(7, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION GENMASK(31, 16)
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3275,20 +3719,29 @@ struct rtw89_c2h_ra_rpt {
#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
-#define RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
-#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
-#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+struct rtw89_c2h_scanofld {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W2_RSN GENMASK(19, 16)
+#define RTW89_C2H_SCANOFLD_W2_STATUS GENMASK(23, 20)
+#define RTW89_C2H_SCANOFLD_W2_PERIOD GENMASK(31, 24)
+#define RTW89_C2H_SCANOFLD_W5_TX_FAIL GENMASK(3, 0)
+#define RTW89_C2H_SCANOFLD_W5_AIR_DENSITY GENMASK(7, 4)
+#define RTW89_C2H_SCANOFLD_W5_BAND GENMASK(25, 24)
+#define RTW89_C2H_SCANOFLD_W5_MAC_IDX BIT(26)
+#define RTW89_C2H_SCANOFLD_W6_SW_DEF GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
+#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
+#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3339,6 +3792,36 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE)
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+struct rtw89_mac_mrc_tsf_rpt {
+ unsigned int num;
+ u64 tsfs[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+static_assert(sizeof(struct rtw89_mac_mrc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
+
+struct rtw89_c2h_mrc_tsf_rpt_info {
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+struct rtw89_c2h_mrc_tsf_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ struct rtw89_c2h_mrc_tsf_rpt_info infos[];
+} __packed;
+
+#define RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM GENMASK(7, 0)
+
+struct rtw89_c2h_mrc_status_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+#define RTW89_C2H_MRC_STATUS_RPT_W2_STATUS GENMASK(5, 0)
+#define RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX GENMASK(7, 6)
+
struct rtw89_c2h_pkt_ofld_rsp {
__le32 w0;
__le32 w1;
@@ -3647,6 +4130,9 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_FUNC_MAC_BCN_UPD 0x5
#define H2C_FUNC_MAC_DCTLINFO_UD_V1 0x9
#define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa
+#define H2C_FUNC_MAC_DCTLINFO_UD_V2 0xc
+#define H2C_FUNC_MAC_BCN_UPD_BE 0xd
+#define H2C_FUNC_MAC_CCTLINFO_UD_G7 0x11
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -3672,6 +4158,8 @@ enum rtw89_fw_ofld_h2c_func {
H2C_FUNC_CFG_BCNFLTR = 0x1e,
H2C_FUNC_OFLD_RSSI = 0x1f,
H2C_FUNC_OFLD_TP = 0x20,
+ H2C_FUNC_MAC_MACID_PAUSE_SLEEP = 0x28,
+ H2C_FUNC_SCANOFLD_BE = 0x2c,
NUM_OF_RTW89_FW_OFLD_H2C_FUNC,
};
@@ -3683,6 +4171,14 @@ enum rtw89_fw_ofld_h2c_func {
RTW89_FW_OFLD_WAIT_COND(RTW89_PKT_OFLD_WAIT_TAG(pkt_id, pkt_op), \
H2C_FUNC_PACKET_OFLD)
+#define RTW89_SCANOFLD_WAIT_COND_ADD_CH RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH)
+
+#define RTW89_SCANOFLD_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_BE_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD_BE)
+#define RTW89_SCANOFLD_BE_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD_BE)
+
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
#define H2C_FUNC_MAC_SEC_UPD 0x1
@@ -3690,6 +4186,8 @@ enum rtw89_fw_ofld_h2c_func {
/* CLASS 12 - BA CAM */
#define H2C_CL_BA_CAM 0xc
#define H2C_FUNC_MAC_BA_CAM 0x0
+#define H2C_FUNC_MAC_BA_CAM_V1 0x1
+#define H2C_FUNC_MAC_BA_CAM_INIT 0x2
/* CLASS 14 - MCC */
#define H2C_CL_MCC 0xe
@@ -3710,15 +4208,50 @@ enum rtw89_mcc_h2c_func {
#define RTW89_MCC_WAIT_COND(group, func) \
((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func))
+/* CLASS 24 - MRC */
+#define H2C_CL_MRC 0x18
+enum rtw89_mrc_h2c_func {
+ H2C_FUNC_MRC_REQ_TSF = 0x0,
+ H2C_FUNC_ADD_MRC = 0x1,
+ H2C_FUNC_START_MRC = 0x2,
+ H2C_FUNC_DEL_MRC = 0x3,
+ H2C_FUNC_MRC_SYNC = 0x4,
+ H2C_FUNC_MRC_UPD_DURATION = 0x5,
+ H2C_FUNC_MRC_UPD_BITMAP = 0x6,
+
+ NUM_OF_RTW89_MRC_H2C_FUNC,
+};
+
+/* MRC's sch_idx plays the same role here as MCC's group; e.g. with
+ * NUM_OF_RTW89_MRC_H2C_FUNC == 7, RTW89_MRC_WAIT_COND(2, H2C_FUNC_START_MRC)
+ * evaluates to 2 * 7 + 2 = 16.
+ */
+#define RTW89_MRC_WAIT_COND(sch_idx, func) \
+ ((sch_idx) * NUM_OF_RTW89_MRC_H2C_FUNC + (func))
+
+#define RTW89_MRC_WAIT_COND_REQ_TSF \
+ RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
+
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
#define H2C_FUNC_OUTSRC_RA_MACIDCFG 0x0
+#define H2C_CL_OUTSRC_DM 0x2
+#define H2C_FUNC_FW_LPS_CH_INFO 0xb
+
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+#define H2C_CL_OUTSRC_RF_FW_RFK 0xb
+
+enum rtw89_rfk_offload_h2c_func {
+ H2C_FUNC_RFK_TSSI_OFFLOAD = 0x0,
+ H2C_FUNC_RFK_IQK_OFFLOAD = 0x1,
+ H2C_FUNC_RFK_DPK_OFFLOAD = 0x3,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD = 0x4,
+ H2C_FUNC_RFK_DACK_OFFLOAD = 0x5,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
+ H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
+};
struct rtw89_fw_h2c_rf_get_mccch {
__le32 ch_0;
@@ -3729,6 +4262,114 @@ struct rtw89_fw_h2c_rf_get_mccch {
__le32 current_band_type;
} __packed;
+#define NUM_OF_RTW89_FW_RFK_PATH 2
+#define NUM_OF_RTW89_FW_RFK_TBL 3
+
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ } __packed dbcc;
+
+ __le32 mlo_mode;
+ struct {
+ __le32 cur_ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 cur_band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed tbl;
+
+ __le32 phy_idx;
+ __le32 cur_band;
+ __le32 cur_bw;
+ __le32 cur_center_ch;
+
+ __le32 ktbl_sel0;
+ __le32 ktbl_sel1;
+ __le32 rfmod0;
+ __le32 rfmod1;
+
+ __le32 mlo_1_1;
+ __le32 rfe_type;
+ __le32 drv_mode;
+
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed mlo;
+} __packed;
+
+struct rtw89_h2c_rf_tssi {
+ __le16 len;
+ u8 phy;
+ u8 ch;
+ u8 bw;
+ u8 band;
+ u8 hwtx_en;
+ u8 cv;
+ s8 curr_tssi_cck_de[2];
+ s8 curr_tssi_cck_de_20m[2];
+ s8 curr_tssi_cck_de_40m[2];
+ s8 curr_tssi_efuse_cck_de[2];
+ s8 curr_tssi_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_20m[2];
+ s8 curr_tssi_ofdm_de_40m[2];
+ s8 curr_tssi_ofdm_de_80m[2];
+ s8 curr_tssi_ofdm_de_160m[2];
+ s8 curr_tssi_ofdm_de_320m[2];
+ s8 curr_tssi_efuse_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_diff_20m[2];
+ s8 curr_tssi_ofdm_de_diff_80m[2];
+ s8 curr_tssi_ofdm_de_diff_160m[2];
+ s8 curr_tssi_ofdm_de_diff_320m[2];
+ s8 curr_tssi_trim_de[2];
+ u8 pg_thermal[2];
+ u8 ftable[2][128];
+ u8 tssi_mode;
+} __packed;
+
+struct rtw89_h2c_rf_iqk {
+ __le32 phy_idx;
+ __le32 dbcc;
+} __packed;
+
+struct rtw89_h2c_rf_dpk {
+ u8 len;
+ u8 phy;
+ u8 dpk_enable;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 dpk_dbg_en;
+} __packed;
+
+struct rtw89_h2c_rf_txgapk {
+ u8 len;
+ u8 ktype;
+ u8 phy;
+ u8 kpath;
+ u8 band;
+ u8 bw;
+ u8 ch;
+ u8 cv;
+} __packed;
+
+struct rtw89_h2c_rf_dack {
+ __le32 len;
+ __le32 phy;
+ __le32 type;
+} __packed;
+
+struct rtw89_h2c_rf_rxdck {
+ u8 len;
+ u8 phy;
+ u8 is_afe;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 rxdck_dbg_en;
+} __packed;
+
enum rtw89_rf_log_type {
RTW89_RF_RUN_LOG = 0,
RTW89_RF_RPT_LOG = 1,
@@ -3800,6 +4441,12 @@ struct rtw89_c2h_rf_txgapk_rpt_log {
u8 rsv1;
} __packed;
+struct rtw89_c2h_rfk_report {
+ struct rtw89_c2h_hdr hdr;
+ u8 state; /* enum rtw89_rfk_report_state */
+ u8 version;
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -3830,21 +4477,39 @@ void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif);
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -3876,15 +4541,29 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
struct sk_buff *skb_ofld);
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list);
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *opt,
struct rtw89_vif *vif);
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -3898,10 +4577,16 @@ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx);
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
@@ -3916,6 +4601,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
const struct rtw89_pkt_drop_params *params);
@@ -3956,6 +4645,20 @@ int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
u8 target, u8 offset);
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mcc_duration *p);
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg);
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg);
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx);
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt);
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg);
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg);
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
@@ -3965,6 +4668,65 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(rtwdev);
}
+static inline int rtw89_chip_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+}
+
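+/* The h2c_default_dmac_tbl and h2c_ampdu_cmac_tbl ops are optional per chip
+ * generation; a NULL op is treated as success below so chips without the
+ * feature need no stub.
+ */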
+static inline int rtw89_chip_h2c_default_dmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_default_dmac_tbl)
+ return chip->ops->h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+
+ return 0;
+}
+
+static inline int rtw89_chip_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_update_beacon(rtwdev, rtwvif);
+}
+
+static inline int rtw89_chip_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+}
+
+static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_ampdu_cmac_tbl)
+ return chip->ops->h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
+
+ return 0;
+}
+
+static inline
+int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_ba_cam(rtwdev, rtwsta, valid, params);
+}
+
/* must consider compatibility; don't insert new fields in the middle */
struct rtw89_fw_txpwr_byrate_entry {
u8 band;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index c485ef2cc3d3..908245ac46bd 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1625,7 +1625,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
- .ple_size0_v1 = {RTW89_PLE_PG_128, 2672, 256, 212992,},
+ .ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
.ple_size3_v1 = {RTW89_PLE_PG_128, 2928, 0, 212992,},
/* DLFW */
.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
@@ -1650,8 +1650,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
- .ple_qt0 = {320, 0, 32, 16, 13, 13, 292, 0, 32, 18, 1, 4, 0,},
- .ple_qt1 = {320, 0, 32, 16, 1944, 1944, 2223, 0, 1963, 1949, 1, 1935, 0,},
+ .ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
+ .ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
/* PCIE SCC */
.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
/* PCIE SCC */
@@ -1677,7 +1677,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
/* 8851B PCIE WOW */
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
- .ple_rsvd_qt0 = {2, 112, 56, 6, 6, 6, 6, 0, 0, 62,},
+ .ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,},
.ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
.rsvd0_size0 = {212992, 0,},
.rsvd1_size0 = {587776, 2048,},
@@ -2537,6 +2537,9 @@ static int spatial_reuse_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_AX_PLCP_SRC_EN);
+
return 0;
}
@@ -3192,13 +3195,11 @@ static int set_cpuio_ax(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_dle_mem *cfg;
- struct rtw89_cpuio_ctrl ctrl_para = {0};
- u16 pkt_id;
- int ret;
cfg = get_dle_mem_cfg(rtwdev, mode);
if (!cfg) {
@@ -3213,6 +3214,16 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod
dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
+ return mac->dle_quota_change(rtwdev, band1_en);
+}
+
+static int dle_quota_change_ax(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+ int ret;
+
ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id);
if (ret) {
rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n");
@@ -3301,7 +3312,7 @@ static int band1_enable_ax(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -3676,6 +3687,28 @@ static int trx_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
+{
+#define BACAM_1024BMP_OCC_ENTRY 4
+#define BACAM_MAX_RU_SUPPORT_B0_STA 1
+#define BACAM_MAX_RU_SUPPORT_B1_STA 1
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 users, offset;
+
+ if (chip->bacam_ver != RTW89_BACAM_V1)
+ return 0;
+
+ offset = 0;
+ users = BACAM_MAX_RU_SUPPORT_B0_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_0);
+
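+ /* each 1024-bitmap user occupies BACAM_1024BMP_OCC_ENTRY BA CAM entries,
+ * so band 1 users start right after band 0's allocation
+ */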
+ offset += users * BACAM_1024BMP_OCC_ENTRY;
+ users = BACAM_MAX_RU_SUPPORT_B1_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_1);
+
+ return 0;
+}
+
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -3910,6 +3943,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
if (ret)
goto fail;
+ ret = rtw89_mac_feat_init(rtwdev);
+ if (ret)
+ goto fail;
+
if (rtwdev->hci.ops->mac_post_init) {
ret = rtwdev->hci.ops->mac_post_init(rtwdev);
if (ret)
@@ -4000,6 +4037,9 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
.mbssid = R_AX_MBSSID_CTRL,
.mbssid_drop = R_AX_MBSSID_DROP_0,
.tsf_sync = R_AX_PORT0_TSF_SYNC,
+ .ptcl_dbg = R_AX_PTCL_DBG,
+ .ptcl_dbg_info = R_AX_PTCL_DBG_INFO,
+ .bcn_drop_all = R_AX_BCN_DROP_ALL0,
.hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
R_AX_PORT_HGQ_WINDOW_CFG + 3},
@@ -4008,13 +4048,15 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, u8 type)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_port_reg *p = mac->port_base;
u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif->port);
u32 reg_info, reg_ctrl;
u32 val;
int ret;
- reg_info = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG_INFO, rtwvif->mac_idx);
- reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG, rtwvif->mac_idx);
+ reg_info = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg_info, rtwvif->mac_idx);
+ reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg, rtwvif->mac_idx);
rtw89_write32_mask(rtwdev, reg_ctrl, B_AX_PTCL_DBG_SEL_MASK, type);
rtw89_write32_set(rtwdev, reg_ctrl, B_AX_PTCL_DBG_EN);
@@ -4031,7 +4073,7 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- rtw89_write32_set(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_set(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, 1);
rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, 0);
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 0);
@@ -4044,9 +4086,9 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
if (rtwvif->port == RTW89_PORT_0)
rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM1);
- rtw89_write32_clr(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_clr(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TBTT_PROHIB_EN);
- fsleep(2);
+ fsleep(2000);
}
#define BCN_INTERVAL 100
@@ -4159,13 +4201,11 @@ static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit);
}
-static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
- rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
if (en)
rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
@@ -4173,6 +4213,15 @@ static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
}
+static void rtw89_mac_port_cfg_rx_sync_by_nettype(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, en);
+}
+
static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en)
{
@@ -4471,7 +4520,11 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, NULL);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, NULL);
if (ret)
return ret;
@@ -4508,7 +4561,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_net_type(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
- rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sync_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
@@ -4571,6 +4624,7 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct ieee80211_hw *hw = rtwdev->hw;
bool tolerated = true;
u32 reg;
@@ -4578,18 +4632,19 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
rtw89_mac_check_he_obss_narrow_bw_ru_iter,
&tolerated);
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->narrow_bw_ru_dis.addr,
+ rtwvif->mac_idx);
if (tolerated)
- rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_clr(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
else
- rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
}
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -4641,35 +4696,52 @@ static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
}
static void
-rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len)
{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct rtw89_chan new;
- u8 reason, status, tx_fail, band, actual_period;
- u32 last_chan = rtwdev->scan_info.last_chan_idx;
+ u8 reason, status, tx_fail, band, actual_period, expect_period;
+ u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+ u8 mac_idx, sw_def, fw_def;
u16 chan;
int ret;
if (!rtwvif)
return;
- tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
- status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
- chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
- reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
- band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
- actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);
+ tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ band = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_BAND);
+ actual_period = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PERIOD);
+ mac_idx = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_MAC_IDX);
+
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
- band, chan, reason, status, tx_fail, actual_period);
+ "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ mac_idx, band, chan, reason, status, tx_fail, actual_period);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
+ expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
+ fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
+ report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
+ sw_def, fw_def, report_tsf, expect_period);
+ }
switch (reason) {
+ case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false);
@@ -4685,9 +4757,10 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
}
} else {
- rtw89_hw_scan_complete(rtwdev, vif, false);
+ rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort);
}
break;
+ case RTW89_SCAN_ENTER_OP_NOTIFY:
case RTW89_SCAN_ENTER_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
@@ -4807,8 +4880,13 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
default:
return;
case H2C_FUNC_ADD_SCANOFLD_CH:
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+ break;
case H2C_FUNC_SCANOFLD:
- cond = RTW89_FW_OFLD_WAIT_COND(0, h2c_func);
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ break;
+ case H2C_FUNC_SCANOFLD_BE:
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
break;
}
@@ -5021,6 +5099,84 @@ rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
}
+static void
+rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_tsf_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ struct rtw89_mac_mrc_tsf_rpt *rpt;
+ unsigned int i;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_tsf_rpt *)c2h->data;
+ rpt = (struct rtw89_mac_mrc_tsf_rpt *)data.buf;
+ rpt->num = min_t(u8, RTW89_MAC_MRC_MAX_REQ_TSF_NUM,
+ le32_get_bits(c2h_rpt->w2,
+ RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM));
+
+ for (i = 0; i < rpt->num; i++) {
+ u32 tsf_high = le32_to_cpu(c2h_rpt->infos[i].tsf_high);
+ u32 tsf_low = le32_to_cpu(c2h_rpt->infos[i].tsf_low);
+
+ rpt->tsfs[i] = (u64)tsf_high << 32 | tsf_low;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H TSF RPT: index %u> %llu\n",
+ i, rpt->tsfs[i]);
+ }
+
+ rtw89_complete_cond(wait, RTW89_MRC_WAIT_COND_REQ_TSF, &data);
+}
+
+static void
+rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_status_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ enum rtw89_mac_mrc_status status;
+ unsigned int cond;
+ bool next = false;
+ u32 tsf_high;
+ u32 tsf_low;
+ u8 sch_idx;
+ u8 func;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_status_rpt *)c2h->data;
+ sch_idx = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX);
+ status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_STATUS);
+ tsf_high = le32_to_cpu(c2h_rpt->tsf_high);
+ tsf_low = le32_to_cpu(c2h_rpt->tsf_low);
+
+ switch (status) {
+ case RTW89_MAC_MRC_START_SCH_OK:
+ func = H2C_FUNC_START_MRC;
+ break;
+ case RTW89_MAC_MRC_STOP_SCH_OK:
+ /* H2C_FUNC_DEL_MRC was sent without STOP_ONLY; keep waiting for DEL_SCH_OK */
+ func = H2C_FUNC_DEL_MRC;
+ next = true;
+ break;
+ case RTW89_MAC_MRC_DEL_SCH_OK:
+ func = H2C_FUNC_DEL_MRC;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "invalid MRC C2H STS RPT: status %d\n", status);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: sch_idx %d, status %d, tsf %llu\n",
+ sch_idx, status, (u64)tsf_high << 32 | tsf_low);
+
+ if (next)
+ return;
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, func);
+ rtw89_complete_cond(wait, cond, &data);
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -5052,7 +5208,39 @@ void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT] = rtw89_mac_c2h_mcc_status_rpt,
};
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
+static
+void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt,
+ [RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt,
+};
+
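+/* Runs in atomic context so scan-offload waiters are completed as soon as
+ * the END notification arrives; rtw89_mac_c2h_chk_atomic() still returns
+ * false for this C2H, so it is also handled later by
+ * rtw89_mac_c2h_scanofld_rsp().
+ */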
+static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
+ struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ u8 status, reason;
+
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ data.err = status != RTW89_SCAN_STATUS_SUCCESS;
+
+ if (reason == RTW89_SCAN_END_SCAN_NOTIFY) {
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
+
+ rtw89_complete_cond(fw_ofld_wait, cond, &data);
+ }
+}
+
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func)
{
switch (class) {
default:
@@ -5069,11 +5257,16 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
switch (func) {
default:
return false;
+ case RTW89_MAC_C2H_FUNC_SCANOFLD_RSP:
+ rtw89_mac_c2h_scanofld_rsp_atomic(rtwdev, c2h);
+ return false;
case RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP:
return true;
}
case RTW89_MAC_C2H_CLASS_MCC:
return true;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ return true;
}
}
@@ -5096,6 +5289,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC)
handler = rtw89_mac_c2h_mcc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
+ handler = rtw89_mac_c2h_mrc_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
@@ -5115,8 +5312,7 @@ bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx);
if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) {
@@ -5143,7 +5339,8 @@ error:
return false;
}
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+static
+int rtw89_mac_cfg_ppdu_status_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx);
int ret;
@@ -5166,7 +5363,6 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
return 0;
}
-EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
@@ -6096,6 +6292,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.rx_fltr = R_AX_RX_FLTR_OPT,
.port_base = &rtw89_port_base_ax,
.agg_len_ht = R_AX_AGG_LEN_HT_0,
+ .ps_status = R_AX_PPWRBIT_SETTING,
.muedca_ctrl = {
.addr = R_AX_MUEDCA_EN,
@@ -6106,6 +6303,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN |
B_AX_BFMEE_HE_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_AX_RXTRIG_TEST_USER_2,
+ .mask = B_AX_RXTRIG_RU26_DIS,
+ },
.check_mac_en = rtw89_mac_check_mac_en_ax,
.sys_init = sys_init_ax,
@@ -6117,6 +6318,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.bf_assoc = rtw89_mac_bf_assoc_ax,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
@@ -6128,6 +6330,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.wde_quota_cfg = wde_quota_cfg_ax,
.ple_quota_cfg = ple_quota_cfg_ax,
.set_cpuio = set_cpuio_ax,
+ .dle_quota_change = dle_quota_change_ax,
.disable_cpu = rtw89_mac_disable_cpu_ax,
.fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax,
@@ -6146,5 +6349,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.dump_err_status = rtw89_mac_dump_err_status_ax,
.is_txq_empty = mac_is_txq_empty_ax,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list,
+ .scan_offload = rtw89_fw_h2c_scan_offload,
};
EXPORT_SYMBOL(rtw89_mac_gen_ax);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index ed98b49809a4..db95509fad2f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -406,13 +406,21 @@ enum rtw89_mac_c2h_mcc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MCC,
};
+enum rtw89_mac_c2h_mrc_func {
+ RTW89_MAC_C2H_FUNC_MRC_TSF_RPT = 0,
+ RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT = 1,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_MRC,
+};
+
enum rtw89_mac_c2h_class {
- RTW89_MAC_C2H_CLASS_INFO,
- RTW89_MAC_C2H_CLASS_OFLD,
- RTW89_MAC_C2H_CLASS_TWT,
- RTW89_MAC_C2H_CLASS_WOW,
- RTW89_MAC_C2H_CLASS_MCC,
- RTW89_MAC_C2H_CLASS_FWDBG,
+ RTW89_MAC_C2H_CLASS_INFO = 0x0,
+ RTW89_MAC_C2H_CLASS_OFLD = 0x1,
+ RTW89_MAC_C2H_CLASS_TWT = 0x2,
+ RTW89_MAC_C2H_CLASS_WOW = 0x3,
+ RTW89_MAC_C2H_CLASS_MCC = 0x4,
+ RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
+ RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -441,6 +449,12 @@ enum rtw89_mac_mcc_status {
RTW89_MAC_MCC_TXNULL1_FAIL = 27,
};
+enum rtw89_mac_mrc_status {
+ RTW89_MAC_MRC_START_SCH_OK = 0,
+ RTW89_MAC_MRC_STOP_SCH_OK = 1,
+ RTW89_MAC_MRC_DEL_SCH_OK = 2,
+};
+
struct rtw89_mac_ax_coex {
#define RTW89_MAC_AX_COEX_RTK_MODE 0
#define RTW89_MAC_AX_COEX_CSR_MODE 1
@@ -894,9 +908,11 @@ struct rtw89_mac_gen_def {
u32 rx_fltr;
const struct rtw89_port_reg *port_base;
u32 agg_len_ht;
+ u32 ps_status;
struct rtw89_reg_def muedca_ctrl;
struct rtw89_reg_def bfee_ctrl;
+ struct rtw89_reg_def narrow_bw_ru_dis;
int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band,
enum rtw89_mac_hwmod_sel sel);
@@ -913,6 +929,7 @@ struct rtw89_mac_gen_def {
enum rtw89_machdr_frame_type type,
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
+ int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -930,6 +947,7 @@ struct rtw89_mac_gen_def {
const struct rtw89_ple_quota *max_cfg);
int (*set_cpuio)(struct rtw89_dev *rtwdev,
struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
+ int (*dle_quota_change)(struct rtw89_dev *rtwdev, bool band1_en);
void (*disable_cpu)(struct rtw89_dev *rtwdev);
int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -952,6 +970,12 @@ struct rtw89_mac_gen_def {
enum mac_ax_err_info err);
bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
+
+ int (*add_chan_list)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+ int (*scan_offload)(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif);
};
extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax;
@@ -1086,6 +1110,8 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
u16 offset_tu);
int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u64 *tsf);
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
@@ -1127,7 +1153,8 @@ static inline int rtw89_chip_reset_bb_rf(struct rtw89_dev *rtwdev)
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func);
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
@@ -1135,9 +1162,20 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+
+static inline
+int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->cfg_ppdu_status(rtwdev, mac_idx, enable);
+}
+
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
@@ -1306,6 +1344,7 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_BIG_PWR_CUT BIT(1)
XTAL_SI_XTAL_DRV = 0x15,
#define XTAL_SI_DRV_LATCH BIT(4)
+ XTAL_SI_XTAL_PLL = 0x16,
XTAL_SI_XTAL_XMD_2 = 0x24,
#define XTAL_SI_LDO_LPS GENMASK(6, 4)
XTAL_SI_XTAL_XMD_4 = 0x26,
@@ -1339,6 +1378,7 @@ enum rtw89_mac_xtal_si_offset {
XTAL_SI_SRAM_CTRL = 0xA1,
#define XTAL_SI_SRAM_DIS BIT(1)
#define FULL_BIT_MASK GENMASK(7, 0)
+ XTAL_SI_APBT = 0xD1,
XTAL_SI_PLL = 0xE0,
XTAL_SI_PLL_1 = 0xE1,
};
@@ -1364,7 +1404,8 @@ int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow);
int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx band);
void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool wow);
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode);
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en);
int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_dle_rsvd_qt_type type,
struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 93889d2fface..31d1ffb16e83 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -441,7 +441,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
* when disconnected by peer
*/
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
}
}
@@ -449,10 +449,11 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, conf->bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, 0);
}
if (changed & BSS_CHANGED_BEACON)
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
@@ -497,7 +498,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_mac_port_update(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
@@ -518,7 +519,7 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&rtwdev->mutex);
rtw89_mac_stop_ap(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
mutex_unlock(&rtwdev->mutex);
}
@@ -660,6 +661,8 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
+ clear_bit(tid, rtwsta->ampdu_map);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -668,17 +671,19 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
+ set_bit(tid, rtwsta->ampdu_map);
rtw89_leave_ps_mode(rtwdev);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, true, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_STOP:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, false, params);
mutex_unlock(&rtwdev->mutex);
break;
default:
@@ -990,7 +995,7 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
}
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
if (type == IEEE80211_ROC_TYPE_MGMT_TX)
roc->state = RTW89_ROC_MGMT;
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index be30c9346293..320e88229971 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -52,6 +52,9 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
.mbssid = R_BE_MBSSID_CTRL,
.mbssid_drop = R_BE_MBSSID_DROP_0,
.tsf_sync = R_BE_PORT_0_TSF_SYNC,
+ .ptcl_dbg = R_BE_PTCL_DBG,
+ .ptcl_dbg_info = R_BE_PTCL_DBG_INFO,
+ .bcn_drop_all = R_BE_BCN_DROP_ALL0,
.hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG,
R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2,
R_BE_PORT_HGQ_WINDOW_CFG + 3},
@@ -988,6 +991,9 @@ static int spatial_reuse_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_BE_SR_EN | B_BE_SR_CTRL_PLCP_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_BE_PLCP_SRC_EN);
+
return 0;
}
@@ -995,7 +1001,8 @@ static int tmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
- rtw89_write32_clr(rtwdev, R_BE_TB_PPDU_CTRL, B_BE_QOSNULL_UPD_MUEDCA_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TB_PPDU_CTRL, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_QOSNULL_UPD_MUEDCA_EN);
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMTX_TCR_BE_4, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK, 0x12);
@@ -1449,6 +1456,71 @@ static int set_cpuio_be(struct rtw89_dev *rtwdev,
return 0;
}
+static int dle_upd_qta_aval_page_be(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_dle_ctrl_type type,
+ enum rtw89_mac_dle_ple_quota_id quota_id)
+{
+ u32 val;
+
+ if (type == DLE_CTRL_TYPE_WDE) {
+ rtw89_write32_mask(rtwdev, R_BE_WDE_BUFMGN_CTL,
+ B_BE_WDE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_WDE_BUFMGN_CTL, B_BE_WDE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_WDE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_WDE_BUFMGN_CTL);
+ } else if (type == DLE_CTRL_TYPE_PLE) {
+ rtw89_write32_mask(rtwdev, R_BE_PLE_BUFMGN_CTL,
+ B_BE_PLE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_PLE_BUFMGN_CTL, B_BE_PLE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_PLE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_PLE_BUFMGN_CTL);
+ }
+
+ rtw89_warn(rtwdev, "%s wrong type %d\n", __func__, type);
+ return -EINVAL;
+}
+
+static int dle_quota_change_be(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ int ret;
+
+ if (band1_en) {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B0_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B0 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC0_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC0 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B1_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B1 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC1_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC1 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_qta_mode mode)
{
@@ -1480,6 +1552,13 @@ static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
static int dbcc_bb_ctrl_be(struct rtw89_dev *rtwdev, bool bb1_en)
{
+ u32 set = B_BE_FEN_BB1PLAT_RSTB | B_BE_FEN_BB1_IP_RSTN;
+
+ if (bb1_en)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, set);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE, set);
+
return 0;
}
@@ -1538,7 +1617,7 @@ static int band1_enable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1593,7 +1672,7 @@ static int band1_disable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, false);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1616,7 +1695,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, true);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1625,7 +1704,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, false);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1718,12 +1797,106 @@ static int trx_init_be(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_set_hw_sch_tx_en_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 tx_en, u32 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx);
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read32(rtwdev,
+ rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v2);
+
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, tx_en,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
+
+static
+int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PPDU_STAT, mac_idx);
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ rtw89_write32_clr(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN);
+ return 0;
+ }
+
+ rtw89_write32_mask(rtwdev, R_BE_HW_PPDU_STATUS, B_BE_FWD_PPDU_STAT_MASK, 3);
+ rtw89_write32(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN | B_BE_PPDU_MAC_INFO |
+ B_BE_APP_RX_CNT_RPT | B_BE_APP_PLCP_HDR_RPT |
+ B_BE_PPDU_STAT_RPT_CRC32 | B_BE_PPDU_STAT_RPT_DMA);
+
+ return 0;
+}
+
static bool rtw89_mac_get_txpwr_cr_be(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, (enum rtw89_mac_idx)phy_idx,
@@ -2218,6 +2391,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.rx_fltr = R_BE_RX_FLTR_OPT,
.port_base = &rtw89_port_base_be,
.agg_len_ht = R_BE_AGG_LEN_HT_0,
+ .ps_status = R_BE_WMTX_POWER_BE_BIT_CTL,
.muedca_ctrl = {
.addr = R_BE_MUEDCA_EN,
@@ -2228,6 +2402,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.mask = B_BE_BFMEE_HT_NDPA_EN | B_BE_BFMEE_VHT_NDPA_EN |
B_BE_BFMEE_HE_NDPA_EN | B_BE_BFMEE_EHT_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_BE_RXTRIG_TEST_USER_2,
+ .mask = B_BE_RXTRIG_RU26_DIS,
+ },
.check_mac_en = rtw89_mac_check_mac_en_be,
.sys_init = sys_init_be,
@@ -2239,6 +2417,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.bf_assoc = rtw89_mac_bf_assoc_be,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
@@ -2250,6 +2429,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.wde_quota_cfg = wde_quota_cfg_be,
.ple_quota_cfg = ple_quota_cfg_be,
.set_cpuio = set_cpuio_be,
+ .dle_quota_change = dle_quota_change_be,
.disable_cpu = rtw89_mac_disable_cpu_be,
.fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be,
@@ -2268,5 +2448,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.dump_err_status = rtw89_mac_dump_err_status_be,
.is_txq_empty = mac_is_txq_empty_be,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list_be,
+ .scan_offload = rtw89_fw_h2c_scan_offload_be,
};
EXPORT_SYMBOL(rtw89_mac_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 769f1ce62ebc..67d7294e488a 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -155,8 +155,8 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
DMA_FROM_DEVICE);
}
-static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
- struct sk_buff *skb)
+static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
{
struct rtw89_pci_rxbd_info *rxbd_info;
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
@@ -166,10 +166,58 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+}
+
+static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 target_rx_tag;
+
+ if (!info->check_rx_tag)
+ return 0;
+
+ /* valid range is 1 ~ 0x1FFF */
+ if (rx_ring->target_rx_tag == 0)
+ target_rx_tag = 1;
+ else
+ target_rx_tag = rx_ring->target_rx_tag;
+
+ if (rx_info->tag != target_rx_tag) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
+ rx_info->tag, target_rx_tag);
+ return -EAGAIN;
+ }
return 0;
}
+static
+int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ int rx_tag_retry = 100;
+ int ret;
+
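+ /* HW DMAs the RXBD info into the skb; re-sync and re-read until the tag matches the expected sequence or the retries run out */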
+ do {
+ rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+ rtw89_pci_rxbd_info_update(rtwdev, skb);
+
+ ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
+ if (ret != -EAGAIN)
+ break;
+ } while (rx_tag_retry--);
+
+ /* update target rx_tag for next RX */
+ rx_ring->target_rx_tag = rx_info->tag + 1;
+
+ return ret;
+}
+
static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
@@ -259,9 +307,8 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -549,9 +596,8 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -705,7 +751,7 @@ void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
- isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR);
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
if (isrs->halt_c2h_isrs)
rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
@@ -1550,6 +1596,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
@@ -1907,22 +1954,87 @@ static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u
return 0;
}
+static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+{
+ u16 addr_2lsb = addr & B_AX_DBI_2LSB;
+ u16 write_addr;
+ u8 flag;
+ int ret;
+
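+ /* DBI indirect write: the two address LSBs select the byte lane of the 32-bit data register, and WREN carries a per-byte write-enable */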
+ write_addr = addr & B_AX_DBI_ADDR_MSK;
+ write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
+ rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret)
+ rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
+ addr);
+
+ return ret;
+}
+
+static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
+ u8 flag;
+ int ret;
+
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
+ addr);
+ return ret;
+ }
+
+ read_addr = R_AX_DBI_RDATA + (addr & 3);
+ *value = rtw89_read8(rtwdev, read_addr);
+
+ return 0;
+}
+
static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
+
+ ret = pci_write_config_byte(pdev, addr, data);
+ if (!ret)
+ return 0;
+
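+ /* direct config access failed; fall back to the indirect DBI interface on chips that expose it */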
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_write8(rtwdev, addr, data);
- return pci_write_config_byte(pdev, addr, data);
+ return ret;
}
static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 *value)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
- return pci_read_config_byte(pdev, addr, value);
+ ret = pci_read_config_byte(pdev, addr, value);
+ if (!ret)
+ return 0;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_read8(rtwdev, addr, value);
+
+ return ret;
}
static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
@@ -3148,6 +3260,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->buf_sz = buf_sz;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
for (i = 0; i < len; i++) {
skb = dev_alloc_skb(buf_sz);
@@ -3387,8 +3500,7 @@ static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
- rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
- B_BE_PCIE_RX_RPQ0_IMR0_V1;
+ rtwpci->intrs[1] = 0;
}
static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
@@ -4068,6 +4180,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq;
}
+ set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
+
return 0;
err_free_irq:
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index ca5de77fee90..532f78eaf6df 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -42,6 +42,7 @@
#define B_AX_DBI_WFLAG BIT(16)
#define B_AX_DBI_WREN_MSK GENMASK(15, 12)
#define B_AX_DBI_ADDR_MSK GENMASK(11, 2)
+#define B_AX_DBI_2LSB GENMASK(1, 0)
#define R_AX_DBI_WDATA 0x1094
#define R_AX_DBI_RDATA 0x1098
@@ -996,7 +997,7 @@
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE 11460
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
@@ -1234,6 +1235,7 @@ struct rtw89_pci_info {
enum mac_ax_pcie_func_ctrl io_rcy_en;
enum mac_ax_io_rcy_tmr io_rcy_tmr;
bool rx_ring_eq_is_full;
+ bool check_rx_tag;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1276,7 +1278,7 @@ struct rtw89_pci_tx_data {
struct rtw89_pci_rx_info {
dma_addr_t dma;
- u32 fs:1, ls:1, tag:11, len:14;
+ u32 fs:1, ls:1, tag:13, len:14;
};
#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
@@ -1405,6 +1407,7 @@ struct rtw89_pci_rx_ring {
u32 buf_sz;
struct sk_buff *diliver_skb;
struct rtw89_rx_desc_info diliver_desc;
+ u32 target_rx_tag:13;
};
struct rtw89_pci_isrs {
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 629ffa4bee91..5c9e39357773 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -105,6 +105,10 @@ static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
val |= B_BE_STOP_AXI_MST;
rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
+
+ if (io_en == MAC_AX_PCIE_ENABLE)
+ rtw89_write32_mask(rtwdev, R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1,
+ B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK, 4);
}
static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
@@ -257,6 +261,7 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+ rtw89_write32_mask(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_TIMER_UNIT_MASK, 1);
val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index bafc7b1cc104..12da63d64307 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -13,6 +13,13 @@
#include "txrx.h"
#include "util.h"
+static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ return phy->phy0_phy1_offset(rtwdev, addr);
+}
+
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
{
@@ -718,6 +725,53 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw)
+{
+ enum rtw89_bandwidth cbw = chan->band_width;
+ u8 pri_ch = chan->primary_channel;
+ u8 central_ch = chan->channel;
+ u8 txsb_idx = 0;
+
+ if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
+ return txsb_idx;
+
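+ /* txsb indexes the dbw-sized sub-band from the low edge of cbw; channel
+  * numbers step by 4 per 20 MHz, so e.g. cbw 80 / dbw 20 maps pri_ch
+  * offsets {-6, -2, +2, +6} to indices 0..3
+  */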
+ switch (cbw) {
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 6) / 4;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 14) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 12) / 8;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 30) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 28) / 8;
+ else if (dbw == RTW89_CHANNEL_WIDTH_80)
+ txsb_idx = (pri_ch - central_ch + 24) / 16;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ default:
+ break;
+ }
+
+ return txsb_idx;
+}
+EXPORT_SYMBOL(rtw89_phy_get_txsb);
+
static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
@@ -796,6 +850,71 @@ u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
+static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr)
+{
+ static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
+ static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
+ bool busy, done;
+ int ret;
+ u32 val;
+
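+ /* indirect HWSI read: kick the access control, wait until not busy, issue the address and read strobe, then poll for read-done */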
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
+ if (ret) {
+ rtw89_warn(rtwdev, "poll HWSI is busy\n");
+ return INV_RF_DATA;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
+ udelay(2);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
+ if (ret) {
+ rtw89_warn(rtwdev, "read HWSI is busy\n");
+ val = INV_RF_DATA;
+ goto out;
+ }
+
+ val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
+out:
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);
+
+ return val;
+}
+
+static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ u32 val;
+
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+
+ return (val & mask) >> __ffs(mask);
+}
+
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
+
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -875,6 +994,66 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+static
+bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 data)
+{
+ static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
+ static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
+ bool busy;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, addr_is_idle[rf_path], BIT(29));
+ if (ret) {
+ rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
+ return false;
+ }
+
+ val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
+ u32_encode_bits(data, B_HWSI_DATA_VAL);
+
+ rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);
+
+ return true;
+}
+
+static
+bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ u32 val;
+
+ if (mask == RFREG_MASK) {
+ val = data;
+ } else {
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+ val &= ~mask;
+ val |= (data << __ffs(mask)) & mask;
+ }
+
+ return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
+}
+
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
+
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
@@ -893,20 +1072,30 @@ static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data)
{
- if (reg->addr == 0xfe)
+ u32 addr;
+
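+ /* table addresses 0xf9..0xfe are pseudo-CRs that encode delays; BYPASS_CR_DATA marks entries to skip */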
+ if (reg->addr == 0xfe) {
mdelay(50);
- else if (reg->addr == 0xfd)
+ } else if (reg->addr == 0xfd) {
mdelay(5);
- else if (reg->addr == 0xfc)
+ } else if (reg->addr == 0xfc) {
mdelay(1);
- else if (reg->addr == 0xfb)
+ } else if (reg->addr == 0xfb) {
udelay(50);
- else if (reg->addr == 0xfa)
+ } else if (reg->addr == 0xfa) {
udelay(5);
- else if (reg->addr == 0xf9)
+ } else if (reg->addr == 0xf9) {
udelay(1);
- else
- rtw89_phy_write32(rtwdev, reg->addr, reg->data);
+ } else if (reg->data == BYPASS_CR_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
+ } else {
+ addr = reg->addr;
+
+ if ((uintptr_t)extra_data == RTW89_PHY_1)
+ addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
+
+ rtw89_phy_write32(rtwdev, addr, reg->data);
+ }
}
union rtw89_phy_bb_gain_arg {
@@ -929,7 +1118,7 @@ static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -968,7 +1157,7 @@ static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 rxsc_start = arg.rxsc_start;
u8 bw = arg.bw;
u8 path = arg.path;
@@ -1050,7 +1239,7 @@ static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1077,7 +1266,7 @@ static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1108,10 +1297,10 @@ rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
- const struct rtw89_reg2_def *reg,
- enum rtw89_rf_path rf_path,
- void *extra_data)
+static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
@@ -1420,12 +1609,15 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
+ if (rtwdev->dbcc_en)
+ rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
+ (void *)RTW89_PHY_1);
rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
if (bb_gain_table)
rtw89_phy_init_reg(rtwdev, bb_gain_table,
- rtw89_phy_config_bb_gain, NULL);
+ chip->phy_def->config_bb_gain, NULL);
rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}
@@ -1467,11 +1659,9 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
kfree(rf_reg_info);
}
-static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
- struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_phy_table *nctl_table;
u32 val;
int ret;
@@ -1491,6 +1681,15 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
1000, false, rtwdev);
if (ret)
rtw89_err(rtwdev, "failed to poll nctl block\n");
+}
+
+static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_phy_table *nctl_table;
+
+ rtw89_phy_preinit_rf_nctl(rtwdev);
nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
@@ -1499,14 +1698,11 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}
-static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
u32 phy_page = addr >> 8;
u32 ofst = 0;
- if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
- return addr < 0x10000 ? 0x20000 : 0;
-
switch (phy_page) {
case 0x6:
case 0x7:
@@ -1561,6 +1757,7 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
+EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl)
@@ -2699,9 +2896,63 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
};
+static
+void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = RTW89_RFK_STATE_START;
+ wait->start_time = ktime_get();
+ reinit_completion(&wait->completion);
+}
+
+static
+int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
+ unsigned int ms)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+ unsigned long time_left;
+
+ /* Since we can't receive C2H events during SER, use a fixed delay instead. */
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
+ fsleep(1000 * ms / 2);
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&wait->completion,
+ msecs_to_jiffies(ms));
+ if (time_left == 0) {
+ rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
+ return -ETIMEDOUT;
+ } else if (wait->state != RTW89_RFK_STATE_OK) {
+ rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
+ rfk_name, wait->state);
+ return -EFAULT;
+ }
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
+ rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
+
+ return 0;
+}
+
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
+ const struct rtw89_c2h_rfk_report *report =
+ (const struct rtw89_c2h_rfk_report *)c2h->data;
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = report->state;
+ wait->version = report->version;
+
+ complete(&wait->completion);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "RFK report state %d with version %d (%*ph)\n",
+ wait->state, wait->version,
+ (int)(len - sizeof(report->hdr)), &report->state);
}
static
@@ -2772,6 +3023,726 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler(rtwdev, skb, len);
}
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
+
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
+
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
+
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
+
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
+
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
+
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
+
+static u32 phy_tssi_get_cck_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
+#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
+#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
+#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
+ ((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
+ (PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
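+ /* an "extra" group marks a channel sitting between two groups; its DE is the average of the two neighboring entries */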
+
+static u32 phy_tssi_get_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return PHY_TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return PHY_TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return PHY_TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 5:
+ return 0;
+ case 6 ... 8:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 9 ... 13:
+ return 1;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(1);
+ case 17 ... 21:
+ return 2;
+ case 22 ... 24:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 25 ... 29:
+ return 3;
+ case 33 ... 37:
+ return 4;
+ case 38 ... 40:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 41 ... 45:
+ return 5;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 49 ... 53:
+ return 6;
+ case 54 ... 56:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 57 ... 61:
+ return 7;
+ case 65 ... 69:
+ return 8;
+ case 70 ... 72:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 73 ... 77:
+ return 9;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 81 ... 85:
+ return 10;
+ case 86 ... 88:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 89 ... 93:
+ return 11;
+ case 97 ... 101:
+ return 12;
+ case 102 ... 104:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 105 ... 109:
+ return 13;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 113 ... 117:
+ return 14;
+ case 118 ... 120:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 121 ... 125:
+ return 15;
+ case 129 ... 133:
+ return 16;
+ case 134 ... 136:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 137 ... 141:
+ return 17;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 145 ... 149:
+ return 18;
+ case 150 ... 152:
+ return PHY_TSSI_EXTRA_GROUP(18);
+ case 153 ... 157:
+ return 19;
+ case 161 ... 165:
+ return 20;
+ case 166 ... 168:
+ return PHY_TSSI_EXTRA_GROUP(20);
+ case 169 ... 173:
+ return 21;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(21);
+ case 177 ... 181:
+ return 22;
+ case 182 ... 184:
+ return PHY_TSSI_EXTRA_GROUP(22);
+ case 185 ... 189:
+ return 23;
+ case 193 ... 197:
+ return 24;
+ case 198 ... 200:
+ return PHY_TSSI_EXTRA_GROUP(24);
+ case 201 ... 205:
+ return 25;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(25);
+ case 209 ... 213:
+ return 26;
+ case 214 ... 216:
+ return PHY_TSSI_EXTRA_GROUP(26);
+ case 217 ... 221:
+ return 27;
+ case 225 ... 229:
+ return 28;
+ case 230 ... 232:
+ return PHY_TSSI_EXTRA_GROUP(28);
+ case 233 ... 237:
+ return 29;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(29);
+ case 241 ... 245:
+ return 30;
+ case 246 ... 248:
+ return PHY_TSSI_EXTRA_GROUP(30);
+ case 249 ... 253:
+ return 31;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 13:
+ return 0;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 17 ... 29:
+ return 1;
+ case 33 ... 45:
+ return 2;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 49 ... 61:
+ return 3;
+ case 65 ... 77:
+ return 4;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 81 ... 93:
+ return 5;
+ case 97 ... 109:
+ return 6;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 113 ... 125:
+ return 7;
+ case 129 ... 141:
+ return 8;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 145 ... 157:
+ return 9;
+ case 161 ... 173:
+ return 10;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 177 ... 189:
+ return 11;
+ case 193 ... 205:
+ return 12;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 209 ... 221:
+ return 13;
+ case 225 ... 237:
+ return 14;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 241 ... 253:
+ return 15;
+ }
+
+ return 0;
+}
+
+static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 gidx_1st;
+ u32 gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ u32 gidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ gidx = phy_tssi_get_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+
+calc_6g:
+ gidx = phy_tssi_get_6g_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_6g_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 tgidx_1st;
+ u32 tgidx_2nd;
+ s8 tde_1st;
+ s8 tde_2nd;
+ u32 tgidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ tgidx = phy_tssi_get_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+
+calc_6g:
+ tgidx = phy_tssi_get_6g_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim_6g[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = chan->channel;
+ s8 trim_de;
+ s8 ofdm_de;
+ s8 cck_de;
+ u8 gidx;
+ s8 val;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
+ trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
+ h2c->curr_tssi_trim_de[i] = trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);
+
+ gidx = phy_tssi_get_cck_group(ch);
+ cck_de = tssi_info->tssi_cck[i][gidx];
+ val = u32_get_bits(cck_de + trim_de, 0xff);
+
+ h2c->curr_tssi_cck_de[i] = 0x0;
+ h2c->curr_tssi_cck_de_20m[i] = val;
+ h2c->curr_tssi_cck_de_40m[i] = val;
+ h2c->curr_tssi_efuse_cck_de[i] = cck_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);
+
+ ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
+ val = u32_get_bits(ofdm_de + trim_de, 0xff);
+
+ h2c->curr_tssi_ofdm_de[i] = 0x0;
+ h2c->curr_tssi_ofdm_de_20m[i] = val;
+ h2c->curr_tssi_ofdm_de_40m[i] = val;
+ h2c->curr_tssi_ofdm_de_80m[i] = val;
+ h2c->curr_tssi_ofdm_de_160m[i] = val;
+ h2c->curr_tssi_ofdm_de_320m[i] = val;
+ h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
+ }
+}
+
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const s8 *thm_up[RF_PATH_B + 1] = {};
+ const s8 *thm_down[RF_PATH_B + 1] = {};
+ u8 subband = chan->subband_type;
+ s8 thm_ofst[128] = {0};
+ u8 thermal;
+ u8 path;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
+ break;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
+ break;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] tmeter tbl on subband: %u\n", subband);
+
+ for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ thermal = tssi_info->thermal[path];
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "path: %u, pg thermal: 0x%x\n", path, thermal);
+
+ if (thermal == 0xff) {
+ h2c->pg_thermal[path] = 0x38;
+ memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
+ continue;
+ }
+
+ h2c->pg_thermal[path] = thermal;
+
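+ /* entries 0..63 take positive deltas from the up-table, 127..64 take negated deltas from the down-table, both clamped at the last delta */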
+ i = 0;
+ for (j = 0; j < 64; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up[path][i++] :
+ thm_up[path][DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 127; j >= 64; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down[path][i++] :
+ -thm_down[path][DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 128; i += 4) {
+ h2c->ftable[path][i + 0] = thm_ofst[i + 3];
+ h2c->ftable[path][i + 1] = thm_ofst[i + 2];
+ h2c->ftable[path][i + 2] = thm_ofst[i + 1];
+ h2c->ftable[path][i + 3] = thm_ofst[i + 0];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "thm ofst [%x]: %02x %02x %02x %02x\n",
+ i, thm_ofst[i], thm_ofst[i + 1],
+ thm_ofst[i + 2], thm_ofst[i + 3]);
+ }
+ }
+}
+
static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
@@ -4551,6 +5522,9 @@ static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
const struct rtw89_agc_gaincode_set set)
{
+ if (!rtwdev->hal.support_igi)
+ return;
+
rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);
@@ -4606,7 +5580,8 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
s8 cck_cca_th;
u32 pd_val = 0;
- under_region += PD_TH_SB_FLTR_CMP_VAL;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
+ under_region += PD_TH_SB_FLTR_CMP_VAL;
switch (cbw) {
case RTW89_CHANNEL_WIDTH_40:
@@ -4953,12 +5928,15 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
+ rtw89_phy_bb_wrap_init(rtwdev);
rtw89_phy_edcca_init(rtwdev);
+ rtw89_phy_ch_info_init(rtwdev);
rtw89_phy_ul_tb_info_init(rtwdev);
rtw89_phy_antdiv_init(rtwdev);
rtw89_chip_rfe_gpio(rtwdev);
rtw89_phy_antdiv_set_ant(rtwdev);
+ rtw89_chip_rfk_hw_init(rtwdev);
rtw89_phy_init_rf_nctl(rtwdev);
rtw89_chip_rfk_init(rtwdev);
rtw89_chip_set_txpwr_ctrl(rtwdev);
@@ -5400,6 +6378,78 @@ void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
rtw89_phy_edcca_log(rtwdev);
}
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ return RF_AB;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_AB;
+ else
+ return RF_CD;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_kpath);
+
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_C;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
+
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.setting_addr = R_CCX,
.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
@@ -5476,6 +6526,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
.ccx = &rtw89_ccx_regs_ax,
.physts = &rtw89_physts_regs_ax,
.cfo = &rtw89_cfo_regs_ax,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
+ .config_bb_gain = rtw89_phy_config_bb_gain_ax,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
+ .bb_wrap_init = NULL,
+ .ch_info_init = NULL,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 3e379077c6ca..082231ebbee5 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -7,6 +7,7 @@
#include "core.h"
+#define RTW89_BBMCU_ADDR_OFFSET 0x30000
#define RTW89_RF_ADDR_ADSEL_MASK BIT(16)
#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
@@ -509,6 +510,14 @@ struct rtw89_phy_gen_def {
const struct rtw89_ccx_regs *ccx;
const struct rtw89_physts_regs *physts;
const struct rtw89_cfo_regs *cfo;
+ u32 (*phy0_phy1_offset)(struct rtw89_dev *rtwdev, u32 addr);
+ void (*config_bb_gain)(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data);
+ void (*preinit_rf_nctl)(struct rtw89_dev *rtwdev);
+ void (*bb_wrap_init)(struct rtw89_dev *rtwdev);
+ void (*ch_info_init)(struct rtw89_dev *rtwdev);
void (*set_txpwr_byrate)(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -604,6 +613,15 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr + phy->cr_base, mask);
}
+static inline void rtw89_bbmcu_write32(struct rtw89_dev *rtwdev,
+ u32 addr, u32 data, enum rtw89_phy_idx phy_idx)
+{
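+ /* PHY1 CRs below 0x10000 sit at a +0x20000 offset; all BBMCU accesses go through the +0x30000 window */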
+ if (phy_idx && addr < 0x10000)
+ addr += 0x20000;
+
+ rtw89_write32(rtwdev, addr + RTW89_BBMCU_ADDR_OFFSET, data);
+}
+
static inline
enum rtw89_gain_offset rtw89_subband_to_gain_offset_band_of_ofdm(enum rtw89_subband subband)
{
@@ -664,6 +682,38 @@ enum rtw89_phy_bb_gain_band rtw89_subband_to_bb_gain_band(enum rtw89_subband sub
}
}
+static inline
+enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_BB_GAIN_BAND_2G_BE;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_BB_GAIN_BAND_5G_L_BE;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_BB_GAIN_BAND_5G_M_BE;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_BB_GAIN_BAND_5G_H_BE;
+ case RTW89_CH_6G_BAND_IDX0:
+ return RTW89_BB_GAIN_BAND_6G_L0_BE;
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_BB_GAIN_BAND_6G_L1_BE;
+ case RTW89_CH_6G_BAND_IDX2:
+ return RTW89_BB_GAIN_BAND_6G_M0_BE;
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_BB_GAIN_BAND_6G_M1_BE;
+ case RTW89_CH_6G_BAND_IDX4:
+ return RTW89_BB_GAIN_BAND_6G_H0_BE;
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_BB_GAIN_BAND_6G_H1_BE;
+ case RTW89_CH_6G_BAND_IDX6:
+ return RTW89_BB_GAIN_BAND_6G_UH0_BE;
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_BB_GAIN_BAND_6G_UH1_BE;
+ }
+}
+
enum rtw89_rfk_flag {
RTW89_RFK_F_WRF = 0,
RTW89_RFK_F_WM = 1,
@@ -728,14 +778,20 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
@@ -759,6 +815,29 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch);
+static inline void rtw89_phy_preinit_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ phy->preinit_rf_nctl(rtwdev);
+}
+
+static inline void rtw89_phy_bb_wrap_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->bb_wrap_init)
+ phy->bb_wrap_init(rtwdev);
+}
+
+static inline void rtw89_phy_ch_info_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->ch_info_init)
+ phy->ch_info_init(rtwdev);
+}
+
static inline
void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -809,6 +888,36 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms);
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
void rtw89_phy_cfo_track_work(struct work_struct *work);
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
@@ -836,5 +945,9 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan);
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev);
void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev);
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index 63eeeea72b68..be0148f2b96f 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -78,6 +78,332 @@ static const struct rtw89_cfo_regs rtw89_cfo_regs_be = {
.valid_0_mask = B_DCFO_OPT_EN_V1,
};
+static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u32 phy_page = addr >> 8;
+ u32 ofst = 0;
+
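+ /* the CR pages listed below are duplicated for PHY1 at a +0x1000 offset; any other page is shared between PHYs */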
+ if ((phy_page >= 0x4 && phy_page <= 0xF) ||
+ (phy_page >= 0x20 && phy_page <= 0x2B) ||
+ (phy_page >= 0x40 && phy_page <= 0x4f) ||
+ (phy_page >= 0x60 && phy_page <= 0x6f) ||
+ (phy_page >= 0xE4 && phy_page <= 0xE5) ||
+ (phy_page >= 0xE8 && phy_page <= 0xED))
+ ofst = 0x1000;
+ else
+ ofst = 0x0;
+
+ return ofst;
+}
+
+union rtw89_phy_bb_gain_arg_be {
+ u32 addr;
+ struct {
+ u8 type;
+#define BB_GAIN_TYPE_SUB0_BE GENMASK(3, 0)
+#define BB_GAIN_TYPE_SUB1_BE GENMASK(7, 4)
+ u8 path_bw;
+#define BB_GAIN_PATH_BE GENMASK(3, 0)
+#define BB_GAIN_BW_BE GENMASK(7, 4)
+ u8 gain_band;
+ u8 cfg_type;
+ } __packed;
+} __packed;
+
+static void
+rtw89_phy_cfg_bb_gain_error_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 2; i++, data >>= 8)
+ gain->tia_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain error {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_rpl_ofst_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 type_sub0 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB0_BE);
+ u8 type_sub1 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB1_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 ofst = 0;
+ int i;
+
+ switch (type_sub1) {
+ case RTW89_CMAC_BW_20M:
+ gain->rpl_ofst_20[gband][path][0] = (s8)data;
+ break;
+ case RTW89_CMAC_BW_40M:
+ for (i = 0; i < RTW89_BW20_SC_40M; i++, data >>= 8)
+ gain->rpl_ofst_40[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_80M:
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_80[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_160M:
+ if (type_sub0 == 0)
+ ofst = 0;
+ else
+ ofst = RTW89_BW20_SC_80M;
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_160[gband][path][i + ofst] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb rpl ofst {0x%x:0x%x} with unknown type_sub1: %d\n",
+ arg.addr, data, type_sub1);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_op1db_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 3:
+ for (i = 4; i < 8; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void rtw89_phy_config_bb_gain_be(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ union rtw89_phy_bb_gain_arg_be arg = { .addr = reg->addr };
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+
+ if (bw_type >= RTW89_BB_BW_NR_BE)
+ return;
+
+ if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR_BE)
+ return;
+
+ if (path >= chip->rf_path_num)
+ return;
+
+ if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
+ rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
+ return;
+ }
+
+ switch (arg.cfg_type) {
+ case 0:
+ rtw89_phy_cfg_bb_gain_error_be(rtwdev, arg, reg->data);
+ break;
+ case 1:
+ rtw89_phy_cfg_bb_rpl_ofst_be(rtwdev, arg, reg->data);
+ break;
+ case 2:
+ /* ignore BB gain bypass */
+ break;
+ case 3:
+ rtw89_phy_cfg_bb_gain_op1db_be(rtwdev, arg, reg->data);
+ break;
+ case 4:
+ /* This cfg_type is only used by rfe_type >= 50 with eFEM */
+ if (efuse->rfe_type < 50)
+ break;
+ fallthrough;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
+ arg.addr, reg->data, arg.cfg_type);
+ break;
+ }
+}
+
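Note the guard-then-fallthrough in case 4 above: a cfg_type of 4 is dropped silently on boards where it does not apply (rfe_type < 50), while boards that should consume it (rfe_type >= 50 with eFEM) fall through to the default warning, so apparently still-unhandled entries are flagged rather than lost. The idiom in isolation (the predicate name is invented):

	switch (cfg_type) {
	case 4:
		if (!uses_efem)		/* hypothetical predicate */
			break;		/* expected: ignore quietly */
		fallthrough;		/* unexpected: report it */
	default:
		warn_unknown(cfg_type);
		break;
	}
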
+static void rtw89_phy_preinit_rf_nctl_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C0, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C1, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQKDPK_HC, B_IQKDPK_HC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CLK_GCK, B_CLK_GCK, 0x00fffff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_C1, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC, B_TXRFC_RST, 0x1);
+
+ if (rtwdev->dbcc_en) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST_C1, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC_C1, B_TXRFC_RST, 0x1);
+ }
+}
+
+static
+void rtw89_phy_bb_wrap_pwr_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ u32 macid_idx, cr, base_macid_lmt, max_macid = 32;
+
+ base_macid_lmt = R_BE_PWR_MACID_LMT_BASE;
+
+ for (macid_idx = 0; macid_idx < 4 * max_macid; macid_idx += 4) {
+ cr = base_macid_lmt + macid_idx;
+ rtw89_write32(rtwdev, cr, 0x03007F7F);
+ }
+}
+
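Since the loop above strides the control-register address in 4-byte steps, it is equivalent to writing 32 consecutive per-MACID limit registers:

	/* Same effect with explicit per-MACID indexing. */
	for (macid = 0; macid < 32; macid++)
		rtw89_write32(rtwdev, R_BE_PWR_MACID_LMT_BASE + 4 * macid,
			      0x03007F7F);
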
+static
+void rtw89_phy_bb_wrap_tx_path_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ int i, max_macid = 32;
+ u32 cr = R_BE_PWR_MACID_PATH_BASE;
+
+ for (i = 0; i < max_macid; i++, cr += 4)
+ rtw89_write32(rtwdev, cr, 0x03C86000);
+}
+
+static void rtw89_phy_bb_wrap_tpu_set_all(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_REF_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_LMTBF, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMTBF_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_BYRATE_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_RULMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_SW, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_SW_DB, 0);
+}
+
+static
+void rtw89_phy_bb_wrap_listen_path_en_init(struct rtw89_dev *rtwdev)
+{
+ u32 addr;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
+ if (ret)
+ return;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_LISTEN_PATH, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_LISTEN_PATH_EN, 0x2);
+}
+
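The listen-path setup above touches MAC_1 registers only after rtw89_mac_check_mac_en() confirms the second CMAC is powered. Judging by the R_BE_*_C1 pairs added to reg.h later in this patch (e.g. 0x10000 vs 0x14000), rtw89_mac_reg_by_idx() resolves such a register for MAC_1 by adding a fixed 0x4000 block offset; a hedged stand-in:

	/* Hypothetical equivalent for BE chips, inferred from the 0x4000
	 * spacing of the _C1 register copies; not the driver's helper. */
	static inline u32 be_mac_reg_by_idx(u32 base, u8 mac_idx)
	{
		return mac_idx == 1 ? base + 0x4000 : base;
	}
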
+static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_LMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_LMT_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RATE_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_FORCE_PWR_BY_RATE_EN, 0);
+}
+
+static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM, mac_idx);
+ rtw89_write32(rtwdev, addr, 0xE4E431);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM_SS, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, 0x7, 0);
+}
+
+static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_mac_idx mac_idx = RTW89_MAC_0;
+
+ rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_tx_path_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
+ rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+}
+
+static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG_LEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_DATA, B_CHINFO_DATA_BITMAP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_ELM_BITMAP, 0x40303);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_SRC, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_TYPE, 0x3);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_SCAL, 0x0);
+}
+
struct rtw89_byr_spec_ent_be {
struct rtw89_rate_desc init;
u8 num_of_idx;
@@ -644,6 +970,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
.ccx = &rtw89_ccx_regs_be,
.physts = &rtw89_physts_regs_be,
.cfo = &rtw89_cfo_regs_be,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_be,
+ .config_bb_gain = rtw89_phy_config_bb_gain_be,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be,
+ .bb_wrap_init = rtw89_phy_bb_wrap_init_be,
+ .ch_info_init = rtw89_phy_ch_info_init_be,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_be,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 917c01e5e9ed..31290d8cb7f7 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -14,6 +14,7 @@
static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 pwr_en_bit = 0xE;
u32 chk_msk = pwr_en_bit << (4 * macid);
u32 polling;
@@ -21,7 +22,7 @@ static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
ret = read_poll_timeout_atomic(rtw89_read32_mask, polling, !polling,
1000, 50000, false, rtwdev,
- R_AX_PPWRBIT_SETTING, chk_msk);
+ mac->ps_status, chk_msk);
if (ret) {
rtw89_info(rtwdev, "rtw89: failed to leave lps state\n");
return -EBUSY;
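read_poll_timeout_atomic() (from <linux/iopoll.h>) busy-polls the masked power-state register every 1000 µs for up to 50 ms, succeeding once the per-MACID power bits (0xE shifted into that MACID's nibble) all clear. Roughly, ignoring the macro's timekeeping details:

	/* Simplified expansion of the poll above (sketch only); the driver
	 * maps the macro's timeout error onto -EBUSY. */
	for (u32 waited = 0;; waited += 1000) {
		polling = rtw89_read32_mask(rtwdev, mac->ps_status, chk_msk);
		if (!polling)
			break;			/* left LPS */
		if (waited >= 50000)
			return -EBUSY;
		udelay(1000);
	}
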
@@ -83,16 +84,17 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_LEGACY,
.lastrpwm = RTW89_LAST_RPWM_PS,
};
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+ rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
@@ -123,7 +125,7 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_enter_lps(rtwdev, rtwvif);
if (ps_mode)
__rtw89_enter_ps_mode(rtwdev, rtwvif);
}
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 8456e2b0c14f..37ccd8ffa87a 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -3246,6 +3246,13 @@
#define R_AX_RX_SR_CTRL_C1 0xEE4A
#define B_AX_SR_EN BIT(0)
+#define R_AX_BSSID_SRC_CTRL 0xCE4B
+#define R_AX_BSSID_SRC_CTRL_C1 0xEE4B
+#define B_AX_BSSID_MATCH BIT(3)
+#define B_AX_PARTIAL_AID_MATCH BIT(2)
+#define B_AX_BSSCOLOR_MATCH BIT(1)
+#define B_AX_PLCP_SRC_EN BIT(0)
+
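Register fields throughout reg.h are declared with BIT() and GENMASK(); reading a field is a mask-and-shift (the kernel's FIELD_GET() wraps this generically). A userspace-runnable illustration with stand-in macros and an invented register value:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel helpers, for illustration only. */
#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))

int main(void)
{
	uint32_t reg = 0x0b;			/* hypothetical read */

	printf("BSSID match: %u\n", !!(reg & BIT(3)));
	printf("PLCP src en: %u\n", !!(reg & BIT(0)));
	printf("bits 2..1:   0x%x\n", (reg & GENMASK(2, 1)) >> 1);
	return 0;
}
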
#define R_AX_CSIRPT_OPTION 0xCE64
#define R_AX_CSIRPT_OPTION_C1 0xEE64
#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
@@ -4033,6 +4040,30 @@
#define B_BE_SYSON_DIS_PMCR_BE_WRMSK BIT(2)
#define B_BE_SYSON_R_BE_ARB_MASK GENMASK(1, 0)
+#define R_BE_MEM_PWR_CTRL 0x00D0
+#define B_BE_DMEM5_WLMCU_DS BIT(31)
+#define B_BE_DMEM4_WLMCU_DS BIT(30)
+#define B_BE_DMEM3_WLMCU_DS BIT(29)
+#define B_BE_DMEM2_WLMCU_DS BIT(28)
+#define B_BE_DMEM1_WLMCU_DS BIT(27)
+#define B_BE_DMEM0_WLMCU_DS BIT(26)
+#define B_BE_IMEM5_WLMCU_DS BIT(25)
+#define B_BE_IMEM4_WLMCU_DS BIT(24)
+#define B_BE_IMEM3_WLMCU_DS BIT(23)
+#define B_BE_IMEM2_WLMCU_DS BIT(22)
+#define B_BE_IMEM1_WLMCU_DS BIT(21)
+#define B_BE_IMEM0_WLMCU_DS BIT(20)
+#define B_BE_MEM_BBMCU1_DS BIT(19)
+#define B_BE_MEM_BBMCU0_DS_V1 BIT(17)
+#define B_BE_MEM_BT_DS BIT(10)
+#define B_BE_MEM_SDIO_LS BIT(9)
+#define B_BE_MEM_SDIO_DS BIT(8)
+#define B_BE_MEM_USB_LS BIT(7)
+#define B_BE_MEM_USB_DS BIT(6)
+#define B_BE_MEM_PCI_LS BIT(5)
+#define B_BE_MEM_PCI_DS BIT(4)
+#define B_BE_MEM_WLMAC_LS BIT(3)
+
#define R_BE_PCIE_MIO_INTF 0x00E4
#define B_BE_AON_MIO_EPHY_1K_SEL_MASK GENMASK(29, 24)
#define B_BE_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
@@ -4401,6 +4432,19 @@
#define R_BE_LTR_LATENCY_IDX2_V1 0x361C
#define R_BE_LTR_LATENCY_IDX3_V1 0x3620
+#define R_BE_H2CREG_DATA0 0x7140
+#define R_BE_H2CREG_DATA1 0x7144
+#define R_BE_H2CREG_DATA2 0x7148
+#define R_BE_H2CREG_DATA3 0x714C
+#define R_BE_C2HREG_DATA0 0x7150
+#define R_BE_C2HREG_DATA1 0x7154
+#define R_BE_C2HREG_DATA2 0x7158
+#define R_BE_C2HREG_DATA3 0x715C
+#define R_BE_H2CREG_CTRL 0x7160
+#define B_BE_H2CREG_TRIGGER BIT(0)
+#define R_BE_C2HREG_CTRL 0x7164
+#define B_BE_C2HREG_TRIGGER BIT(0)
+
#define R_BE_HCI_FUNC_EN 0x7880
#define B_BE_HCI_CR_PROTECT BIT(31)
#define B_BE_HCI_TRXBUF_EN BIT(2)
@@ -4488,6 +4532,42 @@
#define B_BE_RMAC_PPDU_HANG_CNT_MASK GENMASK(23, 16)
#define B_BE_SER_L0_COUNTER_MASK GENMASK(8, 0)
+#define R_BE_DMAC_SYS_CR32B 0x842C
+#define B_BE_DMAC_BB_PHY1_MASK GENMASK(31, 16)
+#define B_BE_DMAC_BB_PHY0_MASK GENMASK(15, 0)
+#define B_BE_DMAC_BB_CTRL_39 BIT(31)
+#define B_BE_DMAC_BB_CTRL_38 BIT(30)
+#define B_BE_DMAC_BB_CTRL_37 BIT(29)
+#define B_BE_DMAC_BB_CTRL_36 BIT(28)
+#define B_BE_DMAC_BB_CTRL_35 BIT(27)
+#define B_BE_DMAC_BB_CTRL_34 BIT(26)
+#define B_BE_DMAC_BB_CTRL_33 BIT(25)
+#define B_BE_DMAC_BB_CTRL_32 BIT(24)
+#define B_BE_DMAC_BB_CTRL_31 BIT(23)
+#define B_BE_DMAC_BB_CTRL_30 BIT(22)
+#define B_BE_DMAC_BB_CTRL_29 BIT(21)
+#define B_BE_DMAC_BB_CTRL_28 BIT(20)
+#define B_BE_DMAC_BB_CTRL_27 BIT(19)
+#define B_BE_DMAC_BB_CTRL_26 BIT(18)
+#define B_BE_DMAC_BB_CTRL_25 BIT(17)
+#define B_BE_DMAC_BB_CTRL_24 BIT(16)
+#define B_BE_DMAC_BB_CTRL_23 BIT(15)
+#define B_BE_DMAC_BB_CTRL_22 BIT(14)
+#define B_BE_DMAC_BB_CTRL_21 BIT(13)
+#define B_BE_DMAC_BB_CTRL_20 BIT(12)
+#define B_BE_DMAC_BB_CTRL_19 BIT(11)
+#define B_BE_DMAC_BB_CTRL_18 BIT(10)
+#define B_BE_DMAC_BB_CTRL_17 BIT(9)
+#define B_BE_DMAC_BB_CTRL_16 BIT(8)
+#define B_BE_DMAC_BB_CTRL_15 BIT(7)
+#define B_BE_DMAC_BB_CTRL_14 BIT(6)
+#define B_BE_DMAC_BB_CTRL_13 BIT(5)
+#define B_BE_DMAC_BB_CTRL_12 BIT(4)
+#define B_BE_DMAC_BB_CTRL_11 BIT(3)
+#define B_BE_DMAC_BB_CTRL_10 BIT(2)
+#define B_BE_DMAC_BB_CTRL_9 BIT(1)
+#define B_BE_DMAC_BB_CTRL_8 BIT(0)
+
#define R_BE_DLE_EMPTY0 0x8430
#define B_BE_PLE_EMPTY_QTA_DMAC_H2D BIT(27)
#define B_BE_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26)
@@ -4947,6 +5027,11 @@
#define B_BE_WDE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_WDE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_WDE_BUFMGN_CTL 0x8C10
+#define B_BE_WDE_AVAL_UPD_REQ BIT(29)
+#define B_BE_WDE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_WDE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_WDE_ERR_IMR 0x8C38
#define B_BE_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5063,6 +5148,11 @@
#define B_BE_PLE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_PLE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_PLE_BUFMGN_CTL 0x9010
+#define B_BE_PLE_AVAL_UPD_REQ BIT(29)
+#define B_BE_PLE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_PLE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_PLE_ERR_IMR 0x9038
#define B_BE_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5429,6 +5519,14 @@
#define B_BE_DROP_NONDMA_PPDU BIT(2)
#define B_BE_APPEND_FCS BIT(0)
+#define R_BE_HW_PPDU_STATUS 0x9C30
+#define B_BE_FWD_RPKTTYPE_MASK GENMASK(31, 26)
+#define B_BE_FWD_PPDU_PRTID_MASK GENMASK(25, 23)
+#define B_BE_FWD_PPDU_FW_RLS BIT(22)
+#define B_BE_FWD_PPDU_QUEID_MASK GENMASK(21, 16)
+#define B_BE_FWD_OTHER_RPKT_MASK GENMASK(15, 8)
+#define B_BE_FWD_PPDU_STAT_MASK GENMASK(7, 0)
+
#define R_BE_CUT_AMSDU_CTRL 0x9C94
#define B_BE_EN_CUT_AMSDU BIT(31)
#define B_BE_CUT_AMSDU_CHKLEN_EN BIT(30)
@@ -5727,6 +5825,9 @@
#define B_BE_STOP_CH1 BIT(1)
#define B_BE_STOP_CH0 BIT(0)
+#define R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1 0xB02C
+#define B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK GENMASK(4, 0)
+
#define R_BE_HAXI_IDCT_MSK 0xB0B8
#define B_BE_HAXI_RRESP_ERR_IDCT_MSK BIT(7)
#define B_BE_HAXI_BRESP_ERR_IDCT_MSK BIT(6)
@@ -5777,6 +5878,15 @@
#define B_BE_PREC_PAGE_CH12_V1_MASK GENMASK(21, 16)
#define B_BE_PREC_PAGE_CH011_V1_MASK GENMASK(5, 0)
+#define R_BE_CH0_PAGE_CTRL 0xB718
+#define B_BE_CH0_GRP BIT(31)
+#define B_BE_CH0_MAX_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_MIN_PG_MASK GENMASK(12, 0)
+
+#define R_BE_CH0_PAGE_INFO 0xB750
+#define B_BE_CH0_AVAL_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_USE_PG_MASK GENMASK(12, 0)
+
#define R_BE_PUB_PAGE_INFO3 0xB78C
#define B_BE_G1_AVAL_PG_MASK GENMASK(28, 16)
#define B_BE_G0_AVAL_PG_MASK GENMASK(12, 0)
@@ -5822,6 +5932,9 @@
#define B_BE_MACID_ACQ_GRP0_CLR_P BIT(2)
#define B_BE_R_MACID_ACQ_CHK_EN BIT(0)
+#define R_BE_PWR_MACID_PATH_BASE 0x0E500
+#define R_BE_PWR_MACID_LMT_BASE 0x0ED00
+
#define R_BE_CMAC_FUNC_EN 0x10000
#define R_BE_CMAC_FUNC_EN_C1 0x14000
#define B_BE_CMAC_CRPRT BIT(31)
@@ -5873,6 +5986,16 @@
B_BE_RMAC_CKEN | B_BE_TXTIME_CKEN | B_BE_RESP_PKTCTL_CKEN | \
B_BE_SIGB_CKEN)
+#define R_BE_WMAC_RFMOD 0x10010
+#define R_BE_WMAC_RFMOD_C1 0x14010
+#define B_BE_CMAC_ASSERTION BIT(31)
+#define B_BE_WMAC_RFMOD_MASK GENMASK(2, 0)
+#define BE_WMAC_RFMOD_20M 0
+#define BE_WMAC_RFMOD_40M 1
+#define BE_WMAC_RFMOD_80M 2
+#define BE_WMAC_RFMOD_160M 3
+#define BE_WMAC_RFMOD_320M 4
+
#define R_BE_TX_SUB_BAND_VALUE 0x10088
#define R_BE_TX_SUB_BAND_VALUE_C1 0x14088
#define B_BE_PRI20_BITMAP_MASK GENMASK(31, 16)
@@ -6009,6 +6132,13 @@
#define B_BE_MACTX_LATENCY_MASK GENMASK(10, 8)
#define B_BE_PREBKF_TIME_MASK GENMASK(4, 0)
+#define R_BE_PREBKF_CFG_1 0x1033C
+#define R_BE_PREBKF_CFG_1_C1 0x1433C
+#define B_BE_SIFS_TIMEOUT_TB_AGGR_MASK GENMASK(31, 24)
+#define B_BE_SIFS_PREBKF_MASK GENMASK(23, 16)
+#define B_BE_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
+#define B_BE_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
+
#define R_BE_CCA_CFG_0 0x10340
#define R_BE_CCA_CFG_0_C1 0x14340
#define B_BE_R_SIFS_AGGR_TIME_V1_MASK GENMASK(31, 24)
@@ -6050,11 +6180,36 @@
#define R_BE_MUEDCA_EN 0x10370
#define R_BE_MUEDCA_EN_C1 0x14370
+#define B_BE_SIFS_TIMEOUT_TB_T2_MASK GENMASK(30, 24)
+#define B_BE_SIFS_MACTXEN_TB_T1_MASK GENMASK(22, 16)
#define B_BE_MUEDCA_WMM_SEL BIT(8)
-#define B_BE_SET_MUEDCATIMER_TF_1 BIT(5)
+#define B_BE_SET_MUEDCATIMER_TF_MASK GENMASK(5, 4)
#define B_BE_SET_MUEDCATIMER_TF_0 BIT(4)
+#define B_BE_MUEDCA_EN_MASK GENMASK(1, 0)
#define B_BE_MUEDCA_EN_0 BIT(0)
+#define R_BE_CTN_DRV_TXEN 0x10398
+#define R_BE_CTN_DRV_TXEN_C1 0x14398
+#define B_BE_CTN_TXEN_TWT_3 BIT(17)
+#define B_BE_CTN_TXEN_TWT_2 BIT(16)
+#define B_BE_CTN_TXEN_TWT_1 BIT(15)
+#define B_BE_CTN_TXEN_TWT_0 BIT(14)
+#define B_BE_CTN_TXEN_ULQ BIT(13)
+#define B_BE_CTN_TXEN_BCNQ BIT(12)
+#define B_BE_CTN_TXEN_HGQ BIT(11)
+#define B_BE_CTN_TXEN_CPUMGQ BIT(10)
+#define B_BE_CTN_TXEN_MGQ1 BIT(9)
+#define B_BE_CTN_TXEN_MGQ BIT(8)
+#define B_BE_CTN_TXEN_VO_1 BIT(7)
+#define B_BE_CTN_TXEN_VI_1 BIT(6)
+#define B_BE_CTN_TXEN_BK_1 BIT(5)
+#define B_BE_CTN_TXEN_BE_1 BIT(4)
+#define B_BE_CTN_TXEN_VO_0 BIT(3)
+#define B_BE_CTN_TXEN_VI_0 BIT(2)
+#define B_BE_CTN_TXEN_BK_0 BIT(1)
+#define B_BE_CTN_TXEN_BE_0 BIT(0)
+#define B_BE_CTN_TXEN_ALL_MASK GENMASK(17, 0)
+
#define R_BE_TB_CHK_CCA_NAV 0x103AC
#define R_BE_TB_CHK_CCA_NAV_C1 0x143AC
#define B_BE_TB_CHK_TX_NAV BIT(15)
@@ -6212,6 +6367,8 @@
#define R_BE_TSFTR_HIGH_P0_C1 0x1443C
#define B_BE_TSFTR_HIGH_P0_MASK GENMASK(31, 0)
+#define R_BE_BCN_DROP_ALL0 0x10560
+
#define R_BE_MBSSID_CTRL 0x10568
#define R_BE_MBSSID_CTRL_C1 0x14568
#define B_BE_MBSSID_MODE_SEL BIT(20)
@@ -6282,6 +6439,17 @@
#define B_BE_SPEC_SIFS_OFDM_PTCL_MASK GENMASK(15, 8)
#define B_BE_SPEC_SIFS_CCK_PTCL_MASK GENMASK(7, 0)
+#define R_BE_TXRATE_CHK 0x10828
+#define R_BE_TXRATE_CHK_C1 0x14828
+#define B_BE_LATENCY_PADDING_PKT_TH_MASK GENMASK(31, 24)
+#define B_BE_PLCP_FETCH_BUFF_MASK GENMASK(23, 16)
+#define B_BE_OFDM_CCK_ERR_PROC BIT(6)
+#define B_BE_PKT_LAST_TX BIT(5)
+#define B_BE_BAND_MODE BIT(4)
+#define B_BE_MAX_TXNSS_MASK GENMASK(3, 2)
+#define B_BE_RTS_LIMIT_IN_OFDM6 BIT(1)
+#define B_BE_CHECK_CCK_EN BIT(0)
+
#define R_BE_MBSSID_DROP_0 0x1083C
#define R_BE_MBSSID_DROP_0_C1 0x1483C
#define B_BE_GI_LTF_FB_SEL BIT(30)
@@ -6398,6 +6566,10 @@
#define B_BE_PTCL_DROP BIT(5)
#define B_BE_PTCL_TX_QUEUE_IDX_MASK GENMASK(4, 0)
+#define R_BE_PTCL_DBG_INFO 0x108F0
+
+#define R_BE_PTCL_DBG 0x108F4
+
#define R_BE_RX_ERROR_FLAG 0x10C00
#define R_BE_RX_ERROR_FLAG_C1 0x14C00
#define B_BE_RX_CSI_NOT_RELEASE_ERROR BIT(31)
@@ -6676,6 +6848,9 @@
#define B_BE_UPD_HGQMD BIT(1)
#define B_BE_UPD_TIMIE BIT(0)
+#define R_BE_WMTX_POWER_BE_BIT_CTL 0x10E0C
+#define R_BE_WMTX_POWER_BE_BIT_CTL_C1 0x14E0C
+
#define R_BE_WMTX_TCR_BE_4 0x10E2C
#define R_BE_WMTX_TCR_BE_4_C1 0x14E2C
#define B_BE_UL_EHT_MUMIMO_LTF_MODE BIT(30)
@@ -7056,6 +7231,20 @@
#define S_BE_BACAM_RST_ENT 1
#define S_BE_BACAM_RST_ALL 2
+#define R_BE_PPDU_STAT 0x11440
+#define R_BE_PPDU_STAT_C1 0x15440
+#define B_BE_STAT_IORST BIT(13)
+#define B_BE_STAT_GCKDIS BIT(12)
+#define B_BE_PPDU_STAT_WR_BW_MASK GENMASK(11, 10)
+#define B_BE_PPDU_STAT_RPT_TRIG BIT(8)
+#define B_BE_PPDU_STAT_RPT_DMA BIT(6)
+#define B_BE_PPDU_STAT_RPT_CRC32 BIT(5)
+#define B_BE_PPDU_STAT_RPT_ADDR BIT(4)
+#define B_BE_APP_PLCP_HDR_RPT BIT(3)
+#define B_BE_APP_RX_CNT_RPT BIT(2)
+#define B_BE_PPDU_MAC_INFO BIT(1)
+#define B_BE_PPDU_STAT_RPT_EN BIT(0)
+
#define R_BE_RX_SR_CTRL 0x1144A
#define R_BE_RX_SR_CTRL_C1 0x1544A
#define B_BE_SR_OP_MODE_MASK GENMASK(5, 4)
@@ -7063,6 +7252,13 @@
#define B_BE_SR_CTRL_PLCP_EN BIT(1)
#define B_BE_SR_EN BIT(0)
+#define R_BE_BSSID_SRC_CTRL 0x1144B
+#define R_BE_BSSID_SRC_CTRL_C1 0x1544B
+#define B_BE_BSSID_MATCH BIT(3)
+#define B_BE_PARTIAL_AID_MATCH BIT(2)
+#define B_BE_BSSCOLOR_MATCH BIT(1)
+#define B_BE_PLCP_SRC_EN BIT(0)
+
#define R_BE_CSIRPT_OPTION 0x11464
#define R_BE_CSIRPT_OPTION_C1 0x15464
#define B_BE_CSIPRT_EHTSU_AID_EN BIT(26)
@@ -7178,12 +7374,56 @@
#define R_BE_PWR_MODULE 0x11900
#define R_BE_PWR_MODULE_C1 0x15900
+#define R_BE_PWR_LISTEN_PATH 0x11988
+#define B_BE_PWR_LISTEN_PATH_EN GENMASK(31, 28)
+
+#define R_BE_PWR_REF_CTRL 0x11A20
+#define B_BE_PWR_REF_CTRL_OFDM GENMASK(9, 1)
+#define B_BE_PWR_REF_CTRL_CCK GENMASK(18, 10)
+#define B_BE_PWR_OFST_LMT_DB GENMASK(27, 19)
+#define R_BE_PWR_OFST_LMTBF 0x11A24
+#define B_BE_PWR_OFST_LMTBF_DB GENMASK(8, 0)
+#define R_BE_PWR_FORCE_LMT 0x11A28
+#define B_BE_PWR_FORCE_LMT_ON BIT(6)
+
+#define R_BE_PWR_RATE_CTRL 0x11A2C
+#define B_BE_PWR_OFST_BYRATE_DB GENMASK(8, 0)
+#define B_BE_FORCE_PWR_BY_RATE_EN BIT(19)
+#define B_BE_FORCE_PWR_BY_RATE_VAL GENMASK(28, 20)
#define R_BE_PWR_RATE_OFST_CTRL 0x11A30
+#define R_BE_PWR_RATE_OFST_END 0x11A38
+#define R_BE_PWR_RULMT_START 0x12048
+#define R_BE_PWR_RULMT_END 0x120e4
+
+#define R_BE_PWR_BOOST 0x11A40
+#define B_BE_PWR_CTRL_SEL BIT(16)
+#define B_BE_PWR_FORCE_RATE_ON BIT(29)
+#define R_BE_PWR_OFST_RULMT 0x11A44
+#define B_BE_PWR_OFST_RULMT_DB GENMASK(17, 9)
+#define B_BE_PWR_FORCE_RU_ON BIT(18)
+#define B_BE_PWR_FORCE_RU_ENON BIT(28)
+#define R_BE_PWR_FORCE_MACID 0x11A48
+#define B_BE_PWR_FORCE_MACID_ON BIT(9)
+
+#define R_BE_PWR_REG_CTRL 0x11A50
+#define B_BE_PWR_BT_EN BIT(23)
+
+#define R_BE_PWR_COEX_CTRL 0x11A54
+#define B_BE_PWR_BT_VAL GENMASK(8, 0)
+#define B_BE_PWR_FORCE_COEX_ON GENMASK(29, 27)
+
+#define R_BE_PWR_OFST_SW 0x11AE8
+#define B_BE_PWR_OFST_SW_DB GENMASK(27, 24)
+
+#define R_BE_PWR_FTM 0x11B00
+#define R_BE_PWR_FTM_SS 0x11B04
+
#define R_BE_PWR_BY_RATE 0x11E00
#define R_BE_PWR_BY_RATE_MAX 0x11FA8
#define R_BE_PWR_LMT 0x11FAC
#define R_BE_PWR_LMT_MAX 0x12040
+#define R_BE_PWR_BY_RATE_END 0x12044
#define R_BE_PWR_RU_LMT 0x12048
#define R_BE_PWR_RU_LMT_MAX 0x120E4
@@ -7223,6 +7463,7 @@
#define RR_MOD_M_RXBB GENMASK(9, 5)
#define RR_MOD_LO_SEL BIT(1)
#define RR_MODOPT 0x01
+#define RR_TXG_SEL GENMASK(19, 17)
#define RR_MODOPT_M_TXPWR GENMASK(5, 0)
#define RR_WLSEL 0x02
#define RR_WLSEL_AG GENMASK(18, 16)
@@ -7256,6 +7497,12 @@
#define CFGCH_BAND0_2G 0
#define CFGCH_BAND0_5G 1
#define CFGCH_BAND0_6G 0
+#define RR_CFGCH_BW_V2 GENMASK(12, 10)
+#define CFGCH_BW_V2_20M 0
+#define CFGCH_BW_V2_40M 1
+#define CFGCH_BW_V2_80M 2
+#define CFGCH_BW_V2_160M 3
+#define CFGCH_BW_V2_320M 4
#define RR_CFGCH_BW GENMASK(11, 10)
#define RR_CFGCH_CH GENMASK(7, 0)
#define CFGCH_BW_20M 3
@@ -7292,6 +7539,7 @@
#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
+#define RR_TM_VAL_V1 GENMASK(7, 0)
#define RR_TM_VAL GENMASK(6, 1)
#define RR_TM2 0x43
#define RR_TM2_OFF GENMASK(19, 16)
@@ -7325,8 +7573,12 @@
#define RR_TXAC 0x5f
#define RR_TXAC_IQG GENMASK(3, 0)
#define RR_BIASA 0x60
-#define RR_BIASA_TXG GENMASK(15, 12)
#define RR_BIASA_TXA GENMASK(19, 16)
+#define RR_BIASA_TXG GENMASK(15, 12)
+#define RR_BIASD_TXA_V1 GENMASK(15, 12)
+#define RR_BIASA_TXA_V1 GENMASK(11, 8)
+#define RR_BIASD_TXG_V1 GENMASK(7, 4)
+#define RR_BIASA_TXG_V1 GENMASK(3, 0)
#define RR_BIASA_A GENMASK(2, 0)
#define RR_BIASA2 0x63
#define RR_BIASA2_LB GENMASK(4, 2)
@@ -7410,6 +7662,7 @@
#define RR_MIXER_GN GENMASK(4, 3)
#define RR_POW 0xa0
#define RR_POW_SYN GENMASK(3, 2)
+#define RR_POW_SYN_V1 GENMASK(3, 0)
#define RR_LOGEN 0xa3
#define RR_LOGEN_RPT GENMASK(19, 16)
#define RR_SX 0xaf
@@ -7436,6 +7689,8 @@
#define RR_MMD 0xd5
#define RR_MMD_RST_EN BIT(8)
#define RR_MMD_RST_SYN BIT(6)
+#define RR_SMD 0xd6
+#define RR_VCO2 BIT(19)
#define RR_IQKPLL 0xdc
#define RR_IQKPLL_MOD GENMASK(9, 8)
#define RR_SYNLUT 0xdd
@@ -7459,15 +7714,24 @@
#define RR_RFC_CKEN BIT(1)
#define R_UPD_P0 0x0000
+#define R_BBCLK 0x0000
+#define B_CLK_640M BIT(2)
#define R_RSTB_WATCH_DOG 0x000C
#define B_P0_RSTB_WATCH_DOG BIT(0)
#define B_P1_RSTB_WATCH_DOG BIT(1)
#define B_UPD_P0_EN BIT(31)
+#define R_EMLSR 0x0044
+#define B_EMLSR_PARM GENMASK(27, 12)
#define R_SPOOF_CG 0x00B4
#define B_SPOOF_CG_EN BIT(17)
+#define R_CHINFO_SEG 0x00B4
+#define B_CHINFO_SEG_LEN GENMASK(2, 0)
+#define B_CHINFO_SEG GENMASK(16, 7)
#define R_DFS_FFT_CG 0x00B8
#define B_DFS_CG_EN BIT(1)
#define B_DFS_FFT_EN BIT(0)
+#define R_CHINFO_DATA 0x00C0
+#define B_CHINFO_DATA_BITMAP GENMASK(22, 0)
#define R_ANAPAR_PW15 0x030C
#define B_ANAPAR_PW15 GENMASK(31, 24)
#define B_ANAPAR_PW15_H GENMASK(27, 24)
@@ -7497,6 +7761,23 @@
#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
+#define R_BRK_R 0x0418
+#define B_VHTMCS_LMT GENMASK(22, 21)
+#define B_HTMCS_LMT GENMASK(9, 8)
+#define R_BRK_EHT 0x0474
+#define B_RXEHT_NSS_MAX GENMASK(4, 2)
+#define R_BRK_RXEHT 0x0478
+#define B_RXEHT_N_USER_MAX GENMASK(31, 24)
+#define B_RXEHTTB_NSS_MAX GENMASK(16, 14)
+#define R_EN_SND_WO_NDP 0x047c
+#define R_EN_SND_WO_NDP_C1 0x147c
+#define B_EN_SND_WO_NDP BIT(1)
+#define R_BRK_HE 0x0480
+#define B_TB_NSS_MAX GENMASK(25, 23)
+#define B_NSS_MAX GENMASK(16, 14)
+#define B_N_USR_MAX GENMASK(13, 6)
+#define R_RXCCA_BE1 0x0520
+#define B_RXCCA_BE1_DIS BIT(0)
#define R_UPD_CLK_ADC 0x0700
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
#define B_UPD_CLK_ADC_ON BIT(24)
@@ -7543,6 +7824,7 @@
#define B_PMAC_RXMOD_MSK GENMASK(7, 4)
#define R_MAC_SEL 0x09A4
#define B_MAC_SEL_OFDM_TRI_FILTER BIT(31)
+#define B_MAC_SEL GENMASK(19, 17)
#define B_MAC_SEL_PWR_EN BIT(16)
#define B_MAC_SEL_DPD_EN BIT(10)
#define B_MAC_SEL_MOD GENMASK(4, 2)
@@ -7588,19 +7870,28 @@
#define R_PD_CTRL 0x0C3C
#define B_PD_HIT_DIS BIT(9)
#define R_IOQ_IQK_DPK 0x0C60
+#define B_IOQ_IQK_DPK_CLKEN GENMASK(1, 0)
#define B_IOQ_IQK_DPK_EN BIT(1)
#define R_GNT_BT_WGT_EN 0x0C6C
#define B_GNT_BT_WGT_EN BIT(21)
+#define R_IQK_DPK_RST 0x0C6C
+#define R_IQK_DPK_RST_C1 0x1C6C
+#define B_IQK_DPK_RST BIT(0)
#define R_TX_COLLISION_T2R_ST 0x0C70
#define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20)
#define R_TXGATING 0x0C74
#define B_TXGATING_EN BIT(4)
+#define R_TXRFC 0x0C7C
+#define R_TXRFC_C1 0x1C7C
+#define B_TXRFC_RST GENMASK(23, 21)
#define R_PD_ARBITER_OFF 0x0C80
#define B_PD_ARBITER_OFF BIT(31)
#define R_SNDCCA_A1 0x0C9C
#define B_SNDCCA_A1_EN GENMASK(19, 12)
#define R_SNDCCA_A2 0x0CA0
#define B_SNDCCA_A2_VAL GENMASK(19, 12)
+#define R_UDP_COEEF 0x0CBC
+#define B_UDP_COEEF BIT(19)
#define R_TX_COLLISION_T2R_ST_BE 0x0CC8
#define B_TX_COLLISION_T2R_ST_BE_M GENMASK(13, 8)
#define R_RXHT_MCS_LIMIT 0x0D18
@@ -7624,7 +7915,11 @@
#define R_CTLTOP 0x1008
#define B_CTLTOP_ON BIT(23)
#define B_CTLTOP_VAL GENMASK(15, 12)
+#define R_CLK_GCK 0x1008
+#define B_CLK_GCK GENMASK(24, 0)
#define R_EDCCA_RPT_SEL_BE 0x10CC
+#define R_ADC_FIFO_V1 0x10FC
+#define B_ADC_FIFO_EN_V1 GENMASK(31, 24)
#define R_S0_HW_SI_DIS 0x1200
#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P0_RXCK 0x12A0
@@ -7771,6 +8066,27 @@
#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0 BIT(13)
#define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10
#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0)
+#define R_AFEDAC0 0x2A5C
+#define B_AFEDAC0 GENMASK(31, 27)
+#define R_AFEDAC1 0x2A60
+#define B_AFEDAC1 GENMASK(2, 0)
+#define R_IQKDPK_HC 0x2AB8
+#define B_IQKDPK_HC BIT(28)
+#define R_HWSI_ADD0 0x2ADC
+#define R_HWSI_ADD1 0x2BDC
+#define B_HWSI_ADD_MASK GENMASK(11, 4)
+#define B_HWSI_ADD_CTL_MASK GENMASK(2, 0)
+#define B_HWSI_ADD_RD BIT(2)
+#define B_HWSI_ADD_POLL_MASK GENMASK(1, 0)
+#define B_HWSI_ADD_RUN BIT(1)
+#define B_HWSI_ADD_BUSY BIT(0)
+#define R_HWSI_DATA 0x2AE0
+#define B_HWSI_DATA_VAL GENMASK(27, 8)
+#define B_HWSI_DATA_ADDR GENMASK(7, 0)
+#define R_HWSI_VAL0 0x2C24
+#define R_HWSI_VAL1 0x2D24
+#define B_HWSI_VAL_RDONE BIT(31)
+#define B_HWSI_VAL_BUSY BIT(29)
#define R_P1_EN_SOUND_WO_NDP 0x2D7C
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_EDCCA_RPT_A_BE 0x2E38
@@ -7806,8 +8122,30 @@
#define R_S1_ADDCK 0x3E00
#define B_S1_ADDCK_I GENMASK(9, 0)
#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_OP1DB_A 0x40B0
+#define B_OP1DB_A GENMASK(31, 24)
+#define R_OP1DB1_A 0x40BC
+#define B_TIA10_A GENMASK(15, 0)
+#define B_TIA1_A GENMASK(15, 8)
+#define B_TIA0_A GENMASK(7, 0)
+#define R_BKOFF_A 0x40E0
+#define B_BKOFF_IBADC_A GENMASK(23, 18)
+#define R_BACKOFF_A 0x40E4
+#define B_LNA_IBADC_A GENMASK(29, 18)
+#define B_BACKOFF_LNA_A GENMASK(29, 24)
+#define B_BACKOFF_IBADC_A GENMASK(23, 18)
+#define R_RXBY_WBADC_A 0x40F4
+#define B_RXBY_WBADC_A GENMASK(14, 10)
#define R_MUIC 0x40F8
#define B_MUIC_EN BIT(0)
+#define R_BT_RXBY_WBADC_A 0x4160
+#define B_BT_RXBY_WBADC_A BIT(31)
+#define R_BT_SHARE_A 0x4164
+#define B_BT_SHARE_A BIT(0)
+#define B_BT_TRK_OFF_A BIT(1)
+#define B_BTG_PATH_A BIT(4)
+#define R_FORCE_FIR_A 0x418C
+#define B_FORCE_FIR_A GENMASK(1, 0)
#define R_DCFO 0x4264
#define B_DCFO GENMASK(7, 0)
#define R_SEG0CSI 0x42AC
@@ -7846,8 +8184,30 @@
#define R_DPD_BF 0x44a0
#define B_DPD_BF_OFDM GENMASK(16, 12)
#define B_DPD_BF_SCA GENMASK(6, 0)
+#define R_LNA_OP 0x44B0
+#define B_LNA6 GENMASK(31, 24)
+#define R_LNA_TIA 0x44BC
+#define B_TIA10_B GENMASK(15, 0)
+#define B_TIA1_B GENMASK(15, 8)
+#define B_TIA0_B GENMASK(7, 0)
+#define R_BKOFF_B 0x44E0
+#define B_BKOFF_IBADC_B GENMASK(23, 18)
+#define R_BACKOFF_B 0x44E4
+#define B_LNA_IBADC_B GENMASK(29, 18)
+#define B_BACKOFF_LNA_B GENMASK(29, 24)
+#define B_BACKOFF_IBADC_B GENMASK(23, 18)
+#define R_RXBY_WBADC_B 0x44F4
+#define B_RXBY_WBADC_B GENMASK(14, 10)
+#define R_BT_RXBY_WBADC_B 0x4560
+#define B_BT_RXBY_WBADC_B BIT(31)
+#define R_BT_SHARE_B 0x4564
+#define B_BT_SHARE_B BIT(0)
+#define B_BT_TRK_OFF_B BIT(1)
+#define B_BTG_PATH_B BIT(4)
#define R_TXPATH_SEL 0x458C
#define B_TXPATH_SEL_MSK GENMASK(31, 28)
+#define R_FORCE_FIR_B 0x458C
+#define B_FORCE_FIR_B GENMASK(1, 0)
#define R_TXPWR 0x4594
#define B_TXPWR_MSK GENMASK(30, 22)
#define R_TXNSS_MAP 0x45B4
@@ -7910,10 +8270,12 @@
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1 0x4C24
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2 0x46E8
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3 0x41C8
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1 0x4C28
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2 0x46EC
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3 0x41CC
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_RXB_INIT_V1 0x46A8
#define B_PATH0_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
@@ -7958,10 +8320,12 @@
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1 0x4CE8
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2 0x47A8
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3 0x45C8
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1 0x4CEC
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2 0x47AC
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3 0x45CC
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
@@ -8092,6 +8456,15 @@
#define B_PATH1_5MDET_SB2 BIT(8)
#define B_PATH1_5MDET_SB0 BIT(6)
#define B_PATH1_5MDET_TH GENMASK(5, 0)
+#define R_S0S1_CSI_WGT 0x4D34
+#define B_S0S1_CSI_WGT_EN BIT(0)
+#define B_S0S1_CSI_WGT_TONE_IDX GENMASK(31, 20)
+#define R_CHINFO_ELM_SRC 0x4D84
+#define B_CHINFO_ELM_BITMAP GENMASK(22, 0)
+#define B_CHINFO_SRC GENMASK(31, 30)
+#define R_CHINFO_TYPE_SCAL 0x4D88
+#define B_CHINFO_TYPE GENMASK(2, 1)
+#define B_CHINFO_SCAL BIT(8)
#define R_RPL_BIAS_COMP 0x4DF0
#define B_RPL_BIAS_COMP_MASK GENMASK(7, 0)
#define R_RPL_PATHAB 0x4E0C
@@ -8239,14 +8612,90 @@
#define B_S0_DACKQ8_K GENMASK(15, 8)
#define R_DCFO_WEIGHT_V1 0x6244
#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28)
+#define R_DAC_CLK 0x625C
+#define B_DAC_CLK GENMASK(31, 30)
#define R_DCFO_OPT_V1 0x6260
#define B_DCFO_OPT_EN_V1 BIT(17)
+#define R_TXFCTR 0x627C
+#define B_TXFCTR_THD GENMASK(19, 10)
+#define R_TXSCALE 0x6284
+#define B_TXFCTR_EN BIT(19)
+#define R_PCOEFF01 0x6684
+#define B_PCOEFF01 GENMASK(23, 0)
+#define R_PCOEFF23 0x6688
+#define B_PCOEFF23 GENMASK(23, 0)
+#define R_PCOEFF45 0x668c
+#define B_PCOEFF45 GENMASK(23, 0)
+#define R_PCOEFF67 0x6690
+#define B_PCOEFF67 GENMASK(23, 0)
+#define R_PCOEFF89 0x6694
+#define B_PCOEFF89 GENMASK(23, 0)
+#define R_PCOEFFAB 0x6698
+#define B_PCOEFFAB GENMASK(23, 0)
+#define R_PCOEFFCD 0x669c
+#define B_PCOEFFCD GENMASK(23, 0)
+#define R_PCOEFFEF 0x66a0
+#define B_PCOEFFEF GENMASK(23, 0)
+#define R_MGAIN_BIAS 0x672c
+#define B_MGAIN_BIAS_BW20 GENMASK(3, 0)
+#define B_MGAIN_BIAS_BW40 GENMASK(7, 4)
+#define R_CCK_RPL_OFST 0x6750
+#define B_CCK_RPL_OFST GENMASK(7, 0)
+#define R_BK_FC0INV 0x6758
+#define B_BK_FC0INV GENMASK(18, 0)
+#define R_CCK_FC0INV 0x675c
+#define B_CCK_FC0INV GENMASK(18, 0)
#define R_SEG0R_EDCCA_LVL_BE 0x69EC
#define R_SEG0R_PPDU_LVL_BE 0x69F0
#define R_SEGSND 0x6A14
#define B_SEGSND_EN BIT(31)
+#define R_DBCC 0x6B48
+#define B_DBCC_EN BIT(0)
+#define R_FC0 0x6B4C
+#define B_BW40_2XFFT BIT(31)
+#define B_FC0 GENMASK(12, 0)
+#define R_FC0INV_SBW 0x6B50
+#define B_SMALLBW GENMASK(31, 30)
+#define B_RX_BT_SG0 GENMASK(25, 22)
+#define B_RX_1RCCA GENMASK(17, 14)
+#define B_FC0_INV GENMASK(6, 0)
+#define R_ANT_CHBW 0x6B54
+#define B_ANT_BT_SHARE BIT(16)
+#define B_CHBW_BW GENMASK(14, 12)
+#define B_CHBW_PRICH GENMASK(11, 8)
+#define B_ANT_RX_SG0 GENMASK(3, 0)
+#define R_SLOPE 0x6B6C
+#define B_EHT_RATE_TH GENMASK(31, 28)
+#define B_SLOPE_B GENMASK(27, 14)
+#define B_SLOPE_A GENMASK(13, 0)
+#define R_SC_CORNER 0x6B70
+#define B_SC_CORNER GENMASK(10, 0)
+#define R_MAG_A 0x6BF4
+#define B_MGA_AEND GENMASK(31, 24)
+#define R_MAG_AB 0x6BF8
+#define B_BY_SLOPE GENMASK(31, 24)
+#define B_MAG_AB GENMASK(23, 0)
+#define R_BEDGE 0x6BFC
+#define B_EHT_MCS14 BIT(31)
+#define B_HE_RATE_TH GENMASK(30, 27)
+#define R_BEDGE2 0x6C00
+#define B_EHT_MCS15 BIT(31)
+#define B_HT_VHT_TH GENMASK(11, 0)
+#define R_BEDGE3 0x6C04
+#define B_TB_EN BIT(23)
+#define B_HEMU_EN BIT(21)
+#define B_HEERSU_EN BIT(19)
+#define B_EHTTB_EN BIT(15)
+#define B_BEDGE_CFG GENMASK(1, 0)
+#define R_SU_PUNC 0x6C08
+#define B_SU_PUNC_EN BIT(1)
+#define R_BEDGE5 0x6C10
+#define B_HWGEN_EN BIT(25)
+#define B_PWROFST_COMP BIT(20)
#define R_RPL_BIAS_COMP1 0x6DF0
#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
+#define R_DBCC_FA 0x703C
+#define B_DBCC_FA BIT(12)
#define R_P1_TSSI_ALIM1 0x7630
#define B_P1_TSSI_ALIM1 GENMASK(29, 0)
#define B_P1_TSSI_ALIM11 GENMASK(29, 20)
@@ -8389,8 +8838,12 @@
#define B_PRT_COM_RXBB_V1 GENMASK(4, 0)
#define B_PRT_COM_DONE BIT(0)
#define R_COEF_SEL 0x8104
+#define R_COEF_SEL_C1 0x8204
#define B_COEF_SEL_IQC BIT(0)
+#define B_COEF_SEL_IQC_V1 GENMASK(1, 0)
#define B_COEF_SEL_MDPD BIT(8)
+#define B_COEF_SEL_MDPD_V1 GENMASK(9, 8)
+#define B_COEF_SEL_EN BIT(31)
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
#define B_IQK_RES_K BIT(28)
@@ -8412,8 +8865,10 @@
#define B_RFGAIN_BND GENMASK(4, 0)
#define R_CFIR_MAP 0x8150
#define R_CFIR_LUT 0x8154
+#define R_CFIR_LUT_C1 0x8254
#define B_CFIR_LUT_SEL BIT(8)
#define B_CFIR_LUT_SET BIT(4)
+#define B_CFIR_LUT_G5 BIT(5)
#define B_CFIR_LUT_G3 BIT(3)
#define B_CFIR_LUT_G2 BIT(2)
#define B_CFIR_LUT_GP_V1 GENMASK(2, 0)
@@ -8626,6 +9081,35 @@
#define B_DACKN0_V GENMASK(21, 14)
#define R_DACKN1_CTL 0xC224
#define B_DACKN1_V GENMASK(21, 14)
+#define R_GAIN_MAP0 0xE44C
+#define B_GAIN_MAP0_EN BIT(0)
+#define R_GAIN_MAP1 0xE54C
+#define B_GAIN_MAP1_EN BIT(0)
+#define R_GOTX_IQKDPK_C0 0xE464
+#define R_GOTX_IQKDPK_C1 0xE564
+#define B_GOTX_IQKDPK GENMASK(28, 27)
+#define R_IQK_DPK_PRST 0xE4AC
+#define R_IQK_DPK_PRST_C1 0xE5AC
+#define B_IQK_DPK_PRST BIT(27)
+#define R_TXPWR_RSTA 0xE60C
+#define B_TXPWR_RSTA BIT(16)
+#define R_TSSI_PWR_P0 0xE610
+#define R_TSSI_PWR_P1 0xE710
+#define B_TSSI_CONT_EN BIT(3)
+#define R_TSSI_MAP_OFST_P0 0xE620
+#define R_TSSI_MAP_OFST_P1 0xE720
+#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
+#define B_TSSI_MAP_OFST_CCK GENMASK(26, 18)
+#define R_TXAGC_REF0_P0 0xE628
+#define R_TXAGC_REF0_P1 0xE728
+#define B_TXAGC_REF0_OFDM_DBM GENMASK(8, 0)
+#define B_TXAGC_REF0_CCK_DBM GENMASK(17, 9)
+#define B_TXAGC_REF0_OFDM_CW GENMASK(26, 18)
+#define R_TXAGC_REF1_P0 0xE62C
+#define R_TXAGC_REF1_P1 0xE72C
+#define B_TXAGC_REF1_CCK_CW GENMASK(8, 0)
+#define R_TXPWR_RSTB 0xE70C
+#define B_TXPWR_RSTB BIT(16)
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 5c167a9278ce..83db0a686ee2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -901,7 +901,7 @@ static void rtw8851b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -987,7 +987,7 @@ next:
static
void rtw8851b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -2299,6 +2299,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.enable_bb_rf = rtw8851b_mac_enable_bb_rf,
.disable_bb_rf = rtw8851b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8851b_bb_reset,
.bb_sethw = rtw8851b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2309,7 +2310,9 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.read_phycap = rtw8851b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = rtw8851b_rfe_gpio,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8851b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8851b_rfk_channel,
.rfk_band_changed = rtw8851b_rfk_band_changed,
.rfk_scan = rtw8851b_rfk_scan,
@@ -2334,6 +2337,12 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8851b_btc_set_rfe,
.btc_init_cfg = rtw8851b_btc_init_cfg,
@@ -2394,7 +2403,9 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
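The chip-info hunks in this patch replace the boolean support_bw160 with a support_bandwidths bitmap keyed by NL80211_CHAN_WIDTH_*, which extends cleanly to the 320 MHz width the BE generation introduces. A capability check then reduces to a bit test (sketch; the helper name is invented):

	static inline bool chip_supports_bw(const struct rtw89_chip_info *chip,
					    enum nl80211_chan_width width)
	{
		return chip->support_bandwidths & BIT(width);
	}
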
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
index 8cb5bde8f625..522883c8dfb9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
@@ -5345,7 +5345,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
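In these limit tables, 127 (S8_MAX) appears to act as the 'no limit programmed' sentinel; the hunks in this file fill the RTW89_IC entries for the upper 5 GHz channel indices (47-52) with the same values as the FCC column, plausibly enabling U-NII-4 operation under the Canadian domain, consistent with the chip's support_unii4 flag. A hedged sketch of consulting such a table (indexing order per the array declaration; the fallback is illustrative, not the driver's exact lookup):

	/* lmt[bw][ntx][rate_section][bf][regd][ch], s8 values;
	 * 127 == "not specified". */
	s8 lmt = rtw89_8851b_txpwr_lmt_5g[bw][ntx][rs][bf][regd][ch];
	if (lmt == 127)
		lmt = rtw89_8851b_txpwr_lmt_5g[bw][ntx][rs][bf][RTW89_WW][ch];
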
@@ -5353,7 +5353,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -5361,7 +5361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -5793,7 +5793,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -5801,7 +5801,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 76,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 76,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -5809,7 +5809,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 76,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 76,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -6361,7 +6361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 84,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 84,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -6369,7 +6369,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 84,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 84,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -6649,7 +6649,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 74,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -7975,7 +7975,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 42,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -7983,7 +7983,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -7991,7 +7991,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 40,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -8423,7 +8423,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -8431,7 +8431,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -8439,7 +8439,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 52,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -8871,7 +8871,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 64,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -8879,7 +8879,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 64,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -8887,7 +8887,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
@@ -11055,7 +11055,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -11063,7 +11063,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -11071,7 +11071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -11503,7 +11503,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -11511,7 +11511,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 74,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 74,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -11519,7 +11519,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 74,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 74,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -12071,7 +12071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 80,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 80,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -12079,7 +12079,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 80,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 80,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -12359,7 +12359,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 72,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 72,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -13685,7 +13685,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 40,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 40,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -13693,7 +13693,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -13701,7 +13701,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 38,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 38,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -14133,7 +14133,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -14141,7 +14141,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -14149,7 +14149,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 50,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 50,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -14581,7 +14581,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 62,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 62,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -14589,7 +14589,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 62,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 62,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -14597,7 +14597,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ade69bd30fc8..ca1374a71727 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -25,6 +25,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 0c76c52ce22c..8e808ded5d52 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2043,6 +2043,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.enable_bb_rf = rtw89_mac_enable_bb_rf,
.disable_bb_rf = rtw89_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852a_bb_reset,
.bb_sethw = rtw8852a_bb_sethw,
.read_rf = rtw89_phy_read_rf,
@@ -2053,7 +2054,9 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.read_phycap = rtw8852a_read_phycap,
.fem_setup = rtw8852a_fem_setup,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852a_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852a_rfk_channel,
.rfk_band_changed = rtw8852a_rfk_band_changed,
.rfk_scan = rtw8852a_rfk_scan,
@@ -2078,6 +2081,12 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2130,7 +2139,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index f1e890bde049..7c6ffedb77e2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index de887a35f3fb..19454766f3de 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -988,7 +988,7 @@ static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -1086,7 +1086,7 @@ next:
static
void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -2468,6 +2468,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.enable_bb_rf = rtw8852b_mac_enable_bb_rf,
.disable_bb_rf = rtw8852b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852b_bb_reset,
.bb_sethw = rtw8852b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2478,7 +2479,9 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.read_phycap = rtw8852b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852b_rfk_channel,
.rfk_band_changed = rtw8852b_rfk_band_changed,
.rfk_scan = rtw8852b_rfk_scan,
@@ -2503,6 +2506,12 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852b_btc_set_rfe,
.btc_init_cfg = rtw8852b_btc_init_cfg,
@@ -2564,7 +2573,9 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
index d2ce16e98bac..07945d06dc59 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
@@ -16936,7 +16936,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][8] = 52,
[0][0][1][0][RTW89_WW][10] = 52,
[0][0][1][0][RTW89_WW][12] = 52,
- [0][0][1][0][RTW89_WW][14] = 1,
+ [0][0][1][0][RTW89_WW][14] = 52,
[0][0][1][0][RTW89_WW][15] = 52,
[0][0][1][0][RTW89_WW][17] = 52,
[0][0][1][0][RTW89_WW][19] = 52,
@@ -16954,10 +16954,10 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][42] = 28,
[0][0][1][0][RTW89_WW][44] = 28,
[0][0][1][0][RTW89_WW][46] = 28,
- [0][0][1][0][RTW89_WW][48] = 78,
- [0][0][1][0][RTW89_WW][50] = 78,
- [0][0][1][0][RTW89_WW][52] = 78,
- [0][1][1][0][RTW89_WW][0] = 1,
+ [0][0][1][0][RTW89_WW][48] = 76,
+ [0][0][1][0][RTW89_WW][50] = 76,
+ [0][0][1][0][RTW89_WW][52] = 76,
+ [0][1][1][0][RTW89_WW][0] = 30,
[0][1][1][0][RTW89_WW][2] = 32,
[0][1][1][0][RTW89_WW][4] = 30,
[0][1][1][0][RTW89_WW][6] = 30,
@@ -16982,9 +16982,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_WW][42] = 16,
[0][1][1][0][RTW89_WW][44] = 16,
[0][1][1][0][RTW89_WW][46] = 16,
- [0][1][1][0][RTW89_WW][48] = 56,
- [0][1][1][0][RTW89_WW][50] = 56,
- [0][1][1][0][RTW89_WW][52] = 56,
+ [0][1][1][0][RTW89_WW][48] = 50,
+ [0][1][1][0][RTW89_WW][50] = 50,
+ [0][1][1][0][RTW89_WW][52] = 50,
[0][0][2][0][RTW89_WW][0] = 42,
[0][0][2][0][RTW89_WW][2] = 42,
[0][0][2][0][RTW89_WW][4] = 42,
@@ -17038,9 +17038,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_WW][42] = 16,
[0][1][2][0][RTW89_WW][44] = 16,
[0][1][2][0][RTW89_WW][46] = 16,
- [0][1][2][0][RTW89_WW][48] = 58,
- [0][1][2][0][RTW89_WW][50] = 58,
- [0][1][2][0][RTW89_WW][52] = 58,
+ [0][1][2][0][RTW89_WW][48] = 50,
+ [0][1][2][0][RTW89_WW][50] = 52,
+ [0][1][2][0][RTW89_WW][52] = 52,
[0][1][2][1][RTW89_WW][0] = 14,
[0][1][2][1][RTW89_WW][2] = 14,
[0][1][2][1][RTW89_WW][4] = 14,
@@ -17066,9 +17066,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_WW][42] = 4,
[0][1][2][1][RTW89_WW][44] = 4,
[0][1][2][1][RTW89_WW][46] = 4,
- [0][1][2][1][RTW89_WW][48] = 58,
- [0][1][2][1][RTW89_WW][50] = 58,
- [0][1][2][1][RTW89_WW][52] = 58,
+ [0][1][2][1][RTW89_WW][48] = 50,
+ [0][1][2][1][RTW89_WW][50] = 52,
+ [0][1][2][1][RTW89_WW][52] = 52,
[1][0][2][0][RTW89_WW][1] = 42,
[1][0][2][0][RTW89_WW][5] = 42,
[1][0][2][0][RTW89_WW][9] = 52,
@@ -17095,8 +17095,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_WW][36] = 50,
[1][1][2][0][RTW89_WW][39] = 16,
[1][1][2][0][RTW89_WW][43] = 16,
- [1][1][2][0][RTW89_WW][47] = 68,
- [1][1][2][0][RTW89_WW][51] = 66,
+ [1][1][2][0][RTW89_WW][47] = 62,
+ [1][1][2][0][RTW89_WW][51] = 62,
[1][1][2][1][RTW89_WW][1] = 16,
[1][1][2][1][RTW89_WW][5] = 16,
[1][1][2][1][RTW89_WW][9] = 28,
@@ -17109,8 +17109,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_WW][36] = 36,
[1][1][2][1][RTW89_WW][39] = 4,
[1][1][2][1][RTW89_WW][43] = 4,
- [1][1][2][1][RTW89_WW][47] = 68,
- [1][1][2][1][RTW89_WW][51] = 66,
+ [1][1][2][1][RTW89_WW][47] = 62,
+ [1][1][2][1][RTW89_WW][51] = 62,
[2][0][2][0][RTW89_WW][3] = 42,
[2][0][2][0][RTW89_WW][11] = 52,
[2][0][2][0][RTW89_WW][18] = 52,
@@ -17227,7 +17227,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][14] = 78,
[0][0][1][0][RTW89_CN][14] = 58,
[0][0][1][0][RTW89_QATAR][14] = 58,
- [0][0][1][0][RTW89_UK][14] = 1,
+ [0][0][1][0][RTW89_UK][14] = 58,
[0][0][1][0][RTW89_FCC][15] = 76,
[0][0][1][0][RTW89_ETSI][15] = 58,
[0][0][1][0][RTW89_MKK][15] = 76,
@@ -17435,7 +17435,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 78,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 76,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CHILE][48] = 127,
@@ -17447,7 +17447,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 78,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 76,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CHILE][50] = 127,
@@ -17459,7 +17459,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 78,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 76,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CHILE][52] = 127,
@@ -17479,7 +17479,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 50,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
- [0][1][1][0][RTW89_UK][0] = 1,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][2] = 68,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 48,
@@ -17771,7 +17771,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][48] = 56,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
- [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 50,
[0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
[0][1][1][0][RTW89_CHILE][48] = 127,
@@ -17783,7 +17783,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][50] = 56,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
- [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 50,
[0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
[0][1][1][0][RTW89_CHILE][50] = 127,
@@ -17795,7 +17795,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][52] = 56,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
- [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 50,
[0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
[0][1][1][0][RTW89_CHILE][52] = 127,
@@ -18107,7 +18107,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 78,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 78,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CHILE][48] = 127,
@@ -18119,7 +18119,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 78,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 78,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CHILE][50] = 127,
@@ -18131,7 +18131,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 78,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 78,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CHILE][52] = 127,
@@ -18443,7 +18443,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][48] = 58,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
- [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 50,
[0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
[0][1][2][0][RTW89_CHILE][48] = 127,
@@ -18455,7 +18455,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][50] = 58,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
- [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 52,
[0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
[0][1][2][0][RTW89_CHILE][50] = 127,
@@ -18467,7 +18467,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][52] = 58,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
- [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 52,
[0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
[0][1][2][0][RTW89_CHILE][52] = 127,
@@ -18779,7 +18779,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][48] = 58,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
- [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 50,
[0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
[0][1][2][1][RTW89_CHILE][48] = 127,
@@ -18791,7 +18791,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][50] = 58,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
- [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 52,
[0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
[0][1][2][1][RTW89_CHILE][50] = 127,
@@ -18803,7 +18803,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][52] = 58,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
- [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 52,
[0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
[0][1][2][1][RTW89_CHILE][52] = 127,
@@ -18959,7 +18959,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 78,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 78,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CHILE][47] = 127,
@@ -18971,7 +18971,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 70,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 78,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CHILE][51] = 127,
@@ -19127,7 +19127,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][47] = 68,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
- [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 62,
[1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
[1][1][2][0][RTW89_CHILE][47] = 127,
@@ -19139,7 +19139,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][51] = 66,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
- [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 62,
[1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
[1][1][2][0][RTW89_CHILE][51] = 127,
@@ -19295,7 +19295,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][47] = 68,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
- [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 62,
[1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
[1][1][2][1][RTW89_CHILE][47] = 127,
@@ -19307,7 +19307,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][51] = 66,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
- [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 62,
[1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
[1][1][2][1][RTW89_CHILE][51] = 127,
@@ -19391,7 +19391,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 64,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CHILE][49] = 127,
@@ -19475,7 +19475,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_FCC][49] = 58,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
- [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 66,
[2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
[2][1][2][0][RTW89_CHILE][49] = 127,
@@ -19559,7 +19559,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_FCC][49] = 58,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
- [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 66,
[2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
[2][1][2][1][RTW89_CHILE][49] = 127,
@@ -20723,9 +20723,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_WW][42] = 14,
[0][1][RTW89_WW][44] = 14,
[0][1][RTW89_WW][46] = 14,
- [0][1][RTW89_WW][48] = 20,
- [0][1][RTW89_WW][50] = 20,
- [0][1][RTW89_WW][52] = 20,
+ [0][1][RTW89_WW][48] = 16,
+ [0][1][RTW89_WW][50] = 16,
+ [0][1][RTW89_WW][52] = 16,
[1][0][RTW89_WW][0] = 34,
[1][0][RTW89_WW][2] = 34,
[1][0][RTW89_WW][4] = 34,
@@ -20779,9 +20779,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][42] = 16,
[1][1][RTW89_WW][44] = 16,
[1][1][RTW89_WW][46] = 16,
- [1][1][RTW89_WW][48] = 32,
- [1][1][RTW89_WW][50] = 32,
- [1][1][RTW89_WW][52] = 32,
+ [1][1][RTW89_WW][48] = 28,
+ [1][1][RTW89_WW][50] = 30,
+ [1][1][RTW89_WW][52] = 30,
[2][0][RTW89_WW][0] = 44,
[2][0][RTW89_WW][2] = 44,
[2][0][RTW89_WW][4] = 44,
@@ -20835,9 +20835,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][42] = 16,
[2][1][RTW89_WW][44] = 16,
[2][1][RTW89_WW][46] = 16,
- [2][1][RTW89_WW][48] = 44,
- [2][1][RTW89_WW][50] = 44,
- [2][1][RTW89_WW][52] = 44,
+ [2][1][RTW89_WW][48] = 40,
+ [2][1][RTW89_WW][50] = 40,
+ [2][1][RTW89_WW][52] = 40,
[0][0][RTW89_FCC][0] = 52,
[0][0][RTW89_ETSI][0] = 24,
[0][0][RTW89_MKK][0] = 26,
@@ -21141,7 +21141,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 32,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CHILE][48] = 127,
@@ -21153,7 +21153,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 32,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CHILE][50] = 127,
@@ -21165,7 +21165,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 32,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CHILE][52] = 127,
@@ -21477,7 +21477,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
- [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_IC][48] = 16,
[0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
[0][1][RTW89_CHILE][48] = 127,
@@ -21489,7 +21489,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
- [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_IC][50] = 16,
[0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
[0][1][RTW89_CHILE][50] = 127,
@@ -21501,7 +21501,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][52] = 20,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
- [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_IC][52] = 16,
[0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
[0][1][RTW89_CHILE][52] = 127,
@@ -21813,7 +21813,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 44,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 54,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CHILE][48] = 127,
@@ -21825,7 +21825,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 44,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 54,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CHILE][50] = 127,
@@ -21837,7 +21837,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 44,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CHILE][52] = 127,
@@ -22149,7 +22149,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][48] = 32,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
- [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_IC][48] = 28,
[1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
[1][1][RTW89_CHILE][48] = 127,
@@ -22161,7 +22161,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][50] = 32,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
- [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_IC][50] = 30,
[1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
[1][1][RTW89_CHILE][50] = 127,
@@ -22173,7 +22173,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][52] = 32,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
- [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_IC][52] = 30,
[1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
[1][1][RTW89_CHILE][52] = 127,
@@ -22486,7 +22486,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
[2][0][RTW89_IC][48] = 127,
- [2][0][RTW89_KCC][48] = 127,
+ [2][0][RTW89_KCC][48] = 66,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CHILE][48] = 127,
[2][0][RTW89_UKRAINE][48] = 127,
@@ -22498,7 +22498,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
[2][0][RTW89_IC][50] = 127,
- [2][0][RTW89_KCC][50] = 127,
+ [2][0][RTW89_KCC][50] = 66,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CHILE][50] = 127,
[2][0][RTW89_UKRAINE][50] = 127,
@@ -22510,7 +22510,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
[2][0][RTW89_IC][52] = 127,
- [2][0][RTW89_KCC][52] = 127,
+ [2][0][RTW89_KCC][52] = 66,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CHILE][52] = 127,
[2][0][RTW89_UKRAINE][52] = 127,
@@ -22821,7 +22821,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][48] = 44,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
- [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_IC][48] = 40,
[2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
[2][1][RTW89_CHILE][48] = 127,
@@ -22833,7 +22833,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][50] = 44,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
- [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_IC][50] = 40,
[2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
[2][1][RTW89_CHILE][50] = 127,
@@ -22845,7 +22845,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][52] = 44,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
- [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_IC][52] = 40,
[2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
[2][1][RTW89_CHILE][52] = 127,
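A pattern worth noting in the table edits above: 127 behaves as a "no limit programmed" sentinel, and the RTW89_WW (worldwide) entries track the most conservative limit among the domains that define one, so when the IC and KCC columns for channels 48-52 gain real values, the corresponding RTW89_WW entries drop to match (for example, WW[48] moves from 78 to 76 as IC[48] moves from 127 to 76). A sketch of that apparent invariant, assuming the sentinel value; neither the macro nor the helper below comes from the driver:

#define TXPWR_LMT_UNDEFINED 127	/* assumed sentinel, matching the 127 entries above */

/* Worldwide limit as the minimum over domains that define a limit. */
static s8 worldwide_limit(const s8 *by_domain, int n_domains)
{
	s8 ww = TXPWR_LMT_UNDEFINED;
	int i;

	for (i = 0; i < n_domains; i++)
		if (by_domain[i] != TXPWR_LMT_UNDEFINED && by_domain[i] < ww)
			ww = by_domain[i];

	return ww;
}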
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 920b20bbcfb7..ed71364e6437 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 8618d0204f66..ca8547fbd70e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -842,7 +842,7 @@ static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -2813,6 +2813,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.enable_bb_rf = rtw8852c_mac_enable_bb_rf,
.disable_bb_rf = rtw8852c_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852c_bb_reset,
.bb_sethw = rtw8852c_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2823,7 +2824,9 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.read_phycap = rtw8852c_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852c_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852c_rfk_channel,
.rfk_band_changed = rtw8852c_rfk_band_changed,
.rfk_scan = rtw8852c_rfk_scan,
@@ -2848,6 +2851,12 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
.resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
.h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852c_btc_set_rfe,
.btc_init_cfg = rtw8852c_btc_init_cfg,
@@ -2902,7 +2911,10 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
- .support_bw160 = true,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 4592de3dbd94..583ea673a4f5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -35,6 +35,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
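Each PCI info block in this series gains a .check_rx_tag field, set false for these 802.11ax chips; presumably the WiFi 7 parts enable it so the RX path can validate the sequence tag carried in RX descriptors. A sketch of how such a flag is typically consumed; the helper and its tag bookkeeping are assumptions, only the field name comes from this diff:

static bool rx_tag_ok(const struct rtw89_pci_info *info,
		      u16 got_tag, u16 *expect_tag)
{
	if (!info->check_rx_tag)	/* older chips: skip the check */
		return true;

	if (got_tag != *expect_tag)	/* stale or duplicate descriptor */
		return false;

	(*expect_tag)++;
	return true;
}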
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 0e7300cc6d9e..823f0d840df9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2023 Realtek Corporation
*/
+#include "coex.h"
#include "debug.h"
#include "efuse.h"
#include "fw.h"
@@ -9,12 +10,16 @@
#include "phy.h"
#include "reg.h"
#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+#include "util.h"
#define RTW8922A_FW_FORMAT_MAX 0
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME ".bin"
+#define HE_N_USER_MAX_8922A 4
+
static const struct rtw89_hfc_ch_cfg rtw8922a_hfc_chcfg_pcie[] = {
{2, 1641, grp_0}, /* ACH 0 */
{2, 1641, grp_0}, /* ACH 1 */
@@ -43,6 +48,8 @@ static const struct rtw89_hfc_pub_cfg rtw8922a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8922a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
&rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
+ [RTW89_QTA_DBCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
[RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_prec_cfg_c2,
RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
@@ -54,6 +61,11 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
&rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
&rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
&rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
+ [RTW89_QTA_DBCC] = {RTW89_QTA_DBCC, &rtw89_mac_size.wde_size0_v1,
+ &rtw89_mac_size.ple_size0_v1, &rtw89_mac_size.wde_qt0_v1,
+ &rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
+ &rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
+ &rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
[RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4_v1,
&rtw89_mac_size.ple_size3_v1, &rtw89_mac_size.wde_qt4,
&rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt9,
@@ -63,6 +75,31 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
NULL},
};
+static const u32 rtw8922a_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_BE_H2CREG_DATA0, R_BE_H2CREG_DATA1, R_BE_H2CREG_DATA2,
+ R_BE_H2CREG_DATA3
+};
+
+static const u32 rtw8922a_c2h_regs[RTW89_H2CREG_MAX] = {
+ R_BE_C2HREG_DATA0, R_BE_C2HREG_DATA1, R_BE_C2HREG_DATA2,
+ R_BE_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8922a_page_regs = {
+ .hci_fc_ctrl = R_BE_HCI_FC_CTRL,
+ .ch_page_ctrl = R_BE_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_BE_CH0_PAGE_CTRL,
+ .ach_page_info = R_BE_CH0_PAGE_INFO,
+ .pub_page_info3 = R_BE_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_BE_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_BE_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_BE_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_BE_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_BE_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_BE_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_BE_WP_PAGE_INFO1,
+};
+
static const struct rtw89_reg_imr rtw8922a_imr_dmac_regs[] = {
{R_BE_DISP_HOST_IMR, B_BE_DISP_HOST_IMR_CLR, B_BE_DISP_HOST_IMR_SET},
{R_BE_DISP_CPU_IMR, B_BE_DISP_CPU_IMR_CLR, B_BE_DISP_CPU_IMR_SET},
@@ -119,6 +156,51 @@ static const struct rtw89_imr_table rtw8922a_imr_cmac_table = {
.n_regs = ARRAY_SIZE(rtw8922a_imr_cmac_regs),
};
+static const struct rtw89_rrsr_cfgs rtw8922a_rrsr_cfgs = {
+ .ref_rate = {R_BE_TRXPTCL_RESP_1, B_BE_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_BE_PTCL_RRSR1, B_BE_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8922a_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD_V2,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1,
+ .bmode_pd_reg = R_BMODE_PDTH_EN_V2,
+ .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1,
+ .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V2,
+ .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static const struct rtw89_edcca_regs rtw8922a_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_BE,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_PPDU_LVL_BE,
+ .ppdu_mask = B_EDCCA_LVL_MSK1,
+ .rpt_a = R_EDCCA_RPT_A_BE,
+ .rpt_b = R_EDCCA_RPT_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .rpt_sel_be = R_EDCCA_RPTREG_SEL_BE,
+ .rpt_sel_be_mask = B_EDCCA_RPTREG_SEL_BE_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST_BE,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_BE_M,
+};
+
static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_SYS] = {.offset = 0x00000, .size = 0x310},
[RTW89_EFUSE_BLOCK_RF] = {.offset = 0x10000, .size = 0x240},
@@ -130,6 +212,36 @@ static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_ADIE] = {.offset = 0x70000, .size = 0x10},
};
+static void rtw8922a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x1, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x0, phy_idx);
+ }
+}
+
static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
@@ -273,6 +385,9 @@ static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_FEN_BB_IP_RSTN |
B_BE_FEN_BBPLAT_RSTB);
+ if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
+ rtw89_efuse_read_fw_secure_be(rtwdev);
+
return 0;
}
@@ -574,6 +689,32 @@ static void rtw8922a_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG_V1, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA_V1, pabias_5g);
+ }
+}
+
static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
u8 *phycap_map)
{
@@ -591,6 +732,31 @@ static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pad_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pad_bias_2g, pad_bias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] no PG, do nothing\n");
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pad_bias_2g = u8_get_bits(info->pad_bias_trim[i], GENMASK(3, 0));
+ pad_bias_5g = u8_get_bits(info->pad_bias_trim[i], GENMASK(7, 4));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pad_bias_2g, pad_bias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXG_V1, pad_bias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXA_V1, pad_bias_5g);
+ }
+}
+
static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
{
rtw8922a_phycap_parsing_thermal_trim(rtwdev, phycap_map);
@@ -600,6 +766,1440 @@ static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
return 0;
}
+static void rtw8922a_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8922a_pa_bias_trim(rtwdev);
+ rtw8922a_pad_bias_trim(rtwdev);
+}
+
+static void rtw8922a_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_BE_TX_SUB_BAND_VALUE, mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_BE_TXRATE_CHK, mac_idx);
+ u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_RFMOD, mac_idx);
+ u8 txsb20 = 0, txsb40 = 0, txsb80 = 0;
+ u8 rf_mod_val, chk_rate_mask;
+ u32 txsb;
+ u32 reg;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ txsb80 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_80);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_80:
+ txsb40 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb20 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_mod_val = BE_WMAC_RFMOD_160M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK) |
+ u32_encode_bits(txsb80, B_BE_TXSB_80M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_mod_val = BE_WMAC_RFMOD_80M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_mod_val = BE_WMAC_RFMOD_40M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ rf_mod_val = BE_WMAC_RFMOD_20M;
+ txsb = 0;
+ break;
+ }
+
+ if (txsb20 <= BE_PRI20_BITMAP_MAX)
+ txsb |= u32_encode_bits(BIT(txsb20), B_BE_PRI20_BITMAP_MASK);
+
+ rtw89_write8_mask(rtwdev, rf_mod, B_BE_WMAC_RFMOD_MASK, rf_mod_val);
+ rtw89_write32(rtwdev, sub_carr, txsb);
+
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ chk_rate_mask = B_BE_BAND_MODE;
+ break;
+ case RTW89_BAND_5G:
+ case RTW89_BAND_6G:
+ chk_rate_mask = B_BE_CHECK_CCK_EN | B_BE_RTS_LIMIT_IN_OFDM6;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", chan->band_type);
+ return;
+ }
+
+ rtw89_write8_clr(rtwdev, chk_rate, B_BE_BAND_MODE | B_BE_CHECK_CCK_EN |
+ B_BE_RTS_LIMIT_IN_OFDM6);
+ rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask);
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_320:
+ case RTW89_CHANNEL_WIDTH_160:
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_40:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x41);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x41);
+ break;
+ default:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x3f);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x3e);
+ break;
+ }
+}
+
+static const u32 rtw8922a_sco_barker_threshold[14] = {
+ 0x1fe4f, 0x1ff5e, 0x2006c, 0x2017b, 0x2028a, 0x20399, 0x204a8, 0x205b6,
+ 0x206c5, 0x207d4, 0x208e3, 0x209f2, 0x20b00, 0x20d8a
+};
+
+static const u32 rtw8922a_sco_cck_threshold[14] = {
+ 0x2bdac, 0x2bf21, 0x2c095, 0x2c209, 0x2c37e, 0x2c4f2, 0x2c666, 0x2c7db,
+ 0x2c94f, 0x2cac3, 0x2cc38, 0x2cdac, 0x2cf21, 0x2d29e
+};
+
+static int rtw8922a_ctrl_sco_cck(struct rtw89_dev *rtwdev,
+ u8 primary_ch, enum rtw89_bandwidth bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ch_element;
+
+ if (primary_ch >= 14)
+ return -EINVAL;
+
+ ch_element = primary_ch - 1;
+
+ rtw89_phy_write32_idx(rtwdev, R_BK_FC0INV, B_BK_FC0INV,
+ rtw8922a_sco_barker_threshold[ch_element],
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CCK_FC0INV, B_CCK_FC0INV,
+ rtw8922a_sco_cck_threshold[ch_element],
+ phy_idx);
+
+ return 0;
+}
+
+struct rtw8922a_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_g_mask;
+ u32 gain_a_mask;
+};
+
+static const struct rtw89_reg_def rpl_comp_bw160[RTW89_BW20_SC_160M] = {
+ { .addr = 0x41E8, .mask = 0xFF00},
+ { .addr = 0x41E8, .mask = 0xFF0000},
+ { .addr = 0x41E8, .mask = 0xFF000000},
+ { .addr = 0x41EC, .mask = 0xFF},
+ { .addr = 0x41EC, .mask = 0xFF00},
+ { .addr = 0x41EC, .mask = 0xFF0000},
+ { .addr = 0x41EC, .mask = 0xFF000000},
+ { .addr = 0x41F0, .mask = 0xFF}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw80[RTW89_BW20_SC_80M] = {
+ { .addr = 0x41F4, .mask = 0xFF},
+ { .addr = 0x41F4, .mask = 0xFF00},
+ { .addr = 0x41F4, .mask = 0xFF0000},
+ { .addr = 0x41F4, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw40[RTW89_BW20_SC_40M] = {
+ { .addr = 0x41F0, .mask = 0xFF0000},
+ { .addr = 0x41F0, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw20[RTW89_BW20_SC_20M] = {
+ { .addr = 0x41F0, .mask = 0xFF00}
+};
+
+static const struct rtw8922a_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a8, 0x44a8}, .gain_a = {0x4078, 0x4478},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+};
+
+static const struct rtw8922a_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4054, 0x4454}, .gain_a = {0x4054, 0x4454},
+ .gain_g_mask = 0x7FC0000, .gain_a_mask = 0x1FF},
+ { .gain_g = {0x4058, 0x4458}, .gain_a = {0x4054, 0x4454},
+ .gain_g_mask = 0x1FF, .gain_a_mask = 0x3FE00 },
+};
+
+struct rtw8922a_bb_gain_bypass {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_mask_g;
+ u32 gain_mask_a;
+};
+
+static void rtw8922a_set_rpl_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ u32 reg_path_ofst = 0;
+ u32 mask;
+ s32 val;
+ u32 reg;
+ int i;
+
+ if (path == RF_PATH_B)
+ reg_path_ofst = 0x400;
+
+ for (i = 0; i < RTW89_BW20_SC_160M; i++) {
+ reg = rpl_comp_bw160[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw160[i].mask;
+ val = gain->rpl_ofst_160[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++) {
+ reg = rpl_comp_bw80[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw80[i].mask;
+ val = gain->rpl_ofst_80[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_40M; i++) {
+ reg = rpl_comp_bw40[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw40[i].mask;
+ val = gain->rpl_ofst_40[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_20M; i++) {
+ reg = rpl_comp_bw20[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw20[i].mask;
+ val = gain->rpl_ofst_20[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_lna_tia_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ enum rtw89_phy_bb_bw_be bw_type;
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ bw_type = chan->band_width <= RTW89_CHANNEL_WIDTH_40 ?
+ RTW89_BB_BW_20_40 : RTW89_BB_BW_80_160_320;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_lna[i].gain_g[path];
+ mask = bb_gain_lna[i].gain_g_mask;
+ } else {
+ reg = bb_gain_lna[i].gain_a[path];
+ mask = bb_gain_lna[i].gain_a_mask;
+ }
+ val = gain->lna_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_tia[i].gain_g[path];
+ mask = bb_gain_tia[i].gain_g_mask;
+ } else {
+ reg = bb_gain_tia[i].gain_a[path];
+ mask = bb_gain_tia[i].gain_a_mask;
+ }
+ val = gain->tia_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_lna_tia_gain(rtwdev, chan, path, phy_idx);
+ rtw8922a_set_rpl_gain(rtwdev, chan, path, phy_idx);
+}
+
+static void rtw8922a_set_rx_gain_normal_cck(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ s8 value = -gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK]; /* S(8,2) */
+ u8 fraction = value & 0x3;
+
+ if (fraction) {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20,
+ (0x4 - fraction) << 1);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40,
+ (0x4 - fraction) << 1);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 1 + 0xdc);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20, 0);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40, 0);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 0xdc);
+ }
+}
+
+static void rtw8922a_set_rx_gain_normal_ofdm(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ static const u32 rssi_tb_bias_comp[2] = {0x41f8, 0x45f8};
+ static const u32 rssi_tb_ext_comp[2] = {0x4208, 0x4608};
+ static const u32 rssi_ofst_addr[2] = {0x40c8, 0x44c8};
+ static const u32 rpl_bias_comp[2] = {0x41e8, 0x45e8};
+ static const u32 rpl_ext_comp[2] = {0x41f8, 0x45f8};
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_band;
+ s8 v1, v2, v3;
+ s32 value;
+
+ gain_band = rtw89_subband_to_gain_offset_band_of_ofdm(chan->subband_type);
+ value = gain->offset[path][gain_band];
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[path], 0xff000000, value + 0xF8);
+
+ value *= -4;
+ v1 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v1;
+ v2 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v2;
+ v3 = clamp_t(s32, value, S8_MIN, S8_MAX);
+
+ rtw89_phy_write32_mask(rtwdev, rpl_bias_comp[path], 0xff, v1);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff, v2);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff00, v3);
+
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_bias_comp[path], 0xff0000, v1);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff0000, v2);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff000000, v3);
+}
+
+static void rtw8922a_set_rx_gain_normal(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ if (!gain->offset_valid)
+ return;
+
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw8922a_set_rx_gain_normal_cck(rtwdev, chan, path);
+
+ rtw8922a_set_rx_gain_normal_ofdm(rtwdev, chan, path);
+}
+
+static void rtw8922a_set_cck_parameters(struct rtw89_dev *rtwdev, u8 central_ch,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (central_ch == 14) {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3b13ff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x1c42de, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0xfdb0ad, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0xf60f6e, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfd8f92, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0x02d011, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0x01c02c, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xfff00a, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3a63ca, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x2a833f, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0x1491f8, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0x03c0b0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfccff1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0xfccfc3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0xfebfdc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xffdff7, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 band_sel[2] = {0x4160, 0x4560};
+ u16 central_freq = chan->freq;
+ u8 central_ch = chan->channel;
+ u8 band = chan->band_type;
+ bool is_2g = band == RTW89_BAND_2G;
+ u8 chan_idx;
+ u8 path;
+ u8 sco;
+
+ if (!central_freq) {
+ rtw89_warn(rtwdev, "Invalid central_freq\n");
+ return;
+ }
+
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_B, phy_idx);
+
+ for (path = RF_PATH_A; path < BB_PATH_NUM_8922A; path++)
+ rtw89_phy_write32_idx(rtwdev, band_sel[path], BIT(26), is_2g, phy_idx);
+
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_A);
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_B);
+
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_FC0, central_freq, phy_idx);
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_FC0_INV, sco, phy_idx);
+
+ if (band == RTW89_BAND_2G)
+ rtw8922a_set_cck_parameters(rtwdev, central_ch, phy_idx);
+
+ chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
+}
+
+static void
+rtw8922a_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_sb, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri_sb:%d)\n", bw,
+ pri_sb);
+ break;
+ }
+
+ if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 0, phy_idx);
+}
+
+static u32 rtw8922a_spur_freq(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
+static void rtw8922a_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s32 freq_diff, csi_idx, csi_tone_idx;
+ u32 spur_freq;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN,
+ 0, phy_idx);
+ return;
+ }
+
+ freq_diff = (spur_freq - chan->freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_TONE_IDX,
+ csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN, 1, phy_idx);
+}
+
+static const struct rtw89_nbi_reg_def rtw8922a_nbi_reg_def[] = {
+ [RF_PATH_A] = {
+ .notch1_idx = {0x41a0, 0xFF},
+ .notch1_frac_idx = {0x41a0, 0xC00},
+ .notch1_en = {0x41a0, 0x1000},
+ .notch2_idx = {0x41ac, 0xFF},
+ .notch2_frac_idx = {0x41ac, 0xC00},
+ .notch2_en = {0x41ac, 0x1000},
+ },
+ [RF_PATH_B] = {
+ .notch1_idx = {0x45a0, 0xFF},
+ .notch1_frac_idx = {0x45a0, 0xC00},
+ .notch1_en = {0x45a0, 0x1000},
+ .notch2_idx = {0x45ac, 0xFF},
+ .notch2_frac_idx = {0x45ac, 0xC00},
+ .notch2_en = {0x45ac, 0x1000},
+ },
+};
+
+static void rtw8922a_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_nbi_reg_def *nbi = &rtw8922a_nbi_reg_def[path];
+ s32 nbi_frac_idx, nbi_frac_tone_idx;
+ s32 nbi_idx, nbi_tone_idx;
+ bool notch2_chk = false;
+ u32 spur_freq, fc;
+ s32 freq_diff;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ return;
+ }
+
+ fc = chan->freq;
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160) {
+ fc = (spur_freq > fc) ? fc + 40 : fc - 40;
+ if ((fc > spur_freq &&
+ chan->channel < chan->primary_channel) ||
+ (fc < spur_freq &&
+ chan->channel > chan->primary_channel))
+ notch2_chk = true;
+ }
+
+ freq_diff = (spur_freq - fc) * 1000000;
+ nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5,
+ &nbi_frac_idx);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_20) {
+ s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
+ } else {
+ u16 tone_para = (chan->band_width == RTW89_CHANNEL_WIDTH_40) ?
+ 128 : 256;
+
+ s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
+ }
+ nbi_frac_tone_idx =
+ s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_idx.addr,
+ nbi->notch2_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_frac_idx.addr,
+ nbi->notch2_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_idx.addr,
+ nbi->notch1_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_frac_idx.addr,
+ nbi->notch1_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ }
+}
+
+static void rtw8922a_spur_elimination(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_csi_tone_idx(rtwdev, chan, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_B, phy_idx);
+}
+
+static void rtw8922a_ctrl_afe_dac(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ u32 cr_ofst = 0x0;
+
+ if (path == RF_PATH_B)
+ cr_ofst = 0x100;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xE);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x7);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xD);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x6);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct rtw89_reg2_def bb_mcu0_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static const struct rtw89_reg2_def bb_mcu1_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static void rtw8922a_bbmcu_cr_init(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_reg2_def *reg;
+ int size;
+ int i;
+
+ if (phy_idx == RTW89_PHY_0) {
+ reg = bb_mcu0_init_reg;
+ size = ARRAY_SIZE(bb_mcu0_init_reg);
+ } else {
+ reg = bb_mcu1_init_reg;
+ size = ARRAY_SIZE(bb_mcu1_init_reg);
+ }
+
+ for (i = 0; i < size; i++, reg++)
+ rtw89_bbmcu_write32(rtwdev, reg->addr, reg->data, phy_idx);
+}
+
+static const u32 dmac_sys_mask[2] = {B_BE_DMAC_BB_PHY0_MASK, B_BE_DMAC_BB_PHY1_MASK};
+static const u32 bbrst_mask[2] = {B_BE_FEN_BBPLAT_RSTB, B_BE_FEN_BB1PLAT_RSTB};
+static const u32 glbrst_mask[2] = {B_BE_FEN_BB_IP_RSTN, B_BE_FEN_BB1_IP_RSTN};
+static const u32 mcu_bootrdy_mask[2] = {B_BE_BOOT_RDY0, B_BE_BOOT_RDY1};
+
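+/* Pre-init sequence: set up the DMAC/BB interface, pulse the global BB IP
+ * reset, set the boot-ready bit only for PHY1, release the BB MCU memory
+ * power-down, then load the BB MCU init table.
+ */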
+static void rtw8922a_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 rdy = 0;
+
+ if (phy_idx == RTW89_PHY_1)
+ rdy = 1;
+
+ rtw89_write32_mask(rtwdev, R_BE_DMAC_SYS_CR32B, dmac_sys_mask[phy_idx], 0x7FF9);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x1);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx], rdy);
+ rtw89_write32_mask(rtwdev, R_BE_MEM_PWR_CTRL, B_BE_MEM_BBMCU0_DS_V1, 0);
+
+ fsleep(1);
+ rtw8922a_bbmcu_cr_init(rtwdev, phy_idx);
+}
+
+static void rtw8922a_bb_postinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx == RTW89_PHY_0)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx]);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx]);
+
+ rtw89_phy_write32_set(rtwdev, R_BBCLK, B_CLK_640M);
+ rtw89_phy_write32_clr(rtwdev, R_TXSCALE, B_TXFCTR_EN);
+ rtw89_phy_set_phy_regs(rtwdev, R_TXFCTR, B_TXFCTR_THD, 0x200);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_EHT_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_HE_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_HT_VHT_TH, 0xAAA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_EHT_MCS14, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_EHT_MCS15, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_EHTTB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEERSU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEMU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_TB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SU_PUNC, B_SU_PUNC_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_HWGEN_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_PWROFST_COMP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_BY_SLOPE, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_A, B_MGA_AEND, 0xe0);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_MAG_AB, 0xe0c000);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_A, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_B, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SC_CORNER, B_SC_CORNER, 0x200);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x1, phy_idx);
+}
+
+static void rtw8922a_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ bool en, enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1,
+ B_RXCCA_BE1_DIS, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ }
+}
+
+static int rtw8922a_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_reg2_def path_com_cr[] = {
+ {0x11A00, 0x21C86900},
+ {0x11A04, 0x00E4E433},
+ {0x11A08, 0x39390CC9},
+ {0x11A0C, 0x4E433240},
+ {0x11A10, 0x90CC900E},
+ {0x11A14, 0x00240393},
+ {0x11A18, 0x201C8600},
+ };
+ int ret = 0;
+ u32 reg;
+ int i;
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL, 0x0, phy_idx);
+
+ if (phy_idx == RTW89_PHY_1 && !rtwdev->dbcc_en)
+ return 0;
+
+ if (tx_path == RF_PATH_A) {
+ path_com_cr[0].data = 0x21C82900;
+ path_com_cr[1].data = 0x00E4E431;
+ path_com_cr[2].data = 0x39390C49;
+ path_com_cr[3].data = 0x4E431240;
+ path_com_cr[4].data = 0x90C4900E;
+ path_com_cr[6].data = 0x201C8200;
+ } else if (tx_path == RF_PATH_B) {
+ path_com_cr[0].data = 0x21C04900;
+ path_com_cr[1].data = 0x00E4E032;
+ path_com_cr[2].data = 0x39380C89;
+ path_com_cr[3].data = 0x4E032240;
+ path_com_cr[4].data = 0x80C8900E;
+ path_com_cr[6].data = 0x201C0400;
+ } else if (tx_path == RF_PATH_AB) {
+ path_com_cr[0].data = 0x21C86900;
+ path_com_cr[1].data = 0x00E4E433;
+ path_com_cr[2].data = 0x39390CC9;
+ path_com_cr[3].data = 0x4E433240;
+ path_com_cr[4].data = 0x90CC900E;
+ path_com_cr[6].data = 0x201C8600;
+ } else {
+ ret = -EINVAL;
+ }
+
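+ /* Note: even when ret is -EINVAL, the default 2-TX table is still written */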
+ for (i = 0; i < ARRAY_SIZE(path_com_cr); i++) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, path_com_cr[i].addr, phy_idx);
+ rtw89_write32(rtwdev, reg, path_com_cr[i].data);
+ }
+
+ return ret;
+}
+
+static void rtw8922a_bb_reset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+}
+
+static int rtw8922a_cfg_rx_nss_limit(struct rtw89_dev *rtwdev, u8 rx_nss,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rx_nss == 1) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else if (rx_nss == 2) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
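+/* Pulse the TX power (TSSI) reset bits; in MLO 1+1 single-RF mode only the
+ * path owned by this PHY is reset, otherwise both paths are.
+ */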
+static void rtw8922a_tssi_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+}
+
+static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 rx_nss = (rx_path == RF_PATH_AB) ? 2 : 1;
+
+ /* Set to 0 first to avoid abnormal EDCCA report */
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x0, phy_idx);
+
+ if (rx_path == RF_PATH_A) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 1, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_B) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 2, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_AB) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 3, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
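+/* Map the MLO/DBCC mode onto the DBCC enable and FA routing bits, adjust the
+ * AFE DAC for 2+0 single-RF mode, and step B_EMLSR_PARM through the
+ * mode-specific sequence.
+ */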
+static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x0);
+ } else if (mode == MLO_2_PLUS_0_1RF || mode == MLO_0_PLUS_2_1RF ||
+ mode == MLO_DBCC_NOT_SUPPORT) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x1);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_A);
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_B);
+ } else {
+ rtw89_warn(rtwdev, "unsupported MLO mode %d\n", mode);
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else if (mode == MLO_0_PLUS_2_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ } else if ((mode == MLO_1_PLUS_1_1RF) || (mode == DBCC_LEGACY)) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x7BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3AAB);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x0);
+ }
+
+ return 0;
+}
+
+static void rtw8922a_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ u32 reg;
+
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP, B_EN_SND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP_C1, B_EN_SND_WO_NDP);
+
+ rtw89_write32_mask(rtwdev, R_BE_PWR_BOOST, B_BE_PWR_CTRL_SEL, 0);
+ if (rtwdev->dbcc_en) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, reg, B_BE_PWR_CTRL_SEL, 0);
+ }
+
+ rtw8922a_ctrl_mlo(rtwdev, rtwdev->mlo_dbcc_mode);
+}
+
+static void rtw8922a_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (cck_en) {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 1, phy_idx);
+ }
+}
+
+static void rtw8922a_set_channel_bb(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ bool cck_en = chan->band_type == RTW89_BAND_2G;
+ u8 pri_sb = chan->pri_sb_idx;
+
+ if (cck_en)
+ rtw8922a_ctrl_sco_cck(rtwdev, chan->primary_channel,
+ chan->band_width, phy_idx);
+
+ rtw8922a_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8922a_ctrl_bw(rtwdev, pri_sb, chan->band_width, phy_idx);
+ rtw8922a_ctrl_cck_en(rtwdev, cck_en, phy_idx);
+ rtw8922a_spur_elimination(rtwdev, chan, phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, RF_PATH_AB, phy_idx);
+}
+
+static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ }
+}
+
+static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_mlo_dbcc_mode mode)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw8922a_ctrl_mlo(rtwdev, mode);
+}
+
+static void rtw8922a_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8922a_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8922a_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
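+/* Toggle the per-path DFS enable bit (BIT(1) of 0x2800; path B registers
+ * sit at a +0x100 offset).
+ */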
+static void rtw8922a_dfs_en_idx(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_rf_path path,
+ bool en)
+{
+ u32 path_ofst = (path == RF_PATH_B) ? 0x100 : 0x0;
+
+ if (en)
+ rtw89_phy_write32_idx(rtwdev, 0x2800 + path_ofst, BIT(1), 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, 0x2800 + path_ofst, BIT(1), 0,
+ phy_idx);
+}
+
+static void rtw8922a_dfs_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_A, en);
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_B, en);
+}
+
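+/* B_ADC_FIFO_EN_V1 holds per-path disable bits, so enabling a path means
+ * clearing its bit and disabling it means setting the bit.
+ */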
+static void rtw8922a_adc_en_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool en)
+{
+ u32 val;
+
+ val = rtw89_phy_read32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1);
+
+ if (en) {
+ if (path == RF_PATH_A)
+ val &= ~0x1;
+ else
+ val &= ~0x2;
+ } else {
+ if (path == RF_PATH_A)
+ val |= 0x1;
+ else
+ val |= 0x2;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1, val);
+}
+
+static void rtw8922a_adc_en(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ else
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ } else {
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ }
+}
+
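+/* On entry, quiesce the PHY (stop scheduled TX, PPDU status, DFS, TSSI and
+ * ADC) before pulling the BB reset; on exit, undo the sequence and resume TX.
+ */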
+static
+void rtw8922a_hal_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_mac_idx mac_idx,
+ enum rtw89_band band, u32 *tx_en, bool enter)
+{
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
+ rtw8922a_dfs_en(rtwdev, false, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8922a_adc_en(rtwdev, false, phy_idx);
+ fsleep(40);
+ rtw8922a_bb_reset_en(rtwdev, band, false, phy_idx);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
+ rtw8922a_adc_en(rtwdev, true, phy_idx);
+ rtw8922a_dfs_en(rtwdev, true, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8922a_bb_reset_en(rtwdev, band, true, phy_idx);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, *tx_en);
+ }
+}
+
+static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enter) {
+ rtw8922a_pre_set_channel_bb(rtwdev, phy_idx);
+ rtw8922a_pre_set_channel_rf(rtwdev, phy_idx);
+ }
+
+ rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
+
+ if (!enter) {
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
+ }
+}
+
+static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(rfk_mcc, 0, sizeof(*rfk_mcc));
+}
+
+static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
+
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+}
+
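+/* Poll RF register 0x00 on each path under calibration until it leaves
+ * mode 2 (back in RX, per the debug message below) before running RFK.
+ */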
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
+ 2, 5000, false, rtwdev, path, 0x00,
+ RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, RF_AB);
+
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, phy_idx, 32);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
+}
+
+static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+}
+
+static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+}
+
+static void rtw8922a_rfk_track(struct rtw89_dev *rtwdev)
+{
+}
+
+static void rtw8922a_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 ref_ofdm = 0;
+ s16 ref_cck = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_OFDM, ref_ofdm);
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_CCK, ref_cck);
+}
+
+static void rtw8922a_bb_tx_triangular(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ctrl = en ? 0x1 : 0x0;
+
+ rtw89_phy_write32_idx(rtwdev, R_BEDGE3, B_BEDGE_CFG, ctrl, phy_idx);
+}
+
+static void rtw8922a_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_tx_shape *tx_shape = &rfe_parms->tx_shape;
+ u8 tx_shape_idx;
+ u8 band, regd;
+
+ band = chan->band_type;
+ regd = rtw89_regd_get(rtwdev, band);
+ tx_shape_idx = (*tx_shape->lmt)[band][RTW89_RS_OFDM][regd];
+
+ if (tx_shape_idx == 0)
+ rtw8922a_bb_tx_triangular(rtwdev, false, phy_idx);
+ else
+ rtw8922a_bb_tx_triangular(rtwdev, true, phy_idx);
+}
+
+static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8922a_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
+}
+
+static void rtw8922a_ctrl_trx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path, u8 tx_nss,
+ enum rtw89_rf_path rx_path, u8 rx_nss)
+{
+ enum rtw89_phy_idx phy_idx;
+
+ for (phy_idx = RTW89_PHY_0; phy_idx <= RTW89_PHY_1; phy_idx++) {
+ rtw8922a_ctrl_tx_path_tmac(rtwdev, tx_path, phy_idx);
+ rtw8922a_ctrl_rx_path_tmac(rtwdev, rx_path, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x34, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x2a2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x26, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x2a30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x26, phy_idx);
+ }
+}
+
+static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ntx_path = RF_PATH_AB;
+ u32 tx_en0, tx_en1;
+
+ if (hal->antenna_tx == RF_A)
+ ntx_path = RF_PATH_A;
+ else if (hal->antenna_tx == RF_B)
+ ntx_path = RF_PATH_B;
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, true);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+ &tx_en1, true);
+
+ rtw8922a_ctrl_trx_path(rtwdev, ntx_path, 2, RF_PATH_AB, 2);
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, false);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+ &tx_en1, false);
+}
+
+static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ int th;
+
+ /* read thermal only if debugging */
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_CFO | RTW89_DBG_RFK_TRACK))
+ return 80;
+
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ th = rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL_V1);
+ th += (s8)info->thermal_trim[rf_path];
+
+ return clamp_t(int, th, 0, U8_MAX);
+}
+
+static void rtw8922a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 chan_idx = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
+ if (chan_idx == 0)
+ return;
+
+ rtw89_decode_chan_idx(rtwdev, chan_idx, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
+ status->signal =
+ RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+ rtw89_write32(rtwdev, R_BE_DMAC_SYS_CR32B, 0x7FF97FF9);
+
+ return 0;
+}
+
+static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
@@ -610,10 +2210,51 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
#endif
static const struct rtw89_chip_ops rtw8922a_chip_ops = {
+ .enable_bb_rf = rtw8922a_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8922a_mac_disable_bb_rf,
+ .bb_preinit = rtw8922a_bb_preinit,
+ .bb_postinit = rtw8922a_bb_postinit,
+ .bb_reset = rtw8922a_bb_reset,
+ .bb_sethw = rtw8922a_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v2,
+ .write_rf = rtw89_phy_write_rf_v2,
+ .set_channel = rtw8922a_set_channel,
+ .set_channel_help = rtw8922a_set_channel_help,
.read_efuse = rtw8922a_read_efuse,
.read_phycap = rtw8922a_read_phycap,
+ .fem_setup = NULL,
+ .rfe_gpio = NULL,
+ .rfk_hw_init = rtw8922a_rfk_hw_init,
+ .rfk_init = rtw8922a_rfk_init,
+ .rfk_init_late = rtw8922a_rfk_init_late,
+ .rfk_channel = rtw8922a_rfk_channel,
+ .rfk_band_changed = rtw8922a_rfk_band_changed,
+ .rfk_scan = rtw8922a_rfk_scan,
+ .rfk_track = rtw8922a_rfk_track,
+ .power_trim = rtw8922a_power_trim,
+ .set_txpwr = rtw8922a_set_txpwr,
+ .set_txpwr_ctrl = rtw8922a_set_txpwr_ctrl,
+ .init_txpwr_unit = NULL,
+ .get_thermal = rtw8922a_get_thermal,
+ .ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8922a_query_ppdu,
+ .ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = NULL,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
+ .query_rxdesc = rtw89_core_query_rxdesc_v2,
+ .fill_txdesc = rtw89_core_fill_txdesc_v2,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx_v2,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx_v2,
+ .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v2,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl_g7,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7,
+ .h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7,
+ .h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
};
const struct rtw89_chip_info rtw8922a_chip_info = {
@@ -650,11 +2291,16 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
+ .dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
- .support_chanctx_num = 1,
+ .support_chanctx_num = 2,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
@@ -665,7 +2311,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.acam_num = 128,
.bcam_num = 20,
.scam_num = 32,
- .bacam_num = 8,
+ .bacam_num = 24,
.bacam_dynamic_num = 8,
.bacam_ver = RTW89_BACAM_V1,
.ppdu_max_usr = 16,
@@ -683,10 +2329,18 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
.low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_G7,
.hci_func_en_addr = R_BE_HCI_FUNC_EN,
.h2c_desc_size = sizeof(struct rtw89_rxdesc_short_v2),
.txwd_body_size = sizeof(struct rtw89_txwd_body_v2),
.txwd_info_size = sizeof(struct rtw89_txwd_info_v2),
+ .h2c_ctrl_reg = R_BE_H2CREG_CTRL,
+ .h2c_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8},
+ .h2c_regs = rtw8922a_h2c_regs,
+ .c2h_ctrl_reg = R_BE_C2HREG_CTRL,
+ .c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
+ .c2h_regs = rtw8922a_c2h_regs,
+ .page_regs = &rtw8922a_page_regs,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = NULL,
@@ -694,9 +2348,11 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.imr_info = NULL,
.imr_dmac_table = &rtw8922a_imr_dmac_table,
.imr_cmac_table = &rtw8922a_imr_cmac_table,
+ .rrsr_cfgs = &rtw8922a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2},
.bss_clr_map_reg = R_BSS_CLR_MAP_V2,
.dma_ch_mask = 0,
+ .edcca_regs = &rtw8922a_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8922a,
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
new file mode 100644
index 000000000000..2a371829268c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include "chan.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+
+static void rtw8922a_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk_man[2] = {R_TSSI_PWR_P0, R_TSSI_PWR_P1};
+
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 0);
+ else
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 1);
+}
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
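+/* Rewrite both CFGCH images (RF 0x18 and its V1 copy) on every path in
+ * kpath: clear the band/bandwidth/channel fields, encode the new values and
+ * write them back, settling 100 us after each write.
+ */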
+static
+void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band,
+ enum rtw89_bandwidth bw)
+{
+ const u32 rf_addr[2] = {RR_CFGCH, RR_CFGCH_V1};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 rf_reg[RF_PATH_NUM_8922A][2];
+ u8 synpath;
+ u32 rf18;
+ u8 kpath;
+ u8 path;
+ u8 i;
+
+ rf_reg[RF_PATH_A][0] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_A][1] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[1], RFREG_MASK);
+ rf_reg[RF_PATH_B][0] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_B][1] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[1], RFREG_MASK);
+
+ kpath = rtw89_phy_get_kpath(rtwdev, phy);
+ synpath = rtw89_phy_get_syn_sel(rtwdev, phy);
+
+ rf18 = rtw89_read_rf(rtwdev, synpath, RR_CFGCH, RFREG_MASK);
+ if (rf18 == INV_RF_DATA) {
+ rtw89_warn(rtwdev, "[RFK] Invalid RF18 value\n");
+ return;
+ }
+
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ for (i = 0; i < 2; i++) {
+ if (rf_reg[path][i] == INV_RF_DATA) {
+ rtw89_warn(rtwdev,
+ "[RFK] Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+
+ rf_reg[path][i] &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BW |
+ RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
+ else
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ default:
+ break;
+ case RTW89_BAND_5G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_5G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_5G, RR_CFGCH_BAND0);
+ break;
+ case RTW89_BAND_6G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_6G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_6G, RR_CFGCH_BAND0);
+ break;
+ }
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_40M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_80M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_160M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_320M, RR_CFGCH_BW_V2);
+ break;
+ }
+
+ rtw89_write_rf(rtwdev, path, rf_addr[i],
+ RFREG_MASK, rf_reg[path][i]);
+ fsleep(100);
+ }
+ }
+
+ if (hal->cv != CHIP_CAV)
+ return;
+
+ if (band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c990);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ } else {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c190);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ }
+}
+
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_ctl_band_ch_bw(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
+}
+
+enum _rf_syn_pow {
+ RF_SYN_ON_OFF,
+ RF_SYN_OFF_ON,
+ RF_SYN_ALLON,
+ RF_SYN_ALLOFF,
+};
+
+static void rtw8922a_set_syn01_cav(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ }
+}
+
+static void rtw8922a_set_syn01_cbv(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ }
+}
+
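+/* Dispatch SYN0/SYN1 power control to the CAV or CBV register layout
+ * depending on the chip version.
+ */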
+static void rtw8922a_set_syn01(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "SYN config=%d\n", syn);
+
+ if (hal->cv == CHIP_CAV)
+ rtw8922a_set_syn01_cav(rtwdev, syn);
+ else
+ rtw8922a_set_syn01_cbv(rtwdev, syn);
+}
+
+static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
+{
+ u32 tmp;
+
+ if (idx > 2) {
+ rtw89_warn(rtwdev, "[DBCC][ERROR]indx is out of limit!! index(%d)", idx);
+ return;
+ }
+
+ if (kpath & RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G5, tmp);
+ }
+
+ if (kpath & RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G5, tmp);
+ }
+}
+
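+/* Cache the channel of the active sub-entity (entity 1 while preparing MCC)
+ * in the RFK MCC info and point both paths at the matching calibration
+ * table.
+ */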
+static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ const struct rtw89_chan *chan;
+ enum rtw89_entity_mode mode;
+ u8 s0_tbl, s1_tbl;
+ u8 tbl_sel;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ switch (mode) {
+ case RTW89_ENTITY_MODE_MCC_PREPARE:
+ sub_entity_idx = RTW89_SUB_ENTITY_1;
+ tbl_sel = 1;
+ break;
+ default:
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ tbl_sel = 0;
+ break;
+ }
+
+ chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+
+ rfk_mcc->ch[tbl_sel] = chan->channel;
+ rfk_mcc->band[tbl_sel] = chan->band_type;
+ rfk_mcc->bw[tbl_sel] = chan->band_width;
+ rfk_mcc->table_idx = tbl_sel;
+
+ s0_tbl = tbl_sel;
+ s1_tbl = tbl_sel;
+
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_A, s0_tbl);
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_B, s1_tbl);
+}
+
+static void rtw8922a_rfk_mlo_ctrl(struct rtw89_dev *rtwdev)
+{
+ enum _rf_syn_pow syn_pow;
+
+ if (!rtwdev->dbcc_en)
+ goto set_rfk_reload;
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_0_PLUS_2_1RF:
+ syn_pow = RF_SYN_OFF_ON;
+ break;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_1_PLUS_1_2RF:
+ case MLO_2_PLUS_0_1RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ case MLO_DBCC_NOT_SUPPORT:
+ default:
+ syn_pow = RF_SYN_ON_OFF;
+ break;
+ case MLO_1_PLUS_1_1RF:
+ case DBCC_LEGACY:
+ syn_pow = RF_SYN_ALLON;
+ break;
+ }
+
+ rtw8922a_set_syn01(rtwdev, syn_pow);
+
+set_rfk_reload:
+ rtw8922a_chlk_reload(rtwdev);
+}
+
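+/* Read-modify-write three XTAL SI registers to set up the RF PLL; each step
+ * silently bails out on a bus error.
+ */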
+static void rtw8922a_rfk_pll_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+ u8 tmp;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_PLL_1, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL_1, tmp | 0xf8, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_APBT, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_APBT, tmp & ~0x60, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, tmp | 0x38, 0xFF);
+ if (ret)
+ return;
+}
+
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->dbcc_en)
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+
+ rtw8922a_rfk_pll_init(rtwdev);
+}
+
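+/* Before switching channel under DBCC, power the synthesizer that serves the
+ * PHY being tuned (both when operating MLO 1+1) and allow 1 ms to settle.
+ */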
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ bool mlo_1_1;
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ mlo_1_1 = rtw89_is_mlo_1_1(rtwdev);
+ if (mlo_1_1)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ALLON);
+ else if (phy_idx == RTW89_PHY_0)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ON_OFF);
+ else
+ rtw8922a_set_syn01(rtwdev, RF_SYN_OFF_ON);
+
+ fsleep(1000);
+}
+
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
new file mode 100644
index 000000000000..66bdd57c1eea
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#ifndef __RTW89_8922A_RFK_H__
+#define __RTW89_8922A_RFK_H__
+
+#include "core.h"
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev);
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 7b3d98d2c402..9f46fb166105 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_DEF,
.rx_ring_eq_is_full = true,
+ .check_rx_tag = true,
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 5c7ca36c09b6..4c17936795b6 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -519,7 +519,7 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
return ret;
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c assoc cmac tbl\n");
return ret;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 05890536e353..211fa25b9a78 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -740,7 +740,7 @@ u16 rsi_get_connected_channel(struct ieee80211_vif *vif)
return 0;
bss = &vif->bss_conf;
- channel = bss->chandef.chan;
+ channel = bss->chanreq.oper.chan;
if (!channel)
return 0;
@@ -759,7 +759,7 @@ static void rsi_switch_channel(struct rsi_hw *adapter,
if (!vif)
return;
- channel = vif->bss_conf.chandef.chan;
+ channel = vif->bss_conf.chanreq.oper.chan;
if (!channel)
return;
@@ -1957,6 +1957,10 @@ static int rsi_mac80211_resume(struct ieee80211_hw *hw)
#endif
static const struct ieee80211_ops mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rsi_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rsi_mac80211_start,
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 10a465686439..dccc139cabb2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -232,17 +232,17 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
if (!usb_reg_buf)
return status;
- usb_reg_buf[0] = (cpu_to_le32(value) & 0x00ff);
- usb_reg_buf[1] = (cpu_to_le32(value) & 0xff00) >> 8;
- usb_reg_buf[2] = (cpu_to_le32(value) & 0x00ff0000) >> 16;
- usb_reg_buf[3] = (cpu_to_le32(value) & 0xff000000) >> 24;
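+ /* 'value' is host-endian here; serialize it LSB-first by hand */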
+ usb_reg_buf[0] = value & 0x00ff;
+ usb_reg_buf[1] = (value & 0xff00) >> 8;
+ usb_reg_buf[2] = (value & 0x00ff0000) >> 16;
+ usb_reg_buf[3] = (value & 0xff000000) >> 24;
status = usb_control_msg(usbdev,
usb_sndctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
RSI_USB_REQ_OUT,
- ((cpu_to_le32(reg) & 0xffff0000) >> 16),
- (cpu_to_le32(reg) & 0xffff),
+ (reg & 0xffff0000) >> 16,
+ reg & 0xffff,
(void *)usb_reg_buf,
len,
USB_CTRL_SET_TIMEOUT);
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 537caf9d914a..a904602f02ce 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -144,13 +144,13 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0);
- chan0 = vif_ch0->bss_conf.chandef.chan;
+ chan0 = vif_ch0->bss_conf.chanreq.oper.chan;
}
if (wdev_to_wvif(wvif->wdev, 1)) {
struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1);
struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1);
- chan1 = vif_ch1->bss_conf.chandef.chan;
+ chan1 = vif_ch1->bss_conf.chanreq.oper.chan;
}
if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) {
if (chan0->hw_value == chan1->hw_value) {
@@ -344,6 +344,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
const int pairwise_cipher_suite_size = 4 / sizeof(u16);
const int akm_suite_size = 4 / sizeof(u16);
+ int ret = -EINVAL;
const u16 *ptr;
if (unlikely(!skb))
@@ -352,22 +353,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
if (unlikely(!ptr))
- return -EINVAL;
+ goto free_skb;
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + pairwise_cipher_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + akm_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
- return 0;
+ ret = 0;
+
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
}
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index 4c30b5772ce0..00c4731d8f8e 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
@@ -178,12 +178,15 @@ static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self)
return ret;
}
+/* Like the rest of the driver, this only supports one device per system */
+static struct gpio_desc *cw1200_reset;
+static struct gpio_desc *cw1200_powerup;
+
static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -196,16 +199,21 @@ static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
{
- /* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
+ /* Ensure I/Os are pulled low (reset is active low) */
+ cw1200_reset = devm_gpiod_get_optional(NULL, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cw1200_reset)) {
+ pr_err("could not get CW1200 SDIO reset GPIO\n");
+ return PTR_ERR(cw1200_reset);
}
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
+ gpiod_set_consumer_name(cw1200_reset, "cw1200_wlan_reset");
+ cw1200_powerup = devm_gpiod_get_optional(NULL, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(cw1200_powerup)) {
+ pr_err("could not get CW1200 SDIO powerup GPIO\n");
+ return PTR_ERR(cw1200_powerup);
}
- if (pdata->reset || pdata->powerup)
+ gpiod_set_consumer_name(cw1200_powerup, "cw1200_wlan_powerup");
+
+ if (cw1200_reset || cw1200_powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -226,13 +234,13 @@ static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (cw1200_powerup) {
+ gpiod_set_value(cw1200_powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(50); /* Or more..? */
}
return 0;
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index c82c0688b549..fb3aafcafe18 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -11,7 +11,7 @@
*/
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -38,6 +38,8 @@ struct hwbus_priv {
const struct cw1200_platform_data_spi *pdata;
spinlock_t lock; /* Serialize all bus operations */
wait_queue_head_t wq;
+ struct gpio_desc *reset;
+ struct gpio_desc *powerup;
int claimed;
};
@@ -79,9 +81,6 @@ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, regaddr);
#endif
- /* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
-
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
*/
@@ -144,9 +143,6 @@ static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, regaddr);
#endif
- /* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
-
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
*/
@@ -275,12 +271,12 @@ static void cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
free_irq(self->func->irq, self);
}
-static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_off(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (self->reset) {
+ /* Assert RESET, note active low */
+ gpiod_set_value(self->reset, 1);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -291,18 +287,12 @@ static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
return 0;
}
-static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_on(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
/* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
- }
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
- }
- if (pdata->reset || pdata->powerup)
+ gpiod_direction_output(self->reset, 1); /* Active low */
+ gpiod_direction_output(self->powerup, 0);
+ if (self->reset || self->powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -323,13 +313,13 @@ static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (self->powerup) {
+ gpiod_set_value(self->powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (self->reset) {
+ gpiod_set_value(self->reset, 0);
msleep(50); /* Or more..? */
}
return 0;
@@ -381,20 +371,33 @@ static int cw1200_spi_probe(struct spi_device *func)
spi_get_chipselect(func, 0), func->mode, func->bits_per_word,
func->max_speed_hz);
- if (cw1200_spi_on(plat_data)) {
+ self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ pr_err("Can't allocate SPI hwbus_priv.");
+ return -ENOMEM;
+ }
+
+ /* Request reset asserted */
+ self->reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(self->reset))
+ return dev_err_probe(&func->dev, PTR_ERR(self->reset),
+ "could not get reset GPIO\n");
+ gpiod_set_consumer_name(self->reset, "cw1200_wlan_reset");
+
+ self->powerup = devm_gpiod_get_optional(&func->dev, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(self->powerup))
+ return dev_err_probe(&func->dev, PTR_ERR(self->powerup),
+ "could not get powerup GPIO\n");
+ gpiod_set_consumer_name(self->reset, "cw1200_wlan_powerup");
+
+ if (cw1200_spi_on(self, plat_data)) {
pr_err("spi_on() failed!\n");
- return -1;
+ return -ENODEV;
}
if (spi_setup(func)) {
pr_err("spi_setup() failed!\n");
- return -1;
- }
-
- self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
- if (!self) {
- pr_err("Can't allocate SPI hwbus_priv.");
- return -ENOMEM;
+ return -ENODEV;
}
self->pdata = plat_data;
@@ -416,7 +419,7 @@ static int cw1200_spi_probe(struct spi_device *func)
if (status) {
cw1200_spi_irq_unsubscribe(self);
- cw1200_spi_off(plat_data);
+ cw1200_spi_off(self, plat_data);
}
return status;
@@ -434,7 +437,7 @@ static void cw1200_spi_disconnect(struct spi_device *func)
self->core = NULL;
}
}
- cw1200_spi_off(dev_get_platdata(&func->dev));
+ cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 381013e0db63..a54a7b86864f 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -203,6 +203,10 @@ static const unsigned long cw1200_ttl[] = {
};
static const struct ieee80211_ops cw1200_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = cw1200_start,
.stop = cw1200_stop,
.add_interface = cw1200_add_interface,
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd9a41f59f32..0da2d29dd7bd 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1351,6 +1351,10 @@ static struct ieee80211_supported_band wl1251_band_2ghz = {
};
static const struct ieee80211_ops wl1251_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wl1251_op_start,
.stop = wl1251_op_stop,
.add_interface = wl1251_op_add_interface,
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 301bd0043a43..4e5b351f80f0 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -343,5 +343,6 @@ static void __exit wl1251_sdio_exit(void)
module_init(wl1251_sdio_init);
module_exit(wl1251_sdio_exit);
+MODULE_DESCRIPTION("TI WL1251 SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 29292f06bd3d..1936bb3af54a 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -342,6 +342,7 @@ static struct spi_driver wl1251_spi_driver = {
module_spi_driver(wl1251_spi_driver);
+MODULE_DESCRIPTION("TI WL1251 SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index de045fe4ca1e..b26d42b4e3cc 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1955,6 +1955,7 @@ module_param_named(tcxo, tcxo_param, charp, 0);
MODULE_PARM_DESC(tcxo,
"TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
+MODULE_DESCRIPTION("TI WL12xx wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 20d9181b3410..2ccac1cdec01 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -2086,6 +2086,7 @@ module_param_named(num_rx_desc, num_rx_desc_param, int, 0400);
MODULE_PARM_DESC(num_rx_desc_param,
"Number of Rx descriptors: u8 (default is 32)");
+MODULE_DESCRIPTION("TI WiLink 8 wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1e082d039b82..2499dc908305 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -233,7 +233,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
cancel_delayed_work(&wlvif->channel_switch_work);
} else {
set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index fb9ed97774c7..ef12169f8044 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2910,7 +2910,7 @@ static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int ret;
wlvif->aid = vif->cfg.aid;
- wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
+ wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
wlvif->beacon_int = bss_conf->beacon_int;
wlvif->wmm_enabled = bss_conf->qos;
@@ -4242,7 +4242,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
+ (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
@@ -4515,7 +4515,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* Handle new association with HT. Do this after join. */
if (sta_exists) {
bool enabled =
- bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+ bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;
ret = wlcore_hw_set_peer_cap(wl,
&sta_ht_cap,
@@ -6793,6 +6793,7 @@ MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
+MODULE_DESCRIPTION("TI WLAN core driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index f0686635db46..966edb39ad9e 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -16,7 +16,6 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
-#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/printk.h>
#include <linux/of.h>
@@ -447,6 +446,7 @@ module_sdio_driver(wl1271_sdio_driver);
module_param(dump, bool, 0600);
MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
+MODULE_DESCRIPTION("TI WLAN SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 7d9a139db59e..0aa2b2f3c5c9 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -562,6 +562,7 @@ static struct spi_driver wl1271_spi_driver = {
};
module_spi_driver(wl1271_spi_driver);
+MODULE_DESCRIPTION("TI WLAN SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index a84340c2075f..b55fe320633c 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
/*
@@ -196,8 +196,11 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_04 = {
.reg_rules = {
REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
- REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, NL80211_RRF_AUTO_BW),
REG_RULE(5260 - 10, 5320 + 10, 80, 0, 30,
+ NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS |
+ NL80211_RRF_AUTO_BW),
+ REG_RULE(5500 - 10, 5720 + 10, 160, 0, 30,
NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS),
REG_RULE(5745 - 10, 5825 + 10, 80, 0, 30, 0),
REG_RULE(5855 - 10, 5925 + 10, 80, 0, 33, 0),
@@ -213,6 +216,7 @@ static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
struct hwsim_vif_priv {
u32 magic;
+ u32 skip_beacons;
u8 bssid[ETH_ALEN];
bool assoc;
bool bcn_en;
@@ -2128,6 +2132,16 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void mac80211_hwsim_vif_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+
+ debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir,
+ &vp->skip_beacons);
+}
+#endif
static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2193,12 +2207,19 @@ static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf,
struct ieee80211_vif *vif,
struct sk_buff *skb)
{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct ieee80211_tx_info *info;
struct ieee80211_rate *txrate;
struct ieee80211_mgmt *mgmt;
/* TODO: get MCS */
int bitrate = 100;
+ if (vp->skip_beacons) {
+ vp->skip_beacons--;
+ dev_kfree_skb(skb);
+ return;
+ }
+
info = IEEE80211_SKB_CB(skb);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(vif, NULL, skb,
@@ -2284,8 +2305,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
rcu_dereference(link_conf->chanctx_conf)->def.chan);
}
- if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id))
+ ieee80211_csa_finish(vif, link_id);
}
static enum hrtimer_restart
@@ -2462,7 +2483,7 @@ static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw,
}
if (vif->type == NL80211_IFTYPE_STATION &&
- changed & BSS_CHANGED_MLD_VALID_LINKS) {
+ changed & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM)) {
u16 usable_links = ieee80211_vif_usable_links(vif);
if (vif->active_links != usable_links)
@@ -2653,10 +2674,11 @@ static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
return mac80211_hwsim_sta_add(hw, vif, sta);
/*
- * when client is authorized (AP station marked as such),
- * enable all links
+ * in an MLO connection, when the client is authorized
+ * (AP station marked as such), enable all links
*/
- if (vif->type == NL80211_IFTYPE_STATION &&
+ if (ieee80211_vif_is_mld(vif) &&
+ vif->type == NL80211_IFTYPE_STATION &&
new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls)
ieee80211_set_active_links_async(vif,
ieee80211_vif_usable_links(vif));
@@ -2738,6 +2760,24 @@ static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static enum ieee80211_neg_ttlm_res
+mac80211_hwsim_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u32 i;
+
+ /* For testing purposes, accept if all TIDs are mapped to the same link
+ * set, otherwise reject.
+ */
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->downlink[i] != neg_ttlm->downlink[0])
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
#ifdef CONFIG_NL80211_TESTMODE
/*
* This section contains example code for using netlink
@@ -3175,6 +3215,47 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
}
+static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ int i;
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context mode: %u\n", mode);
+
+ for (i = 0; i < n_vifs; i++) {
+ hwsim_check_chanctx_magic(vifs[i].old_ctx);
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context: %d MHz/width: %d/cfreqs:%d/%d MHz -> %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].old_ctx->def.center_freq1,
+ vifs[i].old_ctx->def.center_freq2,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.width,
+ vifs[i].new_ctx->def.center_freq1,
+ vifs[i].new_ctx->def.center_freq2);
+
+ switch (mode) {
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ hwsim_check_chanctx_magic(vifs[i].new_ctx);
+ break;
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ hwsim_set_chanctx_magic(vifs[i].new_ctx);
+ hwsim_clear_chanctx_magic(vifs[i].old_ctx);
+ break;
+ default:
+ WARN_ON("Invalid mode");
+ }
+ }
+ return 0;
+}
+
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
@@ -3839,6 +3920,13 @@ out:
return err;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+#define HWSIM_DEBUGFS_OPS \
+ .vif_add_debugfs = mac80211_hwsim_vif_add_debugfs,
+#else
+#define HWSIM_DEBUGFS_OPS
+#endif
+
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
.wake_tx_queue = ieee80211_handle_wake_tx_queue, \
@@ -3863,7 +3951,8 @@ out:
.get_et_stats = mac80211_hwsim_get_et_stats, \
.get_et_strings = mac80211_hwsim_get_et_strings, \
.start_pmsr = mac80211_hwsim_start_pmsr, \
- .abort_pmsr = mac80211_hwsim_abort_pmsr,
+ .abort_pmsr = mac80211_hwsim_abort_pmsr, \
+ HWSIM_DEBUGFS_OPS
#define HWSIM_NON_MLO_OPS \
.sta_add = mac80211_hwsim_sta_add, \
@@ -3877,6 +3966,10 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
HWSIM_NON_MLO_OPS
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
};
#define HWSIM_CHANCTX_OPS \
@@ -3888,7 +3981,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
.remove_chanctx = mac80211_hwsim_remove_chanctx, \
.change_chanctx = mac80211_hwsim_change_chanctx, \
.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,\
- .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, \
+ .switch_vif_chanctx = mac80211_hwsim_switch_vif_chanctx,
static const struct ieee80211_ops mac80211_hwsim_mchan_ops = {
HWSIM_COMMON_OPS
@@ -3903,6 +3997,7 @@ static const struct ieee80211_ops mac80211_hwsim_mlo_ops = {
.change_vif_links = mac80211_hwsim_change_vif_links,
.change_sta_links = mac80211_hwsim_change_sta_links,
.sta_state = mac80211_hwsim_sta_state,
+ .can_neg_ttlm = mac80211_hwsim_can_neg_ttlm,
};
struct hwsim_new_radio_params {
@@ -4965,6 +5060,33 @@ static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
BIT(NL80211_IFTYPE_MESH_POINT) | \
BIT(NL80211_IFTYPE_OCB))
+static const u8 iftypes_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+#define MAC80211_HWSIM_MLD_CAPA_OPS \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS, \
+ IEEE80211_MLD_MAX_NUM_LINKS - 1)
+
+static const struct wiphy_iftype_ext_capab mac80211_hwsim_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = iftypes_ext_capa_ap,
+ .extended_capabilities_mask = iftypes_ext_capa_ap,
+ .extended_capabilities_len = sizeof(iftypes_ext_capa_ap),
+ .eml_capabilities = IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_EMLMR_SUPPORT,
+ .mld_capa_and_ops = MAC80211_HWSIM_MLD_CAPA_OPS,
+ },
+};
+
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -5159,6 +5281,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, AP_LINK_PS);
+
+ hw->wiphy->iftype_ext_capab = mac80211_hwsim_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(mac80211_hwsim_iftypes_ext_capa);
} else {
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
@@ -5309,7 +5435,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
schedule_timeout_interruptible(1);
}
- /* TODO: Add param */
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_DFS_CONCURRENT);
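The new .vif_add_debugfs hook is invoked by mac80211 after it has created the per-interface debugfs directory, so a driver only adds its own entries under vif->debugfs_dir. A sketch of the pattern with a hypothetical private struct (mirrors the skip_beacons counter hwsim adds above):

#include <linux/debugfs.h>
#include <net/mac80211.h>

struct foo_vif_priv {
	u32 skip_beacons;
};

static void foo_vif_add_debugfs(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	struct foo_vif_priv *vp = (void *)vif->drv_priv;

	/* expose a writable counter in the per-vif debugfs directory */
	debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir,
			   &vp->skip_beacons);
}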
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index 4676cdaf4cfd..21b1afd83dc1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2020, 2022-2024 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -84,6 +84,8 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_START_PMSR: request to start peer measurement with the
* %HWSIM_ATTR_PMSR_REQUEST. Result will be sent back asynchronously
* with %HWSIM_CMD_REPORT_PMSR.
+ * @HWSIM_CMD_ABORT_PMSR: Abort previously started peer measurement.
+ * @HWSIM_CMD_REPORT_PMSR: Report peer measurement data.
* @__HWSIM_CMD_MAX: enum limit
*/
enum hwsim_commands {
@@ -298,6 +300,7 @@ enum hwsim_vqs {
* Information about a receiving or transmitting bitrate
* that can be mapped to struct rate_info
*
+ * @__HWSIM_RATE_INFO_ATTR_INVALID: reserved, netlink attribute 0 is invalid
* @HWSIM_RATE_INFO_ATTR_FLAGS: bitflag of flags from &enum rate_info_flags
* @HWSIM_RATE_INFO_ATTR_MCS: mcs index if struct describes an HT/VHT/HE rate
* @HWSIM_RATE_INFO_ATTR_LEGACY: bitrate in 100kbit/s for 802.11abg
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index ba14d83353a4..6a84ec58d618 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -453,7 +453,7 @@ static int virt_wifi_net_device_get_iflink(const struct net_device *dev)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
- return priv->lowerdev->ifindex;
+ return READ_ONCE(priv->lowerdev->ifindex);
}
static const struct net_device_ops virt_wifi_ops = {
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_def.h b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
index 8ca2d0aab170..2f55e8deee82 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
@@ -12,7 +12,7 @@
#include <linux/stringify.h>
#include <linux/device.h>
-typedef u16 __nocast zd_addr_t;
+typedef u16 zd_addr_t;
#define dev_printk_f(level, dev, fmt, args...) \
dev_printk(level, dev, "%s() " fmt, __func__, ##args)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 5d534e15a844..900c063bd724 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1343,6 +1343,10 @@ static u64 zd_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops zd_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = zd_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = zd_op_start,
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index cc70360364b7..abc41a7089fa 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -57,8 +57,6 @@
#define CHECK_Q_STOP_TIMEOUT_US 1000000
#define CHECK_Q_STOP_STEP_US 10000
-#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
-
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
{
@@ -161,7 +159,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
skb_reset_tail_pointer(skb);
skb_put(skb, le16_to_cpu(gpd->data_buff_len));
- ret = md_ctrl->recv_skb(queue, skb);
+ ret = queue->recv_skb(queue, skb);
/* Break processing, will try again later */
if (ret < 0)
return ret;
@@ -897,13 +895,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
/**
* t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
- * @md_ctrl: CLDMA context structure.
+ * @queue: CLDMA queue.
* @recv_skb: Receiving skb callback.
*/
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
- md_ctrl->recv_skb = recv_skb;
+ queue->recv_skb = recv_skb;
}
/**
@@ -993,6 +991,28 @@ allow_sleep:
return ret;
}
+static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
+{
+ int qno;
+
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
+ md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
+ }
+
+ md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+ for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
+ md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+
+ if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
+ md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
+ t7xx_port_proxy_recv_skb_from_dedicated_queue);
+ }
+}
+
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
char dma_pool_name[32];
@@ -1018,16 +1038,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
-
- md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
- md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-
- if (j == CLDMA_RXQ_NUM - 1)
- md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-
ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
if (ret) {
dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1094,6 +1107,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct cldma_ctrl *md_ctrl;
+ int qno;
md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
if (!md_ctrl)
@@ -1102,7 +1116,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
md_ctrl->t7xx_dev = t7xx_dev;
md_ctrl->dev = dev;
md_ctrl->hif_id = hif_id;
- md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
+ md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
+
t7xx_hw_info_init(md_ctrl);
t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
return 0;
@@ -1332,9 +1348,10 @@ err_workqueue:
return -ENOMEM;
}
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_adjust_config(md_ctrl, cfg_id);
t7xx_cldma_late_init(md_ctrl);
}
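With the recv_skb callback moved from the controller into each queue, RX handlers can now differ per queue. A condensed sketch of what t7xx_cldma_adjust_config() above sets up for the dedicated-queue case (the foo_* wrapper name is illustrative; the called identifiers come from the t7xx headers changed in this series):

static void foo_wire_rx_handlers(struct cldma_ctrl *md_ctrl)
{
	int qno;

	/* default: every RX queue feeds the shared port proxy */
	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno],
					t7xx_port_proxy_recv_skb);

	/* the dump queue gets the dedicated early-port handler */
	t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
				t7xx_port_proxy_recv_skb_from_dedicated_queue);
}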
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
index 4410bac6993a..f2d9941be9c8 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -31,6 +31,10 @@
#include "t7xx_cldma.h"
#include "t7xx_pci.h"
+#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
+#define CLDMA_SHARED_Q_BUFF_SZ 3584
+#define CLDMA_DEDICATED_Q_BUFF_SZ 2048
+
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
@@ -55,6 +59,11 @@ struct cldma_gpd {
__le16 not_used2;
};
+enum cldma_cfg {
+ CLDMA_SHARED_Q_CFG,
+ CLDMA_DEDICATED_Q_CFG,
+};
+
struct cldma_request {
struct cldma_gpd *gpd; /* Virtual address for CPU */
dma_addr_t gpd_addr; /* Physical address for DMA */
@@ -82,6 +91,7 @@ struct cldma_queue {
wait_queue_head_t req_wq; /* Only for TX */
struct workqueue_struct *worker;
struct work_struct cldma_work;
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
struct cldma_ctrl {
@@ -101,24 +111,22 @@ struct cldma_ctrl {
struct md_pm_entity *pm_entity;
struct t7xx_cldma_hw hw_info;
bool is_late_init;
- int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
+#define CLDMA_Q_IDX_DUMP 1
#define GPD_FLAGS_HWO BIT(0)
#define GPD_FLAGS_IOC BIT(7)
#define GPD_DMAPOOL_ALIGN 16
-#define CLDMA_MTU 3584 /* 3.5kB */
-
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 24e7d491468e..8d864d4ed77f 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -177,6 +177,11 @@ int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
return t7xx_acpi_reset(t7xx_dev, "_RST");
}
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+}
+
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
u32 val;
@@ -192,6 +197,7 @@ static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
+ t7xx_mode_update(t7xx_dev, T7XX_RESET);
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
@@ -529,7 +535,7 @@ static void t7xx_md_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in core_reset() */
t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
@@ -544,7 +550,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
md->core_ap.handshake_ongoing = true;
t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
@@ -758,6 +764,7 @@ err_destroy_hswq:
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
struct t7xx_modem *md = t7xx_dev->md;
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
@@ -765,7 +772,8 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
if (!md->md_init_finish)
return;
- t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
+ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index abe633cf7adc..b39e945a92e0 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -85,6 +85,7 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 91256e005b84..e0b1e7a616ca 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -52,6 +52,81 @@
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
#define PM_RESOURCE_POLL_STEP_US 100
+static const char * const t7xx_mode_names[] = {
+ [T7XX_UNKNOWN] = "unknown",
+ [T7XX_READY] = "ready",
+ [T7XX_RESET] = "reset",
+ [T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
+ [T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
+ [T7XX_FASTBOOT_DUMP] = "fastboot_dump",
+};
+
+static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);
+
+static ssize_t t7xx_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+ int index = 0;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ index = sysfs_match_string(t7xx_mode_names, buf);
+ if (index == T7XX_FASTBOOT_SWITCHING) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
+ } else if (index == T7XX_RESET) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
+ t7xx_acpi_pldr_func(t7xx_dev);
+ }
+
+ return count;
+}
+
+static ssize_t t7xx_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum t7xx_mode mode = T7XX_UNKNOWN;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ mode = READ_ONCE(t7xx_dev->mode);
+ if (mode < T7XX_MODE_LAST)
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);
+
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
+}
+
+static DEVICE_ATTR_RW(t7xx_mode);
+
+static struct attribute *t7xx_mode_attr[] = {
+ &dev_attr_t7xx_mode.attr,
+ NULL
+};
+
+static const struct attribute_group t7xx_mode_attribute_group = {
+ .attrs = t7xx_mode_attr,
+};
+
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
+{
+ if (!t7xx_dev)
+ return;
+
+ WRITE_ONCE(t7xx_dev->mode, mode);
+ sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
+}
+
enum t7xx_pm_state {
MTK_PM_EXCEPTION,
MTK_PM_INIT, /* Device initialized, but handshake not completed */
@@ -108,7 +183,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
pm_runtime_use_autosuspend(&pdev->dev);
- return t7xx_wait_pm_config(t7xx_dev);
+ return 0;
}
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
@@ -279,7 +354,8 @@ static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
int ret;
t7xx_dev = pci_get_drvdata(pdev);
- if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
+ READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
return -EFAULT;
}
@@ -729,16 +805,28 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+ ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+ if (ret)
+ goto err_md_exit;
+
ret = t7xx_interrupt_init(t7xx_dev);
- if (ret) {
- t7xx_md_exit(t7xx_dev);
- return ret;
- }
+ if (ret)
+ goto err_remove_group;
+
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
t7xx_pcie_mac_interrupts_en(t7xx_dev);
return 0;
+
+err_remove_group:
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+
+err_md_exit:
+ t7xx_md_exit(t7xx_dev);
+ return ret;
}
static void t7xx_pci_remove(struct pci_dev *pdev)
@@ -747,6 +835,9 @@ static void t7xx_pci_remove(struct pci_dev *pdev)
int i;
t7xx_dev = pci_get_drvdata(pdev);
+
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
t7xx_md_exit(t7xx_dev);
for (i = 0; i < EXT_INT_NUM; i++) {
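The new attribute lands under the modem's PCI device node. A hypothetical userspace sketch that reads it (the BDF in the path is an assumption; substitute the real device address):

#include <stdio.h>

int main(void)
{
	/* illustrative path; the real node is <pci-device>/t7xx_mode */
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/t7xx_mode", "r");
	char mode[32];

	if (!f)
		return 1;
	if (fgets(mode, sizeof(mode), f))
		printf("modem mode: %s", mode);
	fclose(f);
	return 0;
}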
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index f08f1ab74469..49a11586d8d8 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -43,6 +43,16 @@ struct t7xx_addr_base {
typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
+enum t7xx_mode {
+ T7XX_UNKNOWN,
+ T7XX_READY,
+ T7XX_RESET,
+ T7XX_FASTBOOT_SWITCHING,
+ T7XX_FASTBOOT_DOWNLOAD,
+ T7XX_FASTBOOT_DUMP,
+ T7XX_MODE_LAST, /* must always be last */
+};
+
/* struct t7xx_pci_dev - MTK device context structure
* @intr_handler: array of handler function for request_threaded_irq
* @intr_thread: array of thread_fn for request_threaded_irq
@@ -59,6 +69,7 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
* @md_pm_lock: protects PCIe sleep lock
* @sleep_disable_count: PCIe L1.2 lock counter
* @sleep_lock_acquire: indicates that sleep has been disabled
+ * @mode: indicates the device mode
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -82,6 +93,7 @@ struct t7xx_pci_dev {
#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *debugfs_dir;
#endif
+ u32 mode;
};
enum t7xx_pm_id {
@@ -120,5 +132,5 @@ int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_enti
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
-
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode);
#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
index 4ae8a00a8532..f74d3bab810d 100644
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -75,6 +75,8 @@ enum port_ch {
PORT_CH_DSS6_TX = 0x20df,
PORT_CH_DSS7_RX = 0x20e0,
PORT_CH_DSS7_TX = 0x20e1,
+
+ PORT_CH_UNIMPORTANT = 0xffff,
};
struct t7xx_port;
@@ -135,11 +137,13 @@ struct t7xx_port {
};
};
+int t7xx_get_port_mtu(struct t7xx_port *port);
struct sk_buff *t7xx_port_alloc_skb(int payload);
struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
unsigned int ex_msg);
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
unsigned int ex_msg);
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 274846d39fbf..7d6388bf1d7c 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -48,6 +48,9 @@
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
+#define T7XX_MAX_POSSIBLE_PORTS_NUM \
+ (max(ARRAY_SIZE(t7xx_port_conf), ARRAY_SIZE(t7xx_early_port_conf)))
+
static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
@@ -100,6 +103,21 @@ static const struct t7xx_port_conf t7xx_port_conf[] = {
},
};
+static const struct t7xx_port_conf t7xx_early_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UNIMPORTANT,
+ .rx_ch = PORT_CH_UNIMPORTANT,
+ .txq_index = CLDMA_Q_IDX_DUMP,
+ .rxq_index = CLDMA_Q_IDX_DUMP,
+ .txq_exp_index = CLDMA_Q_IDX_DUMP,
+ .rxq_exp_index = CLDMA_Q_IDX_DUMP,
+ .path_id = CLDMA_ID_AP,
+ .ops = &wwan_sub_port_ops,
+ .name = "fastboot",
+ .port_type = WWAN_PORT_FASTBOOT,
+ },
+};
+
static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
const struct t7xx_port_conf *port_conf;
@@ -214,7 +232,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
return 0;
}
-static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+int t7xx_get_port_mtu(struct t7xx_port *port)
+{
+ enum cldma_id path_id = port->port_conf->path_id;
+ int tx_qno = t7xx_port_get_queue_no(port);
+ struct cldma_ctrl *md_ctrl;
+
+ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+ return md_ctrl->tx_ring[tx_qno].pkt_size;
+}
+
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
enum cldma_id path_id = port->port_conf->path_id;
struct cldma_ctrl *md_ctrl;
@@ -329,6 +357,39 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
}
}
+/**
+ * t7xx_port_proxy_recv_skb_from_dedicated_queue() - Dispatch an skb received on the early port's dedicated queue.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ ** 0 - Packet consumed.
+ ** -ERROR - Failed to process skb.
+ */
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+ struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ int ret;
+
+ port = &port_prox->ports[0];
+ if (WARN_ON_ONCE(port->port_conf->rxq_index != queue->index)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ port_conf = port->port_conf;
+ ret = port_conf->ops->recv_skb(port, skb);
+ if (ret < 0 && ret != -ENOBUFS) {
+ dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
+ dev_kfree_skb_any(skb);
+ }
+
+ return ret;
+}
+
static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
struct cldma_queue *queue, u16 channel)
{
@@ -359,7 +420,7 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev
** 0 - Packet consumed.
** -ERROR - Failed to process skb.
*/
-static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
@@ -444,33 +505,56 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
spin_lock_init(&port->port_update_lock);
port->chan_enable = false;
- if (port_conf->ops->init)
+ if (port_conf->ops && port_conf->ops->init)
port_conf->ops->init(port);
}
t7xx_proxy_setup_ch_mapping(port_prox);
}
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
+{
+ struct port_proxy *port_prox = md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ u32 port_count;
+ int i;
+
+ t7xx_port_proxy_uninit(port_prox);
+
+ if (cfg_id == PORT_CFG_ID_EARLY) {
+ port_conf = t7xx_early_port_conf;
+ port_count = ARRAY_SIZE(t7xx_early_port_conf);
+ } else {
+ port_conf = t7xx_port_conf;
+ port_count = ARRAY_SIZE(t7xx_port_conf);
+ }
+
+ for (i = 0; i < port_count; i++)
+ port_prox->ports[i].port_conf = &port_conf[i];
+
+ port_prox->cfg_id = cfg_id;
+ port_prox->port_count = port_count;
+
+ t7xx_proxy_init_all_ports(md);
+}
+
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
- unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
- int i;
- port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+ port_prox = devm_kzalloc(dev,
+ struct_size(port_prox,
+ ports,
+ T7XX_MAX_POSSIBLE_PORTS_NUM),
GFP_KERNEL);
if (!port_prox)
return -ENOMEM;
md->port_prox = port_prox;
port_prox->dev = dev;
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
- for (i = 0; i < port_count; i++)
- port_prox->ports[i].port_conf = &t7xx_port_conf[i];
-
- port_prox->port_count = port_count;
- t7xx_proxy_init_all_ports(md);
return 0;
}
@@ -492,8 +576,6 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
if (ret)
return ret;
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
@@ -505,7 +587,7 @@ void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
- if (port_conf->ops->uninit)
+ if (port_conf->ops && port_conf->ops->uninit)
port_conf->ops->uninit(port);
}
}
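The allocation change above replaces open-coded sizeof arithmetic with struct_size(), the overflow-checked helper for structs ending in a flexible array. A generic sketch with hypothetical types:

#include <linux/overflow.h>
#include <linux/slab.h>

struct foo_proxy {
	int port_count;
	struct foo_port {
		int id;
	} ports[];
};

static struct foo_proxy *foo_proxy_alloc(size_t nports)
{
	struct foo_proxy *p;

	/* struct_size() saturates on overflow instead of wrapping */
	p = kzalloc(struct_size(p, ports, nports), GFP_KERNEL);
	return p;
}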
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
index 81d059fbc0fb..7f5706811445 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -31,11 +31,18 @@
#define RX_QUEUE_MAXLEN 32
#define CTRL_QUEUE_MAXLEN 16
+enum port_cfg_id {
+ PORT_CFG_ID_INVALID,
+ PORT_CFG_ID_NORMAL,
+ PORT_CFG_ID_EARLY,
+};
+
struct port_proxy {
int port_count;
struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
struct device *dev;
+ enum port_cfg_id cfg_id;
struct t7xx_port ports[];
};
@@ -98,5 +105,8 @@ void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int
int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
bool en_flag);
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id);
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb);
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb);
#endif /* __T7XX_PORT_PROXY_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
index 17389c8f6600..4b23ba693f3f 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
+ * Copyright (c) 2024, Fibocom Wireless Inc.
*
* Authors:
* Amir Hanania <amir.hanania@intel.com>
@@ -15,6 +16,7 @@
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
+ * Jinjian Song <jinjian.song@fibocom.com>
*/
#include <linux/atomic.h>
@@ -33,7 +35,7 @@
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
-static int t7xx_port_ctrl_start(struct wwan_port *port)
+static int t7xx_port_wwan_start(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
@@ -44,30 +46,60 @@ static int t7xx_port_ctrl_start(struct wwan_port *port)
return 0;
}
-static void t7xx_port_ctrl_stop(struct wwan_port *port)
+static void t7xx_port_wwan_stop(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
atomic_dec(&port_mtk->usage_cnt);
}
-static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+static int t7xx_port_fastboot_tx(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct sk_buff *cur = skb, *tx_skb;
+ size_t actual, len, offset = 0;
+ int txq_mtu;
+ int ret;
+
+ txq_mtu = t7xx_get_port_mtu(port);
+ if (txq_mtu < 0)
+ return -EINVAL;
+
+ actual = cur->len;
+ while (actual) {
+ len = min_t(size_t, actual, txq_mtu);
+ tx_skb = __dev_alloc_skb(len, GFP_KERNEL);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ skb_put_data(tx_skb, cur->data + offset, len);
+
+ ret = t7xx_port_send_raw_skb(port, tx_skb);
+ if (ret) {
+ dev_kfree_skb(tx_skb);
+ dev_err(port->dev, "Write error on fastboot port, %d\n", ret);
+ break;
+ }
+ offset += len;
+ actual -= len;
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int t7xx_port_ctrl_tx(struct t7xx_port *port, struct sk_buff *skb)
{
- struct t7xx_port *port_private = wwan_port_get_drvdata(port);
const struct t7xx_port_conf *port_conf;
struct sk_buff *cur = skb, *cloned;
struct t7xx_fsm_ctl *ctl;
enum md_state md_state;
int cnt = 0, ret;
- if (!port_private->chan_enable)
- return -EINVAL;
-
- port_conf = port_private->port_conf;
- ctl = port_private->t7xx_dev->md->fsm_ctl;
+ port_conf = port->port_conf;
+ ctl = port->t7xx_dev->md->fsm_ctl;
md_state = t7xx_fsm_get_md_state(ctl);
if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
- dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
+ dev_warn(port->dev, "Cannot write to %s port when md_state=%d\n",
port_conf->name, md_state);
return -ENODEV;
}
@@ -75,10 +107,10 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
while (cur) {
cloned = skb_clone(cur, GFP_KERNEL);
cloned->len = skb_headlen(cur);
- ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
+ ret = t7xx_port_send_skb(port, cloned, 0, 0);
if (ret) {
dev_kfree_skb(cloned);
- dev_err(port_private->dev, "Write error on %s port, %d\n",
+ dev_err(port->dev, "Write error on %s port, %d\n",
port_conf->name, ret);
return cnt ? cnt + ret : ret;
}
@@ -93,14 +125,53 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
return 0;
}
+static int t7xx_port_wwan_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct t7xx_port *port_private = wwan_port_get_drvdata(port);
+ const struct t7xx_port_conf *port_conf = port_private->port_conf;
+ int ret;
+
+ if (!port_private->chan_enable)
+ return -EINVAL;
+
+ if (port_conf->port_type != WWAN_PORT_FASTBOOT)
+ ret = t7xx_port_ctrl_tx(port_private, skb);
+ else
+ ret = t7xx_port_fastboot_tx(port_private, skb);
+
+ return ret;
+}
+
static const struct wwan_port_ops wwan_ops = {
- .start = t7xx_port_ctrl_start,
- .stop = t7xx_port_ctrl_stop,
- .tx = t7xx_port_ctrl_tx,
+ .start = t7xx_port_wwan_start,
+ .stop = t7xx_port_wwan_stop,
+ .tx = t7xx_port_wwan_tx,
};
+static void t7xx_port_wwan_create(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ unsigned int header_len = sizeof(struct ccci_header), mtu;
+ struct wwan_port_caps caps;
+
+ if (!port->wwan.wwan_port) {
+ mtu = t7xx_get_port_mtu(port);
+ caps.frag_len = mtu - header_len;
+ caps.headroom_len = header_len;
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, &caps, port);
+ if (IS_ERR(port->wwan.wwan_port))
+ dev_err(port->dev, "Unable to create WWAN port %s", port_conf->name);
+ }
+}
+
static int t7xx_port_wwan_init(struct t7xx_port *port)
{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ t7xx_port_wwan_create(port);
+
port->rx_length_th = RX_QUEUE_MAXLEN;
return 0;
}
@@ -152,20 +223,14 @@ static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
- unsigned int header_len = sizeof(struct ccci_header);
- struct wwan_port_caps caps;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ return;
if (state != MD_STATE_READY)
return;
- if (!port->wwan.wwan_port) {
- caps.frag_len = CLDMA_MTU - header_len;
- caps.headroom_len = header_len;
- port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, &caps, port);
- if (IS_ERR(port->wwan.wwan_port))
- dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
- }
+ t7xx_port_wwan_create(port);
}
struct port_ops wwan_sub_port_ops = {
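t7xx_port_fastboot_tx() above carves an arbitrarily large write into MTU-sized skbs. A hypothetical standalone sketch of the same offset/remaining bookkeeping, with send_one() standing in for allocating an skb and handing it to t7xx_port_send_raw_skb():

#include <stddef.h>

static int send_chunked(const char *buf, size_t len, size_t mtu,
			int (*send_one)(const char *p, size_t n))
{
	size_t offset = 0;

	while (offset < len) {
		/* never send more than one MTU-sized piece at a time */
		size_t n = len - offset < mtu ? len - offset : mtu;
		int ret = send_one(buf + offset, n);

		if (ret)
			return ret;
		offset += n;
	}
	return 0;
}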
diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
index c41d7d094c08..9c7dc72ac6f6 100644
--- a/drivers/net/wwan/t7xx/t7xx_reg.h
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -101,11 +101,33 @@ enum t7xx_pm_resume_state {
PM_RESUME_REG_STATE_L2_EXP,
};
+enum host_event_e {
+ HOST_EVENT_INIT = 0,
+ FASTBOOT_DL_NOTIFY = 0x3,
+};
+
#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c
#define MISC_STAGE_MASK GENMASK(2, 0)
#define MISC_RESET_TYPE_PLDR BIT(26)
#define MISC_RESET_TYPE_FLDR BIT(27)
-#define LINUX_STAGE 4
+#define MISC_LK_EVENT_MASK GENMASK(11, 8)
+#define HOST_EVENT_MASK GENMASK(31, 28)
+
+enum lk_event_id {
+ LK_EVENT_NORMAL = 0,
+ LK_EVENT_CREATE_PD_PORT = 1,
+ LK_EVENT_CREATE_POST_DL_PORT = 2,
+ LK_EVENT_RESET = 7,
+};
+
+enum t7xx_device_stage {
+ T7XX_DEV_STAGE_INIT = 0,
+ T7XX_DEV_STAGE_BROM_PRE = 1,
+ T7XX_DEV_STAGE_BROM_POST = 2,
+ T7XX_DEV_STAGE_LK = 3,
+ T7XX_DEV_STAGE_LINUX = 4,
+};
#define T7XX_PCIE_RESOURCE_STATUS 0x0d28
#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 0bc97430211b..9889ca4621cf 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -47,6 +47,13 @@
#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
#define FSM_CMD_TIMEOUT_MS 2000
+#define wait_for_expected_dev_stage(status) \
+ read_poll_timeout(ioread32, status, \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) || \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000, \
+ 20000000, false, IREG_BASE(md->t7xx_dev) + \
+ T7XX_PCIE_MISC_DEV_STATUS)
+
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
@@ -206,6 +213,55 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
fsm_finish_command(ctl, cmd, 0);
}
+static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
+{
+ u32 value;
+
+ value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ value &= ~HOST_EVENT_MASK;
+ value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+ iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+}
+
+static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
+{
+ struct t7xx_modem *md = ctl->md;
+ struct cldma_ctrl *md_ctrl;
+ enum lk_event_id lk_event;
+ struct device *dev;
+ struct t7xx_port *port;
+
+ dev = &md->t7xx_dev->pdev->dev;
+ lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
+ switch (lk_event) {
+ case LK_EVENT_NORMAL:
+ case LK_EVENT_RESET:
+ break;
+
+ case LK_EVENT_CREATE_PD_PORT:
+ case LK_EVENT_CREATE_POST_DL_PORT:
+ md_ctrl = md->md_ctrl[CLDMA_ID_AP];
+ t7xx_cldma_hif_hw_init(md_ctrl);
+ t7xx_cldma_stop(md_ctrl);
+ t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
+
+ port = &ctl->md->port_prox->ports[0];
+ port->port_conf->ops->enable_chl(port);
+
+ t7xx_cldma_start(md_ctrl);
+
+ if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
+ else
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
+ break;
+
+ default:
+ dev_err(dev, "Invalid LK event %d\n", lk_event);
+ break;
+ }
+}
+
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
ctl->curr_state = FSM_STATE_STOPPED;
@@ -226,8 +282,9 @@ static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comman
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
- struct t7xx_pci_dev *t7xx_dev;
- struct cldma_ctrl *md_ctrl;
+ struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+ struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
@@ -235,18 +292,20 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
return;
}
- md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
- t7xx_dev = ctl->md->t7xx_dev;
-
ctl->curr_state = FSM_STATE_STOPPING;
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
- if (!ctl->md->rgu_irq_asserted) {
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
- /* Wait for the DRM disable to take effect */
- msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING)
+ t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+ /* Wait for the DRM disable to take effect */
+ msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING) {
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ } else {
err = t7xx_acpi_fldr_func(t7xx_dev);
if (err)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
@@ -272,6 +331,7 @@ static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
ctl->curr_state = FSM_STATE_READY;
t7xx_fsm_broadcast_ready_state(ctl);
+ t7xx_mode_update(md->t7xx_dev, T7XX_READY);
t7xx_md_event_notify(md, FSM_READY);
}
@@ -317,7 +377,8 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
struct t7xx_modem *md = ctl->md;
- u32 dev_status;
+ struct device *dev;
+ u32 status;
int ret;
if (!md)
@@ -329,23 +390,53 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
return;
}
+ dev = &md->t7xx_dev->pdev->dev;
ctl->curr_state = FSM_STATE_PRE_START;
t7xx_md_event_notify(md, FSM_PRE_START);
- ret = read_poll_timeout(ioread32, dev_status,
- (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
- false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ ret = wait_for_expected_dev_stage(status);
+
if (ret) {
- struct device *dev = &md->t7xx_dev->pdev->dev;
+ dev_err(dev, "read poll timeout %d\n", ret);
+ goto finish_command;
+ }
- fsm_finish_command(ctl, cmd, -ETIMEDOUT);
- dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
- return;
+ if (status != ctl->status || cmd->flag != 0) {
+ u32 stage = FIELD_GET(MISC_STAGE_MASK, status);
+
+ switch (stage) {
+ case T7XX_DEV_STAGE_INIT:
+ case T7XX_DEV_STAGE_BROM_PRE:
+ case T7XX_DEV_STAGE_BROM_POST:
+ dev_dbg(dev, "BROM_STAGE Entered\n");
+ ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
+ break;
+
+ case T7XX_DEV_STAGE_LK:
+ dev_dbg(dev, "LK_STAGE Entered\n");
+ t7xx_lk_stage_event_handling(ctl, status);
+ break;
+
+ case T7XX_DEV_STAGE_LINUX:
+ dev_dbg(dev, "LINUX_STAGE Entered\n");
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
+ D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
+ if (cmd->flag == 0)
+ break;
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
+ ret = fsm_routine_starting(ctl);
+ break;
+
+ default:
+ break;
+ }
+ ctl->status = status;
}
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
- fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+finish_command:
+ fsm_finish_command(ctl, cmd, ret);
}
static int fsm_main_thread(void *data)
@@ -517,6 +608,7 @@ void t7xx_fsm_reset(struct t7xx_modem *md)
fsm_flush_event_cmd_qs(ctl);
ctl->curr_state = FSM_STATE_STOPPED;
ctl->exp_flg = false;
+ ctl->status = T7XX_DEV_STAGE_INIT;
}
int t7xx_fsm_init(struct t7xx_modem *md)
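Unpacked, the wait_for_expected_dev_stage() macro above is a single read_poll_timeout() call: re-read the status register every 100 ms for up to 20 s until the stage field reports LK or Linux. A sketch with the arguments spelled out (hypothetical wrapper; constants and IREG_BASE() come from the t7xx headers):

#include <linux/iopoll.h>

static int foo_wait_dev_stage(struct t7xx_modem *md)
{
	u32 status;

	/* poll every 100000 us (100 ms), give up after 20000000 us (20 s) */
	return read_poll_timeout(ioread32, status,
				 ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) ||
				 ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK),
				 100000, 20000000, false,
				 IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
}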
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
index b0b3662ae6d7..7b0a9baf488c 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -96,6 +96,7 @@ struct t7xx_fsm_ctl {
bool exp_flg;
spinlock_t notifier_lock; /* Protects notifier list */
struct list_head notifier_list;
+ u32 status; /* Device boot stage */
};
struct t7xx_fsm_event {
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 72e01e550a16..17431f1b1a0c 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -26,7 +26,9 @@
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
-static struct class *wwan_class;
+static const struct class wwan_class = {
+ .name = "wwan",
+};
static int wwan_major;
static struct dentry *wwan_debugfs_dir;
@@ -130,7 +132,7 @@ static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
+ dev = class_find_device(&wwan_class, NULL, parent, wwan_dev_parent_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -147,7 +149,7 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
+ dev = class_find_device(&wwan_class, NULL, name, wwan_dev_name_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -183,7 +185,7 @@ static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ dev = class_find_device(&wwan_class, NULL, dir, wwan_dev_debugfs_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -239,7 +241,7 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
}
wwandev->dev.parent = parent;
- wwandev->dev.class = wwan_class;
+ wwandev->dev.class = &wwan_class;
wwandev->dev.type = &wwan_dev_type;
wwandev->id = id;
dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
@@ -265,7 +267,7 @@ done_unlock:
static int is_wwan_child(struct device *dev, void *data)
{
- return dev->class == wwan_class;
+ return dev->class == &wwan_class;
}
static void wwan_remove_dev(struct wwan_device *wwandev)
@@ -328,6 +330,10 @@ static const struct {
.name = "XMMRPC",
.devsuf = "xmmrpc",
},
+ [WWAN_PORT_FASTBOOT] = {
+ .name = "FASTBOOT",
+ .devsuf = "fastboot",
+ },
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -371,7 +377,7 @@ static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
+ dev = class_find_device(&wwan_class, NULL, &minor, wwan_port_minor_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -401,7 +407,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
return -ENOMEM;
/* Collect ids of same name format ports */
- class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
+ class_dev_iter_init(&iter, &wwan_class, NULL, &wwan_port_dev_type);
while ((dev = class_dev_iter_next(&iter))) {
if (dev->parent != &wwandev->dev)
continue;
@@ -473,7 +479,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
- port->dev.class = wwan_class;
+ port->dev.class = &wwan_class;
port->dev.type = &wwan_port_dev_type;
port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
@@ -916,7 +922,7 @@ static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static struct device_type wwan_type = { .name = "wwan" };
+static const struct device_type wwan_type = { .name = "wwan" };
static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
const char *ifname,
@@ -1208,11 +1214,9 @@ static int __init wwan_init(void)
if (err)
return err;
- wwan_class = class_create("wwan");
- if (IS_ERR(wwan_class)) {
- err = PTR_ERR(wwan_class);
+ err = class_register(&wwan_class);
+ if (err)
goto unregister;
- }
/* chrdev used for wwan ports */
wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
@@ -1229,7 +1233,7 @@ static int __init wwan_init(void)
return 0;
destroy:
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
unregister:
rtnl_link_unregister(&wwan_rtnl_link_ops);
return err;
@@ -1240,7 +1244,7 @@ static void __exit wwan_exit(void)
debugfs_remove_recursive(wwan_debugfs_dir);
__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
rtnl_link_unregister(&wwan_rtnl_link_ops);
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
}
module_init(wwan_init);
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index ff3dd24ddb33..b02befd1b6fb 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -25,7 +25,9 @@ static int wwan_hwsim_devsnum = 2;
module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
MODULE_PARM_DESC(devices, "Number of simulated devices");
-static struct class *wwan_hwsim_class;
+static const struct class wwan_hwsim_class = {
+ .name = "wwan_hwsim",
+};
static struct dentry *wwan_hwsim_debugfs_topdir;
static struct dentry *wwan_hwsim_debugfs_devcreate;
@@ -277,7 +279,7 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
spin_unlock(&wwan_hwsim_devs_lock);
dev->dev.release = wwan_hwsim_dev_release;
- dev->dev.class = wwan_hwsim_class;
+ dev->dev.class = &wwan_hwsim_class;
dev_set_name(&dev->dev, "hwsim%u", dev->id);
spin_lock_init(&dev->ports_lock);
@@ -511,11 +513,9 @@ static int __init wwan_hwsim_init(void)
if (!wwan_wq)
return -ENOMEM;
- wwan_hwsim_class = class_create("wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class)) {
- err = PTR_ERR(wwan_hwsim_class);
+ err = class_register(&wwan_hwsim_class);
+ if (err)
goto err_wq_destroy;
- }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -534,7 +534,7 @@ err_clean_devs:
wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait for deletion work items to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
err_wq_destroy:
destroy_workqueue(wwan_wq);
@@ -547,7 +547,7 @@ static void __exit wwan_hwsim_exit(void)
wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait for deletion work items to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
destroy_workqueue(wwan_wq);
}
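
Both wwan files above apply the same driver-core conversion: the class goes from a heap object returned by class_create() to a statically allocated const struct class handed to class_register(). A minimal sketch of the resulting pattern, assuming illustrative demo_* names that are not part of the patch:

#include <linux/device.h>
#include <linux/module.h>

static const struct class demo_class = {
        .name = "demo",
};

static int __init demo_init(void)
{
        /* class_register() reports failure as a negative errno,
         * so the IS_ERR()/PTR_ERR() dance around class_create()
         * is no longer needed. */
        return class_register(&demo_class);
}

static void __exit demo_exit(void)
{
        class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Static class registration sketch");
MODULE_LICENSE("GPL");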
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 88f760a7cbc3..ef76850d9bcd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,13 +104,12 @@ bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
- u8 status);
+ s8 status);
static void make_tx_response(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp,
+ const struct xen_netif_tx_request *txp,
unsigned int extra_count,
- s8 st);
-static void push_tx_responses(struct xenvif_queue *queue);
+ s8 status);
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned int extra_count, RING_IDX end)
{
RING_IDX cons = queue->tx.req_cons;
- unsigned long flags;
do {
- spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
- push_tx_responses(queue);
- spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
RING_COPY_REQUEST(&queue->tx, cons++, txp);
@@ -463,12 +458,20 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
}
for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
- shinfo->nr_frags++, gop++, nr_slots--) {
+ nr_slots--) {
+ if (unlikely(!txp->size)) {
+ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+ ++txp;
+ continue;
+ }
+
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp,
txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;
if (txp == first)
txp = txfrags;
@@ -481,20 +484,33 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
- for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
- shinfo->nr_frags++, txp++, gop++) {
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+ if (unlikely(!txp->size)) {
+ make_tx_response(queue, txp, 0,
+ XEN_NETIF_RSP_OKAY);
+ continue;
+ }
+
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;
+ }
+
+ if (shinfo->nr_frags) {
+ skb_shinfo(skb)->frag_list = nskb;
+ nskb = NULL;
}
+ }
- skb_shinfo(skb)->frag_list = nskb;
- } else if (nskb) {
+ if (nskb) {
/* A frag_list skb was allocated but it is no longer needed
- * because enough slots were converted to copy ops above.
+ * because enough slots were converted to copy ops above or some
+ * were empty.
*/
kfree_skb(nskb);
}
@@ -963,7 +979,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(ret == 0) ?
XEN_NETIF_RSP_OKAY :
XEN_NETIF_RSP_ERROR);
- push_tx_responses(queue);
continue;
}
@@ -975,7 +990,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
make_tx_response(queue, &txreq, extra_count,
XEN_NETIF_RSP_OKAY);
- push_tx_responses(queue);
continue;
}
@@ -1401,8 +1415,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
return work_done;
}
+static void _make_tx_response(struct xenvif_queue *queue,
+ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+ s8 status)
+{
+ RING_IDX i = queue->tx.rsp_prod_pvt;
+ struct xen_netif_tx_response *resp;
+
+ resp = RING_GET_RESPONSE(&queue->tx, i);
+ resp->id = txp->id;
+ resp->status = status;
+
+ while (extra_count-- != 0)
+ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+
+ queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+ int notify;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+}
+
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
- u8 status)
+ s8 status)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
@@ -1412,8 +1453,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_lock_irqsave(&queue->response_lock, flags);
- make_tx_response(queue, &pending_tx_info->req,
- pending_tx_info->extra_count, status);
+ _make_tx_response(queue, &pending_tx_info->req,
+ pending_tx_info->extra_count, status);
/* Release the pending index before pushing the Tx response so
* it's available before a new Tx request is pushed by the
@@ -1427,32 +1468,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_unlock_irqrestore(&queue->response_lock, flags);
}
-
static void make_tx_response(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp,
+ const struct xen_netif_tx_request *txp,
unsigned int extra_count,
- s8 st)
+ s8 status)
{
- RING_IDX i = queue->tx.rsp_prod_pvt;
- struct xen_netif_tx_response *resp;
-
- resp = RING_GET_RESPONSE(&queue->tx, i);
- resp->id = txp->id;
- resp->status = st;
-
- while (extra_count-- != 0)
- RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+ unsigned long flags;
- queue->tx.rsp_prod_pvt = ++i;
-}
+ spin_lock_irqsave(&queue->response_lock, flags);
-static void push_tx_responses(struct xenvif_queue *queue)
-{
- int notify;
+ _make_tx_response(queue, txp, extra_count, status);
+ push_tx_responses(queue);
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
- if (notify)
- notify_remote_via_irq(queue->tx_irq);
+ spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
@@ -1750,5 +1778,6 @@ static void __exit netback_fini(void)
}
module_exit(netback_fini);
+MODULE_DESCRIPTION("Xen backend network device module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
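
The xen-netback hunks above amount to a lock-wrapper split: _make_tx_response() fills the ring entry and expects the caller to hold response_lock (as xenvif_idx_release() does), while make_tx_response() becomes the self-locking wrapper that also pushes the responses, which is why the xenvif_tx_err() and build_gops callers could drop their own locking and push_tx_responses() calls. A generic, hedged sketch of that shape, with all names illustrative:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(resp_lock);

/* Bare helper: caller must already hold resp_lock. */
static void __emit_response(int status)
{
        /* write the shared ring entry here */
        (void)status;
}

/* Self-locking wrapper for callers that do not hold the lock. */
static void emit_response(int status)
{
        unsigned long flags;

        spin_lock_irqsave(&resp_lock, flags);
        __emit_response(status);
        /* push and notify the peer while still under the lock */
        spin_unlock_irqrestore(&resp_lock, flags);
}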
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index a23ab5c968b9..a3455f1d67fa 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
index ee341b83eeba..6f7e7a8fa5ae 100644
--- a/drivers/nvme/common/keyring.c
+++ b/drivers/nvme/common/keyring.c
@@ -111,7 +111,7 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
* should be preferred to 'generated' PSKs,
* and SHA-384 should be preferred to SHA-256.
*/
-struct nvme_tls_psk_priority_list {
+static struct nvme_tls_psk_priority_list {
bool generated;
enum nvme_tcp_tls_cipher cipher;
} nvme_tls_psk_prio[] = {
@@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void)
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("NVMe Keyring implementation");
module_init(nvme_keyring_init);
module_exit(nvme_keyring_exit);
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 596bb11eeba5..c727cd1f264b 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set,
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
bool dead = false, freeze = false;
unsigned long flags;
@@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
if (csts & NVME_CSTS_CFS)
dead = true;
- if (anv->ctrl.state == NVME_CTRL_LIVE ||
- anv->ctrl.state == NVME_CTRL_RESETTING) {
+ if (state == NVME_CTRL_LIVE ||
+ state == NVME_CTRL_RESETTING) {
freeze = true;
nvme_start_freeze(&anv->ctrl);
}
@@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
unsigned long flags;
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
- if (anv->ctrl.state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
/*
* From rdma.c:
* If we are resetting, connecting or deleting we should
@@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work)
u32 boot_status, aqa;
struct apple_nvme *anv =
container_of(work, struct apple_nvme, ctrl.reset_work);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
- if (anv->ctrl.state != NVME_CTRL_RESETTING) {
- dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
- anv->ctrl.state);
+ if (state != NVME_CTRL_RESETTING) {
+ dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 72c0525c75f5..a264b3ae078b 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context {
static struct workqueue_struct *nvme_auth_wq;
-#define nvme_auth_flags_from_qid(qid) \
- (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
-#define nvme_auth_queue_from_qid(ctrl, qid) \
- (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
-
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
@@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
void *data, size_t data_len, bool auth_send)
{
struct nvme_command cmd = {};
- blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
- struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
+ struct request_queue *q = ctrl->fabrics_q;
int ret;
+ if (qid != 0) {
+ flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
+ q = ctrl->connect_q;
+ }
+
cmd.auth_common.opcode = nvme_fabrics_command;
cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
cmd.auth_common.spsp0 = 0x01;
@@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
}
ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
- qid == 0 ? NVME_QID_ANY : qid,
- 0, flags);
+ qid == 0 ? NVME_QID_ANY : qid, flags);
if (ret > 0)
dev_warn(ctrl->device,
"qid %d auth_send failed with status %d\n", qid, ret);
@@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
* If the ctrl is not connected, bail as reconnect will handle
* authentication.
*/
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
/* Authenticate admin queue first */
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 20f46c230885..6f2ebb5fcdb0 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = {
[NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
};
-const unsigned char *nvme_get_error_status_str(u16 status)
+const char *nvme_get_error_status_str(u16 status)
{
status &= 0x7ff;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
- return nvme_statuses[status & 0x7ff];
+ return nvme_statuses[status];
return "Unknown";
}
-const unsigned char *nvme_get_opcode_str(u8 opcode)
+const char *nvme_get_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
return nvme_ops[opcode];
@@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+const char *nvme_get_admin_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
return nvme_admin_ops[opcode];
@@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+const char *nvme_get_fabrics_opcode_str(u8 opcode) {
if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
return nvme_fabrics_ops[opcode];
return "Unknown";
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0af612387083..0a96362912ce 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -338,6 +338,30 @@ static void nvme_log_error(struct request *req)
nr->status & NVME_SC_DNR ? "DNR " : "");
}
+static void nvme_log_err_passthru(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
+
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
+ "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
+ ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
+ ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "",
+ nr->cmd->common.cdw10,
+ nr->cmd->common.cdw11,
+ nr->cmd->common.cdw12,
+ nr->cmd->common.cdw13,
+ nr->cmd->common.cdw14,
+ nr->cmd->common.cdw15);
+}
+
enum nvme_disposition {
COMPLETE,
RETRY,
@@ -385,8 +409,12 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
- nvme_log_error(req);
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@@ -679,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
- if (req->q->queuedata)
+ struct nvme_request *nr = nvme_req(req);
+ bool logging_enabled;
+
+ if (req->q->queuedata) {
+ struct nvme_ns *ns = req->q->disk->private_data;
+
+ logging_enabled = ns->head->passthru_err_log_enabled;
req->timeout = NVME_IO_TIMEOUT;
- else /* no queuedata implies admin queue */
+ } else { /* no queuedata implies admin queue */
+ logging_enabled = nr->ctrl->passthru_err_log_enabled;
req->timeout = NVME_ADMIN_TIMEOUT;
+ }
+
+ if (!logging_enabled)
+ req->rq_flags |= RQF_QUIET;
/* passthru commands should let the driver set the SGL flags */
cmd->common.flags &= ~NVME_CMD_SGL_ALL;
@@ -691,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
if (req->mq_hctx->type == HCTX_TYPE_POLL)
req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
- req->rq_flags |= RQF_QUIET;
- memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+ memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
@@ -721,7 +759,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live)
+ bool queue_live, enum nvme_ctrl_state state)
{
struct nvme_request *req = nvme_req(rq);
@@ -742,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
* command, which is required to set the queue live in the
* appropriate states.
*/
- switch (nvme_ctrl_state(ctrl)) {
+ switch (state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
(req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -1051,20 +1089,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head, blk_mq_req_flags_t flags)
+ int qid, nvme_submit_flags_t flags)
{
struct request *req;
int ret;
+ blk_mq_req_flags_t blk_flags = 0;
+ if (flags & NVME_SUBMIT_NOWAIT)
+ blk_flags |= BLK_MQ_REQ_NOWAIT;
+ if (flags & NVME_SUBMIT_RESERVED)
+ blk_flags |= BLK_MQ_REQ_RESERVED;
if (qid == NVME_QID_ANY)
- req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
else
- req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
+ if (flags & NVME_SUBMIT_RETRY)
+ req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1072,7 +1117,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- ret = nvme_execute_rq(req, at_head);
+ ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
@@ -1085,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1108,6 +1153,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
} else {
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+
+ /* Ignore execution restrictions if any relaxation bits are set */
+ if (effects & NVME_CMD_EFFECTS_CSER_MASK)
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
}
return effects;
@@ -1560,7 +1609,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1740,13 +1789,13 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
struct nvme_ns_head *head)
{
struct request_queue *queue = disk->queue;
- u32 size = queue_logical_block_size(queue);
+ u32 max_discard_sectors;
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX))
- ctrl->max_discard_sectors =
- nvme_lba_to_sect(head, ctrl->dmrsl);
-
- if (ctrl->max_discard_sectors == 0) {
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) {
+ max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl);
+ } else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
+ max_discard_sectors = UINT_MAX;
+ } else {
blk_queue_max_discard_sectors(queue, 0);
return;
}
@@ -1754,14 +1803,22 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- queue->limits.discard_granularity = size;
-
- /* If discard is already enabled, don't reset queue limits */
+ /*
+ * If discard is already enabled, don't reset queue limits.
+ *
+ * This works around the fact that the block layer can't cope well with
+ * updating the hardware limits when overridden through sysfs. This is
+ * harmless because discard limits in NVMe are purely advisory.
+ */
if (queue->limits.max_discard_sectors)
return;
- blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
- blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
+ blk_queue_max_discard_sectors(queue, max_discard_sectors);
+ if (ctrl->dmrl)
+ blk_queue_max_discard_segments(queue, ctrl->dmrl);
+ else
+ blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+ queue->limits.discard_granularity = queue_logical_block_size(queue);
if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
@@ -2164,7 +2221,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- NVME_QID_ANY, 1, 0);
+ NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
}
static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
@@ -2930,14 +2987,6 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
struct nvme_id_ctrl_nvm *id;
int ret;
- if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
- ctrl->max_discard_sectors = UINT_MAX;
- ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
- } else {
- ctrl->max_discard_sectors = 0;
- ctrl->max_discard_segments = 0;
- }
-
/*
* Even though the NVMe spec explicitly states that MDTS is not applicable
* to write-zeroes, we are cautious and limit the size to the
@@ -2967,8 +3016,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
if (ret)
goto free_data;
- if (id->dmrl)
- ctrl->max_discard_segments = id->dmrl;
+ ctrl->dmrl = id->dmrl;
ctrl->dmrsl = le32_to_cpu(id->dmrsl);
if (id->wzsl)
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
@@ -3715,6 +3763,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
+ /*
+ * Set ns->disk->device->driver_data to ns so we can access
+ * ns->head->passthru_err_log_enabled in
+ * nvme_io_passthru_err_log_enabled_[store | show]().
+ */
+ dev_set_drvdata(disk_to_dev(ns->disk), ns);
+
return;
out_cleanup_ns_from_list:
@@ -4139,6 +4194,7 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
struct nvme_fw_slot_info_log *log;
+ u8 next_fw_slot, cur_fw_slot;
log = kmalloc(sizeof(*log), GFP_KERNEL);
if (!log)
@@ -4150,13 +4206,15 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
goto out_free_log;
}
- if (log->afi & 0x70 || !(log->afi & 0x7)) {
+ cur_fw_slot = log->afi & 0x7;
+ next_fw_slot = (log->afi & 0x70) >> 4;
+ if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
dev_info(ctrl->device,
"Firmware is activated after next Controller Level Reset\n");
goto out_free_log;
}
- memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1],
+ memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
sizeof(ctrl->subsys->firmware_rev));
out_free_log:
@@ -4515,6 +4573,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
int ret;
WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+ ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
@@ -4852,5 +4911,6 @@ static void __exit nvme_core_exit(void)
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host core framework");
module_init(nvme_core_init);
module_exit(nvme_core_exit);
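
For reference, the firmware-slot hunk above decodes the Firmware Slot Information log's AFI byte as specified by NVMe: bits 2:0 carry the currently active slot, bits 6:4 the slot that becomes active after the next controller-level reset. A hedged helper sketch with illustrative names:

#include <linux/types.h>

static inline u8 afi_active_slot(u8 afi)
{
        return afi & 0x7;               /* bits 2:0 */
}

static inline u8 afi_next_reset_slot(u8 afi)
{
        return (afi >> 4) & 0x7;        /* bits 6:4 */
}

With those, the new condition reads naturally: bail out when no slot is active, or when a pending slot differs from the active one.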
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index b5752a77ad98..495c171daead 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), NVME_QID_ANY,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_NOWAIT |
+ NVME_SUBMIT_RESERVED);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -525,11 +527,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), qid,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_RESERVED |
+ NVME_SUBMIT_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
+ goto out_free_data;
}
result = le32_to_cpu(res.u32);
if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
@@ -1488,6 +1493,7 @@ static void __exit nvmf_exit(void)
}
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVMe host fabrics library");
module_init(nvmf_init);
module_exit(nvmf_exit);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index fbaee5a7be19..06cc54851b1b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -185,9 +185,11 @@ static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
- if (ctrl->state == NVME_CTRL_DELETING ||
- ctrl->state == NVME_CTRL_DELETING_NOIO ||
- ctrl->state == NVME_CTRL_DEAD ||
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (state == NVME_CTRL_DELETING ||
+ state == NVME_CTRL_DELETING_NOIO ||
+ state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
!uuid_equal(&opts->host->id, &ctrl->opts->host->id))
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 16847a316421..68a5d971657b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
/*
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
- if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
- complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -2574,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ u16 qnum = op->queue->qnum;
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
@@ -2582,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
* will detect the aborted io and will fail the connection.
*/
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
"x%08x/x%08x\n",
- ctrl->cnum, op->queue->qnum, sqe->common.opcode,
- sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
+ nvme_fabrics_opcode_str(qnum, sqe),
+ sqe->common.cdw10, sqe->common.cdw11);
if (__nvme_fc_abort_op(ctrl, op))
nvme_fc_error_recovery(ctrl, "io timeout abort failed");
@@ -3575,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
flush_delayed_work(&ctrl->connect_work);
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+ "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
return &ctrl->ctrl;
@@ -3894,10 +3889,6 @@ static int __init nvme_fc_init_module(void)
{
int ret;
- nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
- if (!nvme_fc_wq)
- return -ENOMEM;
-
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3915,7 +3906,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- goto out_destroy_wq;
+ return ret;
}
/*
@@ -3939,8 +3930,6 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
-out_destroy_wq:
- destroy_workqueue(nvme_fc_wq);
return ret;
}
@@ -3960,48 +3949,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
spin_unlock(&rport->lock);
}
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
{
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
-
- list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
- list_for_each_entry(rport, &lport->endp_list, endp_list) {
- nvme_fc_delete_controllers(rport);
- }
- }
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
unsigned long flags;
- bool need_cleanup = false;
spin_lock_irqsave(&nvme_fc_lock, flags);
- nvme_fc_waiting_to_unload = true;
- if (!list_empty(&nvme_fc_lport_list)) {
- need_cleanup = true;
- nvme_fc_cleanup_for_unload();
- }
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+ list_for_each_entry(rport, &lport->endp_list, endp_list)
+ nvme_fc_delete_controllers(rport);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- if (need_cleanup) {
- pr_info("%s: waiting for ctlr deletes\n", __func__);
- wait_for_completion(&nvme_fc_unload_proceed);
- pr_info("%s: ctrl deletes complete\n", __func__);
- }
+ flush_workqueue(nvme_delete_wq);
nvmf_unregister_transport(&nvme_fc_transport);
- ida_destroy(&nvme_fc_local_port_cnt);
- ida_destroy(&nvme_fc_ctrl_cnt);
-
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
- destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);
+MODULE_DESCRIPTION("NVMe host FC transport driver");
MODULE_LICENSE("GPL v2");
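
The fc.c exit rework above drops the private nvme_fc_wq and its completion handshake: after queuing the controller deletions, flushing the shared nvme_delete_wq is enough to guarantee every delete work item has run before the module text goes away. A generic teardown sketch under that assumption, with demo_* names that are illustrative:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_delete_wq;

static int __init demo_init(void)
{
        demo_delete_wq = alloc_workqueue("demo_delete_wq", 0, 0);
        return demo_delete_wq ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
        /* deletions queued earlier via queue_work(demo_delete_wq, ...)
         * are guaranteed to have completed once the flush returns */
        flush_workqueue(demo_delete_wq);
        destroy_workqueue(demo_delete_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");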
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 18f5c1be5d67..3dfd5ae99ae0 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -228,7 +228,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
length = (io.nblocks + 1) << ns->head->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
- ns->head->ms == sizeof(struct t10_pi_tuple)) {
+ (ns->head->ms == ns->head->pi_size)) {
/*
* Protection information is stripped/inserted by the
* controller.
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2dd4137a08b2..74de1e64aeea 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
@@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
+
/*
* We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
* still be able to complete assuming that the controller is connected.
* Otherwise it will fail immediately and return to the requeue list.
*/
- if (ns->ctrl->state != NVME_CTRL_LIVE &&
- ns->ctrl->state != NVME_CTRL_DELETING)
+ if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
!test_bit(NVME_NS_READY, &ns->flags))
@@ -331,7 +332,7 @@ out:
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
- return ns->ctrl->state == NVME_CTRL_LIVE &&
+ return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
ns->ana_state == NVME_ANA_OPTIMIZED;
}
@@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
- switch (ns->ctrl->state) {
+ switch (nvme_ctrl_state(ns->ctrl)) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
@@ -667,7 +668,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
- ns->ctrl->state == NVME_CTRL_LIVE)
+ nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -748,7 +749,7 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
nvme_read_ana_log(ctrl);
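
The multipath conversions above all follow one rule: fetch the controller state once through nvme_ctrl_state() (essentially a READ_ONCE() snapshot defined in nvme.h) into a local variable, so every comparison within a function sees the same value even if the state changes concurrently. A generic sketch of the idiom, assuming illustrative demo_* types:

#include <linux/compiler.h>
#include <linux/types.h>

enum demo_state { DEMO_LIVE, DEMO_DELETING, DEMO_DEAD };

struct demo_ctrl {
        enum demo_state state;  /* updated concurrently elsewhere */
};

static bool demo_path_is_disabled(struct demo_ctrl *ctrl)
{
        /* one snapshot, so both tests below agree with each other */
        enum demo_state state = READ_ONCE(ctrl->state);

        return state != DEMO_LIVE && state != DEMO_DELETING;
}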
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4be7f6822966..7b87763e2f8a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -263,6 +263,7 @@ enum nvme_ctrl_flags {
struct nvme_ctrl {
bool comp_seen;
bool identified;
+ bool passthru_err_log_enabled;
enum nvme_ctrl_state state;
spinlock_t lock;
struct mutex scan_lock;
@@ -303,14 +304,13 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u32 max_segments;
u32 max_integrity_segments;
- u32 max_discard_sectors;
- u32 max_discard_segments;
u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
u32 max_zone_append;
#endif
u16 crdt[3];
u16 oncs;
+ u8 dmrl;
u32 dmrsl;
u16 oacs;
u16 sqsize;
@@ -455,6 +455,7 @@ struct nvme_ns_head {
struct list_head entry;
struct kref ref;
bool shared;
+ bool passthru_err_log_enabled;
int instance;
struct nvme_effects_log *effects;
u64 nuse;
@@ -523,7 +524,6 @@ struct nvme_ns {
struct device cdev_device;
struct nvme_fault_inject fault_inject;
-
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
@@ -806,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live);
+ bool queue_live, enum nvme_ctrl_state state);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE))
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (likely(state == NVME_CTRL_LIVE))
return true;
- if (ctrl->ops->flags & NVME_F_FABRICS &&
- ctrl->state == NVME_CTRL_DELETING)
+ if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
return queue_live;
- return __nvme_check_ready(ctrl, rq, queue_live);
+ return __nvme_check_ready(ctrl, rq, queue_live, state);
}
/*
@@ -837,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}
+/*
+ * Flags for __nvme_submit_sync_cmd()
+ */
+typedef __u32 __bitwise nvme_submit_flags_t;
+
+enum {
+ /* Insert request at the head of the queue */
+ NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
+ /* Set BLK_MQ_REQ_NOWAIT when allocating request */
+ NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
+ /* Set BLK_MQ_REQ_RESERVED when allocating request */
+ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
+ /* Retry command when NVME_SC_DNR is not set in the result */
+ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
+};
+
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head,
- blk_mq_req_flags_t flags);
+ int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
@@ -932,6 +948,10 @@ extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+ return disk->fops == &nvme_ns_head_ops;
+}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -1009,6 +1029,10 @@ static inline void nvme_mpath_start_request(struct request *rq)
static inline void nvme_mpath_end_request(struct request *rq)
{
}
+static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
+{
+ return false;
+}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
@@ -1037,7 +1061,10 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
- return dev_to_disk(dev)->private_data;
+ struct gendisk *disk = dev_to_disk(dev);
+
+ WARN_ON(nvme_disk_is_ns_head(disk));
+ return disk->private_data;
}
#ifdef CONFIG_NVME_HWMON
@@ -1114,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
}
#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const unsigned char *nvme_get_error_status_str(u16 status);
-const unsigned char *nvme_get_opcode_str(u8 opcode);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_get_error_status_str(u16 status)
+static inline const char *nvme_get_error_status_str(u16 status)
{
return "I/O Error";
}
-static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+static inline const char *nvme_get_opcode_str(u8 opcode)
{
return "I/O Cmd";
}
-static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
-static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
{
return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
{
- if (opcode == nvme_fabrics_command)
- return nvme_get_fabrics_opcode_str(fctype);
return qid ? nvme_get_opcode_str(opcode) :
nvme_get_admin_opcode_str(opcode);
}
+
+static inline const char *nvme_fabrics_opcode_str(
+ int qid, const struct nvme_command *cmd)
+{
+ if (nvme_is_fabrics(cmd))
+ return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+ return nvme_opcode_str(qid, cmd->common.opcode);
+}
#endif /* _NVME_H */
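
The nvme_submit_flags_t typedef above relies on the kernel's __bitwise/__force annotations: sparse then treats it as a type distinct from blk_mq_req_flags_t, making the explicit translation inside __nvme_submit_sync_cmd() the single checkable crossing point between the two flag domains. A stripped-down sketch of the pattern, with demo_* names that are illustrative:

#include <linux/types.h>

typedef __u32 __bitwise demo_flags_t;

enum {
        DEMO_NOWAIT   = (__force demo_flags_t)(1 << 0),
        DEMO_RESERVED = (__force demo_flags_t)(1 << 1),
};

static inline u32 demo_flags_to_raw(demo_flags_t flags)
{
        u32 raw = 0;

        /* explicit, sparse-visible translation into the other domain */
        if (flags & DEMO_NOWAIT)
                raw |= 0x1;
        if (flags & DEMO_RESERVED)
                raw |= 0x2;
        return raw;
}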
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 61af7ff1a9d6..e6267a6aa380 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1284,6 +1284,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct request *abort_req;
struct nvme_command cmd = { };
u32 csts = readl(dev->bar + NVME_REG_CSTS);
+ u8 opcode;
/* If PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
@@ -1310,8 +1311,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
dev_warn(dev->ctrl.device,
- "I/O %d QID %d timeout, completion polled\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) QID %d timeout, completion polled\n",
+ req->tag, nvme_cid(req), nvmeq->qid);
return BLK_EH_DONE;
}
@@ -1327,8 +1328,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
- "I/O %d QID %d timeout, disable controller\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) QID %d timeout, disable controller\n",
+ req->tag, nvme_cid(req), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
nvme_dev_disable(dev, true);
return BLK_EH_DONE;
@@ -1343,10 +1344,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* command was already aborted once before and still hasn't been
* returned to the driver, or if this is the admin queue.
*/
+ opcode = nvme_req(req)->cmd->common.opcode;
if (!nvmeq->qid || iod->aborted) {
dev_warn(dev->ctrl.device,
- "I/O %d QID %d timeout, reset controller\n",
- req->tag, nvmeq->qid);
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
+ req->tag, nvme_cid(req), opcode,
+ nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
goto disable;
}
@@ -1362,10 +1365,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d (%s) QID %d timeout, aborting\n",
- req->tag,
- nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
- nvmeq->qid);
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
+ req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
+ nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
+ blk_rq_bytes(req));
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -2743,10 +2746,10 @@ static void nvme_reset_work(struct work_struct *work)
* controller around but remove all namespaces.
*/
if (dev->online_queues > 1) {
+ nvme_dbbuf_set(dev);
nvme_unquiesce_io_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
nvme_pci_update_nr_queues(dev);
- nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
} else {
dev_warn(dev->ctrl.device, "IO queues lost\n");
@@ -3408,6 +3411,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
@@ -3538,5 +3543,6 @@ static void __exit nvme_exit(void)
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index 391b1465ebfd..fc3eed00f9ff 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -98,7 +98,7 @@ static int nvme_send_pr_command(struct block_device *bdev,
struct nvme_command *c, void *data, unsigned int data_len)
{
if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- bdev->bd_disk->fops == &nvme_ns_head_ops)
+ nvme_disk_is_ns_head(bdev->bd_disk))
return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c89503da24d7..20fdd40b1879 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1410,6 +1410,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
struct nvme_ns *ns = rq->q->queuedata;
struct bio *bio = rq->bio;
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ u32 xfer_len;
int nr;
req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
@@ -1422,8 +1424,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
- nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
- req->mr->sig_attrs, ns->head->pi_type);
+ nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
@@ -1441,7 +1442,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
IB_ACCESS_REMOTE_WRITE;
sg->addr = cpu_to_le64(req->mr->iova);
- put_unaligned_le24(req->mr->length, sg->length);
+ xfer_len = req->mr->length;
+ /* Check if PI is added by the HW */
+ if (!pi_count)
+ xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
+ put_unaligned_le24(xfer_len, sg->length);
put_unaligned_le32(req->mr->rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
@@ -1946,9 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct nvme_command *cmd = req->req.cmd;
+ int qid = nvme_rdma_queue_idx(queue);
- dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
- rq->tag, nvme_rdma_queue_idx(queue));
+ dev_warn(ctrl->ctrl.device,
+ "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
+ rq->tag, nvme_cid(rq), cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
/*
@@ -2291,8 +2300,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2395,4 +2404,5 @@ static void __exit nvme_rdma_cleanup_module(void)
module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);
+MODULE_DESCRIPTION("NVMe host RDMA transport driver");
MODULE_LICENSE("GPL v2");
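
A worked instance of the rdma length fix-up above, under assumed numbers: with 4096-byte logical blocks (bi->interval_exp = 12), 8-byte PI tuples (pi_size = 8) and a 64 KiB payload where the HW does not interleave the protection information (pi_count == 0), the SGL must advertise 65536 + (65536 >> 12) * 8 = 65536 + 128 = 65664 bytes on the wire.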
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index ac24ad102380..f2832f70e7e0 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -35,16 +35,71 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf,
+ ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+
+ ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
- if (disk->fops == &nvme_bdev_ops)
- return nvme_get_ns_from_dev(dev)->head;
- else
+ if (nvme_disk_is_ns_head(disk))
return disk->private_data;
+ return nvme_get_ns_from_dev(dev)->head;
}
+static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+
+ return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+ head->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_adm_passthru_err_log_enabled =
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR,
+ nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
+
+static struct device_attribute dev_attr_io_passthru_err_log_enabled =
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR,
+ nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
+
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -209,6 +264,7 @@ static struct attribute *nvme_ns_attrs[] = {
&dev_attr_ana_grpid.attr,
&dev_attr_ana_state.attr,
#endif
+ &dev_attr_io_passthru_err_log_enabled.attr,
NULL,
};
@@ -233,7 +289,8 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
}
#ifdef CONFIG_NVME_MULTIPATH
if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
- if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
+ /* per-path attr */
+ if (nvme_disk_is_ns_head(dev_to_disk(dev)))
return 0;
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;
@@ -311,6 +368,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned state = (unsigned)nvme_ctrl_state(ctrl);
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
@@ -321,9 +379,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_DEAD] = "dead",
};
- if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
- state_name[ctrl->state])
- return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+ if (state < ARRAY_SIZE(state_name) && state_name[state])
+ return sysfs_emit(buf, "%s\n", state_name[state]);
return sysfs_emit(buf, "unknown state\n");
}
@@ -655,6 +712,7 @@ static struct attribute *nvme_dev_attrs[] = {
#ifdef CONFIG_NVME_TCP_TLS
&dev_attr_tls_key.attr,
#endif
+ &dev_attr_adm_passthru_err_log_enabled.attr,
NULL
};
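
Usage note for the new attributes above: kstrtobool() accepts the "0"/"1", "y"/"n" and "on"/"off" spellings, and the show handlers mirror the stored flag back as "on" or "off", so writes and reads of passthru_err_log_enabled round-trip cleanly.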
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 08805f027810..3692b56cb58d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1344,7 +1344,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
- struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
unsigned int noreclaim_flag;
@@ -1355,11 +1354,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
- if (queue->pf_cache.va) {
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
- queue->pf_cache.va = NULL;
- }
+ page_frag_cache_drain(&queue->pf_cache);
noreclaim_flag = memalloc_noreclaim_save();
/* ->sock will be released by fput() */
@@ -1922,14 +1917,13 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
ctrl->opts->subsysnqn);
if (!pskid) {
dev_err(ctrl->device, "no valid PSK found\n");
- ret = -ENOKEY;
- goto out_free_queue;
+ return -ENOKEY;
}
}
ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
if (ret)
- goto out_free_queue;
+ return ret;
ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
if (ret)
@@ -2429,13 +2423,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
- u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+ struct nvme_command *cmd = &pdu->cmd;
int qid = nvme_tcp_queue_id(req->queue);
dev_warn(ctrl->device,
- "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
- nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
- opc, nvme_opcode_str(qid, opc, fctype));
+ "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
+ rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
/*
@@ -2754,8 +2748,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
@@ -2827,4 +2821,5 @@ static void __exit nvme_tcp_cleanup_module(void)
module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);
+MODULE_DESCRIPTION("NVMe host TCP transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index d26aa30f8702..8658e9c08534 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_NS_CHANGED,
NVME_LOG_CHANGED_NS);
}
@@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
continue;
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
}
mutex_unlock(&subsys->lock);
@@ -1705,4 +1705,5 @@ static void __exit nvmet_exit(void)
module_init(nvmet_init);
module_exit(nvmet_exit);
+MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 668d257fa986..68e82ccc0e4e 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port,
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
return;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d8da840a1c0e..9964ffe347d2 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -209,7 +209,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status = 0;
+ u16 status;
int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
@@ -290,7 +290,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl;
u16 qid = le16_to_cpu(c->qid);
- u16 status = 0;
+ u16 status;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index bd59990b5250..fd229f310c93 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
+
+ struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -145,7 +147,6 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
- struct rcu_head rcu;
/* array of fcp_iods */
struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
} __aligned(sizeof(unsigned long long));
@@ -166,10 +167,9 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
- struct rcu_head rcu;
};
@@ -249,6 +249,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+{
+ struct nvmet_fc_tgtport *tgtport =
+ container_of(work, struct nvmet_fc_tgtport, put_work);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
@@ -360,7 +367,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
if (!lsop->req_queued) {
spin_unlock_irqrestore(&tgtport->lock, flags);
- return;
+ goto out_putwork;
}
list_del(&lsop->lsreq_list);
@@ -373,7 +380,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
(lsreq->rqstlen + lsreq->rsplen),
DMA_BIDIRECTIONAL);
- nvmet_fc_tgtport_put(tgtport);
+out_putwork:
+ queue_work(nvmet_wq, &tgtport->put_work);
}
static int
@@ -489,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
* message is normal. Otherwise, send unless the hostport has
* already been invalidated by the lldd.
*/
- if (!tgtport->ops->ls_req || !assoc->hostport ||
- assoc->hostport->invalid)
+ if (!tgtport->ops->ls_req || assoc->hostport->invalid)
return;
lsop = kzalloc((sizeof(*lsop) +
@@ -802,14 +809,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue)
return NULL;
- if (!nvmet_fc_tgt_a_get(assoc))
- goto out_free_queue;
-
queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
if (!queue->work_q)
- goto out_a_put;
+ goto out_free_queue;
queue->qid = qid;
queue->sqsize = sqsize;
@@ -831,15 +835,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- rcu_assign_pointer(assoc->queues[qid], queue);
+ assoc->queues[qid] = queue;
return queue;
out_fail_iodlist:
nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
destroy_workqueue(queue->work_q);
-out_a_put:
- nvmet_fc_tgt_a_put(assoc);
out_free_queue:
kfree(queue);
return NULL;
@@ -852,15 +854,11 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
-
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
- nvmet_fc_tgt_a_put(queue->assoc);
-
destroy_workqueue(queue->work_q);
- kfree_rcu(queue, rcu);
+ kfree(queue);
}
static void
@@ -969,7 +967,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = rcu_dereference(assoc->queues[qid]);
+ queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
@@ -1031,7 +1029,7 @@ nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host))
- return (host);
+ return host;
}
}
@@ -1078,8 +1076,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
/* new allocation not needed */
kfree(newhost);
newhost = match;
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
} else {
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
@@ -1094,23 +1090,54 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_tgtport_get(assoc->tgtport);
+ queue_work(nvmet_wq, &assoc->del_work);
+}
+
+static bool
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+{
+ struct nvmet_fc_tgt_assoc *a;
+
+ list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
+ if (association_id == a->association_id)
+ return true;
+ }
+
+ return false;
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
- struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+ struct nvmet_fc_tgt_assoc *assoc;
unsigned long flags;
+ bool done;
u64 ran;
int idx;
- bool needrandom = true;
+
+ if (!tgtport->pe)
+ return NULL;
assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
if (!assoc)
@@ -1120,43 +1147,35 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (idx < 0)
goto out_free_assoc;
- if (!nvmet_fc_tgtport_get(tgtport))
- goto out_ida;
-
assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
if (IS_ERR(assoc->hostport))
- goto out_put;
+ goto out_ida;
assoc->tgtport = tgtport;
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
- INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
atomic_set(&assoc->terminating, 0);
- while (needrandom) {
+ done = false;
+ do {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
- needrandom = false;
- list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
- if (ran == tmpassoc->association_id) {
- needrandom = true;
- break;
- }
- }
- if (!needrandom) {
+ rcu_read_lock();
+ if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
+ done = true;
}
+ rcu_read_unlock();
spin_unlock_irqrestore(&tgtport->lock, flags);
- }
+ } while (!done);
return assoc;
-out_put:
- nvmet_fc_tgtport_put(tgtport);
out_ida:
ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
@@ -1172,13 +1191,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
+ int i;
+
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ nvmet_fc_delete_target_queue(assoc->queues[i]);
+ }
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1188,8 +1212,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
- kfree_rcu(assoc, rcu);
- nvmet_fc_tgtport_put(tgtport);
+ kfree(assoc);
}
static void
@@ -1208,7 +1231,7 @@ static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1217,29 +1240,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del_rcu(&assoc->a_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
- for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- rcu_read_lock();
- queue = rcu_dereference(assoc->queues[i]);
- if (!queue) {
- rcu_read_unlock();
- continue;
- }
+ synchronize_rcu();
- if (!nvmet_fc_tgt_q_get(queue)) {
- rcu_read_unlock();
- continue;
- }
- rcu_read_unlock();
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
+ /* ensure all in-flight I/Os have been processed */
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ flush_workqueue(assoc->queues[i]->work_q);
}
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
-
- nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
@@ -1415,6 +1430,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
+ INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -1492,9 +1508,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
rcu_read_unlock();
}
@@ -1540,16 +1555,14 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
- if (!assoc->hostport ||
- assoc->hostport->hosthandle != hosthandle)
+ if (assoc->hostport->hosthandle != hosthandle)
continue;
if (!nvmet_fc_tgt_a_get(assoc))
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1581,7 +1594,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
- queue = rcu_dereference(assoc->queues[0]);
+ queue = assoc->queues[0];
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
@@ -1593,9 +1606,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1625,6 +1637,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
+ flush_workqueue(nvmet_wq);
+
/*
* should terminate LS's as well. However, LS's will be generated
* at the tail end of association termination, so they likely don't
@@ -1870,9 +1884,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
- /* release get taken in nvmet_fc_find_target_assoc */
- nvmet_fc_tgt_a_put(assoc);
-
/*
* The rules for LS response says the response cannot
* go back until ABTS's have been sent for all outstanding
@@ -1887,8 +1898,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc->rcv_disconn = iod;
spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_assoc(assoc);
-
if (oldls) {
dev_info(tgtport->dev,
"{%d:%d} Multiple Disconnect Association LS's "
@@ -1904,6 +1913,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+
return false;
}
@@ -2540,8 +2552,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- if (tgtport->pe)
- fod->req.port = tgtport->pe->port;
+ if (!tgtport->pe)
+ goto transport_error;
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2902,6 +2915,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
nvmet_fc_portentry_unbind(pe);
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(pe->tgtport);
+
kfree(pe);
}
@@ -2933,6 +2949,9 @@ static int __init nvmet_fc_init_module(void)
static void __exit nvmet_fc_exit_module(void)
{
+ /* ensure any shutdown operations, e.g. controller deletion, have finished */
+ flush_workqueue(nvmet_wq);
+
/* sanity check - all lports should be removed */
if (!list_empty(&nvmet_fc_target_list))
pr_warn("%s: targetport list not empty\n", __func__);
@@ -2945,4 +2964,5 @@ static void __exit nvmet_fc_exit_module(void)
module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);
+MODULE_DESCRIPTION("NVMe target FC transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c65a73433c05..1471af250ea6 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
return ret;
@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
if (remoteport) {
rport = remoteport->private;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
}
@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
if (!tport->remoteport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&tport->lock);
- list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
spin_unlock(&tport->lock);
queue_work(nvmet_wq, &tport->ls_work);
return ret;
@@ -995,11 +995,6 @@ fcloop_nport_free(struct kref *ref)
{
struct fcloop_nport *nport =
container_of(ref, struct fcloop_nport, ref);
- unsigned long flags;
-
- spin_lock_irqsave(&fcloop_lock, flags);
- list_del(&nport->nport_list);
- spin_unlock_irqrestore(&fcloop_lock, flags);
kfree(nport);
}
@@ -1357,6 +1352,8 @@ __unlink_remote_port(struct fcloop_nport *nport)
nport->tport->remoteport = NULL;
nport->rport = NULL;
+ list_del(&nport->nport_list);
+
return rport;
}
@@ -1653,4 +1650,5 @@ static void __exit fcloop_exit(void)
module_init(fcloop_init);
module_exit(fcloop_exit);
+MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9cb434c58075..e589915ddef8 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
}
nvme_quiesce_admin_queue(&ctrl->ctrl);
- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
nvme_disable_ctrl(&ctrl->ctrl, true);
nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ if (state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO)
/* state change failure for non-deleted ctrl? */
WARN_ON_ONCE(1);
return;
@@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void)
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);
+MODULE_DESCRIPTION("NVMe target loop transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 4597bca43a6d..3a0f2c170f4c 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -37,6 +37,8 @@
#define NVMET_RDMA_MAX_MDTS 8
#define NVMET_RDMA_MAX_METADATA_MDTS 5
+#define NVMET_RDMA_BACKLOG 128
+
struct nvmet_rdma_srq;
struct nvmet_rdma_cmd {
@@ -1583,8 +1585,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
}
if (queue->host_qid == 0) {
- /* Let inflight controller teardown complete */
- flush_workqueue(nvmet_wq);
+ struct nvmet_rdma_queue *q;
+ int pending = 0;
+
+ /* Check for pending controller teardown */
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
+ if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
+ q->state == NVMET_RDMA_Q_DISCONNECTING)
+ pending++;
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+ if (pending > NVMET_RDMA_BACKLOG)
+ return NVME_SC_CONNECT_CTRL_BUSY;
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1880,7 +1893,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
- ret = rdma_listen(cm_id, 128);
+ ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
if (ret) {
pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
@@ -2091,5 +2104,6 @@ static void __exit nvmet_rdma_exit(void)
module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);
+MODULE_DESCRIPTION("NVMe target RDMA transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 4cc27856aa8f..2aa5762e9f50 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -24,6 +24,8 @@
#include "nvmet.h"
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA 0x400000 /* 4M arbitrary limit */
+#define NVMET_TCP_BACKLOG 128
static int param_store_val(const char *str, int *val, int min, int max)
{
@@ -923,7 +925,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
- icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
+ icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -978,13 +980,13 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
+ unsigned int exp_data_len;
if (likely(queue->nr_cmds)) {
if (unlikely(data->ttag >= queue->nr_cmds)) {
pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
queue->idx, data->ttag, queue->nr_cmds);
- nvmet_tcp_fatal_error(queue);
- return -EPROTO;
+ goto err_proto;
}
cmd = &queue->cmds[data->ttag];
} else {
@@ -995,19 +997,32 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
- /* FIXME: use path and transport errors */
- nvmet_req_complete(&cmd->req,
- NVME_SC_INVALID_FIELD | NVME_SC_DNR);
- return -EPROTO;
+ goto err_proto;
}
+ exp_data_len = le32_to_cpu(data->hdr.plen) -
+ nvmet_tcp_hdgst_len(queue) -
+ nvmet_tcp_ddgst_len(queue) -
+ sizeof(*data);
+
cmd->pdu_len = le32_to_cpu(data->data_length);
+ if (unlikely(cmd->pdu_len != exp_data_len ||
+ cmd->pdu_len == 0 ||
+ cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+ pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+ goto err_proto;
+ }
cmd->pdu_recv = 0;
nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
return 0;
+
+err_proto:
+ /* FIXME: use proper transport errors */
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
}
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
@@ -1576,7 +1591,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
- struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1600,8 +1614,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ page_frag_cache_drain(&queue->pf_cache);
kfree(queue);
}
@@ -1768,7 +1781,7 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
(int)sizeof(struct nvme_tcp_icreq_pdu));
if (hdr->type == nvme_tcp_icreq &&
hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
- hdr->plen == (__le32)sizeof(struct nvme_tcp_icreq_pdu)) {
+ hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
pr_debug("queue %d: icreq detected\n",
queue->idx);
return len;
@@ -2053,7 +2066,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
goto err_sock;
}
- ret = kernel_listen(port->sock, 128);
+ ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
if (ret) {
pr_err("failed to listen %d on port sock\n", ret);
goto err_sock;
@@ -2119,8 +2132,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
container_of(sq, struct nvmet_tcp_queue, nvme_sq);
if (sq->qid == 0) {
- /* Let inflight controller teardown complete */
- flush_workqueue(nvmet_wq);
+ struct nvmet_tcp_queue *q;
+ int pending = 0;
+
+ /* Check for pending controller teardown */
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
+ if (q->nvme_sq.ctrl == sq->ctrl &&
+ q->state == NVMET_TCP_Q_DISCONNECTING)
+ pending++;
+ }
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ if (pending > NVMET_TCP_BACKLOG)
+ return NVME_SC_CONNECT_CTRL_BUSY;
}
queue->nr_cmds = sq->size * 2;
@@ -2190,10 +2214,12 @@ static void __exit nvmet_tcp_exit(void)
flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
+ ida_destroy(&nvmet_tcp_queue_ida);
}
module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);
+MODULE_DESCRIPTION("NVMe target TCP transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index bff454d46255..6ee1f3db81d0 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
return ret;
}
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
* If we can know the extra data of the connect command in this stage,
* we can update this print statement later.
*/
- if (ctrl)
- trace_seq_printf(p, "%d", ctrl->cntlid);
+ if (ctrl_id)
+ trace_seq_printf(p, "%d", ctrl_id);
else
trace_seq_printf(p, "_");
trace_seq_putc(p, 0);
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index 6109b3806b12..7f7ebf9558e5 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \
nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
-#define __print_ctrl_name(ctrl) \
- nvmet_trace_ctrl_name(p, ctrl)
+const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
+#define __print_ctrl_id(ctrl_id) \
+ nvmet_trace_ctrl_id(p, ctrl_id)
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
#define __print_disk_name(name) \
nvmet_trace_disk_name(p, name)
#ifndef TRACE_HEADER_MULTI_READ
-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
+static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
{
- return req->sq->ctrl;
+ /*
+ * The queue and controller pointers are not valid until an association
+ * has been established.
+ */
+ if (!req->sq || !req->sq->ctrl)
+ return 0;
+ return req->sq->ctrl->cntlid;
}
static inline void __assign_req_name(char *name, struct nvmet_req *req)
@@ -53,8 +59,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
return;
}
- strncpy(name, req->ns->device_path,
- min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
+ strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
}
#endif
@@ -63,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
TP_ARGS(req, cmd),
TP_STRUCT__entry(
__field(struct nvme_command *, cmd)
- __field(struct nvmet_ctrl *, ctrl)
+ __field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(u16, cid)
@@ -76,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
),
TP_fast_assign(
__entry->cmd = cmd;
- __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
@@ -85,12 +90,12 @@ TRACE_EVENT(nvmet_req_init,
__entry->flags = cmd->common.flags;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
- memcpy(__entry->cdw10, &cmd->common.cdw10,
+ memcpy(__entry->cdw10, &cmd->common.cdws,
sizeof(__entry->cdw10));
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
"meta=%#llx, cmd=(%s, %s)",
- __print_ctrl_name(__entry->ctrl),
+ __print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
@@ -104,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
TP_PROTO(struct nvmet_req *req),
TP_ARGS(req),
TP_STRUCT__entry(
- __field(struct nvmet_ctrl *, ctrl)
+ __field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(int, cid)
@@ -112,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
__field(u16, status)
),
TP_fast_assign(
- __entry->ctrl = nvmet_req_to_ctrl(req);
+ __entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__entry->qid = req->cq->qid;
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
@@ -120,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
__assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
- __print_ctrl_name(__entry->ctrl),
+ __print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result, __entry->status)
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 980123fb4dde..eb357ac2e54a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -460,8 +460,9 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
list_for_each_entry(entry, &nvmem->cells, node) {
sysfs_bin_attr_init(&attrs[i]);
attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
- "%s@%x", entry->name,
- entry->offset);
+ "%s@%x,%x", entry->name,
+ entry->offset,
+ entry->bit_offset);
attrs[i].attr.mode = 0444;
attrs[i].size = entry->bytes;
attrs[i].read = &nvmem_cell_attr_read;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 641a40cf5cf3..fa8cd33be131 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -763,7 +763,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node)
/* Walk 3 levels up only if there is 'ports' node. */
for (depth = 3; depth && node; depth--) {
node = of_get_next_parent(node);
- if (depth == 2 && !of_node_name_eq(node, "ports"))
+ if (depth == 2 && !of_node_name_eq(node, "ports") &&
+ !of_node_name_eq(node, "in-ports") &&
+ !of_node_name_eq(node, "out-ports"))
break;
}
return node;
@@ -1063,36 +1065,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
return of_device_get_match_data(dev);
}
-static struct device_node *of_get_compat_node(struct device_node *np)
-{
- of_node_get(np);
-
- while (np) {
- if (!of_device_is_available(np)) {
- of_node_put(np);
- np = NULL;
- }
-
- if (of_property_present(np, "compatible"))
- break;
-
- np = of_get_next_parent(np);
- }
-
- return np;
-}
-
-static struct device_node *of_get_compat_node_parent(struct device_node *np)
-{
- struct device_node *parent, *node;
-
- parent = of_get_parent(np);
- node = of_get_compat_node(parent);
- of_node_put(parent);
-
- return node;
-}
-
static void of_link_to_phandle(struct device_node *con_np,
struct device_node *sup_np)
{
@@ -1222,10 +1194,10 @@ static struct device_node *parse_##fname(struct device_node *np, \
* parse_prop.prop_name: Name of property holding a phandle value
* parse_prop.index: For properties holding a list of phandles, this is the
* index into the list
+ * @get_con_dev: If the consumer node containing the property is never converted
+ * to a struct device, implement this ops so fw_devlink can use it
+ * to find the true consumer.
* @optional: Describes whether a supplier is mandatory or not
- * @node_not_dev: The consumer node containing the property is never converted
- * to a struct device. Instead, parse ancestor nodes for the
- * compatible property to find a node corresponding to a device.
*
* Returns:
* parse_prop() return values are
@@ -1236,15 +1208,15 @@ static struct device_node *parse_##fname(struct device_node *np, \
struct supplier_bindings {
struct device_node *(*parse_prop)(struct device_node *np,
const char *prop_name, int index);
+ struct device_node *(*get_con_dev)(struct device_node *np);
bool optional;
- bool node_not_dev;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
-DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
@@ -1262,7 +1234,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
-DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
@@ -1328,6 +1299,17 @@ static struct device_node *parse_interrupts(struct device_node *np,
return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
}
+static struct device_node *parse_remote_endpoint(struct device_node *np,
+ const char *prop_name,
+ int index)
+{
+ /* Return NULL for index > 0 to signify end of remote-endpoints. */
+ if (index > 0 || strcmp(prop_name, "remote-endpoint"))
+ return NULL;
+
+ return of_graph_get_remote_port_parent(np);
+}
+
static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_clocks, },
{ .parse_prop = parse_interconnects, },
@@ -1352,7 +1334,10 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_pinctrl6, },
{ .parse_prop = parse_pinctrl7, },
{ .parse_prop = parse_pinctrl8, },
- { .parse_prop = parse_remote_endpoint, .node_not_dev = true, },
+ {
+ .parse_prop = parse_remote_endpoint,
+ .get_con_dev = of_graph_get_port_parent,
+ },
{ .parse_prop = parse_pwms, },
{ .parse_prop = parse_resets, },
{ .parse_prop = parse_leds, },
@@ -1403,8 +1388,8 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
while ((phandle = s->parse_prop(con_np, prop_name, i))) {
struct device_node *con_dev_np;
- con_dev_np = s->node_not_dev
- ? of_get_compat_node_parent(con_np)
+ con_dev_np = s->get_con_dev
+ ? s->get_con_dev(con_np)
: of_node_get(con_np);
matched = true;
i++;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index cfd60e35a899..d7593bde2d02 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -50,6 +50,12 @@ static struct unittest_results {
failed; \
})
+#ifdef CONFIG_OF_KOBJ
+#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref)
+#else
+#define OF_KREF_READ(NODE) 1
+#endif
+
/*
* Expected message may have a message level other than KERN_INFO.
* Print the expected message only if the current loglevel will allow
@@ -570,7 +576,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
pr_err("missing testcase data\n");
return;
}
- prefs[i] = kref_read(&p[i]->kobj.kref);
+ prefs[i] = OF_KREF_READ(p[i]);
}
rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
@@ -693,9 +699,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
for (i = 0; i < ARRAY_SIZE(p); ++i) {
- unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
+ unittest(prefs[i] == OF_KREF_READ(p[i]),
"provider%d: expected:%d got:%d\n",
- i, prefs[i], kref_read(&p[i]->kobj.kref));
+ i, prefs[i], OF_KREF_READ(p[i]));
of_node_put(p[i]);
}
}
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 9c2137dae429..826b5016a101 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -386,21 +386,8 @@ void pci_bus_add_devices(const struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_add_devices);
-/** pci_walk_bus - walk devices on/under bus, calling callback.
- * @top bus whose devices should be walked
- * @cb callback to be called for each device found
- * @userdata arbitrary pointer to be passed to callback.
- *
- * Walk the given bus, including any bridged devices
- * on buses under this bus. Call the provided callback
- * on each device found.
- *
- * We check the return of @cb each time. If it returns anything
- * other than 0, we break out.
- *
- */
-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
- void *userdata)
+static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata, bool locked)
{
struct pci_dev *dev;
struct pci_bus *bus;
@@ -408,7 +395,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
int retval;
bus = top;
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
next = top->devices.next;
for (;;) {
if (next == &bus->devices) {
@@ -431,10 +419,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
if (retval)
break;
}
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
+}
+
+/**
+ * pci_walk_bus - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Walk the given bus, including any bridged devices
+ * on buses under this bus. Call the provided callback
+ * on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ */
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ __pci_walk_bus(top, cb, userdata, false);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ __pci_walk_bus(top, cb, userdata, true);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
+
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
if (bus)
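
pci_walk_bus_locked() follows a common kernel shape: the walk body moves into a static __helper taking a bool locked, the existing entry point keeps acquiring the lock itself, and the new _locked variant only asserts the caller already holds it. The same shape with a pthreads rwlock standing in for pci_bus_sem (names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t bus_sem = PTHREAD_RWLOCK_INITIALIZER;

    static void __walk_bus(int (*cb)(int, void *), void *data, int locked)
    {
        if (!locked)
            pthread_rwlock_rdlock(&bus_sem);
        for (int dev = 0; dev < 3; dev++)
            if (cb(dev, data))
                break;
        if (!locked)
            pthread_rwlock_unlock(&bus_sem);
    }

    static void walk_bus(int (*cb)(int, void *), void *data)
    {
        __walk_bus(cb, data, 0);
    }

    /* Caller must hold bus_sem; the kernel uses lockdep_assert_held() here. */
    static void walk_bus_locked(int (*cb)(int, void *), void *data)
    {
        __walk_bus(cb, data, 1);
    }

    static int print_dev(int dev, void *data)
    {
        printf("dev %d\n", dev);
        return 0;
    }

    int main(void)
    {
        walk_bus(print_dev, NULL);

        pthread_rwlock_rdlock(&bus_sem);
        walk_bus_locked(print_dev, NULL);
        pthread_rwlock_unlock(&bus_sem);
        return 0;
    }
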
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 5befed2dc02b..9a437cfce073 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -6,6 +6,7 @@
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -482,9 +483,10 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep_func->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
- msg_addr = ((u64)msg_addr_upper) << 32 |
- (msg_addr_lower & ~aligned_offset);
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
@@ -551,7 +553,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- msg_addr &= ~aligned_offset;
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
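
Both the MSI and MSI-X paths now round the doorbell address down with ALIGN_DOWN() after assembling the full 64-bit value, making the page-alignment explicit instead of hand-masking the low word. The identity in isolation: ALIGN_DOWN(addr, size) clears addr to the previous size boundary (size a power of two), and addr & (size - 1) is the offset re-applied after mapping.

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_DOWN(x, a)   ((x) & ~((uint64_t)(a) - 1))   /* a: power of two */

    int main(void)
    {
        uint64_t msg_addr = 0x1234567890ULL;
        uint64_t page_size = 0x1000;            /* 4 KiB outbound window */

        uint64_t offset = msg_addr & (page_size - 1);
        uint64_t base = ALIGN_DOWN(msg_addr, page_size);

        /* base + offset reconstructs the original doorbell address */
        printf("base=%#llx offset=%#llx ok=%d\n",
               (unsigned long long)base, (unsigned long long)offset,
               base + offset == msg_addr);
        return 0;
    }
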
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 10f2d0bb86be..2ce2a3bd932b 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -972,7 +972,7 @@ static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
* Downstream devices need to be in D0 state before enabling PCI PM
* substates.
*/
- pci_set_power_state(pdev, PCI_D0);
+ pci_set_power_state_locked(pdev, PCI_D0);
pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index c8be056c248d..cfd84a899c82 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
return (irq_hw_number_t)desc->msi_index |
pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+ ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
}
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
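
The hwirq fix casts the domain number to irq_hw_number_t before shifting: (domain & 0xFFFFFFFF) << 27 is evaluated in 32-bit arithmetic, so any domain number with bits at position 5 or above silently loses them to truncation. Demonstrated standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t domain = 0x40;                 /* PCI segment above 0x1f */

        /* Old expression: 32-bit shift, the high bits wrap away. */
        uint64_t bad  = (domain & 0xFFFFFFFFu) << 27;

        /* Fixed expression: widen first, then shift. */
        uint64_t good = ((uint64_t)(domain & 0xFFFFFFFFu)) << 27;

        printf("bad=%#llx good=%#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }
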
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d8f11a078924..c3585229c12a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1354,6 +1354,7 @@ end:
/**
* pci_set_full_power_state - Put a PCI device into D0 and update its state
* @dev: PCI device to power up
+ * @locked: whether pci_bus_sem is held
*
* Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
* to confirm the state change, restore its BARs if they might be lost and
@@ -1363,7 +1364,7 @@ end:
* to D0, it is more efficient to use pci_power_up() directly instead of this
* function.
*/
-static int pci_set_full_power_state(struct pci_dev *dev)
+static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
u16 pmcsr;
int ret;
@@ -1399,7 +1400,7 @@ static int pci_set_full_power_state(struct pci_dev *dev)
}
if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
return 0;
}
@@ -1428,10 +1429,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
+{
+ if (!bus)
+ return;
+
+ if (locked)
+ pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
+ else
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+}
+
/**
* pci_set_low_power_state - Put a PCI device into a low-power state.
* @dev: PCI device to handle.
* @state: PCI power state (D1, D2, D3hot) to put the device into.
+ * @locked: whether pci_bus_sem is held
*
* Use the device's PCI_PM_CTRL register to put it into a low-power state.
*
@@ -1442,7 +1455,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
u16 pmcsr;
@@ -1496,29 +1509,12 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(state));
if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
return 0;
}
-/**
- * pci_set_power_state - Set the power state of a PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * Transition a device to a new power state, using the platform firmware and/or
- * the device's PCI PM registers.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
- * 0 if device already is in the requested state.
- * 0 if the transition is to D3 but D3 is not supported.
- * 0 if device's power state has been successfully changed.
- */
-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
int error;
@@ -1542,7 +1538,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
if (state == PCI_D0)
- return pci_set_full_power_state(dev);
+ return pci_set_full_power_state(dev, locked);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1556,16 +1552,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
* To put the device in D3cold, put it into D3hot in the native
* way, then put it into D3cold using platform ops.
*/
- error = pci_set_low_power_state(dev, PCI_D3hot);
+ error = pci_set_low_power_state(dev, PCI_D3hot, locked);
if (pci_platform_power_transition(dev, PCI_D3cold))
return error;
/* Powering off a bridge may power off the whole hierarchy */
if (dev->current_state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
} else {
- error = pci_set_low_power_state(dev, state);
+ error = pci_set_low_power_state(dev, state, locked);
if (pci_platform_power_transition(dev, state))
return error;
@@ -1573,8 +1569,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
}
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+ * 0 if device already is in the requested state.
+ * 0 if the transition is to D3 but D3 is not supported.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ return __pci_set_power_state(dev, state, false);
+}
EXPORT_SYMBOL(pci_set_power_state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ return __pci_set_power_state(dev, state, true);
+}
+EXPORT_SYMBOL(pci_set_power_state_locked);
+
#define PCI_EXP_SAVE_REGS 7
static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
@@ -2496,29 +2522,36 @@ static void pci_pme_list_scan(struct work_struct *work)
if (pdev->pme_poll) {
struct pci_dev *bridge = pdev->bus->self;
struct device *dev = &pdev->dev;
- int pm_status;
+ struct device *bdev = bridge ? &bridge->dev : NULL;
+ int bref = 0;
/*
- * If bridge is in low power state, the
- * configuration space of subordinate devices
- * may be not accessible
+ * If we have a bridge, it should be in an active/D0
+ * state or the configuration space of subordinate
+ * devices may not be accessible or stable over the
+ * course of the call.
*/
- if (bridge && bridge->current_state != PCI_D0)
- continue;
+ if (bdev) {
+ bref = pm_runtime_get_if_active(bdev, true);
+ if (!bref)
+ continue;
+
+ if (bridge->current_state != PCI_D0)
+ goto put_bridge;
+ }
/*
- * If the device is in a low power state it
- * should not be polled either.
+ * The device itself should be suspended but config
+ * space must be accessible, therefore it cannot be in
+ * D3cold.
*/
- pm_status = pm_runtime_get_if_active(dev, true);
- if (!pm_status)
- continue;
-
- if (pdev->current_state != PCI_D3cold)
+ if (pm_runtime_suspended(dev) &&
+ pdev->current_state != PCI_D3cold)
pci_pme_wakeup(pdev, NULL);
- if (pm_status > 0)
- pm_runtime_put(dev);
+put_bridge:
+ if (bref > 0)
+ pm_runtime_put(bdev);
} else {
list_del(&pme_dev->list);
kfree(pme_dev);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2336a8d1edab..e9750b1b19ba 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -571,12 +571,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5a0066ecc3c5..bc0bd86695ec 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1003,8 +1003,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+/*
+ * @pdev: the root port or switch downstream port
+ * @locked: whether pci_bus_sem is held
+ */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
{
struct pcie_link_state *link = pdev->link_state;
@@ -1014,12 +1017,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
* Devices changed PM state, we should recheck if latency
* meets all functions' requirement
*/
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
pcie_update_aspm_capable(link->root);
pcie_config_aspm_path(link);
mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
}
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index c584165b13ba..7e3aa7e2345f 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -2305,6 +2305,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
continue;
}
+ /*
+ * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
+ * child count larger than the number of valid child pointers.
+ * A child offset of 0 can only occur on CMN-600; otherwise it
+ * would imply the root node being its own grandchild, which
+ * we can safely dismiss in general.
+ */
+ if (reg == 0 && cmn->part != PART_CMN600) {
+ dev_dbg(cmn->dev, "bogus child pointer?\n");
+ continue;
+ }
arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 365d964b0f6a..308c9969642e 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -59,7 +59,7 @@
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
-#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
+#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
@@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
}
/*
- * CPMU specification allows for 8 filters, each with a 16 bit value...
- * So we need to find 8x16bits to store it in.
- * As the value used for disable is 0xffff, a separate enable switch
+ * CPMU specification allows for 8 filters, each with a 32 bit value...
+ * So we need to find 8x32bits to store it in.
+ * As the value used for disable is 0xffff_ffff, a separate enable switch
* is needed.
*/
@@ -419,7 +419,7 @@ static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
- CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)),
/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
@@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
if (cxl_pmu_config1_hdm_filter_en(event))
cfg = cxl_pmu_config2_get_hdm_decoder(event);
else
- cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+ cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
}
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 0dda70e1ef90..c78a6fd6c57f 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!rvpmu->ctr_get_width)
- /**
- * If the pmu driver doesn't support counter width, set it to default
- * maximum allowed by the specification.
- */
- cwidth = 63;
- else {
- if (hwc->idx == -1)
- /* Handle init case where idx is not initialized yet */
- cwidth = rvpmu->ctr_get_width(0);
- else
- cwidth = rvpmu->ctr_get_width(hwc->idx);
- }
+ if (hwc->idx == -1)
+ /* Handle init case where idx is not initialized yet */
+ cwidth = rvpmu->ctr_get_width(0);
+ else
+ cwidth = rvpmu->ctr_get_width(hwc->idx);
return GENMASK_ULL(cwidth, 0);
}
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 79fdd667922e..fa0bccf4edf2 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
return pmu_legacy_ctr_get_idx(event);
}
+/* cycle & instret are always 64 bit, one bit less according to SBI spec */
+static int pmu_legacy_ctr_get_width(int idx)
+{
+ return 63;
+}
+
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
- pmu->ctr_get_width = NULL;
+ pmu->ctr_get_width = pmu_legacy_ctr_get_width;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
pmu->event_mapped = pmu_legacy_event_mapped;
pmu->event_unmapped = pmu_legacy_event_unmapped;
pmu->csr_index = pmu_legacy_csr_index;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 16acd4dcdb96..452aab49db1e 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_reset_scounteren(void *arg)
@@ -521,7 +521,7 @@ static void pmu_sbi_reset_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -731,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* compute hardware counter index */
hidx = info->csr - CSR_CYCLE;
/* check if the corresponding bit is set in sscountovf */
- if (!(overflow & (1 << hidx)))
+ if (!(overflow & BIT(hidx)))
continue;
/*
* Keep a track of overflowed counters so that they can be started
* with updated initial value.
*/
- overflowed_ctrs |= 1 << lidx;
+ overflowed_ctrs |= BIT(lidx);
hw_evt = &event->hw;
riscv_pmu_event_update(event);
perf_sample_data_init(&data, 0, hw_evt->last_period);
diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
index e625b32889bf..0928a526e2ab 100644
--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
@@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
return ret;
}
- priv->id = of_alias_get_id(np, "mipi_dphy");
+ priv->id = of_alias_get_id(np, "mipi-dphy");
if (priv->id < 0) {
dev_err(dev, "Failed to get phy node alias id: %d\n",
priv->id);
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
index c1a41b6cd29b..b5ac2b7995e7 100644
--- a/drivers/phy/microchip/lan966x_serdes.c
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -96,6 +96,8 @@ static const struct serdes_mux lan966x_serdes_muxes[] = {
SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG,
HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
+ SERDES_MUX_SGMII(SERDES6G(2), 4, 0, 0),
+
SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
HSIO_HW_CFG_RGMII_ENA |
HSIO_HW_CFG_GMII_ENA,
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index a623f092b11f..a43e20abb10d 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -37,56 +37,28 @@
#define EUSB2_TUNE_EUSB_EQU 0x5A
#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
- { \
- .reg = r, \
- .val = v, \
- }
-
-enum reg_fields {
- F_TUNE_EUSB_HS_COMP_CUR,
- F_TUNE_EUSB_EQU,
- F_TUNE_EUSB_SLEW,
- F_TUNE_USB2_HS_COMP_CUR,
- F_TUNE_USB2_PREEM,
- F_TUNE_USB2_EQU,
- F_TUNE_USB2_SLEW,
- F_TUNE_SQUELCH_U,
- F_TUNE_HSDISC,
- F_TUNE_RES_FSDIF,
- F_TUNE_IUSB2,
- F_TUNE_USB2_CROSSOVER,
- F_NUM_TUNE_FIELDS,
-
- F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
- F_FORCE_EN_5,
-
- F_EN_CTL1,
-
- F_RPTR_STATUS,
- F_NUM_FIELDS,
-};
-
-static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
- [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
- [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
- [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
- [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
- [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
- [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
- [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
- [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
- [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
- [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
- [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
- [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
-
- [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
- [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
-
- [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
-
- [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+enum eusb2_reg_layout {
+ TUNE_EUSB_HS_COMP_CUR,
+ TUNE_EUSB_EQU,
+ TUNE_EUSB_SLEW,
+ TUNE_USB2_HS_COMP_CUR,
+ TUNE_USB2_PREEM,
+ TUNE_USB2_EQU,
+ TUNE_USB2_SLEW,
+ TUNE_SQUELCH_U,
+ TUNE_HSDISC,
+ TUNE_RES_FSDIF,
+ TUNE_IUSB2,
+ TUNE_USB2_CROSSOVER,
+ NUM_TUNE_FIELDS,
+
+ FORCE_VAL_5 = NUM_TUNE_FIELDS,
+ FORCE_EN_5,
+
+ EN_CTL1,
+
+ RPTR_STATUS,
+ LAYOUT_SIZE,
};
struct eusb2_repeater_cfg {
@@ -98,10 +70,11 @@ struct eusb2_repeater_cfg {
struct eusb2_repeater {
struct device *dev;
- struct regmap_field *regs[F_NUM_FIELDS];
+ struct regmap *regmap;
struct phy *phy;
struct regulator_bulk_data *vregs;
const struct eusb2_repeater_cfg *cfg;
+ u32 base;
enum phy_mode mode;
};
@@ -109,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = {
"vdd18", "vdd3",
};
-static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
- [F_TUNE_IUSB2] = 0x8,
- [F_TUNE_SQUELCH_U] = 0x3,
- [F_TUNE_USB2_PREEM] = 0x5,
+static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
+ [TUNE_IUSB2] = 0x8,
+ [TUNE_SQUELCH_U] = 0x3,
+ [TUNE_USB2_PREEM] = 0x5,
};
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -140,47 +113,42 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
static int eusb2_repeater_init(struct phy *phy)
{
- struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
struct device_node *np = rptr->dev->of_node;
- u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 };
- u8 override;
+ struct regmap *regmap = rptr->regmap;
+ const u32 *init_tbl = rptr->cfg->init_tbl;
+ u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
+ u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
+ u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
+ u32 base = rptr->base;
u32 val;
int ret;
- int i;
+
+ of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
+ of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
+ of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
if (ret)
return ret;
- regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+ regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
- if (init_tbl[i]) {
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
- } else {
- /* Write 0 if there's no value set */
- u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
-
- regmap_field_update_bits(rptr->regs[i], mask, 0);
- }
- }
- memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl));
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
+ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override))
- init_tbl[F_TUNE_IUSB2] = override;
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
- if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override))
- init_tbl[F_TUNE_HSDISC] = override;
-
- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override))
- init_tbl[F_TUNE_USB2_PREEM] = override;
-
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++)
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-
- ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
- val, val & RPTR_OK, 10, 5);
+ ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
if (ret)
dev_err(rptr->dev, "initialization timed-out\n");
@@ -191,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+ struct regmap *regmap = rptr->regmap;
+ u32 base = rptr->base;
switch (mode) {
case PHY_MODE_USB_HOST:
@@ -199,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* per the eUSB 1.2 spec. Implement a software workaround below until
* the PHY and controller fix the observed behavior.
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN);
break;
case PHY_MODE_USB_DEVICE:
/*
@@ -211,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* repeater doesn't clear previous value due to shared
* regulators (say host <-> device mode switch).
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, 0);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0);
break;
default:
return -EINVAL;
@@ -243,9 +209,8 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct device_node *np = dev->of_node;
- struct regmap *regmap;
- int i, ret;
u32 res;
+ int ret;
rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
if (!rptr)
@@ -258,22 +223,15 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
if (!rptr->cfg)
return -EINVAL;
- regmap = dev_get_regmap(dev->parent, NULL);
- if (!regmap)
+ rptr->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!rptr->regmap)
return -ENODEV;
ret = of_property_read_u32(np, "reg", &res);
if (ret < 0)
return ret;
- for (i = 0; i < F_NUM_FIELDS; i++)
- eusb2_repeater_tune_reg_fields[i].reg += res;
-
- ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
- eusb2_repeater_tune_reg_fields,
- F_NUM_FIELDS);
- if (ret)
- return ret;
+ rptr->base = res;
ret = eusb2_repeater_init_vregs(rptr);
if (ret < 0) {
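
The repeater rework above drops the regmap_field machinery in favor of plain regmap_write() at base + offset, which fits registers that are always written whole and whose base is only known at probe time. A minimal sketch contrasting the two styles (register name hypothetical):

#include <linux/regmap.h>

#define MY_TUNE_REG	0x50	/* hypothetical register offset */

/* Old style: pre-allocated regmap_field, masked read-modify-write. */
static int tune_write_field(struct regmap_field *field, unsigned int val)
{
	return regmap_field_update_bits(field, 0x7, val);
}

/* New style: direct whole-register write at a runtime base. */
static int tune_write_reg(struct regmap *map, u32 base, unsigned int val)
{
	return regmap_write(map, base + MY_TUNE_REG, val);
}
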
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index c2590579190a..03fb0d4b75d7 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -299,7 +299,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
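
The fix above is a classic copy-paste slip: the error logged and returned must come from the pointer that was just tested. dev_err_probe() both prints and propagates, and stays silent for -EPROBE_DEFER. A minimal sketch of the intended pattern:

#include <linux/regulator/consumer.h>

static int get_phy_supply(struct device *dev, struct regulator **out)
{
	struct regulator *vreg = devm_regulator_get(dev, "vdda-phy");

	/* Log PTR_ERR() of the object actually tested by IS_ERR(). */
	if (IS_ERR(vreg))
		return dev_err_probe(dev, PTR_ERR(vreg),
				     "failed to get vreg\n");

	*out = vreg;
	return 0;
}
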
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 243cc2b9a0fb..5c003988c35d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1556,6 +1556,14 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
+static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+ .serdes = 0,
+ .pcs = 0x800,
+ .pcs_misc = 0x600,
+ .tx = 0x200,
+ .rx = 0x400,
+};
+
static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
.serdes = 0,
.pcs = 0x800,
@@ -1564,7 +1572,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
.rx = 0x400,
};
-static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = {
.serdes = 0,
.pcs = 0x600,
.tx = 0x200,
@@ -1613,6 +1621,24 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
.rx = 0x1000,
};
+static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_usb_offsets_v3,
+
+ .serdes_tbl = ipq9574_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = ipq8074_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
+ .pcs_tbl = ipq8074_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.lanes = 1,
@@ -1652,7 +1678,7 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_v3,
+ .offsets = &qmp_usb_offsets_v3_msm8996,
.serdes_tbl = msm8996_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
@@ -2563,7 +2589,7 @@ err_node_put:
static const struct of_device_id qmp_usb_of_match_table[] = {
{
.compatible = "qcom,ipq6018-qmp-usb3-phy",
- .data = &ipq8074_usb3phy_cfg,
+ .data = &ipq6018_usb3phy_cfg,
}, {
.compatible = "qcom,ipq8074-qmp-usb3-phy",
.data = &ipq8074_usb3phy_cfg,
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index e53eace7c91e..6387c0d34c55 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -673,8 +673,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
- int ret;
-
channel->is_otg_channel = true;
channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
"renesas,no-otg-pins");
@@ -738,8 +736,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
ret = PTR_ERR(provider);
goto error;
} else if (channel->is_otg_channel) {
- int ret;
-
ret = device_create_file(dev, &dev_attr_role);
if (ret < 0)
goto error;
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index dd2913ac0fa2..78e19b128962 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -117,7 +117,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
- if (!phy->comparator)
+ if (!phy->comparator || !phy->comparator->set_vbus)
return -ENODEV;
return phy->comparator->set_vbus(phy->comparator, enabled);
@@ -127,7 +127,7 @@ static int omap_usb_start_srp(struct usb_otg *otg)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
- if (!phy->comparator)
+ if (!phy->comparator || !phy->comparator->start_srp)
return -ENODEV;
return phy->comparator->start_srp(phy->comparator);
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 03ecb3d1aaf6..49f89b70dcec 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
+ IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
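
Adding IRQF_ONESHOT keeps the line masked until interrupt handling completes, which matters for shared level-triggered lines: with forced threading (threadirqs, PREEMPT_RT) an unacked level interrupt could otherwise re-fire between the hard-IRQ and the threaded part. A minimal sketch of the request, names hypothetical:

#include <linux/interrupt.h>

static irqreturn_t my_gpio_irq_handler(int irq, void *dev_id)
{
	/* ... read and acknowledge the wake/interrupt status ... */
	return IRQ_HANDLED;
}

static int my_request_irq(struct device *dev, int irq, void *priv)
{
	/*
	 * IRQF_SHARED: the line may be shared with other devices.
	 * IRQF_ONESHOT: keep the line masked until handling finishes,
	 * so a still-asserted level interrupt cannot storm.
	 */
	return devm_request_irq(dev, irq, my_gpio_irq_handler,
				IRQF_SHARED | IRQF_ONESHOT, "my-gpio", priv);
}
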
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 1dd84c7a79de..b1995ac268d7 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -1170,7 +1170,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
int ret;
addr = pmc->block[blk_num].mmio_base +
- (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+ ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
ret = mlxbf_pmc_readl(addr, &word);
if (ret)
return ret;
@@ -1413,7 +1413,7 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
int ret;
addr = pmc->block[blk_num].mmio_base +
- (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+ ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
ret = mlxbf_pmc_readl(addr, &word);
if (ret)
return ret;
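
Both PERFSEL hunks fix the same addressing bug. One PERFSEL register serves a pair of counters, so the register index is cnt_num / 2; rounddown(cnt_num, 2) instead yields the even counter number, which overshoots for every pair past the first -- e.g. cnt_num = 5 gives rounddown(5, 2) = 4, but the correct register index is 5 / 2 = 2. A minimal sketch of the arithmetic (stride value hypothetical):

#include <stdio.h>

#define PERFSEL_SZ 8	/* hypothetical register stride */

int main(void)
{
	for (unsigned int cnt = 0; cnt < 6; cnt++)
		printf("cnt %u: rounddown -> +0x%x, cnt/2 -> +0x%x\n",
		       cnt,
		       (cnt / 2 * 2) * PERFSEL_SZ,	/* rounddown(cnt, 2) */
		       (cnt / 2) * PERFSEL_SZ);
	return 0;
}
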
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index ed16ec422a7b..b8d1e32e97eb 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -47,6 +47,9 @@
/* Message with data needs at least two words (for header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS 2
+/* Tx timeout in milliseconds. */
+#define TMFIFO_TX_TIMEOUT 2000
+
/* ACPI UID for BlueField-3. */
#define TMFIFO_BF3_UID 1
@@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
* @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
+ * @rem_padding: remaining bytes to send as padding
* @pkt_len: total length of the pending packet
* @next_avail: next avail descriptor id
* @num: vring size (number of descriptors)
* @align: vring alignment size
* @index: vring index
* @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @tx_timeout: expiry time of the last Tx packet
* @fifo: pointer to the tmfifo structure
*/
struct mlxbf_tmfifo_vring {
@@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
struct vring_desc drop_desc;
int cur_len;
int rem_len;
+ int rem_padding;
u32 pkt_len;
u16 next_avail;
int num;
int align;
int index;
int vdev_id;
+ unsigned long tx_timeout;
struct mlxbf_tmfifo *fifo;
};
@@ -819,6 +826,50 @@ mlxbf_tmfifo_desc_done:
return true;
}
+static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
+{
+ unsigned long flags;
+
+ /* Only handle Tx timeout for network vdev. */
+ if (vring->vdev_id != VIRTIO_ID_NET)
+ return;
+
+ /* Initialize the timeout or return if not expired. */
+ if (!vring->tx_timeout) {
+ /* Initialize the timeout. */
+ vring->tx_timeout = jiffies +
+ msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
+ return;
+ } else if (time_before(jiffies, vring->tx_timeout)) {
+ /* Not timed out yet. */
+ return;
+ }
+
+ /*
+ * Drop the packet after a timeout. The outstanding packet is
+ * released and the remaining bytes are sent as 0x00 padding for
+ * recovery. On the peer (host) side, the padding bytes are either
+ * dropped directly or appended to an existing outstanding packet,
+ * which is then dropped as a corrupted network packet.
+ */
+ vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
+ mlxbf_tmfifo_release_pkt(vring);
+ vring->cur_len = 0;
+ vring->rem_len = 0;
+ vring->fifo->vring[0] = NULL;
+
+ /*
+ * Make sure the loads/stores are ordered before
+ * returning to virtio.
+ */
+ virtio_mb(false);
+
+ /* Notify upper layer. */
+ spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
+ vring_interrupt(0, vring->vq);
+ spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
+}
+
/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
@@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
return;
do {
+retry:
/* Get available FIFO space. */
if (avail == 0) {
if (is_rx)
@@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
break;
}
+ /* Insert padding for the discarded Tx packet. */
+ if (!is_rx) {
+ vring->tx_timeout = 0;
+ while (vring->rem_padding >= sizeof(u64)) {
+ writeq(0, vring->fifo->tx.data);
+ vring->rem_padding -= sizeof(u64);
+ if (--avail == 0)
+ goto retry;
+ }
+ }
+
/* Console output always comes from the Tx buffer. */
if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
mlxbf_tmfifo_console_tx(fifo, avail);
@@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
/* Handle one descriptor. */
more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
} while (more);
+
+ /* Check Tx timeout. */
+ if (avail <= 0 && !is_rx)
+ mlxbf_tmfifo_check_tx_timeout(vring);
}
/* Handle Rx or Tx queues. */
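
The tx_timeout bookkeeping above is the standard jiffies idiom: arm an absolute deadline with jiffies + msecs_to_jiffies() and test it with time_before(), which is wrap-around safe. A minimal sketch of the same pattern, names hypothetical:

#include <linux/jiffies.h>

#define MY_TX_TIMEOUT_MS	2000

struct my_ring {
	unsigned long tx_deadline;	/* 0 means "not armed" */
};

static bool my_ring_tx_expired(struct my_ring *r)
{
	if (!r->tx_deadline) {
		/* First stall observed: arm the deadline. */
		r->tx_deadline = jiffies +
				 msecs_to_jiffies(MY_TX_TIMEOUT_MS);
		return false;
	}

	/* time_before() compares jiffies wrap-around safely. */
	return !time_before(jiffies, r->tx_deadline);
}
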
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index f246252bddd8..f4fa8bd8bda8 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -10,6 +10,7 @@ config AMD_PMF
depends on AMD_NB
select ACPI_PLATFORM_PROFILE
depends on TEE && AMDTEE
+ depends on AMD_SFH_HID
help
This driver provides support for the AMD Platform Management Framework.
The goal is to enhance end user experience by making AMD PCs smarter,
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index feaa09f5b35a..4f734e049f4a 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -296,7 +296,8 @@ static int amd_pmf_suspend_handler(struct device *dev)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- kfree(pdev->buf);
+ if (pdev->smart_pc_enabled)
+ cancel_delayed_work_sync(&pdev->pb_work);
return 0;
}
@@ -312,6 +313,9 @@ static int amd_pmf_resume_handler(struct device *dev)
return ret;
}
+ if (pdev->smart_pc_enabled)
+ schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
+
return 0;
}
@@ -330,9 +334,14 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
- if (!amd_pmf_init_smart_pc(dev)) {
+ amd_pmf_init_smart_pc(dev);
+ if (dev->smart_pc_enabled) {
dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
- } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ /* If Smart PC is enabled, no need to check for other features */
+ return;
+ }
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_init_auto_mode(dev);
dev_dbg(dev->dev, "Auto Mode Init done\n");
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
@@ -351,7 +360,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
amd_pmf_deinit_sps(dev);
}
- if (!dev->smart_pc_enabled) {
+ if (dev->smart_pc_enabled) {
amd_pmf_deinit_smart_pc(dev);
} else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
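
The suspend/resume pairing above follows the usual delayed-work discipline: cancel synchronously on suspend so no work item touches suspended hardware, then re-arm on resume. A minimal sketch, names hypothetical:

#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work poll_work;
	bool feature_enabled;
};

static int my_suspend(struct my_dev *d)
{
	if (d->feature_enabled)
		cancel_delayed_work_sync(&d->poll_work); /* waits if running */
	return 0;
}

static int my_resume(struct my_dev *d)
{
	if (d->feature_enabled)
		schedule_delayed_work(&d->poll_work, msecs_to_jiffies(2000));
	return 0;
}
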
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 16999c5b334f..66cae1cca73c 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -441,11 +441,6 @@ struct apmf_dyn_slider_output {
struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
} __packed;
-enum smart_pc_status {
- PMF_SMART_PC_ENABLED,
- PMF_SMART_PC_DISABLED,
-};
-
/* Smart PC - TA internals */
enum system_state {
SYSTEM_STATE_S0i3,
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index a0423942f771..a3dec14c3004 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -10,6 +10,7 @@
*/
#include <acpi/button.h>
+#include <linux/amd-pmf-io.h>
#include <linux/power_supply.h>
#include <linux/units.h>
#include "pmf.h"
@@ -44,6 +45,8 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency);
dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy);
dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
+ dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
+ dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
dev_dbg(dev->dev, "==== TA inputs END ====\n");
}
#else
@@ -147,6 +150,38 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
return 0;
}
+static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ struct amd_sfh_info sfh_info;
+ int ret;
+
+ /* Get ALS data */
+ ret = amd_get_sfh_info(&sfh_info, MT_ALS);
+ if (!ret)
+ in->ev_info.ambient_light = sfh_info.ambient_light;
+ else
+ return ret;
+
+ /* Get HPD data */
+ ret = amd_get_sfh_info(&sfh_info, MT_HPD);
+ if (ret)
+ return ret;
+
+ switch (sfh_info.user_present) {
+ case SFH_NOT_DETECTED:
+ in->ev_info.user_present = 0xff; /* assume no sensors connected */
+ break;
+ case SFH_USER_PRESENT:
+ in->ev_info.user_present = 1;
+ break;
+ case SFH_USER_AWAY:
+ in->ev_info.user_present = 0;
+ break;
+ }
+
+ return 0;
+}
+
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
{
/* TA side lid open is 1 and close is 0, hence the ! here */
@@ -155,4 +190,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
amd_pmf_get_smu_info(dev, in);
amd_pmf_get_battery_info(dev, in);
amd_pmf_get_slider_info(dev, in);
+ amd_pmf_get_sensor_info(dev, in);
}
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 502ce93d5cdd..dcbe8f85e122 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -252,15 +252,17 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
cookie = readl(dev->policy_buf + POLICY_COOKIE_OFFSET);
length = readl(dev->policy_buf + POLICY_COOKIE_LEN);
- if (cookie != POLICY_SIGN_COOKIE || !length)
+ if (cookie != POLICY_SIGN_COOKIE || !length) {
+ dev_dbg(dev->dev, "cookie doesn't match\n");
return -EINVAL;
+ }
/* Update the actual length */
dev->policy_sz = length + 512;
res = amd_pmf_invoke_cmd_init(dev);
if (res == TA_PMF_TYPE_SUCCESS) {
/* Now it's safe to announce that Smart PC is enabled */
- dev->smart_pc_enabled = PMF_SMART_PC_ENABLED;
+ dev->smart_pc_enabled = true;
/*
* Start collecting the data from the TA FW after a small delay,
* or else we might end up getting stale values.
@@ -268,7 +270,7 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
- dev->smart_pc_enabled = PMF_SMART_PC_DISABLED;
+ dev->smart_pc_enabled = false;
return res;
}
@@ -298,8 +300,10 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
if (!new_policy_buf)
return -ENOMEM;
- if (copy_from_user(new_policy_buf, buf, length))
+ if (copy_from_user(new_policy_buf, buf, length)) {
+ kfree(new_policy_buf);
return -EFAULT;
+ }
kfree(dev->policy_buf);
dev->policy_buf = new_policy_buf;
@@ -334,25 +338,6 @@ static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {}
static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {}
#endif
-static int amd_pmf_get_bios_buffer(struct amd_pmf_dev *dev)
-{
- dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
- if (!dev->policy_buf)
- return -ENOMEM;
-
- dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
- if (!dev->policy_base)
- return -ENOMEM;
-
- memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
-
- amd_pmf_hex_dump_pb(dev);
- if (pb_side_load)
- amd_pmf_open_pb(dev, dev->dbgfs_dir);
-
- return amd_pmf_start_policy_engine(dev);
-}
-
static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data)
{
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
@@ -451,22 +436,59 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return ret;
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
- amd_pmf_set_dram_addr(dev, true);
- amd_pmf_get_bios_buffer(dev);
+
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ goto error;
+
+ dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
+ if (!dev->policy_base) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
+
+ amd_pmf_hex_dump_pb(dev);
+
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
- if (!dev->prev_data)
- return -ENOMEM;
+ if (!dev->prev_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = amd_pmf_start_policy_engine(dev);
+ if (ret)
+ goto error;
+
+ if (pb_side_load)
+ amd_pmf_open_pb(dev, dev->dbgfs_dir);
+
+ return 0;
- return dev->smart_pc_enabled;
+error:
+ amd_pmf_deinit_smart_pc(dev);
+
+ return ret;
}
void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
{
- if (pb_side_load)
+ if (pb_side_load && dev->esbin)
amd_pmf_remove_pb(dev);
+ cancel_delayed_work_sync(&dev->pb_work);
kfree(dev->prev_data);
+ dev->prev_data = NULL;
kfree(dev->policy_buf);
- cancel_delayed_work_sync(&dev->pb_work);
+ dev->policy_buf = NULL;
+ kfree(dev->buf);
+ dev->buf = NULL;
amd_pmf_tee_deinit(dev);
}
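
The copy_from_user() hunk above closes a leak on the error path: once the copy fails, the freshly allocated buffer must be freed before returning -EFAULT. A minimal sketch of the corrected pattern:

#include <linux/slab.h>
#include <linux/uaccess.h>

static int copy_policy(const char __user *ubuf, size_t len, void **out)
{
	void *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, len)) {
		kfree(buf);	/* do not leak the allocation on -EFAULT */
		return -EFAULT;
	}

	*out = buf;
	return 0;
}
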
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index a1ee1a74fc3c..2cf3b4a8813f 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -399,7 +399,8 @@ int ifs_load_firmware(struct device *dev)
if (fw->size != expected_size) {
dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
expected_size, fw->size);
- return -EINVAL;
+ ret = -EINVAL;
+ goto release;
}
ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index b6708bab7c53..527d8fbc7cc1 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev)
* IRQs into gpiolib.
*/
ret = devm_request_irq(dev, irq, int0002_irq,
- IRQF_SHARED, "INT0002", chip);
+ IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
return ret;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 33ab207493e3..33bb58dc3f78 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -23,23 +23,23 @@ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned
static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
-static ssize_t show_domain_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_dev_attr);
+ struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
return sprintf(buf, "%u\n", data->domain_id);
}
-static ssize_t show_fabric_cluster_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_dev_attr);
+ struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
return sprintf(buf, "%u\n", data->cluster_id);
}
-static ssize_t show_package_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- struct uncore_data *data = container_of(attr, struct uncore_data, package_id_dev_attr);
+ struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
return sprintf(buf, "%u\n", data->package_id);
}
@@ -97,30 +97,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
}
#define store_uncore_min_max(name, min_max) \
- static ssize_t store_##name(struct device *dev, \
- struct device_attribute *attr, \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
return store_min_max_freq_khz(data, buf, count, \
min_max); \
}
#define show_uncore_min_max(name, min_max) \
- static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf)\
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
{ \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
return show_min_max_freq_khz(data, buf, min_max); \
}
#define show_uncore_perf_status(name) \
- static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf)\
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
{ \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
return show_perf_status_freq_khz(data, buf); \
}
@@ -134,11 +134,11 @@ show_uncore_min_max(max_freq_khz, 1);
show_uncore_perf_status(current_freq_khz);
#define show_uncore_data(member_name) \
- static ssize_t show_##member_name(struct device *dev, \
- struct device_attribute *attr, char *buf)\
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
{ \
struct uncore_data *data = container_of(attr, struct uncore_data,\
- member_name##_dev_attr);\
+ member_name##_kobj_attr);\
\
return sysfs_emit(buf, "%u\n", \
data->member_name); \
@@ -149,29 +149,29 @@ show_uncore_data(initial_max_freq_khz);
#define init_attribute_rw(_name) \
do { \
- sysfs_attr_init(&data->_name##_dev_attr.attr); \
- data->_name##_dev_attr.show = show_##_name; \
- data->_name##_dev_attr.store = store_##_name; \
- data->_name##_dev_attr.attr.name = #_name; \
- data->_name##_dev_attr.attr.mode = 0644; \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = store_##_name; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0644; \
} while (0)
#define init_attribute_ro(_name) \
do { \
- sysfs_attr_init(&data->_name##_dev_attr.attr); \
- data->_name##_dev_attr.show = show_##_name; \
- data->_name##_dev_attr.store = NULL; \
- data->_name##_dev_attr.attr.name = #_name; \
- data->_name##_dev_attr.attr.mode = 0444; \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0444; \
} while (0)
#define init_attribute_root_ro(_name) \
do { \
- sysfs_attr_init(&data->_name##_dev_attr.attr); \
- data->_name##_dev_attr.show = show_##_name; \
- data->_name##_dev_attr.store = NULL; \
- data->_name##_dev_attr.attr.name = #_name; \
- data->_name##_dev_attr.attr.mode = 0400; \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0400; \
} while (0)
static int create_attr_group(struct uncore_data *data, char *name)
@@ -186,21 +186,21 @@ static int create_attr_group(struct uncore_data *data, char *name)
if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
init_attribute_root_ro(domain_id);
- data->uncore_attrs[index++] = &data->domain_id_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
init_attribute_root_ro(fabric_cluster_id);
- data->uncore_attrs[index++] = &data->fabric_cluster_id_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
init_attribute_root_ro(package_id);
- data->uncore_attrs[index++] = &data->package_id_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
}
- data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
ret = uncore_read_freq(data, &freq);
if (!ret)
- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
data->uncore_attrs[index] = NULL;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 7afb69977c7e..0e5bf507e555 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -26,14 +26,14 @@
* @instance_id: Unique instance id to append to directory name
* @name: Sysfs entry name for this instance
* @uncore_attr_group: Attribute group storage
- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
- * @domain_id_dev_attr: Storage for device attribute domain_id
- * @fabric_cluster_id_dev_attr: Storage for device attribute fabric_cluster_id
- * @package_id_dev_attr: Storage for device attribute package_id
+ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
+ * @min_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
+ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
+ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
+ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
+ * @package_id_kobj_attr: Storage for kobject attribute package_id
* @uncore_attrs: Attribute storage for group creation
*
* This structure is used to encapsulate all data related to uncore sysfs
@@ -53,14 +53,14 @@ struct uncore_data {
char name[32];
struct attribute_group uncore_attr_group;
- struct device_attribute max_freq_khz_dev_attr;
- struct device_attribute min_freq_khz_dev_attr;
- struct device_attribute initial_max_freq_khz_dev_attr;
- struct device_attribute initial_min_freq_khz_dev_attr;
- struct device_attribute current_freq_khz_dev_attr;
- struct device_attribute domain_id_dev_attr;
- struct device_attribute fabric_cluster_id_dev_attr;
- struct device_attribute package_id_dev_attr;
+ struct kobj_attribute max_freq_khz_kobj_attr;
+ struct kobj_attribute min_freq_khz_kobj_attr;
+ struct kobj_attribute initial_max_freq_khz_kobj_attr;
+ struct kobj_attribute initial_min_freq_khz_kobj_attr;
+ struct kobj_attribute current_freq_khz_kobj_attr;
+ struct kobj_attribute domain_id_kobj_attr;
+ struct kobj_attribute fabric_cluster_id_kobj_attr;
+ struct kobj_attribute package_id_kobj_attr;
struct attribute *uncore_attrs[9];
};
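
The mechanical rename above matters because these sysfs attributes hang off a raw kobject rather than a struct device, so sysfs invokes the kobj_attribute show/store prototypes; the per-instance data is then recovered via container_of() on the embedded attribute. A minimal sketch, names hypothetical:

#include <linux/kobject.h>
#include <linux/sysfs.h>

struct my_uncore {
	struct kobj_attribute max_khz_attr;	/* embedded, not a pointer */
	unsigned int max_khz;
};

static ssize_t max_khz_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct my_uncore *d =
		container_of(attr, struct my_uncore, max_khz_attr);

	return sysfs_emit(buf, "%u\n", d->max_khz);
}
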
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 210b0a81b7ec..084c355c86f5 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
sparse_keymap_report_event(input_dev, event, val, autorelease);
-
- /* Some devices need this to report further events */
- acpi_evaluate_object(handle, "VBDL", NULL, NULL);
}
/*
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
index 9cf5ed0f8dc2..040153ad67c1 100644
--- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -32,7 +32,7 @@ static int get_fwu_request(struct device *dev, u32 *out)
return -ENODEV;
if (obj->type != ACPI_TYPE_INTEGER) {
- dev_warn(dev, "wmi_query_block returned invalid value\n");
+ dev_warn(dev, "wmidev_block_query returned invalid value\n");
kfree(obj);
return -EINVAL;
}
@@ -55,7 +55,7 @@ static int set_fwu_request(struct device *dev, u32 in)
status = wmidev_block_set(to_wmi_device(dev), 0, &input);
if (ACPI_FAILURE(status)) {
- dev_err(dev, "wmi_set_block failed\n");
+ dev_err(dev, "wmidev_block_set failed\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 1cf2471d54dd..3d66e1d4eb1f 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -20,12 +20,29 @@
#define P2SBC_HIDE BIT(8)
#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
+#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0)
+#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2)
static const struct x86_cpu_id p2sb_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
{}
};
+/*
+ * Cache BAR0 of P2SB device functions 0 to 7.
+ * TODO: The constant 8 is the number of functions that the PCI
+ * specification defines. The same definition exists tree-wide; unify
+ * the definitions and move them to include/uapi/linux/pci.h.
+ */
+#define NR_P2SB_RES_CACHE 8
+
+struct p2sb_res_cache {
+ u32 bus_dev_id;
+ struct resource res;
+};
+
+static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
+
static int p2sb_get_devfn(unsigned int *devfn)
{
unsigned int fn = P2SB_DEVFN_DEFAULT;
@@ -39,10 +56,18 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
+static bool p2sb_valid_resource(struct resource *res)
+{
+ if (res->flags)
+ return true;
+
+ return false;
+}
+
/* Copy resource from the first BAR of the device in question */
-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
- struct resource *bar0 = &pdev->resource[0];
+ struct resource *bar0 = pci_resource_n(pdev, 0);
/* Make sure we have no dangling pointers in the output */
memset(mem, 0, sizeof(*mem));
@@ -56,49 +81,57 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
mem->end = bar0->end;
mem->flags = bar0->flags;
mem->desc = bar0->desc;
-
- return 0;
}
-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
{
+ struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
struct pci_dev *pdev;
- int ret;
pdev = pci_scan_single_device(bus, devfn);
if (!pdev)
- return -ENODEV;
+ return;
- ret = p2sb_read_bar0(pdev, mem);
+ p2sb_read_bar0(pdev, &cache->res);
+ cache->bus_dev_id = bus->dev.id;
pci_stop_and_remove_bus_device(pdev);
- return ret;
}
-/**
- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
- * @bus: PCI bus to communicate with
- * @devfn: PCI slot and function to communicate with
- * @mem: memory resource to be filled in
- *
- * The BIOS prevents the P2SB device from being enumerated by the PCI
- * subsystem, so we need to unhide and hide it back to lookup the BAR.
- *
- * if @bus is NULL, the bus 0 in domain 0 will be used.
- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
- *
- * Caller must provide a valid pointer to @mem.
- *
- * Locking is handled by pci_rescan_remove_lock mutex.
- *
- * Return:
- * 0 on success or appropriate errno value on error.
- */
-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+{
+ /* Scan the P2SB device and cache its BAR0 */
+ p2sb_scan_and_cache_devfn(bus, devfn);
+
+ /* On Goldmont p2sb_bar() also gets called for the SPI controller */
+ if (devfn == P2SB_DEVFN_GOLDMONT)
+ p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
+
+ if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+ return -ENOENT;
+
+ return 0;
+}
+
+static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
+{
+ static struct pci_bus *p2sb_bus;
+
+ bus = bus ?: p2sb_bus;
+ if (bus)
+ return bus;
+
+ /* Assume P2SB is on the bus 0 in domain 0 */
+ p2sb_bus = pci_find_bus(0, 0);
+ return p2sb_bus;
+}
+
+static int p2sb_cache_resources(void)
{
- struct pci_dev *pdev_p2sb;
unsigned int devfn_p2sb;
u32 value = P2SBC_HIDE;
+ struct pci_bus *bus;
+ u16 class;
int ret;
/* Get devfn for P2SB device itself */
@@ -106,8 +139,17 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
if (ret)
return ret;
- /* if @bus is NULL, use bus 0 in domain 0 */
- bus = bus ?: pci_find_bus(0, 0);
+ bus = p2sb_get_bus(NULL);
+ if (!bus)
+ return -ENODEV;
+
+ /*
+ * When a device with the same devfn exists but its device class is not
+ * PCI_CLASS_MEMORY_OTHER, as expected for the P2SB, do not touch it.
+ */
+ pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
+ if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
+ return -ENODEV;
/*
* Prevent concurrent PCI bus scan from seeing the P2SB device and
@@ -115,17 +157,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
*/
pci_lock_rescan_remove();
- /* Unhide the P2SB device, if needed */
+ /*
+ * The BIOS prevents the P2SB device from being enumerated by the PCI
+ * subsystem, so we need to unhide it and hide it again to look up the BAR.
+ * Unhide the P2SB device here, if needed.
+ */
pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
if (value & P2SBC_HIDE)
pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
- pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
- if (devfn)
- ret = p2sb_scan_and_read(bus, devfn, mem);
- else
- ret = p2sb_read_bar0(pdev_p2sb, mem);
- pci_stop_and_remove_bus_device(pdev_p2sb);
+ ret = p2sb_scan_and_cache(bus, devfn_p2sb);
/* Hide the P2SB device, if it was hidden */
if (value & P2SBC_HIDE)
@@ -133,12 +174,62 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
pci_unlock_rescan_remove();
- if (ret)
- return ret;
+ return ret;
+}
- if (mem->flags == 0)
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * If @bus is NULL, the bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct p2sb_res_cache *cache;
+ int ret;
+
+ bus = p2sb_get_bus(bus);
+ if (!bus)
+ return -ENODEV;
+
+ if (!devfn) {
+ ret = p2sb_get_devfn(&devfn);
+ if (ret)
+ return ret;
+ }
+
+ cache = &p2sb_resources[PCI_FUNC(devfn)];
+ if (cache->bus_dev_id != bus->dev.id)
return -ENODEV;
+ if (!p2sb_valid_resource(&cache->res))
+ return -ENOENT;
+
+ memcpy(mem, &cache->res, sizeof(*mem));
return 0;
}
EXPORT_SYMBOL_GPL(p2sb_bar);
+
+static int __init p2sb_fs_init(void)
+{
+ p2sb_cache_resources();
+ return 0;
+}
+
+/*
+ * pci_rescan_remove_lock, which guards access to the unhidden P2SB
+ * devices, cannot be taken in the sysfs PCI bus rescan path without
+ * risking a deadlock. To avoid the deadlock, access the P2SB devices
+ * with the lock held at an early step of kernel initialization and
+ * cache the required resources. This should happen after
+ * subsys_initcall, which initializes the PCI subsystem, and before
+ * device_initcall, which requires the P2SB resources.
+ */
+fs_initcall(p2sb_fs_init);
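
fs_initcall() is what slots the cache fill between the PCI core and ordinary drivers: initcall levels run strictly in order, so the level choice guarantees the rescan lock is taken before any device_initcall driver calls p2sb_bar(). A minimal sketch of the ordering, assuming the standard levels from include/linux/init.h:

#include <linux/init.h>

/*
 * Initcall levels run in this order during boot:
 *   core -> postcore -> arch -> subsys -> fs -> rootfs -> device -> late
 *
 * The PCI subsystem registers at subsys_initcall and most drivers at
 * device_initcall (module_init), so an fs_initcall lands in between.
 */
static int __init my_early_cache_init(void)
{
	/* populate caches while taking pci_rescan_remove_lock is safe */
	return 0;
}
fs_initcall(my_early_cache_init);
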
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
new file mode 100644
index 000000000000..bcf3a0c356ea
--- /dev/null
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * In some cases, UART-attached devices which require an in-kernel driver,
+ * e.g. UART-attached Bluetooth HCIs, are described in the ACPI tables
+ * by an ACPI device with a broken or missing UartSerialBusV2() resource.
+ *
+ * This causes the kernel to create a /dev/ttyS# char-device for the UART
+ * instead of creating an in kernel serdev-controller + serdev-device pair
+ * for the in kernel driver.
+ *
+ * The quirk handling in acpi_quirk_skip_serdev_enumeration() makes the kernel
+ * create a serdev-controller device for these UARTs instead of a /dev/ttyS#.
+ *
+ * Instantiating the actual serdev-device to bind to is up to pdx86 code,
+ * this header provides a helper for getting the serdev-controller device.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/printk.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct device *ctrl_dev, *child;
+ struct acpi_device *ctrl_adev;
+ char name[32];
+ int i;
+
+ ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!ctrl_adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ /* Walk host -> uart-ctrl -> port -> serdev-ctrl */
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ snprintf(name, sizeof(name), "%s:0", dev_name(ctrl_dev));
+ break;
+ case 1:
+ snprintf(name, sizeof(name), "%s.%d",
+ dev_name(ctrl_dev), serial_ctrl_port);
+ break;
+ case 2:
+ strscpy(name, serdev_ctrl_name, sizeof(name));
+ break;
+ }
+
+ child = device_find_child_by_name(ctrl_dev, name);
+ put_device(ctrl_dev);
+ if (!child) {
+ pr_err("error could not find '%s' device\n", name);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ ctrl_dev = child;
+ }
+
+put_ctrl_adev:
+ acpi_dev_put(ctrl_adev);
+ return ctrl_dev;
+}
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 3a396b763c49..ce3e08815a8e 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -1009,7 +1009,16 @@ static ssize_t current_value_store(struct kobject *kobj,
* Note - this sets the variable and then the password as separate
* WMI calls. Function tlmi_save_bios_settings will error if the
* password is incorrect.
+ * Workstations require the opcode to be set before changing the
+ * attribute.
*/
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+ tlmi_priv.pwd_admin->password);
+ if (ret)
+ goto out;
+ }
+
set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
new_setting);
if (!set_str) {
@@ -1021,17 +1030,10 @@ static ssize_t current_value_store(struct kobject *kobj,
if (ret)
goto out;
- if (tlmi_priv.save_mode == TLMI_SAVE_BULK) {
+ if (tlmi_priv.save_mode == TLMI_SAVE_BULK)
tlmi_priv.save_required = true;
- } else {
- if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
- ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
- tlmi_priv.pwd_admin->password);
- if (ret)
- goto out;
- }
+ else
ret = tlmi_save_bios_settings("");
- }
} else { /* old non-opcode based authentication method (deprecated) */
if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c4895e9bc714..5ecd9d33250d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10308,6 +10308,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
return 0;
default:
/* Unknown function */
+ pr_debug("unknown function 0x%x\n", funcmode);
return -EOPNOTSUPP;
}
return 0;
@@ -10493,8 +10494,8 @@ static void dytc_profile_refresh(void)
return;
perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
- convert_dytc_to_profile(funcmode, perfmode, &profile);
- if (profile != dytc_current_profile) {
+ err = convert_dytc_to_profile(funcmode, perfmode, &profile);
+ if (!err && profile != dytc_current_profile) {
dytc_current_profile = profile;
platform_profile_notify();
}
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 0c6733772698..975cf24ae359 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
};
static const struct ts_dmi_data chuwi_hi8_air_data = {
- .acpi_name = "MSSL1680:00",
+ .acpi_name = "MSSL1680",
.properties = chuwi_hi8_air_props,
};
@@ -415,18 +415,13 @@ static const struct property_entry gdix1001_upside_down_props[] = {
{ }
};
-static const struct ts_dmi_data gdix1001_00_upside_down_data = {
- .acpi_name = "GDIX1001:00",
- .properties = gdix1001_upside_down_props,
-};
-
-static const struct ts_dmi_data gdix1001_01_upside_down_data = {
- .acpi_name = "GDIX1001:01",
+static const struct ts_dmi_data gdix1001_upside_down_data = {
+ .acpi_name = "GDIX1001",
.properties = gdix1001_upside_down_props,
};
-static const struct ts_dmi_data gdix1002_00_upside_down_data = {
- .acpi_name = "GDIX1002:00",
+static const struct ts_dmi_data gdix1002_upside_down_data = {
+ .acpi_name = "GDIX1002",
.properties = gdix1001_upside_down_props,
};
@@ -944,6 +939,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
.properties = teclast_tbook11_props,
};
+static const struct property_entry teclast_x16_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data teclast_x16_plus_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3692-teclast-x16-plus.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 43560,
+ .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+ 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+ 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+ 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = teclast_x16_plus_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1386,7 +1407,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Juno Tablet */
- .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .driver_data = (void *)&gdix1002_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
@@ -1613,6 +1634,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Teclast X16 Plus */
+ .driver_data = (void *)&teclast_x16_plus_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
@@ -1623,7 +1653,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Android version / BIOS) */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
@@ -1631,7 +1661,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Windows version / BIOS) */
- .driver_data = (void *)&gdix1001_01_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/* tPAD is too generic, also match on bios date */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
@@ -1649,7 +1679,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X98 Pro */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/*
* Only match BIOS date, because the manufacturers
@@ -1753,7 +1783,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* "WinBook TW100" */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
@@ -1761,7 +1791,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* WinBook TW700 */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
@@ -1786,7 +1816,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
int error;
if (has_acpi_companion(dev) &&
- !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+ strstarts(client->name, ts_data->acpi_name)) {
error = device_create_managed_software_node(dev, ts_data->properties, NULL);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
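
Switching to strstarts() turns the match into a prefix test, so one "GDIX1001" entry now covers both the GDIX1001:00 and GDIX1001:01 client names that previously needed duplicate table entries. Its kernel definition is effectively a strlen-bounded strncmp(); a sketch under that assumption:

#include <linux/string.h>

/* Mirrors strstarts() from include/linux/string.h. */
static inline bool my_strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

/*
 * my_strstarts("GDIX1001:00", "GDIX1001") -> true
 * my_strstarts("GDIX1001:01", "GDIX1001") -> true
 * strncmp(name, "GDIX1001:00", I2C_NAME_SIZE) would match only :00.
 */
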
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index bd271a5730aa..3c288e8f404b 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
@@ -56,7 +57,6 @@ static_assert(__alignof__(struct guid_block) == 1);
enum { /* wmi_block flags */
WMI_READ_TAKES_NO_ARGS,
- WMI_PROBED,
};
struct wmi_block {
@@ -64,8 +64,10 @@ struct wmi_block {
struct list_head list;
struct guid_block gblock;
struct acpi_device *acpi_device;
+ struct rw_semaphore notify_lock; /* Protects notify callback add/remove */
wmi_notify_handler handler;
void *handler_data;
+ bool driver_ready;
unsigned long flags;
};
@@ -219,6 +221,17 @@ static int wmidev_match_guid(struct device *dev, const void *data)
return 0;
}
+static int wmidev_match_notify_id(struct device *dev, const void *data)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ const u32 *notify_id = data;
+
+ if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
+ return 1;
+
+ return 0;
+}
+
static struct bus_type wmi_bus_type;
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
@@ -238,6 +251,17 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
return dev_to_wdev(dev);
}
+static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&wmi_bus_type, NULL, &notify_id, wmidev_match_notify_id);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wmi_device(dev);
+}
+
static void wmi_device_put(struct wmi_device *wdev)
{
put_device(&wdev->dev);
@@ -572,32 +596,31 @@ acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler,
void *data)
{
- struct wmi_block *block;
- acpi_status status = AE_NOT_EXIST;
- guid_t guid_input;
-
- if (!guid || !handler)
- return AE_BAD_PARAMETER;
+ struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- if (guid_parse(guid, &guid_input))
- return AE_BAD_PARAMETER;
+ wdev = wmi_find_device_by_guid(guid);
+ if (IS_ERR(wdev))
+ return AE_ERROR;
- list_for_each_entry(block, &wmi_block_list, list) {
- acpi_status wmi_status;
+ wblock = container_of(wdev, struct wmi_block, dev);
- if (guid_equal(&block->gblock.guid, &guid_input)) {
- if (block->handler)
- return AE_ALREADY_ACQUIRED;
+ down_write(&wblock->notify_lock);
+ if (wblock->handler) {
+ status = AE_ALREADY_ACQUIRED;
+ } else {
+ wblock->handler = handler;
+ wblock->handler_data = data;
- block->handler = handler;
- block->handler_data = data;
+ if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ dev_warn(&wblock->dev.dev, "Failed to enable device\n");
- wmi_status = wmi_method_enable(block, true);
- if ((wmi_status != AE_OK) ||
- ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
- status = wmi_status;
- }
+ status = AE_OK;
}
+ up_write(&wblock->notify_lock);
+
+ wmi_device_put(wdev);
return status;
}
@@ -613,30 +636,31 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
*/
acpi_status wmi_remove_notify_handler(const char *guid)
{
- struct wmi_block *block;
- acpi_status status = AE_NOT_EXIST;
- guid_t guid_input;
+ struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- if (!guid)
- return AE_BAD_PARAMETER;
+ wdev = wmi_find_device_by_guid(guid);
+ if (IS_ERR(wdev))
+ return AE_ERROR;
- if (guid_parse(guid, &guid_input))
- return AE_BAD_PARAMETER;
+ wblock = container_of(wdev, struct wmi_block, dev);
- list_for_each_entry(block, &wmi_block_list, list) {
- acpi_status wmi_status;
+ down_write(&wblock->notify_lock);
+ if (!wblock->handler) {
+ status = AE_NULL_ENTRY;
+ } else {
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ dev_warn(&wblock->dev.dev, "Failed to disable device\n");
- if (guid_equal(&block->gblock.guid, &guid_input)) {
- if (!block->handler)
- return AE_NULL_ENTRY;
+ wblock->handler = NULL;
+ wblock->handler_data = NULL;
- wmi_status = wmi_method_enable(block, false);
- block->handler = NULL;
- block->handler_data = NULL;
- if (wmi_status != AE_OK || (wmi_status == AE_OK && status == AE_NOT_EXIST))
- status = wmi_status;
- }
+ status = AE_OK;
}
+ up_write(&wblock->notify_lock);
+
+ wmi_device_put(wdev);
return status;
}
@@ -655,15 +679,19 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
{
struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- list_for_each_entry(wblock, &wmi_block_list, list) {
- struct guid_block *gblock = &wblock->gblock;
+ wdev = wmi_find_event_by_notify_id(event);
+ if (IS_ERR(wdev))
+ return AE_NOT_FOUND;
- if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
- return get_event_data(wblock, out);
- }
+ wblock = container_of(wdev, struct wmi_block, dev);
+ status = get_event_data(wblock, out);
- return AE_NOT_FOUND;
+ wmi_device_put(wdev);
+
+ return status;
}
EXPORT_SYMBOL_GPL(wmi_get_event_data);
@@ -868,7 +896,7 @@ static int wmi_dev_probe(struct device *dev)
if (wdriver->probe) {
ret = wdriver->probe(dev_to_wdev(dev),
find_guid_context(wblock, wdriver));
- if (!ret) {
+ if (ret) {
if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
dev_warn(dev, "Failed to disable device\n");
@@ -876,7 +904,9 @@ static int wmi_dev_probe(struct device *dev)
}
}
- set_bit(WMI_PROBED, &wblock->flags);
+ down_write(&wblock->notify_lock);
+ wblock->driver_ready = true;
+ up_write(&wblock->notify_lock);
return 0;
}
@@ -886,7 +916,9 @@ static void wmi_dev_remove(struct device *dev)
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
- clear_bit(WMI_PROBED, &wblock->flags);
+ down_write(&wblock->notify_lock);
+ wblock->driver_ready = false;
+ up_write(&wblock->notify_lock);
if (wdriver->remove)
wdriver->remove(dev_to_wdev(dev));
@@ -999,6 +1031,8 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.setable = true;
out_init:
+ init_rwsem(&wblock->notify_lock);
+ wblock->driver_ready = false;
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
@@ -1171,6 +1205,26 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
}
}
+static void wmi_notify_driver(struct wmi_block *wblock)
+{
+ struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+ struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ if (!driver->no_notify_data) {
+ status = get_event_data(wblock, &data);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+ return;
+ }
+ }
+
+ if (driver->notify)
+ driver->notify(&wblock->dev, data.pointer);
+
+ kfree(data.pointer);
+}
+
static int wmi_notify_device(struct device *dev, void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
@@ -1179,28 +1233,17 @@ static int wmi_notify_device(struct device *dev, void *data)
if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
return 0;
- /* If a driver is bound, then notify the driver. */
- if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
- struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
- struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_status status;
-
- if (!driver->no_notify_data) {
- status = get_event_data(wblock, &evdata);
- if (ACPI_FAILURE(status)) {
- dev_warn(&wblock->dev.dev, "failed to get event data\n");
- return -EIO;
- }
- }
-
- if (driver->notify)
- driver->notify(&wblock->dev, evdata.pointer);
-
- kfree(evdata.pointer);
- } else if (wblock->handler) {
- /* Legacy handler */
- wblock->handler(*event, wblock->handler_data);
+ down_read(&wblock->notify_lock);
+ /* The WMI driver notify handler conflicts with the legacy WMI handler.
+ * Because of this the WMI driver notify handler takes precedence.
+ */
+ if (wblock->dev.dev.driver && wblock->driver_ready) {
+ wmi_notify_driver(wblock);
+ } else {
+ if (wblock->handler)
+ wblock->handler(*event, wblock->handler_data);
}
+ up_read(&wblock->notify_lock);
acpi_bus_generate_netlink_event(wblock->acpi_device->pnp.device_class,
dev_name(&wblock->dev.dev), *event, 0);
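
The locking introduced above follows the usual rwsem split: handler registration/removal and driver bind/unbind take the semaphore for writing, while event delivery only needs shared access, so concurrent notifications do not serialize against each other. A minimal sketch of the same pattern, with hypothetical names rather than the driver's API:

#include <linux/rwsem.h>

struct example_block {
	struct rw_semaphore lock;	/* protects handler and data; init_rwsem() before use */
	void (*handler)(void *data);
	void *data;
};

/* Writer side: swap the handler under exclusive ownership. */
static void example_set_handler(struct example_block *b,
				void (*fn)(void *data), void *data)
{
	down_write(&b->lock);
	b->handler = fn;
	b->data = data;
	up_write(&b->lock);
}

/* Reader side: deliveries may run concurrently with each other. */
static void example_notify(struct example_block *b)
{
	down_read(&b->lock);
	if (b->handler)
		b->handler(b->data);
	up_read(&b->lock);
}
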
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index f8221a15575b..a3415f1c0b5f 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include "x86-android-tablets.h"
+#include "../serdev_helpers.h"
static struct platform_device *x86_android_tablet_device;
@@ -113,6 +114,9 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq))
irq_set_irq_type(irq, irq_type);
+ if (data->free_gpio)
+ devm_gpiod_put(&x86_android_tablet_device->dev, gpiod);
+
return irq;
case X86_ACPI_IRQ_TYPE_PMIC:
status = acpi_get_handle(NULL, data->chip, &handle);
@@ -229,38 +233,20 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
{
- struct acpi_device *ctrl_adev, *serdev_adev;
+ struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s ctrl adev\n",
- info->ctrl_hid, info->ctrl_uid);
- return -ENODEV;
- }
+ ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
+ info->ctrl_devname);
+ if (IS_ERR(ctrl_dev))
+ return PTR_ERR(ctrl_dev);
serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
if (!serdev_adev) {
pr_err("error could not get %s serdev adev\n", info->serdev_hid);
- goto put_ctrl_adev;
- }
-
- /* get_first_physical_node() returns a weak ref, no need to put() it */
- ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s ctrl physical dev\n",
- info->ctrl_hid, info->ctrl_uid);
- goto put_serdev_adev;
- }
-
- /* ctrl_dev now points to the controller's parent, get the controller */
- ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s %s ctrl dev\n",
- info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
- goto put_serdev_adev;
+ goto put_ctrl_dev;
}
serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
@@ -283,8 +269,8 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int
put_serdev_adev:
acpi_dev_put(serdev_adev);
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
+put_ctrl_dev:
+ put_device(ctrl_dev);
return ret;
}
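
For reference, the resulting lookup flow, assuming get_serdev_controller() behaves as its use above implies: it returns the serdev controller's struct device with a reference held, so every path after a successful lookup must drop it with put_device(). Identifiers below are placeholders:

#include <linux/device.h>
#include <linux/err.h>

static int example_attach(void)
{
	struct device *ctrl_dev;

	ctrl_dev = get_serdev_controller("EXMP0001", "1", 0, "serial0");
	if (IS_ERR(ctrl_dev))
		return PTR_ERR(ctrl_dev);

	/* ... allocate a serdev device on to_serdev_controller(ctrl_dev) ... */

	put_device(ctrl_dev);
	return 0;
}
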
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index f1c66a61bfc5..c297391955ad 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -116,6 +116,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst
.trigger = ACPI_EDGE_SENSITIVE,
.polarity = ACPI_ACTIVE_LOW,
.con_id = "goodix_ts_irq",
+ .free_gpio = true,
},
}, {
/* Wacom Digitizer in keyboard half */
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index bc6bbf7ec6ea..278402dcb808 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -68,7 +68,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
.dev_id = "i2c-NVT-ts",
.table = {
GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
@@ -77,7 +77,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
};
static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
- &acer_b1_750_goodix_gpios,
+ &acer_b1_750_nvt_ts_gpios,
&int3496_reference_gpios,
NULL
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 49fed9410adb..468993edfeee 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -39,6 +39,7 @@ struct x86_acpi_irq_data {
int index;
int trigger; /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */
int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
+ bool free_gpio; /* Release GPIO after getting IRQ (for TYPE_GPIOINT) */
const char *con_id;
};
diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c
index 709bbc448fad..d7ef46ccd9b8 100644
--- a/drivers/pmdomain/arm/scmi_perf_domain.c
+++ b/drivers/pmdomain/arm/scmi_perf_domain.c
@@ -159,6 +159,9 @@ static void scmi_perf_domain_remove(struct scmi_device *sdev)
struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev);
int i;
+ if (!scmi_pd_data)
+ return;
+
of_genpd_del_provider(dev->of_node);
for (i = 0; i < scmi_pd_data->num_domains; i++)
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index a1f6cba3ae6c..18e232b5ed53 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -1109,7 +1109,7 @@ static int __init genpd_power_off_unused(void)
return 0;
}
-late_initcall(genpd_power_off_unused);
+late_initcall_sync(genpd_power_off_unused);
#ifdef CONFIG_PM_SLEEP
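
The switch to late_initcall_sync() relies on initcall ordering: within the same level, every plain late_initcall() runs before any late_initcall_sync(), so providers registered at late_initcall() are guaranteed to have run before the unused domains get powered off. Illustrative sketch (hypothetical functions):

#include <linux/init.h>

static int __init provider_init(void)		/* runs first */
{
	return 0;
}
late_initcall(provider_init);

static int __init power_off_unused(void)	/* runs after all plain late_initcalls */
{
	return 0;
}
late_initcall_sync(power_off_unused);
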
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index e26dc17d07ad..e274e3315fe7 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -561,6 +561,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
goto err_put_node;
}
+ /* recursive call to add all subdomains */
+ ret = scpsys_add_subdomain(scpsys, child);
+ if (ret)
+ goto err_put_node;
+
ret = pm_genpd_add_subdomain(parent_pd, child_pd);
if (ret) {
dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
@@ -570,11 +575,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
child_pd->name);
}
-
- /* recursive call to add all subdomains */
- ret = scpsys_add_subdomain(scpsys, child);
- if (ret)
- goto err_put_node;
}
return 0;
@@ -588,9 +588,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
{
int ret;
- if (scpsys_domain_is_on(pd))
- scpsys_power_off(&pd->genpd);
-
/*
* We're in the error cleanup already, so we only complain,
* but won't emit another error on top of the original one.
@@ -600,6 +597,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
dev_err(pd->scpsys->dev,
"failed to remove domain '%s' : %d - state may be inconsistent\n",
pd->genpd.name, ret);
+ if (scpsys_domain_is_on(pd))
+ scpsys_power_off(&pd->genpd);
clk_bulk_put(pd->num_clks, pd->clks);
clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
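
The reordering above makes subdomain registration post-order: a child's own subtree is fully registered before the child is linked to its parent, and teardown now removes the genpd before powering the domain off. A structural sketch of the post-order walk, with made-up types:

#include <linux/list.h>

struct node {
	struct list_head children;
	struct list_head sibling;
	struct node *parent;
};

static int link_to_parent(struct node *parent, struct node *n)
{
	n->parent = parent;	/* stand-in for pm_genpd_add_subdomain() */
	return 0;
}

static int add_domain(struct node *parent, struct node *n)
{
	struct node *child;
	int ret;

	/* register the whole subtree first ... */
	list_for_each_entry(child, &n->children, sibling) {
		ret = add_domain(n, child);
		if (ret)
			return ret;
	}
	/* ... then link this node to its parent */
	return link_to_parent(parent, n);
}
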
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index 3078896b1300..47df910645f6 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -692,6 +692,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+ unsigned int peer_enabled_corner;
if (pd->state_synced) {
to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
@@ -701,9 +702,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
this_sleep_corner = pd->level_count - 1;
}
- if (peer && peer->enabled)
- to_active_sleep(peer, peer->corner, &peer_active_corner,
+ if (peer && peer->enabled) {
+ peer_enabled_corner = max(peer->corner, peer->enable_corner);
+ to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
&peer_sleep_corner);
+ }
active_corner = max(this_active_corner, peer_active_corner);
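
The fix accounts for the peer's enable corner: while enabled, the peer never sits below that level even if its last requested corner is lower, so aggregation must take the max of the two. Sketch with an illustrative struct:

#include <linux/minmax.h>

struct pd_state {
	unsigned int corner;		/* last requested level */
	unsigned int enable_corner;	/* minimum level while enabled */
};

static unsigned int effective_corner(const struct pd_state *pd)
{
	return max(pd->corner, pd->enable_corner);
}
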
diff --git a/drivers/pmdomain/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c
index 39ca84a67daa..621e411fc999 100644
--- a/drivers/pmdomain/renesas/r8a77980-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77980-sysc.c
@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
- { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
+ { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON,
+ PD_CPU_NOCR },
{ "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
{ "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
{ "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index 829e0dba2fda..ab3350ce2d62 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -61,13 +61,11 @@ static int as3722_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int as3722_poweroff_remove(struct platform_device *pdev)
+static void as3722_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == as3722_pm_power_off)
pm_power_off = NULL;
as3722_pm_poweroff = NULL;
-
- return 0;
}
static struct platform_driver as3722_poweroff_driver = {
@@ -75,7 +73,7 @@ static struct platform_driver as3722_poweroff_driver = {
.name = "as3722-power-off",
},
.probe = as3722_poweroff_probe,
- .remove = as3722_poweroff_remove,
+ .remove_new = as3722_poweroff_remove,
};
module_platform_driver(as3722_poweroff_driver);
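
This and the following drivers convert to .remove_new, the void-returning remove callback: nothing useful can be done with an error at remove time, so it is logged instead of returned. Minimal sketch of the shape (hypothetical driver):

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* undo probe; no return value to get wrong */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example",
	},
};
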
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index dd5399785b69..93eece027865 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -57,7 +57,7 @@ static struct shdwc {
void __iomem *mpddrc_base;
} at91_shdwc;
-static void __init at91_wakeup_status(struct platform_device *pdev)
+static void at91_wakeup_status(struct platform_device *pdev)
{
const char *reason;
u32 reg = readl(at91_shdwc.shdwc_base + AT91_SHDW_SR);
@@ -149,7 +149,7 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
writel(wakeup_mode | mode, at91_shdwc.shdwc_base + AT91_SHDW_MR);
}
-static int __init at91_poweroff_probe(struct platform_device *pdev)
+static int at91_poweroff_probe(struct platform_device *pdev)
{
struct device_node *np;
u32 ddr_type;
@@ -202,7 +202,7 @@ clk_disable:
return ret;
}
-static int __exit at91_poweroff_remove(struct platform_device *pdev)
+static void at91_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == at91_poweroff)
pm_power_off = NULL;
@@ -211,8 +211,6 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev)
iounmap(at91_shdwc.mpddrc_base);
clk_disable_unprepare(at91_shdwc.sclk);
-
- return 0;
}
static const struct of_device_id at91_poweroff_of_match[] = {
@@ -224,13 +222,14 @@ static const struct of_device_id at91_poweroff_of_match[] = {
MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
static struct platform_driver at91_poweroff_driver = {
- .remove = __exit_p(at91_poweroff_remove),
+ .probe = at91_poweroff_probe,
+ .remove_new = at91_poweroff_remove,
.driver = {
.name = "at91-poweroff",
.of_match_table = at91_poweroff_of_match,
},
};
-module_platform_driver_probe(at91_poweroff_driver, at91_poweroff_probe);
+module_platform_driver(at91_poweroff_driver);
MODULE_AUTHOR("Atmel Corporation");
MODULE_DESCRIPTION("Shutdown driver for Atmel SoCs");
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index aa9b012d3d00..16512654295f 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -337,7 +337,7 @@ static int at91_rcdev_init(struct at91_reset *reset,
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
-static int __init at91_reset_probe(struct platform_device *pdev)
+static int at91_reset_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct at91_reset *reset;
@@ -417,24 +417,23 @@ disable_clk:
return ret;
}
-static int __exit at91_reset_remove(struct platform_device *pdev)
+static void at91_reset_remove(struct platform_device *pdev)
{
struct at91_reset *reset = platform_get_drvdata(pdev);
unregister_restart_handler(&reset->nb);
clk_disable_unprepare(reset->sclk);
-
- return 0;
}
static struct platform_driver at91_reset_driver = {
- .remove = __exit_p(at91_reset_remove),
+ .probe = at91_reset_probe,
+ .remove_new = at91_reset_remove,
.driver = {
.name = "at91-reset",
.of_match_table = at91_reset_of_match,
},
};
-module_platform_driver_probe(at91_reset_driver, at91_reset_probe);
+module_platform_driver(at91_reset_driver);
MODULE_AUTHOR("Atmel Corporation");
MODULE_DESCRIPTION("Reset driver for Atmel SoCs");
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index e76b102b57b1..959ce0dbe91d 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -107,7 +107,7 @@ static const unsigned long long sdwc_dbc_period[] = {
0, 3, 32, 512, 4096, 32768,
};
-static void __init at91_wakeup_status(struct platform_device *pdev)
+static void at91_wakeup_status(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
const struct reg_config *rcfg = shdw->rcfg;
@@ -329,7 +329,7 @@ static const struct of_device_id at91_pmc_ids[] = {
{ /* Sentinel. */ }
};
-static int __init at91_shdwc_probe(struct platform_device *pdev)
+static int at91_shdwc_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device_node *np;
@@ -421,7 +421,7 @@ clk_disable:
return ret;
}
-static int __exit at91_shdwc_remove(struct platform_device *pdev)
+static void at91_shdwc_remove(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
@@ -437,18 +437,17 @@ static int __exit at91_shdwc_remove(struct platform_device *pdev)
iounmap(shdw->pmc_base);
clk_disable_unprepare(shdw->sclk);
-
- return 0;
}
static struct platform_driver at91_shdwc_driver = {
- .remove = __exit_p(at91_shdwc_remove),
+ .probe = at91_shdwc_probe,
+ .remove_new = at91_shdwc_remove,
.driver = {
.name = "at91-shdwc",
.of_match_table = at91_shdwc_of_match,
},
};
-module_platform_driver_probe(at91_shdwc_driver, at91_shdwc_probe);
+module_platform_driver(at91_shdwc_driver);
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_DESCRIPTION("Atmel shutdown controller driver");
diff --git a/drivers/power/reset/atc260x-poweroff.c b/drivers/power/reset/atc260x-poweroff.c
index 98f20251a6d1..b4aa50e9685e 100644
--- a/drivers/power/reset/atc260x-poweroff.c
+++ b/drivers/power/reset/atc260x-poweroff.c
@@ -233,7 +233,7 @@ static int atc260x_pwrc_probe(struct platform_device *pdev)
return ret;
}
-static int atc260x_pwrc_remove(struct platform_device *pdev)
+static void atc260x_pwrc_remove(struct platform_device *pdev)
{
struct atc260x_pwrc *priv = platform_get_drvdata(pdev);
@@ -243,13 +243,11 @@ static int atc260x_pwrc_remove(struct platform_device *pdev)
}
unregister_restart_handler(&priv->restart_nb);
-
- return 0;
}
static struct platform_driver atc260x_pwrc_driver = {
.probe = atc260x_pwrc_probe,
- .remove = atc260x_pwrc_remove,
+ .remove_new = atc260x_pwrc_remove,
.driver = {
.name = "atc260x-pwrc",
},
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 3aa19765772d..d1e177176fa1 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -17,17 +17,14 @@
struct gpio_restart {
struct gpio_desc *reset_gpio;
- struct notifier_block restart_handler;
u32 active_delay_ms;
u32 inactive_delay_ms;
u32 wait_delay_ms;
};
-static int gpio_restart_notify(struct notifier_block *this,
- unsigned long mode, void *cmd)
+static int gpio_restart_notify(struct sys_off_data *data)
{
- struct gpio_restart *gpio_restart =
- container_of(this, struct gpio_restart, restart_handler);
+ struct gpio_restart *gpio_restart = data->cb_data;
/* drive it active, also inactive->active edge */
gpiod_direction_output(gpio_restart->reset_gpio, 1);
@@ -52,6 +49,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
{
struct gpio_restart *gpio_restart;
bool open_source = false;
+ int priority = 129;
u32 property;
int ret;
@@ -71,8 +69,6 @@ static int gpio_restart_probe(struct platform_device *pdev)
return ret;
}
- gpio_restart->restart_handler.notifier_call = gpio_restart_notify;
- gpio_restart->restart_handler.priority = 129;
gpio_restart->active_delay_ms = 100;
gpio_restart->inactive_delay_ms = 100;
gpio_restart->wait_delay_ms = 3000;
@@ -83,7 +79,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Invalid priority property: %u\n",
property);
else
- gpio_restart->restart_handler.priority = property;
+ priority = property;
}
of_property_read_u32(pdev->dev.of_node, "active-delay",
@@ -93,9 +89,11 @@ static int gpio_restart_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "wait-delay",
&gpio_restart->wait_delay_ms);
- platform_set_drvdata(pdev, gpio_restart);
-
- ret = register_restart_handler(&gpio_restart->restart_handler);
+ ret = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ priority,
+ gpio_restart_notify,
+ gpio_restart);
if (ret) {
dev_err(&pdev->dev, "%s: cannot register restart handler, %d\n",
__func__, ret);
@@ -105,19 +103,6 @@ static int gpio_restart_probe(struct platform_device *pdev)
return 0;
}
-static void gpio_restart_remove(struct platform_device *pdev)
-{
- struct gpio_restart *gpio_restart = platform_get_drvdata(pdev);
- int ret;
-
- ret = unregister_restart_handler(&gpio_restart->restart_handler);
- if (ret) {
- dev_err(&pdev->dev,
- "%s: cannot unregister restart handler, %d\n",
- __func__, ret);
- }
-}
-
static const struct of_device_id of_gpio_restart_match[] = {
{ .compatible = "gpio-restart", },
{},
@@ -125,7 +110,6 @@ static const struct of_device_id of_gpio_restart_match[] = {
static struct platform_driver gpio_restart_driver = {
.probe = gpio_restart_probe,
- .remove_new = gpio_restart_remove,
.driver = {
.name = "restart-gpio",
.of_match_table = of_gpio_restart_match,
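
devm_register_sys_off_handler() replaces the open-coded notifier: the priority is passed directly, the callback gets its context back via sys_off_data, and unregistration happens automatically on unbind, which is why the remove callback above could be deleted. Sketch with hypothetical names:

#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>

struct example_priv {
	int reset_line;	/* placeholder for the real reset resource */
};

static int example_restart(struct sys_off_data *data)
{
	struct example_priv *priv = data->cb_data;	/* no container_of() needed */

	(void)priv;	/* assert the reset line here */
	return NOTIFY_DONE;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	return devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_DEFAULT,
					     example_restart, priv);
}
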
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index eea05921a054..fa25fbd53934 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -286,7 +286,7 @@ static int ltc2952_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int ltc2952_poweroff_remove(struct platform_device *pdev)
+static void ltc2952_poweroff_remove(struct platform_device *pdev)
{
struct ltc2952_poweroff *data = platform_get_drvdata(pdev);
@@ -295,7 +295,6 @@ static int ltc2952_poweroff_remove(struct platform_device *pdev)
hrtimer_cancel(&data->timer_wde);
atomic_notifier_chain_unregister(&panic_notifier_list,
&data->panic_notifier);
- return 0;
}
static const struct of_device_id of_ltc2952_poweroff_match[] = {
@@ -306,7 +305,7 @@ MODULE_DEVICE_TABLE(of, of_ltc2952_poweroff_match);
static struct platform_driver ltc2952_poweroff_driver = {
.probe = ltc2952_poweroff_probe,
- .remove = ltc2952_poweroff_remove,
+ .remove_new = ltc2952_poweroff_remove,
.driver = {
.name = "ltc2952-poweroff",
.of_match_table = of_ltc2952_poweroff_match,
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index 108167f7738b..57a63c0ab7fb 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -70,12 +70,10 @@ static int mt6323_pwrc_probe(struct platform_device *pdev)
return 0;
}
-static int mt6323_pwrc_remove(struct platform_device *pdev)
+static void mt6323_pwrc_remove(struct platform_device *pdev)
{
if (pm_power_off == &mt6323_do_pwroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id mt6323_pwrc_dt_match[] = {
@@ -86,7 +84,7 @@ MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);
static struct platform_driver mt6323_pwrc_driver = {
.probe = mt6323_pwrc_probe,
- .remove = mt6323_pwrc_remove,
+ .remove_new = mt6323_pwrc_remove,
.driver = {
.name = "mt6323-pwrc",
.of_match_table = mt6323_pwrc_dt_match,
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
index de35d24bb7ef..1775b318d0ef 100644
--- a/drivers/power/reset/pwr-mlxbf.c
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -17,11 +17,17 @@
#include <linux/types.h>
struct pwr_mlxbf {
- struct work_struct send_work;
+ struct work_struct reboot_work;
+ struct work_struct shutdown_work;
const char *hid;
};
-static void pwr_mlxbf_send_work(struct work_struct *work)
+static void pwr_mlxbf_reboot_work(struct work_struct *work)
+{
+ acpi_bus_generate_netlink_event("button/reboot.*", "Reboot Button", 0x80, 1);
+}
+
+static void pwr_mlxbf_shutdown_work(struct work_struct *work)
{
acpi_bus_generate_netlink_event("button/power.*", "Power Button", 0x80, 1);
}
@@ -33,10 +39,10 @@ static irqreturn_t pwr_mlxbf_irq(int irq, void *ptr)
struct pwr_mlxbf *priv = ptr;
if (!strncmp(priv->hid, rst_pwr_hid, 8))
- emergency_restart();
+ schedule_work(&priv->reboot_work);
if (!strncmp(priv->hid, low_pwr_hid, 8))
- schedule_work(&priv->send_work);
+ schedule_work(&priv->shutdown_work);
return IRQ_HANDLED;
}
@@ -64,7 +70,11 @@ static int pwr_mlxbf_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(dev, irq, "Error getting %s irq.\n", priv->hid);
- err = devm_work_autocancel(dev, &priv->send_work, pwr_mlxbf_send_work);
+ err = devm_work_autocancel(dev, &priv->shutdown_work, pwr_mlxbf_shutdown_work);
+ if (err)
+ return err;
+
+ err = devm_work_autocancel(dev, &priv->reboot_work, pwr_mlxbf_reboot_work);
if (err)
return err;
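
devm_work_autocancel() ties each work item's lifetime to the device, so the IRQ handler can schedule work that may sleep (such as sending a netlink event) without unbind racing a pending item. Sketch with hypothetical driver data:

#include <linux/devm-helpers.h>
#include <linux/platform_device.h>

struct example_priv {
	struct work_struct work;
};

static void example_work_fn(struct work_struct *work)
{
	/* process context: safe to sleep; the hard IRQ handler only
	 * does schedule_work(&priv->work) */
}

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	return devm_work_autocancel(&pdev->dev, &priv->work, example_work_fn);
}
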
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
index 0ddf7f25f7b8..e0f2ff6b147c 100644
--- a/drivers/power/reset/qnap-poweroff.c
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -111,15 +111,14 @@ static int qnap_power_off_probe(struct platform_device *pdev)
return 0;
}
-static int qnap_power_off_remove(struct platform_device *pdev)
+static void qnap_power_off_remove(struct platform_device *pdev)
{
pm_power_off = NULL;
- return 0;
}
static struct platform_driver qnap_power_off_driver = {
.probe = qnap_power_off_probe,
- .remove = qnap_power_off_remove,
+ .remove_new = qnap_power_off_remove,
.driver = {
.name = "qnap_power_off",
.of_match_table = of_match_ptr(qnap_power_off_of_match_table),
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
index 7f87fbb8b051..15160809c423 100644
--- a/drivers/power/reset/regulator-poweroff.c
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -52,12 +52,10 @@ static int regulator_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int regulator_poweroff_remove(__maybe_unused struct platform_device *pdev)
+static void regulator_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == &regulator_poweroff_do_poweroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id of_regulator_poweroff_match[] = {
@@ -68,7 +66,7 @@ MODULE_DEVICE_TABLE(of, of_regulator_poweroff_match);
static struct platform_driver regulator_poweroff_driver = {
.probe = regulator_poweroff_probe,
- .remove = regulator_poweroff_remove,
+ .remove_new = regulator_poweroff_remove,
.driver = {
.name = "poweroff-regulator",
.of_match_table = of_regulator_poweroff_match,
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index 28f1822db162..f4d6004793d3 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -33,12 +33,10 @@ static int restart_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int restart_poweroff_remove(struct platform_device *pdev)
+static void restart_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == &restart_poweroff_do_poweroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id of_restart_poweroff_match[] = {
@@ -49,7 +47,7 @@ MODULE_DEVICE_TABLE(of, of_restart_poweroff_match);
static struct platform_driver restart_poweroff_driver = {
.probe = restart_poweroff_probe,
- .remove = restart_poweroff_remove,
+ .remove_new = restart_poweroff_remove,
.driver = {
.name = "poweroff-restart",
.of_match_table = of_restart_poweroff_match,
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
index bd3b396558e0..5df9b41c68c7 100644
--- a/drivers/power/reset/rmobile-reset.c
+++ b/drivers/power/reset/rmobile-reset.c
@@ -59,11 +59,10 @@ fail_unmap:
return error;
}
-static int rmobile_reset_remove(struct platform_device *pdev)
+static void rmobile_reset_remove(struct platform_device *pdev)
{
unregister_restart_handler(&rmobile_reset_nb);
iounmap(sysc_base2);
- return 0;
}
static const struct of_device_id rmobile_reset_of_match[] = {
@@ -74,7 +73,7 @@ MODULE_DEVICE_TABLE(of, rmobile_reset_of_match);
static struct platform_driver rmobile_reset_driver = {
.probe = rmobile_reset_probe,
- .remove = rmobile_reset_remove,
+ .remove_new = rmobile_reset_remove,
.driver = {
.name = "rmobile_reset",
.of_match_table = rmobile_reset_of_match,
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index c3aab7f59345..1b2ce7734260 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -76,12 +76,10 @@ static int syscon_poweroff_probe(struct platform_device *pdev)
return 0;
}
-static int syscon_poweroff_remove(struct platform_device *pdev)
+static void syscon_poweroff_remove(struct platform_device *pdev)
{
if (pm_power_off == syscon_poweroff)
pm_power_off = NULL;
-
- return 0;
}
static const struct of_device_id syscon_poweroff_of_match[] = {
@@ -91,7 +89,7 @@ static const struct of_device_id syscon_poweroff_of_match[] = {
static struct platform_driver syscon_poweroff_driver = {
.probe = syscon_poweroff_probe,
- .remove = syscon_poweroff_remove,
+ .remove_new = syscon_poweroff_remove,
.driver = {
.name = "syscon-poweroff",
.of_match_table = syscon_poweroff_of_match,
diff --git a/drivers/power/reset/tps65086-restart.c b/drivers/power/reset/tps65086-restart.c
index 5ec819eac7da..ee8e9f4b837e 100644
--- a/drivers/power/reset/tps65086-restart.c
+++ b/drivers/power/reset/tps65086-restart.c
@@ -62,19 +62,21 @@ static int tps65086_restart_probe(struct platform_device *pdev)
return 0;
}
-static int tps65086_restart_remove(struct platform_device *pdev)
+static void tps65086_restart_remove(struct platform_device *pdev)
{
struct tps65086_restart *tps65086_restart = platform_get_drvdata(pdev);
int ret;
ret = unregister_restart_handler(&tps65086_restart->handler);
if (ret) {
+ /*
+ * tps65086_restart_probe() registered the restart handler, so
+ * unregistering should always succeed. Checking the error code
+ * shouldn't be needed; it is done here only for completeness.
+ */
dev_err(&pdev->dev, "%s: cannot unregister restart handler: %d\n",
__func__, ret);
- return -ENODEV;
}
-
- return 0;
}
static const struct platform_device_id tps65086_restart_id_table[] = {
@@ -88,7 +90,7 @@ static struct platform_driver tps65086_restart_driver = {
.name = "tps65086-restart",
},
.probe = tps65086_restart_probe,
- .remove = tps65086_restart_remove,
+ .remove_new = tps65086_restart_remove,
.id_table = tps65086_restart_id_table,
};
module_platform_driver(tps65086_restart_driver);
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f21cb05815ec..3e31375491d5 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -978,6 +978,7 @@ config CHARGER_QCOM_SMB2
config FUEL_GAUGE_MM8013
tristate "Mitsumi MM8013 fuel gauge driver"
depends on I2C
+ select REGMAP_I2C
help
Say Y here to enable the Mitsumi MM8013 fuel gauge driver.
It enables the monitoring of many battery parameters, including
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 1db290ee2591..2b393eb5c282 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -36,10 +36,16 @@
#define BQ24190_REG_POC_WDT_RESET_SHIFT 6
#define BQ24190_REG_POC_CHG_CONFIG_MASK (BIT(5) | BIT(4))
#define BQ24190_REG_POC_CHG_CONFIG_SHIFT 4
-#define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0
-#define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1
-#define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2
-#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3
+#define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0
+#define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1
+#define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2
+#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3
+#define BQ24296_REG_POC_OTG_CONFIG_MASK BIT(5)
+#define BQ24296_REG_POC_OTG_CONFIG_SHIFT 5
+#define BQ24296_REG_POC_CHG_CONFIG_MASK BIT(4)
+#define BQ24296_REG_POC_CHG_CONFIG_SHIFT 4
+#define BQ24296_REG_POC_OTG_CONFIG_DISABLE 0x0
+#define BQ24296_REG_POC_OTG_CONFIG_OTG 0x1
#define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1))
#define BQ24190_REG_POC_SYS_MIN_SHIFT 1
#define BQ24190_REG_POC_SYS_MIN_MIN 3000
@@ -134,59 +140,24 @@
#define BQ24190_REG_F_BAT_FAULT_SHIFT 3
#define BQ24190_REG_F_NTC_FAULT_MASK (BIT(2) | BIT(1) | BIT(0))
#define BQ24190_REG_F_NTC_FAULT_SHIFT 0
+#define BQ24296_REG_F_NTC_FAULT_MASK (BIT(1) | BIT(0))
+#define BQ24296_REG_F_NTC_FAULT_SHIFT 0
#define BQ24190_REG_VPRS 0x0A /* Vendor/Part/Revision Status */
#define BQ24190_REG_VPRS_PN_MASK (BIT(5) | BIT(4) | BIT(3))
#define BQ24190_REG_VPRS_PN_SHIFT 3
-#define BQ24190_REG_VPRS_PN_24190 0x4
-#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193, 24196 */
-#define BQ24190_REG_VPRS_PN_24192I 0x3
+#define BQ24190_REG_VPRS_PN_24190 0x4
+#define BQ24190_REG_VPRS_PN_24192 0x5 /* Also 24193, 24196 */
+#define BQ24190_REG_VPRS_PN_24192I 0x3
+#define BQ24296_REG_VPRS_PN_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BQ24296_REG_VPRS_PN_SHIFT 5
+#define BQ24296_REG_VPRS_PN_24296 0x1
#define BQ24190_REG_VPRS_TS_PROFILE_MASK BIT(2)
#define BQ24190_REG_VPRS_TS_PROFILE_SHIFT 2
#define BQ24190_REG_VPRS_DEV_REG_MASK (BIT(1) | BIT(0))
#define BQ24190_REG_VPRS_DEV_REG_SHIFT 0
/*
- * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
- * so the first read after a fault returns the latched value and subsequent
- * reads return the current value. In order to return the fault status
- * to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine.
- */
-struct bq24190_dev_info {
- struct i2c_client *client;
- struct device *dev;
- struct extcon_dev *edev;
- struct power_supply *charger;
- struct power_supply *battery;
- struct delayed_work input_current_limit_work;
- char model_name[I2C_NAME_SIZE];
- bool initialized;
- bool irq_event;
- bool otg_vbus_enabled;
- int charge_type;
- u16 sys_min;
- u16 iprechg;
- u16 iterm;
- u32 ichg;
- u32 ichg_max;
- u32 vreg;
- u32 vreg_max;
- struct mutex f_reg_lock;
- u8 f_reg;
- u8 ss_reg;
- u8 watchdog;
-};
-
-static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
- const union power_supply_propval *val);
-
-static const unsigned int bq24190_usb_extcon_cable[] = {
- EXTCON_USB,
- EXTCON_NONE,
-};
-
-/*
* The tables below provide a 2-way mapping for the value that goes in
* the register field and the real-world value that it represents.
* The index of the array is the value that goes in the register; the
@@ -211,6 +182,9 @@ static const int bq24190_ccc_ichg_values[] = {
4096000, 4160000, 4224000, 4288000, 4352000, 4416000, 4480000, 4544000
};
+/* ICHG higher than 3008mA is not supported in BQ24296 */
+#define BQ24296_CCC_ICHG_VALUES_LEN 40
+
/* REG04[7:2] (VREG) in uV */
static const int bq24190_cvc_vreg_values[] = {
3504000, 3520000, 3536000, 3552000, 3568000, 3584000, 3600000, 3616000,
@@ -228,6 +202,68 @@ static const int bq24190_ictrc_treg_values[] = {
600, 800, 1000, 1200
};
+enum bq24190_chip {
+ BQ24190,
+ BQ24192,
+ BQ24192i,
+ BQ24196,
+ BQ24296,
+};
+
+/*
+ * The FAULT register is latched by the bq24190 (except for NTC_FAULT)
+ * so the first read after a fault returns the latched value and subsequent
+ * reads return the current value. In order to return the fault status
+ * to the user, have the interrupt handler save the reg's value and retrieve
+ * it in the appropriate health/status routine.
+ */
+struct bq24190_dev_info {
+ struct i2c_client *client;
+ struct device *dev;
+ struct extcon_dev *edev;
+ struct power_supply *charger;
+ struct power_supply *battery;
+ struct delayed_work input_current_limit_work;
+ char model_name[I2C_NAME_SIZE];
+ bool initialized;
+ bool irq_event;
+ bool otg_vbus_enabled;
+ int charge_type;
+ u16 sys_min;
+ u16 iprechg;
+ u16 iterm;
+ u32 ichg;
+ u32 ichg_max;
+ u32 vreg;
+ u32 vreg_max;
+ struct mutex f_reg_lock;
+ u8 f_reg;
+ u8 ss_reg;
+ u8 watchdog;
+ const struct bq24190_chip_info *info;
+};
+
+struct bq24190_chip_info {
+ int ichg_array_size;
+#ifdef CONFIG_REGULATOR
+ const struct regulator_desc *vbus_desc;
+#endif
+ int (*check_chip)(struct bq24190_dev_info *bdi);
+ int (*set_chg_config)(struct bq24190_dev_info *bdi, const u8 chg_config);
+ int (*set_otg_vbus)(struct bq24190_dev_info *bdi, bool enable);
+ u8 ntc_fault_mask;
+ int (*get_ntc_status)(const u8 value);
+};
+
+static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
+ const union power_supply_propval *val);
+
+static const unsigned int bq24190_usb_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_NONE,
+};
+
/*
* Return the index in 'tbl' of greatest value that is less than or equal to
* 'val'. The index range returned is 0 to 'tbl_size' - 1. Assumes that
@@ -529,6 +565,43 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
return ret;
}
+static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0) {
+ dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
+ return ret;
+ }
+
+ bdi->otg_vbus_enabled = enable;
+ if (enable) {
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_CHG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24190_REG_POC_CHG_CONFIG_DISABLE);
+
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_OTG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24296_REG_POC_OTG_CONFIG_OTG);
+ } else
+ ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_OTG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24296_REG_POC_OTG_CONFIG_DISABLE);
+
+out:
+ pm_runtime_mark_last_busy(bdi->dev);
+ pm_runtime_put_autosuspend(bdi->dev);
+
+ return ret;
+}
+
#ifdef CONFIG_REGULATOR
static int bq24190_vbus_enable(struct regulator_dev *dev)
{
@@ -567,6 +640,43 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
return bdi->otg_vbus_enabled;
}
+static int bq24296_vbus_enable(struct regulator_dev *dev)
+{
+ return bq24296_set_otg_vbus(rdev_get_drvdata(dev), true);
+}
+
+static int bq24296_vbus_disable(struct regulator_dev *dev)
+{
+ return bq24296_set_otg_vbus(rdev_get_drvdata(dev), false);
+}
+
+static int bq24296_vbus_is_enabled(struct regulator_dev *dev)
+{
+ struct bq24190_dev_info *bdi = rdev_get_drvdata(dev);
+ int ret;
+ u8 val;
+
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0) {
+ dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
+ return ret;
+ }
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_OTG_CONFIG_MASK,
+ BQ24296_REG_POC_OTG_CONFIG_SHIFT, &val);
+
+ pm_runtime_mark_last_busy(bdi->dev);
+ pm_runtime_put_autosuspend(bdi->dev);
+
+ if (ret)
+ return ret;
+
+ bdi->otg_vbus_enabled = (val == BQ24296_REG_POC_OTG_CONFIG_OTG);
+
+ return bdi->otg_vbus_enabled;
+}
+
static const struct regulator_ops bq24190_vbus_ops = {
.enable = bq24190_vbus_enable,
.disable = bq24190_vbus_disable,
@@ -583,6 +693,22 @@ static const struct regulator_desc bq24190_vbus_desc = {
.n_voltages = 1,
};
+static const struct regulator_ops bq24296_vbus_ops = {
+ .enable = bq24296_vbus_enable,
+ .disable = bq24296_vbus_disable,
+ .is_enabled = bq24296_vbus_is_enabled,
+};
+
+static const struct regulator_desc bq24296_vbus_desc = {
+ .name = "usb_otg_vbus",
+ .of_match = "usb-otg-vbus",
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &bq24296_vbus_ops,
+ .fixed_uV = 5000000,
+ .n_voltages = 1,
+};
+
static const struct regulator_init_data bq24190_vbus_init_data = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -602,7 +728,7 @@ static int bq24190_register_vbus_regulator(struct bq24190_dev_info *bdi)
else
cfg.init_data = &bq24190_vbus_init_data;
cfg.driver_data = bdi;
- reg = devm_regulator_register(bdi->dev, &bq24190_vbus_desc, &cfg);
+ reg = devm_regulator_register(bdi->dev, bdi->info->vbus_desc, &cfg);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(bdi->dev, "Can't register regulator: %d\n", ret);
@@ -678,7 +804,7 @@ static int bq24190_set_config(struct bq24190_dev_info *bdi)
BQ24190_REG_CCC_ICHG_MASK,
BQ24190_REG_CCC_ICHG_SHIFT,
bq24190_ccc_ichg_values,
- ARRAY_SIZE(bq24190_ccc_ichg_values),
+ bdi->info->ichg_array_size,
bdi->ichg);
if (ret < 0)
return ret;
@@ -777,6 +903,24 @@ static int bq24190_charger_get_charge_type(struct bq24190_dev_info *bdi,
return 0;
}
+static int bq24190_battery_set_chg_config(struct bq24190_dev_info *bdi,
+ const u8 chg_config)
+{
+ return bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24190_REG_POC_CHG_CONFIG_MASK,
+ BQ24190_REG_POC_CHG_CONFIG_SHIFT,
+ chg_config);
+}
+
+static int bq24296_battery_set_chg_config(struct bq24190_dev_info *bdi,
+ const u8 chg_config)
+{
+ return bq24190_write_mask(bdi, BQ24190_REG_POC,
+ BQ24296_REG_POC_CHG_CONFIG_MASK,
+ BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ chg_config);
+}
+
static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
const union power_supply_propval *val)
{
@@ -835,9 +979,50 @@ static int bq24190_charger_set_charge_type(struct bq24190_dev_info *bdi,
return ret;
}
- return bq24190_write_mask(bdi, BQ24190_REG_POC,
- BQ24190_REG_POC_CHG_CONFIG_MASK,
- BQ24190_REG_POC_CHG_CONFIG_SHIFT, chg_config);
+ return bdi->info->set_chg_config(bdi, chg_config);
+}
+
+static int bq24190_charger_get_ntc_status(u8 value)
+{
+ int health;
+
+ switch (value >> BQ24190_REG_F_NTC_FAULT_SHIFT & 0x7) {
+ case 0x1: /* TS1 Cold */
+ case 0x3: /* TS2 Cold */
+ case 0x5: /* Both Cold */
+ health = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ case 0x2: /* TS1 Hot */
+ case 0x4: /* TS2 Hot */
+ case 0x6: /* Both Hot */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ return health;
+}
+
+static int bq24296_charger_get_ntc_status(u8 value)
+{
+ int health;
+
+ switch (value >> BQ24296_REG_F_NTC_FAULT_SHIFT & 0x3) {
+ case 0x0: /* Normal */
+ health = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case 0x1: /* Hot */
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ break;
+ case 0x2: /* Cold */
+ health = POWER_SUPPLY_HEALTH_COLD;
+ break;
+ default:
+ health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ return health;
}
static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
@@ -850,21 +1035,8 @@ static int bq24190_charger_get_health(struct bq24190_dev_info *bdi,
v = bdi->f_reg;
mutex_unlock(&bdi->f_reg_lock);
- if (v & BQ24190_REG_F_NTC_FAULT_MASK) {
- switch (v >> BQ24190_REG_F_NTC_FAULT_SHIFT & 0x7) {
- case 0x1: /* TS1 Cold */
- case 0x3: /* TS2 Cold */
- case 0x5: /* Both Cold */
- health = POWER_SUPPLY_HEALTH_COLD;
- break;
- case 0x2: /* TS1 Hot */
- case 0x4: /* TS2 Hot */
- case 0x6: /* Both Hot */
- health = POWER_SUPPLY_HEALTH_OVERHEAT;
- break;
- default:
- health = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
+ if (v & bdi->info->ntc_fault_mask) {
+ health = bdi->info->get_ntc_status(v);
} else if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
} else if (v & BQ24190_REG_F_CHRG_FAULT_MASK) {
@@ -1015,7 +1187,7 @@ static int bq24190_charger_get_current(struct bq24190_dev_info *bdi,
ret = bq24190_get_field_val(bdi, BQ24190_REG_CCC,
BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
bq24190_ccc_ichg_values,
- ARRAY_SIZE(bq24190_ccc_ichg_values), &curr);
+ bdi->info->ichg_array_size, &curr);
if (ret < 0)
return ret;
@@ -1055,7 +1227,7 @@ static int bq24190_charger_set_current(struct bq24190_dev_info *bdi,
ret = bq24190_set_field_val(bdi, BQ24190_REG_CCC,
BQ24190_REG_CCC_ICHG_MASK, BQ24190_REG_CCC_ICHG_SHIFT,
bq24190_ccc_ichg_values,
- ARRAY_SIZE(bq24190_ccc_ichg_values), curr);
+ bdi->info->ichg_array_size, curr);
if (ret < 0)
return ret;
@@ -1395,26 +1567,9 @@ static int bq24190_battery_get_health(struct bq24190_dev_info *bdi,
if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
} else {
- v &= BQ24190_REG_F_NTC_FAULT_MASK;
- v >>= BQ24190_REG_F_NTC_FAULT_SHIFT;
+ v &= bdi->info->ntc_fault_mask;
- switch (v) {
- case 0x0: /* Normal */
- health = POWER_SUPPLY_HEALTH_GOOD;
- break;
- case 0x1: /* TS1 Cold */
- case 0x3: /* TS2 Cold */
- case 0x5: /* Both Cold */
- health = POWER_SUPPLY_HEALTH_COLD;
- break;
- case 0x2: /* TS1 Hot */
- case 0x4: /* TS2 Hot */
- case 0x6: /* Both Hot */
- health = POWER_SUPPLY_HEALTH_OVERHEAT;
- break;
- default:
- health = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
+ health = v ? bdi->info->get_ntc_status(v) : POWER_SUPPLY_HEALTH_GOOD;
}
val->intval = health;
@@ -1601,12 +1756,13 @@ static int bq24190_configure_usb_otg(struct bq24190_dev_info *bdi, u8 ss_reg)
static void bq24190_check_status(struct bq24190_dev_info *bdi)
{
const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
- const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
- | BQ24190_REG_F_NTC_FAULT_MASK;
+ u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK;
bool alert_charger = false, alert_battery = false;
u8 ss_reg = 0, f_reg = 0;
int i, ret;
+ battery_mask_f |= bdi->info->ntc_fault_mask;
+
ret = bq24190_read(bdi, BQ24190_REG_SS, &ss_reg);
if (ret < 0) {
dev_err(bdi->dev, "Can't read SS reg: %d\n", ret);
@@ -1633,7 +1789,7 @@ static void bq24190_check_status(struct bq24190_dev_info *bdi)
!!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
!!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
!!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
- !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+ !!(f_reg & bdi->info->ntc_fault_mask));
mutex_lock(&bdi->f_reg_lock);
if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
@@ -1696,12 +1852,11 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+static int bq24190_check_chip(struct bq24190_dev_info *bdi)
{
u8 v;
int ret;
- /* First check that the device really is what its supposed to be */
ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
BQ24190_REG_VPRS_PN_MASK,
BQ24190_REG_VPRS_PN_SHIFT,
@@ -1719,6 +1874,40 @@ static int bq24190_hw_init(struct bq24190_dev_info *bdi)
return -ENODEV;
}
+ return 0;
+}
+
+static int bq24296_check_chip(struct bq24190_dev_info *bdi)
+{
+ u8 v;
+ int ret;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_VPRS,
+ BQ24296_REG_VPRS_PN_MASK,
+ BQ24296_REG_VPRS_PN_SHIFT,
+ &v);
+ if (ret < 0)
+ return ret;
+
+ switch (v) {
+ case BQ24296_REG_VPRS_PN_24296:
+ break;
+ default:
+ dev_err(bdi->dev, "Error unknown model: 0x%02x\n", v);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int bq24190_hw_init(struct bq24190_dev_info *bdi)
+{
+ int ret;
+
+ ret = bdi->info->check_chip(bdi);
+ if (ret < 0)
+ return ret;
+
ret = bq24190_register_reset(bdi);
if (ret < 0)
return ret;
@@ -1736,7 +1925,8 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
struct power_supply_battery_info *info;
int v, idx;
- idx = ARRAY_SIZE(bq24190_ccc_ichg_values) - 1;
+ idx = bdi->info->ichg_array_size - 1;
+
bdi->ichg_max = bq24190_ccc_ichg_values[idx];
idx = ARRAY_SIZE(bq24190_cvc_vreg_values) - 1;
@@ -1781,6 +1971,64 @@ static int bq24190_get_config(struct bq24190_dev_info *bdi)
return 0;
}
+static const struct bq24190_chip_info bq24190_chip_info_tbl[] = {
+ [BQ24190] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24192] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24192i] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24196] = {
+ .ichg_array_size = ARRAY_SIZE(bq24190_ccc_ichg_values),
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24190_vbus_desc,
+#endif
+ .check_chip = bq24190_check_chip,
+ .set_chg_config = bq24190_battery_set_chg_config,
+ .ntc_fault_mask = BQ24190_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24190_charger_get_ntc_status,
+ .set_otg_vbus = bq24190_set_otg_vbus,
+ },
+ [BQ24296] = {
+ .ichg_array_size = BQ24296_CCC_ICHG_VALUES_LEN,
+#ifdef CONFIG_REGULATOR
+ .vbus_desc = &bq24296_vbus_desc,
+#endif
+ .check_chip = bq24296_check_chip,
+ .set_chg_config = bq24296_battery_set_chg_config,
+ .ntc_fault_mask = BQ24296_REG_F_NTC_FAULT_MASK,
+ .get_ntc_status = bq24296_charger_get_ntc_status,
+ .set_otg_vbus = bq24296_set_otg_vbus,
+ },
+};
+
static int bq24190_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -1804,6 +2052,7 @@ static int bq24190_probe(struct i2c_client *client)
bdi->client = client;
bdi->dev = dev;
strscpy(bdi->model_name, id->name, sizeof(bdi->model_name));
+ bdi->info = i2c_get_match_data(client);
mutex_init(&bdi->f_reg_lock);
bdi->charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
bdi->f_reg = 0;
@@ -1940,7 +2189,7 @@ static void bq24190_shutdown(struct i2c_client *client)
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
/* Turn off 5V boost regulator on shutdown */
- bq24190_set_otg_vbus(bdi, false);
+ bdi->info->set_otg_vbus(bdi, false);
}
static __maybe_unused int bq24190_runtime_suspend(struct device *dev)
@@ -2029,19 +2278,21 @@ static const struct dev_pm_ops bq24190_pm_ops = {
};
static const struct i2c_device_id bq24190_i2c_ids[] = {
- { "bq24190" },
- { "bq24192" },
- { "bq24192i" },
- { "bq24196" },
+ { "bq24190", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24190] },
+ { "bq24192", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192] },
+ { "bq24192i", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24192i] },
+ { "bq24196", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24196] },
+ { "bq24296", (kernel_ulong_t)&bq24190_chip_info_tbl[BQ24296] },
{ },
};
MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
static const struct of_device_id bq24190_of_match[] = {
- { .compatible = "ti,bq24190", },
- { .compatible = "ti,bq24192", },
- { .compatible = "ti,bq24192i", },
- { .compatible = "ti,bq24196", },
+ { .compatible = "ti,bq24190", .data = &bq24190_chip_info_tbl[BQ24190] },
+ { .compatible = "ti,bq24192", .data = &bq24190_chip_info_tbl[BQ24192] },
+ { .compatible = "ti,bq24192i", .data = &bq24190_chip_info_tbl[BQ24192i] },
+ { .compatible = "ti,bq24196", .data = &bq24190_chip_info_tbl[BQ24196] },
+ { .compatible = "ti,bq24296", .data = &bq24190_chip_info_tbl[BQ24296] },
{ },
};
MODULE_DEVICE_TABLE(of, bq24190_of_match);
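
The chip-info conversion above is the standard per-variant data pattern: one const info struct per chip, referenced from both the I2C and OF ID tables, and resolved in probe with i2c_get_match_data() regardless of which table matched. Generic sketch with placeholder names:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

struct chip_info {
	int ichg_table_len;
};

static const struct chip_info chip_a = { .ichg_table_len = 64 };
static const struct chip_info chip_b = { .ichg_table_len = 40 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &chip_a },
	{ .compatible = "vendor,chip-b", .data = &chip_b },
	{ }
};

static int example_probe(struct i2c_client *client)
{
	const struct chip_info *info = i2c_get_match_data(client);

	if (!info)
		return -ENODEV;
	/* dispatch through info instead of chip-specific branches */
	return 0;
}
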
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index 789a31bd70c3..1a935bc88510 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1574,13 +1574,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
wd_reg_val = i;
break;
}
- if (bq->watchdog_timer > bq256xx_watchdog_time[i] &&
+ if (i + 1 < BQ256XX_NUM_WD_VAL &&
+ bq->watchdog_timer > bq256xx_watchdog_time[i] &&
bq->watchdog_timer < bq256xx_watchdog_time[i + 1])
wd_reg_val = i;
}
ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1,
BQ256XX_WATCHDOG_MASK, wd_reg_val <<
BQ256XX_WDT_BIT_SHIFT);
+ if (ret)
+ return ret;
ret = power_supply_get_battery_info(bq->charger, &bat_info);
if (ret == -ENOMEM)
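
The added bounds check stops the scan from reading one entry past the end of the watchdog table on its last iteration. The same pattern in isolation (illustrative helper):

#include <linux/types.h>

static unsigned int pick_reg_val(const u32 *tbl, size_t len, u32 val)
{
	unsigned int reg = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		if (val == tbl[i]) {
			reg = i;
			break;
		}
		/* only compare against tbl[i + 1] when it exists */
		if (i + 1 < len && val > tbl[i] && val < tbl[i + 1])
			reg = i;
	}
	return reg;
}
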
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 4296600e8912..1c4a9d137744 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -2162,6 +2162,28 @@ void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
+#ifdef CONFIG_PM_SLEEP
+static int bq27xxx_battery_suspend(struct device *dev)
+{
+ struct bq27xxx_device_info *di = dev_get_drvdata(dev);
+
+ cancel_delayed_work(&di->work);
+ return 0;
+}
+
+static int bq27xxx_battery_resume(struct device *dev)
+{
+ struct bq27xxx_device_info *di = dev_get_drvdata(dev);
+
+ schedule_delayed_work(&di->work, 0);
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+SIMPLE_DEV_PM_OPS(bq27xxx_battery_battery_pm_ops,
+ bq27xxx_battery_suspend, bq27xxx_battery_resume);
+EXPORT_SYMBOL_GPL(bq27xxx_battery_battery_pm_ops);
+
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("BQ27xxx battery monitor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 9b5475590518..9910c600743e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
- free_irq(client->irq, di);
+ if (client->irq)
+ free_irq(client->irq, di);
+
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);
@@ -295,6 +297,7 @@ static struct i2c_driver bq27xxx_battery_i2c_driver = {
.driver = {
.name = "bq27xxx-battery",
.of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table),
+ .pm = &bq27xxx_battery_battery_pm_ops,
},
.probe = bq27xxx_battery_i2c_probe,
.remove = bq27xxx_battery_i2c_remove,
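
The new PM hooks simply park the polling work across suspend and kick an immediate refresh on resume, wired up through the dev_pm_ops the I2C driver now points at. Equivalent sketch with hypothetical names:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/workqueue.h>

struct example {
	struct delayed_work work;	/* periodic status poll */
};

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	struct example *p = dev_get_drvdata(dev);

	cancel_delayed_work(&p->work);		/* stop polling while asleep */
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example *p = dev_get_drvdata(dev);

	schedule_delayed_work(&p->work, 0);	/* refresh immediately */
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
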
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index bb29e9ebd24a..99f3ccdc30a6 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -491,7 +491,7 @@ static int cw_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
if (cw_battery_valid_time_to_empty(cw_bat))
- val->intval = cw_bat->time_to_empty;
+ val->intval = cw_bat->time_to_empty * 60;
else
val->intval = 0;
break;
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 73265001dd4b..ecef35ac3b7e 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -861,44 +861,44 @@ const size_t power_supply_battery_info_properties_size = ARRAY_SIZE(power_supply
EXPORT_SYMBOL_GPL(power_supply_battery_info_properties_size);
bool power_supply_battery_info_has_prop(struct power_supply_battery_info *info,
- enum power_supply_property psp)
+ enum power_supply_property psp)
{
if (!info)
return false;
switch (psp) {
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- return info->technology != POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
- case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- return info->energy_full_design_uwh >= 0;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- return info->charge_full_design_uah >= 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- return info->voltage_min_design_uv >= 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- return info->voltage_max_design_uv >= 0;
- case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
- return info->precharge_current_ua >= 0;
- case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
- return info->charge_term_current_ua >= 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
- return info->constant_charge_current_max_ua >= 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
- return info->constant_charge_voltage_max_uv >= 0;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
- return info->temp_ambient_alert_min > INT_MIN;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
- return info->temp_ambient_alert_max < INT_MAX;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- return info->temp_alert_min > INT_MIN;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- return info->temp_alert_max < INT_MAX;
- case POWER_SUPPLY_PROP_TEMP_MIN:
- return info->temp_min > INT_MIN;
- case POWER_SUPPLY_PROP_TEMP_MAX:
- return info->temp_max < INT_MAX;
- default:
- return false;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ return info->technology != POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ return info->energy_full_design_uwh >= 0;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ return info->charge_full_design_uah >= 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ return info->voltage_min_design_uv >= 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ return info->voltage_max_design_uv >= 0;
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return info->precharge_current_ua >= 0;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return info->charge_term_current_ua >= 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return info->constant_charge_current_max_ua >= 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ return info->constant_charge_voltage_max_uv >= 0;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+ return info->temp_ambient_alert_min > INT_MIN;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+ return info->temp_ambient_alert_max < INT_MAX;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+ return info->temp_alert_min > INT_MIN;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ return info->temp_alert_max < INT_MAX;
+ case POWER_SUPPLY_PROP_TEMP_MIN:
+ return info->temp_min > INT_MIN;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ return info->temp_max < INT_MAX;
+ default:
+ return false;
}
}
EXPORT_SYMBOL_GPL(power_supply_battery_info_has_prop);
@@ -914,53 +914,53 @@ int power_supply_battery_info_get_prop(struct power_supply_battery_info *info,
return -EINVAL;
switch (psp) {
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = info->technology;
- return 0;
- case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- val->intval = info->energy_full_design_uwh;
- return 0;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = info->charge_full_design_uah;
- return 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = info->voltage_min_design_uv;
- return 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = info->voltage_max_design_uv;
- return 0;
- case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
- val->intval = info->precharge_current_ua;
- return 0;
- case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
- val->intval = info->charge_term_current_ua;
- return 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
- val->intval = info->constant_charge_current_max_ua;
- return 0;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
- val->intval = info->constant_charge_voltage_max_uv;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
- val->intval = info->temp_ambient_alert_min;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
- val->intval = info->temp_ambient_alert_max;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
- val->intval = info->temp_alert_min;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
- val->intval = info->temp_alert_max;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_MIN:
- val->intval = info->temp_min;
- return 0;
- case POWER_SUPPLY_PROP_TEMP_MAX:
- val->intval = info->temp_max;
- return 0;
- default:
- return -EINVAL;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = info->technology;
+ return 0;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = info->energy_full_design_uwh;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = info->charge_full_design_uah;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = info->voltage_min_design_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = info->voltage_max_design_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ val->intval = info->precharge_current_ua;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ val->intval = info->charge_term_current_ua;
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = info->constant_charge_current_max_ua;
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = info->constant_charge_voltage_max_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
+ val->intval = info->temp_ambient_alert_min;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
+ val->intval = info->temp_ambient_alert_max;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+ val->intval = info->temp_alert_min;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ val->intval = info->temp_alert_max;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_MIN:
+ val->intval = info->temp_min;
+ return 0;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ val->intval = info->temp_max;
+ return 0;
+ default:
+ return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(power_supply_battery_info_get_prop);
@@ -1255,6 +1255,7 @@ EXPORT_SYMBOL_GPL(power_supply_powers);
static void power_supply_dev_release(struct device *dev)
{
struct power_supply *psy = to_power_supply(dev);
+
dev_dbg(dev, "%s\n", __func__);
kfree(psy);
}
@@ -1636,6 +1637,6 @@ subsys_initcall(power_supply_class_init);
module_exit(power_supply_class_exit);
MODULE_DESCRIPTION("Universal power supply monitor class");
-MODULE_AUTHOR("Ian Molton <spyro@f2s.com>, "
- "Szabolcs Gyurko, "
- "Anton Vorontsov <cbou@mail.ru>");
+MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
+MODULE_AUTHOR("Szabolcs Gyurko");
+MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
index 8acf63ee6897..9bb777406013 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -972,10 +972,14 @@ static int smb2_probe(struct platform_device *pdev)
supply_config.of_node = pdev->dev.of_node;
desc = devm_kzalloc(chip->dev, sizeof(smb2_psy_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
memcpy(desc, &smb2_psy_desc, sizeof(smb2_psy_desc));
desc->name =
devm_kasprintf(chip->dev, GFP_KERNEL, "%s-charger",
(const char *)device_get_match_data(chip->dev));
+ if (!desc->name)
+ return -ENOMEM;
chip->chg_psy =
devm_power_supply_register(chip->dev, desc, &supply_config);
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5dd5f188e14f..604541dcb320 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -155,6 +155,18 @@ config PTP_1588_CLOCK_IDTCM
To compile this driver as a module, choose M here: the module
will be called ptp_clockmatrix.
+config PTP_1588_CLOCK_FC3W
+ tristate "RENESAS FemtoClock3 Wireless as PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using the Renesas FemtoClock3 Wireless
+ as a PTP clock. This clock is only useful if your time-stamping
+ MAC is connected to the Renesas chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_fc3.
+
config PTP_1588_CLOCK_MOCK
tristate "Mock-up PTP clock"
depends on PTP_1588_CLOCK
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index dea0cebd2303..68bf02078053 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_FC3W) += ptp_fc3.o
obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
obj-$(CONFIG_PTP_1588_CLOCK_MOCK) += ptp_mock.o
obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 15b804ba4868..3aaf1a3430c5 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -44,18 +44,31 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
struct ptp_clock_event *src)
{
struct ptp_extts_event *dst;
+ struct timespec64 offset_ts;
unsigned long flags;
s64 seconds;
u32 remainder;
- seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ if (src->type == PTP_CLOCK_EXTTS) {
+ seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ } else if (src->type == PTP_CLOCK_EXTOFF) {
+ offset_ts = ns_to_timespec64(src->offset);
+ seconds = offset_ts.tv_sec;
+ remainder = offset_ts.tv_nsec;
+ } else {
+ WARN(1, "%s: unknown type %d\n", __func__, src->type);
+ return;
+ }
spin_lock_irqsave(&queue->lock, flags);
dst = &queue->buf[queue->tail];
dst->index = src->index;
+ dst->flags = PTP_EXTTS_EVENT_VALID;
dst->t.sec = seconds;
dst->t.nsec = remainder;
+ if (src->type == PTP_CLOCK_EXTOFF)
+ dst->flags |= PTP_EXT_OFFSET;
/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
if (!queue_free(queue))
@@ -417,6 +430,7 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
break;
case PTP_CLOCK_EXTTS:
+ case PTP_CLOCK_EXTOFF:
/* Enqueue timestamp on selected queues */
spin_lock_irqsave(&ptp->tsevqs_lock, flags);
list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
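The two hunks above route PTP_CLOCK_EXTOFF events through the same queue as external timestamps, marking every entry PTP_EXTTS_EVENT_VALID and tagging offset events with PTP_EXT_OFFSET. A minimal userspace sketch of a consumer — assuming the matching uapi flag definitions from this series are present in <linux/ptp_clock.h>, and a hypothetical /dev/ptp0 node:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.flags = PTP_ENABLE_FEATURE | PTP_EXT_OFFSET;
	if (ioctl(fd, PTP_EXTTS_REQUEST2, &req))	/* flag-validating ioctl */
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.flags & PTP_EXT_OFFSET)
			printf("offset %lld.%09u\n", (long long)ev.t.sec, ev.t.nsec);
		else
			printf("extts  %lld.%09u\n", (long long)ev.t.sec, ev.t.nsec);
	}
	return 0;
}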
diff --git a/drivers/ptp/ptp_fc3.c b/drivers/ptp/ptp_fc3.c
new file mode 100644
index 000000000000..6ef982862e27
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idtRC38xxx_reg.h>
+#include <asm/unaligned.h>
+
+#include "ptp_private.h"
+#include "ptp_fc3.h"
+
+MODULE_DESCRIPTION("Driver for IDT FemtoClock3(TM) family");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/*
+ * The name of the firmware file to be loaded
+ * overrides any automatic selection
+ */
+static char *firmware;
+module_param(firmware, charp, 0);
+
+static s64 ns2counters(struct idtfc3 *idtfc3, s64 nsec, u32 *sub_ns)
+{
+ s64 sync;
+ s32 rem;
+
+ if (likely(nsec >= 0)) {
+ sync = div_u64_rem(nsec, idtfc3->ns_per_sync, &rem);
+ *sub_ns = rem;
+ } else {
+ sync = -div_u64_rem(-nsec - 1, idtfc3->ns_per_sync, &rem) - 1;
+ *sub_ns = idtfc3->ns_per_sync - rem - 1;
+ }
+
+ return sync * idtfc3->ns_per_sync;
+}
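As a reading aid, a worked example for ns2counters() assuming a 1 s sync period (ns_per_sync == NSEC_PER_SEC): an input of 2500000000 ns returns 2000000000 with *sub_ns = 500000000, and an input of -1 ns returns -1000000000 with *sub_ns = 999999999. In both cases the return value plus *sub_ns reproduces the input, i.e. the split rounds toward negative infinity onto sync-pulse boundaries.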
+
+static s64 tdc_meas2offset(struct idtfc3 *idtfc3, u64 meas_read)
+{
+ s64 coarse, fine;
+
+ fine = sign_extend64(FIELD_GET(FINE_MEAS_MASK, meas_read), 12);
+ coarse = sign_extend64(FIELD_GET(COARSE_MEAS_MASK, meas_read), (39 - 13));
+
+ fine = div64_s64(fine * NSEC_PER_SEC, idtfc3->tdc_apll_freq * 62LL);
+ coarse = div64_s64(coarse * NSEC_PER_SEC, idtfc3->time_ref_freq);
+
+ return coarse + fine;
+}
+
+static s64 tdc_offset2phase(struct idtfc3 *idtfc3, s64 offset_ns)
+{
+ if (offset_ns > idtfc3->ns_per_sync / 2)
+ offset_ns -= idtfc3->ns_per_sync;
+
+ return offset_ns * idtfc3->tdc_offset_sign;
+}
+
+static int idtfc3_set_lpf_mode(struct idtfc3 *idtfc3, u8 mode)
+{
+ int err;
+
+ if (mode >= LPF_INVALID)
+ return -EINVAL;
+
+ if (idtfc3->lpf_mode == mode)
+ return 0;
+
+ err = regmap_bulk_write(idtfc3->regmap, LPF_MODE_CNFG, &mode, sizeof(mode));
+ if (err)
+ return err;
+
+ idtfc3->lpf_mode = mode;
+
+ return 0;
+}
+
+static int idtfc3_enable_lpf(struct idtfc3 *idtfc3, bool enable)
+{
+ u8 val;
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (enable)
+ val |= LPF_EN;
+ else
+ val &= ~LPF_EN;
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+}
+
+static int idtfc3_get_time_ref_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u8 time_ref_div;
+ u8 time_clk_div;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_MEAS_DIV_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+ time_ref_div = FIELD_GET(TIME_REF_DIV_MASK, get_unaligned_le32(buf)) + 1;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_COUNT, buf, 1);
+ if (err)
+ return err;
+ time_clk_div = (buf[0] & TIME_CLOCK_COUNT_MASK) + 1;
+ idtfc3->time_ref_freq = idtfc3->hw_param.time_clk_freq *
+ time_clk_div / time_ref_div;
+
+ return 0;
+}
+
+static int idtfc3_get_tdc_offset_sign(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u32 val;
+ u8 sig1, sig2;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_TDC_FANOUT_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ val = get_unaligned_le32(buf);
+ if ((val & TIME_SYNC_TO_TDC_EN) != TIME_SYNC_TO_TDC_EN) {
+ dev_err(idtfc3->dev, "TIME_SYNC_TO_TDC_EN is off !!!");
+ return -EINVAL;
+ }
+
+ sig1 = FIELD_GET(SIG1_MUX_SEL_MASK, val);
+ sig2 = FIELD_GET(SIG2_MUX_SEL_MASK, val);
+
+ if ((sig1 == sig2) || ((sig1 != TIME_SYNC) && (sig2 != TIME_SYNC))) {
+ dev_err(idtfc3->dev, "Invalid tdc_mux_sel sig1=%d sig2=%d", sig1, sig2);
+ return -EINVAL;
+ } else if (sig1 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = 1;
+ } else if (sig2 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = -1;
+ }
+
+ return 0;
+}
+
+static int idtfc3_lpf_bw(struct idtfc3 *idtfc3, u8 shift, u8 mult)
+{
+ u8 val = FIELD_PREP(LPF_BW_SHIFT, shift) | FIELD_PREP(LPF_BW_MULT, mult);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_BW_CNFG, &val, sizeof(val));
+}
+
+static int idtfc3_enable_tdc(struct idtfc3 *idtfc3, bool enable, u8 meas_mode)
+{
+ int err;
+ u8 val = 0;
+
+ /* Disable TDC first */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (!enable)
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_DEFAULT, LPF_BW_MULT_DEFAULT);
+
+ if (meas_mode >= MEAS_MODE_INVALID)
+ return -EINVAL;
+
+ /* Change TDC meas mode */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CNFG,
+ &meas_mode, sizeof(meas_mode));
+ if (err)
+ return err;
+
+ /* Enable TDC */
+ val = TDC_MEAS_EN;
+ if (meas_mode == CONTINUOUS)
+ val |= TDC_MEAS_START;
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_1PPS, LPF_BW_MULT_DEFAULT);
+}
+
+static bool get_tdc_meas(struct idtfc3 *idtfc3, s64 *offset_ns)
+{
+ bool valid = false;
+ u8 buf[9];
+ u8 val;
+ int err;
+
+ while (true) {
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS,
+ &val, sizeof(val));
+ if (err)
+ return false;
+
+ if (val & FIFO_EMPTY)
+ break;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return false;
+
+ valid = true;
+ }
+
+ if (valid)
+ *offset_ns = tdc_meas2offset(idtfc3, get_unaligned_le64(&buf[1]));
+
+ return valid;
+}
+
+static int check_tdc_fifo_overrun(struct idtfc3 *idtfc3)
+{
+ u8 val;
+ int err;
+
+ /* Check if FIFO is overrun */
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (!(val & FIFO_FULL))
+ return 0;
+
+ dev_warn(idtfc3->dev, "TDC FIFO overrun !!!");
+
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int get_tdc_meas_continuous(struct idtfc3 *idtfc3)
+{
+ int err;
+ s64 offset_ns;
+ struct ptp_clock_event event;
+
+ err = check_tdc_fifo_overrun(idtfc3);
+ if (err)
+ return err;
+
+ if (get_tdc_meas(idtfc3, &offset_ns) && offset_ns >= 0) {
+ event.index = 0;
+ event.offset = tdc_offset2phase(idtfc3, offset_ns);
+ event.type = PTP_CLOCK_EXTOFF;
+ ptp_clock_event(idtfc3->ptp_clock, &event);
+ }
+
+ return 0;
+}
+
+static int idtfc3_read_subcounter(struct idtfc3 *idtfc3)
+{
+ u8 buf[5] = {0};
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TOD_COUNTER_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ /* sync_counter_value is [31:82] and sub_sync_counter_value is [0:30] */
+ return get_unaligned_le32(&buf[1]) & SUB_SYNC_COUNTER_MASK;
+}
+
+static int idtfc3_tod_update_is_done(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 req;
+
+ err = read_poll_timeout_atomic(regmap_bulk_read, err, !req, USEC_PER_MSEC,
+ idtfc3->tc_write_timeout, true, idtfc3->regmap,
+ TOD_SYNC_LOAD_REQ_CTRL, &req, 1);
+ if (err)
+ dev_err(idtfc3->dev, "TOD counter write timeout !!!");
+
+ return err;
+}
+
+static int idtfc3_write_subcounter(struct idtfc3 *idtfc3, u32 counter)
+{
+ u8 buf[18] = {0};
+ int err;
+
+ /* sync_counter_value is [31:82] and sub_sync_counter_value is [0:30] */
+ put_unaligned_le32(counter & SUB_SYNC_COUNTER_MASK, &buf[0]);
+
+ buf[16] = SUB_SYNC_LOAD_ENABLE | SYNC_LOAD_ENABLE;
+ buf[17] = SYNC_LOAD_REQ;
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ return idtfc3_tod_update_is_done(idtfc3);
+}
+
+static int idtfc3_timecounter_update(struct idtfc3 *idtfc3, u32 counter, s64 ns)
+{
+ int err;
+
+ err = idtfc3_write_subcounter(idtfc3, counter);
+ if (err)
+ return err;
+
+ /* Update time counter */
+ idtfc3->ns = ns;
+ idtfc3->last_counter = counter;
+
+ return 0;
+}
+
+static int idtfc3_timecounter_read(struct idtfc3 *idtfc3)
+{
+ int now, delta;
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ /* calculate the delta since the last idtfc3_timecounter_read(): */
+ if (now >= idtfc3->last_counter)
+ delta = now - idtfc3->last_counter;
+ else
+ delta = idtfc3->sub_sync_count - idtfc3->last_counter + now;
+
+ /* Update time counter */
+ idtfc3->ns += delta * idtfc3->ns_per_counter;
+ idtfc3->last_counter = now;
+
+ return 0;
+}
+
+static int _idtfc3_gettime(struct idtfc3 *idtfc3, struct timespec64 *ts)
+{
+ int err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ *ts = ns_to_timespec64(idtfc3->ns);
+
+ return 0;
+}
+
+static int idtfc3_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_gettime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_settime(struct idtfc3 *idtfc3, const struct timespec64 *ts)
+{
+ s64 offset_ns, now_ns;
+ u32 counter, sub_ns;
+ int now;
+
+ if (!timespec64_valid(ts)) {
+ dev_err(idtfc3->dev, "%s: invalid timespec", __func__);
+ return -EINVAL;
+ }
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ offset_ns = (idtfc3->sub_sync_count - now) * idtfc3->ns_per_counter;
+ now_ns = timespec64_to_ns(ts);
+ (void)ns2counters(idtfc3, offset_ns + now_ns, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, now_ns);
+}
+
+static int idtfc3_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_settime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjtime(struct idtfc3 *idtfc3, s64 delta)
+{
+ /*
+ * The TOD counter can be synchronously loaded with any value;
+ * the load takes effect on the next Time Sync pulse.
+ */
+ s64 sync_ns;
+ u32 sub_ns;
+ u32 counter;
+
+ if (idtfc3->ns + delta < 0) {
+ dev_err(idtfc3->dev, "%lld ns adj is too large", delta);
+ return -EINVAL;
+ }
+
+ sync_ns = ns2counters(idtfc3, delta + idtfc3->ns_per_sync, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, idtfc3->ns + sync_ns +
+ counter * idtfc3->ns_per_counter);
+}
+
+static int idtfc3_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjtime(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjphase(struct idtfc3 *idtfc3, s32 delta)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 pcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WP);
+ if (err)
+ return err;
+
+ /*
+ * Phase Control Word unit is: 10^9 / (TDC_APLL_FREQ * 124)
+ *
+ * delta * TDC_APLL_FREQ * 124
+ * PCW = ---------------------------
+ * 10^9
+ *
+ */
+ pcw = div_s64((s64)delta * idtfc3->tdc_apll_freq * 124, NSEC_PER_SEC);
+
+ put_unaligned_le64(pcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_PHASE_CTRL, buf, sizeof(buf));
+}
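To make the Phase Control Word arithmetic concrete — a worked example with an assumed tdc_apll_freq of 625 MHz, a number not taken from this patch: one PCW unit is 10^9 / (625000000 * 124) ≈ 0.0129 ns, so a phase step of delta = 1000 ns becomes

	pcw = div_s64(1000LL * 625000000 * 124, NSEC_PER_SEC);	/* == 77500 */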
+
+static int idtfc3_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjphase(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjfine(struct idtfc3 *idtfc3, long scaled_ppm)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 fcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WF);
+ if (err)
+ return err;
+
+ /*
+ * Frequency Control Word unit is: 2^-44 * 10^6 ppm
+ *
+ * adjfreq:
+ * ppb * 2^44
+ * FCW = ----------
+ * 10^9
+ *
+ * adjfine:
+ * ppm_16 * 2^28
+ * FCW = -------------
+ * 10^6
+ */
+ fcw = scaled_ppm * BIT(28);
+ fcw = div_s64(fcw, 1000000);
+
+ put_unaligned_le64(fcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_FREQ_CTRL, buf, sizeof(buf));
+}
+
+static int idtfc3_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjfine(idtfc3, scaled_ppm);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int idtfc3_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(idtfc3->lock);
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ err = 0;
+ /* Only accept a 1-PPS aligned to the second. */
+ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ err = -ERANGE;
+ else
+ err = 0;
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ if (on) {
+ /* Only accept requests for external phase offset */
+ if ((rq->extts.flags & PTP_EXT_OFFSET) != (PTP_EXT_OFFSET))
+ err = -EOPNOTSUPP;
+ else
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ } else {
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(idtfc3->lock);
+
+ if (err)
+ dev_err(idtfc3->dev, "Failed in %s with err %d!", __func__, err);
+
+ return err;
+}
+
+static long idtfc3_aux_work(struct ptp_clock_info *ptp)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ static int tdc_get;
+
+ mutex_lock(idtfc3->lock);
+ tdc_get %= TDC_GET_PERIOD;
+ if ((tdc_get == 0) || (tdc_get == TDC_GET_PERIOD / 2))
+ idtfc3_timecounter_read(idtfc3);
+ get_tdc_meas_continuous(idtfc3);
+ tdc_get++;
+ mutex_unlock(idtfc3->lock);
+
+ return idtfc3->tc_update_period;
+}
+
+static const struct ptp_clock_info idtfc3_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = MAX_FFO_PPB,
+ .n_per_out = 1,
+ .n_ext_ts = 1,
+ .adjphase = &idtfc3_adjphase,
+ .adjfine = &idtfc3_adjfine,
+ .adjtime = &idtfc3_adjtime,
+ .gettime64 = &idtfc3_gettime,
+ .settime64 = &idtfc3_settime,
+ .enable = &idtfc3_enable,
+ .do_aux_work = &idtfc3_aux_work,
+};
+
+static int idtfc3_hw_calibrate(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+ u8 val;
+
+ mdelay(10);
+ /*
+ * Toggle TDC_DAC_RECAL_REQ:
+ * (1) set tdc_en to 1
+ * (2) set tdc_dac_recal_req to 0
+ * (3) set tdc_dac_recal_req to 1
+ */
+ val = TDC_EN;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = TDC_EN | TDC_DAC_RECAL_REQ;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ /*
+ * Toggle APLL_REINIT:
+ * (1) set apll_reinit to 0
+ * (2) set apll_reinit to 1
+ */
+ val = 0;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = APLL_REINIT;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ return err;
+}
+
+static int idtfc3_init_timecounter(struct idtfc3 *idtfc3)
+{
+ int err;
+ u32 period_ms;
+
+ period_ms = idtfc3->sub_sync_count * MSEC_PER_SEC /
+ idtfc3->hw_param.time_clk_freq;
+
+ idtfc3->tc_update_period = msecs_to_jiffies(period_ms / TDC_GET_PERIOD);
+ idtfc3->tc_write_timeout = period_ms * USEC_PER_MSEC;
+
+ err = idtfc3_timecounter_update(idtfc3, 0, 0);
+ if (err)
+ return err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ ptp_schedule_worker(idtfc3->ptp_clock, idtfc3->tc_update_period);
+
+ return 0;
+}
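Worked numbers for the update cadence, using assumed values not taken from this patch: with time_clk_freq = 100 MHz and a full 1 s sync period, sub_sync_count = 100000000, so period_ms = 1000; with TDC_GET_PERIOD = 10 the aux worker then runs every msecs_to_jiffies(100), and TOD writes poll for up to 1000 * USEC_PER_MSEC = 1000000 us before timing out.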
+
+static int idtfc3_get_tdc_apll_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 tdc_fb_div_int;
+ u8 tdc_ref_div;
+ struct idtfc3_hw_param *param = &idtfc3->hw_param;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_REF_DIV_CNFG,
+ &tdc_ref_div, sizeof(tdc_ref_div));
+ if (err)
+ return err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FB_DIV_INT_CNFG,
+ &tdc_fb_div_int, sizeof(tdc_fb_div_int));
+ if (err)
+ return err;
+
+ tdc_fb_div_int &= TDC_FB_DIV_INT_MASK;
+ tdc_ref_div &= TDC_REF_DIV_CONFIG_MASK;
+
+ idtfc3->tdc_apll_freq = div_u64(param->xtal_freq * (u64)tdc_fb_div_int,
+ 1 << tdc_ref_div);
+
+ return 0;
+}
+
+static int idtfc3_get_fod(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 fod;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_SRC, &fod, sizeof(fod));
+ if (err)
+ return err;
+
+ switch (fod) {
+ case 0:
+ idtfc3->fod_n = FOD_0;
+ break;
+ case 1:
+ idtfc3->fod_n = FOD_1;
+ break;
+ case 2:
+ idtfc3->fod_n = FOD_2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_get_sync_count(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+
+ err = regmap_bulk_read(idtfc3->regmap, SUB_SYNC_GEN_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ idtfc3->sub_sync_count = (get_unaligned_le32(buf) & SUB_SYNC_COUNTER_MASK) + 1;
+ idtfc3->ns_per_counter = NSEC_PER_SEC / idtfc3->hw_param.time_clk_freq;
+ idtfc3->ns_per_sync = idtfc3->sub_sync_count * idtfc3->ns_per_counter;
+
+ return 0;
+}
+
+static int idtfc3_setup_hw_param(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ err = idtfc3_get_fod(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_sync_count(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_time_ref_freq(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_get_tdc_apll_freq(idtfc3);
+}
+
+static int idtfc3_configure_hw(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+
+ err = idtfc3_hw_calibrate(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_lpf(idtfc3, true);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ if (err)
+ return err;
+
+ err = idtfc3_get_tdc_offset_sign(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_setup_hw_param(idtfc3);
+}
+
+static int idtfc3_set_overhead(struct idtfc3 *idtfc3)
+{
+ s64 current_ns = 0;
+ s64 lowest_ns = 0;
+ int err;
+ u8 i;
+ ktime_t start;
+ ktime_t stop;
+ ktime_t diff;
+
+ char buf[18] = {0};
+
+ for (i = 0; i < 5; i++) {
+ start = ktime_get_raw();
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ diff = ktime_sub(stop, start);
+
+ current_ns = ktime_to_ns(diff);
+
+ if (i == 0) {
+ lowest_ns = current_ns;
+ } else {
+ if (current_ns < lowest_ns)
+ lowest_ns = current_ns;
+ }
+ }
+
+ idtfc3->tod_write_overhead = lowest_ns;
+
+ return err;
+}
+
+static int idtfc3_enable_ptp(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ idtfc3->caps = idtfc3_caps;
+ snprintf(idtfc3->caps.name, sizeof(idtfc3->caps.name), "IDT FC3W");
+ idtfc3->ptp_clock = ptp_clock_register(&idtfc3->caps, NULL);
+
+ if (IS_ERR(idtfc3->ptp_clock)) {
+ err = PTR_ERR(idtfc3->ptp_clock);
+ idtfc3->ptp_clock = NULL;
+ return err;
+ }
+
+ err = idtfc3_set_overhead(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_init_timecounter(idtfc3);
+ if (err)
+ return err;
+
+ dev_info(idtfc3->dev, "TIME_SYNC_CHANNEL registered as ptp%d",
+ idtfc3->ptp_clock->index);
+
+ return 0;
+}
+
+static int idtfc3_load_firmware(struct idtfc3 *idtfc3)
+{
+ char fname[128] = FW_FILENAME;
+ const struct firmware *fw;
+ struct idtfc3_fwrc *rec;
+ u16 addr;
+ u8 val;
+ int err;
+ s32 len;
+
+ idtfc3_default_hw_param(&idtfc3->hw_param);
+
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
+
+ dev_info(idtfc3->dev, "requesting firmware '%s'\n", fname);
+
+ err = request_firmware(&fw, fname, idtfc3->dev);
+
+ if (err) {
+ dev_err(idtfc3->dev,
+ "requesting firmware failed with err %d!\n", err);
+ return err;
+ }
+
+ dev_dbg(idtfc3->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtfc3_fwrc *)fw->data;
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+ if (rec->reserved) {
+ dev_err(idtfc3->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ val = rec->value;
+ addr = rec->hiaddr << 8 | rec->loaddr;
+
+ rec++;
+
+ err = idtfc3_set_hw_param(&idtfc3->hw_param, addr, val);
+ }
+
+ if (err != -EINVAL) {
+ err = 0;
+
+ /* Max register */
+ if (addr >= 0xE88)
+ continue;
+
+ err = regmap_bulk_write(idtfc3->regmap, addr,
+ &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ err = idtfc3_configure_hw(idtfc3);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idtfc3_read_device_id(struct idtfc3 *idtfc3, u16 *device_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = regmap_bulk_read(idtfc3->regmap, DEVICE_ID,
+ &buf, sizeof(buf));
+ if (err) {
+ dev_err(idtfc3->dev, "%s failed with %d", __func__, err);
+ return err;
+ }
+
+ *device_id = get_unaligned_le16(buf);
+
+ return 0;
+}
+
+static int idtfc3_check_device_compatibility(struct idtfc3 *idtfc3)
+{
+ int err;
+ u16 device_id;
+
+ err = idtfc3_read_device_id(idtfc3, &device_id);
+ if (err)
+ return err;
+
+ if ((device_id & DEVICE_ID_MASK) == 0) {
+ dev_err(idtfc3->dev, "invalid device");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_probe(struct platform_device *pdev)
+{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct idtfc3 *idtfc3;
+ int err;
+
+ idtfc3 = devm_kzalloc(&pdev->dev, sizeof(struct idtfc3), GFP_KERNEL);
+
+ if (!idtfc3)
+ return -ENOMEM;
+
+ idtfc3->dev = &pdev->dev;
+ idtfc3->mfd = pdev->dev.parent;
+ idtfc3->lock = &ddata->lock;
+ idtfc3->regmap = ddata->regmap;
+
+ mutex_lock(idtfc3->lock);
+
+ err = idtfc3_check_device_compatibility(idtfc3);
+ if (err) {
+ mutex_unlock(idtfc3->lock);
+ return err;
+ }
+
+ err = idtfc3_load_firmware(idtfc3);
+ if (err) {
+ if (err == -ENOENT) {
+ mutex_unlock(idtfc3->lock);
+ return -EPROBE_DEFER;
+ }
+ dev_warn(idtfc3->dev, "loading firmware failed with %d", err);
+ }
+
+ err = idtfc3_enable_ptp(idtfc3);
+ if (err) {
+ dev_err(idtfc3->dev, "idtfc3_enable_ptp failed with %d", err);
+ mutex_unlock(idtfc3->lock);
+ if (idtfc3->ptp_clock)
+ ptp_clock_unregister(idtfc3->ptp_clock);
+ return err;
+ }
+
+ mutex_unlock(idtfc3->lock);
+
+ platform_set_drvdata(pdev, idtfc3);
+
+ return 0;
+}
+
+static void idtfc3_remove(struct platform_device *pdev)
+{
+ struct idtfc3 *idtfc3 = platform_get_drvdata(pdev);
+
+ ptp_clock_unregister(idtfc3->ptp_clock);
+}
+
+static struct platform_driver idtfc3_driver = {
+ .driver = {
+ .name = "rc38xxx-phc",
+ },
+ .probe = idtfc3_probe,
+ .remove_new = idtfc3_remove,
+};
+
+module_platform_driver(idtfc3_driver);
diff --git a/drivers/ptp/ptp_fc3.h b/drivers/ptp/ptp_fc3.h
new file mode 100644
index 000000000000..897101579207
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTFC3_H
+#define PTP_IDTFC3_H
+
+#include <linux/ktime.h>
+#include <linux/ptp_clock.h>
+#include <linux/regmap.h>
+
+#define FW_FILENAME "idtfc3.bin"
+
+#define MAX_FFO_PPB (244000)
+#define TDC_GET_PERIOD (10)
+
+struct idtfc3 {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct device *dev;
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct device *mfd;
+ struct regmap *regmap;
+ struct idtfc3_hw_param hw_param;
+ u32 sub_sync_count;
+ u32 ns_per_sync;
+ int tdc_offset_sign;
+ u64 tdc_apll_freq;
+ u32 time_ref_freq;
+ u16 fod_n;
+ u8 lpf_mode;
+ /* Time counter */
+ u32 last_counter;
+ s64 ns;
+ u32 ns_per_counter;
+ u32 tc_update_period;
+ u32 tc_write_timeout;
+ s64 tod_write_overhead;
+};
+
+#endif /* PTP_IDTFC3_H */
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 5f858e426bbd..6506cfb89aa9 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -34,6 +34,9 @@
#define PCI_VENDOR_ID_OROLIA 0x1ad7
#define PCI_DEVICE_ID_OROLIA_ARTCARD 0xa000
+#define PCI_VENDOR_ID_ADVA 0xad5a
+#define PCI_DEVICE_ID_ADVA_TIMECARD 0x0400
+
static struct class timecard_class = {
.name = "timecard",
};
@@ -63,6 +66,13 @@ struct ocp_reg {
u32 status_drift;
};
+struct ptp_ocp_servo_conf {
+ u32 servo_offset_p;
+ u32 servo_offset_i;
+ u32 servo_drift_p;
+ u32 servo_drift_i;
+};
+
#define OCP_CTRL_ENABLE BIT(0)
#define OCP_CTRL_ADJUST_TIME BIT(1)
#define OCP_CTRL_ADJUST_OFFSET BIT(2)
@@ -397,10 +407,14 @@ static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
static int ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+
static const struct ocp_attr_group fb_timecard_groups[];
static const struct ocp_attr_group art_timecard_groups[];
+static const struct ocp_attr_group adva_timecard_groups[];
+
struct ptp_ocp_eeprom_map {
u16 off;
u16 len;
@@ -700,6 +714,12 @@ static struct ocp_resource ocp_fb_resource[] = {
},
{
.setup = ptp_ocp_fb_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -831,6 +851,170 @@ static struct ocp_resource ocp_art_resource[] = {
},
{
.setup = ptp_ocp_art_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
+ },
+ { }
+};
+
+static struct ocp_resource ocp_adva_resource[] = {
+ {
+ OCP_MEM_RESOURCE(reg),
+ .offset = 0x01000000, .size = 0x10000,
+ },
+ {
+ OCP_EXT_RESOURCE(ts0),
+ .offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 0,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts1),
+ .offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts2),
+ .offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ /* Timestamp for PHC and/or PPS generator */
+ {
+ OCP_EXT_RESOURCE(pps),
+ .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 5,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[0]),
+ .offset = 0x010D0000, .size = 0x10000, .irq_vec = 11,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[1]),
+ .offset = 0x010E0000, .size = 0x10000, .irq_vec = 12,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_ext),
+ .offset = 0x01030000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_clk),
+ .offset = 0x01040000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(tod),
+ .offset = 0x01050000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(image),
+ .offset = 0x00020000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_select),
+ .offset = 0x00130000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map1),
+ .offset = 0x00140000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map2),
+ .offset = 0x00220000, .size = 0x1000,
+ },
+ {
+ OCP_SERIAL_RESOURCE(gnss_port),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 9600,
+ },
+ },
+ {
+ OCP_SERIAL_RESOURCE(mac_port),
+ .offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[0]),
+ .offset = 0x01200000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[1]),
+ .offset = 0x01210000, .size = 0x10000,
+ },
+ {
+ OCP_SPI_RESOURCE(spi_flash),
+ .offset = 0x00310400, .size = 0x10000, .irq_vec = 9,
+ .extra = &(struct ptp_ocp_flash_info) {
+ .name = "spi_altera", .pci_offset = 0,
+ .data_size = sizeof(struct altera_spi_platform_data),
+ .data = &(struct altera_spi_platform_data) {
+ .num_chipselect = 1,
+ .num_devices = 1,
+ .devices = &(struct spi_board_info) {
+ .modalias = "spi-nor",
+ },
+ },
+ },
+ },
+ {
+ OCP_I2C_RESOURCE(i2c_ctrl),
+ .offset = 0x150000, .size = 0x100, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_i2c_info) {
+ .name = "ocores-i2c",
+ .fixed_rate = 50000000,
+ .data_size = sizeof(struct ocores_i2c_platform_data),
+ .data = &(struct ocores_i2c_platform_data) {
+ .clock_khz = 50000,
+ .bus_khz = 100,
+ .reg_io_width = 4, /* 32-bit/4-byte */
+ .reg_shift = 2, /* 32-bit addressing */
+ .num_devices = 2,
+ .devices = (struct i2c_board_info[]) {
+ { I2C_BOARD_INFO("24c02", 0x50) },
+ { I2C_BOARD_INFO("24mac402", 0x58),
+ .platform_data = "mac" },
+ },
+ },
+ },
+ },
+ {
+ .setup = ptp_ocp_adva_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0xc000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -839,6 +1023,7 @@ static const struct pci_device_id ptp_ocp_pcidev_id[] = {
{ PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) },
+ { PCI_DEVICE_DATA(ADVA, TIMECARD, &ocp_adva_resource) },
{ }
};
MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
@@ -917,6 +1102,30 @@ static const struct ocp_selector ptp_ocp_art_sma_out[] = {
{ }
};
+static const struct ocp_selector ptp_ocp_adva_sma_in[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PPS1", .value = 0x0001, .frequency = 1 },
+ { .name = "PPS2", .value = 0x0002, .frequency = 1 },
+ { .name = "TS1", .value = 0x0004, .frequency = 0 },
+ { .name = "TS2", .value = 0x0008, .frequency = 0 },
+ { .name = "FREQ1", .value = 0x0100, .frequency = 0 },
+ { .name = "FREQ2", .value = 0x0200, .frequency = 0 },
+ { .name = "None", .value = SMA_DISABLE, .frequency = 0 },
+ { }
+};
+
+static const struct ocp_selector ptp_ocp_adva_sma_out[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PHC", .value = 0x0001, .frequency = 1 },
+ { .name = "MAC", .value = 0x0002, .frequency = 1 },
+ { .name = "GNSS1", .value = 0x0004, .frequency = 1 },
+ { .name = "GEN1", .value = 0x0040 },
+ { .name = "GEN2", .value = 0x0080 },
+ { .name = "GND", .value = 0x2000 },
+ { .name = "VCC", .value = 0x4000 },
+ { }
+};
+
struct ocp_sma_op {
const struct ocp_selector *tbl[2];
void (*init)(struct ptp_ocp *bp);
@@ -1363,7 +1572,7 @@ ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
}
static int
-ptp_ocp_init_clock(struct ptp_ocp *bp)
+ptp_ocp_init_clock(struct ptp_ocp *bp, struct ptp_ocp_servo_conf *servo_conf)
{
struct timespec64 ts;
u32 ctrl;
@@ -1371,12 +1580,11 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
ctrl = OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
- /* NO DRIFT Correction */
- /* offset_p:i 1/8, offset_i: 1/16, drift_p: 0, drift_i: 0 */
- iowrite32(0x2000, &bp->reg->servo_offset_p);
- iowrite32(0x1000, &bp->reg->servo_offset_i);
- iowrite32(0, &bp->reg->servo_drift_p);
- iowrite32(0, &bp->reg->servo_drift_i);
+ /* servo configuration */
+ iowrite32(servo_conf->servo_offset_p, &bp->reg->servo_offset_p);
+ iowrite32(servo_conf->servo_offset_i, &bp->reg->servo_offset_i);
+ iowrite32(servo_conf->servo_drift_p, &bp->reg->servo_drift_p);
+ iowrite32(servo_conf->servo_drift_p, &bp->reg->servo_drift_i);
/* latch servo values */
ctrl |= OCP_CTRL_ADJUST_SERVO;
@@ -2348,6 +2556,14 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
.set_output = ptp_ocp_sma_fb_set_output,
};
+static const struct ocp_sma_op ocp_adva_sma_op = {
+ .tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out },
+ .init = ptp_ocp_sma_fb_init,
+ .get = ptp_ocp_sma_fb_get,
+ .set_inputs = ptp_ocp_sma_fb_set_inputs,
+ .set_output = ptp_ocp_sma_fb_set_output,
+};
+
static int
ptp_ocp_set_pins(struct ptp_ocp *bp)
{
@@ -2427,7 +2643,7 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
return err;
ptp_ocp_sma_init(bp);
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
}
static bool
@@ -2589,7 +2805,44 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
if (err)
return err;
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
+}
+
+/* ADVA specific board initializers; last "resource" registered. */
+static int
+ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ int err;
+ u32 version;
+
+ bp->flash_start = 0xA00000;
+ bp->eeprom_map = fb_eeprom_map;
+ bp->sma_op = &ocp_adva_sma_op;
+
+ version = ioread32(&bp->image->version);
+ /* if lower 16 bits are empty, this is the fw loader. */
+ if ((version & 0xffff) == 0) {
+ version = version >> 16;
+ bp->fw_loader = true;
+ }
+ bp->fw_tag = 3;
+ bp->fw_version = version & 0xffff;
+ bp->fw_cap = OCP_CAP_BASIC | OCP_CAP_SIGNAL | OCP_CAP_FREQ;
+
+ ptp_ocp_tod_init(bp);
+ ptp_ocp_nmea_out_init(bp);
+ ptp_ocp_signal_init(bp);
+
+ err = ptp_ocp_attr_group_add(bp, adva_timecard_groups);
+ if (err)
+ return err;
+
+ err = ptp_ocp_set_pins(bp);
+ if (err)
+ return err;
+ ptp_ocp_sma_init(bp);
+
+ return ptp_ocp_init_clock(bp, r->extra);
}
static ssize_t
@@ -3564,6 +3817,37 @@ static const struct ocp_attr_group art_timecard_groups[] = {
{ },
};
+static struct attribute *adva_timecard_attrs[] = {
+ &dev_attr_serialnum.attr,
+ &dev_attr_gnss_sync.attr,
+ &dev_attr_clock_source.attr,
+ &dev_attr_available_clock_sources.attr,
+ &dev_attr_sma1.attr,
+ &dev_attr_sma2.attr,
+ &dev_attr_sma3.attr,
+ &dev_attr_sma4.attr,
+ &dev_attr_available_sma_inputs.attr,
+ &dev_attr_available_sma_outputs.attr,
+ &dev_attr_clock_status_drift.attr,
+ &dev_attr_clock_status_offset.attr,
+ &dev_attr_ts_window_adjust.attr,
+ &dev_attr_tod_correction.attr,
+ NULL,
+};
+
+static const struct attribute_group adva_timecard_group = {
+ .attrs = adva_timecard_attrs,
+};
+
+static const struct ocp_attr_group adva_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &adva_timecard_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq1_group },
+ { },
+};
+
static void
gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
const char *def)
@@ -4209,10 +4493,11 @@ ptp_ocp_detach(struct ptp_ocp *bp)
device_unregister(&bp->dev);
}
-static int ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct ptp_ocp *bp = priv;
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index f7a499a1bd39..a15460aaa03b 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -24,8 +24,7 @@ static ssize_t max_phase_adjustment_show(struct device *dev,
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
- return snprintf(page, PAGE_SIZE - 1, "%d\n",
- ptp->info->getmaxphase(ptp->info));
+ return sysfs_emit(page, "%d\n", ptp->info->getmaxphase(ptp->info));
}
static DEVICE_ATTR_RO(max_phase_adjustment);
@@ -34,7 +33,7 @@ static ssize_t var##_show(struct device *dev, \
struct device_attribute *attr, char *page) \
{ \
struct ptp_clock *ptp = dev_get_drvdata(dev); \
- return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var); \
+ return sysfs_emit(page, "%d\n", ptp->info->var); \
} \
static DEVICE_ATTR(name, 0444, var##_show, NULL);
@@ -102,8 +101,8 @@ static ssize_t extts_fifo_show(struct device *dev,
if (!qcnt)
goto out;
- cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
- event.index, event.t.sec, event.t.nsec);
+ cnt = sysfs_emit(page, "%u %lld %u\n",
+ event.index, event.t.sec, event.t.nsec);
out:
return cnt;
}
@@ -194,7 +193,7 @@ static ssize_t n_vclocks_show(struct device *dev,
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->n_vclocks);
mutex_unlock(&ptp->n_vclocks_mux);
@@ -270,7 +269,7 @@ static ssize_t max_vclocks_show(struct device *dev,
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->max_vclocks);
return size;
}
diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
index bc88a40a88d4..8bbcd983a74a 100644
--- a/drivers/regulator/max5970-regulator.c
+++ b/drivers/regulator/max5970-regulator.c
@@ -29,8 +29,8 @@ struct max5970_regulator {
};
enum max597x_regulator_id {
- MAX597X_SW0,
- MAX597X_SW1,
+ MAX597X_sw0,
+ MAX597X_sw1,
};
static int max5970_read_adc(struct regmap *regmap, int reg, long *val)
@@ -378,8 +378,8 @@ static int max597x_dt_parse(struct device_node *np,
}
static const struct regulator_desc regulators[] = {
- MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
- MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
+ MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"),
+ MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"),
};
static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
@@ -392,7 +392,7 @@ static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
return ret;
if (*val)
- return regmap_write(map, reg, *val);
+ return regmap_write(map, reg, 0);
return 0;
}
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 698c420e0869..60cfcd741c2a 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -157,7 +157,17 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
pwm_get_state(drvdata->pwm, &pstate);
+ if (!pstate.enabled) {
+ if (pstate.polarity == PWM_POLARITY_INVERSED)
+ pstate.duty_cycle = pstate.period;
+ else
+ pstate.duty_cycle = 0;
+ }
+
voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+ if (voltage < min(max_uV_duty, min_uV_duty) ||
+ voltage > max(max_uV_duty, min_uV_duty))
+ return -ENOTRECOVERABLE;
/*
* The dutycycle for min_uV might be greater than the one for max_uV.
@@ -313,6 +323,32 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev,
return 0;
}
+static int pwm_regulator_init_boot_on(struct platform_device *pdev,
+ struct pwm_regulator_data *drvdata,
+ const struct regulator_init_data *init_data)
+{
+ struct pwm_state pstate;
+
+ if (!init_data->constraints.boot_on || drvdata->enb_gpio)
+ return 0;
+
+ pwm_get_state(drvdata->pwm, &pstate);
+ if (pstate.enabled)
+ return 0;
+
+ /*
+ * Update the duty cycle so the output does not change
+ * when the regulator core enables the regulator (and
+ * thus the PWM channel).
+ */
+ if (pstate.polarity == PWM_POLARITY_INVERSED)
+ pstate.duty_cycle = pstate.period;
+ else
+ pstate.duty_cycle = 0;
+
+ return pwm_apply_might_sleep(drvdata->pwm, &pstate);
+}
+
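The polarity handling above mirrors the fix in pwm_regulator_get_voltage(): a disabled PWM pin rests at its inactive level, which is 0% relative duty for a normal-polarity PWM but 100% for an inverted one, so emulating that state (duty_cycle = period when inverted) keeps the computed voltage consistent with what the pin is actually driving.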
static int pwm_regulator_probe(struct platform_device *pdev)
{
const struct regulator_init_data *init_data;
@@ -372,6 +408,13 @@ static int pwm_regulator_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n",
+ ret);
+ return ret;
+ }
+
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
if (IS_ERR(regulator)) {
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index f48214e2c3b4..04133510e5af 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -726,9 +726,25 @@ static int ti_abb_probe(struct platform_device *pdev)
return PTR_ERR(abb->setup_reg);
}
- abb->int_base = devm_platform_ioremap_resource_byname(pdev, "int-address");
- if (IS_ERR(abb->int_base))
- return PTR_ERR(abb->int_base);
+ pname = "int-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ if (!res) {
+ dev_err(dev, "Missing '%s' IO resource\n", pname);
+ return -ENODEV;
+ }
+ /*
+ * The MPU interrupt status register (PRM_IRQSTATUS_MPU) is
+ * shared between regulator-abb-{ivahd,dspeve,gpu} driver
+ * instances. Therefore use devm_ioremap() rather than
+ * devm_platform_ioremap_resource_byname() to avoid busy
+ * resource region conflicts.
+ */
+ abb->int_base = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!abb->int_base) {
+ dev_err(dev, "Unable to map '%s'\n", pname);
+ return -ENOMEM;
+ }
/* Map Optional resources */
pname = "efuse-address";
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c533d1dadc6b..a5dba3829769 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY ||
+ cdev->private->flags.doverify) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
}
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
+ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
return -EBUSY;
ret = cio_set_options (sch, flags);
if (ret)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b92a32b4b114..04c64ce0a1ca 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
if (!recover) {
hash_del(&addr->hnode);
kfree(addr);
- continue;
+ } else {
+ /* prepare for recovery */
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- addr->disp_flag = QETH_DISP_ADDR_ADD;
}
mutex_unlock(&card->ip_lock);
@@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc) {
+ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
+ /* keep it in the records */
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
+ /* bad address */
hash_del(&addr->hnode);
kfree(addr);
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index addac7fbe37b..9ce27092729c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1270,7 +1270,7 @@ source "drivers/scsi/arm/Kconfig"
config JAZZ_ESP
bool "MIPS JAZZ FAS216 SCSI support"
- depends on MACH_JAZZ && SCSI
+ depends on MACH_JAZZ && SCSI=y
select SCSI_SPI_ATTRS
help
This is the driver for the onboard SCSI host adapter of MIPS Magnum
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 19eee108db02..5c8d1ba3f8f3 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
- unsigned long flags;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
- unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
int error;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
}
/**
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 408a806bf4c2..c64a085a7ee2 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -263,6 +263,7 @@ static ssize_t store_ctlr_mode(struct device *dev,
const char *buf, size_t count)
{
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ int res;
if (count > FCOE_MAX_MODENAME_LEN)
return -EINVAL;
@@ -279,12 +280,13 @@ static ssize_t store_ctlr_mode(struct device *dev,
return -ENOTSUPP;
}
- ctlr->mode = sysfs_match_string(fip_conn_type_names, buf);
- if (ctlr->mode < 0 || ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+ res = sysfs_match_string(fip_conn_type_names, buf);
+ if (res < 0 || res == FIP_CONN_TYPE_UNKNOWN) {
LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
buf);
return -EINVAL;
}
+ ctlr->mode = res;
ctlr->f->set_fcoe_ctlr_mode(ctlr);
LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 2074937c05bc..ce73f08ee889 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -305,6 +305,7 @@ struct fnic {
unsigned int copy_wq_base;
struct work_struct link_work;
struct work_struct frame_work;
+ struct work_struct flush_work;
struct sk_buff_head frame_queue;
struct sk_buff_head tx_queue;
@@ -363,7 +364,7 @@ void fnic_handle_event(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
-void fnic_flush_tx(struct fnic *);
+void fnic_flush_tx(struct work_struct *work);
void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
void fnic_update_mac(struct fc_lport *, u8 *new);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 5e312a55cc7d..a08293b2ad9f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -1182,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
/**
* fnic_flush_tx() - send queued frames.
- * @fnic: fnic device
+ * @work: pointer to work element
*
* Send frames that were waiting to go out in FC or Ethernet mode.
* Whenever changing modes we purge queued frames, so these frames should
@@ -1190,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
*
* Called without fnic_lock held.
*/
-void fnic_flush_tx(struct fnic *fnic)
+void fnic_flush_tx(struct work_struct *work)
{
+ struct fnic *fnic = container_of(work, struct fnic, flush_work);
struct sk_buff *skb;
struct fc_frame *fp;
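The fnic_flush_tx() conversion above follows the standard workqueue idiom: the handler receives the work_struct and recovers its enclosing object with container_of(), so the function needs no extra argument. A self-contained userspace re-derivation of the macro (the kernel's own lives in <linux/container_of.h>; fake_fnic is hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct fake_fnic {
	int id;
	struct work_struct flush_work;
};

static void flush_handler(struct work_struct *work)
{
	/* Recover the device from the embedded work item. */
	struct fake_fnic *f = container_of(work, struct fake_fnic, flush_work);

	printf("flushing fnic %d\n", f->id);
}

int main(void)
{
	struct fake_fnic f = { .id = 7 };

	flush_handler(&f.flush_work);	/* what queue_work() ultimately does */
	return 0;
}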
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5ed1d897311a..29eead383eb9 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -830,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
+ INIT_WORK(&fnic->flush_work, fnic_flush_tx);
skb_queue_head_init(&fnic->fip_frame_queue);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 4d6db4509e75..fc4cee91b175 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -546,6 +546,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) {
WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n",
fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag));
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -679,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
reset_cmpl_handler_end:
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
@@ -735,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
queue_work(fnic_event_queue, &fnic->frame_work);
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
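Two things happen in the fnic_scsi.c hunks above: the early SCSI_MLQUEUE_HOST_BUSY return gains the spin_unlock_irqrestore() it was leaking, and the completion handlers queue flush_work instead of flushing inline. The leak is the classic early-return-under-spinlock bug; a compilable sketch of the single-exit style that avoids it (pthread mutex standing in for the spinlock, hypothetical names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int table_busy;

static int queue_cmd(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (table_busy) {
		ret = -1;	/* a bare "return -1;" here would leak the lock */
		goto out;
	}
	table_busy = 1;
out:
	pthread_mutex_unlock(&lock);	/* one unlock covers every path */
	return ret;
}

int main(void)
{
	printf("%d %d\n", queue_cmd(), queue_cmd());
	return 0;
}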
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 2a50fda3a628..625fd547ee60 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -371,7 +371,6 @@ static u16 initio_se2_rd(unsigned long base, u8 addr)
*/
static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
- u8 rb;
u8 instr;
int i;
@@ -400,7 +399,7 @@ static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
- if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
+ if (inb(base + TUL_NVRAM) & SE2DI)
break; /* write complete */
}
outb(0, base + TUL_NVRAM); /* -CS */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 71f711cb0628..355a0bc0828e 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -3387,7 +3387,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
return SCI_FAILURE;
}
- return SCI_SUCCESS;
+ return status;
}
static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d26941b131fd..bf879d81846b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1918,7 +1918,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datasegcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t physaddr;
- int i = 0, num_sge = 0, status;
- uint32_t reftag;
+ int i = 0, status;
+ uint32_t reftag, num_sge = 0;
uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
@@ -2099,7 +2099,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datacnt, int protcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t rc;
#endif
uint32_t checking = 1;
- uint32_t dma_offset = 0;
- int num_sge = 0, j = 2;
+ uint32_t dma_offset = 0, num_sge = 0;
+ int j = 2;
struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index d8c57a0a518f..528f19f782f2 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -475,7 +475,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
* @op_reply_q: op_reply_qinfo object
* @reply_ci: operational reply descriptor's queue consumer index
*
- * Returns reply descriptor frame address
+ * Returns: reply descriptor frame address
*/
static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
@@ -1063,7 +1063,6 @@ enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
* @mrioc: Adapter instance reference
*
* Free the DMA memory allocated for IOCTL handling purpose.
-
*
* Return: None
*/
@@ -1106,7 +1105,6 @@ static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
/**
* mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
* @mrioc: Adapter instance reference
-
*
* This function allocates dmaable memory required to handle the
* application issued MPI3 IOCTL requests.
@@ -1241,7 +1239,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
* during reset/resume
* @mrioc: Adapter instance reference
*
- * Return zero if the new IOCFacts parameters value is compatible with
+ * Return: zero if the new IOCFacts parameters value is compatible with
* older values else return -EPERM
*/
static int
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index c0c8ab586957..d32ad46318cb 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1671,7 +1671,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
void
mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
{
- struct host_port h_port[64];
+ struct host_port *h_port = NULL;
int i, j, found, host_port_count = 0, port_idx;
u16 sz, attached_handle, ioc_status;
struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
@@ -1685,6 +1685,10 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_io_unit_pg0)
return;
+ h_port = kcalloc(64, sizeof(struct host_port), GFP_KERNEL);
+ if (!h_port)
+ goto out;
+
if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -1814,6 +1818,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
}
}
out:
+ kfree(h_port);
kfree(sas_io_unit_pg0);
}
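The mpi3mr_transport.c change above moves a 64-entry struct host_port array off the kernel stack: with a structure of a few hundred bytes, that one local could eat several kilobytes of the small fixed kernel stack, so it becomes a kcalloc() allocation freed on the common exit label. A kernel-style sketch of the pattern (hypothetical big_entry type; compiles only in kernel context):

#include <linux/slab.h>

struct big_entry { char blob[256]; };

static void refresh_table(void)
{
	struct big_entry *tbl;

	/* 64 * 256 bytes is far too much for a single stack frame. */
	tbl = kcalloc(64, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return;

	/* ... populate and consume tbl ... */

	kfree(tbl);
}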
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8761bc58d965..b8120ca93c79 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -7378,7 +7378,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
return -EFAULT;
}
- issue_diag_reset:
+ return 0;
+
+issue_diag_reset:
rc = _base_diag_reset(ioc);
return rc;
}
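The mpt3sas hunk above fixes an unintended fall-through: on success _base_wait_for_iocstate() ran straight into the issue_diag_reset label, so every caller triggered a diagnostic reset. Inserting return 0 before the label restores the intent. A compilable illustration of the hazard:

#include <stdio.h>

static int wait_for_state(int ready)
{
	if (!ready)
		goto recover;

	return 0;	/* without this, success falls into recovery */

recover:
	puts("issuing diag reset");
	return -1;
}

int main(void)
{
	printf("%d %d\n", wait_for_state(1), wait_for_state(0));
	return 0;
}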
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 76d369343c7a..8cad9792a562 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return result + 4;
}
+enum scsi_vpd_parameters {
+ SCSI_VPD_HEADER_SIZE = 4,
+ SCSI_VPD_LIST_SIZE = 36,
+};
+
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
- unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
int result;
if (sdev->no_vpd_size)
return SCSI_DEFAULT_VPD_LEN;
/*
+ * Fetch the supported pages VPD and validate that the requested page
+ * number is present.
+ */
+ if (page != 0) {
+ result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
+ if (result < SCSI_VPD_HEADER_SIZE)
+ return 0;
+
+ result -= SCSI_VPD_HEADER_SIZE;
+ if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+ return 0;
+ }
+ /*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
* which can not handle allocation lengths as large as
* potentially requested by the caller.
*/
- result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
if (result < 0)
return 0;
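The scsi.c change above consults VPD page 0 (the Supported VPD Pages list) before asking a device for any other page, scanning the advertised page codes with memchr(); legacy devices that misbehave on unsupported-page INQUIRYs are then never sent one. A compilable model of the check, with a hand-built buffer standing in for a real INQUIRY response:

#include <stdio.h>
#include <string.h>

#define VPD_HEADER_SIZE 4

int main(void)
{
	/* byte 1 = page code 0, bytes 2-3 = list length, then page codes */
	unsigned char vpd0[] = { 0x00, 0x00, 0x00, 0x03, 0x00, 0x80, 0x83 };
	int payload = sizeof(vpd0) - VPD_HEADER_SIZE;
	unsigned char page = 0x83;

	if (memchr(&vpd0[VPD_HEADER_SIZE], page, payload))
		printf("page 0x%02x advertised, safe to fetch\n", page);
	else
		printf("page 0x%02x not advertised, skip\n", page);
	return 0;
}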
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 885a7d5df3b9..612489afe8d2 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
struct scsi_cmnd *);
-void scsi_eh_wakeup(struct Scsi_Host *shost)
+void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
{
lockdep_assert_held(shost->host_lock);
- if (scsi_host_busy(shost) == shost->host_failed) {
+ if (busy == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
shost->host_eh_scheduled++;
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, scsi_host_busy(shost));
}
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
{
struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
struct Scsi_Host *shost = scmd->device->host;
+ unsigned int busy = scsi_host_busy(shost);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
shost->host_failed++;
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
@@ -2197,15 +2198,18 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
struct scsi_cmnd *scmd, *next;
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
+ struct scsi_device *sdev = scmd->device;
+
list_del_init(&scmd->eh_entry);
- if (scsi_device_online(scmd->device) &&
- !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
- scsi_eh_should_retry_cmd(scmd)) {
+ if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) &&
+ scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush retry cmd\n",
current->comm));
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ blk_mq_kick_requeue_list(sdev->request_queue);
} else {
/*
* If just we got sense for the device (called
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cf3864f72093..df5ac03d5d6c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
rcu_read_lock();
__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
+ unsigned int busy = scsi_host_busy(shost);
+
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
rcu_read_unlock();
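The scsi_error.c/scsi_lib.c hunks above change scsi_eh_wakeup() to take the busy count as a parameter: scsi_host_busy() walks the whole tag set and is too expensive to run with host_lock held and interrupts off, so callers sample it first and pass the snapshot in. A sketch of this compute-outside, decide-inside pattern (pthread mutex as the lock; count_busy() is a hypothetical stand-in):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int failed = 3;

static unsigned int count_busy(void)
{
	return 3;	/* stands in for an expensive tag-set walk */
}

static void maybe_wake(void)
{
	unsigned int busy = count_busy();	/* sampled before locking */

	pthread_mutex_lock(&lock);
	if (busy == failed)			/* cheap test under the lock */
		puts("wake error handler");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	maybe_wake();
	return 0;
}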
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3f0dfb97db6b..1fbfe1b52c9f 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
extern enum blk_eh_timer_return scsi_timeout(struct request *req);
extern int scsi_error_handler(void *host);
extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
+extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
extern void scsi_eh_scmd_add(struct scsi_cmnd *);
void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *work_q,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0833b3e6aa6e..bdd0acf7fa3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3407,6 +3407,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return true;
}
+static void sd_read_block_zero(struct scsi_disk *sdkp)
+{
+ unsigned int buf_len = sdkp->device->sector_size;
+ char *buffer, cmd[10] = { };
+
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+
+ scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+ SD_TIMEOUT, sdkp->max_retries, NULL);
+ kfree(buffer);
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3446,7 +3464,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
-
+ /*
+ * Some USB/UAS devices return generic values for mode pages
+ * until the media has been accessed. Trigger a READ operation
+ * to force the device to populate mode pages.
+ */
+ if (sdp->read_before_ms)
+ sd_read_block_zero(sdkp);
/*
* set the default to rotational. All non-rotational devices
* support the block characteristics VPD page, which will
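sd_read_block_zero() above issues a plain READ(10) for LBA 0 so that quirky USB/UAS bridges fill in their mode pages before sd queries them; CDB fields are big-endian, hence the put_unaligned_be32/be16 helpers. A compilable model of the CDB layout, with the kernel helpers open-coded:

#include <stdio.h>

#define READ_10 0x28

static void put_be32(unsigned int v, unsigned char *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be16(unsigned int v, unsigned char *p)
{
	p[0] = v >> 8; p[1] = v;
}

int main(void)
{
	unsigned char cdb[10] = { 0 };

	cdb[0] = READ_10;
	put_be32(0, &cdb[2]);	/* logical block address 0 */
	put_be16(1, &cdb[7]);	/* transfer one logical block */

	for (int i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	putchar('\n');
	return 0;
}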
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 041940183516..cdedc271857a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1347,7 +1347,6 @@ struct pqi_ctrl_info {
bool controller_online;
bool block_requests;
bool scan_blocked;
- u8 logical_volume_rescan_needed : 1;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 9a58df9312fa..385180c98be4 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.24-046"
+#define DRIVER_VERSION "2.1.26-030"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 24
-#define DRIVER_REVISION 46
+#define DRIVER_RELEASE 26
+#define DRIVER_REVISION 30
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -2093,8 +2093,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
if (existing_device->devtype == TYPE_DISK) {
existing_device->raid_level = new_device->raid_level;
existing_device->volume_status = new_device->volume_status;
- if (ctrl_info->logical_volume_rescan_needed)
- existing_device->rescan = true;
memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
kfree(existing_device->raid_map);
@@ -2164,6 +2162,20 @@ static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
}
+static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
+{
+ if (pqi_device_in_remove(device))
+ return false;
+
+ if (device->sdev == NULL)
+ return false;
+
+ if (!scsi_device_online(device->sdev))
+ return false;
+
+ return device->rescan;
+}
+
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
@@ -2284,9 +2296,13 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
device->advertised_queue_depth = device->queue_depth;
scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
- if (device->rescan) {
- scsi_rescan_device(device->sdev);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ if (pqi_volume_rescan_needed(device)) {
device->rescan = false;
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ scsi_rescan_device(device->sdev);
+ } else {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
}
}
@@ -2308,8 +2324,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
}
- ctrl_info->logical_volume_rescan_needed = false;
-
}
static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
@@ -3702,6 +3716,21 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
return ack_event;
}
+static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
+ device->rescan = true;
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
{
unsigned long flags;
@@ -3742,7 +3771,7 @@ static void pqi_event_worker(struct work_struct *work)
ack_event = true;
rescan_needed = true;
if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
- ctrl_info->logical_volume_rescan_needed = true;
+ pqi_mark_volumes_for_rescan(ctrl_info);
else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
pqi_disable_raid_bypass(ctrl_info);
}
@@ -6504,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ if (!ctrl_info->disable_managed_interrupts)
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ctrl_info->pci_dev, 0);
+ else
+ return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -10144,6 +10176,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02f8)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02f9)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1137, 0x02fa)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10200,6 +10244,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1010)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1011)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1043)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1044)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1045)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
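The smartpqi rework above replaces the controller-wide logical_volume_rescan_needed flag with per-device rescan bits that are set, tested and cleared under scsi_device_list_lock, so a rescan can neither be lost nor issued for a device that is going away. The core idiom is test-and-clear under the lock, then do the heavy work unlocked; a compilable sketch (hypothetical dev type):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct dev { int rescan; };

static void maybe_rescan(struct dev *d)
{
	int do_it;

	pthread_mutex_lock(&list_lock);
	do_it = d->rescan;	/* test... */
	d->rescan = 0;		/* ...and clear, atomically w.r.t. markers */
	pthread_mutex_unlock(&list_lock);

	if (do_it)
		puts("rescan device");	/* heavyweight work done unlocked */
}

int main(void)
{
	struct dev d = { .rescan = 1 };

	maybe_rescan(&d);	/* rescans */
	maybe_rescan(&d);	/* no-op: flag already consumed */
	return 0;
}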
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a95936b18f69..7ceb982040a5 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -330,6 +330,7 @@ enum storvsc_request_type {
*/
static int storvsc_ringbuffer_size = (128 * 1024);
+static int aligned_ringbuffer_size;
static u32 max_outstanding_req_per_channel;
static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
@@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
new_sc->next_request_id_callback = storvsc_next_request_id;
ret = vmbus_open(new_sc,
- storvsc_ringbuffer_size,
- storvsc_ringbuffer_size,
+ aligned_ringbuffer_size,
+ aligned_ringbuffer_size,
(void *)&props,
sizeof(struct vmstorage_channel_properties),
storvsc_on_channel_callback, new_sc);
@@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device,
dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
stor_device->port_number = host->host_no;
- ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
+ ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
if (ret)
goto err_out1;
@@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
{
int ret;
- ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+ ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
hv_dev_is_fc(hv_dev));
return ret;
}
@@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void)
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
*/
+ aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
max_outstanding_req_per_channel =
- ((storvsc_ringbuffer_size - PAGE_SIZE) /
+ ((aligned_ringbuffer_size - PAGE_SIZE) /
ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
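The storvsc change above computes the VMBus ring size once with VMBUS_RING_SIZE() and uses that aligned figure both for vmbus_open() and for deriving max_outstanding_req_per_channel, so the accounting matches what is actually allocated. A rough compilable model of the round-up arithmetic; the header reservation and request size here are illustrative only, not the real macro's constants:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long requested = 128 * 1024;
	unsigned long ring_hdr = 4096;	/* illustrative header reservation */
	/* stand-in for VMBUS_RING_SIZE(): header + payload, page aligned */
	unsigned long aligned = ALIGN_UP(ring_hdr + requested, PAGE_SIZE);
	unsigned long req_sz = 512;	/* illustrative max request size */

	printf("aligned ring: %lu bytes\n", aligned);
	printf("max outstanding: %lu\n", (aligned - PAGE_SIZE) / req_sz);
	return 0;
}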
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 4cf20be668a6..617eb892f4ad 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -188,8 +188,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
fn(vscsi, buf);
- if (unlikely(virtqueue_is_broken(vq)))
- break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
index 780199bf351e..49a0955e82d6 100644
--- a/drivers/soc/apple/mailbox.c
+++ b/drivers/soc/apple/mailbox.c
@@ -296,14 +296,14 @@ struct apple_mbox *apple_mbox_get(struct device *dev, int index)
of_node_put(args.np);
if (!pdev)
- return ERR_PTR(EPROBE_DEFER);
+ return ERR_PTR(-EPROBE_DEFER);
mbox = platform_get_drvdata(pdev);
if (!mbox)
- return ERR_PTR(EPROBE_DEFER);
+ return ERR_PTR(-EPROBE_DEFER);
if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER))
- return ERR_PTR(ENODEV);
+ return ERR_PTR(-ENODEV);
return mbox;
}
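The apple mailbox fix above is about the errno sign: the kernel encodes errors in the last 4095 pointer values, so ERR_PTR() must be given a negative errno; ERR_PTR(EPROBE_DEFER) with a positive value yields an ordinary-looking pointer that IS_ERR() does not catch. A self-contained re-derivation of the encoding:

#include <stdio.h>

#define MAX_ERRNO	4095
#define EPROBE_DEFER	517

static void *ERR_PTR(long error) { return (void *)error; }

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	printf("ERR_PTR(EPROBE_DEFER)  -> IS_ERR = %d\n",
	       IS_ERR(ERR_PTR(EPROBE_DEFER)));	/* 0: the error is lost */
	printf("ERR_PTR(-EPROBE_DEFER) -> IS_ERR = %d\n",
	       IS_ERR(ERR_PTR(-EPROBE_DEFER)));	/* 1: caught as intended */
	return 0;
}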
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index 9b0fdd95276e..19f4b576f822 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,5 +1,5 @@
config POLARFIRE_SOC_SYS_CTRL
- tristate "POLARFIRE_SOC_SYS_CTRL"
+ tristate "Microchip PolarFire SoC (MPFS) system controller support"
depends on POLARFIRE_SOC_MAILBOX
depends on MTD
help
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index f4bfd24386f1..f913e9bd57ed 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -265,10 +265,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->client_mask = *match_data;
+ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+ if (IS_ERR(pg->pdr)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
+ "failed to initialize pdr\n");
+ return ret;
+ }
+
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
if (ret)
- return ret;
+ goto out_release_pdr_handle;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@@ -281,17 +288,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
goto out_release_altmode_aux;
}
- pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
- if (IS_ERR(pg->pdr)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
- goto out_release_aux_devices;
- }
-
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
if (IS_ERR(service)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
"failed adding pdr lookup for charger_pd\n");
- goto out_release_pdr_handle;
+ goto out_release_aux_devices;
}
mutex_lock(&__pmic_glink_lock);
@@ -300,8 +301,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
return 0;
-out_release_pdr_handle:
- pdr_handle_release(pg->pdr);
out_release_aux_devices:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
pmic_glink_del_aux_device(pg, &pg->ps_aux);
@@ -311,6 +310,8 @@ out_release_altmode_aux:
out_release_ucsi_aux:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+out_release_pdr_handle:
+ pdr_handle_release(pg->pdr);
return ret;
}
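The pmic_glink.c reordering above moves pdr_handle_alloc() to the top of probe and, symmetrically, its release to the bottom of the goto ladder: unwind labels must run in exact reverse order of acquisition, or an early failure tears resources down in the wrong sequence. A compilable skeleton of the idiom (hypothetical init/undo steps):

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b (fails)"); return -1; }
static void undo_a(void) { puts("undo a"); }

static int probe(void)
{
	int ret;

	ret = init_a();			/* acquired first... */
	if (ret)
		return ret;

	ret = init_b();
	if (ret)
		goto out_undo_a;	/* ...so it is released last */

	return 0;

out_undo_a:
	undo_a();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}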
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 5fcd0fdd2faa..b3808fc24c69 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct device *bridge;
+ struct auxiliary_device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,7 +230,7 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
- drm_aux_hpd_bridge_notify(alt_port->bridge,
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
alt_port->hpd_state ?
connector_status_connected :
connector_status_disconnected);
@@ -454,7 +454,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+ alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
if (IS_ERR(alt_port->bridge)) {
fwnode_handle_put(fwnode);
return PTR_ERR(alt_port->bridge);
@@ -510,6 +510,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
}
}
+ for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
+ alt_port = &altmode->ports[port];
+ if (!alt_port->bridge)
+ continue;
+
+ ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
+ if (ret)
+ return ret;
+ }
+
altmode->client = devm_pmic_glink_register_client(dev,
altmode->owner_id,
pmic_glink_altmode_callback,
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index d96222e6d7d2..cfdaa5eaec76 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include "spi-bcm-qspi.h"
@@ -1221,7 +1221,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
/* non-aligned and very short transfers are handled by MSPI */
if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
- len < 4)
+ len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
mspi_read = true;
if (!has_bspi(qspi) || mspi_read)
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index f94e0d370d46..1a8d03958dff 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1927,24 +1927,18 @@ static void cqspi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int cqspi_suspend(struct device *dev)
+static int cqspi_runtime_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
- int ret;
- ret = spi_controller_suspend(host);
cqspi_controller_enable(cqspi, 0);
-
clk_disable_unprepare(cqspi->clk);
-
- return ret;
+ return 0;
}
-static int cqspi_resume(struct device *dev)
+static int cqspi_runtime_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
@@ -1952,12 +1946,27 @@ static int cqspi_resume(struct device *dev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
+ return 0;
+}
+
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(cqspi->host);
+}
- return spi_controller_resume(host);
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_resume(cqspi->host);
}
-static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend,
- cqspi_resume, NULL);
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+ RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
+};
static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
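The cadence-quadspi PM split above stops sharing one suspend/resume pair between runtime PM and system sleep: runtime callbacks now only gate the controller clock, while the system-sleep callbacks quiesce the SPI queue through spi_controller_suspend()/resume(), and the two sets are combined with RUNTIME_PM_OPS() and SYSTEM_SLEEP_PM_OPS(). A kernel-style skeleton of that shape (hypothetical my_* callbacks; compiles only in kernel context):

#include <linux/pm.h>

static int my_runtime_suspend(struct device *dev)
{
	/* cheap and frequent: gate clocks only */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	return 0;
}

static int my_suspend(struct device *dev)
{
	/* rare and heavyweight: flush queues, save state */
	return 0;
}

static int my_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
};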
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index a50eb4db79de..e5140532071d 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -317,6 +317,15 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
xspi->rx_bytes -= nrx;
while (ntx || nrx) {
+ if (nrx) {
+ u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+ if (xspi->rxbuf)
+ *xspi->rxbuf++ = data;
+
+ nrx--;
+ }
+
if (ntx) {
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
@@ -326,14 +335,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
ntx--;
}
- if (nrx) {
- u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
-
- if (xspi->rxbuf)
- *xspi->rxbuf++ = data;
-
- nrx--;
- }
}
}
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index f0b630fe16c3..b341b6908df0 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -441,7 +441,6 @@ static void mcfqspi_remove(struct platform_device *pdev)
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
mcfqspi_cs_teardown(mcfqspi);
- clk_disable_unprepare(mcfqspi->clk);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index f13073e12593..b24190526ce9 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -244,7 +244,10 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
priv->ctlr->use_gpio_descriptors = true;
priv->ctlr->auto_runtime_pm = true;
- devm_pm_runtime_enable(priv->dev);
+ ret = devm_pm_runtime_enable(priv->dev);
+ if (ret)
+ return ret;
+
pm_runtime_idle(priv->dev);
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index 9d22018f7985..1301d14483d4 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
{
struct hisi_sfc_v3xx_host *host = data;
+ u32 reg;
+
+ reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
+ if (!reg)
+ return IRQ_NONE;
hisi_sfc_v3xx_disable_int(host);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 272bc871a848..833a1bb7a914 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -2,6 +2,7 @@
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -660,15 +661,15 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
if (spi_imx->usedma) {
- ctrl |= (spi_imx->bits_per_word *
- spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1)
+ ctrl |= (spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
} else {
if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1)
+ ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+ ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+ BITS_PER_BYTE) * spi_imx->bits_per_word
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
}
@@ -1344,7 +1345,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
controller->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
- dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+ dev_err_probe(dev, ret, "can't get the TX DMA channel!\n");
controller->dma_tx = NULL;
goto err;
}
@@ -1353,7 +1354,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
controller->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
- dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+ dev_err_probe(dev, ret, "can't get the RX DMA channel!\n");
controller->dma_rx = NULL;
goto err;
}
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 57d767a68e7b..4337ca51d7aa 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -76,6 +76,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7f24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
@@ -84,7 +85,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xae23), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 1bf080339b5a..88cbe4f00cc3 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -39,6 +39,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
#include <trace/events/spi.h>
+#include <linux/dma/mxs-dma.h>
#define DRIVER_NAME "mxs-spi"
@@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
if (!desc) {
dev_err(ssp->dev,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index a0c9fea908f5..ddf1c684bcc7 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -53,8 +53,6 @@
/* per-register bitmasks: */
#define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
-#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY BIT(0)
-#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL BIT(2)
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
@@ -293,7 +291,7 @@ static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
}
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
- struct spi_transfer *t, int enable, int dma_enabled)
+ struct spi_transfer *t, int enable)
{
struct spi_controller *ctlr = spi->controller;
struct omap2_mcspi_cs *cs = spi->controller_state;
@@ -314,28 +312,20 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
else
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
- if (dma_enabled)
- wcnt = t->len / bytes_per_word;
- else
- wcnt = 0;
+
+ wcnt = t->len / bytes_per_word;
if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
goto disable_fifo;
xferlevel = wcnt << 16;
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
- if (dma_enabled)
- xferlevel |= (bytes_per_word - 1) << 8;
- else
- xferlevel |= (max_fifo_depth - 1) << 8;
+ xferlevel |= (bytes_per_word - 1) << 8;
}
if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
- if (dma_enabled)
- xferlevel |= bytes_per_word - 1;
- else
- xferlevel |= (max_fifo_depth - 1);
+ xferlevel |= bytes_per_word - 1;
}
mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
@@ -892,113 +882,6 @@ out:
return count - c;
}
-static unsigned
-omap2_mcspi_txrx_piofifo(struct spi_device *spi, struct spi_transfer *xfer)
-{
- struct omap2_mcspi_cs *cs = spi->controller_state;
- struct omap2_mcspi *mcspi;
- unsigned int count, c;
- unsigned int iter, cwc;
- int last_request;
- void __iomem *base = cs->base;
- void __iomem *tx_reg;
- void __iomem *rx_reg;
- void __iomem *chstat_reg;
- void __iomem *irqstat_reg;
- int word_len, bytes_per_word;
- u8 *rx;
- const u8 *tx;
-
- mcspi = spi_controller_get_devdata(spi->controller);
- count = xfer->len;
- c = count;
- word_len = cs->word_len;
- bytes_per_word = mcspi_bytes_per_word(word_len);
-
- /*
- * We store the pre-calculated register addresses on stack to speed
- * up the transfer loop.
- */
- tx_reg = base + OMAP2_MCSPI_TX0;
- rx_reg = base + OMAP2_MCSPI_RX0;
- chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
- irqstat_reg = base + OMAP2_MCSPI_IRQSTATUS;
-
- if (c < (word_len >> 3))
- return 0;
-
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
-
- do {
- /* calculate number of words in current iteration */
- cwc = min((unsigned int)mcspi->fifo_depth / bytes_per_word,
- c / bytes_per_word);
- last_request = cwc != (mcspi->fifo_depth / bytes_per_word);
- if (tx) {
- if (mcspi_wait_for_reg_bit(irqstat_reg,
- OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) < 0) {
- dev_err(&spi->dev, "TX Empty timed out\n");
- goto out;
- }
- writel_relaxed(OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY, irqstat_reg);
-
- for (iter = 0; iter < cwc; iter++, tx += bytes_per_word) {
- if (bytes_per_word == 1)
- writel_relaxed(*tx, tx_reg);
- else if (bytes_per_word == 2)
- writel_relaxed(*((u16 *)tx), tx_reg);
- else if (bytes_per_word == 4)
- writel_relaxed(*((u32 *)tx), tx_reg);
- }
- }
-
- if (rx) {
- if (!last_request &&
- mcspi_wait_for_reg_bit(irqstat_reg,
- OMAP2_MCSPI_IRQSTATUS_RX0_FULL) < 0) {
- dev_err(&spi->dev, "RX_FULL timed out\n");
- goto out;
- }
- writel_relaxed(OMAP2_MCSPI_IRQSTATUS_RX0_FULL, irqstat_reg);
-
- for (iter = 0; iter < cwc; iter++, rx += bytes_per_word) {
- if (last_request &&
- mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_RXS) < 0) {
- dev_err(&spi->dev, "RXS timed out\n");
- goto out;
- }
- if (bytes_per_word == 1)
- *rx = readl_relaxed(rx_reg);
- else if (bytes_per_word == 2)
- *((u16 *)rx) = readl_relaxed(rx_reg);
- else if (bytes_per_word == 4)
- *((u32 *)rx) = readl_relaxed(rx_reg);
- }
- }
-
- if (last_request) {
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_EOT) < 0) {
- dev_err(&spi->dev, "EOT timed out\n");
- goto out;
- }
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXFFE) < 0) {
- dev_err(&spi->dev, "TXFFE timed out\n");
- goto out;
- }
- omap2_mcspi_set_enable(spi, 0);
- }
- c -= cwc * bytes_per_word;
- } while (c >= bytes_per_word);
-
-out:
- omap2_mcspi_set_enable(spi, 1);
- return count - c;
-}
-
static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
{
u32 div;
@@ -1323,9 +1206,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
ctlr->cur_msg_mapped &&
ctlr->can_dma(ctlr, spi, t))
- omap2_mcspi_set_fifo(spi, t, 1, 1);
- else if (t->len > OMAP2_MCSPI_MAX_FIFODEPTH)
- omap2_mcspi_set_fifo(spi, t, 1, 0);
+ omap2_mcspi_set_fifo(spi, t, 1);
omap2_mcspi_set_enable(spi, 1);
@@ -1338,8 +1219,6 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
ctlr->cur_msg_mapped &&
ctlr->can_dma(ctlr, spi, t))
count = omap2_mcspi_txrx_dma(spi, t);
- else if (mcspi->fifo_depth > 0)
- count = omap2_mcspi_txrx_piofifo(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
@@ -1352,7 +1231,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
omap2_mcspi_set_enable(spi, 0);
if (mcspi->fifo_depth > 0)
- omap2_mcspi_set_fifo(spi, t, 0, 0);
+ omap2_mcspi_set_fifo(spi, t, 0);
out:
/* Restore defaults if they were overriden */
@@ -1375,7 +1254,7 @@ out:
omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
if (mcspi->fifo_depth > 0 && t)
- omap2_mcspi_set_fifo(spi, t, 0, 0);
+ omap2_mcspi_set_fifo(spi, t, 0);
return status;
}
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 03aab661be9d..82d6264841fc 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -25,11 +25,13 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/wait.h>
+#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -166,10 +168,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
int scr;
u8 cdm = 0;
u32 speed;
- u8 bits_per_word;
/* Start with the generic configuration for this device. */
- bits_per_word = spi->bits_per_word;
speed = spi->max_speed_hz;
/*
@@ -177,9 +177,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
* the transfer to overwrite the generic configuration with zeros.
*/
if (t) {
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
if (t->speed_hz)
speed = min(t->speed_hz, spi->max_speed_hz);
}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index cfc3b1ddbd22..6f12e4fb2e2e 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -136,14 +136,14 @@ struct sh_msiof_spi_priv {
/* SIFCTR */
#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
-#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
-#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
-#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
-#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
-#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
-#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
-#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */
#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
#define SIFCTR_TFUA_SHIFT 20
#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 7477a11e12be..f2170f4b5077 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1717,6 +1717,10 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
return ret;
}
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index b9934b9c2d70..9f30e0edadfe 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -384,7 +384,7 @@ static struct attribute *ssb_device_attrs[] = {
};
ATTRIBUTE_GROUPS(ssb_device);
-static struct bus_type ssb_bustype = {
+static const struct bus_type ssb_bustype = {
.name = "ssb",
.match = ssb_bus_match,
.probe = ssb_device_probe,
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index e748a5d04e97..9149d41fe65b 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work)
struct ad5933_state, work.work);
struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
__be16 buf[2];
- int val[2];
+ u16 val[2];
unsigned char status;
int ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index f44e6412f4e3..d0db2efe0045 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -3723,12 +3723,10 @@ apply_min_padding:
static int atomisp_set_crop(struct atomisp_device *isp,
const struct v4l2_mbus_framefmt *format,
+ struct v4l2_subdev_state *sd_state,
int which)
{
struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = {
.which = which,
.target = V4L2_SEL_TGT_CROP,
@@ -3754,7 +3752,7 @@ static int atomisp_set_crop(struct atomisp_device *isp,
sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;
- ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel);
+ ret = v4l2_subdev_call(input->camera, pad, set_selection, sd_state, &sel);
if (ret)
dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);
@@ -3770,9 +3768,6 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
const struct atomisp_format_bridge *fmt, *snr_fmt;
struct atomisp_sub_device *asd = &isp->asd;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -3809,11 +3804,16 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
format.format.width, format.format.height);
- ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &format.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0)
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &format);
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format);
if (ret)
return ret;
@@ -4238,9 +4238,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
struct atomisp_device *isp = asd->isp;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_format_bridge *format;
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
+ struct v4l2_subdev_state *act_sd_state;
struct v4l2_subdev_format vformat = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -4268,12 +4266,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
/* Disable dvs if resolution can't be supported by sensor */
if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &vformat.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_TRY;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &vformat);
+ }
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_TRY;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
@@ -4291,12 +4295,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
}
}
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE);
- if (ret)
- return ret;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
+ ret = atomisp_set_crop(isp, &vformat.format, act_sd_state,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, act_sd_state, &vformat);
+ }
+
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat);
if (ret)
return ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index f7b4bee9574b..d5b077e602ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -132,8 +132,8 @@ struct atomisp_input_subdev {
/* Sensor rects for sensors which support crop */
struct v4l2_rect native_rect;
struct v4l2_rect active_rect;
- /* Sensor pad_cfg for which == V4L2_SUBDEV_FORMAT_TRY calls */
- struct v4l2_subdev_pad_config pad_cfg;
+ /* Sensor state for which == V4L2_SUBDEV_FORMAT_TRY calls */
+ struct v4l2_subdev_state *try_sd_state;
struct v4l2_subdev *motor;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 01b7fa9b56a2..5b2d88c02d36 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -781,12 +781,20 @@ static int atomisp_enum_framesizes(struct file *file, void *priv,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = input->code,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
+ if (!input->camera)
+ return -EINVAL;
+
if (input->crop_support)
return atomisp_enum_framesizes_crop(isp, fsize);
- ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
@@ -803,18 +811,25 @@ static int atomisp_enum_frameintervals(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_frame_interval_enum fie = {
- .code = atomisp_in_fmt_conv[0].code,
+ .code = atomisp_in_fmt_conv[0].code,
.index = fival->index,
.width = fival->width,
.height = fival->height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, enum_frame_interval, NULL,
- &fie);
+ if (!input->camera)
+ return -EINVAL;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_interval,
+ act_sd_state, &fie);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
@@ -830,30 +845,25 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
const struct atomisp_format_bridge *format;
- struct v4l2_subdev *camera;
+ struct v4l2_subdev_state *act_sd_state;
unsigned int i, fi = 0;
- int rval;
+ int ret;
- camera = isp->inputs[asd->input_curr].camera;
- if(!camera) {
- dev_err(isp->dev, "%s(): camera is NULL, device is %s\n",
- __func__, vdev->name);
+ if (!input->camera)
return -EINVAL;
- }
- rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code);
- if (rval == -ENOIOCTLCMD) {
- dev_warn(isp->dev,
- "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n",
- camera->name);
- }
-
- if (rval)
- return rval;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &code);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
format = &atomisp_output_fmts[i];
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index c1c8501ec61f..547e1444ad97 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -862,6 +862,9 @@ static void atomisp_unregister_entities(struct atomisp_device *isp)
v4l2_device_unregister(&isp->v4l2_dev);
media_device_unregister(&isp->media_dev);
media_device_cleanup(&isp->media_dev);
+
+ for (i = 0; i < isp->input_cnt; i++)
+ __v4l2_subdev_state_free(isp->inputs[i].try_sd_state);
}
static int atomisp_register_entities(struct atomisp_device *isp)
@@ -933,32 +936,49 @@ v4l2_device_failed:
static void atomisp_init_sensor(struct atomisp_input_subdev *input)
{
+ static struct lock_class_key try_sd_state_key;
struct v4l2_subdev_mbus_code_enum mbus_code_enum = { };
struct v4l2_subdev_frame_size_enum fse = { };
- struct v4l2_subdev_state sd_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = { };
+ struct v4l2_subdev_state *try_sd_state, *act_sd_state;
int i, err;
+ /*
+ * FIXME: Drivers are not supposed to use __v4l2_subdev_state_alloc()
+ * but atomisp needs this for try_fmt on its /dev/video# node since
+ * it emulates a normal v4l2 device there, passing through try_fmt /
+ * set_fmt to the sensor.
+ */
+ try_sd_state = __v4l2_subdev_state_alloc(input->camera,
+ "atomisp:try_sd_state->lock", &try_sd_state_key);
+ if (IS_ERR(try_sd_state))
+ return;
+
+ input->try_sd_state = try_sd_state;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum);
+ err = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &mbus_code_enum);
if (!err)
input->code = mbus_code_enum.code;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->native_rect = sel.r;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->active_rect = sel.r;
@@ -973,7 +993,8 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
fse.code = input->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ err = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
if (err)
break;
@@ -989,22 +1010,26 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
* for padding, set the crop rect to cover the entire sensor instead
* of only the default active area.
*
- * Do this for both try and active formats since the try_crop rect in
- * pad_cfg may influence (clamp) future try_fmt calls with which == try.
+ * Do this for both try and active formats since the crop rect in
+ * try_sd_state may influence (clamp size) in calls with which == try.
*/
sel.which = V4L2_SUBDEV_FORMAT_TRY;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel);
+ v4l2_subdev_lock_state(input->try_sd_state);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ input->try_sd_state, &sel);
+ v4l2_subdev_unlock_state(input->try_sd_state);
if (err)
- return;
+ goto unlock_act_sd_state;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
input->native_rect.width, input->native_rect.height,
@@ -1012,6 +1037,10 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
input->binning_support);
input->crop_support = true;
+
+unlock_act_sd_state:
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
}
int atomisp_register_device_nodes(struct atomisp_device *isp)
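A minimal sketch of the locked-state calling convention the atomisp hunk above adopts, assuming the v4l2-subdev state API from <media/v4l2-subdev.h>; v4l2_subdev_lock_and_get_active_state() may return NULL for a subdev without an active state, hence the guarded unlock:

	struct v4l2_subdev_state *act_sd_state;
	struct v4l2_subdev_selection sel = { };
	int err;

	/* Lock the sensor's active state once... */
	act_sd_state = v4l2_subdev_lock_and_get_active_state(sd);

	/* ...pass it to every pad op instead of NULL... */
	err = v4l2_subdev_call(sd, pad, get_selection, act_sd_state, &sel);

	/* ...and unlock it on every exit path. */
	if (act_sd_state)
		v4l2_subdev_unlock_state(act_sd_state);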
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0b262de6480..283804b49e91 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1515,7 +1515,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER)
RFbSetPower(priv, priv->wCurrentRate,
- conf->chandef.chan->hw_value);
+ conf->chanreq.oper.chan->hw_value);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->pcid->dev,
@@ -1684,6 +1684,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 2abae90f3f52..7bbed462f062 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -794,7 +794,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_set_bss_mode(priv);
if (changed & (BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH))
- vnt_rf_setpower(priv, conf->chandef.chan);
+ vnt_rf_setpower(priv, conf->chanreq.oper.chan);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->usb->dev,
@@ -956,6 +956,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f58988130a..c1fbcdd16182 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item,
return count;
}
+static int target_try_configure_unmap(struct se_device *dev,
+ const char *config_opt)
+{
+ if (!dev->transport->configure_unmap) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ if (!target_dev_configured(dev)) {
+ pr_err("Generic Block Discard setup for %s requires device to be configured\n",
+ config_opt);
+ return -ENODEV;
+ }
+
+ if (!dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard setup for %s failed\n",
+ config_opt);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
@@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpu");
+ if (ret)
+ return ret;
}
da->emulate_tpu = flag;
@@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpws");
+ if (ret)
+ return ret;
}
da->emulate_tpws = flag;
@@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
* Discard supported is detected iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
- da->da_dev);
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
+ if (ret)
+ return ret;
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 506193e870c4..7a85e6477e46 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -147,7 +147,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
struct se_session *se_sess = se_cmd->se_sess;
struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- unsigned long flags;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
@@ -178,10 +177,6 @@ out_unlock:
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
- spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
- list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
- spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
-
return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 41b7489d37ce..ed4fd22eac6e 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -907,12 +907,15 @@ new_bio:
return 0;
fail:
- if (bio)
- bio_put(bio);
+ if (bio) {
+ bio_uninit(bio);
+ kfree(bio);
+ }
while (req->bio) {
bio = req->bio;
req->bio = bio->bi_next;
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
}
req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
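The pscsi error paths above switch from bio_put() to bio_uninit() + kfree(). A hedged sketch of why, assuming the current bio_kmalloc() contract in block/bio.c, under which the bio is a bare kmalloc() allocation with no bio_set for bio_put() to return a reference to:

	struct bio *bio;

	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);	/* plain kmalloc, no bio_set */
	if (!bio)
		return NULL;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	/* ... on the error path, teardown must mirror the allocation: */
	bio_uninit(bio);	/* tear down what bio_init() set up */
	kfree(bio);		/* matches bio_kmalloc(); bio_put() would be wrong */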
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 670cfb7bd426..73d0d6133ac8 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3629,6 +3629,10 @@ int transport_generic_handle_tmr(
unsigned long flags;
bool aborted = false;
+ spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
+ list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
+ spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->transport_state & CMD_T_ABORTED) {
aborted = true;
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 4b1092127694..1892e49a8e6a 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func)
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
put_device(&optee_device->dev);
+ return rc;
}
if (func == PTA_CMD_GET_DEVICES_SUPP)
device_create_file(&optee_device->dev,
&dev_attr_need_supplicant);
- return rc;
+ return 0;
}
static int __optee_enumerate_devices(u32 func)
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 5ac5cb60bae6..bc6eb0dd66a4 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -49,7 +49,6 @@
*/
#define DEFAULT_DURATION_JIFFIES (6)
-static unsigned int target_mwait;
static struct dentry *debug_dir;
static bool poll_pkg_cstate_enable;
@@ -312,34 +311,6 @@ MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
"\twindow size results in slower response time but more smooth\n"
"\tclamping results. default to 2.");
-static void find_target_mwait(void)
-{
- unsigned int eax, ebx, ecx, edx;
- unsigned int highest_cstate = 0;
- unsigned int highest_subcstate = 0;
- int i;
-
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
-
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
-
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
- return;
-
- edx >>= MWAIT_SUBSTATE_SIZE;
- for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
- if (edx & MWAIT_SUBSTATE_MASK) {
- highest_cstate = i;
- highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
- }
- }
- target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
- (highest_subcstate - 1);
-
-}
-
struct pkg_cstate_info {
bool skip;
int msr_index;
@@ -759,9 +730,6 @@ static int __init powerclamp_probe(void)
return -ENODEV;
}
- /* find the deepest mwait value */
- find_target_mwait();
-
return 0;
}
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
index 99ca0c7bc41c..0f475fe46bc9 100644
--- a/drivers/thermal/loongson2_thermal.c
+++ b/drivers/thermal/loongson2_thermal.c
@@ -8,9 +8,10 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/thermal.h>
#include <linux/units.h>
#include "thermal_hwmon.h"
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 87e4795275fe..6f798f6a2b84 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -203,7 +203,7 @@ struct tb_regs_switch_header {
#define ROUTER_CS_5_WOP BIT(1)
#define ROUTER_CS_5_WOU BIT(2)
#define ROUTER_CS_5_WOD BIT(3)
-#define ROUTER_CS_5_C3S BIT(23)
+#define ROUTER_CS_5_CNS BIT(23)
#define ROUTER_CS_5_PTO BIT(24)
#define ROUTER_CS_5_UTO BIT(25)
#define ROUTER_CS_5_HCO BIT(26)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index f8f0d24ff6e4..1515eff8cc3e 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -290,7 +290,7 @@ int usb4_switch_setup(struct tb_switch *sw)
}
/* TBT3 supported by the CM */
- val |= ROUTER_CS_5_C3S;
+ val &= ~ROUTER_CS_5_CNS;
return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4f9264d005c0..c2a4e88b328f 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -108,13 +108,15 @@ config HVC_DCC_SERIALIZE_SMP
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV_SBI_V01
+ depends on RISCV_SBI && NONPORTABLE
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
- is normally used only during boot to output printk.
+ is normally used only during boot to output printk. This driver
+ conflicts with real console drivers and should not be enabled on
+ systems that directly access the console.
- If you don't know what do to here, say Y.
+ If you don't know what to do here, say N.
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
index a72591279f86..cede8a572594 100644
--- a/drivers/tty/hvc/hvc_riscv_sbi.c
+++ b/drivers/tty/hvc/hvc_riscv_sbi.c
@@ -40,21 +40,44 @@ static ssize_t hvc_sbi_tty_get(uint32_t vtermno, u8 *buf, size_t count)
return i;
}
-static const struct hv_ops hvc_sbi_ops = {
+static const struct hv_ops hvc_sbi_v01_ops = {
.get_chars = hvc_sbi_tty_get,
.put_chars = hvc_sbi_tty_put,
};
-static int __init hvc_sbi_init(void)
+static ssize_t hvc_sbi_dbcn_tty_put(uint32_t vtermno, const u8 *buf, size_t count)
{
- return PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_ops, 16));
+ return sbi_debug_console_write(buf, count);
}
-device_initcall(hvc_sbi_init);
-static int __init hvc_sbi_console_init(void)
+static ssize_t hvc_sbi_dbcn_tty_get(uint32_t vtermno, u8 *buf, size_t count)
{
- hvc_instantiate(0, 0, &hvc_sbi_ops);
+ return sbi_debug_console_read(buf, count);
+}
+
+static const struct hv_ops hvc_sbi_dbcn_ops = {
+ .put_chars = hvc_sbi_dbcn_tty_put,
+ .get_chars = hvc_sbi_dbcn_tty_get,
+};
+
+static int __init hvc_sbi_init(void)
+{
+ int err;
+
+ if (sbi_debug_console_available) {
+ err = PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_dbcn_ops, 256));
+ if (err)
+ return err;
+ hvc_instantiate(0, 0, &hvc_sbi_dbcn_ops);
+ } else if (IS_ENABLED(CONFIG_RISCV_SBI_V01)) {
+ err = PTR_ERR_OR_ZERO(hvc_alloc(0, 0, &hvc_sbi_v01_ops, 256));
+ if (err)
+ return err;
+ hvc_instantiate(0, 0, &hvc_sbi_v01_ops);
+ } else {
+ return -ENODEV;
+ }
return 0;
}
-console_initcall(hvc_sbi_console_init);
+device_initcall(hvc_sbi_init);
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index 558c4c7f3104..2dda737b1660 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -311,7 +311,7 @@ static void pci1xxxx_process_read_data(struct uart_port *port,
}
while (*valid_byte_count) {
- if (*buff_index > RX_BUF_SIZE)
+ if (*buff_index >= RX_BUF_SIZE)
break;
rx_buff[*buff_index] = readb(port->membase +
UART_RX_BYTE_FIFO);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 8b1f5756002f..ffcf4882b25f 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -87,7 +87,7 @@ config SERIAL_EARLYCON_SEMIHOST
config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI"
- depends on RISCV_SBI_V01
+ depends on RISCV_SBI
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index fccec1698a54..cf2c890a560f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1339,11 +1339,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
}
}
+static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+{
+ struct uart_port *port = &uap->port;
+ u32 cr;
+
+ /* Enable transmitter */
+ cr = pl011_read(uap, REG_CR);
+ cr |= UART011_CR_TXE;
+
+ /* Disable receiver if half-duplex */
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ cr &= ~UART011_CR_RXE;
+
+ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+ cr &= ~UART011_CR_RTS;
+ else
+ cr |= UART011_CR_RTS;
+
+ pl011_write(cr, uap, REG_CR);
+
+ if (port->rs485.delay_rts_before_send)
+ mdelay(port->rs485.delay_rts_before_send);
+
+ uap->rs485_tx_started = true;
+}
+
static void pl011_start_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+ !uap->rs485_tx_started)
+ pl011_rs485_tx_start(uap);
+
if (!pl011_dma_tx_start(uap))
pl011_start_tx_pio(uap);
}
@@ -1424,42 +1454,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
return true;
}
-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
-{
- struct uart_port *port = &uap->port;
- u32 cr;
-
- /* Enable transmitter */
- cr = pl011_read(uap, REG_CR);
- cr |= UART011_CR_TXE;
-
- /* Disable receiver if half-duplex */
- if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
- cr &= ~UART011_CR_RXE;
-
- if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
-
- pl011_write(cr, uap, REG_CR);
-
- if (port->rs485.delay_rts_before_send)
- mdelay(port->rs485.delay_rts_before_send);
-
- uap->rs485_tx_started = true;
-}
-
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
- if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
- !uap->rs485_tx_started)
- pl011_rs485_tx_start(uap);
-
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
return true;
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
index 27afb0b74ea7..0162155f0c83 100644
--- a/drivers/tty/serial/earlycon-riscv-sbi.c
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -15,17 +15,38 @@ static void sbi_putc(struct uart_port *port, unsigned char c)
sbi_console_putchar(c);
}
-static void sbi_console_write(struct console *con,
- const char *s, unsigned n)
+static void sbi_0_1_console_write(struct console *con,
+ const char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
uart_console_write(&dev->port, s, n, sbi_putc);
}
+static void sbi_dbcn_console_write(struct console *con,
+ const char *s, unsigned int n)
+{
+ int ret;
+
+ while (n) {
+ ret = sbi_debug_console_write(s, n);
+ if (ret < 0)
+ break;
+
+ s += ret;
+ n -= ret;
+ }
+}
+
static int __init early_sbi_setup(struct earlycon_device *device,
const char *opt)
{
- device->con->write = sbi_console_write;
+ if (sbi_debug_console_available)
+ device->con->write = sbi_dbcn_console_write;
+ else if (IS_ENABLED(CONFIG_RISCV_SBI_V01))
+ device->con->write = sbi_0_1_console_write;
+ else
+ return -ENODEV;
+
return 0;
}
EARLYCON_DECLARE(sbi, early_sbi_setup);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f3a99daebdaa..10bf6d75bf9e 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -237,6 +237,14 @@
#define MAX310x_REV_MASK (0xf8)
#define MAX310X_WRITE_BIT 0x80
+/* Port startup definitions */
+#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */
+#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */
+
+/* Crystal-related definitions */
+#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */
+#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */
+
/* MAX3107 specific */
#define MAX3107_REV_ID (0xa0)
@@ -583,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
-static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
unsigned long freq, bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
@@ -641,12 +649,20 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
/* Wait for crystal */
if (xtal) {
- unsigned int val;
- msleep(10);
- regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
- if (!(val & MAX310X_STS_CLKREADY_BIT)) {
- dev_warn(dev, "clock is not stable yet\n");
- }
+ bool stable = false;
+ unsigned int try = 0, val = 0;
+
+ do {
+ msleep(MAX310X_XTAL_WAIT_DELAY_MS);
+ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+
+ if (val & MAX310X_STS_CLKREADY_BIT)
+ stable = true;
+ } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
+
+ if (!stable)
+ return dev_err_probe(dev, -EAGAIN,
+ "clock is not stable\n");
}
return bestfreq;
@@ -1271,7 +1287,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
{
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
- u32 uartclk = 0;
+ s32 uartclk = 0;
bool xtal;
for (i = 0; i < devtype->nr; i++)
@@ -1334,6 +1350,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
+ bool started = false;
+ unsigned int try = 0, val = 0;
+
/* Reset port */
regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
@@ -1342,13 +1361,27 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
/* Wait for port startup */
do {
- regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
- } while (ret != 0x01);
+ msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS);
+ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val);
+
+ if (val == 0x01)
+ started = true;
+ } while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
+
+ if (!started) {
+ ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
+ goto out_uart;
+ }
regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
+ if (uartclk < 0) {
+ ret = uartclk;
+ goto out_uart;
+ }
+
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
for (i = 0; i < devtype->nr; i++) {
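Both bounded waits added above (crystal ready and port startup) share the same poll-with-retry shape. As a hedged alternative sketch, regmap_read_poll_timeout() from <linux/regmap.h> can express the crystal wait declaratively with equivalent timing (10 ms per poll, 200 ms total):

	unsigned int val;
	int err;

	err = regmap_read_poll_timeout(s->regmap, MAX310X_STS_IRQSTS_REG, val,
				       val & MAX310X_STS_CLKREADY_BIT,
				       10 * USEC_PER_MSEC, 200 * USEC_PER_MSEC);
	if (err)
		return dev_err_probe(dev, -EAGAIN, "clock is not stable\n");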
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 3ec725555bcc..4749331fe618 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -605,13 +605,16 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
return;
}
- pending = uart_port_tx(&s->port, ch,
+ pending = uart_port_tx_flags(&s->port, ch, UART_TX_NOSTOP,
!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF),
mxs_write(ch, s, REG_DATA));
if (pending)
mxs_set(AUART_INTR_TXIEN, s, REG_INTR);
else
mxs_clr(AUART_INTR_TXIEN, s, REG_INTR);
+
+ if (uart_tx_stopped(&s->port))
+ mxs_auart_stop_tx(&s->port);
}
static void mxs_auart_rx_char(struct mxs_auart_port *s)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index b56ed8c376b2..d6a58a9e072a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1084,8 +1084,8 @@ static int uart_tiocmget(struct tty_struct *tty)
goto out;
if (!tty_io_error(tty)) {
- result = uport->mctrl;
uart_port_lock_irq(uport);
+ result = uport->mctrl;
result |= uport->ops->get_mctrl(uport);
uart_port_unlock_irq(uport);
}
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 794b77512740..693e932d6feb 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -251,7 +251,9 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
+ if (!port->rs485_rx_during_tx_gpio)
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
} else {
stm32_usart_clr_bits(port, ofs->cr3,
USART_CR3_DEM | USART_CR3_DEP);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index d1e33328ff3f..3b89c9d4aa40 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1469,7 +1469,7 @@ static int ufshcd_devfreq_target(struct device *dev,
int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
- bool scale_up, sched_clk_scaling_suspend_work = false;
+ bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
unsigned long irq_flags;
@@ -3057,7 +3057,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
*/
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
- u32 mask = 1U << task_tag;
+ u32 mask;
unsigned long flags;
int err;
@@ -3075,6 +3075,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
return 0;
}
+ mask = 1U << task_tag;
+
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, mask);
@@ -6352,7 +6354,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_hold(hba);
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
- ufshcd_release(hba);
pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
ufshcd_vops_resume(hba, pm_op);
} else {
@@ -8725,7 +8726,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);
out:
return ret;
@@ -8994,15 +8994,12 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
/* Probe and add UFS logical units */
ret = ufshcd_add_lus(hba);
+
out:
- /*
- * If we failed to initialize the device or the device is not
- * present, turn off the power/clocks etc.
- */
- if (ret) {
- pm_runtime_put_sync(hba->dev);
- ufshcd_hba_exit(hba);
- }
+ pm_runtime_put_sync(hba->dev);
+
+ if (ret)
+ dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
}
static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 480787048e75..39eef470f8fa 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1716,7 +1716,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- goto out;
+ return ret;
}
msi_lock_descs(hba->dev);
@@ -1750,11 +1750,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
REG_UFS_CFG3);
ufshcd_mcq_enable_esi(hba);
- }
-
-out:
- if (!ret)
host->esi_enabled = true;
+ }
return ret;
}
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index aeca902ab6cc..fd1beb10bba7 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
return;
}
- if (request->complete) {
+ /*
+ * The zlp request is appended by the driver itself, so there is no
+ * need to call usb_gadget_giveback_request() to notify the gadget
+ * composite driver.
+ */
+ if (request->complete && request->buf != priv_dev->zlp_buf) {
spin_unlock(&priv_dev->lock);
usb_gadget_giveback_request(&priv_ep->endpoint,
request);
@@ -2540,11 +2544,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+ list_del_init(&priv_req->list);
kfree(priv_req->request.buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
}
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 33548771a0d3..465e9267b49c 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -395,7 +395,6 @@ pm_put:
return ret;
}
-
/**
* cdns_wakeup_irq - interrupt handler for wakeup events
* @irq: irq number for cdns3/cdnsp core device
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 04b6d12f2b9a..ee917f1b091c 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
*/
static void cdns_otg_disable_irq(struct cdns *cdns)
{
- writel(0, &cdns->otg_irq_regs->ien);
+ if (cdns->version)
+ writel(0, &cdns->otg_irq_regs->ien);
}
/**
@@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns)
cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
- if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+ state = readl(&cdns->otg_cdnsp_regs->did);
+
+ if (OTG_CDNSP_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_cdnsp_regs->ien;
cdns->version = CDNSP_CONTROLLER_V2;
- } else {
+ } else if (OTG_CDNS3_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_v1_regs->ien;
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
+ } else {
+ dev_err(cdns->dev, "not supporte DID=0x%08x\n", state);
+ return -EINVAL;
}
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
@@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns)
return 0;
}
-
/* Indicate the cdns3 core was power lost before */
bool cdns_power_is_lost(struct cdns *cdns)
{
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index cbdf94f73ed9..d72370c321d3 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
__le32 susp_timing_ctrl;
};
-#define OTG_CDNSP_DID 0x0004034E
+/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
+#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
+
+/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
+#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
/*
* Common registers interface for both CDNS3 and CDNSP version of DRD.
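A worked example of the new family checks, using the DID that the old exact-match test accepted: 0x0004034E & GENMASK(31, 8) is 0x00040300, so OTG_CDNSP_CHECK_DID() still matches it, while any 0x000402xx part now routes to the CDNS3 branch. The same arithmetic in compile-time form (hedged sketch, assuming <linux/build_bug.h> static_assert):

	#include <linux/bits.h>
	#include <linux/build_bug.h>

	static_assert(OTG_CDNSP_CHECK_DID(0x0004034E));	/* old OTG_CDNSP_DID */
	static_assert(OTG_CDNS3_CHECK_DID(0x000402AB));	/* any 0x000402xx part */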
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 6164fc4c96a4..ceca4d839dfd 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -18,6 +18,11 @@
#include "../host/xhci.h"
#include "../host/xhci-plat.h"
+/*
+ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 registers exist only
+ * in the Cadence USB3 dual-role controller, so they can't be used
+ * with the Cadence CDNSP dual-role controller.
+ */
#define XECP_PORT_CAP_REG 0x8000
#define XECP_AUX_CTRL_REG1 0x8120
@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.resume_quirk = xhci_cdns3_resume_quirk,
};
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+
static int __cdns_host_init(struct cdns *cdns)
{
struct platform_device *xhci;
@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
goto err1;
}
- cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
- sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ if (cdns->version < CDNSP_CONTROLLER_V2)
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ else
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+
if (!cdns->xhci_plat_data) {
ret = -ENOMEM;
goto err1;
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index d9bb3d3f026e..2a38e1eb6546 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -176,6 +176,7 @@ struct hw_bank {
* @enabled_otg_timer_bits: bits of enabled otg timers
* @next_otg_timer: next nearest enabled timer to be expired
* @work: work for role changing
+ * @power_lost_work: work for power lost handling
* @wq: workqueue thread
* @qh_pool: allocation pool for queue heads
* @td_pool: allocation pool for transfer descriptors
@@ -226,6 +227,7 @@ struct ci_hdrc {
enum otg_fsm_timer next_otg_timer;
struct usb_role_switch *role_switch;
struct work_struct work;
+ struct work_struct power_lost_work;
struct workqueue_struct *wq;
struct dma_pool *qh_pool;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 41014f93cfdf..835bf2428dc6 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -856,6 +856,27 @@ static int ci_extcon_register(struct ci_hdrc *ci)
return 0;
}
+static void ci_power_lost_work(struct work_struct *work)
+{
+ struct ci_hdrc *ci = container_of(work, struct ci_hdrc, power_lost_work);
+ enum ci_role role;
+
+ disable_irq_nosync(ci->irq);
+ pm_runtime_get_sync(ci->dev);
+ if (!ci_otg_is_fsm_mode(ci)) {
+ role = ci_get_role(ci);
+
+ if (ci->role != role) {
+ ci_handle_id_switch(ci);
+ } else if (role == CI_ROLE_GADGET) {
+ if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
+ usb_gadget_vbus_connect(&ci->gadget);
+ }
+ }
+ pm_runtime_put_sync(ci->dev);
+ enable_irq(ci->irq);
+}
+
static DEFINE_IDA(ci_ida);
struct platform_device *ci_hdrc_add_device(struct device *dev,
@@ -1045,6 +1066,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
spin_lock_init(&ci->lock);
mutex_init(&ci->mutex);
+ INIT_WORK(&ci->power_lost_work, ci_power_lost_work);
+
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -1396,25 +1419,6 @@ static int ci_suspend(struct device *dev)
return 0;
}
-static void ci_handle_power_lost(struct ci_hdrc *ci)
-{
- enum ci_role role;
-
- disable_irq_nosync(ci->irq);
- if (!ci_otg_is_fsm_mode(ci)) {
- role = ci_get_role(ci);
-
- if (ci->role != role) {
- ci_handle_id_switch(ci);
- } else if (role == CI_ROLE_GADGET) {
- if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
- usb_gadget_vbus_connect(&ci->gadget);
- }
- }
-
- enable_irq(ci->irq);
-}
-
static int ci_resume(struct device *dev)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -1446,7 +1450,7 @@ static int ci_resume(struct device *dev)
ci_role(ci)->resume(ci, power_lost);
if (power_lost)
- ci_handle_power_lost(ci);
+ queue_work(system_freezable_wq, &ci->power_lost_work);
if (ci->supports_runtime_pm) {
pm_runtime_disable(dev);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 84d91b1c1eed..0886b19d2e1c 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
return ret;
}
- root = debugfs_create_dir(dev_name(dev), ulpi_root);
+ root = debugfs_create_dir(dev_name(&ulpi->dev), ulpi_root);
debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index ffd7c99e24a3..e38a4124f610 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2053,9 +2053,19 @@ static void update_port_device_state(struct usb_device *udev)
if (udev->parent) {
hub = usb_hub_to_struct_hub(udev->parent);
- port_dev = hub->ports[udev->portnum - 1];
- WRITE_ONCE(port_dev->state, udev->state);
- sysfs_notify_dirent(port_dev->state_kn);
+
+ /*
+ * The Link Layer Validation System Driver (lvstest)
+ * has a test step to unbind the hub before running the
+ * rest of the procedure. This triggers hub_disconnect
+ * which will set the hub's maxchild to 0, further
+ * resulting in usb_hub_to_struct_hub returning NULL.
+ */
+ if (hub) {
+ port_dev = hub->ports[udev->portnum - 1];
+ WRITE_ONCE(port_dev->state, udev->state);
+ sysfs_notify_dirent(port_dev->state_kn);
+ }
}
}
@@ -2388,17 +2398,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
}
} else if (desc->bLength == sizeof
(struct usb_otg_descriptor)) {
- /* Set a_alt_hnp_support for legacy otg device */
- err = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_SET_FEATURE, 0,
- USB_DEVICE_A_ALT_HNP_SUPPORT,
- 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- if (err < 0)
- dev_err(&udev->dev,
- "set a_alt_hnp_support failed: %d\n",
- err);
+ /*
+ * We are operating on a legacy OTG device.
+ * It should be told that it is operating
+ * on the wrong port if we have another port that does
+ * support HNP.
+ */
+ if (bus->otg_port != 0) {
+ /* Set a_alt_hnp_support for legacy otg device */
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_A_ALT_HNP_SUPPORT,
+ 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err < 0)
+ dev_err(&udev->dev,
+ "set a_alt_hnp_support failed: %d\n",
+ err);
+ }
}
}
#endif
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e3eea965e57b..e120611a5174 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -376,7 +376,6 @@
/* Global HWPARAMS4 Register */
#define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13)
#define DWC3_MAX_HIBER_SCRATCHBUFS 15
-#define DWC3_EXT_BUFF_CONTROL BIT(21)
/* Global HWPARAMS6 Register */
#define DWC3_GHWPARAMS6_BCSUPPORT BIT(14)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6604845c397c..39564e17f3b0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -51,6 +51,8 @@
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
+#define PCI_DEVICE_ID_INTEL_ARLH 0x7ec1
+#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -421,6 +423,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, ARLH, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 019368f8e9c4..28f49400f3e8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -673,12 +673,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
}
- if (dep->endpoint.fifo_mode) {
- if (!(dwc->hwparams.hwparams4 & DWC3_EXT_BUFF_CONTROL))
- return -EINVAL;
- params.param1 |= DWC3_DEPCFG_EBC_HWO_NOWB | DWC3_DEPCFG_USE_EBC;
- }
-
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
@@ -2656,6 +2650,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
int ret;
spin_lock_irqsave(&dwc->lock, flags);
+ if (!dwc->pullups_connected) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+ }
+
dwc->connected = false;
/*
@@ -4709,15 +4708,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
unsigned long flags;
int ret;
- if (!dwc->gadget_driver)
- return 0;
-
ret = dwc3_gadget_soft_disconnect(dwc);
if (ret)
goto err;
spin_lock_irqsave(&dwc->lock, flags);
- dwc3_disconnect_gadget(dwc);
+ if (dwc->gadget_driver)
+ dwc3_disconnect_gadget(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index fd7a4e94397e..55a56cf67d73 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -26,8 +26,6 @@ struct dwc3;
#define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10)
#define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11)
#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13)
-#define DWC3_DEPCFG_EBC_HWO_NOWB BIT(14)
-#define DWC3_DEPCFG_USE_EBC BIT(15)
#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE BIT(24)
#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 61f57fe5bb78..43230915323c 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -61,7 +61,7 @@ out:
int dwc3_host_init(struct dwc3 *dwc)
{
- struct property_entry props[4];
+ struct property_entry props[5];
struct platform_device *xhci;
int ret, irq;
int prop_idx = 0;
@@ -89,6 +89,8 @@ int dwc3_host_init(struct dwc3 *dwc)
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk");
+
if (dwc->usb3_lpm_capable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 722a3ab2b337..c265a1f62fc1 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -545,21 +545,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
+ int rc;
+
if (!fsg_is_set(common))
return false;
bh->state = BUF_STATE_SENDING;
- if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
+ rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
+ if (rc) {
bh->state = BUF_STATE_EMPTY;
+ if (rc == -ESHUTDOWN) {
+ common->running = 0;
+ return false;
+ }
+ }
return true;
}
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
+ int rc;
+
if (!fsg_is_set(common))
return false;
bh->state = BUF_STATE_RECEIVING;
- if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
+ rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
+ if (rc) {
bh->state = BUF_STATE_FULL;
+ if (rc == -ESHUTDOWN) {
+ common->running = 0;
+ return false;
+ }
+ }
return true;
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index a1575a0ca568..e2a059cfda2c 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -105,8 +105,8 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/*
* Although max mtu as dictated by u_ether is 15412 bytes, setting
- * max_segment_sizeto 15426 would not be efficient. If user chooses segment
- * size to be (>= 8192), then we can't aggregate more than one buffer in each
+ * max_segment_size to 15426 would not be efficient. If user chooses segment
+ * size to be (>= 8192), then we can't aggregate more than one buffer in each
* NTB (assuming each packet coming from network layer is >= 8192 bytes) as ep
* maxpacket limit is 16384. So let max_segment_size be limited to 8000 to allow
* at least 2 packets to be aggregated reducing wastage of NTB buffer space
@@ -1338,7 +1338,15 @@ parse_ntb:
"Parsed NTB with %d frames\n", dgram_counter);
to_process -= block_len;
- if (to_process != 0) {
+
+ /*
+ * Windows NCM driver avoids USB ZLPs by adding a 1-byte
+ * zero pad as needed.
+ */
+ if (to_process == 1 &&
+ (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+ to_process--;
+ } else if (to_process > 0) {
ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
goto parse_ntb;
}
@@ -1489,7 +1497,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm_data_intf.bInterfaceNumber = status;
ncm_union_desc.bSlaveInterface0 = status;
- ecm_desc.wMaxSegmentSize = ncm_opts->max_segment_size;
+ ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size);
status = -ENODEV;
@@ -1685,7 +1693,7 @@ static struct usb_function_instance *ncm_alloc_inst(void)
kfree(opts);
return ERR_CAST(net);
}
- opts->max_segment_size = cpu_to_le16(ETH_FRAME_LEN);
+ opts->max_segment_size = ETH_FRAME_LEN;
INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
descs[0] = &opts->ncm_os_desc;
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 10c5d7f726a1..f90eeecf27de 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
static inline int machine_without_vbus_sense(void)
{
- return machine_is_omap_osk() || machine_is_sx1();
+ return machine_is_omap_osk() || machine_is_omap_palmte() ||
+ machine_is_sx1();
}
static int omap_udc_start(struct usb_gadget *g,
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 4f8617210d85..169f72665739 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -274,7 +274,6 @@ struct pch_udc_cfg_data {
* @td_data: for data request
* @dev: reference to device struct
* @offset_addr: offset address of ep register
- * @desc: for this ep
* @queue: queue for requests
* @num: endpoint number
* @in: endpoint is IN
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index ac3fc5970315..cfebb833668e 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
static int uhci_grlib_init(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 4460fa7e9fab..a7716202a8dd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,14 +1861,14 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned int intr_num;
+ spin_lock_irq(&xhci->lock);
+
/* interrupter 0 is primary interrupter, don't touch it */
- if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters)
+ if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
-
- /* fixme, should we check xhci->interrupter[intr_num] == ir */
- /* fixme locking */
-
- spin_lock_irq(&xhci->lock);
+ spin_unlock_irq(&xhci->lock);
+ return;
+ }
intr_num = ir->intr_num;
@@ -2322,7 +2322,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
u64 erst_base;
u32 erst_size;
- if (intr_num > xhci->max_interrupters) {
+ if (intr_num >= xhci->max_interrupters) {
xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
intr_num, xhci->max_interrupters);
return -EINVAL;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index f04fde19f551..3d071b875308 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -253,6 +253,9 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
+ if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
+ xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
+
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 33806ae966f9..f0d8a607ff21 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2376,6 +2376,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
+ /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
+ if (td->error_mid_td)
+ break;
if (remaining) {
frame->status = short_framestatus;
if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
@@ -2391,9 +2394,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_BANDWIDTH_OVERRUN_ERROR:
frame->status = -ECOMM;
break;
- case COMP_ISOCH_BUFFER_OVERRUN:
case COMP_BABBLE_DETECTED_ERROR:
+ sum_trbs_for_length = true;
+ fallthrough;
+ case COMP_ISOCH_BUFFER_OVERRUN:
frame->status = -EOVERFLOW;
+ if (ep_trb != td->last_trb)
+ td->error_mid_td = true;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
case COMP_STALL_ERROR:
@@ -2401,8 +2408,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
case COMP_USB_TRANSACTION_ERROR:
frame->status = -EPROTO;
+ sum_trbs_for_length = true;
if (ep_trb != td->last_trb)
- return 0;
+ td->error_mid_td = true;
break;
case COMP_STOPPED:
sum_trbs_for_length = true;
@@ -2422,6 +2430,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
}
+ if (td->urb_length_set)
+ goto finish_td;
+
if (sum_trbs_for_length)
frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
ep_trb_len - remaining;
@@ -2430,6 +2441,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb->actual_length += frame->actual_length;
+finish_td:
+ /* Don't give back TD yet if we encountered an error mid TD */
+ if (td->error_mid_td && ep_trb != td->last_trb) {
+ xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
+ td->urb_length_set = true;
+ return 0;
+ }
+
return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
@@ -2808,17 +2827,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
if (!ep_seg) {
- if (!ep->skip ||
- !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
- /* Some host controllers give a spurious
- * successful event after a short transfer.
- * Ignore it.
- */
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
- ep_ring->last_td_was_short) {
- ep_ring->last_td_was_short = false;
- goto cleanup;
+
+ if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ skip_isoc_td(xhci, td, ep, status);
+ goto cleanup;
+ }
+
+ /*
+ * Some hosts give a spurious success event after a short
+ * transfer. Ignore it.
+ */
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ goto cleanup;
+ }
+
+ /*
+ * xhci 4.10.2 states isoc endpoints should continue
+ * processing the next TD if there was an error mid TD.
+ * So hosts like NEC don't generate an event for the last
+ * isoc TRB even if the IOC flag is set.
+ * xhci 4.9.1 states that if there are errors in multi-TRB
+ * TDs the xHC should generate an error for that TRB, and if the
+ * xHC proceeds to the next TD it should generate an event for
+ * any TRB with the IOC flag on the way. Other hosts follow this.
+ * So this event might be for the next TD.
+ */
+ if (td->error_mid_td &&
+ !list_is_last(&td->td_list, &ep_ring->td_list)) {
+ struct xhci_td *td_next = list_next_entry(td, td_list);
+
+ ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
+ td_next->last_trb, ep_trb_dma, false);
+ if (ep_seg) {
+ /* give back previous TD, start handling new */
+ xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
+ ep_ring->dequeue = td->last_trb;
+ ep_ring->deq_seg = td->last_trb_seg;
+ inc_deq(xhci, ep_ring);
+ xhci_td_cleanup(xhci, td, ep_ring, td->status);
+ td = td_next;
}
+ }
+
+ if (!ep_seg) {
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
@@ -2830,9 +2883,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_trb_dma, true);
return -ESHUTDOWN;
}
-
- skip_isoc_td(xhci, td, ep, status);
- goto cleanup;
}
if (trb_comp_code == COMP_SHORT_PACKET)
ep_ring->last_td_was_short = true;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a5c72a634e6a..6f82d404883f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1549,6 +1549,7 @@ struct xhci_td {
struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
+ bool error_mid_td;
unsigned int num_trbs;
};
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index ae41578bd014..70165dd86b5d 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -21,7 +21,9 @@ static const struct class role_class = {
struct usb_role_switch {
struct device dev;
struct mutex lock; /* device lock*/
+ struct module *module; /* the module this device depends on */
enum usb_role role;
+ bool registered;
/* From descriptor */
struct device *usb2_port;
@@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
if (IS_ERR_OR_NULL(sw))
return 0;
+ if (!sw->registered)
+ return -EOPNOTSUPP;
+
mutex_lock(&sw->lock);
ret = sw->set(sw, role);
@@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
{
enum usb_role role;
- if (IS_ERR_OR_NULL(sw))
+ if (IS_ERR_OR_NULL(sw) || !sw->registered)
return USB_ROLE_NONE;
mutex_lock(&sw->lock);
@@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
NULL, usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
void usb_role_switch_put(struct usb_role_switch *sw)
{
if (!IS_ERR_OR_NULL(sw)) {
- module_put(sw->dev.parent->driver->owner);
+ module_put(sw->module);
put_device(&sw->dev);
}
}
@@ -189,15 +194,18 @@ struct usb_role_switch *
usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
{
struct device *dev;
+ struct usb_role_switch *sw = NULL;
if (!fwnode)
return NULL;
dev = class_find_device_by_fwnode(&role_class, fwnode);
- if (dev)
- WARN_ON(!try_module_get(dev->parent->driver->owner));
+ if (dev) {
+ sw = to_role_switch(dev);
+ WARN_ON(!try_module_get(sw->module));
+ }
- return dev ? to_role_switch(dev) : NULL;
+ return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
@@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent,
sw->set = desc->set;
sw->get = desc->get;
+ sw->module = parent->driver->owner;
sw->dev.parent = parent;
sw->dev.fwnode = desc->fwnode;
sw->dev.class = &role_class;
@@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ sw->registered = true;
+
/* TODO: Symlinks for the host port and the device controller. */
return sw;
@@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ sw->registered = false;
device_unregister(&sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1e61fe043171..923e0ed85444 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
{ USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 72390dbf0769..2ae124c49d44 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2269,6 +2269,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index b1e844bf31f8..703a9c563557 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -184,6 +184,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
{DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x8217)}, /* Dell Wireless DW5826e */
+ {DEVICE_SWI(0x413c, 0x8218)}, /* Dell Wireless DW5826e QDL */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c54e9805da53..12cf9940e5b6 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -180,6 +180,13 @@ static int slave_configure(struct scsi_device *sdev)
sdev->use_192_bytes_for_3f = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9707f53cfda9..71ace274761f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -879,6 +879,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->guess_capacity = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 5945e3a2b0f7..66e532edcece 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3743,9 +3743,6 @@ static void tcpm_detach(struct tcpm_port *port)
if (tcpm_port_is_disconnected(port))
port->hard_reset_count = 0;
- port->try_src_count = 0;
- port->try_snk_count = 0;
-
if (!port->attached)
return;
@@ -6848,7 +6845,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
if (err)
goto out_role_sw_put;
- port->typec_caps.pd = port->pds[0];
+ if (port->pds)
+ port->typec_caps.pd = port->pds[0];
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
if (IS_ERR(port->typec_port)) {
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 5392ec698959..14f5a7bfae2e 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -938,7 +938,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
clear_bit(EVENT_PENDING, &con->ucsi->flags);
+ mutex_lock(&ucsi->ppm_lock);
ret = ucsi_acknowledge_connector_change(ucsi);
+ mutex_unlock(&ucsi->ppm_lock);
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 6bbf490ac401..928eacbeb21a 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -25,6 +25,8 @@ struct ucsi_acpi {
unsigned long flags;
guid_t guid;
u64 cmd;
+ bool dell_quirk_probed;
+ bool dell_quirk_active;
};
static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
@@ -73,9 +75,13 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
int ret;
- set_bit(COMMAND_PENDING, &ua->flags);
+ if (ack)
+ set_bit(ACK_PENDING, &ua->flags);
+ else
+ set_bit(COMMAND_PENDING, &ua->flags);
ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
if (ret)
@@ -85,7 +91,10 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
ret = -ETIMEDOUT;
out_clear_bit:
- clear_bit(COMMAND_PENDING, &ua->flags);
+ if (ack)
+ clear_bit(ACK_PENDING, &ua->flags);
+ else
+ clear_bit(COMMAND_PENDING, &ua->flags);
return ret;
}
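
The notify handler used to complete the waiter whenever either CCI bit was set, while the writer tracked every transfer with the single COMMAND_PENDING bit, so an ACK completion could wake a waiter for an ordinary command (or vice versa). Giving ACKs their own pending bit ties each notification to the right waiter. A minimal sketch of the split — struct my_ctx, my_send_async() and the 5-second timeout are hypothetical stand-ins, not the driver's actual names:

    enum { MY_COMMAND_PENDING, MY_ACK_PENDING };

    struct my_ctx {
            unsigned long flags;
            struct completion complete;
    };

    /* Writer side: wait on the bit that matches the message kind. */
    static int my_sync_write(struct my_ctx *ctx, bool is_ack)
    {
            int bit = is_ack ? MY_ACK_PENDING : MY_COMMAND_PENDING;
            int ret = 0;

            reinit_completion(&ctx->complete);
            set_bit(bit, &ctx->flags);
            my_send_async(ctx);                     /* hypothetical transmit helper */
            if (!wait_for_completion_timeout(&ctx->complete,
                                             msecs_to_jiffies(5000)))
                    ret = -ETIMEDOUT;
            clear_bit(bit, &ctx->flags);
            return ret;
    }

    /* Notify side: only complete a waiter of the matching kind. */
    static void my_notify(struct my_ctx *ctx, u32 cci)
    {
            if (cci & UCSI_CCI_ACK_COMPLETE &&
                test_bit(MY_ACK_PENDING, &ctx->flags))
                    complete(&ctx->complete);
            if (cci & UCSI_CCI_COMMAND_COMPLETE &&
                test_bit(MY_COMMAND_PENDING, &ctx->flags))
                    complete(&ctx->complete);
    }
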
@@ -119,12 +128,73 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
.async_write = ucsi_acpi_async_write
};
-static const struct dmi_system_id zenbook_dmi_id[] = {
+/*
+ * Some Dell laptops expect that an ACK command with the
+ * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
+ * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
+ * If this is not done, events are not delivered to OSPM and
+ * subsequent commands will time out.
+ */
+static int
+ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ u64 cmd = *(u64 *)val, ack = 0;
+ int ret;
+
+ if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
+ cmd & UCSI_ACK_CONNECTOR_CHANGE)
+ ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
+
+ ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+ if (ret != 0)
+ return ret;
+ if (ack == 0)
+ return ret;
+
+ if (!ua->dell_quirk_probed) {
+ ua->dell_quirk_probed = true;
+
+ cmd = UCSI_GET_CAPABILITY;
+ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
+ sizeof(cmd));
+ if (ret == 0)
+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
+ &ack, sizeof(ack));
+ if (ret != -ETIMEDOUT)
+ return ret;
+
+ ua->dell_quirk_active = true;
+ dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
+ dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
+ }
+
+ if (!ua->dell_quirk_active)
+ return ret;
+
+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
+}
+
+static const struct ucsi_operations ucsi_dell_ops = {
+ .read = ucsi_acpi_read,
+ .sync_write = ucsi_dell_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
+static const struct dmi_system_id ucsi_acpi_quirks[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
},
+ .driver_data = (void *)&ucsi_zenbook_ops,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ .driver_data = (void *)&ucsi_dell_ops,
},
{ }
};
@@ -142,8 +212,10 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(COMMAND_PENDING, &ua->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
+ complete(&ua->complete);
+ if (cci & UCSI_CCI_COMMAND_COMPLETE &&
+ test_bit(COMMAND_PENDING, &ua->flags))
complete(&ua->complete);
}
@@ -151,6 +223,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
const struct ucsi_operations *ops = &ucsi_acpi_ops;
+ const struct dmi_system_id *id;
struct ucsi_acpi *ua;
struct resource *res;
acpi_status status;
@@ -180,8 +253,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
init_completion(&ua->complete);
ua->dev = &pdev->dev;
- if (dmi_check_system(zenbook_dmi_id))
- ops = &ucsi_zenbook_ops;
+ id = dmi_first_match(ucsi_acpi_quirks);
+ if (id)
+ ops = id->driver_data;
ua->ucsi = ucsi_create(&pdev->dev, ops);
if (IS_ERR(ua->ucsi))
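
The probe-time selection generalises the old single ZenBook check into a table walk: dmi_first_match() returns the first entry whose DMI fields all match, and that entry's ->driver_data carries the ucsi_operations to install. A minimal sketch of the pattern, with my_default_ops and my_vendor_ops as hypothetical stand-ins:

    static const struct dmi_system_id my_quirks[] = {
            {
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Some Product"),
                    },
                    .driver_data = (void *)&my_vendor_ops,
            },
            { }     /* zeroed terminator is mandatory */
    };

    static const struct ucsi_operations *my_pick_ops(void)
    {
            const struct dmi_system_id *id = dmi_first_match(my_quirks);

            return id ? id->driver_data : &my_default_ops;
    }

Note that the Dell entry above matches on vendor alone, i.e. every Dell machine; that is roughly why ucsi_dell_sync_write() also probes at runtime and only flags the quirk as active after observing a command time out following a connector-change ACK.
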
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f2ed7167c848..4b2fcb228a0a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,10 +141,8 @@ struct vhost_net {
unsigned tx_zcopy_err;
/* Flush in progress. Protected by tx vq lock. */
bool tx_flush;
- /* Private page frag */
- struct page_frag page_frag;
- /* Refcount bias of page frag */
- int refcnt_bias;
+ /* Private page frag cache */
+ struct page_frag_cache pf_cache;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -655,41 +653,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
-static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
- struct page_frag *pfrag, gfp_t gfp)
-{
- if (pfrag->page) {
- if (pfrag->offset + sz <= pfrag->size)
- return true;
- __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
- }
-
- pfrag->offset = 0;
- net->refcnt_bias = 0;
- if (SKB_FRAG_PAGE_ORDER) {
- /* Avoid direct reclaim but allow kswapd to wake */
- pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
- __GFP_COMP | __GFP_NOWARN |
- __GFP_NORETRY,
- SKB_FRAG_PAGE_ORDER);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
- goto done;
- }
- }
- pfrag->page = alloc_page(gfp);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE;
- goto done;
- }
- return false;
-
-done:
- net->refcnt_bias = USHRT_MAX;
- page_ref_add(pfrag->page, USHRT_MAX - 1);
- return true;
-}
-
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
@@ -699,7 +662,6 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
struct socket *sock = vhost_vq_get_backend(vq);
- struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
struct tun_xdp_hdr *hdr;
@@ -710,6 +672,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
int sock_hlen = nvq->sock_hlen;
void *buf;
int copied;
+ int ret;
if (unlikely(len < nvq->sock_hlen))
return -EFAULT;
@@ -719,18 +682,17 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
return -ENOSPC;
buflen += SKB_DATA_ALIGN(len + pad);
- alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
- if (unlikely(!vhost_net_page_frag_refill(net, buflen,
- alloc_frag, GFP_KERNEL)))
+ buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+ SMP_CACHE_BYTES);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset +
- offsetof(struct tun_xdp_hdr, gso),
- sock_hlen, from);
- if (copied != sock_hlen)
- return -EFAULT;
+ copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+ sock_hlen, from);
+ if (copied != sock_hlen) {
+ ret = -EFAULT;
+ goto err;
+ }
hdr = buf;
gso = &hdr->gso;
@@ -743,27 +705,30 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2);
- if (vhost16_to_cpu(vq, gso->hdr_len) > len)
- return -EINVAL;
+ if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+ ret = -EINVAL;
+ goto err;
+ }
}
len -= sock_hlen;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset + pad,
- len, from);
- if (copied != len)
- return -EFAULT;
+ copied = copy_from_iter(buf + pad, len, from);
+ if (copied != len) {
+ ret = -EFAULT;
+ goto err;
+ }
xdp_init_buff(xdp, buflen, NULL);
xdp_prepare_buff(xdp, buf, pad, len, true);
hdr->buflen = buflen;
- --net->refcnt_bias;
- alloc_frag->offset += buflen;
-
++nvq->batched_xdp;
return 0;
+
+err:
+ page_frag_free(buf);
+ return ret;
}
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
@@ -1353,8 +1318,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
- n->page_frag.page = NULL;
- n->refcnt_bias = 0;
+ n->pf_cache.va = NULL;
return 0;
}
@@ -1422,8 +1386,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
- if (n->page_frag.page)
- __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+ page_frag_cache_drain(&n->pf_cache);
kvfree(n);
return 0;
}
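
This replaces vhost-net's hand-rolled fragment allocator (a cached compound page plus a USHRT_MAX refcount bias) with the generic page_frag_cache API: page_frag_alloc_align() carves an aligned buffer out of a cached page, page_frag_free() returns a single fragment on an error path, and page_frag_cache_drain() releases whatever the cache still holds at teardown. The lifecycle, sketched with a hypothetical my_fill() consumer:

    static struct page_frag_cache cache = { .va = NULL };   /* empty cache */

    static int my_build_buf(unsigned int buflen)
    {
            void *buf;

            buf = page_frag_alloc_align(&cache, buflen, GFP_KERNEL,
                                        SMP_CACHE_BYTES);
            if (unlikely(!buf))
                    return -ENOMEM;

            /* if filling fails, hand the fragment straight back */
            if (my_fill(buf, buflen) < 0) {         /* hypothetical */
                    page_frag_free(buf);
                    return -EFAULT;
            }
            return 0;
    }

    static void my_teardown(void)
    {
            page_frag_cache_drain(&cache);          /* drop the cached page */
    }

In the patch itself only the new error paths call page_frag_free(); successfully built buffers are released later by whatever consumes the XDP frames.
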
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 63af6ab034b5..46823c2e2ba1 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -631,8 +631,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (logo_lines > vc->vc_bottom) {
logo_shown = FBCON_LOGO_CANSHOW;
- printk(KERN_INFO
- "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
+ pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n");
} else {
logo_shown = FBCON_LOGO_DRAW;
vc->vc_top = logo_lines;
@@ -2400,11 +2399,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
- char *old_data = NULL;
+ u8 *old_data = vc->vc_font.data;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
- if (p->userfont)
- old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
old_userfont = p->userfont;
if ((p->userfont = userfont))
@@ -2438,13 +2435,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
update_screen(vc);
}
- if (old_data && (--REFCOUNT(old_data) == 0))
+ if (old_userfont && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
err_out:
p->fontdata = old_data;
- vc->vc_font.data = (void *)old_data;
+ vc->vc_font.data = old_data;
if (userfont) {
p->userfont = old_userfont;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index c26ee6fd73c9..8fdccf033b2d 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1010,8 +1010,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
- } else {
- goto err1;
}
/*
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index dddd6afcb972..ebc9aeffdde7 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo *var,
DBG("savagefb_check_var");
+ if (!var->pixclock)
+ return -EINVAL;
+
var->transp.offset = 0;
var->transp.length = 0;
switch (var->bits_per_pixel) {
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 803ccb6aa479..009bf1d92644 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1444,6 +1444,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+ if (!var->pixclock)
+ return -EINVAL;
pixclock = var->pixclock;
if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
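
Both fbdev fixes guard against a zero var->pixclock arriving from userspace via FBIOPUT_VSCREENINFO, since the drivers' mode math divides by it. The guard belongs at the top of the driver's check_var(); a minimal sketch:

    static int my_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
    {
            if (!var->pixclock)
                    return -EINVAL; /* would divide by zero below */

            /* ... validate and adjust the rest of *var ... */
            return 0;
    }
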
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 2de0e675fd15..8e5bac27542d 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1158,7 +1158,7 @@ stifb_init_display(struct stifb_info *fb)
}
break;
}
- stifb_blank(0, (struct fb_info *)fb); /* 0=enable screen */
+ stifb_blank(0, fb->info); /* 0=enable screen */
SETUP_FB(fb);
}
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index 42c25dc85197..ac73937073a7 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -374,7 +374,6 @@ static int vt8500lcd_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ defined\n");
ret = -ENODEV;
goto failed_free_palette;
}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b8cfea7812d6..3b9f080109d7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -923,8 +923,8 @@ static void shutdown_pirq(struct irq_data *data)
return;
do_mask(info, EVT_MASK_REASON_EXPLICIT);
- xen_evtchn_close(evtchn);
xen_irq_info_cleanup(info);
+ xen_evtchn_close(evtchn);
}
static void enable_pirq(struct irq_data *data)
@@ -956,6 +956,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
{
evtchn_port_t evtchn;
+ bool close_evtchn = false;
if (!info) {
xen_irq_free_desc(irq);
@@ -975,7 +976,7 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
struct xenbus_device *dev;
if (!info->is_static)
- xen_evtchn_close(evtchn);
+ close_evtchn = true;
switch (info->type) {
case IRQT_VIRQ:
@@ -995,6 +996,9 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
}
xen_irq_info_cleanup(info);
+
+ if (close_evtchn)
+ xen_evtchn_close(evtchn);
}
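
Both hunks reorder the teardown so that xen_irq_info_cleanup() runs before xen_evtchn_close(). Once the port is closed, Xen may hand the same port number to a concurrent binder, which would then race against the stale mapping; unpublishing the mapping first closes that window. A hedged summary of the ordering, following the shutdown_pirq() case:

    do_mask(info, EVT_MASK_REASON_EXPLICIT);  /* 1. stop event delivery       */
    xen_irq_info_cleanup(info);               /* 2. unpublish port->irq state */
    xen_evtchn_close(evtchn);                 /* 3. only now return the port  */
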
xen_free_irq(info);
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 26ffb8755ffb..f93f73ecefee 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -317,7 +317,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
rc = -EFAULT;
goto out_free;
}
- if (copy_to_user(arg->gref_ids, gref_ids,
+ if (copy_to_user(arg->gref_ids_flex, gref_ids,
sizeof(gref_ids[0]) * op.count)) {
rc = -EFAULT;
goto out_free;
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 508655273145..c63f317e3df3 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -65,7 +65,7 @@ struct pcpu {
uint32_t flags;
};
-static struct bus_type xen_pcpu_subsys = {
+static const struct bus_type xen_pcpu_subsys = {
.name = "xen_cpu",
.dev_name = "xen_cpu",
};
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 35b6e306026a..67dfa4778864 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1223,18 +1223,13 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
mmap_write_unlock(mm);
- size = sizeof(*ports) * kioreq->vcpus;
- ports = kzalloc(size, GFP_KERNEL);
- if (!ports) {
- ret = -ENOMEM;
+ ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
+ kioreq->vcpus, sizeof(*ports));
+ if (IS_ERR(ports)) {
+ ret = PTR_ERR(ports);
goto error_kfree;
}
- if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) {
- ret = -EFAULT;
- goto error_kfree_ports;
- }
-
for (i = 0; i < kioreq->vcpus; i++) {
kioreq->ports[i].vcpu = i;
kioreq->ports[i].port = ports[i];
@@ -1256,7 +1251,7 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
error_unbind:
while (--i >= 0)
unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
-error_kfree_ports:
+
kfree(ports);
error_kfree:
kfree(kioreq);
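
memdup_array_user() folds the allocate-and-copy pair into one call: it checks the n * size multiplication for overflow, allocates, copies, and encodes failure as an ERR_PTR (-EOVERFLOW, -ENOMEM or -EFAULT) rather than returning NULL. The typical shape, with uptr and n as hypothetical inputs:

    static u64 *my_copy_ports(u64 __user *uptr, unsigned int n)
    {
            u64 *ports = memdup_array_user(uptr, n, sizeof(*ports));

            if (IS_ERR(ports))
                    return ports;   /* caller extracts the error with PTR_ERR() */
            /* ... use ports[0..n-1], eventually kfree(ports) ... */
            return ports;
    }
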
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 8cd583db20b1..b293d7652f15 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -237,7 +237,7 @@ static const struct attribute_group *balloon_groups[] = {
NULL
};
-static struct bus_type balloon_subsys = {
+static const struct bus_type balloon_subsys = {
.name = BALLOON_CLASS_NAME,
.dev_name = BALLOON_CLASS_NAME,
};
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 32835b4b9bc5..51b3124b0d56 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -116,14 +116,15 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
* @dev: xenbus device
* @path: path to watch
* @watch: watch to register
+ * @will_handle: callback that decides whether an event is queued
* @callback: callback to register
*
* Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback. On success,
- * the given @path will be saved as @watch->node, and remains the
- * caller's to free. On error, @watch->node will
- * be NULL, the device will switch to %XenbusStateClosing, and the error will
- * be saved in the store.
+ * for storage, the @will_handle function as the callback that decides whether
+ * each event needs to be queued, and the given @callback function as the callback.
+ * On success, the given @path will be saved as @watch->node, and remains the
+ * caller's to free. On error, @watch->node will be NULL, the device will
+ * switch to %XenbusStateClosing, and the error will be saved in the store.
*
* Returns: %0 on success or -errno on error
*/
@@ -158,11 +159,13 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
* xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
* @dev: xenbus device
* @watch: watch to register
+ * @will_handle: callback that decides whether an event is queued
* @callback: callback to register
* @pathfmt: format of path to watch
*
* Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the
+ * structure for storage, the @will_handle function as the callback that decides
+ * whether each event needs to be queued, and the given @callback function as the
* callback. On success, the watched path (@path/@path2) will be saved
* as @watch->node, and becomes the caller's to kfree().
* On error, watch->node will be NULL, so the caller has nothing to
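
The @will_handle hook runs in the watch-event delivery path before an event is queued for the xenwatch thread; returning false discards the event. A hedged sketch of a callback pair (names hypothetical):

    static bool my_will_handle(struct xenbus_watch *watch,
                               const char *path, const char *token)
    {
            /* queue events for the watched node itself, ignore children */
            return strcmp(path, watch->node) == 0;
    }

    static void my_callback(struct xenbus_watch *watch,
                            const char *path, const char *token)
    {
            /* invoked later from the xenwatch thread for each queued event */
    }

    /* registered with:
     *      xenbus_watch_path(dev, path, &watch, my_will_handle, my_callback);
     */
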
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 731e3d14b67d..0e8418066a48 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -42,6 +42,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb);
void v9fs_free_inode(struct inode *inode);
struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode,
dev_t rdev);
+void v9fs_set_netfs_context(struct inode *inode);
int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, umode_t mode, dev_t rdev);
void v9fs_evict_inode(struct inode *inode);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 8a635999a7d6..047855033d32 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -19,12 +19,45 @@
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
+#include <trace/events/netfs.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"
+static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
+{
+ struct p9_fid *fid = subreq->rreq->netfs_priv;
+ int err, len;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+ netfs_write_subrequest_terminated(subreq, len ?: err, false);
+}
+
+static void v9fs_upload_to_server_worker(struct work_struct *work)
+{
+ struct netfs_io_subrequest *subreq =
+ container_of(work, struct netfs_io_subrequest, work);
+
+ v9fs_upload_to_server(subreq);
+}
+
+/*
+ * Set up write requests for a writeback slice. We need to add a write request
+ * for each write we want to make.
+ */
+static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
+{
+ struct netfs_io_subrequest *subreq;
+
+ subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
+ start, len, v9fs_upload_to_server_worker);
+ if (subreq)
+ netfs_queue_write_request(subreq);
+}
+
/**
* v9fs_issue_read - Issue a read from 9P
* @subreq: The read to make
@@ -33,14 +66,10 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
struct p9_fid *fid = rreq->netfs_priv;
- struct iov_iter to;
- loff_t pos = subreq->start + subreq->transferred;
- size_t len = subreq->len - subreq->transferred;
int total, err;
- iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);
-
- total = p9_client_read(fid, pos, &to, &err);
+ total = p9_client_read(fid, subreq->start + subreq->transferred,
+ &subreq->io_iter, &err);
/* if we just extended the file size, any portion not in
* cache won't be on server and is zeroes */
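
The read path no longer builds its own xarray iterator over the mapping: netfslib now prepares subreq->io_iter and the filesystem just transfers into it, reporting the outcome with netfs_subreq_terminated(). A sketch of the contract for a hypothetical client, where my_rpc_read() stands in for the wire call:

    static void my_issue_read(struct netfs_io_subrequest *subreq)
    {
            int err = 0;
            ssize_t got;

            /* read from the server straight into the iterator netfslib set up */
            got = my_rpc_read(subreq->rreq->netfs_priv,
                              subreq->start + subreq->transferred,
                              &subreq->io_iter, &err);
            if (!err)
                    subreq->transferred += got;
            netfs_subreq_terminated(subreq, err ?: got, false);
    }
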
@@ -50,25 +79,42 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
}
/**
- * v9fs_init_request - Initialise a read request
+ * v9fs_init_request - Initialise a request
* @rreq: The read request
* @file: The file being read from
*/
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
- struct p9_fid *fid = file->private_data;
-
- BUG_ON(!fid);
+ struct p9_fid *fid;
+ bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
+ rreq->origin == NETFS_WRITEBACK ||
+ rreq->origin == NETFS_WRITETHROUGH ||
+ rreq->origin == NETFS_LAUNDER_WRITE ||
+ rreq->origin == NETFS_UNBUFFERED_WRITE ||
+ rreq->origin == NETFS_DIO_WRITE);
+
+ if (file) {
+ fid = file->private_data;
+ if (!fid)
+ goto no_fid;
+ p9_fid_get(fid);
+ } else {
+ fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true);
+ if (!fid)
+ goto no_fid;
+ }
/* we might need to read from a fid that was opened write-only
* for read-modify-write of page cache, use the writeback fid
* for that */
- WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE &&
- !(fid->mode & P9_ORDWR));
-
- p9_fid_get(fid);
+ WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR));
rreq->netfs_priv = fid;
return 0;
+
+no_fid:
+ WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
+ rreq->inode->i_ino);
+ return -EINVAL;
}
/**
@@ -82,281 +128,20 @@ static void v9fs_free_request(struct netfs_io_request *rreq)
p9_fid_put(fid);
}
-/**
- * v9fs_begin_cache_operation - Begin a cache operation for a read
- * @rreq: The read request
- */
-static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
-{
-#ifdef CONFIG_9P_FSCACHE
- struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
-
- return fscache_begin_read_operation(&rreq->cache_resources, cookie);
-#else
- return -ENOBUFS;
-#endif
-}
-
const struct netfs_request_ops v9fs_req_ops = {
.init_request = v9fs_init_request,
.free_request = v9fs_free_request,
- .begin_cache_operation = v9fs_begin_cache_operation,
.issue_read = v9fs_issue_read,
+ .create_write_requests = v9fs_create_write_requests,
};
-/**
- * v9fs_release_folio - release the private state associated with a folio
- * @folio: The folio to be released
- * @gfp: The caller's allocation restrictions
- *
- * Returns true if the page can be released, false otherwise.
- */
-
-static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
-{
- if (folio_test_private(folio))
- return false;
-#ifdef CONFIG_9P_FSCACHE
- if (folio_test_fscache(folio)) {
- if (current_is_kswapd() || !(gfp & __GFP_FS))
- return false;
- folio_wait_fscache(folio);
- }
- fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
-#endif
- return true;
-}
-
-static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
- size_t length)
-{
- folio_wait_fscache(folio);
-}
-
-#ifdef CONFIG_9P_FSCACHE
-static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct v9fs_inode *v9inode = priv;
- __le32 version;
-
- if (IS_ERR_VALUE(transferred_or_error) &&
- transferred_or_error != -ENOBUFS) {
- version = cpu_to_le32(v9inode->qid.version);
- fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
- i_size_read(&v9inode->netfs.inode), 0);
- }
-}
-#endif
-
-static int v9fs_vfs_write_folio_locked(struct folio *folio)
-{
- struct inode *inode = folio_inode(folio);
- loff_t start = folio_pos(folio);
- loff_t i_size = i_size_read(inode);
- struct iov_iter from;
- size_t len = folio_size(folio);
- struct p9_fid *writeback_fid;
- int err;
- struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
- struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode);
-
- if (start >= i_size)
- return 0; /* Simultaneous truncation occurred */
-
- len = min_t(loff_t, i_size - start, len);
-
- iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);
-
- writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true);
- if (!writeback_fid) {
- WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
- inode->i_private);
- return -EINVAL;
- }
-
- folio_wait_fscache(folio);
- folio_start_writeback(folio);
-
- p9_client_write(writeback_fid, start, &from, &err);
-
-#ifdef CONFIG_9P_FSCACHE
- if (err == 0 &&
- fscache_cookie_enabled(cookie) &&
- test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
- folio_start_fscache(folio);
- fscache_write_to_cache(v9fs_inode_cookie(v9inode),
- folio_mapping(folio), start, len, i_size,
- v9fs_write_to_cache_done, v9inode,
- true);
- }
-#endif
-
- folio_end_writeback(folio);
- p9_fid_put(writeback_fid);
-
- return err;
-}
-
-static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
-{
- struct folio *folio = page_folio(page);
- int retval;
-
- p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
-
- retval = v9fs_vfs_write_folio_locked(folio);
- if (retval < 0) {
- if (retval == -EAGAIN) {
- folio_redirty_for_writepage(wbc, folio);
- retval = 0;
- } else {
- mapping_set_error(folio_mapping(folio), retval);
- }
- } else
- retval = 0;
-
- folio_unlock(folio);
- return retval;
-}
-
-static int v9fs_launder_folio(struct folio *folio)
-{
- int retval;
-
- if (folio_clear_dirty_for_io(folio)) {
- retval = v9fs_vfs_write_folio_locked(folio);
- if (retval)
- return retval;
- }
- folio_wait_fscache(folio);
- return 0;
-}
-
-/**
- * v9fs_direct_IO - 9P address space operation for direct I/O
- * @iocb: target I/O control block
- * @iter: The data/buffer to use
- *
- * The presence of v9fs_direct_IO() in the address space ops vector
- * allowes open() O_DIRECT flags which would have failed otherwise.
- *
- * In the non-cached mode, we shunt off direct read and write requests before
- * the VFS gets them, so this method should never be called.
- *
- * Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
- *
- */
-static ssize_t
-v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- loff_t pos = iocb->ki_pos;
- ssize_t n;
- int err = 0;
-
- if (iov_iter_rw(iter) == WRITE) {
- n = p9_client_write(file->private_data, pos, iter, &err);
- if (n) {
- struct inode *inode = file_inode(file);
- loff_t i_size = i_size_read(inode);
-
- if (pos + n > i_size)
- inode_add_bytes(inode, pos + n - i_size);
- }
- } else {
- n = p9_client_read(file->private_data, pos, iter, &err);
- }
- return n ? n : err;
-}
-
-static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned int len,
- struct page **subpagep, void **fsdata)
-{
- int retval;
- struct folio *folio;
- struct v9fs_inode *v9inode = V9FS_I(mapping->host);
-
- p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
- /* Prefetch area to be written into the cache if we're caching this
- * file. We need to do this before we get a lock on the page in case
- * there's more than one writer competing for the same cache block.
- */
- retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
- if (retval < 0)
- return retval;
-
- *subpagep = &folio->page;
- return retval;
-}
-
-static int v9fs_write_end(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int copied,
- struct page *subpage, void *fsdata)
-{
- loff_t last_pos = pos + copied;
- struct folio *folio = page_folio(subpage);
- struct inode *inode = mapping->host;
-
- p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
- if (!folio_test_uptodate(folio)) {
- if (unlikely(copied < len)) {
- copied = 0;
- goto out;
- }
-
- folio_mark_uptodate(folio);
- }
-
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold the i_mutex.
- */
- if (last_pos > inode->i_size) {
- inode_add_bytes(inode, last_pos - inode->i_size);
- i_size_write(inode, last_pos);
-#ifdef CONFIG_9P_FSCACHE
- fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL,
- &last_pos);
-#endif
- }
- folio_mark_dirty(folio);
-out:
- folio_unlock(folio);
- folio_put(folio);
-
- return copied;
-}
-
-#ifdef CONFIG_9P_FSCACHE
-/*
- * Mark a page as having been made dirty and thus needing writeback. We also
- * need to pin the cache object to write back to.
- */
-static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
-{
- struct v9fs_inode *v9inode = V9FS_I(mapping->host);
-
- return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
-}
-#else
-#define v9fs_dirty_folio filemap_dirty_folio
-#endif
-
const struct address_space_operations v9fs_addr_operations = {
- .read_folio = netfs_read_folio,
- .readahead = netfs_readahead,
- .dirty_folio = v9fs_dirty_folio,
- .writepage = v9fs_vfs_writepage,
- .write_begin = v9fs_write_begin,
- .write_end = v9fs_write_end,
- .release_folio = v9fs_release_folio,
- .invalidate_folio = v9fs_invalidate_folio,
- .launder_folio = v9fs_launder_folio,
- .direct_IO = v9fs_direct_IO,
+ .read_folio = netfs_read_folio,
+ .readahead = netfs_readahead,
+ .dirty_folio = netfs_dirty_folio,
+ .release_folio = netfs_release_folio,
+ .invalidate_folio = netfs_invalidate_folio,
+ .launder_folio = netfs_launder_folio,
+ .direct_IO = noop_direct_IO,
+ .writepages = netfs_writepages,
};
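
After the conversion every entry the VM can call is a netfslib helper, and the 9p-specific surface is confined to v9fs_req_ops above: init_request pins a fid, issue_read and create_write_requests move bytes, free_request unpins. For a hypothetical client the whole transport contract reduces to:

    static const struct netfs_request_ops my_req_ops = {
            .init_request           = my_init_request,      /* attach fid/creds */
            .free_request           = my_free_request,      /* release them */
            .issue_read             = my_issue_read,        /* server -> io_iter */
            .create_write_requests  = my_create_write_requests, /* io_iter -> server */
    };
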
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 11cd8d23f6f2..bae330c2f0cf 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -353,25 +353,15 @@ static ssize_t
v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct p9_fid *fid = iocb->ki_filp->private_data;
- int ret, err = 0;
p9_debug(P9_DEBUG_VFS, "fid %d count %zu offset %lld\n",
fid->fid, iov_iter_count(to), iocb->ki_pos);
- if (!(fid->mode & P9L_DIRECT)) {
- p9_debug(P9_DEBUG_VFS, "(cached)\n");
- return generic_file_read_iter(iocb, to);
- }
-
- if (iocb->ki_filp->f_flags & O_NONBLOCK)
- ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
- else
- ret = p9_client_read(fid, iocb->ki_pos, to, &err);
- if (!ret)
- return err;
+ if (fid->mode & P9L_DIRECT)
+ return netfs_unbuffered_read_iter(iocb, to);
- iocb->ki_pos += ret;
- return ret;
+ p9_debug(P9_DEBUG_VFS, "(cached)\n");
+ return netfs_file_read_iter(iocb, to);
}
/*
@@ -407,46 +397,14 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct p9_fid *fid = file->private_data;
- ssize_t retval;
- loff_t origin;
- int err = 0;
p9_debug(P9_DEBUG_VFS, "fid %d\n", fid->fid);
- if (!(fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))) {
- p9_debug(P9_DEBUG_CACHE, "(cached)\n");
- return generic_file_write_iter(iocb, from);
- }
+ if (fid->mode & (P9L_DIRECT | P9L_NOWRITECACHE))
+ return netfs_unbuffered_write_iter(iocb, from);
- retval = generic_write_checks(iocb, from);
- if (retval <= 0)
- return retval;
-
- origin = iocb->ki_pos;
- retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
- if (retval > 0) {
- struct inode *inode = file_inode(file);
- loff_t i_size;
- unsigned long pg_start, pg_end;
-
- pg_start = origin >> PAGE_SHIFT;
- pg_end = (origin + retval - 1) >> PAGE_SHIFT;
- if (inode->i_mapping && inode->i_mapping->nrpages)
- invalidate_inode_pages2_range(inode->i_mapping,
- pg_start, pg_end);
- iocb->ki_pos += retval;
- i_size = i_size_read(inode);
- if (iocb->ki_pos > i_size) {
- inode_add_bytes(inode, iocb->ki_pos - i_size);
- /*
- * Need to serialize against i_size_write() in
- * v9fs_stat2inode()
- */
- v9fs_i_size_write(inode, iocb->ki_pos);
- }
- return retval;
- }
- return err;
+ p9_debug(P9_DEBUG_CACHE, "(cached)\n");
+ return netfs_file_write_iter(iocb, from);
}
static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
@@ -519,36 +477,7 @@ v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
- struct folio *folio = page_folio(vmf->page);
- struct file *filp = vmf->vma->vm_file;
- struct inode *inode = file_inode(filp);
-
-
- p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
- folio, (unsigned long)filp->private_data);
-
- /* Wait for the page to be written to the cache before we allow it to
- * be modified. We then assume the entire page will need writing back.
- */
-#ifdef CONFIG_9P_FSCACHE
- if (folio_test_fscache(folio) &&
- folio_wait_fscache_killable(folio) < 0)
- return VM_FAULT_NOPAGE;
-#endif
-
- /* Update file times before taking page lock */
- file_update_time(filp);
-
- if (folio_lock_killable(folio) < 0)
- return VM_FAULT_RETRY;
- if (folio_mapping(folio) != inode->i_mapping)
- goto out_unlock;
- folio_wait_stable(folio);
-
- return VM_FAULT_LOCKED;
-out_unlock:
- folio_unlock(folio);
- return VM_FAULT_NOPAGE;
+ return netfs_page_mkwrite(vmf, NULL);
}
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index b845ee18a80b..32572982f72e 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -246,10 +246,10 @@ void v9fs_free_inode(struct inode *inode)
/*
* Set parameters for the netfs library
*/
-static void v9fs_set_netfs_context(struct inode *inode)
+void v9fs_set_netfs_context(struct inode *inode)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
- netfs_inode_init(&v9inode->netfs, &v9fs_req_ops);
+ netfs_inode_init(&v9inode->netfs, &v9fs_req_ops, true);
}
int v9fs_init_inode(struct v9fs_session_info *v9ses,
@@ -326,8 +326,6 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
err = -EINVAL;
goto error;
}
-
- v9fs_set_netfs_context(inode);
error:
return err;
@@ -359,6 +357,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
iput(inode);
return ERR_PTR(err);
}
+ v9fs_set_netfs_context(inode);
return inode;
}
@@ -374,11 +373,8 @@ void v9fs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
-#ifdef CONFIG_9P_FSCACHE
version = cpu_to_le32(v9inode->qid.version);
- fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode,
- &version);
-#endif
+ netfs_clear_inode_writeback(inode, &version);
clear_inode(inode);
filemap_fdatawrite(&inode->i_data);
@@ -464,6 +460,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
goto error;
v9fs_stat2inode(st, inode, sb, 0);
+ v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
return inode;
@@ -1113,7 +1110,7 @@ static int v9fs_vfs_setattr(struct mnt_idmap *idmap,
if ((iattr->ia_valid & ATTR_SIZE) &&
iattr->ia_size != i_size_read(inode)) {
truncate_setsize(inode, iattr->ia_size);
- truncate_pagecache(inode, iattr->ia_size);
+ netfs_resize_file(netfs_inode(inode), iattr->ia_size, true);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache & CACHE_FSCACHE) {
@@ -1181,6 +1178,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
+ v9inode->netfs.remote_i_size = stat->length;
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
v9fs_i_size_write(inode, stat->length);
/* not real number of blocks, but 512 byte ones ... */
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index c7319af2f471..3505227e1704 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -128,6 +128,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
goto error;
v9fs_stat2inode_dotl(st, inode, 0);
+ v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
if (retval)
@@ -598,7 +599,7 @@ int v9fs_vfs_setattr_dotl(struct mnt_idmap *idmap,
if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size !=
i_size_read(inode)) {
truncate_setsize(inode, iattr->ia_size);
- truncate_pagecache(inode, iattr->ia_size);
+ netfs_resize_file(netfs_inode(inode), iattr->ia_size, true);
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache & CACHE_FSCACHE)
@@ -655,6 +656,7 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
+ v9inode->netfs.remote_i_size = stat->st_size;
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
v9fs_i_size_write(inode, stat->st_size);
inode->i_blocks = stat->st_blocks;
@@ -683,8 +685,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
inode->i_mode = mode;
}
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
- stat->st_result_mask & P9_STATS_SIZE)
+ stat->st_result_mask & P9_STATS_SIZE) {
+ v9inode->netfs.remote_i_size = stat->st_size;
v9fs_i_size_write(inode, stat->st_size);
+ }
if (stat->st_result_mask & P9_STATS_BLOCKS)
inode->i_blocks = stat->st_blocks;
}
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 73db55c050bf..941f7d0e0bfa 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -289,31 +289,21 @@ static int v9fs_drop_inode(struct inode *inode)
static int v9fs_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
- struct v9fs_inode *v9inode;
-
/*
* send an fsync request to server irrespective of
* wbc->sync_mode.
*/
p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
-
- v9inode = V9FS_I(inode);
- fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
-
- return 0;
+ return netfs_unpin_writeback(inode, wbc);
}
static int v9fs_write_inode_dotl(struct inode *inode,
struct writeback_control *wbc)
{
- struct v9fs_inode *v9inode;
- v9inode = V9FS_I(inode);
p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
- fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
-
- return 0;
+ return netfs_unpin_writeback(inode, wbc);
}
static const struct super_operations v9fs_super_ops = {
diff --git a/fs/Kconfig b/fs/Kconfig
index a3159831ba98..89fdbefd1075 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -144,7 +144,6 @@ source "fs/overlayfs/Kconfig"
menu "Caches"
source "fs/netfs/Kconfig"
-source "fs/fscache/Kconfig"
source "fs/cachefiles/Kconfig"
endmenu
diff --git a/fs/Makefile b/fs/Makefile
index a6962c588962..c09016257f05 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -61,7 +61,6 @@ obj-$(CONFIG_DLM) += dlm/
# Do not add any filesystems before this line
obj-$(CONFIG_NETFS_SUPPORT) += netfs/
-obj-$(CONFIG_FSCACHE) += fscache/
obj-$(CONFIG_REISERFS_FS) += reiserfs/
obj-$(CONFIG_EXT4_FS) += ext4/
# We place ext4 before ext2 so that clean ext3 root fs's do NOT mount using the
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 60685ec76d98..2e612834329a 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -105,6 +105,7 @@ struct affs_sb_info {
int work_queued; /* non-zero delayed work is queued */
struct delayed_work sb_work; /* superblock flush delayed work */
spinlock_t work_lock; /* protects sb_work and work_queued */
+ struct rcu_head rcu;
};
#define AFFS_MOUNT_SF_INTL 0x0001 /* International filesystem. */
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 58b391446ae1..b56a95cf414a 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -640,7 +640,7 @@ static void affs_kill_sb(struct super_block *sb)
affs_brelse(sbi->s_root_bh);
kfree(sbi->s_prefix);
mutex_destroy(&sbi->s_bmlock);
- kfree(sbi);
+ kfree_rcu(sbi, rcu);
}
}
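
Freeing the sb_info with kfree_rcu() (enabled by the rcu_head added to struct affs_sb_info above) defers the actual kfree() past an RCU grace period, so lockless walkers that can still dereference sb->s_fs_info never touch freed memory. The shape of the change, sketched generically:

    struct my_sb_info {
            /* ... filesystem fields ... */
            struct rcu_head rcu;            /* allows deferred reclamation */
    };

    static void my_kill_sb(struct super_block *sb)
    {
            struct my_sb_info *sbi = sb->s_fs_info;

            kill_block_super(sb);
            if (sbi)
                    kfree_rcu(sbi, rcu);    /* freed only after readers finish */
    }
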
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index c14533ef108f..8a67fc427e74 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -124,7 +124,7 @@ static void afs_dir_read_cleanup(struct afs_read *req)
if (xas_retry(&xas, folio))
continue;
BUG_ON(xa_is_value(folio));
- ASSERTCMP(folio_file_mapping(folio), ==, mapping);
+ ASSERTCMP(folio->mapping, ==, mapping);
folio_put(folio);
}
@@ -202,12 +202,12 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
if (xas_retry(&xas, folio))
continue;
- BUG_ON(folio_file_mapping(folio) != mapping);
+ BUG_ON(folio->mapping != mapping);
size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
for (offset = 0; offset < size; offset += sizeof(*block)) {
block = kmap_local_folio(folio, offset);
- pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
+ pr_warn("[%02lx] %32phN\n", folio->index + offset, block);
kunmap_local(block);
}
}
@@ -233,7 +233,7 @@ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
if (xas_retry(&xas, folio))
continue;
- BUG_ON(folio_file_mapping(folio) != mapping);
+ BUG_ON(folio->mapping != mapping);
if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
afs_dir_dump(dvnode, req);
@@ -474,6 +474,16 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
continue;
}
+ /* Don't expose silly rename entries to userspace. */
+ if (nlen > 6 &&
+ dire->u.name[0] == '.' &&
+ ctx->actor != afs_lookup_filldir &&
+ ctx->actor != afs_lookup_one_filldir &&
+ memcmp(dire->u.name, ".__afs", 6) == 0) {
+ ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+ continue;
+ }
+
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,
ntohl(dire->u.vnode),
@@ -708,6 +718,8 @@ static void afs_do_lookup_success(struct afs_operation *op)
break;
}
+ if (vp->scb.status.abort_code)
+ trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code);
if (!vp->scb.have_status && !vp->scb.have_error)
continue;
@@ -897,12 +909,16 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
afs_begin_vnode_operation(op);
afs_wait_for_operation(op);
}
- inode = ERR_PTR(afs_op_error(op));
out_op:
if (!afs_op_error(op)) {
- inode = &op->file[1].vnode->netfs.inode;
- op->file[1].vnode = NULL;
+ if (op->file[1].scb.status.abort_code) {
+ afs_op_accumulate_error(op, -ECONNABORTED,
+ op->file[1].scb.status.abort_code);
+ } else {
+ inode = &op->file[1].vnode->netfs.inode;
+ op->file[1].vnode = NULL;
+ }
}
if (op->file[0].scb.have_status)
@@ -2022,7 +2038,7 @@ static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
{
struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
- _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
+ _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index);
folio_detach_private(folio);
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 2cd40ba601f1..c4d2711e20ad 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -76,7 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
/* there shouldn't be an existing inode */
BUG_ON(!(inode->i_state & I_NEW));
- netfs_inode_init(&vnode->netfs, NULL);
+ netfs_inode_init(&vnode->netfs, NULL, false);
inode->i_size = 0;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
if (root) {
@@ -258,16 +258,7 @@ const struct inode_operations afs_dynroot_inode_operations = {
.lookup = afs_dynroot_lookup,
};
-/*
- * Dirs in the dynamic root don't need revalidation.
- */
-static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
-{
- return 1;
-}
-
const struct dentry_operations afs_dynroot_dentry_operations = {
- .d_revalidate = afs_dynroot_d_revalidate,
.d_delete = always_delete_dentry,
.d_release = afs_d_release,
.d_automount = afs_d_automount,
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 30914e0d9cb2..ef2cc8f565d2 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -20,9 +20,6 @@
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_symlink_read_folio(struct file *file, struct folio *folio);
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
- size_t length);
-static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -37,7 +34,7 @@ const struct file_operations afs_file_operations = {
.release = afs_release,
.llseek = generic_file_llseek,
.read_iter = afs_file_read_iter,
- .write_iter = afs_file_write,
+ .write_iter = netfs_file_write_iter,
.mmap = afs_file_mmap,
.splice_read = afs_file_splice_read,
.splice_write = iter_file_splice_write,
@@ -53,22 +50,21 @@ const struct inode_operations afs_file_inode_operations = {
};
const struct address_space_operations afs_file_aops = {
+ .direct_IO = noop_direct_IO,
.read_folio = netfs_read_folio,
.readahead = netfs_readahead,
- .dirty_folio = afs_dirty_folio,
- .launder_folio = afs_launder_folio,
- .release_folio = afs_release_folio,
- .invalidate_folio = afs_invalidate_folio,
- .write_begin = afs_write_begin,
- .write_end = afs_write_end,
- .writepages = afs_writepages,
+ .dirty_folio = netfs_dirty_folio,
+ .launder_folio = netfs_launder_folio,
+ .release_folio = netfs_release_folio,
+ .invalidate_folio = netfs_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
+ .writepages = afs_writepages,
};
const struct address_space_operations afs_symlink_aops = {
.read_folio = afs_symlink_read_folio,
- .release_folio = afs_release_folio,
- .invalidate_folio = afs_invalidate_folio,
+ .release_folio = netfs_release_folio,
+ .invalidate_folio = netfs_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
};
@@ -323,11 +319,7 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
fsreq->len = subreq->len - subreq->transferred;
fsreq->key = key_get(subreq->rreq->netfs_priv);
fsreq->vnode = vnode;
- fsreq->iter = &fsreq->def_iter;
-
- iov_iter_xarray(&fsreq->def_iter, ITER_DEST,
- &fsreq->vnode->netfs.inode.i_mapping->i_pages,
- fsreq->pos, fsreq->len);
+ fsreq->iter = &subreq->io_iter;
afs_fetch_data(fsreq->vnode, fsreq);
afs_put_read(fsreq);
@@ -359,22 +351,13 @@ static int afs_symlink_read_folio(struct file *file, struct folio *folio)
static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
{
- rreq->netfs_priv = key_get(afs_file_key(file));
+ if (file)
+ rreq->netfs_priv = key_get(afs_file_key(file));
+ rreq->rsize = 256 * 1024;
+ rreq->wsize = 256 * 1024;
return 0;
}
-static int afs_begin_cache_operation(struct netfs_io_request *rreq)
-{
-#ifdef CONFIG_AFS_FSCACHE
- struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
-
- return fscache_begin_read_operation(&rreq->cache_resources,
- afs_vnode_cache(vnode));
-#else
- return -ENOBUFS;
-#endif
-}
-
static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
struct folio **foliop, void **_fsdata)
{
@@ -388,128 +371,37 @@ static void afs_free_request(struct netfs_io_request *rreq)
key_put(rreq->netfs_priv);
}
-const struct netfs_request_ops afs_req_ops = {
- .init_request = afs_init_request,
- .free_request = afs_free_request,
- .begin_cache_operation = afs_begin_cache_operation,
- .check_write_begin = afs_check_write_begin,
- .issue_read = afs_issue_read,
-};
-
-int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
+static void afs_update_i_size(struct inode *inode, loff_t new_i_size)
{
- fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
- return 0;
-}
-
-/*
- * Adjust the dirty region of the page on truncation or full invalidation,
- * getting rid of the markers altogether if the region is entirely invalidated.
- */
-static void afs_invalidate_dirty(struct folio *folio, size_t offset,
- size_t length)
-{
- struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
- unsigned long priv;
- unsigned int f, t, end = offset + length;
-
- priv = (unsigned long)folio_get_private(folio);
-
- /* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == folio_size(folio))
- goto full_invalidate;
-
- /* If the page was dirtied by page_mkwrite(), the PTE stays writable
- * and we don't get another notification to tell us to expand it
- * again.
- */
- if (afs_is_folio_dirty_mmapped(priv))
- return;
-
- /* We may need to shorten the dirty region */
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
-
- if (t <= offset || f >= end)
- return; /* Doesn't overlap */
-
- if (f < offset && t > end)
- return; /* Splits the dirty region - just absorb it */
-
- if (f >= offset && t <= end)
- goto undirty;
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ loff_t i_size;
- if (f < offset)
- t = offset;
- else
- f = end;
- if (f == t)
- goto undirty;
-
- priv = afs_folio_dirty(folio, f, t);
- folio_change_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
- return;
-
-undirty:
- trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
- folio_clear_dirty_for_io(folio);
-full_invalidate:
- trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
- folio_detach_private(folio);
+ write_seqlock(&vnode->cb_lock);
+ i_size = i_size_read(&vnode->netfs.inode);
+ if (new_i_size > i_size) {
+ i_size_write(&vnode->netfs.inode, new_i_size);
+ inode_set_bytes(&vnode->netfs.inode, new_i_size);
+ }
+ write_sequnlock(&vnode->cb_lock);
+ fscache_update_cookie(afs_vnode_cache(vnode), NULL, &new_i_size);
}
-/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- * the entire page)
- */
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
- size_t length)
+static void afs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
- _enter("{%lu},%zu,%zu", folio->index, offset, length);
-
- BUG_ON(!folio_test_locked(folio));
+ struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
- if (folio_get_private(folio))
- afs_invalidate_dirty(folio, offset, length);
-
- folio_wait_fscache(folio);
- _leave("");
+ afs_invalidate_cache(vnode, 0);
}
-/*
- * release a page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
- */
-static bool afs_release_folio(struct folio *folio, gfp_t gfp)
-{
- struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{{%llx:%llu}[%lu],%lx},%x",
- vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
- gfp);
-
- /* deny if folio is being written to the cache and the caller hasn't
- * elected to wait */
-#ifdef CONFIG_AFS_FSCACHE
- if (folio_test_fscache(folio)) {
- if (current_is_kswapd() || !(gfp & __GFP_FS))
- return false;
- folio_wait_fscache(folio);
- }
- fscache_note_page_release(afs_vnode_cache(vnode));
-#endif
-
- if (folio_test_private(folio)) {
- trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
- folio_detach_private(folio);
- }
-
- /* Indicate that the folio can be released */
- _leave(" = T");
- return true;
-}
+const struct netfs_request_ops afs_req_ops = {
+ .init_request = afs_init_request,
+ .free_request = afs_free_request,
+ .check_write_begin = afs_check_write_begin,
+ .issue_read = afs_issue_read,
+ .update_i_size = afs_update_i_size,
+ .invalidate_cache = afs_netfs_invalidate_cache,
+ .create_write_requests = afs_create_write_requests,
+};
static void afs_add_open_mmap(struct afs_vnode *vnode)
{
@@ -525,13 +417,17 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
static void afs_drop_open_mmap(struct afs_vnode *vnode)
{
- if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+ if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1))
return;
down_write(&vnode->volume->open_mmaps_lock);
- if (atomic_read(&vnode->cb_nr_mmap) == 0)
+ read_seqlock_excl(&vnode->cb_lock);
+ // the only place where ->cb_nr_mmap may hit 0
+ // see __afs_break_callback() for the other side...
+ if (atomic_dec_and_test(&vnode->cb_nr_mmap))
list_del_init(&vnode->cb_mmap_link);
+ read_sequnlock_excl(&vnode->cb_lock);
up_write(&vnode->volume->open_mmaps_lock);
flush_work(&vnode->cb_work);
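
atomic_add_unless(&v, -1, 1) decrements only while the counter is above one, so every non-final put stays lock-free and the final 1 -> 0 transition is forced to happen under cb_lock, where it can be serialised against __afs_break_callback() walking the mmap list. The generic shape of the pattern:

    static void my_put(struct my_obj *o)
    {
            /* fast path: succeeds whenever the count is not 1 */
            if (atomic_add_unless(&o->count, -1, 1))
                    return;

            spin_lock(&o->lock);
            /* the only place the count can reach zero */
            if (atomic_dec_and_test(&o->count))
                    list_del_init(&o->link);
            spin_unlock(&o->lock);
    }
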
@@ -576,28 +472,39 @@ static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pg
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_file *af = iocb->ki_filp->private_data;
- int ret;
+ ssize_t ret;
- ret = afs_validate(vnode, af->key);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return netfs_unbuffered_read_iter(iocb, iter);
+
+ ret = netfs_start_io_read(inode);
if (ret < 0)
return ret;
-
- return generic_file_read_iter(iocb, iter);
+ ret = afs_validate(vnode, af->key);
+ if (ret == 0)
+ ret = filemap_read(iocb, iter, 0);
+ netfs_end_io_read(inode);
+ return ret;
}
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(in));
+ struct inode *inode = file_inode(in);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_file *af = in->private_data;
- int ret;
+ ssize_t ret;
- ret = afs_validate(vnode, af->key);
+ ret = netfs_start_io_read(inode);
if (ret < 0)
return ret;
-
- return filemap_splice_read(in, ppos, pipe, len, flags);
+ ret = afs_validate(vnode, af->key);
+ if (ret == 0)
+ ret = filemap_splice_read(in, ppos, pipe, len, flags);
+ netfs_end_io_read(inode);
+ return ret;
}
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 4f04f6f33f46..94fc049aff58 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -58,7 +58,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
*/
static void afs_set_netfs_context(struct afs_vnode *vnode)
{
- netfs_inode_init(&vnode->netfs, &afs_req_ops);
+ netfs_inode_init(&vnode->netfs, &afs_req_ops, true);
}
/*
@@ -166,6 +166,7 @@ static void afs_apply_status(struct afs_operation *op,
struct inode *inode = &vnode->netfs.inode;
struct timespec64 t;
umode_t mode;
+ bool unexpected_jump = false;
bool data_changed = false;
bool change_size = vp->set_size;
@@ -230,6 +231,7 @@ static void afs_apply_status(struct afs_operation *op,
}
change_size = true;
data_changed = true;
+ unexpected_jump = true;
} else if (vnode->status.type == AFS_FTYPE_DIR) {
/* Expected directory change is handled elsewhere so
* that we can locally edit the directory and save on a
@@ -249,8 +251,10 @@ static void afs_apply_status(struct afs_operation *op,
* what's on the server.
*/
vnode->netfs.remote_i_size = status->size;
- if (change_size) {
+ if (change_size || status->size > i_size_read(inode)) {
afs_set_i_size(vnode, status->size);
+ if (unexpected_jump)
+ vnode->netfs.zero_point = status->size;
inode_set_ctime_to_ts(inode, t);
inode_set_atime_to_ts(inode, t);
}
@@ -647,7 +651,7 @@ void afs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
afs_set_cache_aux(vnode, &aux);
- fscache_clear_inode_writeback(afs_vnode_cache(vnode), inode, &aux);
+ netfs_clear_inode_writeback(inode, &aux);
clear_inode(inode);
while (!list_empty(&vnode->wb_keys)) {
@@ -689,17 +693,17 @@ static void afs_setattr_success(struct afs_operation *op)
static void afs_setattr_edit_file(struct afs_operation *op)
{
struct afs_vnode_param *vp = &op->file[0];
- struct inode *inode = &vp->vnode->netfs.inode;
+ struct afs_vnode *vnode = vp->vnode;
if (op->setattr.attr->ia_valid & ATTR_SIZE) {
loff_t size = op->setattr.attr->ia_size;
loff_t i_size = op->setattr.old_i_size;
- if (size < i_size)
- truncate_pagecache(inode, size);
- if (size != i_size)
- fscache_resize_cookie(afs_vnode_cache(vp->vnode),
- vp->scb.status.size);
+ if (size != i_size) {
+ truncate_setsize(&vnode->netfs.inode, size);
+ netfs_resize_file(&vnode->netfs, size, true);
+ fscache_resize_cookie(afs_vnode_cache(vnode), size);
+ }
}
}
@@ -767,11 +771,11 @@ int afs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
*/
if (!(attr->ia_valid & (supported & ~ATTR_SIZE & ~ATTR_MTIME)) &&
attr->ia_size < i_size &&
- attr->ia_size > vnode->status.size) {
- truncate_pagecache(inode, attr->ia_size);
+ attr->ia_size > vnode->netfs.remote_i_size) {
+ truncate_setsize(inode, attr->ia_size);
+ netfs_resize_file(&vnode->netfs, size, false);
fscache_resize_cookie(afs_vnode_cache(vnode),
attr->ia_size);
- i_size_write(inode, attr->ia_size);
ret = 0;
goto out_unlock;
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 55aa0679d8ce..6ce5a612937c 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -321,8 +321,7 @@ struct afs_net {
struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */
struct hlist_head fs_proc; /* procfs servers list */
- struct hlist_head fs_addresses4; /* afs_server (by lowest IPv4 addr) */
- struct hlist_head fs_addresses6; /* afs_server (by lowest IPv6 addr) */
+ struct hlist_head fs_addresses; /* afs_server (by lowest IPv6 addr) */
seqlock_t fs_addr_lock; /* For fs_addresses[46] */
struct work_struct fs_manager;
@@ -561,8 +560,7 @@ struct afs_server {
struct afs_server __rcu *uuid_next; /* Next server with same UUID */
struct afs_server *uuid_prev; /* Previous server with same UUID */
struct list_head probe_link; /* Link in net->fs_probe_list */
- struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
- struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
+ struct hlist_node addr_link; /* Link in net->fs_addresses6 */
struct hlist_node proc_link; /* Link in net->fs_proc */
struct list_head volumes; /* RCU list of afs_server_entry objects */
struct afs_server *gc_next; /* Next server in manager's list */
@@ -985,62 +983,6 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl
i_size_read(&vnode->netfs.inode), flags);
}
-/*
- * We use folio->private to hold the amount of the folio that we've written to,
- * splitting the field into two parts. However, we need to represent a range
- * 0...FOLIO_SIZE, so we reduce the resolution if the size of the folio
- * exceeds what we can encode.
- */
-#ifdef CONFIG_64BIT
-#define __AFS_FOLIO_PRIV_MASK 0x7fffffffUL
-#define __AFS_FOLIO_PRIV_SHIFT 32
-#define __AFS_FOLIO_PRIV_MMAPPED 0x80000000UL
-#else
-#define __AFS_FOLIO_PRIV_MASK 0x7fffUL
-#define __AFS_FOLIO_PRIV_SHIFT 16
-#define __AFS_FOLIO_PRIV_MMAPPED 0x8000UL
-#endif
-
-static inline unsigned int afs_folio_dirty_resolution(struct folio *folio)
-{
- int shift = folio_shift(folio) - (__AFS_FOLIO_PRIV_SHIFT - 1);
- return (shift > 0) ? shift : 0;
-}
-
-static inline size_t afs_folio_dirty_from(struct folio *folio, unsigned long priv)
-{
- unsigned long x = priv & __AFS_FOLIO_PRIV_MASK;
-
- /* The lower bound is inclusive */
- return x << afs_folio_dirty_resolution(folio);
-}
-
-static inline size_t afs_folio_dirty_to(struct folio *folio, unsigned long priv)
-{
- unsigned long x = (priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK;
-
- /* The upper bound is immediately beyond the region */
- return (x + 1) << afs_folio_dirty_resolution(folio);
-}
-
-static inline unsigned long afs_folio_dirty(struct folio *folio, size_t from, size_t to)
-{
- unsigned int res = afs_folio_dirty_resolution(folio);
- from >>= res;
- to = (to - 1) >> res;
- return (to << __AFS_FOLIO_PRIV_SHIFT) | from;
-}
-
-static inline unsigned long afs_folio_dirty_mmapped(unsigned long priv)
-{
- return priv | __AFS_FOLIO_PRIV_MMAPPED;
-}
-
-static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
-{
- return priv & __AFS_FOLIO_PRIV_MMAPPED;
-}
-
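
The helpers deleted above packed a folio's dirty byte range into the single unsigned long stored in folio->private: the low half holds the inclusive "from" offset, the high half holds the exclusive "to" offset biased by -1, and one spare bit records mmap dirtying. A minimal userspace sketch of the same packing scheme (names invented, 64-bit only, and the folio_shift() resolution scaling is omitted):

    #include <assert.h>
    #include <stdio.h>

    #define PRIV_MASK     0x7fffffffUL  /* 31 bits per bound (64-bit case) */
    #define PRIV_SHIFT    32
    #define PRIV_MMAPPED  0x80000000UL  /* spare bit between the two bounds */

    /* Pack [from, to): "to" is stored biased by -1 so a range ending
     * exactly at the folio size still fits under PRIV_MASK. */
    static unsigned long pack_dirty(size_t from, size_t to)
    {
        return (((to - 1) & PRIV_MASK) << PRIV_SHIFT) | (from & PRIV_MASK);
    }

    static size_t dirty_from(unsigned long priv)
    {
        return priv & PRIV_MASK;
    }

    static size_t dirty_to(unsigned long priv)
    {
        return ((priv >> PRIV_SHIFT) & PRIV_MASK) + 1;
    }

    int main(void)
    {
        unsigned long priv = pack_dirty(512, 4096) | PRIV_MMAPPED;

        assert(dirty_from(priv) == 512 && dirty_to(priv) == 4096);
        printf("dirty %zu..%zu mmapped=%d\n", dirty_from(priv),
               dirty_to(priv), !!(priv & PRIV_MMAPPED));
        return 0;
    }

With netfs taking over dirty tracking, none of this per-folio bookkeeping is needed in afs any more.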
#include <trace/events/afs.h>
/*****************************************************************************/
@@ -1167,7 +1109,6 @@ extern int afs_release(struct inode *, struct file *);
extern int afs_fetch_data(struct afs_vnode *, struct afs_read *);
extern struct afs_read *afs_alloc_read(gfp_t);
extern void afs_put_read(struct afs_read *);
-extern int afs_write_inode(struct inode *, struct writeback_control *);
static inline struct afs_read *afs_get_read(struct afs_read *req)
{
@@ -1658,24 +1599,11 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
/*
* write.c
*/
-#ifdef CONFIG_AFS_FSCACHE
-bool afs_dirty_folio(struct address_space *, struct folio *);
-#else
-#define afs_dirty_folio filemap_dirty_folio
-#endif
-extern int afs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
-extern int afs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-extern int afs_writepage(struct page *, struct writeback_control *);
extern int afs_writepages(struct address_space *, struct writeback_control *);
-extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
extern void afs_prune_wb_keys(struct afs_vnode *);
-int afs_launder_folio(struct folio *);
+void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len);
/*
* xattr.c
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 1b3bd21c168a..a14f6013e316 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -90,8 +90,7 @@ static int __net_init afs_net_init(struct net *net_ns)
INIT_LIST_HEAD(&net->fs_probe_slow);
INIT_HLIST_HEAD(&net->fs_proc);
- INIT_HLIST_HEAD(&net->fs_addresses4);
- INIT_HLIST_HEAD(&net->fs_addresses6);
+ INIT_HLIST_HEAD(&net->fs_addresses);
seqlock_init(&net->fs_addr_lock);
INIT_WORK(&net->fs_manager, afs_manage_servers);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 3bd02571f30d..15eab053af6d 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -166,7 +166,7 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
if (!preflist) {
seq_puts(m, "NO PREFS\n");
- return 0;
+ goto out;
}
seq_printf(m, "PROT SUBNET PRIOR (v=%u n=%u/%u/%u)\n",
@@ -191,7 +191,8 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
}
}
- rcu_read_lock();
+out:
+ rcu_read_unlock();
return 0;
}
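
The fix above repairs two intertwined bugs: the "NO PREFS" early return skipped the unlock entirely, and the function's final statement took the RCU read lock a second time where it should have released it. Both are cured by funnelling every exit through one label. A hypothetical userspace analogue of the idiom, using a pthread mutex in place of rcu_read_lock()/rcu_read_unlock() (the real function takes the lock earlier, outside the hunk shown):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static const char *prefs;   /* stands in for the RCU-protected list */

    static int show_prefs(void)
    {
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (!prefs) {
            printf("NO PREFS\n");
            goto out;           /* early exit still drops the lock */
        }
        printf("%s\n", prefs);
    out:
        pthread_mutex_unlock(&lock);
        return ret;
    }

    int main(void)
    {
        show_prefs();           /* prints NO PREFS, lock stays balanced */
        prefs = "udp 0.0.0.0/0 3000";
        show_prefs();
        return 0;
    }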
diff --git a/fs/afs/server.c b/fs/afs/server.c
index e169121f603e..038f9d0ae3af 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -38,7 +38,7 @@ struct afs_server *afs_find_server(struct afs_net *net, const struct rxrpc_peer
seq++; /* 2 on the 1st/lockless path, otherwise odd */
read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
- hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
+ hlist_for_each_entry_rcu(server, &net->fs_addresses, addr_link) {
estate = rcu_dereference(server->endpoint_state);
alist = estate->addresses;
for (i = 0; i < alist->nr_addrs; i++)
@@ -177,10 +177,8 @@ added_dup:
* bit, but anything we might want to do gets messy and memory
* intensive.
*/
- if (alist->nr_ipv4 > 0)
- hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4);
- if (alist->nr_addrs > alist->nr_ipv4)
- hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);
+ if (alist->nr_addrs > 0)
+ hlist_add_head_rcu(&server->addr_link, &net->fs_addresses);
write_sequnlock(&net->fs_addr_lock);
@@ -511,10 +509,8 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
list_del(&server->probe_link);
hlist_del_rcu(&server->proc_link);
- if (!hlist_unhashed(&server->addr4_link))
- hlist_del_rcu(&server->addr4_link);
- if (!hlist_unhashed(&server->addr6_link))
- hlist_del_rcu(&server->addr6_link);
+ if (!hlist_unhashed(&server->addr_link))
+ hlist_del_rcu(&server->addr_link);
}
write_sequnlock(&net->fs_lock);
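
With the IPv4 and IPv6 chains merged, each server sits on at most one address list, but the hlist_unhashed() guard is kept because a server with no addresses is never added at all. For reference, a simplified userspace reimplementation of the pprev-based hlist (non-RCU, illustrative only; the real one lives in <linux/list.h>) showing why a never-added node reads as "unhashed":

    #include <stdio.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
        n->next = h->first;
        if (h->first)
            h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;   /* points at whatever points at us */
    }

    /* A node that was never added has a NULL pprev. */
    static int hlist_unhashed(const struct hlist_node *n)
    {
        return !n->pprev;
    }

    static void hlist_del(struct hlist_node *n)
    {
        *n->pprev = n->next;    /* unlink without needing the head */
        if (n->next)
            n->next->pprev = n->pprev;
    }

    int main(void)
    {
        struct hlist_head head = { NULL };
        struct hlist_node added = { NULL, NULL }, never_added = { NULL, NULL };

        hlist_add_head(&added, &head);

        /* The guard from afs_gc_servers(): only unlink hashed nodes. */
        if (!hlist_unhashed(&added))
            hlist_del(&added);
        if (!hlist_unhashed(&never_added))
            hlist_del(&never_added);    /* skipped: would deref NULL pprev */

        printf("list empty: %d\n", head.first == NULL);
        return 0;
    }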
diff --git a/fs/afs/super.c b/fs/afs/super.c
index ae2d66a52add..f3ba1c3e72f5 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -55,7 +55,7 @@ int afs_net_id;
static const struct super_operations afs_super_ops = {
.statfs = afs_statfs,
.alloc_inode = afs_alloc_inode,
- .write_inode = afs_write_inode,
+ .write_inode = netfs_unpin_writeback,
.drop_inode = afs_drop_inode,
.destroy_inode = afs_destroy_inode,
.free_inode = afs_free_inode,
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 020ecd45e476..af3a3f57c1b3 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -353,7 +353,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
{
struct afs_server_list *new, *old, *discard;
struct afs_vldb_entry *vldb;
- char idbuf[16];
+ char idbuf[24];
int ret, idsz;
_enter("");
@@ -361,7 +361,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
/* We look up an ID by passing it as a decimal string in the
* operation's name parameter.
*/
- idsz = sprintf(idbuf, "%llu", volume->vid);
+ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
if (IS_ERR(vldb)) {
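
The buffer bump from 16 to 24 bytes matters because a 64-bit volume ID can print as up to 20 decimal digits, which plus the NUL terminator would overrun idbuf[16]; switching to snprintf() additionally bounds the write even if the estimate is ever wrong again. A quick userspace check of the sizing (hypothetical, using uint64_t for the vid):

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        char idbuf[24];
        uint64_t vid = UINT64_MAX;  /* 18446744073709551615: 20 digits */
        int idsz = snprintf(idbuf, sizeof(idbuf), "%" PRIu64, vid);

        /* snprintf returns the length it wanted to write; with a 24-byte
         * buffer nothing is truncated (20 digits + NUL = 21 bytes). */
        printf("idsz=%d str=%s\n", idsz, idbuf);
        return 0;
    }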
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 61d34ad2ca7d..74402d95a884 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -12,309 +12,17 @@
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#include "internal.h"
-static int afs_writepages_region(struct address_space *mapping,
- struct writeback_control *wbc,
- loff_t start, loff_t end, loff_t *_next,
- bool max_one_loop);
-
-static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
- loff_t i_size, bool caching);
-
-#ifdef CONFIG_AFS_FSCACHE
-/*
- * Mark a page as having been made dirty and thus needing writeback. We also
- * need to pin the cache object to write back to.
- */
-bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
-{
- return fscache_dirty_folio(mapping, folio,
- afs_vnode_cache(AFS_FS_I(mapping->host)));
-}
-static void afs_folio_start_fscache(bool caching, struct folio *folio)
-{
- if (caching)
- folio_start_fscache(folio);
-}
-#else
-static void afs_folio_start_fscache(bool caching, struct folio *folio)
-{
-}
-#endif
-
-/*
- * Flush out a conflicting write. This may extend the write to the surrounding
- * pages if they are also dirty and contiguous to the conflicting region.
- */
-static int afs_flush_conflicting_write(struct address_space *mapping,
- struct folio *folio)
-{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = LONG_MAX,
- .range_start = folio_pos(folio),
- .range_end = LLONG_MAX,
- };
- loff_t next;
-
- return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX,
- &next, true);
-}
-
-/*
- * prepare to perform part of a write to a page
- */
-int afs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct page **_page, void **fsdata)
-{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- struct folio *folio;
- unsigned long priv;
- unsigned f, from;
- unsigned t, to;
- pgoff_t index;
- int ret;
-
- _enter("{%llx:%llu},%llx,%x",
- vnode->fid.vid, vnode->fid.vnode, pos, len);
-
- /* Prefetch area to be written into the cache if we're caching this
- * file. We need to do this before we get a lock on the page in case
- * there's more than one writer competing for the same cache block.
- */
- ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
- if (ret < 0)
- return ret;
-
- index = folio_index(folio);
- from = pos - index * PAGE_SIZE;
- to = from + len;
-
-try_again:
- /* See if this page is already partially written in a way that we can
- * merge the new write with.
- */
- if (folio_test_private(folio)) {
- priv = (unsigned long)folio_get_private(folio);
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- ASSERTCMP(f, <=, t);
-
- if (folio_test_writeback(folio)) {
- trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
- folio_unlock(folio);
- goto wait_for_writeback;
- }
- /* If the file is being filled locally, allow inter-write
- * spaces to be merged into writes. If it's not, only write
- * back what the user gives us.
- */
- if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
- (to < f || from > t))
- goto flush_conflicting_write;
- }
-
- *_page = folio_file_page(folio, pos / PAGE_SIZE);
- _leave(" = 0");
- return 0;
-
- /* The previous write and this write aren't adjacent or overlapping, so
- * flush the page out.
- */
-flush_conflicting_write:
- trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
- folio_unlock(folio);
-
- ret = afs_flush_conflicting_write(mapping, folio);
- if (ret < 0)
- goto error;
-
-wait_for_writeback:
- ret = folio_wait_writeback_killable(folio);
- if (ret < 0)
- goto error;
-
- ret = folio_lock_killable(folio);
- if (ret < 0)
- goto error;
- goto try_again;
-
-error:
- folio_put(folio);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * finalise part of a write to a page
- */
-int afs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *subpage, void *fsdata)
-{
- struct folio *folio = page_folio(subpage);
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- unsigned long priv;
- unsigned int f, from = offset_in_folio(folio, pos);
- unsigned int t, to = from + copied;
- loff_t i_size, write_end_pos;
-
- _enter("{%llx:%llu},{%lx}",
- vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
-
- if (!folio_test_uptodate(folio)) {
- if (copied < len) {
- copied = 0;
- goto out;
- }
-
- folio_mark_uptodate(folio);
- }
-
- if (copied == 0)
- goto out;
-
- write_end_pos = pos + copied;
-
- i_size = i_size_read(&vnode->netfs.inode);
- if (write_end_pos > i_size) {
- write_seqlock(&vnode->cb_lock);
- i_size = i_size_read(&vnode->netfs.inode);
- if (write_end_pos > i_size)
- afs_set_i_size(vnode, write_end_pos);
- write_sequnlock(&vnode->cb_lock);
- fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
- }
-
- if (folio_test_private(folio)) {
- priv = (unsigned long)folio_get_private(folio);
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- if (from < f)
- f = from;
- if (to > t)
- t = to;
- priv = afs_folio_dirty(folio, f, t);
- folio_change_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
- } else {
- priv = afs_folio_dirty(folio, from, to);
- folio_attach_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
- }
-
- if (folio_mark_dirty(folio))
- _debug("dirtied %lx", folio_index(folio));
-
-out:
- folio_unlock(folio);
- folio_put(folio);
- return copied;
-}
-
-/*
- * kill all the pages in the given range
- */
-static void afs_kill_pages(struct address_space *mapping,
- loff_t start, loff_t len)
-{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct folio *folio;
- pgoff_t index = start / PAGE_SIZE;
- pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
- _enter("{%llx:%llu},%llx @%llx",
- vnode->fid.vid, vnode->fid.vnode, len, start);
-
- do {
- _debug("kill %lx (to %lx)", index, last);
-
- folio = filemap_get_folio(mapping, index);
- if (IS_ERR(folio)) {
- next = index + 1;
- continue;
- }
-
- next = folio_next_index(folio);
-
- folio_clear_uptodate(folio);
- folio_end_writeback(folio);
- folio_lock(folio);
- generic_error_remove_folio(mapping, folio);
- folio_unlock(folio);
- folio_put(folio);
-
- } while (index = next, index <= last);
-
- _leave("");
-}
-
-/*
- * Redirty all the pages in a given range.
- */
-static void afs_redirty_pages(struct writeback_control *wbc,
- struct address_space *mapping,
- loff_t start, loff_t len)
-{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct folio *folio;
- pgoff_t index = start / PAGE_SIZE;
- pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
- _enter("{%llx:%llu},%llx @%llx",
- vnode->fid.vid, vnode->fid.vnode, len, start);
-
- do {
- _debug("redirty %llx @%llx", len, start);
-
- folio = filemap_get_folio(mapping, index);
- if (IS_ERR(folio)) {
- next = index + 1;
- continue;
- }
-
- next = index + folio_nr_pages(folio);
- folio_redirty_for_writepage(wbc, folio);
- folio_end_writeback(folio);
- folio_put(folio);
- } while (index = next, index <= last);
-
- _leave("");
-}
-
/*
* completion of write to server
*/
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
- struct address_space *mapping = vnode->netfs.inode.i_mapping;
- struct folio *folio;
- pgoff_t end;
-
- XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
-
_enter("{%llx:%llu},{%x @%llx}",
vnode->fid.vid, vnode->fid.vnode, len, start);
- rcu_read_lock();
-
- end = (start + len - 1) / PAGE_SIZE;
- xas_for_each(&xas, folio, end) {
- if (!folio_test_writeback(folio)) {
- kdebug("bad %x @%llx page %lx %lx",
- len, start, folio_index(folio), end);
- ASSERT(folio_test_writeback(folio));
- }
-
- trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
- folio_detach_private(folio);
- folio_end_writeback(folio);
- }
-
- rcu_read_unlock();
-
afs_prune_wb_keys(vnode);
_leave("");
}
@@ -451,363 +159,53 @@ try_next_key:
return afs_put_operation(op);
}
-/*
- * Extend the region to be written back to include subsequent contiguously
- * dirty pages if possible, but don't sleep while doing so.
- *
- * If this page holds new content, then we can include filler zeros in the
- * writeback.
- */
-static void afs_extend_writeback(struct address_space *mapping,
- struct afs_vnode *vnode,
- long *_count,
- loff_t start,
- loff_t max_len,
- bool new_content,
- bool caching,
- unsigned int *_len)
+static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
{
- struct folio_batch fbatch;
- struct folio *folio;
- unsigned long priv;
- unsigned int psize, filler = 0;
- unsigned int f, t;
- loff_t len = *_len;
- pgoff_t index = (start + len) / PAGE_SIZE;
- bool stop = true;
- unsigned int i;
-
- XA_STATE(xas, &mapping->i_pages, index);
- folio_batch_init(&fbatch);
-
- do {
- /* Firstly, we gather up a batch of contiguous dirty pages
- * under the RCU read lock - but we can't clear the dirty flags
- * there if any of those pages are mapped.
- */
- rcu_read_lock();
-
- xas_for_each(&xas, folio, ULONG_MAX) {
- stop = true;
- if (xas_retry(&xas, folio))
- continue;
- if (xa_is_value(folio))
- break;
- if (folio_index(folio) != index)
- break;
-
- if (!folio_try_get_rcu(folio)) {
- xas_reset(&xas);
- continue;
- }
-
- /* Has the page moved or been split? */
- if (unlikely(folio != xas_reload(&xas))) {
- folio_put(folio);
- break;
- }
-
- if (!folio_trylock(folio)) {
- folio_put(folio);
- break;
- }
- if (!folio_test_dirty(folio) ||
- folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
- folio_unlock(folio);
- folio_put(folio);
- break;
- }
-
- psize = folio_size(folio);
- priv = (unsigned long)folio_get_private(folio);
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- if (f != 0 && !new_content) {
- folio_unlock(folio);
- folio_put(folio);
- break;
- }
-
- len += filler + t;
- filler = psize - t;
- if (len >= max_len || *_count <= 0)
- stop = true;
- else if (t == psize || new_content)
- stop = false;
-
- index += folio_nr_pages(folio);
- if (!folio_batch_add(&fbatch, folio))
- break;
- if (stop)
- break;
- }
-
- if (!stop)
- xas_pause(&xas);
- rcu_read_unlock();
-
- /* Now, if we obtained any folios, we can shift them to being
- * writable and mark them for caching.
- */
- if (!folio_batch_count(&fbatch))
- break;
-
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
- folio = fbatch.folios[i];
- trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);
-
- if (!folio_clear_dirty_for_io(folio))
- BUG();
- folio_start_writeback(folio);
- afs_folio_start_fscache(caching, folio);
-
- *_count -= folio_nr_pages(folio);
- folio_unlock(folio);
- }
+ struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
+ ssize_t ret;
- folio_batch_release(&fbatch);
- cond_resched();
- } while (!stop);
+ _enter("%x[%x],%zx",
+ subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);
- *_len = len;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ ret = afs_store_data(vnode, &subreq->io_iter, subreq->start,
+ subreq->rreq->origin == NETFS_LAUNDER_WRITE);
+ netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len,
+ false);
}
-/*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
- */
-static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
- struct writeback_control *wbc,
- struct folio *folio,
- loff_t start, loff_t end)
+static void afs_upload_to_server_worker(struct work_struct *work)
{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct iov_iter iter;
- unsigned long priv;
- unsigned int offset, to, len, max_len;
- loff_t i_size = i_size_read(&vnode->netfs.inode);
- bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
- long count = wbc->nr_to_write;
- int ret;
-
- _enter(",%lx,%llx-%llx", folio_index(folio), start, end);
-
- folio_start_writeback(folio);
- afs_folio_start_fscache(caching, folio);
-
- count -= folio_nr_pages(folio);
-
- /* Find all consecutive lockable dirty pages that have contiguous
- * written regions, stopping when we find a page that is not
- * immediately lockable, is not dirty or is missing, or we reach the
- * end of the range.
- */
- priv = (unsigned long)folio_get_private(folio);
- offset = afs_folio_dirty_from(folio, priv);
- to = afs_folio_dirty_to(folio, priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);
-
- len = to - offset;
- start += offset;
- if (start < i_size) {
- /* Trim the write to the EOF; the extra data is ignored. Also
- * put an upper limit on the size of a single storedata op.
- */
- max_len = 65536 * 4096;
- max_len = min_t(unsigned long long, max_len, end - start + 1);
- max_len = min_t(unsigned long long, max_len, i_size - start);
-
- if (len < max_len &&
- (to == folio_size(folio) || new_content))
- afs_extend_writeback(mapping, vnode, &count,
- start, max_len, new_content,
- caching, &len);
- len = min_t(loff_t, len, max_len);
- }
-
- /* We now have a contiguous set of dirty pages, each with writeback
- * set; the first page is still locked at this point, but all the rest
- * have been unlocked.
- */
- folio_unlock(folio);
-
- if (start < i_size) {
- _debug("write back %x @%llx [%llx]", len, start, i_size);
-
- /* Speculatively write to the cache. We have to fix this up
- * later if the store fails.
- */
- afs_write_to_cache(vnode, start, len, i_size, caching);
-
- iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
- ret = afs_store_data(vnode, &iter, start, false);
- } else {
- _debug("write discard %x @%llx [%llx]", len, start, i_size);
-
- /* The dirty region was entirely beyond the EOF. */
- fscache_clear_page_bits(mapping, start, len, caching);
- afs_pages_written_back(vnode, start, len);
- ret = 0;
- }
-
- switch (ret) {
- case 0:
- wbc->nr_to_write = count;
- ret = len;
- break;
+ struct netfs_io_subrequest *subreq =
+ container_of(work, struct netfs_io_subrequest, work);
- default:
- pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
- fallthrough;
- case -EACCES:
- case -EPERM:
- case -ENOKEY:
- case -EKEYEXPIRED:
- case -EKEYREJECTED:
- case -EKEYREVOKED:
- case -ENETRESET:
- afs_redirty_pages(wbc, mapping, start, len);
- mapping_set_error(mapping, ret);
- break;
-
- case -EDQUOT:
- case -ENOSPC:
- afs_redirty_pages(wbc, mapping, start, len);
- mapping_set_error(mapping, -ENOSPC);
- break;
-
- case -EROFS:
- case -EIO:
- case -EREMOTEIO:
- case -EFBIG:
- case -ENOENT:
- case -ENOMEDIUM:
- case -ENXIO:
- trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
- afs_kill_pages(mapping, start, len);
- mapping_set_error(mapping, ret);
- break;
- }
-
- _leave(" = %d", ret);
- return ret;
+ afs_upload_to_server(subreq);
}
/*
- * write a region of pages back to the server
+ * Set up write requests for a writeback slice. We need to add a write request
+ * for each write we want to make.
*/
-static int afs_writepages_region(struct address_space *mapping,
- struct writeback_control *wbc,
- loff_t start, loff_t end, loff_t *_next,
- bool max_one_loop)
+void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
{
- struct folio *folio;
- struct folio_batch fbatch;
- ssize_t ret;
- unsigned int i;
- int n, skips = 0;
-
- _enter("%llx,%llx,", start, end);
- folio_batch_init(&fbatch);
-
- do {
- pgoff_t index = start / PAGE_SIZE;
-
- n = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
- PAGECACHE_TAG_DIRTY, &fbatch);
-
- if (!n)
- break;
- for (i = 0; i < n; i++) {
- folio = fbatch.folios[i];
- start = folio_pos(folio); /* May regress with THPs */
-
- _debug("wback %lx", folio_index(folio));
-
- /* At this point we hold neither the i_pages lock nor the
- * page lock: the page may be truncated or invalidated
- * (changing page->mapping to NULL), or even swizzled
- * back from swapper_space to tmpfs file mapping
- */
-try_again:
- if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = folio_lock_killable(folio);
- if (ret < 0) {
- folio_batch_release(&fbatch);
- return ret;
- }
- } else {
- if (!folio_trylock(folio))
- continue;
- }
-
- if (folio->mapping != mapping ||
- !folio_test_dirty(folio)) {
- start += folio_size(folio);
- folio_unlock(folio);
- continue;
- }
-
- if (folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
- folio_unlock(folio);
- if (wbc->sync_mode != WB_SYNC_NONE) {
- folio_wait_writeback(folio);
-#ifdef CONFIG_AFS_FSCACHE
- folio_wait_fscache(folio);
-#endif
- goto try_again;
- }
-
- start += folio_size(folio);
- if (wbc->sync_mode == WB_SYNC_NONE) {
- if (skips >= 5 || need_resched()) {
- *_next = start;
- folio_batch_release(&fbatch);
- _leave(" = 0 [%llx]", *_next);
- return 0;
- }
- skips++;
- }
- continue;
- }
-
- if (!folio_clear_dirty_for_io(folio))
- BUG();
- ret = afs_write_back_from_locked_folio(mapping, wbc,
- folio, start, end);
- if (ret < 0) {
- _leave(" = %zd", ret);
- folio_batch_release(&fbatch);
- return ret;
- }
-
- start += ret;
- }
+ struct netfs_io_subrequest *subreq;
- folio_batch_release(&fbatch);
- cond_resched();
- } while (wbc->nr_to_write > 0);
+ _enter("%x,%llx-%llx", wreq->debug_id, start, start + len);
- *_next = start;
- _leave(" = 0 [%llx]", *_next);
- return 0;
+ subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
+ start, len, afs_upload_to_server_worker);
+ if (subreq)
+ netfs_queue_write_request(subreq);
}
/*
* write some of the pending data back to the server
*/
-int afs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- loff_t start, next;
int ret;
- _enter("");
-
/* We have to be careful as we can end up racing with setattr()
* truncating the pagecache since the caller doesn't take a lock here
* to prevent it.
@@ -817,69 +215,12 @@ int afs_writepages(struct address_space *mapping,
else if (!down_read_trylock(&vnode->validate_lock))
return 0;
- if (wbc->range_cyclic) {
- start = mapping->writeback_index * PAGE_SIZE;
- ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX,
- &next, false);
- if (ret == 0) {
- mapping->writeback_index = next / PAGE_SIZE;
- if (start > 0 && wbc->nr_to_write > 0) {
- ret = afs_writepages_region(mapping, wbc, 0,
- start, &next, false);
- if (ret == 0)
- mapping->writeback_index =
- next / PAGE_SIZE;
- }
- }
- } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX,
- &next, false);
- if (wbc->nr_to_write > 0 && ret == 0)
- mapping->writeback_index = next / PAGE_SIZE;
- } else {
- ret = afs_writepages_region(mapping, wbc,
- wbc->range_start, wbc->range_end,
- &next, false);
- }
-
+ ret = netfs_writepages(mapping, wbc);
up_read(&vnode->validate_lock);
- _leave(" = %d", ret);
return ret;
}
/*
- * write to an AFS file
- */
-ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
-{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
- struct afs_file *af = iocb->ki_filp->private_data;
- ssize_t result;
- size_t count = iov_iter_count(from);
-
- _enter("{%llx:%llu},{%zu},",
- vnode->fid.vid, vnode->fid.vnode, count);
-
- if (IS_SWAPFILE(&vnode->netfs.inode)) {
- printk(KERN_INFO
- "AFS: Attempt to write to active swap file!\n");
- return -EBUSY;
- }
-
- if (!count)
- return 0;
-
- result = afs_validate(vnode, af->key);
- if (result < 0)
- return result;
-
- result = generic_file_write_iter(iocb, from);
-
- _leave(" = %zd", result);
- return result;
-}
-
-/*
* flush any dirty pages for this process, and check for write errors.
* - the return status from this call provides a reliable indication of
* whether any write errors occurred for this process.
@@ -907,59 +248,11 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
*/
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
- struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
- struct inode *inode = file_inode(file);
- struct afs_vnode *vnode = AFS_FS_I(inode);
- struct afs_file *af = file->private_data;
- unsigned long priv;
- vm_fault_t ret = VM_FAULT_RETRY;
-
- _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
-
- afs_validate(vnode, af->key);
- sb_start_pagefault(inode->i_sb);
-
- /* Wait for the page to be written to the cache before we allow it to
- * be modified. We then assume the entire page will need writing back.
- */
-#ifdef CONFIG_AFS_FSCACHE
- if (folio_test_fscache(folio) &&
- folio_wait_fscache_killable(folio) < 0)
- goto out;
-#endif
-
- if (folio_wait_writeback_killable(folio))
- goto out;
-
- if (folio_lock_killable(folio) < 0)
- goto out;
-
- /* We mustn't change folio->private until writeback is complete as that
- * details the portion of the page we need to write back and we might
- * need to redirty the page if there's a problem.
- */
- if (folio_wait_writeback_killable(folio) < 0) {
- folio_unlock(folio);
- goto out;
- }
-
- priv = afs_folio_dirty(folio, 0, folio_size(folio));
- priv = afs_folio_dirty_mmapped(priv);
- if (folio_test_private(folio)) {
- folio_change_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
- } else {
- folio_attach_private(folio, (void *)priv);
- trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
- }
- file_update_time(file);
-
- ret = VM_FAULT_LOCKED;
-out:
- sb_end_pagefault(inode->i_sb);
- return ret;
+ if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
+ return VM_FAULT_SIGBUS;
+ return netfs_page_mkwrite(vmf, NULL);
}
/*
@@ -989,64 +282,3 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
afs_put_wb_key(wbk);
}
}
-
-/*
- * Clean up a page during invalidation.
- */
-int afs_launder_folio(struct folio *folio)
-{
- struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
- struct iov_iter iter;
- struct bio_vec bv;
- unsigned long priv;
- unsigned int f, t;
- int ret = 0;
-
- _enter("{%lx}", folio->index);
-
- priv = (unsigned long)folio_get_private(folio);
- if (folio_clear_dirty_for_io(folio)) {
- f = 0;
- t = folio_size(folio);
- if (folio_test_private(folio)) {
- f = afs_folio_dirty_from(folio, priv);
- t = afs_folio_dirty_to(folio, priv);
- }
-
- bvec_set_folio(&bv, folio, t - f, f);
- iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, bv.bv_len);
-
- trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
- ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
- }
-
- trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
- folio_detach_private(folio);
- folio_wait_fscache(folio);
- return ret;
-}
-
-/*
- * Deal with the completion of writing the data to the cache.
- */
-static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct afs_vnode *vnode = priv;
-
- if (IS_ERR_VALUE(transferred_or_error) &&
- transferred_or_error != -ENOBUFS)
- afs_invalidate_cache(vnode, 0);
-}
-
-/*
- * Save the write to the cache also.
- */
-static void afs_write_to_cache(struct afs_vnode *vnode,
- loff_t start, size_t len, loff_t i_size,
- bool caching)
-{
- fscache_write_to_cache(afs_vnode_cache(vnode),
- vnode->netfs.inode.i_mapping, start, len, i_size,
- afs_write_to_cache_done, vnode, caching);
-}
diff --git a/fs/aio.c b/fs/aio.c
index bb2ff48991f3..9cdaa2faa536 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -589,13 +589,24 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
- struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
- struct kioctx *ctx = req->ki_ctx;
+ struct aio_kiocb *req;
+ struct kioctx *ctx;
unsigned long flags;
+ /*
+ * kiocb didn't come from aio or is neither a read nor a write, hence
+ * ignore it.
+ */
+ if (!(iocb->ki_flags & IOCB_AIO_RW))
+ return;
+
+ req = container_of(iocb, struct aio_kiocb, rw);
+
if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
return;
+ ctx = req->ki_ctx;
+
spin_lock_irqsave(&ctx->ctx_lock, flags);
list_add_tail(&req->ki_list, &ctx->active_reqs);
req->ki_cancel = cancel;
@@ -1509,7 +1520,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
req->ki_complete = aio_complete_rw;
req->private = NULL;
req->ki_pos = iocb->aio_offset;
- req->ki_flags = req->ki_filp->f_iocb_flags;
+ req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
if (iocb->aio_flags & IOCB_FLAG_RESFD)
req->ki_flags |= IOCB_EVENTFD;
if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
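
The aio fix works because container_of() is pure pointer arithmetic: it will cheerfully compute an aio_kiocb address from a kiocb embedded in some other structure, so the new IOCB_AIO_RW flag lets kiocb_set_cancel_fn() prove ownership before converting. A standalone sketch of the guard-then-convert pattern (simplified types, invented flag name IOCB_OWNED):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define IOCB_OWNED 0x1      /* stands in for IOCB_AIO_RW */

    struct kiocb { int ki_flags; };
    struct aio_kiocb {
        struct kiocb rw;        /* the member we recover the outer struct from */
        int ki_id;
    };

    static void set_cancel(struct kiocb *iocb)
    {
        struct aio_kiocb *req;

        if (!(iocb->ki_flags & IOCB_OWNED))
            return;             /* not ours: container_of would be bogus */
        req = container_of(iocb, struct aio_kiocb, rw);
        printf("cancelling aio request %d\n", req->ki_id);
    }

    int main(void)
    {
        struct aio_kiocb req = { .rw = { .ki_flags = IOCB_OWNED }, .ki_id = 7 };
        struct kiocb foreign = { .ki_flags = 0 };

        set_cancel(&req.rw);    /* recovered: prints request 7 */
        set_cancel(&foreign);   /* ignored safely */
        return 0;
    }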
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index 7423a3557c68..1a05cecda7cc 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -27,7 +27,6 @@ bcachefs-y := \
checksum.o \
clock.o \
compress.o \
- counters.o \
darray.o \
debug.o \
dirent.o \
@@ -71,6 +70,7 @@ bcachefs-y := \
reflink.o \
replicas.o \
sb-clean.o \
+ sb-counters.o \
sb-downgrade.o \
sb-errors.o \
sb-members.o \
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index a09b9d00226a..fd3e175d8342 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -273,7 +273,7 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
c, err, alloc_key_dirty_sectors_0,
"data_type %s but dirty_sectors==0",
- bch2_data_types[a.v->data_type]);
+ bch2_data_type_str(a.v->data_type));
break;
case BCH_DATA_cached:
bkey_fsck_err_on(!a.v->cached_sectors ||
@@ -321,16 +321,12 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
{
struct bch_alloc_v4 _a;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
- unsigned i;
prt_newline(out);
printbuf_indent_add(out, 2);
- prt_printf(out, "gen %u oldest_gen %u data_type %s",
- a->gen, a->oldest_gen,
- a->data_type < BCH_DATA_NR
- ? bch2_data_types[a->data_type]
- : "(invalid data type)");
+ prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
+ bch2_prt_data_type(out, a->data_type);
prt_newline(out);
prt_printf(out, "journal_seq %llu", a->journal_seq);
prt_newline(out);
@@ -353,23 +349,6 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
prt_newline(out);
prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
- prt_newline(out);
-
- if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
- struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
- const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
-
- prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
- printbuf_indent_add(out, 2);
-
- for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
- prt_newline(out);
- bch2_backpointer_to_text(out, &bps[i]);
- }
-
- printbuf_indent_sub(out, 2);
- }
-
printbuf_indent_sub(out, 2);
}
@@ -839,7 +818,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
}
}
- if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+ if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
u64 journal_seq = trans->journal_res.seq;
u64 bucket_journal_seq = new_a->journal_seq;
@@ -1625,13 +1604,36 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
return ret;
}
+struct discard_buckets_state {
+ u64 seen;
+ u64 open;
+ u64 need_journal_commit;
+ u64 discarded;
+ struct bch_dev *ca;
+ u64 need_journal_commit_this_dev;
+};
+
+static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
+{
+ if (s->ca == ca)
+ return;
+
+ if (s->ca && s->need_journal_commit_this_dev >
+ bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
+ bch2_journal_flush_async(&c->journal, NULL);
+
+ if (s->ca)
+ percpu_ref_put(&s->ca->ref);
+ if (ca)
+ percpu_ref_get(&ca->ref);
+ s->ca = ca;
+ s->need_journal_commit_this_dev = 0;
+}
+
static int bch2_discard_one_bucket(struct btree_trans *trans,
struct btree_iter *need_discard_iter,
struct bpos *discard_pos_done,
- u64 *seen,
- u64 *open,
- u64 *need_journal_commit,
- u64 *discarded)
+ struct discard_buckets_state *s)
{
struct bch_fs *c = trans->c;
struct bpos pos = need_discard_iter->pos;
@@ -1643,20 +1645,24 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
int ret = 0;
ca = bch_dev_bkey_exists(c, pos.inode);
+
if (!percpu_ref_tryget(&ca->io_ref)) {
bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
return 0;
}
+ discard_buckets_next_dev(c, s, ca);
+
if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
- (*open)++;
+ s->open++;
goto out;
}
if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk,
pos.inode, pos.offset)) {
- (*need_journal_commit)++;
+ s->need_journal_commit++;
+ s->need_journal_commit_this_dev++;
goto out;
}
@@ -1709,7 +1715,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
* This works without any other locks because this is the only
* thread that removes items from the need_discard tree
*/
- bch2_trans_unlock(trans);
+ bch2_trans_unlock_long(trans);
blkdev_issue_discard(ca->disk_sb.bdev,
k.k->p.offset * ca->mi.bucket_size,
ca->mi.bucket_size,
@@ -1732,9 +1738,9 @@ write:
goto out;
count_event(c, bucket_discard);
- (*discarded)++;
+ s->discarded++;
out:
- (*seen)++;
+ s->seen++;
bch2_trans_iter_exit(trans, &iter);
percpu_ref_put(&ca->io_ref);
printbuf_exit(&buf);
@@ -1744,7 +1750,7 @@ out:
static void bch2_do_discards_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
- u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
+ struct discard_buckets_state s = {};
struct bpos discard_pos_done = POS_MAX;
int ret;
@@ -1756,19 +1762,14 @@ static void bch2_do_discards_work(struct work_struct *work)
ret = bch2_trans_run(c,
for_each_btree_key(trans, iter,
BTREE_ID_need_discard, POS_MIN, 0, k,
- bch2_discard_one_bucket(trans, &iter, &discard_pos_done,
- &seen,
- &open,
- &need_journal_commit,
- &discarded)));
-
- if (need_journal_commit * 2 > seen)
- bch2_journal_flush_async(&c->journal, NULL);
+ bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
- bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+ discard_buckets_next_dev(c, &s, NULL);
- trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
+ trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
bch2_err_str(ret));
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
void bch2_do_discards(struct bch_fs *c)
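
Collecting the counters into struct discard_buckets_state turns four u64 * out-parameters into one cursor that can also carry cross-iteration state, such as the currently pinned device and its per-device journal-commit count. A minimal illustration of the refactor's shape (hypothetical names, no locking):

    #include <stdio.h>

    struct discard_state {
        unsigned long seen, open, discarded;
    };

    /* Before: each new statistic meant another u64 *out parameter.
     * After: one state struct threaded through the loop. */
    static void discard_one(struct discard_state *s, int bucket_open)
    {
        s->seen++;
        if (bucket_open)
            s->open++;
        else
            s->discarded++;
    }

    int main(void)
    {
        struct discard_state s = { 0 };     /* zero-init, as in the patch */
        int open_flags[] = { 0, 1, 0, 0 };

        for (int i = 0; i < 4; i++)
            discard_one(&s, open_flags[i]);
        printf("seen=%lu open=%lu discarded=%lu\n",
               s.seen, s.open, s.discarded);
        return 0;
    }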
diff --git a/fs/bcachefs/alloc_background_format.h b/fs/bcachefs/alloc_background_format.h
new file mode 100644
index 000000000000..b4ec20be93b8
--- /dev/null
+++ b/fs/bcachefs/alloc_background_format.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H
+#define _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H
+
+struct bch_alloc {
+ struct bch_val v;
+ __u8 fields;
+ __u8 gen;
+ __u8 data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V1() \
+ x(read_time, 16) \
+ x(write_time, 16) \
+ x(data_type, 8) \
+ x(dirty_sectors, 16) \
+ x(cached_sectors, 16) \
+ x(oldest_gen, 8) \
+ x(stripe, 32) \
+ x(stripe_redundancy, 8)
+
+enum {
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+ BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
+struct bch_alloc_v2 {
+ struct bch_val v;
+ __u8 nr_fields;
+ __u8 gen;
+ __u8 oldest_gen;
+ __u8 data_type;
+ __u8 data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V2() \
+ x(read_time, 64) \
+ x(write_time, 64) \
+ x(dirty_sectors, 32) \
+ x(cached_sectors, 32) \
+ x(stripe, 32) \
+ x(stripe_redundancy, 8)
+
+struct bch_alloc_v3 {
+ struct bch_val v;
+ __le64 journal_seq;
+ __le32 flags;
+ __u8 nr_fields;
+ __u8 gen;
+ __u8 oldest_gen;
+ __u8 data_type;
+ __u8 data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
+LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
+
+struct bch_alloc_v4 {
+ struct bch_val v;
+ __u64 journal_seq;
+ __u32 flags;
+ __u8 gen;
+ __u8 oldest_gen;
+ __u8 data_type;
+ __u8 stripe_redundancy;
+ __u32 dirty_sectors;
+ __u32 cached_sectors;
+ __u64 io_time[2];
+ __u32 stripe;
+ __u32 nr_external_backpointers;
+ __u64 fragmentation_lru;
+} __packed __aligned(8);
+
+#define BCH_ALLOC_V4_U64s_V0 6
+#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(__u64))
+
+BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
+BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
+BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
+BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
+
+#define KEY_TYPE_BUCKET_GENS_BITS 8
+#define KEY_TYPE_BUCKET_GENS_NR (1U << KEY_TYPE_BUCKET_GENS_BITS)
+#define KEY_TYPE_BUCKET_GENS_MASK (KEY_TYPE_BUCKET_GENS_NR - 1)
+
+struct bch_bucket_gens {
+ struct bch_val v;
+ u8 gens[KEY_TYPE_BUCKET_GENS_NR];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_ALLOC_BACKGROUND_FORMAT_H */
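
The BITMASK()/LE32_BITMASK() lines above each expand to a getter and a setter for a [lo, hi) bit slice of the on-disk flags word. A cut-down, host-endian userspace rendition of what such a macro generates (illustrative only; the real macros also handle __le32/__le64 fields):

    #include <stdio.h>
    #include <stdint.h>

    /* Generate name() getter and SET_name() setter for bits [lo, hi)
     * of the given struct field, roughly as the kernel macro does. */
    #define BITMASK(name, type, field, lo, hi)                          \
    static inline uint64_t name(const type *k)                          \
    {                                                                   \
        return (k->field >> (lo)) & ~(~0ULL << ((hi) - (lo)));          \
    }                                                                   \
                                                                        \
    static inline void SET_##name(type *k, uint64_t v)                  \
    {                                                                   \
        k->field &= ~(~(~0ULL << ((hi) - (lo))) << (lo));               \
        k->field |= (v & ~(~0ULL << ((hi) - (lo)))) << (lo);            \
    }

    struct alloc { uint32_t flags; };

    BITMASK(ALLOC_NEED_DISCARD, struct alloc, flags, 0, 1)
    BITMASK(ALLOC_BP_START, struct alloc, flags, 2, 8)

    int main(void)
    {
        struct alloc a = { 0 };

        SET_ALLOC_NEED_DISCARD(&a, 1);
        SET_ALLOC_BP_START(&a, 5);
        printf("need_discard=%llu bp_start=%llu flags=%#x\n",
               (unsigned long long)ALLOC_NEED_DISCARD(&a),
               (unsigned long long)ALLOC_BP_START(&a), (unsigned)a.flags);
        return 0;
    }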
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index b0ff47998a94..633d3223b353 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -1525,10 +1525,11 @@ static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, str
unsigned data_type = ob->data_type;
barrier(); /* READ_ONCE() doesn't work on bitfields */
- prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
+ prt_printf(out, "%zu ref %u ",
ob - c->open_buckets,
- atomic_read(&ob->pin),
- data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
+ atomic_read(&ob->pin));
+ bch2_prt_data_type(out, data_type);
+ prt_printf(out, " %u:%llu gen %u allocated %u/%u",
ob->dev, ob->bucket, ob->gen,
ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
if (ob->ec)
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index e358a2ffffde..569b97904da4 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -68,9 +68,11 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
- prt_str(out, "bucket=");
- bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
- prt_str(out, " ");
+ if (bch2_dev_exists2(c, k.k->p.inode)) {
+ prt_str(out, "bucket=");
+ bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
+ prt_str(out, " ");
+ }
bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}
@@ -400,13 +402,24 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
return ret;
}
+static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
+{
+ return bpos_eq(l.k->p, r.k->p) &&
+ bkey_bytes(l.k) == bkey_bytes(r.k) &&
+ !memcmp(l.v, r.v, bkey_val_bytes(l.k));
+}
+
+struct extents_to_bp_state {
+ struct bpos bucket_start;
+ struct bpos bucket_end;
+ struct bkey_buf last_flushed;
+};
+
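
bkey_and_val_eq() can compare values with a bare memcmp() only because bkey values are packed, fully serialized byte ranges; for ordinary C structs, padding bytes make memcmp() an unreliable equality test, as this small hypothetical demonstration shows:

    #include <stdio.h>
    #include <string.h>

    struct padded {
        char type;  /* compilers typically insert 3 padding bytes here */
        int  val;
    };

    int main(void)
    {
        struct padded a, b;

        memset(&a, 0xaa, sizeof(a));    /* dirty the padding bytes */
        memset(&b, 0x55, sizeof(b));
        a.type = b.type = 'x';
        a.val = b.val = 42;

        /* Logically equal, but memcmp may disagree because of padding. */
        printf("memcmp says %s\n",
               memcmp(&a, &b, sizeof(a)) == 0 ? "equal" : "different");
        return 0;
    }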
static int check_bp_exists(struct btree_trans *trans,
+ struct extents_to_bp_state *s,
struct bpos bucket,
struct bch_backpointer bp,
- struct bkey_s_c orig_k,
- struct bpos bucket_start,
- struct bpos bucket_end,
- struct bkey_buf *last_flushed)
+ struct bkey_s_c orig_k)
{
struct bch_fs *c = trans->c;
struct btree_iter bp_iter = { NULL };
@@ -417,8 +430,8 @@ static int check_bp_exists(struct btree_trans *trans,
bch2_bkey_buf_init(&tmp);
- if (bpos_lt(bucket, bucket_start) ||
- bpos_gt(bucket, bucket_end))
+ if (bpos_lt(bucket, s->bucket_start) ||
+ bpos_gt(bucket, s->bucket_end))
return 0;
if (!bch2_dev_bucket_exists(c, bucket))
@@ -433,11 +446,9 @@ static int check_bp_exists(struct btree_trans *trans,
if (bp_k.k->type != KEY_TYPE_backpointer ||
memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
- if (!bpos_eq(orig_k.k->p, last_flushed->k->k.p) ||
- bkey_bytes(orig_k.k) != bkey_bytes(&last_flushed->k->k) ||
- memcmp(orig_k.v, &last_flushed->k->v, bkey_val_bytes(orig_k.k))) {
- bch2_bkey_buf_reassemble(&tmp, c, orig_k);
+ bch2_bkey_buf_reassemble(&tmp, c, orig_k);
+ if (!bkey_and_val_eq(orig_k, bkey_i_to_s_c(s->last_flushed.k))) {
if (bp.level) {
bch2_trans_unlock(trans);
bch2_btree_interior_updates_flush(c);
@@ -447,7 +458,7 @@ static int check_bp_exists(struct btree_trans *trans,
if (ret)
goto err;
- bch2_bkey_buf_copy(last_flushed, c, tmp.k);
+ bch2_bkey_buf_copy(&s->last_flushed, c, tmp.k);
ret = -BCH_ERR_transaction_restart_write_buffer_flush;
goto out;
}
@@ -475,10 +486,8 @@ missing:
}
static int check_extent_to_backpointers(struct btree_trans *trans,
+ struct extents_to_bp_state *s,
enum btree_id btree, unsigned level,
- struct bpos bucket_start,
- struct bpos bucket_end,
- struct bkey_buf *last_flushed,
struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
@@ -498,9 +507,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
bch2_extent_ptr_to_bp(c, btree, level,
k, p, &bucket_pos, &bp);
- ret = check_bp_exists(trans, bucket_pos, bp, k,
- bucket_start, bucket_end,
- last_flushed);
+ ret = check_bp_exists(trans, s, bucket_pos, bp, k);
if (ret)
return ret;
}
@@ -509,10 +516,8 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
}
static int check_btree_root_to_backpointers(struct btree_trans *trans,
+ struct extents_to_bp_state *s,
enum btree_id btree_id,
- struct bpos bucket_start,
- struct bpos bucket_end,
- struct bkey_buf *last_flushed,
int *level)
{
struct bch_fs *c = trans->c;
@@ -536,9 +541,7 @@ retry:
*level = b->c.level;
k = bkey_i_to_s_c(&b->key);
- ret = check_extent_to_backpointers(trans, btree_id, b->c.level + 1,
- bucket_start, bucket_end,
- last_flushed, k);
+ ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -559,7 +562,7 @@ static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
si_meminfo(&i);
mem_bytes = i.totalram * i.mem_unit;
- return div_u64(mem_bytes >> 1, btree_bytes(c));
+ return div_u64(mem_bytes >> 1, c->opts.btree_node_size);
}
static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
@@ -610,43 +613,35 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
}
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
- struct bpos bucket_start,
- struct bpos bucket_end)
+ struct extents_to_bp_state *s)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
- enum btree_id btree_id;
- struct bkey_s_c k;
- struct bkey_buf last_flushed;
int ret = 0;
- bch2_bkey_buf_init(&last_flushed);
- bkey_init(&last_flushed.k->k);
-
- for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
+ for (enum btree_id btree_id = 0;
+ btree_id < btree_id_nr_alive(c);
+ btree_id++) {
int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
ret = commit_do(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
- check_btree_root_to_backpointers(trans, btree_id,
- bucket_start, bucket_end,
- &last_flushed, &level));
+ check_btree_root_to_backpointers(trans, s, btree_id, &level));
if (ret)
return ret;
while (level >= depth) {
+ struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
level,
BTREE_ITER_PREFETCH);
while (1) {
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
+
+ struct bkey_s_c k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;
ret = bkey_err(k) ?:
- check_extent_to_backpointers(trans, btree_id, level,
- bucket_start, bucket_end,
- &last_flushed, k) ?:
+ check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
@@ -668,7 +663,6 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
}
}
- bch2_bkey_buf_exit(&last_flushed, c);
return 0;
}
@@ -731,37 +725,43 @@ static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct bpos start = POS_MIN, end;
+ struct extents_to_bp_state s = { .bucket_start = POS_MIN };
int ret;
+ bch2_bkey_buf_init(&s.last_flushed);
+ bkey_init(&s.last_flushed.k->k);
+
while (1) {
- ret = bch2_get_alloc_in_memory_pos(trans, start, &end);
+ ret = bch2_get_alloc_in_memory_pos(trans, s.bucket_start, &s.bucket_end);
if (ret)
break;
- if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
+ if ( bpos_eq(s.bucket_start, POS_MIN) &&
+ !bpos_eq(s.bucket_end, SPOS_MAX))
bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
__func__, btree_nodes_fit_in_ram(c));
- if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
+ if (!bpos_eq(s.bucket_start, POS_MIN) ||
+ !bpos_eq(s.bucket_end, SPOS_MAX)) {
struct printbuf buf = PRINTBUF;
prt_str(&buf, "check_extents_to_backpointers(): ");
- bch2_bpos_to_text(&buf, start);
+ bch2_bpos_to_text(&buf, s.bucket_start);
prt_str(&buf, "-");
- bch2_bpos_to_text(&buf, end);
+ bch2_bpos_to_text(&buf, s.bucket_end);
bch_verbose(c, "%s", buf.buf);
printbuf_exit(&buf);
}
- ret = bch2_check_extents_to_backpointers_pass(trans, start, end);
- if (ret || bpos_eq(end, SPOS_MAX))
+ ret = bch2_check_extents_to_backpointers_pass(trans, &s);
+ if (ret || bpos_eq(s.bucket_end, SPOS_MAX))
break;
- start = bpos_successor(end);
+ s.bucket_start = bpos_successor(s.bucket_end);
}
bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&s.last_flushed, c);
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 737e2396ade7..327365a9feac 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -2,6 +2,7 @@
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
+#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index dac383e37181..69d0d60d50e3 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -1204,11 +1204,6 @@ static inline unsigned block_sectors(const struct bch_fs *c)
return c->opts.block_size >> 9;
}
-static inline size_t btree_sectors(const struct bch_fs *c)
-{
- return c->opts.btree_node_size >> 9;
-}
-
static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
{
return c->btree_key_cache_btrees & (1U << btree);
@@ -1254,6 +1249,18 @@ static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
return stdio;
}
+static inline unsigned metadata_replicas_required(struct bch_fs *c)
+{
+ return min(c->opts.metadata_replicas,
+ c->opts.metadata_replicas_required);
+}
+
+static inline unsigned data_replicas_required(struct bch_fs *c)
+{
+ return min(c->opts.data_replicas,
+ c->opts.data_replicas_required);
+}
+
#define BKEY_PADDED_ONSTACK(key, pad) \
struct { struct bkey_i key; __u64 key ## _pad[pad]; }
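
The new helpers clamp the *_replicas_required options to the corresponding *_replicas count, so a configuration demanding more required copies than will ever be written cannot wedge every write. The arithmetic is just a min(), e.g.:

    #include <stdio.h>

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

    int main(void)
    {
        unsigned metadata_replicas = 2, metadata_replicas_required = 3;

        /* Misconfigured: requiring 3 copies when only 2 will be written.
         * The clamp reduces "required" to 2, so writes can still succeed. */
        printf("required = %u\n",
               min_u(metadata_replicas, metadata_replicas_required));
        return 0;
    }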
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 0d5ac4184fbc..0668b682a21c 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -417,600 +417,12 @@ struct bch_set {
struct bch_val v;
};
-/* Extents */
-
-/*
- * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
- * preceded by checksum/compression information (bch_extent_crc32 or
- * bch_extent_crc64).
- *
- * One major determining factor in the format of extents is how we handle and
- * represent extents that have been partially overwritten and thus trimmed:
- *
- * If an extent is not checksummed or compressed, when the extent is trimmed we
- * don't have to remember the extent we originally allocated and wrote: we can
- * merely adjust ptr->offset to point to the start of the data that is currently
- * live. The size field in struct bkey records the current (live) size of the
- * extent, and is also used to mean "size of region on disk that we point to" in
- * this case.
- *
- * Thus an extent that is not checksummed or compressed will consist only of a
- * list of bch_extent_ptrs, with none of the fields in
- * bch_extent_crc32/bch_extent_crc64.
- *
- * When an extent is checksummed or compressed, it's not possible to read only
- * the data that is currently live: we have to read the entire extent that was
- * originally written, and then return only the part of the extent that is
- * currently live.
- *
- * Thus, in addition to the current size of the extent in struct bkey, we need
- * to store the size of the originally allocated space - this is the
- * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
- * when the extent is trimmed, instead of modifying the offset field of the
- * pointer, we keep a second smaller offset field - "offset into the original
- * extent of the currently live region".
- *
- * The other major determining factor is replication and data migration:
- *
- * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
- * write, we will initially write all the replicas in the same format, with the
- * same checksum type and compression format - however, when copygc runs later (or
- * tiering/cache promotion, anything that moves data), it is not in general
- * going to rewrite all the pointers at once - one of the replicas may be in a
- * bucket on one device that has very little fragmentation while another lives
- * in a bucket that has become heavily fragmented, and thus is being rewritten
- * sooner than the rest.
- *
- * Thus it will only move a subset of the pointers (or in the case of
- * tiering/cache promotion perhaps add a single pointer without dropping any
- * current pointers), and if the extent has been partially overwritten it must
- * write only the currently live portion (or copygc would not be able to reduce
- * fragmentation!) - which necessitates a different bch_extent_crc format for
- * the new pointer.
- *
- * But in the interests of space efficiency, we don't want to store one
- * bch_extent_crc for each pointer if we don't have to.
- *
- * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
- * bch_extent_ptrs appended arbitrarily one after the other. We determine the
- * type of a given entry with a scheme similar to utf8 (except we're encoding a
- * type, not a size), encoding the type in the position of the first set bit:
- *
- * bch_extent_crc32 - 0b1
- * bch_extent_ptr - 0b10
- * bch_extent_crc64 - 0b100
- *
- * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
- * bch_extent_crc64 is the least constrained).
- *
- * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
- * until the next bch_extent_crc32/64.
- *
- * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
- * is neither checksummed nor compressed.
- */
-
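
The comment removed above describes a UTF-8-like self-describing framing: an entry's type is the position of the lowest set bit in its leading bits (crc32 = 0b1, ptr = 0b10, crc64 = 0b100), so the most space-starved entry gets the shortest tag. A sketch of decoding that scheme with a count-trailing-zeros builtin (illustrative; __builtin_ctzl is a GCC/Clang builtin, where the kernel would use __ffs()):

    #include <stdio.h>

    enum entry_type { ENTRY_crc32, ENTRY_ptr, ENTRY_crc64, ENTRY_NR };

    /* The type is encoded in the position of the first (lowest) set bit:
     * 0b1 -> crc32, 0b10 -> ptr, 0b100 -> crc64, ... */
    static enum entry_type entry_type(unsigned long first_word)
    {
        return (enum entry_type)__builtin_ctzl(first_word);
    }

    int main(void)
    {
        /* Low bits carry the type tag; higher bits are entry payload. */
        unsigned long crc32_entry = 0x1234UL << 1 | 0x1;  /* tag 0b1  */
        unsigned long ptr_entry   = 0xabcdUL << 2 | 0x2;  /* tag 0b10 */

        printf("types: %d %d\n",
               entry_type(crc32_entry), entry_type(ptr_entry));
        return 0;
    }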
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
__le64 lo;
__le64 hi;
} __packed __aligned(8);
-#define BCH_EXTENT_ENTRY_TYPES() \
- x(ptr, 0) \
- x(crc32, 1) \
- x(crc64, 2) \
- x(crc128, 3) \
- x(stripe_ptr, 4) \
- x(rebalance, 5)
-#define BCH_EXTENT_ENTRY_MAX 6
-
-enum bch_extent_entry_type {
-#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
- BCH_EXTENT_ENTRY_TYPES()
-#undef x
-};
-
-/* Compressed/uncompressed size are stored biased by 1: */
-struct bch_extent_crc32 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u32 type:2,
- _compressed_size:7,
- _uncompressed_size:7,
- offset:7,
- _unused:1,
- csum_type:4,
- compression_type:4;
- __u32 csum;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u32 csum;
- __u32 compression_type:4,
- csum_type:4,
- _unused:1,
- offset:7,
- _uncompressed_size:7,
- _compressed_size:7,
- type:2;
-#endif
-} __packed __aligned(8);
-
-#define CRC32_SIZE_MAX (1U << 7)
-#define CRC32_NONCE_MAX 0
-
-struct bch_extent_crc64 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:3,
- _compressed_size:9,
- _uncompressed_size:9,
- offset:9,
- nonce:10,
- csum_type:4,
- compression_type:4,
- csum_hi:16;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 csum_hi:16,
- compression_type:4,
- csum_type:4,
- nonce:10,
- offset:9,
- _uncompressed_size:9,
- _compressed_size:9,
- type:3;
-#endif
- __u64 csum_lo;
-} __packed __aligned(8);
-
-#define CRC64_SIZE_MAX (1U << 9)
-#define CRC64_NONCE_MAX ((1U << 10) - 1)
-
-struct bch_extent_crc128 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:4,
- _compressed_size:13,
- _uncompressed_size:13,
- offset:13,
- nonce:13,
- csum_type:4,
- compression_type:4;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 compression_type:4,
- csum_type:4,
- nonce:13,
- offset:13,
- _uncompressed_size:13,
- _compressed_size:13,
- type:4;
-#endif
- struct bch_csum csum;
-} __packed __aligned(8);
-
-#define CRC128_SIZE_MAX (1U << 13)
-#define CRC128_NONCE_MAX ((1U << 13) - 1)
-
-/*
- * @reservation - pointer hasn't been written to, just reserved
- */
-struct bch_extent_ptr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:1,
- cached:1,
- unused:1,
- unwritten:1,
- offset:44, /* 8 petabytes */
- dev:8,
- gen:8;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 gen:8,
- dev:8,
- offset:44,
- unwritten:1,
- unused:1,
- cached:1,
- type:1;
-#endif
-} __packed __aligned(8);
-
-struct bch_extent_stripe_ptr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:5,
- block:8,
- redundancy:4,
- idx:47;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 idx:47,
- redundancy:4,
- block:8,
- type:5;
-#endif
-};
-
-struct bch_extent_rebalance {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:6,
- unused:34,
- compression:8, /* enum bch_compression_opt */
- target:16;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- __u64 target:16,
- compression:8,
- unused:34,
- type:6;
-#endif
-};
-
-union bch_extent_entry {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
- unsigned long type;
-#elif __BITS_PER_LONG == 32
- struct {
- unsigned long pad;
- unsigned long type;
- };
-#else
-#error edit for your odd byteorder.
-#endif
-
-#define x(f, n) struct bch_extent_##f f;
- BCH_EXTENT_ENTRY_TYPES()
-#undef x
-};
-
-struct bch_btree_ptr {
- struct bch_val v;
-
- __u64 _data[0];
- struct bch_extent_ptr start[];
-} __packed __aligned(8);
-
-struct bch_btree_ptr_v2 {
- struct bch_val v;
-
- __u64 mem_ptr;
- __le64 seq;
- __le16 sectors_written;
- __le16 flags;
- struct bpos min_key;
- __u64 _data[0];
- struct bch_extent_ptr start[];
-} __packed __aligned(8);
-
-LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
-
-struct bch_extent {
- struct bch_val v;
-
- __u64 _data[0];
- union bch_extent_entry start[];
-} __packed __aligned(8);
-
-struct bch_reservation {
- struct bch_val v;
-
- __le32 generation;
- __u8 nr_replicas;
- __u8 pad[3];
-} __packed __aligned(8);
-
-/* Maximum size (in u64s) a single pointer could be: */
-#define BKEY_EXTENT_PTR_U64s_MAX\
- ((sizeof(struct bch_extent_crc128) + \
- sizeof(struct bch_extent_ptr)) / sizeof(__u64))
-
-/* Maximum possible size of an entire extent value: */
-#define BKEY_EXTENT_VAL_U64s_MAX \
- (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
-
-/* Maximum possible size of an entire extent, key + value: */
-#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
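/*
 * Plugging in the definitions above: sizeof(struct bch_extent_crc128) == 24
 * and sizeof(struct bch_extent_ptr) == 8, so BKEY_EXTENT_PTR_U64s_MAX ==
 * (24 + 8) / 8 == 4 u64s - the largest checksum entry plus the pointer it
 * applies to.
 */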
-
-/* Btree pointers don't carry around checksums: */
-#define BKEY_BTREE_PTR_VAL_U64s_MAX \
- ((sizeof(struct bch_btree_ptr_v2) + \
- sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
-#define BKEY_BTREE_PTR_U64s_MAX \
- (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
-
-/* Inodes */
-
-#define BLOCKDEV_INODE_MAX 4096
-
-#define BCACHEFS_ROOT_INO 4096
-
-struct bch_inode {
- struct bch_val v;
-
- __le64 bi_hash_seed;
- __le32 bi_flags;
- __le16 bi_mode;
- __u8 fields[];
-} __packed __aligned(8);
-
-struct bch_inode_v2 {
- struct bch_val v;
-
- __le64 bi_journal_seq;
- __le64 bi_hash_seed;
- __le64 bi_flags;
- __le16 bi_mode;
- __u8 fields[];
-} __packed __aligned(8);
-
-struct bch_inode_v3 {
- struct bch_val v;
-
- __le64 bi_journal_seq;
- __le64 bi_hash_seed;
- __le64 bi_flags;
- __le64 bi_sectors;
- __le64 bi_size;
- __le64 bi_version;
- __u8 fields[];
-} __packed __aligned(8);
-
-#define INODEv3_FIELDS_START_INITIAL 6
-#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
-
-struct bch_inode_generation {
- struct bch_val v;
-
- __le32 bi_generation;
- __le32 pad;
-} __packed __aligned(8);
-
-/*
- * bi_subvol and bi_parent_subvol are only set for subvolume roots:
- */
-
-#define BCH_INODE_FIELDS_v2() \
- x(bi_atime, 96) \
- x(bi_ctime, 96) \
- x(bi_mtime, 96) \
- x(bi_otime, 96) \
- x(bi_size, 64) \
- x(bi_sectors, 64) \
- x(bi_uid, 32) \
- x(bi_gid, 32) \
- x(bi_nlink, 32) \
- x(bi_generation, 32) \
- x(bi_dev, 32) \
- x(bi_data_checksum, 8) \
- x(bi_compression, 8) \
- x(bi_project, 32) \
- x(bi_background_compression, 8) \
- x(bi_data_replicas, 8) \
- x(bi_promote_target, 16) \
- x(bi_foreground_target, 16) \
- x(bi_background_target, 16) \
- x(bi_erasure_code, 16) \
- x(bi_fields_set, 16) \
- x(bi_dir, 64) \
- x(bi_dir_offset, 64) \
- x(bi_subvol, 32) \
- x(bi_parent_subvol, 32)
-
-#define BCH_INODE_FIELDS_v3() \
- x(bi_atime, 96) \
- x(bi_ctime, 96) \
- x(bi_mtime, 96) \
- x(bi_otime, 96) \
- x(bi_uid, 32) \
- x(bi_gid, 32) \
- x(bi_nlink, 32) \
- x(bi_generation, 32) \
- x(bi_dev, 32) \
- x(bi_data_checksum, 8) \
- x(bi_compression, 8) \
- x(bi_project, 32) \
- x(bi_background_compression, 8) \
- x(bi_data_replicas, 8) \
- x(bi_promote_target, 16) \
- x(bi_foreground_target, 16) \
- x(bi_background_target, 16) \
- x(bi_erasure_code, 16) \
- x(bi_fields_set, 16) \
- x(bi_dir, 64) \
- x(bi_dir_offset, 64) \
- x(bi_subvol, 32) \
- x(bi_parent_subvol, 32) \
- x(bi_nocow, 8)
-
-/* subset of BCH_INODE_FIELDS */
-#define BCH_INODE_OPTS() \
- x(data_checksum, 8) \
- x(compression, 8) \
- x(project, 32) \
- x(background_compression, 8) \
- x(data_replicas, 8) \
- x(promote_target, 16) \
- x(foreground_target, 16) \
- x(background_target, 16) \
- x(erasure_code, 16) \
- x(nocow, 8)
-
-enum inode_opt_id {
-#define x(name, ...) \
- Inode_opt_##name,
- BCH_INODE_OPTS()
-#undef x
- Inode_opt_nr,
-};
-
-#define BCH_INODE_FLAGS() \
- x(sync, 0) \
- x(immutable, 1) \
- x(append, 2) \
- x(nodump, 3) \
- x(noatime, 4) \
- x(i_size_dirty, 5) \
- x(i_sectors_dirty, 6) \
- x(unlinked, 7) \
- x(backptr_untrusted, 8)
-
-/* bits 20+ reserved for packed fields below: */
-
-enum bch_inode_flags {
-#define x(t, n) BCH_INODE_##t = 1U << n,
- BCH_INODE_FLAGS()
-#undef x
-};
-
-enum __bch_inode_flags {
-#define x(t, n) __BCH_INODE_##t = n,
- BCH_INODE_FLAGS()
-#undef x
-};
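/*
 * For illustration: the same list generates both forms, e.g.
 * BCH_INODE_sync == 1U << 0 while __BCH_INODE_sync == 0 - the mask form
 * for flag tests, the bit-index form for set_bit()-style helpers.
 */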
-
-LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24);
-LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 31);
-LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
-
-LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
-LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);
-
-LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
-LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);
-
-LE64_BITMASK(INODEv3_FIELDS_START,
- struct bch_inode_v3, bi_flags, 31, 36);
-LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52);
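/*
 * A minimal sketch (not from the patch): LE64_BITMASK(NAME, type, field, lo, hi)
 * is assumed to generate NAME()/SET_NAME() accessors over bits [lo, hi) of a
 * little-endian field; INODEv3_MODE above would then read as:
 */
static inline __u64 INODEv3_MODE_sketch(const struct bch_inode_v3 *inode)
{
	return (le64_to_cpu(inode->bi_flags) >> 36) & ((1ULL << (52 - 36)) - 1);
}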
-
-/* Dirents */
-
-/*
- * Dirents (and xattrs) have to implement string lookups; since our b-tree
- * doesn't support arbitrary-length strings for the key, we instead index by a
- * 64-bit hash (currently a truncated SHA-1) of the string, stored in the offset
- * field of the key, using linear probing to resolve hash collisions. This also
- * provides us with the readdir cookie POSIX requires.
- *
- * Linear probing requires us to use whiteouts for deletions, in the event of a
- * collision:
- */
-
-struct bch_dirent {
- struct bch_val v;
-
- /* Target inode number: */
- union {
- __le64 d_inum;
- struct { /* DT_SUBVOL */
- __le32 d_child_subvol;
- __le32 d_parent_subvol;
- };
- };
-
- /*
- * Copy of mode bits 12-15 from the target inode - so userspace can get
- * the filetype without having to do a stat()
- */
- __u8 d_type;
-
- __u8 d_name[];
-} __packed __aligned(8);
-
-#define DT_SUBVOL 16
-#define BCH_DT_MAX 17
-
-#define BCH_NAME_MAX 512
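/*
 * A self-contained model of the probing scheme described above - deletions
 * leave whiteouts so later colliding entries stay reachable. The toy table
 * and names are illustrative, not the real API:
 */
enum slot_state { SLOT_EMPTY, SLOT_WHITEOUT, SLOT_USED };

struct toy_slot {
	enum slot_state	state;
	u64		hash;	/* stands in for the key's offset field */
};

static int toy_dirent_lookup(const struct toy_slot *tbl, unsigned n, u64 hash)
{
	for (unsigned i = hash % n, probes = 0;
	     tbl[i].state != SLOT_EMPTY && probes < n;
	     i = (i + 1) % n, probes++) {
		if (tbl[i].state == SLOT_USED && tbl[i].hash == hash)
			return i;	/* found; caller still compares names */
		/* whiteout or colliding hash: keep probing */
	}
	return -1;			/* empty slot reached: not present */
}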
-
-/* Xattrs */
-
-#define KEY_TYPE_XATTR_INDEX_USER 0
-#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS 1
-#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
-#define KEY_TYPE_XATTR_INDEX_TRUSTED 3
-#define KEY_TYPE_XATTR_INDEX_SECURITY 4
-
-struct bch_xattr {
- struct bch_val v;
- __u8 x_type;
- __u8 x_name_len;
- __le16 x_val_len;
- __u8 x_name[];
-} __packed __aligned(8);
-
-/* Bucket/allocation information: */
-
-struct bch_alloc {
- struct bch_val v;
- __u8 fields;
- __u8 gen;
- __u8 data[];
-} __packed __aligned(8);
-
-#define BCH_ALLOC_FIELDS_V1() \
- x(read_time, 16) \
- x(write_time, 16) \
- x(data_type, 8) \
- x(dirty_sectors, 16) \
- x(cached_sectors, 16) \
- x(oldest_gen, 8) \
- x(stripe, 32) \
- x(stripe_redundancy, 8)
-
-enum {
-#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
- BCH_ALLOC_FIELDS_V1()
-#undef x
-};
-
-struct bch_alloc_v2 {
- struct bch_val v;
- __u8 nr_fields;
- __u8 gen;
- __u8 oldest_gen;
- __u8 data_type;
- __u8 data[];
-} __packed __aligned(8);
-
-#define BCH_ALLOC_FIELDS_V2() \
- x(read_time, 64) \
- x(write_time, 64) \
- x(dirty_sectors, 32) \
- x(cached_sectors, 32) \
- x(stripe, 32) \
- x(stripe_redundancy, 8)
-
-struct bch_alloc_v3 {
- struct bch_val v;
- __le64 journal_seq;
- __le32 flags;
- __u8 nr_fields;
- __u8 gen;
- __u8 oldest_gen;
- __u8 data_type;
- __u8 data[];
-} __packed __aligned(8);
-
-LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
-LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
-
-struct bch_alloc_v4 {
- struct bch_val v;
- __u64 journal_seq;
- __u32 flags;
- __u8 gen;
- __u8 oldest_gen;
- __u8 data_type;
- __u8 stripe_redundancy;
- __u32 dirty_sectors;
- __u32 cached_sectors;
- __u64 io_time[2];
- __u32 stripe;
- __u32 nr_external_backpointers;
- __u64 fragmentation_lru;
-} __packed __aligned(8);
-
-#define BCH_ALLOC_V4_U64s_V0 6
-#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(__u64))
-
-BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
-BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
-BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
-BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
-
-#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX 40
-
struct bch_backpointer {
struct bch_val v;
__u8 btree_id;
@@ -1021,154 +433,6 @@ struct bch_backpointer {
struct bpos pos;
} __packed __aligned(8);
-#define KEY_TYPE_BUCKET_GENS_BITS 8
-#define KEY_TYPE_BUCKET_GENS_NR (1U << KEY_TYPE_BUCKET_GENS_BITS)
-#define KEY_TYPE_BUCKET_GENS_MASK (KEY_TYPE_BUCKET_GENS_NR - 1)
-
-struct bch_bucket_gens {
- struct bch_val v;
- u8 gens[KEY_TYPE_BUCKET_GENS_NR];
-} __packed __aligned(8);
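/*
 * A minimal sketch (not from the patch): with 256 gens per key, a bucket's
 * generation plausibly lives at key offset (bucket >> KEY_TYPE_BUCKET_GENS_BITS),
 * array index (bucket & KEY_TYPE_BUCKET_GENS_MASK) - the latter being:
 */
static inline u8 bucket_gen_sketch(const struct bch_bucket_gens *g, u64 bucket)
{
	return g->gens[bucket & KEY_TYPE_BUCKET_GENS_MASK];
}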
-
-/* Quotas: */
-
-enum quota_types {
- QTYP_USR = 0,
- QTYP_GRP = 1,
- QTYP_PRJ = 2,
- QTYP_NR = 3,
-};
-
-enum quota_counters {
- Q_SPC = 0,
- Q_INO = 1,
- Q_COUNTERS = 2,
-};
-
-struct bch_quota_counter {
- __le64 hardlimit;
- __le64 softlimit;
-};
-
-struct bch_quota {
- struct bch_val v;
- struct bch_quota_counter c[Q_COUNTERS];
-} __packed __aligned(8);
-
-/* Erasure coding */
-
-struct bch_stripe {
- struct bch_val v;
- __le16 sectors;
- __u8 algorithm;
- __u8 nr_blocks;
- __u8 nr_redundant;
-
- __u8 csum_granularity_bits;
- __u8 csum_type;
- __u8 pad;
-
- struct bch_extent_ptr ptrs[];
-} __packed __aligned(8);
-
-/* Reflink: */
-
-struct bch_reflink_p {
- struct bch_val v;
- __le64 idx;
- /*
- * A reflink pointer might point to an indirect extent which is then
- * later split (by copygc or rebalance). If we only pointed to part of
- * the original indirect extent, and then one of the fragments is
- * outside the range we point to, we'd leak a refcount: so when creating
- * reflink pointers, we need to store pad values to remember the full
- * range we were taking a reference on.
- */
- __le32 front_pad;
- __le32 back_pad;
-} __packed __aligned(8);
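/*
 * Example of the padding described above: if a pointer covers sectors
 * [8, 24) of an indirect extent but the original reference was taken on
 * [0, 32), then front_pad = 8 and back_pad = 8 preserve the full referenced
 * range, so the refcount is dropped correctly even after the indirect
 * extent is split.
 */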
-
-struct bch_reflink_v {
- struct bch_val v;
- __le64 refcount;
- union bch_extent_entry start[0];
- __u64 _data[];
-} __packed __aligned(8);
-
-struct bch_indirect_inline_data {
- struct bch_val v;
- __le64 refcount;
- u8 data[];
-};
-
-/* Inline data */
-
-struct bch_inline_data {
- struct bch_val v;
- u8 data[];
-};
-
-/* Subvolumes: */
-
-#define SUBVOL_POS_MIN POS(0, 1)
-#define SUBVOL_POS_MAX POS(0, S32_MAX)
-#define BCACHEFS_ROOT_SUBVOL 1
-
-struct bch_subvolume {
- struct bch_val v;
- __le32 flags;
- __le32 snapshot;
- __le64 inode;
- /*
- * Snapshot subvolumes form a tree, separate from the snapshot nodes
- * tree - if this subvolume is a snapshot, this is the ID of the
- * subvolume it was created from:
- */
- __le32 parent;
- __le32 pad;
- bch_le128 otime;
-};
-
-LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1)
-/*
- * We need to know whether a subvolume is a snapshot so we know whether we can
- * delete it (or whether it should just be rm -rf'd)
- */
-LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2)
-LE32_BITMASK(BCH_SUBVOLUME_UNLINKED, struct bch_subvolume, flags, 2, 3)
-
-/* Snapshots */
-
-struct bch_snapshot {
- struct bch_val v;
- __le32 flags;
- __le32 parent;
- __le32 children[2];
- __le32 subvol;
- /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
- __le32 tree;
- __le32 depth;
- __le32 skip[3];
-};
-
-LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
-
-/* True if a subvolume points to this snapshot node: */
-LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
-
-/*
- * Snapshot trees:
- *
- * The snapshot_trees btree gives us a persistent identifier for each tree of
- * bch_snapshot nodes, and allows us to record and easily find the root/master
- * subvolume that other snapshots were created from:
- */
-struct bch_snapshot_tree {
- struct bch_val v;
- __le32 master_subvol;
- __le32 root_snapshot;
-};
-
/* LRU btree: */
struct bch_lru {
@@ -1178,33 +442,6 @@ struct bch_lru {
#define LRU_ID_STRIPES (1U << 16)
-/* Logged operations btree: */
-
-struct bch_logged_op_truncate {
- struct bch_val v;
- __le32 subvol;
- __le32 pad;
- __le64 inum;
- __le64 new_i_size;
-};
-
-enum logged_op_finsert_state {
- LOGGED_OP_FINSERT_start,
- LOGGED_OP_FINSERT_shift_extents,
- LOGGED_OP_FINSERT_finish,
-};
-
-struct bch_logged_op_finsert {
- struct bch_val v;
- __u8 state;
- __u8 pad[3];
- __le32 subvol;
- __le64 inum;
- __le64 dst_offset;
- __le64 src_offset;
- __le64 pos;
-};
-
/* Optional/variable size superblock sections: */
struct bch_sb_field {
@@ -1230,6 +467,19 @@ struct bch_sb_field {
x(ext, 13) \
x(downgrade, 14)
+#include "alloc_background_format.h"
+#include "extents_format.h"
+#include "reflink_format.h"
+#include "ec_format.h"
+#include "inode_format.h"
+#include "dirent_format.h"
+#include "xattr_format.h"
+#include "quota_format.h"
+#include "logged_ops_format.h"
+#include "snapshot_format.h"
+#include "subvolume_format.h"
+#include "sb-counters_format.h"
+
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
BCH_SB_FIELDS()
@@ -1465,23 +715,6 @@ struct bch_sb_field_replicas {
struct bch_replicas_entry_v1 entries[];
} __packed __aligned(8);
-/* BCH_SB_FIELD_quota: */
-
-struct bch_sb_quota_counter {
- __le32 timelimit;
- __le32 warnlimit;
-};
-
-struct bch_sb_quota_type {
- __le64 flags;
- struct bch_sb_quota_counter c[Q_COUNTERS];
-};
-
-struct bch_sb_field_quota {
- struct bch_sb_field field;
- struct bch_sb_quota_type q[QTYP_NR];
-} __packed __aligned(8);
-
/* BCH_SB_FIELD_disk_groups: */
#define BCH_SB_LABEL_SIZE 32
@@ -1500,101 +733,6 @@ struct bch_sb_field_disk_groups {
struct bch_disk_group entries[];
} __packed __aligned(8);
-/* BCH_SB_FIELD_counters */
-
-#define BCH_PERSISTENT_COUNTERS() \
- x(io_read, 0) \
- x(io_write, 1) \
- x(io_move, 2) \
- x(bucket_invalidate, 3) \
- x(bucket_discard, 4) \
- x(bucket_alloc, 5) \
- x(bucket_alloc_fail, 6) \
- x(btree_cache_scan, 7) \
- x(btree_cache_reap, 8) \
- x(btree_cache_cannibalize, 9) \
- x(btree_cache_cannibalize_lock, 10) \
- x(btree_cache_cannibalize_lock_fail, 11) \
- x(btree_cache_cannibalize_unlock, 12) \
- x(btree_node_write, 13) \
- x(btree_node_read, 14) \
- x(btree_node_compact, 15) \
- x(btree_node_merge, 16) \
- x(btree_node_split, 17) \
- x(btree_node_rewrite, 18) \
- x(btree_node_alloc, 19) \
- x(btree_node_free, 20) \
- x(btree_node_set_root, 21) \
- x(btree_path_relock_fail, 22) \
- x(btree_path_upgrade_fail, 23) \
- x(btree_reserve_get_fail, 24) \
- x(journal_entry_full, 25) \
- x(journal_full, 26) \
- x(journal_reclaim_finish, 27) \
- x(journal_reclaim_start, 28) \
- x(journal_write, 29) \
- x(read_promote, 30) \
- x(read_bounce, 31) \
- x(read_split, 33) \
- x(read_retry, 32) \
- x(read_reuse_race, 34) \
- x(move_extent_read, 35) \
- x(move_extent_write, 36) \
- x(move_extent_finish, 37) \
- x(move_extent_fail, 38) \
- x(move_extent_start_fail, 39) \
- x(copygc, 40) \
- x(copygc_wait, 41) \
- x(gc_gens_end, 42) \
- x(gc_gens_start, 43) \
- x(trans_blocked_journal_reclaim, 44) \
- x(trans_restart_btree_node_reused, 45) \
- x(trans_restart_btree_node_split, 46) \
- x(trans_restart_fault_inject, 47) \
- x(trans_restart_iter_upgrade, 48) \
- x(trans_restart_journal_preres_get, 49) \
- x(trans_restart_journal_reclaim, 50) \
- x(trans_restart_journal_res_get, 51) \
- x(trans_restart_key_cache_key_realloced, 52) \
- x(trans_restart_key_cache_raced, 53) \
- x(trans_restart_mark_replicas, 54) \
- x(trans_restart_mem_realloced, 55) \
- x(trans_restart_memory_allocation_failure, 56) \
- x(trans_restart_relock, 57) \
- x(trans_restart_relock_after_fill, 58) \
- x(trans_restart_relock_key_cache_fill, 59) \
- x(trans_restart_relock_next_node, 60) \
- x(trans_restart_relock_parent_for_fill, 61) \
- x(trans_restart_relock_path, 62) \
- x(trans_restart_relock_path_intent, 63) \
- x(trans_restart_too_many_iters, 64) \
- x(trans_restart_traverse, 65) \
- x(trans_restart_upgrade, 66) \
- x(trans_restart_would_deadlock, 67) \
- x(trans_restart_would_deadlock_write, 68) \
- x(trans_restart_injected, 69) \
- x(trans_restart_key_cache_upgrade, 70) \
- x(trans_traverse_all, 71) \
- x(transaction_commit, 72) \
- x(write_super, 73) \
- x(trans_restart_would_deadlock_recursion_limit, 74) \
- x(trans_restart_write_buffer_flush, 75) \
- x(trans_restart_split_race, 76) \
- x(write_buffer_flush_slowpath, 77) \
- x(write_buffer_flush_sync, 78)
-
-enum bch_persistent_counters {
-#define x(t, n, ...) BCH_COUNTER_##t,
- BCH_PERSISTENT_COUNTERS()
-#undef x
- BCH_COUNTER_NR
-};
-
-struct bch_sb_field_counters {
- struct bch_sb_field field;
- __le64 d[];
-};
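/*
 * For illustration: d[] is indexed by enum bch_persistent_counters, so this
 * superblock field is simply BCH_COUNTER_NR little-endian u64s following
 * the common bch_sb_field header.
 */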
-
/*
* On clean shutdown, store btree roots and current journal sequence number in
* the superblock:
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index abdb05507d16..76e79a15ba08 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -33,7 +33,7 @@ void bch2_bkey_packed_to_binary_text(struct printbuf *out,
next_key_bits -= 64;
}
- bch2_prt_u64_binary(out, v, min(word_bits, nr_key_bits));
+ bch2_prt_u64_base2_nbits(out, v, min(word_bits, nr_key_bits));
if (!next_key_bits)
break;
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 761f5e33b1e6..5e52684764eb 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -63,8 +63,17 @@ static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k,
return 0;
}
+static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_cookie ck = bkey_s_c_to_cookie(k);
+
+ prt_printf(out, "%llu", le64_to_cpu(ck.v->cookie));
+}
+
#define bch2_bkey_ops_cookie ((struct bkey_ops) { \
.key_invalid = key_type_cookie_invalid, \
+ .val_to_text = key_type_cookie_to_text, \
.min_val_size = 8, \
})
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index ee82283722b7..03efe8ee565a 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -83,9 +83,10 @@ enum btree_update_flags {
__BTREE_TRIGGER_NORUN,
__BTREE_TRIGGER_TRANSACTIONAL,
+ __BTREE_TRIGGER_ATOMIC,
+ __BTREE_TRIGGER_GC,
__BTREE_TRIGGER_INSERT,
__BTREE_TRIGGER_OVERWRITE,
- __BTREE_TRIGGER_GC,
__BTREE_TRIGGER_BUCKET_INVALIDATE,
};
@@ -107,6 +108,10 @@ enum btree_update_flags {
* causing us to go emergency read-only)
*/
#define BTREE_TRIGGER_TRANSACTIONAL (1U << __BTREE_TRIGGER_TRANSACTIONAL)
+#define BTREE_TRIGGER_ATOMIC (1U << __BTREE_TRIGGER_ATOMIC)
+
+/* We're in gc/fsck: running triggers to recalculate e.g. disk usage */
+#define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC)
/* @new is entering the btree */
#define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT)
@@ -114,9 +119,6 @@ enum btree_update_flags {
/* @old is leaving the btree */
#define BTREE_TRIGGER_OVERWRITE (1U << __BTREE_TRIGGER_OVERWRITE)
-/* We're in gc/fsck: running triggers to recalculate e.g. disk usage */
-#define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC)
-
/* signal from bucket invalidate path to alloc trigger */
#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 74bf8eb90a4c..3fd1085b6c61 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -720,7 +720,7 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
struct bkey_i min_key, max_key;
- unsigned j, cacheline = 1;
+ unsigned cacheline = 1;
t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
bset_ro_tree_capacity(b, t));
@@ -823,13 +823,12 @@ void bch2_bset_init_first(struct btree *b, struct bset *i)
set_btree_bset(b, t, i);
}
-void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
- struct btree_node_entry *bne)
+void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
{
struct bset *i = &bne->keys;
struct bset_tree *t;
- BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
+ BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
BUG_ON(b->nsets >= MAX_BSETS);
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index 632c2b8c5460..79c77baaa383 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -264,8 +264,7 @@ static inline struct bset *bset_next_set(struct btree *b,
void bch2_btree_keys_init(struct btree *);
void bch2_bset_init_first(struct btree *, struct bset *);
-void bch2_bset_init_next(struct bch_fs *, struct btree *,
- struct btree_node_entry *);
+void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
void bch2_bset_insert(struct btree *, struct btree_node_iter *,
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 8e2488a4b58d..d7c81beac14a 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -60,7 +60,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
clear_btree_node_just_written(b);
- kvpfree(b->data, btree_bytes(c));
+ kvpfree(b->data, btree_buf_bytes(b));
b->data = NULL;
#ifdef __KERNEL__
kvfree(b->aux_data);
@@ -94,7 +94,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
BUG_ON(b->data || b->aux_data);
- b->data = kvpmalloc(btree_bytes(c), gfp);
+ b->data = kvpmalloc(btree_buf_bytes(b), gfp);
if (!b->data)
return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
#ifdef __KERNEL__
@@ -107,7 +107,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
b->aux_data = NULL;
#endif
if (!b->aux_data) {
- kvpfree(b->data, btree_bytes(c));
+ kvpfree(b->data, btree_buf_bytes(b));
b->data = NULL;
return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
}
@@ -126,7 +126,7 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
bkey_btree_ptr_init(&b->key);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
- b->byte_order = ilog2(btree_bytes(c));
+ b->byte_order = ilog2(c->opts.btree_node_size);
return b;
}
@@ -408,7 +408,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
if (c->verify_data)
list_move(&c->verify_data->list, &bc->live);
- kvpfree(c->verify_ondisk, btree_bytes(c));
+ kvpfree(c->verify_ondisk, c->opts.btree_node_size);
for (i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
@@ -1192,7 +1192,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
" failed unpacked %zu\n",
b->unpack_fn_len,
b->nr.live_u64s * sizeof(u64),
- btree_bytes(c) - sizeof(struct btree_node),
+ btree_buf_bytes(b) - sizeof(struct btree_node),
b->nr.live_u64s * 100 / btree_max_u64s(c),
b->sib_u64s[0],
b->sib_u64s[1],
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 4e1af5882052..6d33885fdbde 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -74,22 +74,27 @@ static inline bool btree_node_hashed(struct btree *b)
_iter = 0; _iter < (_tbl)->size; _iter++) \
rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
-static inline size_t btree_bytes(struct bch_fs *c)
+static inline size_t btree_buf_bytes(const struct btree *b)
{
- return c->opts.btree_node_size;
+ return 1UL << b->byte_order;
}
-static inline size_t btree_max_u64s(struct bch_fs *c)
+static inline size_t btree_buf_max_u64s(const struct btree *b)
{
- return (btree_bytes(c) - sizeof(struct btree_node)) / sizeof(u64);
+ return (btree_buf_bytes(b) - sizeof(struct btree_node)) / sizeof(u64);
}
-static inline size_t btree_pages(struct bch_fs *c)
+static inline size_t btree_max_u64s(const struct bch_fs *c)
{
- return btree_bytes(c) / PAGE_SIZE;
+ return (c->opts.btree_node_size - sizeof(struct btree_node)) / sizeof(u64);
}
-static inline unsigned btree_blocks(struct bch_fs *c)
+static inline size_t btree_sectors(const struct bch_fs *c)
+{
+ return c->opts.btree_node_size >> SECTOR_SHIFT;
+}
+
+static inline unsigned btree_blocks(const struct bch_fs *c)
{
return btree_sectors(c) >> c->block_bits;
}
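/*
 * The theme of this hunk: buffer size becomes a per-node property.
 * btree_buf_bytes(b) is 1UL << b->byte_order, where byte_order is seeded
 * from ilog2(c->opts.btree_node_size) at allocation (see the btree_cache.c
 * hunk above), instead of always reading the filesystem-wide option.
 */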
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 49b4ade758c3..1102995643b1 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -597,7 +597,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
p.ptr.gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
@@ -615,7 +615,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
p.ptr.gen, g->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
@@ -637,7 +637,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
p.ptr.gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
@@ -649,7 +649,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
p.ptr.gen, g->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
@@ -664,8 +664,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
"bucket %u:%zu different types of data in same bucket: %s, %s\n"
"while marking %s",
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_types[g->data_type],
- bch2_data_types[data_type],
+ bch2_data_type_str(g->data_type),
+ bch2_data_type_str(data_type),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
if (data_type == BCH_DATA_btree) {
@@ -1238,11 +1238,11 @@ static int bch2_gc_done(struct bch_fs *c,
for (i = 0; i < BCH_DATA_NR; i++) {
copy_dev_field(dev_usage_buckets_wrong,
- d[i].buckets, "%s buckets", bch2_data_types[i]);
+ d[i].buckets, "%s buckets", bch2_data_type_str(i));
copy_dev_field(dev_usage_sectors_wrong,
- d[i].sectors, "%s sectors", bch2_data_types[i]);
+ d[i].sectors, "%s sectors", bch2_data_type_str(i));
copy_dev_field(dev_usage_fragmented_wrong,
- d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+ d[i].fragmented, "%s fragmented", bch2_data_type_str(i));
}
}
@@ -1253,19 +1253,19 @@ static int bch2_gc_done(struct bch_fs *c,
bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
copy_fs_field(fs_usage_hidden_wrong,
- hidden, "hidden");
+ b.hidden, "hidden");
copy_fs_field(fs_usage_btree_wrong,
- btree, "btree");
+ b.btree, "btree");
if (!metadata_only) {
copy_fs_field(fs_usage_data_wrong,
- data, "data");
+ b.data, "data");
copy_fs_field(fs_usage_cached_wrong,
- cached, "cached");
+ b.cached, "cached");
copy_fs_field(fs_usage_reserved_wrong,
- reserved, "reserved");
+ b.reserved, "reserved");
copy_fs_field(fs_usage_nr_inodes_wrong,
- nr_inodes,"nr_inodes");
+ b.nr_inodes,"nr_inodes");
for (i = 0; i < BCH_REPLICAS_MAX; i++)
copy_fs_field(fs_usage_persistent_reserved_wrong,
@@ -1417,8 +1417,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
": got %s, should be %s",
iter->pos.inode, iter->pos.offset,
gc.gen,
- bch2_data_types[new.data_type],
- bch2_data_types[gc.data_type]))
+ bch2_data_type_str(new.data_type),
+ bch2_data_type_str(gc.data_type)))
new.data_type = gc.data_type;
#define copy_bucket_field(_errtype, _f) \
@@ -1428,7 +1428,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
": got %u, should be %u", \
iter->pos.inode, iter->pos.offset, \
gc.gen, \
- bch2_data_types[gc.data_type], \
+ bch2_data_type_str(gc.data_type), \
new._f, gc._f)) \
new._f = gc._f; \
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 33db48e2153f..aa9b6cbe3226 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -112,7 +112,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
unsigned flags = memalloc_nofs_save();
void *p;
- BUG_ON(size > btree_bytes(c));
+ BUG_ON(size > c->opts.btree_node_size);
*used_mempool = false;
p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
@@ -174,8 +174,8 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
- for (k = unwritten_whiteouts_start(c, b);
- k != unwritten_whiteouts_end(c, b);
+ for (k = unwritten_whiteouts_start(b);
+ k != unwritten_whiteouts_end(b);
k = bkey_p_next(k))
*--ptrs = k;
@@ -192,7 +192,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
verify_no_dups(b, new_whiteouts,
(void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
- memcpy_u64s(unwritten_whiteouts_start(c, b),
+ memcpy_u64s(unwritten_whiteouts_start(b),
new_whiteouts, b->whiteout_u64s);
btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
@@ -313,7 +313,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
}
bytes = sorting_entire_node
- ? btree_bytes(c)
+ ? btree_buf_bytes(b)
: __vstruct_bytes(struct btree_node, u64s);
out = btree_bounce_alloc(c, bytes, &used_mempool);
@@ -338,7 +338,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
if (sorting_entire_node) {
u64s = le16_to_cpu(out->keys.u64s);
- BUG_ON(bytes != btree_bytes(c));
+ BUG_ON(bytes != btree_buf_bytes(b));
/*
* Our temporary buffer is the same size as the btree node's
@@ -502,7 +502,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
bne = want_new_bset(c, b);
if (bne)
- bch2_bset_init_next(c, b, bne);
+ bch2_bset_init_next(b, bne);
bch2_btree_build_aux_trees(b);
@@ -1160,7 +1160,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
ptr_written, b->written);
} else {
for (bne = write_block(b);
- bset_byte_offset(b, bne) < btree_bytes(c);
+ bset_byte_offset(b, bne) < btree_buf_bytes(b);
bne = (void *) bne + block_bytes(c))
btree_err_on(bne->keys.seq == b->data->keys.seq &&
!bch2_journal_seq_is_blacklisted(c,
@@ -1172,7 +1172,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
"found bset signature after last bset");
}
- sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
+ sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
sorted->keys.u64s = 0;
set_btree_bset(b, b->set, &b->data->keys);
@@ -1188,7 +1188,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
BUG_ON(b->nr.live_u64s != u64s);
- btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
+ btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
if (updated_range)
bch2_btree_node_drop_keys_outside_node(b);
@@ -1284,7 +1284,7 @@ static void btree_node_read_work(struct work_struct *work)
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = rb->pick.ptr.offset;
- bio->bi_iter.bi_size = btree_bytes(c);
+ bio->bi_iter.bi_size = btree_buf_bytes(b);
if (rb->have_ioref) {
bio_set_dev(bio, ca->disk_sb.bdev);
@@ -1512,7 +1512,7 @@ fsck_err:
}
if (best >= 0) {
- memcpy(b->data, ra->buf[best], btree_bytes(c));
+ memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
} else {
ret = -1;
@@ -1578,7 +1578,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
for (i = 0; i < ra->nr; i++) {
ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
ra->bio[i] = bio_alloc_bioset(NULL,
- buf_pages(ra->buf[i], btree_bytes(c)),
+ buf_pages(ra->buf[i], btree_buf_bytes(b)),
REQ_OP_READ|REQ_SYNC|REQ_META,
GFP_NOFS,
&c->btree_bio);
@@ -1598,7 +1598,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
rb->pick = pick;
rb->bio.bi_iter.bi_sector = pick.ptr.offset;
rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
- bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
+ bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));
if (rb->have_ioref) {
this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
@@ -1665,7 +1665,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
ca = bch_dev_bkey_exists(c, pick.ptr.dev);
bio = bio_alloc_bioset(NULL,
- buf_pages(b->data, btree_bytes(c)),
+ buf_pages(b->data, btree_buf_bytes(b)),
REQ_OP_READ|REQ_SYNC|REQ_META,
GFP_NOFS,
&c->btree_bio);
@@ -1679,7 +1679,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
INIT_WORK(&rb->work, btree_node_read_work);
bio->bi_iter.bi_sector = pick.ptr.offset;
bio->bi_end_io = btree_node_read_endio;
- bch2_bio_map(bio, b->data, btree_bytes(c));
+ bch2_bio_map(bio, b->data, btree_buf_bytes(b));
if (rb->have_ioref) {
this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
@@ -2074,8 +2074,8 @@ do_write:
i->u64s = 0;
sort_iter_add(&sort_iter.iter,
- unwritten_whiteouts_start(c, b),
- unwritten_whiteouts_end(c, b));
+ unwritten_whiteouts_start(b),
+ unwritten_whiteouts_end(b));
SET_BSET_SEPARATE_WHITEOUTS(i, false);
b->whiteout_u64s = 0;
@@ -2251,7 +2251,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
bne = want_new_bset(c, b);
if (bne)
- bch2_bset_init_next(c, b, bne);
+ bch2_bset_init_next(b, bne);
bch2_btree_build_aux_trees(b);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index fa298289e016..3ef338df82f5 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1337,7 +1337,7 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
if (path->should_be_locked &&
!trans->restarted &&
- (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
+ (!dup || !bch2_btree_path_relock_norestart(trans, dup)))
return;
if (dup) {
@@ -2156,7 +2156,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* isn't monotonically increasing before FILTER_SNAPSHOTS, and
* that's what we check against in extents mode:
*/
- if (k.k->p.inode > end.inode)
+ if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bkey_gt(k.k->p, end)
+ : k.k->p.inode > end.inode))
goto end;
if (iter->update_path &&
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index da2b74fa63fc..24772538e4cc 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -819,6 +819,11 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
+/*
+ * This should not be used in a fastpath without first trying _do in
+ * nonblocking mode - it will cause excessive transaction restarts and
+ * potentially livelock:
+ */
#define drop_locks_do(_trans, _do) \
({ \
bch2_trans_unlock(_trans); \
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 2d1c95c42f24..684397442338 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -92,7 +92,7 @@ static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
continue;
bch2_btree_trans_to_text(out, i->trans);
- bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1);
+ bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
}
}
@@ -227,7 +227,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
prt_printf(&buf, "backtrace:");
prt_newline(&buf);
printbuf_indent_add(&buf, 2);
- bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2);
+ bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
printbuf_indent_sub(&buf, 2);
prt_newline(&buf);
}
@@ -631,8 +631,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
}
__flatten
-bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
- struct btree_path *path, unsigned long trace_ip)
+bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_path *path)
{
struct get_locks_fail f;
@@ -642,7 +641,7 @@ bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
int __bch2_btree_path_relock(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
- if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
+ if (!bch2_btree_path_relock_norestart(trans, path)) {
trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
}
@@ -759,12 +758,39 @@ int bch2_trans_relock(struct btree_trans *trans)
if (unlikely(trans->restarted))
return -((int) trans->restarted);
- trans_for_each_path(trans, path, i)
+ trans_for_each_path(trans, path, i) {
+ struct get_locks_fail f;
+
if (path->should_be_locked &&
- !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
- trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
+ !btree_path_get_locks(trans, path, false, &f)) {
+ if (trace_trans_restart_relock_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bpos_to_text(&buf, path->pos);
+ prt_printf(&buf, " l=%u seq=%u node seq=",
+ f.l, path->l[f.l].lock_seq);
+ if (IS_ERR_OR_NULL(f.b)) {
+ prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
+ } else {
+ prt_printf(&buf, "%u", f.b->c.lock.seq);
+
+ struct six_lock_count c =
+ bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
+ prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+
+ c = six_lock_counts(&f.b->c.lock);
+ prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+ }
+
+ trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ count_event(trans->c, trans_restart_relock);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}
+ }
+
return 0;
}
@@ -778,7 +804,7 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
trans_for_each_path(trans, path, i)
if (path->should_be_locked &&
- !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
+ !bch2_btree_path_relock_norestart(trans, path)) {
return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}
return 0;
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index cc5500a957a1..4bd72c855da1 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -312,8 +312,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *,
/* relock: */
-bool bch2_btree_path_relock_norestart(struct btree_trans *,
- struct btree_path *, unsigned long);
+bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
int __bch2_btree_path_relock(struct btree_trans *,
struct btree_path *, unsigned long);
@@ -353,12 +352,6 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
/* upgrade */
-
-struct get_locks_fail {
- unsigned l;
- struct btree *b;
-};
-
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
struct btree_path *, unsigned,
struct get_locks_fail *);
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 90eb8065ff2d..30d69a6d133e 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -139,8 +139,7 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
- EBUG_ON(insert->k.u64s >
- bch_btree_keys_u64s_remaining(trans->c, b));
+ EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));
EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));
k = bch2_btree_node_iter_peek_all(node_iter, b);
@@ -160,7 +159,7 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
k->type = KEY_TYPE_deleted;
if (k->needs_whiteout)
- push_whiteout(trans->c, b, insert->k.p);
+ push_whiteout(b, insert->k.p);
k->needs_whiteout = false;
if (k >= btree_bset_last(b)->start) {
@@ -348,9 +347,7 @@ static noinline void journal_transaction_name(struct btree_trans *trans)
static inline int btree_key_can_insert(struct btree_trans *trans,
struct btree *b, unsigned u64s)
{
- struct bch_fs *c = trans->c;
-
- if (!bch2_btree_node_insert_fits(c, b, u64s))
+ if (!bch2_btree_node_insert_fits(b, u64s))
return -BCH_ERR_btree_insert_btree_node_full;
return 0;
@@ -418,7 +415,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
return 0;
new_u64s = roundup_pow_of_two(u64s);
- new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT);
+ new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
if (unlikely(!new_k))
return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);
@@ -448,9 +445,6 @@ static int run_one_mem_trigger(struct btree_trans *trans,
if (unlikely(flags & BTREE_TRIGGER_NORUN))
return 0;
- if (!btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)))
- return 0;
-
if (old_ops->trigger == new_ops->trigger) {
ret = bch2_key_trigger(trans, i->btree_id, i->level,
old, bkey_i_to_s(new),
@@ -586,9 +580,6 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
{
- struct bch_fs *c = trans->c;
- int ret = 0;
-
trans_for_each_update(trans, i) {
/*
* XXX: synchronization of cached update triggers with gc
@@ -596,14 +587,15 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
*/
BUG_ON(i->cached || i->level);
- if (gc_visited(c, gc_pos_btree_node(insert_l(trans, i)->b))) {
- ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
+ if (btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)) &&
+ gc_visited(trans->c, gc_pos_btree_node(insert_l(trans, i)->b))) {
+ int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
if (ret)
- break;
+ return ret;
}
}
- return ret;
+ return 0;
}
static inline int
@@ -680,6 +672,9 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas))
return -BCH_ERR_btree_insert_need_mark_replicas;
+ /* XXX: we only want to run this if deltas are nonzero */
+ bch2_trans_account_disk_usage_change(trans);
+
h = trans->hooks;
while (h) {
ret = h->fn(trans, h);
@@ -689,8 +684,8 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
}
trans_for_each_update(trans, i)
- if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
- ret = run_one_mem_trigger(trans, i, i->flags);
+ if (BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS & (1U << i->bkey_type)) {
+ ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_ATOMIC|i->flags);
if (ret)
goto fatal_err;
}
@@ -994,6 +989,8 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
!trans->journal_entries_u64s)
goto out_reset;
+ memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));
+
ret = bch2_trans_commit_run_triggers(trans);
if (ret)
goto out_reset;
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index d530307046f4..4a5a64499eb7 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -430,6 +430,9 @@ struct btree_trans {
struct journal_res journal_res;
u64 *journal_seq;
struct disk_reservation *disk_res;
+
+ struct bch_fs_usage_base fs_usage_delta;
+
unsigned journal_u64s;
unsigned extra_disk_res; /* XXX kill */
struct replicas_delta_list *fs_usage_deltas;
@@ -653,7 +656,7 @@ const char *bch2_btree_node_type_str(enum btree_node_type);
BIT_ULL(BKEY_TYPE_reflink)| \
BIT_ULL(BKEY_TYPE_btree))
-#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS \
+#define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS \
(BIT_ULL(BKEY_TYPE_alloc)| \
BIT_ULL(BKEY_TYPE_inodes)| \
BIT_ULL(BKEY_TYPE_stripes)| \
@@ -661,7 +664,7 @@ const char *bch2_btree_node_type_str(enum btree_node_type);
#define BTREE_NODE_TYPE_HAS_TRIGGERS \
(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS| \
- BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)
+ BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
@@ -738,4 +741,9 @@ enum btree_node_sibling {
btree_next_sib,
};
+struct get_locks_fail {
+ unsigned l;
+ struct btree *b;
+};
+
#endif /* _BCACHEFS_BTREE_TYPES_H */
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 44f9dfa28a09..4530b14ff2c3 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -159,7 +159,7 @@ static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
{
size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);
- return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
+ return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b);
}
/* Btree node freeing/allocation: */
@@ -280,7 +280,8 @@ retry:
writepoint_ptr(&c->btree_write_point),
&devs_have,
res->nr_replicas,
- c->opts.metadata_replicas_required,
+ min(res->nr_replicas,
+ c->opts.metadata_replicas_required),
watermark, 0, cl, &wp);
if (unlikely(ret))
return ERR_PTR(ret);
@@ -1097,7 +1098,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
* Always check for space for two keys, even if we won't have to
* split at prior level - it might have been a merge instead:
*/
- if (bch2_btree_node_insert_fits(c, path->l[update_level].b,
+ if (bch2_btree_node_insert_fits(path->l[update_level].b,
BKEY_BTREE_PTR_U64s_MAX * 2))
break;
@@ -1401,7 +1402,7 @@ static void __btree_split_node(struct btree_update *as,
unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s +
nr_keys[i].val_u64s;
- if (__vstruct_bytes(struct btree_node, u64s) > btree_bytes(as->c))
+ if (__vstruct_bytes(struct btree_node, u64s) > btree_buf_bytes(b))
n[i]->data->format = b->format;
btree_node_set_format(n[i], n[i]->data->format);
@@ -1703,7 +1704,7 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
bch2_btree_node_prep_for_write(trans, path, b);
- if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
+ if (!bch2_btree_node_insert_fits(b, bch2_keylist_u64s(keys))) {
bch2_btree_node_unlock_write(trans, path, b);
goto split;
}
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index adfc62083844..c593c925d1e3 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -184,21 +184,19 @@ static inline void btree_node_reset_sib_u64s(struct btree *b)
b->sib_u64s[1] = b->nr.live_u64s;
}
-static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
+static inline void *btree_data_end(struct btree *b)
{
- return (void *) b->data + btree_bytes(c);
+ return (void *) b->data + btree_buf_bytes(b);
}
-static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
- struct btree *b)
+static inline struct bkey_packed *unwritten_whiteouts_start(struct btree *b)
{
- return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
+ return (void *) ((u64 *) btree_data_end(b) - b->whiteout_u64s);
}
-static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
- struct btree *b)
+static inline struct bkey_packed *unwritten_whiteouts_end(struct btree *b)
{
- return btree_data_end(c, b);
+ return btree_data_end(b);
}
static inline void *write_block(struct btree *b)
@@ -221,13 +219,11 @@ static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
return __btree_addr_written(b, k);
}
-static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
- struct btree *b,
- void *end)
+static inline ssize_t __bch2_btree_u64s_remaining(struct btree *b, void *end)
{
ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
b->whiteout_u64s;
- ssize_t total = c->opts.btree_node_size >> 3;
+ ssize_t total = btree_buf_bytes(b) >> 3;
/* Always leave one extra u64 for bch2_varint_decode: */
used++;
@@ -235,10 +231,9 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
return total - used;
}
-static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
- struct btree *b)
+static inline size_t bch2_btree_keys_u64s_remaining(struct btree *b)
{
- ssize_t remaining = __bch_btree_u64s_remaining(c, b,
+ ssize_t remaining = __bch2_btree_u64s_remaining(b,
btree_bkey_last(b, bset_tree_last(b)));
BUG_ON(remaining < 0);
@@ -260,14 +255,13 @@ static inline unsigned btree_write_set_buffer(struct btree *b)
return 8 << BTREE_WRITE_SET_U64s_BITS;
}
-static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
- struct btree *b)
+static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b)
{
struct bset_tree *t = bset_tree_last(b);
struct btree_node_entry *bne = max(write_block(b),
(void *) btree_bkey_last(b, bset_tree_last(b)));
ssize_t remaining_space =
- __bch_btree_u64s_remaining(c, b, bne->keys.start);
+ __bch2_btree_u64s_remaining(b, bne->keys.start);
if (unlikely(bset_written(b, bset(b, t)))) {
if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
@@ -281,12 +275,11 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
return NULL;
}
-static inline void push_whiteout(struct bch_fs *c, struct btree *b,
- struct bpos pos)
+static inline void push_whiteout(struct btree *b, struct bpos pos)
{
struct bkey_packed k;
- BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+ BUG_ON(bch2_btree_keys_u64s_remaining(b) < BKEY_U64s);
EBUG_ON(btree_node_just_written(b));
if (!bkey_pack_pos(&k, pos, b)) {
@@ -299,20 +292,19 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
k.needs_whiteout = true;
b->whiteout_u64s += k.u64s;
- bkey_p_copy(unwritten_whiteouts_start(c, b), &k);
+ bkey_p_copy(unwritten_whiteouts_start(b), &k);
}
/*
* write lock must be held on @b (else the dirty bset that we were going to
* insert into could be written out from under us)
*/
-static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
- struct btree *b, unsigned u64s)
+static inline bool bch2_btree_node_insert_fits(struct btree *b, unsigned u64s)
{
if (unlikely(btree_node_need_rewrite(b)))
return false;
- return u64s <= bch_btree_keys_u64s_remaining(c, b);
+ return u64s <= bch2_btree_keys_u64s_remaining(b);
}
void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
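/*
 * As the hunk above shows, space accounting is in u64s:
 * total = btree_buf_bytes(b) >> 3, and one extra u64 is always reserved so
 * bch2_varint_decode() can safely read past the last key.
 */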
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 5c1169c78daf..ac7844861966 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -125,13 +125,12 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
struct btree_write_buffered_key *wb,
bool *write_locked, size_t *fast)
{
- struct bch_fs *c = trans->c;
struct btree_path *path;
int ret;
EBUG_ON(!wb->journal_seq);
- EBUG_ON(!c->btree_write_buffer.flushing.pin.seq);
- EBUG_ON(c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
+ EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
+ EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
ret = bch2_btree_iter_traverse(iter);
if (ret)
@@ -155,7 +154,7 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
*write_locked = true;
}
- if (unlikely(!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s))) {
+ if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
*write_locked = false;
return wb_flush_one_slowpath(trans, iter, wb);
}
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index d83ea0e53df3..54f7826ac498 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -25,7 +25,7 @@
#include <linux/preempt.h>
-static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
+static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
enum bch_data_type data_type,
s64 sectors)
{
@@ -54,20 +54,20 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
bch2_fs_usage_acc_to_base(c, i);
for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
- usage->reserved += usage->persistent_reserved[i];
+ usage->b.reserved += usage->persistent_reserved[i];
for (unsigned i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(&c->replicas, i);
- fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
+ fs_usage_data_type_to_base(&usage->b, e->data_type, usage->replicas[i]);
}
for_each_member_device(c, ca) {
struct bch_dev_usage dev = bch2_dev_usage_read(ca);
- usage->hidden += (dev.d[BCH_DATA_sb].buckets +
- dev.d[BCH_DATA_journal].buckets) *
+ usage->b.hidden += (dev.d[BCH_DATA_sb].buckets +
+ dev.d[BCH_DATA_journal].buckets) *
ca->mi.bucket_size;
}
@@ -188,15 +188,15 @@ void bch2_fs_usage_to_text(struct printbuf *out,
prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
prt_printf(out, "hidden:\t\t\t\t%llu\n",
- fs_usage->u.hidden);
+ fs_usage->u.b.hidden);
prt_printf(out, "data:\t\t\t\t%llu\n",
- fs_usage->u.data);
+ fs_usage->u.b.data);
prt_printf(out, "cached:\t\t\t\t%llu\n",
- fs_usage->u.cached);
+ fs_usage->u.b.cached);
prt_printf(out, "reserved:\t\t\t%llu\n",
- fs_usage->u.reserved);
+ fs_usage->u.b.reserved);
prt_printf(out, "nr_inodes:\t\t\t%llu\n",
- fs_usage->u.nr_inodes);
+ fs_usage->u.b.nr_inodes);
prt_printf(out, "online reserved:\t\t%llu\n",
fs_usage->online_reserved);
@@ -225,10 +225,10 @@ static u64 reserve_factor(u64 r)
u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
- return min(fs_usage->u.hidden +
- fs_usage->u.btree +
- fs_usage->u.data +
- reserve_factor(fs_usage->u.reserved +
+ return min(fs_usage->u.b.hidden +
+ fs_usage->u.b.btree +
+ fs_usage->u.b.data +
+ reserve_factor(fs_usage->u.b.reserved +
fs_usage->online_reserved),
c->capacity);
}
@@ -240,17 +240,17 @@ __bch2_fs_usage_read_short(struct bch_fs *c)
u64 data, reserved;
ret.capacity = c->capacity -
- bch2_fs_usage_read_one(c, &c->usage_base->hidden);
+ bch2_fs_usage_read_one(c, &c->usage_base->b.hidden);
- data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
- bch2_fs_usage_read_one(c, &c->usage_base->btree);
- reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
+ data = bch2_fs_usage_read_one(c, &c->usage_base->b.data) +
+ bch2_fs_usage_read_one(c, &c->usage_base->b.btree);
+ reserved = bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) +
percpu_u64_get(c->online_reserved);
ret.used = min(ret.capacity, data + reserve_factor(reserved));
ret.free = ret.capacity - ret.used;
- ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
+ ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes);
return ret;
}
@@ -284,7 +284,7 @@ void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
prt_newline(out);
for (unsigned i = 0; i < BCH_DATA_NR; i++) {
- prt_str(out, bch2_data_types[i]);
+ bch2_prt_data_type(out, i);
prt_tab(out);
prt_u64(out, usage->d[i].buckets);
prt_tab_rjust(out);
@@ -308,9 +308,9 @@ void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
fs_usage = fs_usage_ptr(c, journal_seq, gc);
if (data_type_is_hidden(old->data_type))
- fs_usage->hidden -= ca->mi.bucket_size;
+ fs_usage->b.hidden -= ca->mi.bucket_size;
if (data_type_is_hidden(new->data_type))
- fs_usage->hidden += ca->mi.bucket_size;
+ fs_usage->b.hidden += ca->mi.bucket_size;
u = dev_usage_ptr(ca, journal_seq, gc);
@@ -359,7 +359,7 @@ static inline int __update_replicas(struct bch_fs *c,
if (idx < 0)
return -1;
- fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+ fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
fs_usage->replicas[idx] += sectors;
return 0;
}
@@ -394,7 +394,7 @@ int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k,
preempt_disable();
fs_usage = fs_usage_ptr(c, journal_seq, gc);
- fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+ fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
fs_usage->replicas[idx] += sectors;
preempt_enable();
err:
@@ -523,8 +523,8 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
if (bch2_fs_inconsistent_on(g->data_type &&
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
- bch2_data_types[g->data_type],
- bch2_data_types[data_type])) {
+ bch2_data_type_str(g->data_type),
+ bch2_data_type_str(data_type))) {
ret = -EIO;
goto err;
}
@@ -532,7 +532,7 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
- bch2_data_types[g->data_type ?: data_type],
+ bch2_data_type_str(g->data_type ?: data_type),
g->dirty_sectors, sectors)) {
ret = -EIO;
goto err;
@@ -575,7 +575,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
- bch2_data_types[bucket_data_type ?: ptr_data_type],
+ bch2_data_type_str(bucket_data_type ?: ptr_data_type),
ptr->gen,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EIO;
@@ -588,7 +588,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
- bch2_data_types[bucket_data_type ?: ptr_data_type],
+ bch2_data_type_str(bucket_data_type ?: ptr_data_type),
ptr->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
@@ -603,7 +603,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
"while marking %s",
ptr->dev, bucket_nr, b_gen,
*bucket_gen(ca, bucket_nr),
- bch2_data_types[bucket_data_type ?: ptr_data_type],
+ bch2_data_type_str(bucket_data_type ?: ptr_data_type),
ptr->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
@@ -624,8 +624,8 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
- bch2_data_types[bucket_data_type],
- bch2_data_types[ptr_data_type],
+ bch2_data_type_str(bucket_data_type),
+ bch2_data_type_str(ptr_data_type),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EIO;
@@ -638,7 +638,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
- bch2_data_types[bucket_data_type ?: ptr_data_type],
+ bch2_data_type_str(bucket_data_type ?: ptr_data_type),
bucket_sectors, sectors,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
@@ -677,11 +677,11 @@ void bch2_trans_fs_usage_revert(struct btree_trans *trans,
BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
}
- dst->nr_inodes -= deltas->nr_inodes;
+ dst->b.nr_inodes -= deltas->nr_inodes;
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
added -= deltas->persistent_reserved[i];
- dst->reserved -= deltas->persistent_reserved[i];
+ dst->b.reserved -= deltas->persistent_reserved[i];
dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
}
@@ -694,48 +694,25 @@ void bch2_trans_fs_usage_revert(struct btree_trans *trans,
percpu_up_read(&c->mark_lock);
}
-int bch2_trans_fs_usage_apply(struct btree_trans *trans,
- struct replicas_delta_list *deltas)
+void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
+ u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
static int warned_disk_usage = 0;
bool warn = false;
- u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
- struct replicas_delta *d, *d2;
- struct replicas_delta *top = (void *) deltas->d + deltas->used;
- struct bch_fs_usage *dst;
- s64 added = 0, should_not_have_added;
- unsigned i;
percpu_down_read(&c->mark_lock);
preempt_disable();
- dst = fs_usage_ptr(c, trans->journal_res.seq, false);
-
- for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
- switch (d->r.data_type) {
- case BCH_DATA_btree:
- case BCH_DATA_user:
- case BCH_DATA_parity:
- added += d->delta;
- }
+ struct bch_fs_usage_base *dst = &fs_usage_ptr(c, trans->journal_res.seq, false)->b;
+ struct bch_fs_usage_base *src = &trans->fs_usage_delta;
- if (__update_replicas(c, dst, &d->r, d->delta))
- goto need_mark;
- }
-
- dst->nr_inodes += deltas->nr_inodes;
-
- for (i = 0; i < BCH_REPLICAS_MAX; i++) {
- added += deltas->persistent_reserved[i];
- dst->reserved += deltas->persistent_reserved[i];
- dst->persistent_reserved[i] += deltas->persistent_reserved[i];
- }
+ s64 added = src->btree + src->data + src->reserved;
/*
* Not allowed to reduce sectors_available except by getting a
* reservation:
*/
- should_not_have_added = added - (s64) disk_res_sectors;
+ s64 should_not_have_added = added - (s64) disk_res_sectors;
if (unlikely(should_not_have_added > 0)) {
u64 old, new, v = atomic64_read(&c->sectors_available);
@@ -754,6 +731,13 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
this_cpu_sub(*c->online_reserved, added);
}
+ dst->hidden += src->hidden;
+ dst->btree += src->btree;
+ dst->data += src->data;
+ dst->cached += src->cached;
+ dst->reserved += src->reserved;
+ dst->nr_inodes += src->nr_inodes;
+
preempt_enable();
percpu_up_read(&c->mark_lock);
@@ -761,6 +745,34 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
bch2_trans_inconsistent(trans,
"disk usage increased %lli more than %llu sectors reserved)",
should_not_have_added, disk_res_sectors);
+}
+
+int bch2_trans_fs_usage_apply(struct btree_trans *trans,
+ struct replicas_delta_list *deltas)
+{
+ struct bch_fs *c = trans->c;
+ struct replicas_delta *d, *d2;
+ struct replicas_delta *top = (void *) deltas->d + deltas->used;
+ struct bch_fs_usage *dst;
+ unsigned i;
+
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+ dst = fs_usage_ptr(c, trans->journal_res.seq, false);
+
+ for (d = deltas->d; d != top; d = replicas_delta_next(d))
+ if (__update_replicas(c, dst, &d->r, d->delta))
+ goto need_mark;
+
+ dst->b.nr_inodes += deltas->nr_inodes;
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+ dst->b.reserved += deltas->persistent_reserved[i];
+ dst->persistent_reserved[i] += deltas->persistent_reserved[i];
+ }
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
return 0;
need_mark:
/* revert changes: */
@@ -1084,7 +1096,7 @@ static int __trigger_reservation(struct btree_trans *trans,
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc);
replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved));
- fs_usage->reserved += sectors;
+ fs_usage->b.reserved += sectors;
fs_usage->persistent_reserved[replicas - 1] += sectors;
preempt_enable();
@@ -1130,9 +1142,9 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
iter.pos.inode, iter.pos.offset, a->v.gen,
- bch2_data_types[a->v.data_type],
- bch2_data_types[type],
- bch2_data_types[type]);
+ bch2_data_type_str(a->v.data_type),
+ bch2_data_type_str(type),
+ bch2_data_type_str(type));
ret = -EIO;
goto err;
}
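
The rewrite above splits bch2_trans_fs_usage_apply() in two: the new bch2_trans_account_disk_usage_change() owns the base-counter rollup and the "used more than we reserved" sanity check, while the replicas walk stays in the old function. A standalone sketch of that check follows; the variable names mirror the hunk, but the pool variable and the numbers are invented for illustration.

#include <stdint.h>
#include <stdio.h>

static int64_t sectors_available = 1024;	/* stand-in for c->sectors_available */

/* 'added' is what the transaction actually consumed (btree + data +
 * reserved deltas); 'disk_res_sectors' is what it reserved up front. */
static void account_disk_usage_change(int64_t added, int64_t disk_res_sectors)
{
	int64_t should_not_have_added = added - disk_res_sectors;

	if (should_not_have_added > 0) {
		/* Claw the excess back from the global pool and complain:
		 * only taking a reservation may shrink sectors_available. */
		sectors_available -= should_not_have_added;
		if (sectors_available < 0)
			sectors_available = 0;
		fprintf(stderr, "disk usage increased %lld more than %lld sectors reserved\n",
			(long long) should_not_have_added,
			(long long) disk_res_sectors);
	}
}

int main(void)
{
	account_disk_usage_change(100, 64);	/* warns: 36 sectors over */
	return 0;
}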
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 2c95cc5d86be..6387e039f789 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -356,6 +356,8 @@ int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
ret; \
})
+void bch2_trans_account_disk_usage_change(struct btree_trans *);
+
void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
@@ -385,6 +387,21 @@ static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
return false;
}
+static inline const char *bch2_data_type_str(enum bch_data_type type)
+{
+ return type < BCH_DATA_NR
+ ? __bch2_data_types[type]
+ : "(invalid data type)";
+}
+
+static inline void bch2_prt_data_type(struct printbuf *out, enum bch_data_type type)
+{
+ if (type < BCH_DATA_NR)
+ prt_str(out, __bch2_data_types[type]);
+ else
+ prt_printf(out, "(invalid data type %u)", type);
+}
+
/* disk reservations: */
static inline void bch2_disk_reservation_put(struct bch_fs *c,
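
bch2_data_type_str() and bch2_prt_data_type() above replace direct bch2_data_types[] indexing so that an out-of-range value read off disk prints a diagnostic instead of indexing past the table; the compress.h hunk below applies the same treatment to compression types. A standalone sketch of the pattern, with the enum and strings invented:

#include <stdio.h>

enum data_type { DT_FREE, DT_BTREE, DT_USER, DT_NR };

static const char * const data_type_strs[] = { "free", "btree", "user" };

/* Bounds-checked lookup: never index the table with untrusted input. */
static const char *data_type_str(unsigned type)
{
	return type < DT_NR ? data_type_strs[type] : "(invalid data type)";
}

int main(void)
{
	printf("%s\n", data_type_str(DT_USER));	/* "user" */
	printf("%s\n", data_type_str(42));	/* "(invalid data type)" */
	return 0;
}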
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 783f71017204..6a31740222a7 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -45,23 +45,18 @@ struct bch_dev_usage {
} d[BCH_DATA_NR];
};
-struct bch_fs_usage {
- /* all fields are in units of 512 byte sectors: */
+struct bch_fs_usage_base {
u64 hidden;
u64 btree;
u64 data;
u64 cached;
u64 reserved;
u64 nr_inodes;
+};
- /* XXX: add stats for compression ratio */
-#if 0
- u64 uncompressed;
- u64 compressed;
-#endif
-
- /* broken out: */
-
+struct bch_fs_usage {
+ /* all fields are in units of 512 byte sectors: */
+ struct bch_fs_usage_base b;
u64 persistent_reserved[BCH_REPLICAS_MAX];
u64 replicas[];
};
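
Pulling the six summary counters out into struct bch_fs_usage_base is what lets the buckets.c hunk above apply a transaction's whole delta to dst->b with six plain adds, instead of threading individual fields through every caller. A toy version of the embedding; all names here are invented:

#include <stdint.h>

struct usage_base {
	uint64_t hidden, btree, data, cached, reserved, nr_inodes;
};

struct usage {
	struct usage_base b;	/* summary counters, addable as a unit */
	uint64_t replicas[];	/* per-replicas breakdown follows */
};

/* With the counters grouped, applying a delta is mechanical: */
static void usage_base_add(struct usage_base *dst, const struct usage_base *src)
{
	dst->hidden    += src->hidden;
	dst->btree     += src->btree;
	dst->data      += src->data;
	dst->cached    += src->cached;
	dst->reserved  += src->reserved;
	dst->nr_inodes += src->nr_inodes;
}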
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index f41889093a2c..363644451106 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -109,7 +109,7 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);
- while (1) {
+ do {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread && kthread_should_stop())
break;
@@ -119,7 +119,7 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
schedule();
try_to_freeze();
- }
+ } while (0);
__set_current_state(TASK_RUNNING);
del_timer_sync(&wait.cpu_timer);
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
index 607fd5e232c9..58c2eb45570f 100644
--- a/fs/bcachefs/compress.h
+++ b/fs/bcachefs/compress.h
@@ -47,6 +47,14 @@ static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
}
+static inline void bch2_prt_compression_type(struct printbuf *out, enum bch_compression_type type)
+{
+ if (type < BCH_COMPRESSION_TYPE_NR)
+ prt_str(out, __bch2_compression_types[type]);
+ else
+ prt_printf(out, "(invalid compression type %u)", type);
+}
+
int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
struct bch_extent_crc_unpacked *);
int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 6f13477ff652..4150feca42a2 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -285,9 +285,7 @@ restart_drop_extra_replicas:
k.k->p, bkey_start_pos(&insert->k)) ?:
bch2_insert_snapshot_whiteouts(trans, m->btree_id,
k.k->p, insert->k.p) ?:
- bch2_bkey_set_needs_rebalance(c, insert,
- op->opts.background_target,
- op->opts.background_compression) ?:
+ bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
bch2_trans_update(trans, &iter, insert,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
bch2_trans_commit(trans, &op->res,
@@ -529,7 +527,7 @@ int bch2_data_update_init(struct btree_trans *trans,
BCH_WRITE_DATA_ENCODED|
BCH_WRITE_MOVE|
m->data_opts.write_flags;
- m->op.compression_opt = io_opts.background_compression ?: io_opts.compression;
+ m->op.compression_opt = background_compression(io_opts);
m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
bkey_for_each_ptr(ptrs, ptr)
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index d6418948495f..7bdba8507fc9 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -44,19 +44,19 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
return false;
bio = bio_alloc_bioset(ca->disk_sb.bdev,
- buf_pages(n_sorted, btree_bytes(c)),
+ buf_pages(n_sorted, btree_buf_bytes(b)),
REQ_OP_READ|REQ_META,
GFP_NOFS,
&c->btree_bio);
bio->bi_iter.bi_sector = pick.ptr.offset;
- bch2_bio_map(bio, n_sorted, btree_bytes(c));
+ bch2_bio_map(bio, n_sorted, btree_buf_bytes(b));
submit_bio_wait(bio);
bio_put(bio);
percpu_ref_put(&ca->io_ref);
- memcpy(n_ondisk, n_sorted, btree_bytes(c));
+ memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));
v->written = 0;
if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
@@ -137,7 +137,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
mutex_lock(&c->verify_lock);
if (!c->verify_ondisk) {
- c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
+ c->verify_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
if (!c->verify_ondisk)
goto out;
}
@@ -199,19 +199,19 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
return;
}
- n_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
+ n_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
if (!n_ondisk) {
prt_printf(out, "memory allocation failure\n");
goto out;
}
bio = bio_alloc_bioset(ca->disk_sb.bdev,
- buf_pages(n_ondisk, btree_bytes(c)),
+ buf_pages(n_ondisk, btree_buf_bytes(b)),
REQ_OP_READ|REQ_META,
GFP_NOFS,
&c->btree_bio);
bio->bi_iter.bi_sector = pick.ptr.offset;
- bch2_bio_map(bio, n_ondisk, btree_bytes(c));
+ bch2_bio_map(bio, n_ondisk, btree_buf_bytes(b));
ret = submit_bio_wait(bio);
if (ret) {
@@ -293,7 +293,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
out:
if (bio)
bio_put(bio);
- kvpfree(n_ondisk, btree_bytes(c));
+ kvpfree(n_ondisk, btree_buf_bytes(b));
percpu_ref_put(&ca->io_ref);
}
@@ -627,7 +627,7 @@ restart:
prt_printf(&i->buf, "backtrace:");
prt_newline(&i->buf);
printbuf_indent_add(&i->buf, 2);
- bch2_prt_task_backtrace(&i->buf, task, 0);
+ bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
printbuf_indent_sub(&i->buf, 2);
prt_newline(&i->buf);
diff --git a/fs/bcachefs/dirent_format.h b/fs/bcachefs/dirent_format.h
new file mode 100644
index 000000000000..5e116b88e814
--- /dev/null
+++ b/fs/bcachefs/dirent_format.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DIRENT_FORMAT_H
+#define _BCACHEFS_DIRENT_FORMAT_H
+
+/*
+ * Dirents (and xattrs) have to implement string lookups; since our b-tree
+ * doesn't support arbitrary length strings for the key, we instead index by a
+ * 64 bit hash (currently truncated sha1) of the string, stored in the offset
+ * field of the key - using linear probing to resolve hash collisions. This also
+ * provides us with the readdir cookie posix requires.
+ *
+ * Linear probing requires us to use whiteouts for deletions, in the event of a
+ * collision:
+ */
+
+struct bch_dirent {
+ struct bch_val v;
+
+ /* Target inode number: */
+ union {
+ __le64 d_inum;
+ struct { /* DT_SUBVOL */
+ __le32 d_child_subvol;
+ __le32 d_parent_subvol;
+ };
+ };
+
+ /*
+ * Copy of mode bits 12-15 from the target inode - so userspace can get
+ * the filetype without having to do a stat()
+ */
+ __u8 d_type;
+
+ __u8 d_name[];
+} __packed __aligned(8);
+
+#define DT_SUBVOL 16
+#define BCH_DT_MAX 17
+
+#define BCH_NAME_MAX 512
+
+#endif /* _BCACHEFS_DIRENT_FORMAT_H */
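
The header comment explains why dirent deletion uses whiteouts: with linear probing, removing an entry outright would break the probe chain for anything that collided past it. A self-contained toy table showing the same rule; the hash, sizes, and layout are all invented and far simpler than the btree-backed real thing:

#include <stdint.h>
#include <string.h>

#define NR_SLOTS 8

enum slot_state { EMPTY, LIVE, WHITEOUT };

struct slot {
	enum slot_state	state;
	char		name[16];
	uint64_t	inum;
};

static uint64_t hash_name(const char *s)
{
	uint64_t h = 1469598103934665603ull;		/* FNV-1a, a stand-in */

	while (*s)
		h = (h ^ (uint8_t) *s++) * 1099511628211ull;
	return h;
}

static struct slot *lookup(struct slot *tbl, const char *name)
{
	uint64_t h = hash_name(name);

	for (unsigned i = 0; i < NR_SLOTS; i++) {
		struct slot *s = &tbl[(h + i) % NR_SLOTS];

		if (s->state == EMPTY)		/* probe chain ends here */
			return NULL;
		if (s->state == LIVE && !strcmp(s->name, name))
			return s;
		/* WHITEOUT: keep probing past the deletion */
	}
	return NULL;
}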
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index d802bc63c8d0..d503af270024 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -190,7 +190,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
a->v.stripe_redundancy, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
iter.pos.inode, iter.pos.offset, a->v.gen,
- bch2_data_types[a->v.data_type],
+ bch2_data_type_str(a->v.data_type),
a->v.dirty_sectors,
a->v.stripe, s.k->p.offset)) {
ret = -EIO;
@@ -200,7 +200,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
iter.pos.inode, iter.pos.offset, a->v.gen,
- bch2_data_types[a->v.data_type],
+ bch2_data_type_str(a->v.data_type),
a->v.dirty_sectors,
s.k->p.offset)) {
ret = -EIO;
@@ -367,7 +367,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
}
}
- if (!(flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))) {
+ if (flags & BTREE_TRIGGER_ATOMIC) {
struct stripe *m = genradix_ptr(&c->stripes, idx);
if (!m) {
diff --git a/fs/bcachefs/ec_format.h b/fs/bcachefs/ec_format.h
new file mode 100644
index 000000000000..44ce88ba08d7
--- /dev/null
+++ b/fs/bcachefs/ec_format.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EC_FORMAT_H
+#define _BCACHEFS_EC_FORMAT_H
+
+struct bch_stripe {
+ struct bch_val v;
+ __le16 sectors;
+ __u8 algorithm;
+ __u8 nr_blocks;
+ __u8 nr_redundant;
+
+ __u8 csum_granularity_bits;
+ __u8 csum_type;
+ __u8 pad;
+
+ struct bch_extent_ptr ptrs[];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_EC_FORMAT_H */
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 82ec056f4cdb..61395b113df9 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -8,6 +8,7 @@
#include "bcachefs.h"
#include "bkey_methods.h"
+#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
@@ -1018,12 +1019,12 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct bch_extent_crc_unpacked crc =
bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
+ prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
crc.compressed_size,
crc.uncompressed_size,
crc.offset, crc.nonce,
- bch2_csum_types[crc.csum_type],
- bch2_compression_types[crc.compression_type]);
+ bch2_csum_types[crc.csum_type]);
+ bch2_prt_compression_type(out, crc.compression_type);
break;
}
case BCH_EXTENT_ENTRY_stripe_ptr: {
@@ -1334,10 +1335,12 @@ bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
}
int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
- unsigned target, unsigned compression)
+ struct bch_io_opts *opts)
{
struct bkey_s k = bkey_i_to_s(_k);
struct bch_extent_rebalance *r;
+ unsigned target = opts->background_target;
+ unsigned compression = background_compression(*opts);
bool needs_rebalance;
if (!bkey_extent_is_direct_data(k.k))
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index a855c94d43dd..6bf839d69e84 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -708,7 +708,7 @@ unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);
int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
- unsigned, unsigned);
+ struct bch_io_opts *);
/* Generic extent code: */
diff --git a/fs/bcachefs/extents_format.h b/fs/bcachefs/extents_format.h
new file mode 100644
index 000000000000..3bd2fdbb0817
--- /dev/null
+++ b/fs/bcachefs/extents_format.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EXTENTS_FORMAT_H
+#define _BCACHEFS_EXTENTS_FORMAT_H
+
+/*
+ * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
+ * preceded by checksum/compression information (bch_extent_crc32 or
+ * bch_extent_crc64).
+ *
+ * One major determining factor in the format of extents is how we handle and
+ * represent extents that have been partially overwritten and thus trimmed:
+ *
+ * If an extent is not checksummed or compressed, when the extent is trimmed we
+ * don't have to remember the extent we originally allocated and wrote: we can
+ * merely adjust ptr->offset to point to the start of the data that is currently
+ * live. The size field in struct bkey records the current (live) size of the
+ * extent, and is also used to mean "size of region on disk that we point to" in
+ * this case.
+ *
+ * Thus an extent that is not checksummed or compressed will consist only of a
+ * list of bch_extent_ptrs, with none of the fields in
+ * bch_extent_crc32/bch_extent_crc64.
+ *
+ * When an extent is checksummed or compressed, it's not possible to read only
+ * the data that is currently live: we have to read the entire extent that was
+ * originally written, and then return only the part of the extent that is
+ * currently live.
+ *
+ * Thus, in addition to the current size of the extent in struct bkey, we need
+ * to store the size of the originally allocated space - this is the
+ * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
+ * when the extent is trimmed, instead of modifying the offset field of the
+ * pointer, we keep a second smaller offset field - "offset into the original
+ * extent of the currently live region".
+ *
+ * The other major determining factor is replication and data migration:
+ *
+ * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
+ * write, we will initially write all the replicas in the same format, with the
+ * same checksum type and compression format - however, when copygc runs later (or
+ * tiering/cache promotion, anything that moves data), it is not in general
+ * going to rewrite all the pointers at once - one of the replicas may be in a
+ * bucket on one device that has very little fragmentation while another lives
+ * in a bucket that has become heavily fragmented, and thus is being rewritten
+ * sooner than the rest.
+ *
+ * Thus it will only move a subset of the pointers (or in the case of
+ * tiering/cache promotion perhaps add a single pointer without dropping any
+ * current pointers), and if the extent has been partially overwritten it must
+ * write only the currently live portion (or copygc would not be able to reduce
+ * fragmentation!) - which necessitates a different bch_extent_crc format for
+ * the new pointer.
+ *
+ * But in the interests of space efficiency, we don't want to store one
+ * bch_extent_crc for each pointer if we don't have to.
+ *
+ * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
+ * bch_extent_ptrs appended arbitrarily one after the other. We determine the
+ * type of a given entry with a scheme similar to utf8 (except we're encoding a
+ * type, not a size), encoding the type in the position of the first set bit:
+ *
+ * bch_extent_ptr - 0b1
+ * bch_extent_crc32 - 0b10
+ * bch_extent_crc64 - 0b100
+ *
+ * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
+ * bch_extent_crc64 is the least constrained).
+ *
+ * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
+ * until the next bch_extent_crc32/64.
+ *
+ * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
+ * is neither checksummed nor compressed.
+ */
+
+#define BCH_EXTENT_ENTRY_TYPES() \
+ x(ptr, 0) \
+ x(crc32, 1) \
+ x(crc64, 2) \
+ x(crc128, 3) \
+ x(stripe_ptr, 4) \
+ x(rebalance, 5)
+#define BCH_EXTENT_ENTRY_MAX 6
+
+enum bch_extent_entry_type {
+#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
+ BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
+
+/* Compressed/uncompressed size are stored biased by 1: */
+struct bch_extent_crc32 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u32 type:2,
+ _compressed_size:7,
+ _uncompressed_size:7,
+ offset:7,
+ _unused:1,
+ csum_type:4,
+ compression_type:4;
+ __u32 csum;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u32 csum;
+ __u32 compression_type:4,
+ csum_type:4,
+ _unused:1,
+ offset:7,
+ _uncompressed_size:7,
+ _compressed_size:7,
+ type:2;
+#endif
+} __packed __aligned(8);
+
+#define CRC32_SIZE_MAX (1U << 7)
+#define CRC32_NONCE_MAX 0
+
+struct bch_extent_crc64 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:3,
+ _compressed_size:9,
+ _uncompressed_size:9,
+ offset:9,
+ nonce:10,
+ csum_type:4,
+ compression_type:4,
+ csum_hi:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 csum_hi:16,
+ compression_type:4,
+ csum_type:4,
+ nonce:10,
+ offset:9,
+ _uncompressed_size:9,
+ _compressed_size:9,
+ type:3;
+#endif
+ __u64 csum_lo;
+} __packed __aligned(8);
+
+#define CRC64_SIZE_MAX (1U << 9)
+#define CRC64_NONCE_MAX ((1U << 10) - 1)
+
+struct bch_extent_crc128 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:4,
+ _compressed_size:13,
+ _uncompressed_size:13,
+ offset:13,
+ nonce:13,
+ csum_type:4,
+ compression_type:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 compression_type:4,
+ csum_type:4,
+ nonce:13,
+ offset:13,
+ _uncompressed_size:13,
+ _compressed_size:13,
+ type:4;
+#endif
+ struct bch_csum csum;
+} __packed __aligned(8);
+
+#define CRC128_SIZE_MAX (1U << 13)
+#define CRC128_NONCE_MAX ((1U << 13) - 1)
+
+/*
+ * @reservation - pointer hasn't been written to, just reserved
+ */
+struct bch_extent_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:1,
+ cached:1,
+ unused:1,
+ unwritten:1,
+ offset:44, /* 8 petabytes */
+ dev:8,
+ gen:8;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 gen:8,
+ dev:8,
+ offset:44,
+ unwritten:1,
+ unused:1,
+ cached:1,
+ type:1;
+#endif
+} __packed __aligned(8);
+
+struct bch_extent_stripe_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:5,
+ block:8,
+ redundancy:4,
+ idx:47;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 idx:47,
+ redundancy:4,
+ block:8,
+ type:5;
+#endif
+};
+
+struct bch_extent_rebalance {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:6,
+ unused:34,
+ compression:8, /* enum bch_compression_opt */
+ target:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 target:16,
+ compression:8,
+ unused:34,
+ type:6;
+#endif
+};
+
+union bch_extent_entry {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
+ unsigned long type;
+#elif __BITS_PER_LONG == 32
+ struct {
+ unsigned long pad;
+ unsigned long type;
+ };
+#else
+#error edit for your odd byteorder.
+#endif
+
+#define x(f, n) struct bch_extent_##f f;
+ BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
+
+struct bch_btree_ptr {
+ struct bch_val v;
+
+ __u64 _data[0];
+ struct bch_extent_ptr start[];
+} __packed __aligned(8);
+
+struct bch_btree_ptr_v2 {
+ struct bch_val v;
+
+ __u64 mem_ptr;
+ __le64 seq;
+ __le16 sectors_written;
+ __le16 flags;
+ struct bpos min_key;
+ __u64 _data[0];
+ struct bch_extent_ptr start[];
+} __packed __aligned(8);
+
+LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
+
+struct bch_extent {
+ struct bch_val v;
+
+ __u64 _data[0];
+ union bch_extent_entry start[];
+} __packed __aligned(8);
+
+/* Maximum size (in u64s) a single pointer could be: */
+#define BKEY_EXTENT_PTR_U64s_MAX\
+ ((sizeof(struct bch_extent_crc128) + \
+ sizeof(struct bch_extent_ptr)) / sizeof(__u64))
+
+/* Maximum possible size of an entire extent value: */
+#define BKEY_EXTENT_VAL_U64s_MAX \
+ (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
+
+/* Maximum possible size of an entire extent, key + value: */
+#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
+
+/* Btree pointers don't carry around checksums: */
+#define BKEY_BTREE_PTR_VAL_U64s_MAX \
+ ((sizeof(struct bch_btree_ptr_v2) + \
+ sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
+#define BKEY_BTREE_PTR_U64s_MAX \
+ (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
+
+struct bch_reservation {
+ struct bch_val v;
+
+ __le32 generation;
+ __u8 nr_replicas;
+ __u8 pad[3];
+} __packed __aligned(8);
+
+struct bch_inline_data {
+ struct bch_val v;
+ __u8 data[];
+};
+
+#endif /* _BCACHEFS_EXTENTS_FORMAT_H */
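
The comment at the top of this file describes tagging entry types by the position of the lowest set bit, utf8-style; the struct definitions bear it out, with bch_extent_ptr using type:1, bch_extent_crc32 type:2, and so on, so entry type n carries an (n+1)-bit tag of the form 1 << n. A standalone sketch of the decode; this is the idea only, not the kernel's extent_entry_type(), which also validates against BCH_EXTENT_ENTRY_MAX:

#include <assert.h>
#include <stdint.h>

enum entry_type {		/* mirrors BCH_EXTENT_ENTRY_TYPES() ordering */
	ENTRY_ptr	= 0,	/* tag 0b1   */
	ENTRY_crc32	= 1,	/* tag 0b10  */
	ENTRY_crc64	= 2,	/* tag 0b100 */
};

static uint64_t encode_tag(enum entry_type t)
{
	return 1ull << t;	/* one-hot tag in the low bits */
}

static enum entry_type decode_tag(uint64_t first_word)
{
	/* position of the lowest set bit recovers the type; payload bits
	 * sit above the tag field, so they never win the ctz */
	return (enum entry_type) __builtin_ctzll(first_word);
}

int main(void)
{
	assert(decode_tag(encode_tag(ENTRY_crc64)) == ENTRY_crc64);
	return 0;
}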
diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h
index 9637f636e32d..b04750dbf870 100644
--- a/fs/bcachefs/eytzinger.h
+++ b/fs/bcachefs/eytzinger.h
@@ -156,7 +156,7 @@ static inline unsigned inorder_to_eytzinger1(unsigned i, unsigned size)
}
#define eytzinger1_for_each(_i, _size) \
- for ((_i) = eytzinger1_first((_size)); \
+ for (unsigned (_i) = eytzinger1_first((_size)); \
(_i) != 0; \
(_i) = eytzinger1_next((_i), (_size)))
@@ -227,7 +227,7 @@ static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
}
#define eytzinger0_for_each(_i, _size) \
- for ((_i) = eytzinger0_first((_size)); \
+ for (unsigned (_i) = eytzinger0_first((_size)); \
(_i) != -1; \
(_i) = eytzinger0_next((_i), (_size)))
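
The macro change above declares the iterator in the for-initializer, C99-style, so callers stop declaring (and sometimes shadowing) their own unsigned. For orientation, here is a self-contained in-order walk over a 1-based eytzinger (BFS-ordered) array; this is the textbook arithmetic, where the kernel versions use equivalent bit tricks:

#include <stdio.h>

/* Leftmost node: follow left children (2*i) from the root. */
static unsigned eyt_first(unsigned n)
{
	unsigned i = 1;

	while (2 * i <= n)
		i *= 2;
	return n ? i : 0;
}

/* In-order successor; returns 0 when the walk is done. */
static unsigned eyt_next(unsigned i, unsigned n)
{
	if (2 * i + 1 <= n) {		/* right subtree exists: its leftmost */
		i = 2 * i + 1;
		while (2 * i <= n)
			i *= 2;
	} else {
		while (i & 1)		/* climb while we are a right child */
			i >>= 1;
		i >>= 1;		/* parent we were the left child of */
	}
	return i;
}

int main(void)
{
	for (unsigned i = eyt_first(5); i; i = eyt_next(i, 5))
		printf("%u ", i);	/* prints: 4 2 5 1 3 */
	printf("\n");
	return 0;
}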
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 73c12e565af5..27710cdd5710 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -303,18 +303,6 @@ void bch2_readahead(struct readahead_control *ractl)
darray_exit(&readpages_iter.folios);
}
-static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
- subvol_inum inum, struct folio *folio)
-{
- bch2_folio_create(folio, __GFP_NOFAIL);
-
- rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
- rbio->bio.bi_iter.bi_sector = folio_sector(folio);
- BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
-
- bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0));
-}
-
static void bch2_read_single_folio_end_io(struct bio *bio)
{
complete(bio->bi_private);
@@ -329,6 +317,9 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
int ret;
DECLARE_COMPLETION_ONSTACK(done);
+ if (!bch2_folio_create(folio, GFP_KERNEL))
+ return -ENOMEM;
+
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
@@ -336,7 +327,11 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
rbio->bio.bi_private = &done;
rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
- __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
+ rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
+
+ bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0));
wait_for_completion(&done);
ret = blk_status_to_errno(rbio->bio.bi_status);
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index fdd57c5785c9..33cb6da3a5ad 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -77,6 +77,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+ /* bios must be 512 byte aligned: */
+ if ((offset|iter->count) & (SECTOR_SIZE - 1))
+ return -EINVAL;
+
ret = min_t(loff_t, iter->count,
max_t(loff_t, 0, i_size_read(&inode->v) - offset));
@@ -84,6 +88,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
return ret;
shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
+ if (shorten >= iter->count)
+ shorten = 0;
iter->count -= shorten;
bio = bio_alloc_bioset(NULL,
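
The new guard above rejects direct reads whose offset or byte count is not 512-byte aligned, and does it with one mask test: OR-ing the two values sets a low bit iff either operand has one. The check in isolation, assuming SECTOR_SIZE is 512:

#include <stdbool.h>
#include <stdint.h>

#define SECTOR_SIZE 512u

/* (offset | count) has a low bit set iff either value does, so a single
 * mask test covers both alignment requirements at once. */
static bool dio_aligned(uint64_t offset, uint64_t count)
{
	return !((offset | count) & (SECTOR_SIZE - 1));
}

/* dio_aligned(4096, 8192) -> true; dio_aligned(4096, 100) -> false */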
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
index ff664fd0d8ef..d359aa9b33b8 100644
--- a/fs/bcachefs/fs-io-pagecache.c
+++ b/fs/bcachefs/fs-io-pagecache.c
@@ -309,39 +309,49 @@ void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
}
}
-void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
- u64 start, u64 end)
+int bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
+ u64 *start, u64 end,
+ bool nonblocking)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t index = *start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
struct folio_batch fbatch;
s64 i_sectors_delta = 0;
- unsigned i, j;
+ int ret = 0;
- if (end <= start)
- return;
+ if (end <= *start)
+ return 0;
folio_batch_init(&fbatch);
while (filemap_get_folios(inode->v.i_mapping,
&index, end_index, &fbatch)) {
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
+
+ if (!nonblocking)
+ folio_lock(folio);
+ else if (!folio_trylock(folio)) {
+ folio_batch_release(&fbatch);
+ ret = -EAGAIN;
+ break;
+ }
+
u64 folio_start = folio_sector(folio);
u64 folio_end = folio_end_sector(folio);
- unsigned folio_offset = max(start, folio_start) - folio_start;
- unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
- struct bch_folio *s;
BUG_ON(end <= folio_start);
- folio_lock(folio);
- s = bch2_folio(folio);
+ *start = min(end, folio_end);
+ struct bch_folio *s = bch2_folio(folio);
if (s) {
+ unsigned folio_offset = max(*start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+
spin_lock(&s->lock);
- for (j = folio_offset; j < folio_offset + folio_len; j++) {
+ for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) {
i_sectors_delta -= s->s[j].state == SECTOR_dirty;
bch2_folio_sector_set(folio, s, j,
folio_sector_reserve(s->s[j].state));
@@ -356,6 +366,7 @@ void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
}
bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+ return ret;
}
static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
diff --git a/fs/bcachefs/fs-io-pagecache.h b/fs/bcachefs/fs-io-pagecache.h
index 27f712ae37a6..8cbaba6565b4 100644
--- a/fs/bcachefs/fs-io-pagecache.h
+++ b/fs/bcachefs/fs-io-pagecache.h
@@ -143,7 +143,7 @@ int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
-void bch2_mark_pagecache_reserved(struct bch_inode_info *, u64, u64);
+int bch2_mark_pagecache_reserved(struct bch_inode_info *, u64 *, u64, bool);
int bch2_get_folio_disk_reservation(struct bch_fs *,
struct bch_inode_info *,
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 98bd5babab19..8c70123b6a0c 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -79,7 +79,7 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
continue;
bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
- REQ_OP_FLUSH,
+ REQ_OP_WRITE|REQ_PREFLUSH,
GFP_KERNEL,
&c->nocow_flush_bioset),
struct nocow_flush, bio);
@@ -675,8 +675,11 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
- drop_locks_do(trans,
- (bch2_mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
+ if (bch2_mark_pagecache_reserved(inode, &hole_start,
+ iter.pos.offset, true))
+ drop_locks_do(trans,
+ bch2_mark_pagecache_reserved(inode, &hole_start,
+ iter.pos.offset, false));
bkey_err:
bch2_quota_reservation_put(c, inode, &quota_res);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
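
The fallocate path above now calls bch2_mark_pagecache_reserved() in nonblocking mode first, and only drops its btree locks to retry in blocking mode if a folio_trylock() failed; the &hole_start cursor lets the retry resume where the first pass stopped. A generic pthread sketch of that trylock-first shape; every name here is invented:

#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* "btree locks" */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* "folio lock" */

static void do_work(void) { /* ... */ }

static void locked_path(void)
{
	pthread_mutex_lock(&outer);

	if (pthread_mutex_trylock(&inner) == 0) {	/* fast path: never blocks */
		do_work();
		pthread_mutex_unlock(&inner);
	} else {
		/* Would block: drop the outer lock first rather than sleep
		 * while holding it, then redo the work in blocking mode. */
		pthread_mutex_unlock(&outer);
		pthread_mutex_lock(&inner);
		do_work();
		pthread_mutex_unlock(&inner);
		pthread_mutex_lock(&outer);
	}

	pthread_mutex_unlock(&outer);
}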
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 1cbc5807bc80..3dc8630ff9fe 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -337,11 +337,12 @@ static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
if (arg.flags & BCH_SUBVOL_SNAPSHOT_RO)
create_flags |= BCH_CREATE_SNAPSHOT_RO;
- /* why do we need this lock? */
- down_read(&c->vfs_sb->s_umount);
-
- if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE)
+ if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) {
+ /* sync_inodes_sb() requires that s_umount be held */
+ down_read(&c->vfs_sb->s_umount);
sync_inodes_sb(c->vfs_sb);
+ up_read(&c->vfs_sb->s_umount);
+ }
retry:
if (arg.src_ptr) {
error = user_path_at(arg.dirfd,
@@ -425,8 +426,6 @@ err2:
goto retry;
}
err1:
- up_read(&c->vfs_sb->s_umount);
-
return error;
}
@@ -456,6 +455,7 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
if (IS_ERR(victim))
return PTR_ERR(victim);
+ dir = d_inode(path.dentry);
if (victim->d_sb->s_fs_info != c) {
ret = -EXDEV;
goto err;
@@ -464,14 +464,13 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
ret = -ENOENT;
goto err;
}
- dir = d_inode(path.dentry);
ret = __bch2_unlink(dir, victim, true);
if (!ret) {
fsnotify_rmdir(dir, victim);
d_delete(victim);
}
- inode_unlock(dir);
err:
+ inode_unlock(dir);
dput(victim);
path_put(&path);
return ret;
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index ec419b8e2c43..77ae65542db9 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -435,7 +435,7 @@ static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
bch2_subvol_is_ro(c, inode->ei_subvol) ?:
__bch2_link(c, inode, dir, dentry);
if (unlikely(ret))
- return ret;
+ return bch2_err_class(ret);
ihold(&inode->v);
d_instantiate(dentry, &inode->v);
@@ -487,8 +487,9 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
struct bch_inode_info *dir= to_bch_ei(vdir);
struct bch_fs *c = dir->v.i_sb->s_fs_info;
- return bch2_subvol_is_ro(c, dir->ei_subvol) ?:
+ int ret = bch2_subvol_is_ro(c, dir->ei_subvol) ?:
__bch2_unlink(vdir, dentry, false);
+ return bch2_err_class(ret);
}
static int bch2_symlink(struct mnt_idmap *idmap,
@@ -523,7 +524,7 @@ static int bch2_symlink(struct mnt_idmap *idmap,
return 0;
err:
iput(&inode->v);
- return ret;
+ return bch2_err_class(ret);
}
static int bch2_mkdir(struct mnt_idmap *idmap,
@@ -641,7 +642,7 @@ err:
src_inode,
dst_inode);
- return ret;
+ return bch2_err_class(ret);
}
static void bch2_setattr_copy(struct mnt_idmap *idmap,
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 4f0ecd605675..6a760777bafb 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -119,22 +119,19 @@ static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
if (!ret)
*snapshot = iter.pos.snapshot;
err:
- bch_err_msg(trans->c, ret, "fetching inode %llu:%u", inode_nr, *snapshot);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
-static int __lookup_dirent(struct btree_trans *trans,
+static int lookup_dirent_in_snapshot(struct btree_trans *trans,
struct bch_hash_info hash_info,
subvol_inum dir, struct qstr *name,
- u64 *target, unsigned *type)
+ u64 *target, unsigned *type, u32 snapshot)
{
struct btree_iter iter;
struct bkey_s_c_dirent d;
- int ret;
-
- ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
- &hash_info, dir, name, 0);
+ int ret = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
+ &hash_info, dir, name, 0, snapshot);
if (ret)
return ret;
@@ -225,15 +222,16 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
struct bch_inode_unpacked root_inode;
struct bch_hash_info root_hash_info;
- ret = lookup_inode(trans, root_inum.inum, &root_inode, &snapshot);
+ u32 root_inode_snapshot = snapshot;
+ ret = lookup_inode(trans, root_inum.inum, &root_inode, &root_inode_snapshot);
bch_err_msg(c, ret, "looking up root inode");
if (ret)
return ret;
root_hash_info = bch2_hash_info_init(c, &root_inode);
- ret = __lookup_dirent(trans, root_hash_info, root_inum,
- &lostfound_str, &inum, &d_type);
+ ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
+ &lostfound_str, &inum, &d_type, snapshot);
if (bch2_err_matches(ret, ENOENT))
goto create_lostfound;
@@ -250,7 +248,10 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
* The bch2_check_dirents pass has already run, dangling dirents
* shouldn't exist here:
*/
- return lookup_inode(trans, inum, lostfound, &snapshot);
+ ret = lookup_inode(trans, inum, lostfound, &snapshot);
+ bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
+ inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
+ return ret;
create_lostfound:
/*
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 37dce96f48ac..086f0090b03a 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -506,22 +506,33 @@ fsck_err:
static void __bch2_inode_unpacked_to_text(struct printbuf *out,
struct bch_inode_unpacked *inode)
{
- prt_printf(out, "mode=%o ", inode->bi_mode);
+ printbuf_indent_add(out, 2);
+ prt_printf(out, "mode=%o", inode->bi_mode);
+ prt_newline(out);
prt_str(out, "flags=");
prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
prt_printf(out, " (%x)", inode->bi_flags);
+ prt_newline(out);
- prt_printf(out, " journal_seq=%llu bi_size=%llu bi_sectors=%llu bi_version=%llu",
- inode->bi_journal_seq,
- inode->bi_size,
- inode->bi_sectors,
- inode->bi_version);
+ prt_printf(out, "journal_seq=%llu", inode->bi_journal_seq);
+ prt_newline(out);
+
+ prt_printf(out, "bi_size=%llu", inode->bi_size);
+ prt_newline(out);
+
+ prt_printf(out, "bi_sectors=%llu", inode->bi_sectors);
+ prt_newline(out);
+
+ prt_printf(out, "bi_version=%llu", inode->bi_version);
+ prt_newline(out);
#define x(_name, _bits) \
- prt_printf(out, " "#_name "=%llu", (u64) inode->_name);
+ prt_printf(out, #_name "=%llu", (u64) inode->_name); \
+ prt_newline(out);
BCH_INODE_FIELDS_v3()
#undef x
+ printbuf_indent_sub(out, 2);
}
void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
@@ -587,7 +598,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
}
}
- if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+ if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
BUG_ON(!trans->journal_res.seq);
bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
@@ -597,7 +608,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
struct bch_fs *c = trans->c;
percpu_down_read(&c->mark_lock);
- this_cpu_add(c->usage_gc->nr_inodes, nr);
+ this_cpu_add(c->usage_gc->b.nr_inodes, nr);
percpu_up_read(&c->mark_lock);
}
diff --git a/fs/bcachefs/inode_format.h b/fs/bcachefs/inode_format.h
new file mode 100644
index 000000000000..83d107331edf
--- /dev/null
+++ b/fs/bcachefs/inode_format.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_INODE_FORMAT_H
+#define _BCACHEFS_INODE_FORMAT_H
+
+#define BLOCKDEV_INODE_MAX 4096
+#define BCACHEFS_ROOT_INO 4096
+
+struct bch_inode {
+ struct bch_val v;
+
+ __le64 bi_hash_seed;
+ __le32 bi_flags;
+ __le16 bi_mode;
+ __u8 fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v2 {
+ struct bch_val v;
+
+ __le64 bi_journal_seq;
+ __le64 bi_hash_seed;
+ __le64 bi_flags;
+ __le16 bi_mode;
+ __u8 fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v3 {
+ struct bch_val v;
+
+ __le64 bi_journal_seq;
+ __le64 bi_hash_seed;
+ __le64 bi_flags;
+ __le64 bi_sectors;
+ __le64 bi_size;
+ __le64 bi_version;
+ __u8 fields[];
+} __packed __aligned(8);
+
+#define INODEv3_FIELDS_START_INITIAL 6
+#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
+
+struct bch_inode_generation {
+ struct bch_val v;
+
+ __le32 bi_generation;
+ __le32 pad;
+} __packed __aligned(8);
+
+/*
+ * bi_subvol and bi_parent_subvol are only set for subvolume roots:
+ */
+
+#define BCH_INODE_FIELDS_v2() \
+ x(bi_atime, 96) \
+ x(bi_ctime, 96) \
+ x(bi_mtime, 96) \
+ x(bi_otime, 96) \
+ x(bi_size, 64) \
+ x(bi_sectors, 64) \
+ x(bi_uid, 32) \
+ x(bi_gid, 32) \
+ x(bi_nlink, 32) \
+ x(bi_generation, 32) \
+ x(bi_dev, 32) \
+ x(bi_data_checksum, 8) \
+ x(bi_compression, 8) \
+ x(bi_project, 32) \
+ x(bi_background_compression, 8) \
+ x(bi_data_replicas, 8) \
+ x(bi_promote_target, 16) \
+ x(bi_foreground_target, 16) \
+ x(bi_background_target, 16) \
+ x(bi_erasure_code, 16) \
+ x(bi_fields_set, 16) \
+ x(bi_dir, 64) \
+ x(bi_dir_offset, 64) \
+ x(bi_subvol, 32) \
+ x(bi_parent_subvol, 32)
+
+#define BCH_INODE_FIELDS_v3() \
+ x(bi_atime, 96) \
+ x(bi_ctime, 96) \
+ x(bi_mtime, 96) \
+ x(bi_otime, 96) \
+ x(bi_uid, 32) \
+ x(bi_gid, 32) \
+ x(bi_nlink, 32) \
+ x(bi_generation, 32) \
+ x(bi_dev, 32) \
+ x(bi_data_checksum, 8) \
+ x(bi_compression, 8) \
+ x(bi_project, 32) \
+ x(bi_background_compression, 8) \
+ x(bi_data_replicas, 8) \
+ x(bi_promote_target, 16) \
+ x(bi_foreground_target, 16) \
+ x(bi_background_target, 16) \
+ x(bi_erasure_code, 16) \
+ x(bi_fields_set, 16) \
+ x(bi_dir, 64) \
+ x(bi_dir_offset, 64) \
+ x(bi_subvol, 32) \
+ x(bi_parent_subvol, 32) \
+ x(bi_nocow, 8)
+
+/* subset of BCH_INODE_FIELDS */
+#define BCH_INODE_OPTS() \
+ x(data_checksum, 8) \
+ x(compression, 8) \
+ x(project, 32) \
+ x(background_compression, 8) \
+ x(data_replicas, 8) \
+ x(promote_target, 16) \
+ x(foreground_target, 16) \
+ x(background_target, 16) \
+ x(erasure_code, 16) \
+ x(nocow, 8)
+
+enum inode_opt_id {
+#define x(name, ...) \
+ Inode_opt_##name,
+ BCH_INODE_OPTS()
+#undef x
+ Inode_opt_nr,
+};
+
+#define BCH_INODE_FLAGS() \
+ x(sync, 0) \
+ x(immutable, 1) \
+ x(append, 2) \
+ x(nodump, 3) \
+ x(noatime, 4) \
+ x(i_size_dirty, 5) \
+ x(i_sectors_dirty, 6) \
+ x(unlinked, 7) \
+ x(backptr_untrusted, 8)
+
+/* bits 20+ reserved for packed fields below: */
+
+enum bch_inode_flags {
+#define x(t, n) BCH_INODE_##t = 1U << n,
+ BCH_INODE_FLAGS()
+#undef x
+};
+
+enum __bch_inode_flags {
+#define x(t, n) __BCH_INODE_##t = n,
+ BCH_INODE_FLAGS()
+#undef x
+};
+
+LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24);
+LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 31);
+LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
+
+LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
+LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
+LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_FIELDS_START,
+ struct bch_inode_v3, bi_flags, 31, 36);
+LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52);
+
+#endif /* _BCACHEFS_INODE_FORMAT_H */
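
BCH_INODE_FIELDS_v3() and BCH_INODE_OPTS() above are x-macros: the field list is written once, then re-expanded under different definitions of x() to generate enums, string tables, and the per-field printer seen in the inode.c hunk earlier. A minimal standalone demonstration:

#include <stdio.h>

/* Field list written once ... */
#define FIELDS()	\
	x(uid)		\
	x(gid)		\
	x(nlink)

/* ... expanded into an enum ... */
enum field {
#define x(name) FIELD_##name,
	FIELDS()
#undef x
	FIELD_NR,
};

/* ... and, under a different x(), into a matching string table. */
static const char * const field_names[] = {
#define x(name) #name,
	FIELDS()
#undef x
};

int main(void)
{
	for (int i = 0; i < FIELD_NR; i++)
		printf("%d = %s\n", i, field_names[i]);
	return 0;
}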
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index ca6d5f516aa2..1baf78594cca 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -442,9 +442,7 @@ case LOGGED_OP_FINSERT_shift_extents:
op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);
- ret = bch2_bkey_set_needs_rebalance(c, copy,
- opts.background_target,
- opts.background_compression) ?:
+ ret = bch2_bkey_set_needs_rebalance(c, copy, &opts) ?:
bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
bch2_logged_op_update(trans, &op->k_i) ?:
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 33c0e783d546..2c098ac017b3 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -362,9 +362,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
bkey_start_pos(&sk.k->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- ret = bch2_bkey_set_needs_rebalance(c, sk.k,
- op->opts.background_target,
- op->opts.background_compression) ?:
+ ret = bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
bch2_extent_update(trans, inum, &iter, sk.k,
&op->res,
op->new_i_size, &op->i_sectors_delta,
@@ -1447,10 +1445,11 @@ err:
op->flags |= BCH_WRITE_DONE;
if (ret < 0) {
- bch_err_inum_offset_ratelimited(c,
- op->pos.inode,
- op->pos.offset << 9,
- "%s(): error: %s", __func__, bch2_err_str(ret));
+ if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ op->pos.offset << 9,
+ "%s(): error: %s", __func__, bch2_err_str(ret));
op->error = ret;
break;
}
@@ -1565,6 +1564,7 @@ CLOSURE_CALLBACK(bch2_write)
BUG_ON(!op->write_point.v);
BUG_ON(bkey_eq(op->pos, POS_MAX));
+ op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
op->start_time = local_clock();
bch2_keylist_init(&op->insert_keys, op->inline_keys);
wbio_init(bio)->put_bio = false;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 8538ef34f62b..bc890776eb57 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -27,6 +27,47 @@ static const char * const bch2_journal_errors[] = {
NULL
};
+static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
+{
+ union journal_res_state s = READ_ONCE(j->reservations);
+ unsigned i = seq & JOURNAL_BUF_MASK;
+ struct journal_buf *buf = j->buf + i;
+
+ prt_printf(out, "seq:");
+ prt_tab(out);
+ prt_printf(out, "%llu", seq);
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "refcount:");
+ prt_tab(out);
+ prt_printf(out, "%u", journal_state_count(s, i));
+ prt_newline(out);
+
+ prt_printf(out, "size:");
+ prt_tab(out);
+ prt_human_readable_u64(out, vstruct_bytes(buf->data));
+ prt_newline(out);
+
+ prt_printf(out, "expires");
+ prt_tab(out);
+ prt_printf(out, "%li jiffies", buf->expires - jiffies);
+ prt_newline(out);
+
+ printbuf_indent_sub(out, 2);
+}
+
+static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
+{
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 24);
+
+ for (u64 seq = journal_last_unwritten_seq(j);
+ seq <= journal_cur_seq(j);
+ seq++)
+ bch2_journal_buf_to_text(out, j, seq);
+}
+
static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
return seq > j->seq_ondisk;
@@ -156,7 +197,7 @@ void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
* We don't close a journal_buf until the next journal_buf is finished writing,
* and can be opened again - this also initializes the next journal_buf:
*/
-static void __journal_entry_close(struct journal *j, unsigned closed_val)
+static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_buf *buf = journal_cur_buf(j);
@@ -185,7 +226,17 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val)
/* Close out old buffer: */
buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
- trace_journal_entry_close(c, vstruct_bytes(buf->data));
+ if (trace_journal_entry_close_enabled() && trace) {
+ struct printbuf pbuf = PRINTBUF;
+ pbuf.atomic++;
+
+ prt_str(&pbuf, "entry size: ");
+ prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
+ prt_newline(&pbuf);
+ bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
+ trace_journal_entry_close(c, pbuf.buf);
+ printbuf_exit(&pbuf);
+ }
sectors = vstruct_blocks_plus(buf->data, c->block_bits,
buf->u64s_reserved) << c->block_bits;
@@ -225,7 +276,7 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val)
void bch2_journal_halt(struct journal *j)
{
spin_lock(&j->lock);
- __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
+ __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
if (!j->err_seq)
j->err_seq = journal_cur_seq(j);
journal_wake(j);
@@ -239,7 +290,7 @@ static bool journal_entry_want_write(struct journal *j)
/* Don't close it yet if we already have a write in flight: */
if (ret)
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
else if (nr_unwritten_journal_entries(j)) {
struct journal_buf *buf = journal_cur_buf(j);
@@ -406,7 +457,7 @@ static void journal_write_work(struct work_struct *work)
if (delta > 0)
mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
else
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
unlock:
spin_unlock(&j->lock);
}
@@ -463,13 +514,21 @@ retry:
buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
ret = journal_entry_open(j);
if (ret == JOURNAL_ERR_max_in_flight) {
track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
&j->max_in_flight_start, true);
- trace_and_count(c, journal_entry_full, c);
+ if (trace_journal_entry_full_enabled()) {
+ struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
+ bch2_journal_bufs_to_text(&buf, j);
+ trace_journal_entry_full(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+ count_event(c, journal_entry_full);
}
unlock:
can_discard = j->can_discard;
@@ -549,7 +608,7 @@ void bch2_journal_entry_res_resize(struct journal *j,
/*
* Not enough room in current journal entry, have to flush it:
*/
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
} else {
journal_cur_buf(j)->u64s_reserved += d;
}
@@ -606,7 +665,7 @@ recheck_need_open:
struct journal_res res = { 0 };
if (journal_entry_is_open(j))
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
spin_unlock(&j->lock);
@@ -786,7 +845,7 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou
if (buf->need_flush_to_write_buffer) {
if (seq == journal_cur_seq(j))
- __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
union journal_res_state s;
s.v = atomic64_read_acquire(&j->reservations.counter);
@@ -1339,35 +1398,9 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
}
prt_newline(out);
-
- for (u64 seq = journal_cur_seq(j);
- seq >= journal_last_unwritten_seq(j);
- --seq) {
- unsigned i = seq & JOURNAL_BUF_MASK;
-
- prt_printf(out, "unwritten entry:");
- prt_tab(out);
- prt_printf(out, "%llu", seq);
- prt_newline(out);
- printbuf_indent_add(out, 2);
-
- prt_printf(out, "refcount:");
- prt_tab(out);
- prt_printf(out, "%u", journal_state_count(s, i));
- prt_newline(out);
-
- prt_printf(out, "sectors:");
- prt_tab(out);
- prt_printf(out, "%u", j->buf[i].sectors);
- prt_newline(out);
-
- prt_printf(out, "expires");
- prt_tab(out);
- prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
- prt_newline(out);
-
- printbuf_indent_sub(out, 2);
- }
+ prt_printf(out, "unwritten entries:");
+ prt_newline(out);
+ bch2_journal_bufs_to_text(out, j);
prt_printf(out,
"replay done:\t\t%i\n",
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index b0f4dd491e12..47805193f18c 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -683,10 +683,7 @@ static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs
prt_printf(out, "dev=%u", le32_to_cpu(u->dev));
for (i = 0; i < nr_types; i++) {
- if (i < BCH_DATA_NR)
- prt_printf(out, " %s", bch2_data_types[i]);
- else
- prt_printf(out, " (unknown data type %u)", i);
+ bch2_prt_data_type(out, i);
prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
le64_to_cpu(u->d[i].buckets),
le64_to_cpu(u->d[i].sectors),
@@ -1481,6 +1478,8 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w)
c->opts.foreground_target;
unsigned i, replicas = 0, replicas_want =
READ_ONCE(c->opts.metadata_replicas);
+ unsigned replicas_need = min_t(unsigned, replicas_want,
+ READ_ONCE(c->opts.metadata_replicas_required));
rcu_read_lock();
retry:
@@ -1529,7 +1528,7 @@ done:
BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
- return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
+ return replicas >= replicas_need ? 0 : -EROFS;
}
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
@@ -1991,7 +1990,8 @@ CLOSURE_CALLBACK(bch2_journal_write)
percpu_ref_get(&ca->io_ref);
bio = ca->journal.bio;
- bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
+ bio_reset(bio, ca->disk_sb.bdev,
+ REQ_OP_WRITE|REQ_PREFLUSH);
bio->bi_end_io = journal_write_endio;
bio->bi_private = ca;
closure_bio_submit(bio, cl);
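
journal_write_alloc() above, like the io_write.c hunk earlier, clamps the replica count a write must achieve to min(metadata_replicas, metadata_replicas_required): with "required" misconfigured above "want", every journal write used to fail with -EROFS even on healthy devices. The clamp in isolation:

#include <stdbool.h>

/* Sketch: decide whether a write achieved enough replicas.  'want' stands
 * in for opts.metadata_replicas, 'required' for metadata_replicas_required. */
static bool write_replicas_ok(unsigned got, unsigned want, unsigned required)
{
	unsigned need = required < want ? required : want;	/* the clamp */

	return got >= need;
}

/* want=3, required=4 (misconfigured): need clamps to 3, so a healthy
 * 3-replica write still succeeds instead of always returning -EROFS. */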
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 820d25e19e5f..c33dca641575 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -205,7 +205,7 @@ void bch2_journal_space_available(struct journal *j)
j->can_discard = can_discard;
- if (nr_online < c->opts.metadata_replicas_required) {
+ if (nr_online < metadata_replicas_required(c)) {
ret = JOURNAL_ERR_insufficient_devices;
goto out;
}
@@ -892,9 +892,11 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
journal_seq_pin(j, seq)->devs);
seq++;
- spin_unlock(&j->lock);
- ret = bch2_mark_replicas(c, &replicas.e);
- spin_lock(&j->lock);
+ if (replicas.e.nr_devs) {
+ spin_unlock(&j->lock);
+ ret = bch2_mark_replicas(c, &replicas.e);
+ spin_lock(&j->lock);
+ }
}
spin_unlock(&j->lock);
err:
diff --git a/fs/bcachefs/logged_ops_format.h b/fs/bcachefs/logged_ops_format.h
new file mode 100644
index 000000000000..6a4bf7129dba
--- /dev/null
+++ b/fs/bcachefs/logged_ops_format.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_LOGGED_OPS_FORMAT_H
+#define _BCACHEFS_LOGGED_OPS_FORMAT_H
+
+struct bch_logged_op_truncate {
+ struct bch_val v;
+ __le32 subvol;
+ __le32 pad;
+ __le64 inum;
+ __le64 new_i_size;
+};
+
+enum logged_op_finsert_state {
+ LOGGED_OP_FINSERT_start,
+ LOGGED_OP_FINSERT_shift_extents,
+ LOGGED_OP_FINSERT_finish,
+};
+
+struct bch_logged_op_finsert {
+ struct bch_val v;
+ __u8 state;
+ __u8 pad[3];
+ __le32 subvol;
+ __le64 inum;
+ __le64 dst_offset;
+ __le64 src_offset;
+ __le64 pos;
+};
+
+#endif /* _BCACHEFS_LOGGED_OPS_FORMAT_H */
diff --git a/fs/bcachefs/mean_and_variance.h b/fs/bcachefs/mean_and_variance.h
index b2be565bb8f2..64df11ab422b 100644
--- a/fs/bcachefs/mean_and_variance.h
+++ b/fs/bcachefs/mean_and_variance.h
@@ -17,7 +17,7 @@
* Rust and rustc has issues with u128.
*/
-#if defined(__SIZEOF_INT128__) && defined(__KERNEL__)
+#if defined(__SIZEOF_INT128__) && defined(__KERNEL__) && !defined(CONFIG_PARISC)
typedef struct {
unsigned __int128 v;
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 7a33319dcd16..bf68ea49447b 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -6,9 +6,11 @@
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_gc.h"
+#include "btree_io.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
+#include "compress.h"
#include "disk_groups.h"
#include "ec.h"
#include "errcode.h"
@@ -34,12 +36,46 @@ const char * const bch2_data_ops_strs[] = {
NULL
};
-static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
+static void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ printbuf_tabstop_push(out, 20);
+ prt_str(out, "rewrite ptrs:");
+ prt_tab(out);
+ bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
+ prt_newline(out);
+
+ prt_str(out, "kill ptrs: ");
+ prt_tab(out);
+ bch2_prt_u64_base2(out, data_opts->kill_ptrs);
+ prt_newline(out);
+
+ prt_str(out, "target: ");
+ prt_tab(out);
+ bch2_target_to_text(out, c, data_opts->target);
+ prt_newline(out);
+
+ prt_str(out, "compression: ");
+ prt_tab(out);
+ bch2_compression_opt_to_text(out, background_compression(*io_opts));
+ prt_newline(out);
+
+ prt_str(out, "extra replicas: ");
+ prt_tab(out);
+ prt_u64(out, data_opts->extra_replicas);
+}
+
+static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
{
if (trace_move_extent_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
+ prt_newline(&buf);
+ bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
trace_move_extent(c, buf.buf);
printbuf_exit(&buf);
}
@@ -111,6 +147,15 @@ static void move_write(struct moving_io *io)
return;
}
+ if (trace_move_extent_write_enabled()) {
+ struct bch_fs *c = io->write.op.c;
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
+ trace_move_extent_write(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+
closure_get(&io->write.ctxt->cl);
atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
atomic_inc(&io->write.ctxt->write_ios);
@@ -241,9 +286,10 @@ int bch2_move_extent(struct moving_context *ctxt,
unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
+ trace_move_extent2(c, k, &io_opts, &data_opts);
+
if (ctxt->stats)
ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
- trace_move_extent2(c, k);
bch2_data_update_opts_normalize(k, &data_opts);
@@ -759,6 +805,8 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
if (!b)
goto next;
+ unsigned sectors = btree_ptr_sectors_written(&b->key);
+
ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
bch2_trans_iter_exit(trans, &iter);
@@ -768,11 +816,10 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
goto err;
if (ctxt->rate)
- bch2_ratelimit_increment(ctxt->rate,
- c->opts.btree_node_size >> 9);
+ bch2_ratelimit_increment(ctxt->rate, sectors);
if (ctxt->stats) {
- atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
- atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+ atomic64_add(sectors, &ctxt->stats->sectors_seen);
+ atomic64_add(sectors, &ctxt->stats->sectors_moved);
}
}
next:
@@ -1083,9 +1130,9 @@ int bch2_data_job(struct bch_fs *c,
void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
{
- prt_printf(out, "%s: data type=%s pos=",
- stats->name,
- bch2_data_types[stats->data_type]);
+ prt_printf(out, "%s: data type==", stats->name);
+ bch2_prt_data_type(out, stats->data_type);
+ prt_str(out, " pos=");
bch2_bbpos_to_text(out, stats->pos);
prt_newline(out);
printbuf_indent_add(out, 2);
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index 8e6f230eac38..b1ed0b9a20d3 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -52,7 +52,7 @@ const char * const bch2_csum_opts[] = {
NULL
};
-const char * const bch2_compression_types[] = {
+const char * const __bch2_compression_types[] = {
BCH_COMPRESSION_TYPES()
NULL
};
@@ -72,7 +72,7 @@ const char * const bch2_str_hash_opts[] = {
NULL
};
-const char * const bch2_data_types[] = {
+const char * const __bch2_data_types[] = {
BCH_DATA_TYPES()
NULL
};
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 93a24fef4214..9a4b7faa3765 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -18,11 +18,11 @@ extern const char * const bch2_sb_compat[];
extern const char * const __bch2_btree_ids[];
extern const char * const bch2_csum_types[];
extern const char * const bch2_csum_opts[];
-extern const char * const bch2_compression_types[];
+extern const char * const __bch2_compression_types[];
extern const char * const bch2_compression_opts[];
extern const char * const bch2_str_hash_types[];
extern const char * const bch2_str_hash_opts[];
-extern const char * const bch2_data_types[];
+extern const char * const __bch2_data_types[];
extern const char * const bch2_member_states[];
extern const char * const bch2_jset_entry_types[];
extern const char * const bch2_fs_usage_types[];
@@ -564,6 +564,11 @@ struct bch_io_opts {
#undef x
};
+static inline unsigned background_compression(struct bch_io_opts opts)
+{
+ return opts.background_compression ?: opts.compression;
+}
+
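/*
 * Editor's sketch (not in the patch): background_compression() centralizes
 * the "fall back to the foreground compression option" rule, using the GNU
 * "a ?: b" extension (a if a is nonzero, else b), so callers can replace
 * open-coded chains like:
 *
 *	unsigned c = opts.background_compression ?: opts.compression;
 *
 * with:
 *
 *	unsigned c = background_compression(opts);
 */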
struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts);
bool bch2_opt_is_inode_opt(enum bch_opt_id);
diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c
index accf246c3233..b27d22925929 100644
--- a/fs/bcachefs/printbuf.c
+++ b/fs/bcachefs/printbuf.c
@@ -56,6 +56,7 @@ void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args)
va_copy(args2, args);
len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args2);
+ va_end(args2);
} while (len + 1 >= printbuf_remaining(out) &&
!bch2_printbuf_make_room(out, len + 1));
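/*
 * Editor's note (illustrative): every va_copy() must be paired with a
 * va_end() before the copy is reinitialized, and this loop re-copies args on
 * each retry, so the matching va_end() belongs inside the loop body:
 *
 *	do {
 *		va_copy(args2, args);
 *		len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args2);
 *		va_end(args2);
 *	} while (len + 1 >= printbuf_remaining(out) &&
 *		 !bch2_printbuf_make_room(out, len + 1));
 */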
diff --git a/fs/bcachefs/quota_format.h b/fs/bcachefs/quota_format.h
new file mode 100644
index 000000000000..dc34347ef6c7
--- /dev/null
+++ b/fs/bcachefs/quota_format.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_QUOTA_FORMAT_H
+#define _BCACHEFS_QUOTA_FORMAT_H
+
+/* KEY_TYPE_quota: */
+
+enum quota_types {
+ QTYP_USR = 0,
+ QTYP_GRP = 1,
+ QTYP_PRJ = 2,
+ QTYP_NR = 3,
+};
+
+enum quota_counters {
+ Q_SPC = 0,
+ Q_INO = 1,
+ Q_COUNTERS = 2,
+};
+
+struct bch_quota_counter {
+ __le64 hardlimit;
+ __le64 softlimit;
+};
+
+struct bch_quota {
+ struct bch_val v;
+ struct bch_quota_counter c[Q_COUNTERS];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_quota: */
+
+struct bch_sb_quota_counter {
+ __le32 timelimit;
+ __le32 warnlimit;
+};
+
+struct bch_sb_quota_type {
+ __le64 flags;
+ struct bch_sb_quota_counter c[Q_COUNTERS];
+};
+
+struct bch_sb_field_quota {
+ struct bch_sb_field field;
+ struct bch_sb_quota_type q[QTYP_NR];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_QUOTA_FORMAT_H */
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 95f46cb3b5bd..22d1017aa49b 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -177,8 +177,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
prt_str(&buf, "target=");
bch2_target_to_text(&buf, c, r->target);
prt_str(&buf, " compression=");
- struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
- prt_str(&buf, bch2_compression_opts[opt.type]);
+ bch2_compression_opt_to_text(&buf, r->compression);
prt_str(&buf, " ");
bch2_bkey_val_to_text(&buf, c, k);
@@ -254,13 +253,12 @@ static bool rebalance_pred(struct bch_fs *c, void *arg,
if (k.k->p.inode) {
target = io_opts->background_target;
- compression = io_opts->background_compression ?: io_opts->compression;
+ compression = background_compression(*io_opts);
} else {
const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
target = r ? r->target : io_opts->background_target;
- compression = r ? r->compression :
- (io_opts->background_compression ?: io_opts->compression);
+ compression = r ? r->compression : background_compression(*io_opts);
}
data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
@@ -371,6 +369,7 @@ static int do_rebalance(struct moving_context *ctxt)
!kthread_should_stop() &&
!atomic64_read(&r->work_stats.sectors_seen) &&
!atomic64_read(&r->scan_stats.sectors_seen)) {
+ bch2_moving_ctxt_flush_all(ctxt);
bch2_trans_unlock_long(trans);
rebalance_wait(c);
}
@@ -385,7 +384,6 @@ static int bch2_rebalance_thread(void *arg)
struct bch_fs *c = arg;
struct bch_fs_rebalance *r = &c->rebalance;
struct moving_context ctxt;
- int ret;
set_freezable();
@@ -393,8 +391,7 @@ static int bch2_rebalance_thread(void *arg)
writepoint_ptr(&c->rebalance_write_point),
true);
- while (!kthread_should_stop() &&
- !(ret = do_rebalance(&ctxt)))
+ while (!kthread_should_stop() && !do_rebalance(&ctxt))
;
bch2_moving_ctxt_exit(&ctxt);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 725214605a05..21e13bb4335b 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -280,7 +280,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
le64_to_cpu(u->v);
break;
case BCH_FS_USAGE_inodes:
- c->usage_base->nr_inodes = le64_to_cpu(u->v);
+ c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
break;
case BCH_FS_USAGE_key_version:
atomic64_set(&c->key_version,
@@ -577,8 +577,9 @@ u64 bch2_recovery_passes_from_stable(u64 v)
static bool check_version_upgrade(struct bch_fs *c)
{
- unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
unsigned latest_version = bcachefs_metadata_version_current;
+ unsigned latest_compatible = min(latest_version,
+ bch2_latest_compatible_version(c->sb.version));
unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
unsigned new_version = 0;
@@ -597,7 +598,7 @@ static bool check_version_upgrade(struct bch_fs *c)
new_version = latest_version;
break;
case BCH_VERSION_UPGRADE_none:
- new_version = old_version;
+ new_version = min(old_version, latest_version);
break;
}
}
@@ -774,7 +775,7 @@ int bch2_fs_recovery(struct bch_fs *c)
goto err;
}
- if (!(c->opts.nochanges && c->opts.norecovery)) {
+ if (!c->opts.nochanges) {
mutex_lock(&c->sb_lock);
bool write_sb = false;
@@ -804,7 +805,7 @@ int bch2_fs_recovery(struct bch_fs *c)
if (bch2_check_version_downgrade(c)) {
struct printbuf buf = PRINTBUF;
- prt_str(&buf, "Version downgrade required:\n");
+ prt_str(&buf, "Version downgrade required:");
__le64 passes = ext->recovery_passes_required[0];
bch2_sb_set_downgrade(c,
@@ -812,7 +813,7 @@ int bch2_fs_recovery(struct bch_fs *c)
BCH_VERSION_MINOR(c->sb.version));
passes = ext->recovery_passes_required[0] & ~passes;
if (passes) {
- prt_str(&buf, " running recovery passes: ");
+ prt_str(&buf, "\n running recovery passes: ");
prt_bitflags(&buf, bch2_recovery_passes,
bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
}
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index faa5d3670058..c47c66c2b394 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -292,10 +292,10 @@ static inline void check_indirect_extent_deleting(struct bkey_s new, unsigned *f
}
}
-int bch2_trans_mark_reflink_v(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+int bch2_trigger_reflink_v(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s new,
+ unsigned flags)
{
if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
(flags & BTREE_TRIGGER_INSERT))
@@ -324,7 +324,7 @@ void bch2_indirect_inline_data_to_text(struct printbuf *out,
min(datalen, 32U), d.v->data);
}
-int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
+int bch2_trigger_indirect_inline_data(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
unsigned flags)
@@ -486,6 +486,13 @@ s64 bch2_remap_range(struct bch_fs *c,
bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
+ if (dst_inum.inum < src_inum.inum) {
+ /* Avoid some lock cycle transaction restarts */
+ ret = bch2_btree_iter_traverse(&dst_iter);
+ if (ret)
+ continue;
+ }
+
dst_done = dst_iter.pos.offset - dst_start.offset;
src_want = POS(src_start.inode, src_start.offset + dst_done);
bch2_btree_iter_set_pos(&src_iter, src_want);
@@ -538,9 +545,7 @@ s64 bch2_remap_range(struct bch_fs *c,
min(src_k.k->p.offset - src_want.offset,
dst_end.offset - dst_iter.pos.offset));
- ret = bch2_bkey_set_needs_rebalance(c, new_dst.k,
- opts.background_target,
- opts.background_compression) ?:
+ ret = bch2_bkey_set_needs_rebalance(c, new_dst.k, &opts) ?:
bch2_extent_update(trans, dst_inum, &dst_iter,
new_dst.k, &disk_res,
new_i_size, i_sectors_delta,
diff --git a/fs/bcachefs/reflink.h b/fs/bcachefs/reflink.h
index 8ee778ec0022..4d8867289717 100644
--- a/fs/bcachefs/reflink.h
+++ b/fs/bcachefs/reflink.h
@@ -24,14 +24,14 @@ int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
+int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s, unsigned);
#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
.key_invalid = bch2_reflink_v_invalid, \
.val_to_text = bch2_reflink_v_to_text, \
.swab = bch2_ptr_swab, \
- .trigger = bch2_trans_mark_reflink_v, \
+ .trigger = bch2_trigger_reflink_v, \
.min_val_size = 8, \
})
@@ -39,7 +39,7 @@ int bch2_indirect_inline_data_invalid(struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_indirect_inline_data_to_text(struct printbuf *,
struct bch_fs *, struct bkey_s_c);
-int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
+int bch2_trigger_indirect_inline_data(struct btree_trans *,
enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s,
unsigned);
@@ -47,7 +47,7 @@ int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) { \
.key_invalid = bch2_indirect_inline_data_invalid, \
.val_to_text = bch2_indirect_inline_data_to_text, \
- .trigger = bch2_trans_mark_indirect_inline_data, \
+ .trigger = bch2_trigger_indirect_inline_data, \
.min_val_size = 8, \
})
diff --git a/fs/bcachefs/reflink_format.h b/fs/bcachefs/reflink_format.h
new file mode 100644
index 000000000000..6772eebb1fc6
--- /dev/null
+++ b/fs/bcachefs/reflink_format.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REFLINK_FORMAT_H
+#define _BCACHEFS_REFLINK_FORMAT_H
+
+struct bch_reflink_p {
+ struct bch_val v;
+ __le64 idx;
+ /*
+ * A reflink pointer might point to an indirect extent which is then
+ * later split (by copygc or rebalance). If we only pointed to part of
+ * the original indirect extent, and then one of the fragments is
+ * outside the range we point to, we'd leak a refcount: so when creating
+ * reflink pointers, we need to store pad values to remember the full
+ * range we were taking a reference on.
+ */
+ __le32 front_pad;
+ __le32 back_pad;
+} __packed __aligned(8);
+
+struct bch_reflink_v {
+ struct bch_val v;
+ __le64 refcount;
+ union bch_extent_entry start[0];
+ __u64 _data[];
+} __packed __aligned(8);
+
+struct bch_indirect_inline_data {
+ struct bch_val v;
+ __le64 refcount;
+ u8 data[];
+};
+
+#endif /* _BCACHEFS_REFLINK_FORMAT_H */
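/*
 * Editor's worked example (illustrative, numbers invented): say a reflink
 * pointer logically covers [16, 32) of an indirect extent but took its
 * reference on the whole of [0, 48). If copygc later splits the indirect
 * extent at 24, the pieces [0, 24) and [24, 48) both extend outside
 * [16, 32); front_pad = 16 and back_pad = 16 preserve the original
 * referenced range [0, 48), so the refcounts can still be dropped exactly
 * once over the full range when the pointer is deleted.
 */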
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 92ba56ef1fc8..cc2672c12031 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -9,6 +9,12 @@
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
struct bch_replicas_cpu *);
+/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
+static int bch2_memcmp(const void *l, const void *r, size_t size)
+{
+ return memcmp(l, r, size);
+}
+
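/*
 * Editor's note (assumed from the call sites, not stated in the diff):
 * eytzinger0_sort() wants a comparator of type
 *
 *	typedef int (*cmp_r_fn)(const void *l, const void *r, size_t size);
 *
 * memcmp() has exactly that shape, but some toolchains lower it to a
 * builtin and reject taking its address, so the trivial bch2_memcmp()
 * wrapper is passed wherever memcmp was previously used as a function
 * pointer.
 */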
/* Replicas tracking - in memory: */
static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
@@ -33,21 +39,16 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
- eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
+ eytzinger0_sort(r->entries, r->nr, r->entry_size, bch2_memcmp, NULL);
}
static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
struct bch_replicas_entry_v0 *e)
{
- unsigned i;
-
- if (e->data_type < BCH_DATA_NR)
- prt_printf(out, "%s", bch2_data_types[e->data_type]);
- else
- prt_printf(out, "(invalid data type %u)", e->data_type);
+ bch2_prt_data_type(out, e->data_type);
prt_printf(out, ": %u [", e->nr_devs);
- for (i = 0; i < e->nr_devs; i++)
+ for (unsigned i = 0; i < e->nr_devs; i++)
prt_printf(out, i ? " %u" : "%u", e->devs[i]);
prt_printf(out, "]");
}
@@ -55,15 +56,10 @@ static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
void bch2_replicas_entry_to_text(struct printbuf *out,
struct bch_replicas_entry_v1 *e)
{
- unsigned i;
-
- if (e->data_type < BCH_DATA_NR)
- prt_printf(out, "%s", bch2_data_types[e->data_type]);
- else
- prt_printf(out, "(invalid data type %u)", e->data_type);
+ bch2_prt_data_type(out, e->data_type);
prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
- for (i = 0; i < e->nr_devs; i++)
+ for (unsigned i = 0; i < e->nr_devs; i++)
prt_printf(out, i ? " %u" : "%u", e->devs[i]);
prt_printf(out, "]");
}
@@ -831,7 +827,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
sort_cmp_size(cpu_r->entries,
cpu_r->nr,
cpu_r->entry_size,
- memcmp, NULL);
+ bch2_memcmp, NULL);
for (i = 0; i < cpu_r->nr; i++) {
struct bch_replicas_entry_v1 *e =
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index 9632f36f5f31..b6bf0ebe7e84 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -207,7 +207,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
u->entry.type = BCH_JSET_ENTRY_usage;
u->entry.btree_id = BCH_FS_USAGE_inodes;
- u->v = cpu_to_le64(c->usage_base->nr_inodes);
+ u->v = cpu_to_le64(c->usage_base->b.nr_inodes);
}
{
diff --git a/fs/bcachefs/counters.c b/fs/bcachefs/sb-counters.c
index 02a996e06a64..7dc898761bb3 100644
--- a/fs/bcachefs/counters.c
+++ b/fs/bcachefs/sb-counters.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "super-io.h"
-#include "counters.h"
+#include "sb-counters.h"
/* BCH_SB_FIELD_counters */
diff --git a/fs/bcachefs/counters.h b/fs/bcachefs/sb-counters.h
index 4778aa19bf34..81f8aec9fcb1 100644
--- a/fs/bcachefs/counters.h
+++ b/fs/bcachefs/sb-counters.h
@@ -1,11 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_COUNTERS_H
-#define _BCACHEFS_COUNTERS_H
+#ifndef _BCACHEFS_SB_COUNTERS_H
+#define _BCACHEFS_SB_COUNTERS_H
#include "bcachefs.h"
#include "super-io.h"
-
int bch2_sb_counters_to_cpu(struct bch_fs *);
int bch2_sb_counters_from_cpu(struct bch_fs *);
@@ -14,4 +13,4 @@ int bch2_fs_counters_init(struct bch_fs *);
extern const struct bch_sb_field_ops bch_sb_field_ops_counters;
-#endif // _BCACHEFS_COUNTERS_H
+#endif // _BCACHEFS_SB_COUNTERS_H
diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h
new file mode 100644
index 000000000000..62ea478215d0
--- /dev/null
+++ b/fs/bcachefs/sb-counters_format.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_COUNTERS_FORMAT_H
+#define _BCACHEFS_SB_COUNTERS_FORMAT_H
+
+#define BCH_PERSISTENT_COUNTERS() \
+ x(io_read, 0) \
+ x(io_write, 1) \
+ x(io_move, 2) \
+ x(bucket_invalidate, 3) \
+ x(bucket_discard, 4) \
+ x(bucket_alloc, 5) \
+ x(bucket_alloc_fail, 6) \
+ x(btree_cache_scan, 7) \
+ x(btree_cache_reap, 8) \
+ x(btree_cache_cannibalize, 9) \
+ x(btree_cache_cannibalize_lock, 10) \
+ x(btree_cache_cannibalize_lock_fail, 11) \
+ x(btree_cache_cannibalize_unlock, 12) \
+ x(btree_node_write, 13) \
+ x(btree_node_read, 14) \
+ x(btree_node_compact, 15) \
+ x(btree_node_merge, 16) \
+ x(btree_node_split, 17) \
+ x(btree_node_rewrite, 18) \
+ x(btree_node_alloc, 19) \
+ x(btree_node_free, 20) \
+ x(btree_node_set_root, 21) \
+ x(btree_path_relock_fail, 22) \
+ x(btree_path_upgrade_fail, 23) \
+ x(btree_reserve_get_fail, 24) \
+ x(journal_entry_full, 25) \
+ x(journal_full, 26) \
+ x(journal_reclaim_finish, 27) \
+ x(journal_reclaim_start, 28) \
+ x(journal_write, 29) \
+ x(read_promote, 30) \
+ x(read_bounce, 31) \
+ x(read_split, 33) \
+ x(read_retry, 32) \
+ x(read_reuse_race, 34) \
+ x(move_extent_read, 35) \
+ x(move_extent_write, 36) \
+ x(move_extent_finish, 37) \
+ x(move_extent_fail, 38) \
+ x(move_extent_start_fail, 39) \
+ x(copygc, 40) \
+ x(copygc_wait, 41) \
+ x(gc_gens_end, 42) \
+ x(gc_gens_start, 43) \
+ x(trans_blocked_journal_reclaim, 44) \
+ x(trans_restart_btree_node_reused, 45) \
+ x(trans_restart_btree_node_split, 46) \
+ x(trans_restart_fault_inject, 47) \
+ x(trans_restart_iter_upgrade, 48) \
+ x(trans_restart_journal_preres_get, 49) \
+ x(trans_restart_journal_reclaim, 50) \
+ x(trans_restart_journal_res_get, 51) \
+ x(trans_restart_key_cache_key_realloced, 52) \
+ x(trans_restart_key_cache_raced, 53) \
+ x(trans_restart_mark_replicas, 54) \
+ x(trans_restart_mem_realloced, 55) \
+ x(trans_restart_memory_allocation_failure, 56) \
+ x(trans_restart_relock, 57) \
+ x(trans_restart_relock_after_fill, 58) \
+ x(trans_restart_relock_key_cache_fill, 59) \
+ x(trans_restart_relock_next_node, 60) \
+ x(trans_restart_relock_parent_for_fill, 61) \
+ x(trans_restart_relock_path, 62) \
+ x(trans_restart_relock_path_intent, 63) \
+ x(trans_restart_too_many_iters, 64) \
+ x(trans_restart_traverse, 65) \
+ x(trans_restart_upgrade, 66) \
+ x(trans_restart_would_deadlock, 67) \
+ x(trans_restart_would_deadlock_write, 68) \
+ x(trans_restart_injected, 69) \
+ x(trans_restart_key_cache_upgrade, 70) \
+ x(trans_traverse_all, 71) \
+ x(transaction_commit, 72) \
+ x(write_super, 73) \
+ x(trans_restart_would_deadlock_recursion_limit, 74) \
+ x(trans_restart_write_buffer_flush, 75) \
+ x(trans_restart_split_race, 76) \
+ x(write_buffer_flush_slowpath, 77) \
+ x(write_buffer_flush_sync, 78)
+
+enum bch_persistent_counters {
+#define x(t, n, ...) BCH_COUNTER_##t,
+ BCH_PERSISTENT_COUNTERS()
+#undef x
+ BCH_COUNTER_NR
+};
+
+struct bch_sb_field_counters {
+ struct bch_sb_field field;
+ __le64 d[];
+};
+
+#endif /* _BCACHEFS_SB_COUNTERS_FORMAT_H */
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index a44a238bf8b5..eff5ce18c69c 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -251,7 +251,7 @@ static void member_to_text(struct printbuf *out,
prt_printf(out, "Data allowed:");
prt_tab(out);
if (BCH_MEMBER_DATA_ALLOWED(&m))
- prt_bitflags(out, bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
+ prt_bitflags(out, __bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
else
prt_printf(out, "(none)");
prt_newline(out);
@@ -259,7 +259,7 @@ static void member_to_text(struct printbuf *out,
prt_printf(out, "Has data:");
prt_tab(out);
if (data_have)
- prt_bitflags(out, bch2_data_types, data_have);
+ prt_bitflags(out, __bch2_data_types, data_have);
else
prt_printf(out, "(none)");
prt_newline(out);
@@ -421,7 +421,7 @@ void bch2_dev_errors_reset(struct bch_dev *ca)
m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
for (unsigned i = 0; i < ARRAY_SIZE(m->errors_at_reset); i++)
m->errors_at_reset[i] = cpu_to_le64(atomic64_read(&ca->errors[i]));
- m->errors_reset_time = ktime_get_real_seconds();
+ m->errors_reset_time = cpu_to_le64(ktime_get_real_seconds());
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 56af937523ff..ac6ba04d5521 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -728,7 +728,7 @@ static int check_snapshot(struct btree_trans *trans,
return 0;
memset(&s, 0, sizeof(s));
- memcpy(&s, k.v, bkey_val_bytes(k.k));
+ memcpy(&s, k.v, min(sizeof(s), bkey_val_bytes(k.k)));
id = le32_to_cpu(s.parent);
if (id) {
@@ -1053,6 +1053,8 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
n->v.tree = cpu_to_le32(tree);
n->v.depth = cpu_to_le32(depth);
+ n->v.btime.lo = cpu_to_le64(bch2_current_time(c));
+ n->v.btime.hi = 0;
for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
@@ -1681,5 +1683,5 @@ int bch2_snapshots_read(struct bch_fs *c)
void bch2_fs_snapshots_exit(struct bch_fs *c)
{
- kfree(rcu_dereference_protected(c->snapshots, true));
+ kvfree(rcu_dereference_protected(c->snapshots, true));
}
diff --git a/fs/bcachefs/snapshot_format.h b/fs/bcachefs/snapshot_format.h
new file mode 100644
index 000000000000..aabcd3a74cd9
--- /dev/null
+++ b/fs/bcachefs/snapshot_format.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SNAPSHOT_FORMAT_H
+#define _BCACHEFS_SNAPSHOT_FORMAT_H
+
+struct bch_snapshot {
+ struct bch_val v;
+ __le32 flags;
+ __le32 parent;
+ __le32 children[2];
+ __le32 subvol;
+ /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
+ __le32 tree;
+ __le32 depth;
+ __le32 skip[3];
+ bch_le128 btime;
+};
+
+LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
+
+/* True if a subvolume points to this snapshot node: */
+LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
+
+/*
+ * Snapshot trees:
+ *
+ * The snapshot_trees btree gives us a persistent identifier for each tree of
+ * bch_snapshot nodes, and allows us to record and easily find the root/master
+ * subvolume that other snapshots were created from:
+ */
+struct bch_snapshot_tree {
+ struct bch_val v;
+ __le32 master_subvol;
+ __le32 root_snapshot;
+};
+
+#endif /* _BCACHEFS_SNAPSHOT_FORMAT_H */
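/*
 * Editor's sketch (an assumption based on how these masks are used elsewhere
 * in bcachefs; the macro itself is not shown in this diff): LE32_BITMASK(NAME,
 * type, field, lo, hi) defines a getter and a setter over bits [lo, hi) of a
 * little-endian field, roughly:
 *
 *	static inline __u64 BCH_SNAPSHOT_DELETED(const struct bch_snapshot *k)
 *	{
 *		return (le32_to_cpu(k->flags) >> 0) & 1;
 *	}
 *
 *	static inline void SET_BCH_SNAPSHOT_DELETED(struct bch_snapshot *k, __u64 v)
 *	{
 *		__u32 f = le32_to_cpu(k->flags);
 *		k->flags = cpu_to_le32((f & ~1U) | ((v & 1) << 0));
 *	}
 */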
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 89fdb7c21134..fcaa5a888744 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -160,21 +160,16 @@ static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, s
}
static __always_inline int
-bch2_hash_lookup(struct btree_trans *trans,
+bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
struct btree_iter *iter,
const struct bch_hash_desc desc,
const struct bch_hash_info *info,
subvol_inum inum, const void *key,
- unsigned flags)
+ unsigned flags, u32 snapshot)
{
struct bkey_s_c k;
- u32 snapshot;
int ret;
- ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
- if (ret)
- return ret;
-
for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
SPOS(inum.inum, desc.hash_key(info, key), snapshot),
POS(inum.inum, U64_MAX),
@@ -195,6 +190,19 @@ bch2_hash_lookup(struct btree_trans *trans,
}
static __always_inline int
+bch2_hash_lookup(struct btree_trans *trans,
+ struct btree_iter *iter,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ subvol_inum inum, const void *key,
+ unsigned flags)
+{
+ u32 snapshot;
+ return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
+ bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
+}
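/*
 * Editor's note (illustrative): the "a ?: b" chaining works here because
 * these functions return 0 on success, so the hash lookup only runs once the
 * snapshot has been resolved; the expression is equivalent to:
 *
 *	int ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
 *	if (ret)
 *		return ret;
 *	return bch2_hash_lookup_in_snapshot(trans, iter, desc, info,
 *					    inum, key, flags, snapshot);
 */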
+
+static __always_inline int
bch2_hash_hole(struct btree_trans *trans,
struct btree_iter *iter,
const struct bch_hash_desc desc,
diff --git a/fs/bcachefs/subvolume_format.h b/fs/bcachefs/subvolume_format.h
new file mode 100644
index 000000000000..af79134b07d6
--- /dev/null
+++ b/fs/bcachefs/subvolume_format.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SUBVOLUME_FORMAT_H
+#define _BCACHEFS_SUBVOLUME_FORMAT_H
+
+#define SUBVOL_POS_MIN POS(0, 1)
+#define SUBVOL_POS_MAX POS(0, S32_MAX)
+#define BCACHEFS_ROOT_SUBVOL 1
+
+struct bch_subvolume {
+ struct bch_val v;
+ __le32 flags;
+ __le32 snapshot;
+ __le64 inode;
+ /*
+ * Snapshot subvolumes form a tree, separate from the snapshot nodes
+ * tree - if this subvolume is a snapshot, this is the ID of the
+ * subvolume it was created from:
+ *
+ * This is _not_ necessarily the subvolume of the directory containing
+ * this subvolume:
+ */
+ __le32 parent;
+ __le32 pad;
+ bch_le128 otime;
+};
+
+LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1)
+/*
+ * We need to know whether a subvolume is a snapshot so we can tell whether it
+ * can be deleted (or whether it should just be rm -rf'd)
+ */
+LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2)
+LE32_BITMASK(BCH_SUBVOLUME_UNLINKED, struct bch_subvolume, flags, 2, 3)
+
+#endif /* _BCACHEFS_SUBVOLUME_FORMAT_H */
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 6d3db5cce5f6..36988add581f 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -2,7 +2,6 @@
#include "bcachefs.h"
#include "checksum.h"
-#include "counters.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
@@ -13,6 +12,7 @@
#include "replicas.h"
#include "quota.h"
#include "sb-clean.h"
+#include "sb-counters.h"
#include "sb-downgrade.h"
#include "sb-errors.h"
#include "sb-members.h"
@@ -717,7 +717,7 @@ retry:
if (IS_ERR(sb->bdev_handle)) {
ret = PTR_ERR(sb->bdev_handle);
- goto out;
+ goto err;
}
sb->bdev = sb->bdev_handle->bdev;
@@ -1321,7 +1321,9 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
prt_printf(out, "Superblock size:");
prt_tab(out);
- prt_printf(out, "%zu", vstruct_bytes(sb));
+ prt_units_u64(out, vstruct_bytes(sb));
+ prt_str(out, "/");
+ prt_units_u64(out, 512ULL << sb->layout.sb_max_size_bits);
prt_newline(out);
prt_printf(out, "Clean:");
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 9dbc35940197..6b23e11825e6 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -23,7 +23,6 @@
#include "checksum.h"
#include "clock.h"
#include "compress.h"
-#include "counters.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
@@ -49,6 +48,7 @@
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
+#include "sb-counters.h"
#include "sb-errors.h"
#include "sb-members.h"
#include "snapshot.h"
@@ -883,7 +883,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
!(c->online_reserved = alloc_percpu(u64)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
- btree_bytes(c)) ||
+ c->opts.btree_node_size) ||
mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
!(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
sizeof(u64), GFP_KERNEL))) {
@@ -1386,8 +1386,8 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
prt_bdevname(&name, ca->disk_sb.bdev);
if (c->sb.nr_devices == 1)
- strlcpy(c->name, name.buf, sizeof(c->name));
- strlcpy(ca->name, name.buf, sizeof(ca->name));
+ strscpy(c->name, name.buf, sizeof(c->name));
+ strscpy(ca->name, name.buf, sizeof(ca->name));
printbuf_exit(&name);
@@ -1428,10 +1428,10 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
? c->opts.metadata_replicas
- : c->opts.metadata_replicas_required,
+ : metadata_replicas_required(c),
!(flags & BCH_FORCE_IF_DATA_DEGRADED)
? c->opts.data_replicas
- : c->opts.data_replicas_required);
+ : data_replicas_required(c));
return nr_rw >= required;
case BCH_MEMBER_STATE_failed:
@@ -1625,7 +1625,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
if (data) {
struct printbuf data_has = PRINTBUF;
- prt_bitflags(&data_has, bch2_data_types, data);
+ prt_bitflags(&data_has, __bch2_data_types, data);
bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
printbuf_exit(&data_has);
ret = -EBUSY;
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 8ed52319ff68..cee80c47feea 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -21,6 +21,7 @@
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
+#include "compress.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
@@ -247,7 +248,7 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
mutex_lock(&c->btree_cache.lock);
list_for_each_entry(b, &c->btree_cache.live, list)
- ret += btree_bytes(c);
+ ret += btree_buf_bytes(b);
mutex_unlock(&c->btree_cache.lock);
return ret;
@@ -330,7 +331,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
prt_newline(out);
for (unsigned i = 0; i < ARRAY_SIZE(s); i++) {
- prt_str(out, bch2_compression_types[i]);
+ bch2_prt_compression_type(out, i);
prt_tab(out);
prt_human_readable_u64(out, s[i].sectors_compressed << 9);
@@ -725,8 +726,10 @@ STORE(bch2_fs_opts_dir)
bch2_opt_set_sb(c, opt, v);
bch2_opt_set_by_id(&c->opts, id, v);
- if ((id == Opt_background_target ||
- id == Opt_background_compression) && v)
+ if (v &&
+ (id == Opt_background_target ||
+ id == Opt_background_compression ||
+ (id == Opt_compression && !c->opts.background_compression)))
bch2_set_rebalance_needs_scan(c, 0);
ret = size;
@@ -883,7 +886,7 @@ static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
for (i = 1; i < BCH_DATA_NR; i++)
prt_printf(out, "%-12s:%12llu\n",
- bch2_data_types[i],
+ bch2_data_type_str(i),
percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
}
}
@@ -908,7 +911,7 @@ SHOW(bch2_dev)
}
if (attr == &sysfs_has_data) {
- prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
+ prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
prt_char(out, '\n');
}
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
index b1c867aa2b58..9220d7de10db 100644
--- a/fs/bcachefs/thread_with_file.c
+++ b/fs/bcachefs/thread_with_file.c
@@ -53,9 +53,9 @@ int bch2_run_thread_with_file(struct thread_with_file *thr,
if (ret)
goto err;
- fd_install(fd, file);
get_task_struct(thr->task);
wake_up_process(thr->task);
+ fd_install(fd, file);
return fd;
err:
if (fd >= 0)
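/*
 * Editor's note (the rationale is an assumption, not stated in the diff):
 * fd_install() publishes the file to userspace, which can immediately close
 * the fd and drop the last reference; installing it last means "thr" is
 * never touched after the point where it could be freed:
 *
 *	get_task_struct(thr->task);
 *	wake_up_process(thr->task);
 *	fd_install(fd, file);	// publish to userspace last
 */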
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index c94876b3bb06..293b90d704fb 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -46,7 +46,7 @@ DECLARE_EVENT_CLASS(fs_str,
__assign_str(str, str);
),
- TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
+ TP_printk("%d,%d\n%s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(str))
);
DECLARE_EVENT_CLASS(trans_str,
@@ -273,28 +273,14 @@ DEFINE_EVENT(bch_fs, journal_full,
TP_ARGS(c)
);
-DEFINE_EVENT(bch_fs, journal_entry_full,
- TP_PROTO(struct bch_fs *c),
- TP_ARGS(c)
+DEFINE_EVENT(fs_str, journal_entry_full,
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
-TRACE_EVENT(journal_entry_close,
- TP_PROTO(struct bch_fs *c, unsigned bytes),
- TP_ARGS(c, bytes),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u32, bytes )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->bytes = bytes;
- ),
-
- TP_printk("%d,%d entry bytes %u",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->bytes)
+DEFINE_EVENT(fs_str, journal_entry_close,
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
DEFINE_EVENT(bio, journal_write,
@@ -542,7 +528,7 @@ TRACE_EVENT(btree_path_relock_fail,
__entry->level = path->level;
TRACE_BPOS_assign(pos, path->pos);
- c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
+ c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
__entry->self_read_count = c.n[SIX_LOCK_read];
__entry->self_intent_count = c.n[SIX_LOCK_intent];
@@ -827,40 +813,28 @@ TRACE_EVENT(bucket_evacuate,
);
DEFINE_EVENT(fs_str, move_extent,
- TP_PROTO(struct bch_fs *c, const char *k),
- TP_ARGS(c, k)
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
DEFINE_EVENT(fs_str, move_extent_read,
- TP_PROTO(struct bch_fs *c, const char *k),
- TP_ARGS(c, k)
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
DEFINE_EVENT(fs_str, move_extent_write,
- TP_PROTO(struct bch_fs *c, const char *k),
- TP_ARGS(c, k)
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
DEFINE_EVENT(fs_str, move_extent_finish,
- TP_PROTO(struct bch_fs *c, const char *k),
- TP_ARGS(c, k)
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
-TRACE_EVENT(move_extent_fail,
- TP_PROTO(struct bch_fs *c, const char *msg),
- TP_ARGS(c, msg),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __string(msg, msg )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __assign_str(msg, msg);
- ),
-
- TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
+DEFINE_EVENT(fs_str, move_extent_fail,
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
DEFINE_EVENT(fs_str, move_extent_start_fail,
@@ -1039,7 +1013,7 @@ TRACE_EVENT(trans_restart_split_race,
__entry->level = b->c.level;
__entry->written = b->written;
__entry->blocks = btree_blocks(trans->c);
- __entry->u64s_remaining = bch_btree_keys_u64s_remaining(trans->c, b);
+ __entry->u64s_remaining = bch2_btree_keys_u64s_remaining(b);
),
TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
@@ -1146,8 +1120,6 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
TP_ARGS(trans, caller_ip, path)
);
-struct get_locks_fail;
-
TRACE_EVENT(trans_restart_upgrade,
TP_PROTO(struct btree_trans *trans,
unsigned long caller_ip,
@@ -1195,11 +1167,9 @@ TRACE_EVENT(trans_restart_upgrade,
__entry->node_seq)
);
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
- TP_PROTO(struct btree_trans *trans,
- unsigned long caller_ip,
- struct btree_path *path),
- TP_ARGS(trans, caller_ip, path)
+DEFINE_EVENT(trans_str, trans_restart_relock,
+ TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
+ TP_ARGS(trans, caller_ip, str)
);
DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index c2ef7cddaa4f..3a32faa86b5c 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -241,12 +241,17 @@ bool bch2_is_zero(const void *_p, size_t n)
return true;
}
-void bch2_prt_u64_binary(struct printbuf *out, u64 v, unsigned nr_bits)
+void bch2_prt_u64_base2_nbits(struct printbuf *out, u64 v, unsigned nr_bits)
{
while (nr_bits)
prt_char(out, '0' + ((v >> --nr_bits) & 1));
}
+void bch2_prt_u64_base2(struct printbuf *out, u64 v)
+{
+ bch2_prt_u64_base2_nbits(out, v, fls64(v) ?: 1);
+}
+
void bch2_print_string_as_lines(const char *prefix, const char *lines)
{
const char *p;
@@ -267,14 +272,14 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
console_unlock();
}
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr)
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr,
+ gfp_t gfp)
{
#ifdef CONFIG_STACKTRACE
unsigned nr_entries = 0;
- int ret = 0;
stack->nr = 0;
- ret = darray_make_room(stack, 32);
+ int ret = darray_make_room_gfp(stack, 32, gfp);
if (ret)
return ret;
@@ -284,7 +289,7 @@ int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigne
do {
nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1);
} while (nr_entries == stack->size &&
- !(ret = darray_make_room(stack, stack->size * 2)));
+ !(ret = darray_make_room_gfp(stack, stack->size * 2, gfp)));
stack->nr = nr_entries;
up_read(&task->signal->exec_update_lock);
@@ -303,10 +308,10 @@ void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
}
}
-int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr)
+int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr, gfp_t gfp)
{
bch_stacktrace stack = { 0 };
- int ret = bch2_save_backtrace(&stack, task, skipnr + 1);
+ int ret = bch2_save_backtrace(&stack, task, skipnr + 1, gfp);
bch2_prt_backtrace(out, &stack);
darray_exit(&stack);
@@ -413,14 +418,15 @@ static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
bch2_quantiles_update(&stats->quantiles, duration);
}
- if (time_after64(end, stats->last_event)) {
+ if (stats->last_event && time_after64(end, stats->last_event)) {
freq = end - stats->last_event;
mean_and_variance_update(&stats->freq_stats, freq);
mean_and_variance_weighted_update(&stats->freq_stats_weighted, freq);
stats->max_freq = max(stats->max_freq, freq);
stats->min_freq = min(stats->min_freq, freq);
- stats->last_event = end;
}
+
+ stats->last_event = end;
}
static void __bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
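/*
 * Editor's note (illustrative; update_frequency_stats() is a stand-in for
 * the real updates): on the very first call last_event is still 0, so
 * "end - stats->last_event" would record a huge bogus inter-event gap; the
 * extra guard skips the frequency statistics once while still recording the
 * timestamp:
 *
 *	if (stats->last_event && time_after64(end, stats->last_event))
 *		update_frequency_stats(stats, end - stats->last_event);
 *	stats->last_event = end;
 */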
@@ -1186,7 +1192,9 @@ int bch2_split_devs(const char *_dev_name, darray_str *ret)
{
darray_init(ret);
- char *dev_name = kstrdup(_dev_name, GFP_KERNEL), *s = dev_name;
+ char *dev_name, *s, *orig;
+
+ dev_name = orig = kstrdup(_dev_name, GFP_KERNEL);
if (!dev_name)
return -ENOMEM;
@@ -1201,10 +1209,10 @@ int bch2_split_devs(const char *_dev_name, darray_str *ret)
}
}
- kfree(dev_name);
+ kfree(orig);
return 0;
err:
bch2_darray_str_exit(ret);
- kfree(dev_name);
+ kfree(orig);
return -ENOMEM;
}
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index c75fc31915d3..b414736d59a5 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -342,14 +342,15 @@ bool bch2_is_zero(const void *, size_t);
u64 bch2_read_flag_list(char *, const char * const[]);
-void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);
+void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
+void bch2_prt_u64_base2(struct printbuf *, u64);
void bch2_print_string_as_lines(const char *prefix, const char *lines);
typedef DARRAY(unsigned long) bch_stacktrace;
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned);
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
-int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);
static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
{
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 5a1858fb9879..9c0d2316031b 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -590,8 +590,9 @@ err:
mutex_unlock(&inode->ei_update_lock);
if (value &&
- (opt_id == Opt_background_compression ||
- opt_id == Opt_background_target))
+ (opt_id == Opt_background_target ||
+ opt_id == Opt_background_compression ||
+ (opt_id == Opt_compression && !inode_opt_get(c, &inode->ei_inode, background_compression))))
bch2_set_rebalance_needs_scan(c, inode->ei_inode.bi_inum);
return bch2_err_class(ret);
diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h
new file mode 100644
index 000000000000..e9f810539552
--- /dev/null
+++ b/fs/bcachefs/xattr_format.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_XATTR_FORMAT_H
+#define _BCACHEFS_XATTR_FORMAT_H
+
+#define KEY_TYPE_XATTR_INDEX_USER 0
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS 1
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
+#define KEY_TYPE_XATTR_INDEX_TRUSTED 3
+#define KEY_TYPE_XATTR_INDEX_SECURITY 4
+
+struct bch_xattr {
+ struct bch_val v;
+ __u8 x_type;
+ __u8 x_name_len;
+ __le16 x_val_len;
+ __u8 x_name[];
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_XATTR_FORMAT_H */
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index a9be9ac99222..378d9103a207 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1455,6 +1455,7 @@ out:
*/
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
+ LIST_HEAD(retry_list);
struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
@@ -1476,6 +1477,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_lock(&fs_info->unused_bgs_lock);
while (!list_empty(&fs_info->unused_bgs)) {
+ u64 used;
int trimming;
block_group = list_first_entry(&fs_info->unused_bgs,
@@ -1511,9 +1513,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
goto next;
}
+ spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- if (block_group->reserved || block_group->pinned ||
- block_group->used || block_group->ro ||
+ if (btrfs_is_block_group_used(block_group) || block_group->ro ||
list_is_singular(&block_group->list)) {
/*
* We want to bail if we made new allocations or have
@@ -1523,10 +1525,49 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
*/
trace_btrfs_skip_unused_block_group(block_group);
spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
up_write(&space_info->groups_sem);
goto next;
}
+
+ /*
+ * The block group may be unused but there may still be reserved
+ * space accounted against it, that is, space_info->bytes_may_use
+ * was incremented by a task but no space was yet allocated from
+ * the block group by that task.
+ * That space may or may not be allocated, as we are generally
+ * pessimistic about space reservation for metadata as well as
+ * for data when using compression (as we reserve space based on
+ * the worst case, when data can't be compressed, and before
+ * actually attempting compression, before starting writeback).
+ *
+ * So check if the total space of the space_info minus the size
+ * of this block group is less than the used space of the
+ * space_info - if that's the case, then it means we have tasks
+ * that might be relying on the block group in order to allocate
+ * extents, and add back the block group to the unused list when
+ * we finish, so that we retry later in case no tasks ended up
+ * needing to allocate extents from the block group.
+ */
+ used = btrfs_space_info_used(space_info, true);
+ if (space_info->total_bytes - block_group->length < used) {
+ /*
+ * Add a reference for the list, compensate for the ref
+ * drop under the "next" label for the
+ * fs_info->unused_bgs list.
+ */
+ btrfs_get_block_group(block_group);
+ list_add_tail(&block_group->bg_list, &retry_list);
+
+ trace_btrfs_skip_unused_block_group(block_group);
+ spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
+ up_write(&space_info->groups_sem);
+ goto next;
+ }
+
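/*
 * Editor's worked example (illustrative, numbers invented): with
 * total_bytes = 10 GiB, an unused block group of length 1 GiB and
 * used = 9.5 GiB of reservations, 10 GiB - 1 GiB = 9 GiB < 9.5 GiB, so
 * deleting the group could leave reservations with no space to allocate
 * from; the group is parked on retry_list and re-examined on a later pass
 * instead.
 */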
spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
/* We don't want to force the issue, only flip if it's ok. */
ret = inc_block_group_ro(block_group, 0);
@@ -1650,12 +1691,16 @@ next:
btrfs_put_block_group(block_group);
spin_lock(&fs_info->unused_bgs_lock);
}
+ list_splice_tail(&retry_list, &fs_info->unused_bgs);
spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
return;
flip_async:
btrfs_end_transaction(trans);
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_splice_tail(&retry_list, &fs_info->unused_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
btrfs_put_block_group(block_group);
btrfs_discard_punt_unused_bgs_list(fs_info);
@@ -2684,6 +2729,37 @@ next:
btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
list_del_init(&block_group->bg_list);
clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
+
+ /*
+ * If the block group is still unused, add it to the list of
+ * unused block groups. The block group may have been created in
+ * order to satisfy a space reservation, in which case the
+ * extent allocation only happens later. But often we don't
+ * actually need to allocate space that we previously reserved,
+ * so the block group may become unused for a long time. For
+ * example for metadata we generally reserve space for a worst
+ * possible scenario, but then don't end up allocating all that
+ * space or none at all (due to no need to COW, extent buffers
+ * were already COWed in the current transaction and still
+ * unwritten, tree heights lower than the maximum possible
+ * height, etc). For data we generally reserve the exact amount
+ * of space we are going to allocate later; the exception is
+ * when using compression, as we must reserve space based on the
+ * uncompressed data size, because the compression is only done
+ * when writeback is triggered and we don't know how much space we
+ * are actually going to need, so we reserve the uncompressed
+ * size because the data may be incompressible in the worst case.
+ */
+ if (ret == 0) {
+ bool used;
+
+ spin_lock(&block_group->lock);
+ used = btrfs_is_block_group_used(block_group);
+ spin_unlock(&block_group->lock);
+
+ if (!used)
+ btrfs_mark_bg_unused(block_group);
+ }
}
btrfs_trans_release_chunk_metadata(trans);
}
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index c4a1f01cc1c2..962b11983901 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -257,6 +257,13 @@ static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
return (block_group->start + block_group->length);
}
+static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg)
+{
+ lockdep_assert_held(&bg->lock);
+
+ return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0);
+}
+
static inline bool btrfs_is_block_group_data_only(
struct btrfs_block_group *block_group)
{
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index ceb5f586a2d5..1043a8142351 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -494,7 +494,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
block_rsv = get_block_rsv(trans, root);
- if (unlikely(block_rsv->size == 0))
+ if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
goto try_reserve;
again:
ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
index b0bd12b8652f..43a9a6b5a79f 100644
--- a/fs/btrfs/block-rsv.h
+++ b/fs/btrfs/block-rsv.h
@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
return data_race(rsv->full);
}
+/*
+ * Get the reserved amount of a block reserve in a context where getting a
+ * stale value is acceptable, instead of accessing it directly and triggering
+ * a data race warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
+{
+ u64 ret;
+
+ spin_lock(&rsv->lock);
+ ret = rsv->reserved;
+ spin_unlock(&rsv->lock);
+
+ return ret;
+}
+
+/*
+ * Get the size of a block reserve in a context where getting a stale value is
+ * acceptable, instead of accessing it directly and triggering a data race
+ * warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
+{
+ u64 ret;
+
+ spin_lock(&rsv->lock);
+ ret = rsv->size;
+ spin_unlock(&rsv->lock);
+
+ return ret;
+}
+
#endif /* BTRFS_BLOCK_RSV_H */
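/*
 * Editor's note (illustrative): the helpers trade a short lock/unlock for a
 * KCSAN-clean snapshot, so lockless readers change from
 *
 *	if (unlikely(block_rsv->size == 0))
 *
 * to
 *
 *	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
 *
 * as in the btrfs_use_block_rsv() hunk above.
 */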
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 193168214eeb..68345f73d429 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -141,16 +141,16 @@ static int compression_decompress_bio(struct list_head *ws,
}
static int compression_decompress(int type, struct list_head *ws,
- const u8 *data_in, struct page *dest_page,
- unsigned long start_byte, size_t srclen, size_t destlen)
+ const u8 *data_in, struct page *dest_page,
+ unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
switch (type) {
case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
- start_byte, srclen, destlen);
+ dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
- start_byte, srclen, destlen);
+ dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
- start_byte, srclen, destlen);
+ dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_NONE:
default:
/*
@@ -1037,14 +1037,23 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
* start_byte tells us the offset into the compressed data we're interested in
*/
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
- unsigned long start_byte, size_t srclen, size_t destlen)
+ unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
struct list_head *workspace;
+ const u32 sectorsize = fs_info->sectorsize;
int ret;
+ /*
+ * The full destination page range should not exceed the page size.
+ * And the @destlen should not exceed sectorsize, as this is only called for
+ * inline file extents, which should not exceed sectorsize.
+ */
+ ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
+
workspace = get_workspace(type, 0);
ret = compression_decompress(type, workspace, data_in, dest_page,
- start_byte, srclen, destlen);
+ dest_pgoff, srclen, destlen);
put_workspace(type, workspace);
return ret;
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 93cc92974dee..afd7e50d073d 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -148,7 +148,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+ struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *zlib_alloc_workspace(unsigned int level);
void zlib_free_workspace(struct list_head *ws);
@@ -159,7 +159,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+ struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *lzo_alloc_workspace(unsigned int level);
void lzo_free_workspace(struct list_head *ws);
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index c276b136ab63..5b0b64571418 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -1046,7 +1046,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
goto add;
/* Skip too large extent */
- if (range_len >= extent_thresh)
+ if (em->len >= extent_thresh)
goto next;
/*
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 2833e8ef4c09..acf9f4b6c044 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -245,7 +245,6 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
u64 reserve_size = 0;
u64 qgroup_rsv_size = 0;
- u64 csum_leaves;
unsigned outstanding_extents;
lockdep_assert_held(&inode->lock);
@@ -260,10 +259,12 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
outstanding_extents);
reserve_size += btrfs_calc_metadata_size(fs_info, 1);
}
- csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
- inode->csum_bytes);
- reserve_size += btrfs_calc_insert_metadata_size(fs_info,
- csum_leaves);
+ if (!(inode->flags & BTRFS_INODE_NODATASUM)) {
+ u64 csum_leaves;
+
+ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
+ reserve_size += btrfs_calc_insert_metadata_size(fs_info, csum_leaves);
+ }
/*
* For qgroup rsv, the calculation is very simple:
* account one nodesize for each outstanding extent
@@ -278,14 +279,20 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
spin_unlock(&block_rsv->lock);
}
-static void calc_inode_reservations(struct btrfs_fs_info *fs_info,
+static void calc_inode_reservations(struct btrfs_inode *inode,
u64 num_bytes, u64 disk_num_bytes,
u64 *meta_reserve, u64 *qgroup_reserve)
{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
u64 nr_extents = count_max_extents(fs_info, num_bytes);
- u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
+ u64 csum_leaves;
u64 inode_update = btrfs_calc_metadata_size(fs_info, 1);
+ if (inode->flags & BTRFS_INODE_NODATASUM)
+ csum_leaves = 0;
+ else
+ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes);
+
*meta_reserve = btrfs_calc_insert_metadata_size(fs_info,
nr_extents + csum_leaves);
@@ -337,7 +344,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
* everything out and try again, which is bad. This way we just
* over-reserve slightly, and clean up the mess when we are done.
*/
- calc_inode_reservations(fs_info, num_bytes, disk_num_bytes,
+ calc_inode_reservations(inode, num_bytes, disk_num_bytes,
&meta_reserve, &qgroup_reserve);
ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true,
noflush);
@@ -359,7 +366,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
nr_extents = count_max_extents(fs_info, num_bytes);
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, nr_extents);
- inode->csum_bytes += disk_num_bytes;
+ if (!(inode->flags & BTRFS_INODE_NODATASUM))
+ inode->csum_bytes += disk_num_bytes;
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
@@ -393,7 +401,8 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
spin_lock(&inode->lock);
- inode->csum_bytes -= num_bytes;
+ if (!(inode->flags & BTRFS_INODE_NODATASUM))
+ inode->csum_bytes -= num_bytes;
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 1502d664c892..79c4293ddf37 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -725,6 +725,23 @@ leave:
return ret;
}
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+ if (args->start.srcdevid == 0) {
+ if (memchr(args->start.srcdev_name, 0,
+ sizeof(args->start.srcdev_name)) == NULL)
+ return -ENAMETOOLONG;
+ } else {
+ args->start.srcdev_name[0] = 0;
+ }
+
+ if (memchr(args->start.tgtdev_name, 0,
+ sizeof(args->start.tgtdev_name)) == NULL)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
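/*
 * Editor's worked example (illustrative): a device name with no NUL
 * terminator anywhere in its fixed-size buffer is now rejected up front:
 *
 *	memset(args->start.tgtdev_name, 'a', sizeof(args->start.tgtdev_name));
 *	// memchr(..., 0, sizeof(...)) == NULL  =>  ioctl fails with
 *	// -ENAMETOOLONG instead of later string handling reading past the
 *	// array.
 */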
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
@@ -737,10 +754,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
default:
return -EINVAL;
}
-
- if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
- args->start.tgtdev_name[0] == '\0')
- return -EINVAL;
+ ret = btrfs_check_replace_dev_names(args);
+ if (ret < 0)
+ return ret;
ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
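
[Editorial sketch] btrfs_check_replace_dev_names() above tightens the old first-byte test into a full NUL-termination check. The same memchr() pattern as a self-contained user-space function; the buffer length is an arbitrary stand-in for the ioctl's fixed-size name fields:

	#include <errno.h>
	#include <string.h>

	#define TOY_NAME_LEN 1024	/* stand-in for the ioctl buffer size */

	/* Returns 0 if buf holds a NUL-terminated string, -ENAMETOOLONG if
	 * no terminator exists anywhere inside the fixed-size buffer. */
	static int toy_check_name(const char buf[TOY_NAME_LEN])
	{
		if (memchr(buf, 0, TOY_NAME_LEN) == NULL)
			return -ENAMETOOLONG;
		return 0;
	}
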
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c6907d533fe8..c843563914ca 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1307,12 +1307,12 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
*
* @objectid: root id
* @anon_dev: preallocated anonymous block device number for new roots,
- * pass 0 for new allocation.
+ * pass NULL for a new allocation.
* @check_ref: whether to check root item references, If true, return -ENOENT
* for orphan roots
*/
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
- u64 objectid, dev_t anon_dev,
+ u64 objectid, dev_t *anon_dev,
bool check_ref)
{
struct btrfs_root *root;
@@ -1336,8 +1336,17 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
again:
root = btrfs_lookup_fs_root(fs_info, objectid);
if (root) {
- /* Shouldn't get preallocated anon_dev for cached roots */
- ASSERT(!anon_dev);
+ /*
+ * Some other caller may have read out the newly inserted
+ * subvolume already (for things like backref walk etc). Not
+ * that common but still possible. In that case, we just need
+ * to free the anon_dev.
+ */
+ if (unlikely(anon_dev && *anon_dev)) {
+ free_anon_bdev(*anon_dev);
+ *anon_dev = 0;
+ }
+
if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
btrfs_put_root(root);
return ERR_PTR(-ENOENT);
@@ -1357,7 +1366,7 @@ again:
goto fail;
}
- ret = btrfs_init_fs_root(root, anon_dev);
+ ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0);
if (ret)
goto fail;
@@ -1393,7 +1402,7 @@ fail:
* root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
* and once again by our caller.
*/
- if (anon_dev)
+ if (anon_dev && *anon_dev)
root->anon_dev = 0;
btrfs_put_root(root);
return ERR_PTR(ret);
@@ -1409,7 +1418,7 @@ fail:
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, bool check_ref)
{
- return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
+ return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref);
}
/*
@@ -1417,11 +1426,11 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
* the anonymous block device id
*
* @objectid: tree objectid
- * @anon_dev: if zero, allocate a new anonymous block device or use the
- * parameter value
+ * @anon_dev: if NULL, allocate a new anonymous block device; otherwise
+ *            use the value it points to
*/
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
- u64 objectid, dev_t anon_dev)
+ u64 objectid, dev_t *anon_dev)
{
return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}
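
[Editorial sketch] The switch from dev_t to dev_t * above lets btrfs_get_root_ref() free a preallocated anonymous device number and zero the caller's copy when the root turns out to be cached already, so exactly one party ever releases it. A toy user-space model of that single-owner contract (all names invented):

	#include <stdio.h>

	static int have_cached_root;	/* pretend root cache */

	static void toy_free_anon(unsigned int *id)
	{
		printf("freeing unused anon id %u\n", *id);
		*id = 0;	/* zero the caller's copy: no double free */
	}

	static void toy_get_root(unsigned int *anon_id)
	{
		if (have_cached_root) {
			/* Another caller instantiated the root first;
			 * release the preallocated id instead of leaking it. */
			if (anon_id && *anon_id)
				toy_free_anon(anon_id);
			return;
		}
		have_cached_root = 1;	/* the new root consumes the id */
		if (anon_id)
			*anon_id = 0;	/* ownership transferred to the root */
	}

	int main(void)
	{
		unsigned int id = 42;
		toy_get_root(&id);	/* consumed by the new root */
		id = 43;
		toy_get_root(&id);	/* cache hit: id freed and zeroed */
		return 0;
	}
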
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 9413726b329b..eb3473d1c1ac 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -61,7 +61,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
u64 objectid, bool check_ref);
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
- u64 objectid, dev_t anon_dev);
+ u64 objectid, dev_t *anon_dev);
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
u64 objectid);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f396aba92c57..8e8cc1111277 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1260,7 +1260,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
u64 bytes_left, end;
u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
- if (WARN_ON(start != aligned_start)) {
+ /* Adjust the range to be aligned to 512B sectors if necessary. */
+ if (start != aligned_start) {
len -= aligned_start - start;
len = round_down(len, 1 << SECTOR_SHIFT);
start = aligned_start;
@@ -4298,6 +4299,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
return 0;
}
+static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
+ struct find_free_extent_ctl *ffe_ctl)
+{
+ if (ffe_ctl->for_treelog) {
+ spin_lock(&fs_info->treelog_bg_lock);
+ if (fs_info->treelog_bg)
+ ffe_ctl->hint_byte = fs_info->treelog_bg;
+ spin_unlock(&fs_info->treelog_bg_lock);
+ } else if (ffe_ctl->for_data_reloc) {
+ spin_lock(&fs_info->relocation_bg_lock);
+ if (fs_info->data_reloc_bg)
+ ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+ spin_unlock(&fs_info->relocation_bg_lock);
+ } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+ struct btrfs_block_group *block_group;
+
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+ /*
+ * No lock is OK here because avail is monotonically
+ * decreasing, and this is just a hint.
+ */
+ u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+ if (block_group_bits(block_group, ffe_ctl->flags) &&
+ avail >= ffe_ctl->num_bytes) {
+ ffe_ctl->hint_byte = block_group->start;
+ break;
+ }
+ }
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ }
+
+ return 0;
+}
+
static int prepare_allocation(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl,
struct btrfs_space_info *space_info,
@@ -4308,19 +4345,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
return prepare_allocation_clustered(fs_info, ffe_ctl,
space_info, ins);
case BTRFS_EXTENT_ALLOC_ZONED:
- if (ffe_ctl->for_treelog) {
- spin_lock(&fs_info->treelog_bg_lock);
- if (fs_info->treelog_bg)
- ffe_ctl->hint_byte = fs_info->treelog_bg;
- spin_unlock(&fs_info->treelog_bg_lock);
- }
- if (ffe_ctl->for_data_reloc) {
- spin_lock(&fs_info->relocation_bg_lock);
- if (fs_info->data_reloc_bg)
- ffe_ctl->hint_byte = fs_info->data_reloc_bg;
- spin_unlock(&fs_info->relocation_bg_lock);
- }
- return 0;
+ return prepare_allocation_zoned(fs_info, ffe_ctl);
default:
BUG();
}
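
[Editorial sketch] prepare_allocation_zoned() above adds a third hint source: the first active data block group whose remaining zone capacity fits the allocation. The scan in miniature, over an array instead of the kernel list (struct layout invented):

	#include <stddef.h>
	#include <stdint.h>

	struct toy_group {
		uint64_t start;
		uint64_t zone_capacity;
		uint64_t alloc_offset;
	};

	/* Returns the start of the first group that can fit num_bytes, or 0.
	 * As in the kernel code this is only a hint: avail may shrink under
	 * us, and the allocator re-checks under proper locking later. */
	static uint64_t toy_pick_hint(const struct toy_group *groups, size_t n,
				      uint64_t num_bytes)
	{
		for (size_t i = 0; i < n; i++) {
			uint64_t avail = groups[i].zone_capacity -
					 groups[i].alloc_offset;

			if (avail >= num_bytes)
				return groups[i].start;
		}
		return 0;
	}
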
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cfd2967f04a2..8b4bef05e222 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2480,6 +2480,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
struct fiemap_cache *cache,
u64 offset, u64 phys, u64 len, u32 flags)
{
+ u64 cache_end;
int ret = 0;
/* Set at the end of extent_fiemap(). */
@@ -2489,15 +2490,102 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
goto assign;
/*
- * Sanity check, extent_fiemap() should have ensured that new
- * fiemap extent won't overlap with cached one.
- * Not recoverable.
+ * When iterating the extents of the inode, at extent_fiemap(), we may
+ * find an extent that starts at an offset behind the end offset of the
+ * previous extent we processed. This happens if fiemap is called
+ * without FIEMAP_FLAG_SYNC and there are ordered extents completing
+ * while we call btrfs_next_leaf() (through fiemap_next_leaf_item()).
*
- * NOTE: Physical address can overlap, due to compression
+ * For example we are in leaf X processing its last item, which is the
+ * file extent item for file range [512K, 1M[, and after
+ * btrfs_next_leaf() releases the path, there's an ordered extent that
+ * completes for the file range [768K, 2M[, and that results in trimming
+ * the file extent item so that it now corresponds to the file range
+ * [512K, 768K[ and a new file extent item is inserted for the file
+ * range [768K, 2M[, which may end up as the last item of leaf X or as
+ * the first item of the next leaf - in either case btrfs_next_leaf()
+ * will leave us with a path pointing to the new extent item, for the
+ * file range [768K, 2M[, since that's the first key that follows the
+ * last one we processed. So in order not to report overlapping extents
+ * to user space, we trim the length of the previously cached extent and
+ * emit it.
+ *
+ * Upon calling btrfs_next_leaf() we may also find an extent with an
+ * offset smaller than or equal to cache->offset, and this happens
+ * when we had a hole or prealloc extent with several delalloc ranges in
+ * it, but after btrfs_next_leaf() released the path, delalloc was
+ * flushed and the resulting ordered extents were completed, so we can
+ * now have found a file extent item for an offset that is smaller than
+ * or equal to what we have in cache->offset. We deal with this as
+ * described below.
*/
- if (cache->offset + cache->len > offset) {
- WARN_ON(1);
- return -EINVAL;
+ cache_end = cache->offset + cache->len;
+ if (cache_end > offset) {
+ if (offset == cache->offset) {
+ /*
+ * We cached a delalloc range (found in the io tree) for
+ * a hole or prealloc extent and we have now found a
+ * file extent item for the same offset. What we have
+ * now is more recent and up to date, so discard what
+ * we had in the cache and use what we have just found.
+ */
+ goto assign;
+ } else if (offset > cache->offset) {
+ /*
+ * The extent range we previously found ends after the
+ * offset of the file extent item we found and that
+ * offset falls somewhere in the middle of that previous
+ * extent range. So adjust the range we previously found
+ * to end at the offset of the file extent item we have
+ * just found, since this extent is more up to date.
+ * Emit that adjusted range and cache the file extent
+ * item we have just found. This corresponds to the case
+ * where a previously found file extent item was split
+ * due to an ordered extent completing.
+ */
+ cache->len = offset - cache->offset;
+ goto emit;
+ } else {
+ const u64 range_end = offset + len;
+
+ /*
+ * The offset of the file extent item we have just found
+ * is behind the cached offset. This means we were
+ * processing a hole or prealloc extent for which we
+ * have found delalloc ranges (in the io tree), so what
+ * we have in the cache is the last delalloc range we
+ * found while the file extent item we found can be
+ * either for a whole delalloc range we previously
+ * emitted or only a part of that range.
+ *
+ * We have two cases here:
+ *
+ * 1) The file extent item's range ends at or behind the
+ * cached extent's end. In this case just ignore the
+ * current file extent item because we don't want to
+ * overlap with previous ranges that may have been
+ * emitted already;
+ *
+ * 2) The file extent item starts behind the currently
+ * cached extent but its end offset goes beyond the
+ * end offset of the cached extent. We don't want to
+ * overlap with a previous range that may have been
+ * emitted already, so we emit the currently cached
+ * extent and then partially store the current file
+ * extent item's range in the cache, for the subrange
+ * going from the cached extent's end to the end of the
+ * file extent item.
+ */
+ if (range_end <= cache_end)
+ return 0;
+
+ if (!(flags & (FIEMAP_EXTENT_ENCODED | FIEMAP_EXTENT_DELALLOC)))
+ phys += cache_end - offset;
+
+ offset = cache_end;
+ len = range_end - cache_end;
+ goto emit;
+ }
}
/*
@@ -2517,6 +2605,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
return 0;
}
+emit:
/* Not mergeable, need to submit cached one */
ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
cache->len, cache->flags);
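
[Editorial sketch] The long comment above distinguishes three overlap cases. A worked user-space example of the middle one, reusing the comment's own numbers: a cached extent [512K, 1M[ meets a newer file extent item starting at 768K, so the cache is trimmed to [512K, 768K[ and emitted:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long cache_offset = 512ULL << 10;
		unsigned long long cache_len    = 512ULL << 10;	/* [512K, 1M[ */
		unsigned long long offset       = 768ULL << 10;	/* newer item */
		unsigned long long cache_end    = cache_offset + cache_len;

		if (cache_end > offset && offset > cache_offset) {
			cache_len = offset - cache_offset;	/* trim to 256K */
			printf("emit  [%lluK, %lluK[\n", cache_offset >> 10,
			       (cache_offset + cache_len) >> 10);
			printf("cache new extent from %lluK\n", offset >> 10);
		}
		return 0;
	}
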
@@ -2689,16 +2778,34 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
* it beyond i_size.
*/
while (cur_offset < end && cur_offset < i_size) {
+ struct extent_state *cached_state = NULL;
u64 delalloc_start;
u64 delalloc_end;
u64 prealloc_start;
+ u64 lockstart;
+ u64 lockend;
u64 prealloc_len = 0;
bool delalloc;
+ lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
+ lockend = round_up(end, inode->root->fs_info->sectorsize);
+
+ /*
+ * We are only locking for the delalloc range because that's the
+ * only thing that can change here. With fiemap we have a lock
+ * on the inode, so no buffered or direct writes can happen.
+ *
+ * However mmaps and normal page writeback will cause this to
+ * change arbitrarily. We have to take the extent lock here to
+ * make sure that nobody messes with the tree while we're doing
+ * btrfs_find_delalloc_in_range.
+ */
+ lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
delalloc_cached_state,
&delalloc_start,
&delalloc_end);
+ unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
if (!delalloc)
break;
@@ -2866,15 +2973,15 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
const u64 ino = btrfs_ino(inode);
- struct extent_state *cached_state = NULL;
struct extent_state *delalloc_cached_state = NULL;
struct btrfs_path *path;
struct fiemap_cache cache = { 0 };
struct btrfs_backref_share_check_ctx *backref_ctx;
u64 last_extent_end;
u64 prev_extent_end;
- u64 lockstart;
- u64 lockend;
+ u64 range_start;
+ u64 range_end;
+ const u64 sectorsize = inode->root->fs_info->sectorsize;
bool stopped = false;
int ret;
@@ -2885,22 +2992,19 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
goto out;
}
- lockstart = round_down(start, inode->root->fs_info->sectorsize);
- lockend = round_up(start + len, inode->root->fs_info->sectorsize);
- prev_extent_end = lockstart;
-
- btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
- lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ range_start = round_down(start, sectorsize);
+ range_end = round_up(start + len, sectorsize);
+ prev_extent_end = range_start;
ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
if (ret < 0)
- goto out_unlock;
+ goto out;
btrfs_release_path(path);
path->reada = READA_FORWARD;
- ret = fiemap_search_slot(inode, path, lockstart);
+ ret = fiemap_search_slot(inode, path, range_start);
if (ret < 0) {
- goto out_unlock;
+ goto out;
} else if (ret > 0) {
/*
* No file extent item found, but we may have delalloc between
@@ -2910,7 +3014,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
goto check_eof_delalloc;
}
- while (prev_extent_end < lockend) {
+ while (prev_extent_end < range_end) {
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_file_extent_item *ei;
struct btrfs_key key;
@@ -2933,21 +3037,21 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
* The first iteration can leave us at an extent item that ends
* before our range's start. Move to the next item.
*/
- if (extent_end <= lockstart)
+ if (extent_end <= range_start)
goto next_item;
backref_ctx->curr_leaf_bytenr = leaf->start;
 /* We have an implicit hole (NO_HOLES feature enabled). */
if (prev_extent_end < key.offset) {
- const u64 range_end = min(key.offset, lockend) - 1;
+ const u64 hole_end = min(key.offset, range_end) - 1;
ret = fiemap_process_hole(inode, fieinfo, &cache,
&delalloc_cached_state,
backref_ctx, 0, 0, 0,
- prev_extent_end, range_end);
+ prev_extent_end, hole_end);
if (ret < 0) {
- goto out_unlock;
+ goto out;
} else if (ret > 0) {
/* fiemap_fill_next_extent() told us to stop. */
stopped = true;
@@ -2955,7 +3059,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
}
/* We've reached the end of the fiemap range, stop. */
- if (key.offset >= lockend) {
+ if (key.offset >= range_end) {
stopped = true;
break;
}
@@ -3003,7 +3107,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
extent_gen,
backref_ctx);
if (ret < 0)
- goto out_unlock;
+ goto out;
else if (ret > 0)
flags |= FIEMAP_EXTENT_SHARED;
}
@@ -3014,7 +3118,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
}
if (ret < 0) {
- goto out_unlock;
+ goto out;
} else if (ret > 0) {
/* fiemap_fill_next_extent() told us to stop. */
stopped = true;
@@ -3025,12 +3129,12 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
next_item:
if (fatal_signal_pending(current)) {
ret = -EINTR;
- goto out_unlock;
+ goto out;
}
ret = fiemap_next_leaf_item(inode, path);
if (ret < 0) {
- goto out_unlock;
+ goto out;
} else if (ret > 0) {
/* No more file extent items for this inode. */
break;
@@ -3049,29 +3153,41 @@ check_eof_delalloc:
btrfs_free_path(path);
path = NULL;
- if (!stopped && prev_extent_end < lockend) {
+ if (!stopped && prev_extent_end < range_end) {
ret = fiemap_process_hole(inode, fieinfo, &cache,
&delalloc_cached_state, backref_ctx,
- 0, 0, 0, prev_extent_end, lockend - 1);
+ 0, 0, 0, prev_extent_end, range_end - 1);
if (ret < 0)
- goto out_unlock;
- prev_extent_end = lockend;
+ goto out;
+ prev_extent_end = range_end;
}
if (cache.cached && cache.offset + cache.len >= last_extent_end) {
const u64 i_size = i_size_read(&inode->vfs_inode);
if (prev_extent_end < i_size) {
+ struct extent_state *cached_state = NULL;
u64 delalloc_start;
u64 delalloc_end;
+ u64 lockstart;
+ u64 lockend;
bool delalloc;
+ lockstart = round_down(prev_extent_end, sectorsize);
+ lockend = round_up(i_size, sectorsize);
+
+ /*
+ * See the comment in fiemap_process_hole as to why
+ * we're doing the locking here.
+ */
+ lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
delalloc = btrfs_find_delalloc_in_range(inode,
prev_extent_end,
i_size - 1,
&delalloc_cached_state,
&delalloc_start,
&delalloc_end);
+ unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
if (!delalloc)
cache.flags |= FIEMAP_EXTENT_LAST;
} else {
@@ -3080,10 +3196,6 @@ check_eof_delalloc:
}
ret = emit_last_fiemap_cache(fieinfo, &cache);
-
-out_unlock:
- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
out:
free_extent_state(delalloc_cached_state);
btrfs_free_backref_share_ctx(backref_ctx);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 809b11472a80..4795738d5785 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3184,8 +3184,23 @@ out:
unwritten_start += logical_len;
clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
- /* Drop extent maps for the part of the extent we didn't write. */
- btrfs_drop_extent_map_range(inode, unwritten_start, end, false);
+ /*
+ * Drop extent maps for the part of the extent we didn't write.
+ *
+ * We have an exception here for the free_space_inode, this is
+ * because when we do btrfs_get_extent() on the free space inode
+ * we will search the commit root. If this is a new block group
+ * we won't find anything, and we will trip over the assert in
+ * writepage where we do ASSERT(em->block_start !=
+ * EXTENT_MAP_HOLE).
+ *
+ * Theoretically we could also skip this for any NOCOW extent as
+ * we don't mess with the extent map tree in the NOCOW case, but
+ * for now simply skip this if we are the free space inode.
+ */
+ if (!btrfs_is_free_space_inode(inode))
+ btrfs_drop_extent_map_range(inode, unwritten_start,
+ end, false);
/*
* If the ordered extent had an IOERR or something else went
@@ -4458,6 +4473,8 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
u64 root_flags;
int ret;
+ down_write(&fs_info->subvol_sem);
+
/*
* Don't allow to delete a subvolume with send in progress. This is
* inside the inode lock so the error handling that has to drop the bit
@@ -4469,25 +4486,25 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
btrfs_warn(fs_info,
"attempt to delete subvolume %llu during send",
dest->root_key.objectid);
- return -EPERM;
+ ret = -EPERM;
+ goto out_up_write;
}
if (atomic_read(&dest->nr_swapfiles)) {
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu with active swapfile",
root->root_key.objectid);
- return -EPERM;
+ ret = -EPERM;
+ goto out_up_write;
}
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags | BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
- down_write(&fs_info->subvol_sem);
-
ret = may_destroy_subvol(dest);
if (ret)
- goto out_up_write;
+ goto out_undead;
btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
@@ -4497,7 +4514,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
*/
ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
if (ret)
- goto out_up_write;
+ goto out_undead;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
@@ -4563,15 +4580,17 @@ out_end_trans:
inode->i_flags |= S_DEAD;
out_release:
btrfs_subvolume_release_metadata(root, &block_rsv);
-out_up_write:
- up_write(&fs_info->subvol_sem);
+out_undead:
if (ret) {
spin_lock(&dest->root_item_lock);
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
- } else {
+ }
+out_up_write:
+ up_write(&fs_info->subvol_sem);
+ if (!ret) {
d_invalidate(dentry);
btrfs_prune_dentries(dest);
ASSERT(dest->send_in_progress == 0);
@@ -7816,6 +7835,7 @@ struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
int ret;
ret = fiemap_prep(inode, fieinfo, start, &len, 0);
@@ -7841,7 +7861,26 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return ret;
}
- return extent_fiemap(BTRFS_I(inode), fieinfo, start, len);
+ btrfs_inode_lock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+ /*
+ * We did an initial flush to avoid holding the inode's lock while
+ * triggering writeback and waiting for the completion of IO and ordered
+ * extents. Now after we locked the inode we do it again, because it's
+ * possible a new write may have happened in between those two steps.
+ */
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) {
+ ret = btrfs_wait_ordered_range(inode, 0, LLONG_MAX);
+ if (ret) {
+ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+ return ret;
+ }
+ }
+
+ ret = extent_fiemap(btrfs_inode, fieinfo, start, len);
+ btrfs_inode_unlock(btrfs_inode, BTRFS_ILOCK_SHARED);
+
+ return ret;
}
static int btrfs_writepages(struct address_space *mapping,
@@ -10269,6 +10308,13 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
return -EINVAL;
+ /*
+ * Compressed extents should always have checksums, so error out if we
+ * have a NOCOW file or the inode was created while mounted with NODATASUM.
+ */
+ if (inode->flags & BTRFS_INODE_NODATASUM)
+ return -EINVAL;
+
orig_count = iov_iter_count(from);
/* The extent size must be sane. */
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 41b479861b3c..9d1eac15e09e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -721,7 +721,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
free_extent_buffer(leaf);
leaf = NULL;
- new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
+ new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
btrfs_abort_transaction(trans, ret);
@@ -790,6 +790,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
return -EOPNOTSUPP;
}
+ if (btrfs_root_refs(&root->root_item) == 0)
+ return -ENOENT;
+
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return -EINVAL;
@@ -2608,6 +2611,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = -EFAULT;
goto out;
}
+ if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
/* compression requires us to start the IO */
if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
@@ -3808,6 +3815,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
goto out;
}
+ if (sa->create && is_fstree(sa->qgroupid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 1131d5a29d61..e43bc0fdc74e 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -425,16 +425,16 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
}
int lzo_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+ struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
+ const u32 sectorsize = fs_info->sectorsize;
size_t in_len;
size_t out_len;
size_t max_segment_len = WORKSPACE_BUF_LENGTH;
int ret = 0;
- char *kaddr;
- unsigned long bytes;
if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
return -EUCLEAN;
@@ -451,7 +451,7 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
}
data_in += LZO_LEN;
- out_len = PAGE_SIZE;
+ out_len = sectorsize;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
if (ret != LZO_E_OK) {
pr_warn("BTRFS: decompress failed!\n");
@@ -459,29 +459,13 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
goto out;
}
- if (out_len < start_byte) {
+ ASSERT(out_len <= sectorsize);
+ memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len);
+ /* Early end, considered an error. */
+ if (unlikely(out_len < destlen)) {
ret = -EIO;
- goto out;
+ memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len);
}
-
- /*
- * the caller is already checking against PAGE_SIZE, but lets
- * move this check closer to the memcpy/memset
- */
- destlen = min_t(unsigned long, destlen, PAGE_SIZE);
- bytes = min_t(unsigned long, destlen, out_len - start_byte);
-
- kaddr = kmap_local_page(dest_page);
- memcpy(kaddr, workspace->buf + start_byte, bytes);
-
- /*
- * btrfs_getblock is doing a zero on the tail of the page too,
- * but this will cover anything missing from the decompressed
- * data.
- */
- if (bytes < destlen)
- memset(kaddr+bytes, 0, destlen-bytes);
- kunmap_local(kaddr);
out:
return ret;
}
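
[Editorial sketch] The rewritten tail of lzo_decompress() copies whatever did decompress, treats a short sector as -EIO, and still zeroes the remainder so no stale page contents leak. Just that tail handling as a user-space function (error value simplified):

	#include <string.h>

	#define TOY_EIO 5

	/* buf holds out_len freshly decompressed bytes; dst must end up with
	 * exactly destlen valid bytes. Short output is an error, but the
	 * tail is zeroed so the caller never sees uninitialized data. */
	static int toy_copy_decompressed(unsigned char *dst, size_t destlen,
					 const unsigned char *buf, size_t out_len)
	{
		memcpy(dst, buf, out_len < destlen ? out_len : destlen);
		if (out_len < destlen) {
			memset(dst + out_len, 0, destlen - out_len);
			return -TOY_EIO;
		}
		return 0;
	}
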
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 63b426cc7798..5470e1cdf10c 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1736,6 +1736,15 @@ out:
return ret;
}
+static bool qgroup_has_usage(struct btrfs_qgroup *qgroup)
+{
+ return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 ||
+ qgroup->excl > 0 || qgroup->excl_cmpr > 0 ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 ||
+ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0);
+}
+
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1755,6 +1764,11 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
goto out;
}
+ if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* Check if there are no children of this qgroup */
if (!list_empty(&qgroup->members)) {
ret = -EBUSY;
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 6486f0d7e993..8c4fc98ca9ce 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -889,8 +889,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
out_unlock:
spin_unlock(&fs_info->ref_verify_lock);
out:
- if (ret)
+ if (ret) {
+ btrfs_free_ref_cache(fs_info);
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ }
return ret;
}
@@ -1021,8 +1023,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
}
}
if (ret) {
- btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
btrfs_free_ref_cache(fs_info);
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
}
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a01807cbd4d4..0123d2728923 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1098,12 +1098,22 @@ out:
static void scrub_read_endio(struct btrfs_bio *bbio)
{
struct scrub_stripe *stripe = bbio->private;
+ struct bio_vec *bvec;
+ int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
+ int num_sectors;
+ u32 bio_size = 0;
+ int i;
+
+ ASSERT(sector_nr < stripe->nr_sectors);
+ bio_for_each_bvec_all(bvec, &bbio->bio, i)
+ bio_size += bvec->bv_len;
+ num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
if (bbio->bio.bi_status) {
- bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
- bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
+ bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
+ bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
} else {
- bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
+ bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
}
bio_put(&bbio->bio);
if (atomic_dec_and_test(&stripe->pending_io)) {
@@ -1636,6 +1646,9 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct btrfs_bio *bbio = NULL;
+ unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+ stripe->bg->length - stripe->logical) >>
+ fs_info->sectorsize_bits;
u64 stripe_len = BTRFS_STRIPE_LEN;
int mirror = stripe->mirror_num;
int i;
@@ -1646,6 +1659,10 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
struct page *page = scrub_stripe_get_page(stripe, i);
unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
+ /* We're beyond the chunk boundary, no need to read anymore. */
+ if (i >= nr_sectors)
+ break;
+
/* The current sector cannot be merged, submit the bio. */
if (bbio &&
((i > 0 &&
@@ -1701,6 +1718,9 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_bio *bbio;
+ unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+ stripe->bg->length - stripe->logical) >>
+ fs_info->sectorsize_bits;
int mirror = stripe->mirror_num;
ASSERT(stripe->bg);
@@ -1715,14 +1735,16 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
scrub_read_endio, stripe);
- /* Read the whole stripe. */
bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
- for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
+ /* Read the whole range inside the chunk boundary. */
+ for (unsigned int cur = 0; cur < nr_sectors; cur++) {
+ struct page *page = scrub_stripe_get_page(stripe, cur);
+ unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
int ret;
- ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
+ ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
/* We should have allocated enough bio vectors. */
- ASSERT(ret == PAGE_SIZE);
+ ASSERT(ret == fs_info->sectorsize);
}
atomic_inc(&stripe->pending_io);
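
[Editorial sketch] scrub_read_endio() above now sums the bvec lengths of the finished bio and marks only the covered sectors in the error bitmaps, instead of the whole stripe. The byte-to-sector-range step in isolation, with a toy 64-sector bitmap standing in for the kernel's:

	#include <stdint.h>

	/* Mark nr = bio_bytes >> sectorsize_bits sectors starting at
	 * first_sector; assumes first_sector + nr <= 64 for this toy. */
	static void toy_mark_sectors(uint64_t *bitmap, unsigned int first_sector,
				     uint32_t bio_bytes,
				     unsigned int sectorsize_bits)
	{
		unsigned int nr = bio_bytes >> sectorsize_bits;	/* 12 for 4 KiB */

		for (unsigned int i = 0; i < nr; i++)
			*bitmap |= 1ULL << (first_sector + i);
	}
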
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 4e36550618e5..e48a063ef085 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6705,11 +6705,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
if (ret)
goto out;
}
- if (sctx->cur_inode_last_extent <
- sctx->cur_inode_size) {
- ret = send_hole(sctx, sctx->cur_inode_size);
- if (ret)
+ if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
+ ret = range_is_hole_in_parent(sctx,
+ sctx->cur_inode_last_extent,
+ sctx->cur_inode_size);
+ if (ret < 0) {
goto out;
+ } else if (ret == 0) {
+ ret = send_hole(sctx, sctx->cur_inode_size);
+ if (ret < 0)
+ goto out;
+ } else {
+ /* Range is already a hole, skip. */
+ ret = 0;
+ }
}
}
if (need_truncate) {
@@ -8111,7 +8120,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
}
if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
- ret = -EINVAL;
+ ret = -EOPNOTSUPP;
goto out;
}
@@ -8205,8 +8214,8 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
goto out;
}
- sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
- arg->clone_sources_count + 1,
+ sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
+ sizeof(*sctx->clone_roots),
GFP_KERNEL);
if (!sctx->clone_roots) {
ret = -ENOMEM;
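
[Editorial sketch] The send.c change above only swaps the two arguments: calloc-style allocators take (count, element size) in that order, which is also what overflow checkers key on. The same fix expressed with user-space calloc(); the struct is a stand-in:

	#include <stdlib.h>

	struct toy_clone_root { int dummy; };

	int main(void)
	{
		size_t clone_sources_count = 4;
		/* count first, element size second */
		struct toy_clone_root *roots =
			calloc(clone_sources_count + 1, sizeof(*roots));

		if (!roots)
			return 1;
		free(roots);
		return 0;
	}
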
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 571bb13587d5..3b54eb583474 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -856,7 +856,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info)
{
- u64 global_rsv_size = fs_info->global_block_rsv.reserved;
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
u64 ordered, delalloc;
u64 thresh;
u64 used;
@@ -956,8 +956,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
if (ordered >= delalloc)
- used += fs_info->delayed_refs_rsv.reserved +
- fs_info->delayed_block_rsv.reserved;
+ used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
+ btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
else
used += space_info->bytes_may_use - global_rsv_size;
@@ -1173,7 +1173,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
enum btrfs_flush_state flush;
u64 delalloc_size = 0;
u64 to_reclaim, block_rsv_size;
- u64 global_rsv_size = global_rsv->reserved;
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
loops++;
@@ -1185,9 +1185,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
* assume it's tied up in delalloc reservations.
*/
block_rsv_size = global_rsv_size +
- delayed_block_rsv->reserved +
- delayed_refs_rsv->reserved +
- trans_rsv->reserved;
+ btrfs_block_rsv_reserved(delayed_block_rsv) +
+ btrfs_block_rsv_reserved(delayed_refs_rsv) +
+ btrfs_block_rsv_reserved(trans_rsv);
if (block_rsv_size < space_info->bytes_may_use)
delalloc_size = space_info->bytes_may_use - block_rsv_size;
@@ -1207,16 +1207,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
to_reclaim = delalloc_size;
flush = FLUSH_DELALLOC;
} else if (space_info->bytes_pinned >
- (delayed_block_rsv->reserved +
- delayed_refs_rsv->reserved)) {
+ (btrfs_block_rsv_reserved(delayed_block_rsv) +
+ btrfs_block_rsv_reserved(delayed_refs_rsv))) {
to_reclaim = space_info->bytes_pinned;
flush = COMMIT_TRANS;
- } else if (delayed_block_rsv->reserved >
- delayed_refs_rsv->reserved) {
- to_reclaim = delayed_block_rsv->reserved;
+ } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
+ btrfs_block_rsv_reserved(delayed_refs_rsv)) {
+ to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
flush = FLUSH_DELAYED_ITEMS_NR;
} else {
- to_reclaim = delayed_refs_rsv->reserved;
+ to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
flush = FLUSH_DELAYED_REFS_NR;
}
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 93511d54abf8..0e49dab8dad2 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -475,7 +475,8 @@ void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
spin_lock_irqsave(&subpage->lock, flags);
bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
- folio_start_writeback(folio);
+ if (!folio_test_writeback(folio))
+ folio_start_writeback(folio);
spin_unlock_irqrestore(&subpage->lock, flags);
}
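
[Editorial sketch] With several sub-page ranges sharing one folio, only the transition from "no range under writeback" to "some range under writeback" may set the folio-wide flag; the hunk above guards folio_start_writeback() accordingly. A toy model of a range bitmap plus a single flag:

	#include <stdbool.h>
	#include <stdint.h>

	static uint64_t toy_wb_bitmap;		/* one bit per sub-page sector */
	static bool toy_folio_writeback;	/* the single folio-wide flag */

	static void toy_set_range_writeback(unsigned int start_bit,
					    unsigned int nbits)
	{
		/* assumes 0 < nbits < 64 for this toy */
		toy_wb_bitmap |= ((1ULL << nbits) - 1) << start_bit;
		if (!toy_folio_writeback)	/* set the flag only once */
			toy_folio_writeback = true;
	}
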
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 896acfda1789..101f786963d4 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1457,6 +1457,14 @@ static int btrfs_reconfigure(struct fs_context *fc)
btrfs_info_to_ctx(fs_info, &old_ctx);
+ /*
+ * This is our "bind mount" trick: we don't want to allow the user to
+ * change anything other than ro/rw and the subvolume; all of the other
+ * mount options must be maintained.
+ */
+ if (mount_reconfigure)
+ ctx->mount_opt = old_ctx.mount_opt;
+
sync_filesystem(sb);
set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5b3333ceef04..bf8e64c766b6 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -564,56 +564,22 @@ static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
u64 num_bytes,
u64 *delayed_refs_bytes)
{
- struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
- u64 extra_delayed_refs_bytes = 0;
- u64 bytes;
+ u64 bytes = num_bytes + *delayed_refs_bytes;
int ret;
/*
- * If there's a gap between the size of the delayed refs reserve and
- * its reserved space, than some tasks have added delayed refs or bumped
- * its size otherwise (due to block group creation or removal, or block
- * group item update). Also try to allocate that gap in order to prevent
- * using (and possibly abusing) the global reserve when committing the
- * transaction.
- */
- if (flush == BTRFS_RESERVE_FLUSH_ALL &&
- !btrfs_block_rsv_full(delayed_refs_rsv)) {
- spin_lock(&delayed_refs_rsv->lock);
- if (delayed_refs_rsv->size > delayed_refs_rsv->reserved)
- extra_delayed_refs_bytes = delayed_refs_rsv->size -
- delayed_refs_rsv->reserved;
- spin_unlock(&delayed_refs_rsv->lock);
- }
-
- bytes = num_bytes + *delayed_refs_bytes + extra_delayed_refs_bytes;
-
- /*
* We want to reserve all the bytes we may need all at once, so we only
* do 1 enospc flushing cycle per transaction start.
*/
ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
- if (ret == 0) {
- if (extra_delayed_refs_bytes > 0)
- btrfs_migrate_to_delayed_refs_rsv(fs_info,
- extra_delayed_refs_bytes);
- return 0;
- }
-
- if (extra_delayed_refs_bytes > 0) {
- bytes -= extra_delayed_refs_bytes;
- ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
- if (ret == 0)
- return 0;
- }
/*
* If we are an emergency flush, which can steal from the global block
* reserve, then attempt to not reserve space for the delayed refs, as
* we will consume space for them from the global block reserve.
*/
- if (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
+ if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
bytes -= *delayed_refs_bytes;
*delayed_refs_bytes = 0;
ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
@@ -1868,7 +1834,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
key.offset = (u64)-1;
- pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
+ pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
pending->snap = NULL;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 50fdc69fdddf..6eccf8496486 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1436,7 +1436,7 @@ static int check_extent_item(struct extent_buffer *leaf,
if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
- ptr, inline_type, end);
+ ptr, btrfs_extent_inline_ref_size(inline_type), end);
return -EUCLEAN;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 4c32497311d2..d67785be2c77 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3087,7 +3087,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
map = btrfs_find_chunk_map(fs_info, logical, length);
if (unlikely(!map)) {
- read_unlock(&fs_info->mapping_tree_lock);
btrfs_crit(fs_info,
"unable to find chunk map for logical %llu length %llu",
logical, length);
@@ -3095,7 +3094,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
}
if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
- read_unlock(&fs_info->mapping_tree_lock);
btrfs_crit(fs_info,
"found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
logical, logical + length, map->start,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 36cf1f0e338e..8da66ea699e8 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -354,18 +354,13 @@ done:
}
int zlib_decompress(struct list_head *ws, const u8 *data_in,
- struct page *dest_page, unsigned long start_byte, size_t srclen,
+ struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
int wbits = MAX_WBITS;
- unsigned long bytes_left;
- unsigned long total_out = 0;
- unsigned long pg_offset = 0;
-
- destlen = min_t(unsigned long, destlen, PAGE_SIZE);
- bytes_left = destlen;
+ unsigned long to_copy;
workspace->strm.next_in = data_in;
workspace->strm.avail_in = srclen;
@@ -390,60 +385,30 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
return -EIO;
}
- while (bytes_left > 0) {
- unsigned long buf_start;
- unsigned long buf_offset;
- unsigned long bytes;
-
- ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
- if (ret != Z_OK && ret != Z_STREAM_END)
- break;
-
- buf_start = total_out;
- total_out = workspace->strm.total_out;
-
- if (total_out == buf_start) {
- ret = -EIO;
- break;
- }
-
- if (total_out <= start_byte)
- goto next;
-
- if (total_out > start_byte && buf_start < start_byte)
- buf_offset = start_byte - buf_start;
- else
- buf_offset = 0;
-
- bytes = min(PAGE_SIZE - pg_offset,
- PAGE_SIZE - (buf_offset % PAGE_SIZE));
- bytes = min(bytes, bytes_left);
+ /*
+ * Everything (in/out buf) should be at most one sector, so there
+ * should be no need to switch any input/output buffer.
+ */
+ ret = zlib_inflate(&workspace->strm, Z_FINISH);
+ to_copy = min(workspace->strm.total_out, destlen);
+ if (ret != Z_STREAM_END)
+ goto out;
- memcpy_to_page(dest_page, pg_offset,
- workspace->buf + buf_offset, bytes);
+ memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
- pg_offset += bytes;
- bytes_left -= bytes;
-next:
- workspace->strm.next_out = workspace->buf;
- workspace->strm.avail_out = workspace->buf_size;
- }
-
- if (ret != Z_STREAM_END && bytes_left != 0)
+out:
+ if (unlikely(to_copy != destlen)) {
+ pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
+ to_copy, destlen);
ret = -EIO;
- else
+ } else {
ret = 0;
+ }
zlib_inflateEnd(&workspace->strm);
- /*
- * this should only happen if zlib returned fewer bytes than we
- * expected. btrfs_get_block is responsible for zeroing from the
- * end of the inline extent (destlen) to the end of the page
- */
- if (pg_offset < destlen) {
- memzero_page(dest_page, pg_offset, destlen - pg_offset);
- }
+ if (unlikely(to_copy < destlen))
+ memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
return ret;
}
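
[Editorial sketch] The rewritten zlib_decompress() relies on both buffers fitting in one piece, so a single inflate(Z_FINISH) call must end the stream. The same single-shot pattern with user-space zlib (error codes simplified):

	#include <string.h>
	#include <zlib.h>

	static int toy_inflate_one_shot(const unsigned char *src, size_t srclen,
					unsigned char *dst, size_t destlen)
	{
		z_stream strm;
		int ret;

		memset(&strm, 0, sizeof(strm));
		if (inflateInit2(&strm, MAX_WBITS) != Z_OK)
			return -1;

		strm.next_in = (unsigned char *)src;
		strm.avail_in = (uInt)srclen;
		strm.next_out = dst;
		strm.avail_out = (uInt)destlen;

		/* One buffer in, one buffer out: Z_FINISH must end the stream. */
		ret = inflate(&strm, Z_FINISH);
		inflateEnd(&strm);

		if (ret != Z_STREAM_END || strm.total_out != destlen)
			return -1;	/* short or corrupt stream */
		return 0;
	}
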
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 5bd76813b23f..5f750fa53a2b 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1639,6 +1639,15 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
out:
+ /* Reject non SINGLE data profiles without RST */
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
+ (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
+ }
+
if (cache->alloc_offset > cache->zone_capacity) {
btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
@@ -1670,6 +1679,7 @@ out:
}
bitmap_free(active);
kfree(zone_info);
+ btrfs_free_chunk_map(map);
return ret;
}
@@ -2055,6 +2065,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
map = block_group->physical_map;
+ spin_lock(&fs_info->zone_active_bgs_lock);
spin_lock(&block_group->lock);
if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
ret = true;
@@ -2067,7 +2078,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
goto out_unlock;
}
- spin_lock(&fs_info->zone_active_bgs_lock);
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_zoned_device_info *zinfo;
int reserved = 0;
@@ -2087,20 +2097,17 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
*/
if (atomic_read(&zinfo->active_zones_left) <= reserved) {
ret = false;
- spin_unlock(&fs_info->zone_active_bgs_lock);
goto out_unlock;
}
if (!btrfs_dev_set_active_zone(device, physical)) {
/* Cannot activate the zone */
ret = false;
- spin_unlock(&fs_info->zone_active_bgs_lock);
goto out_unlock;
}
if (!is_data)
zinfo->reserved_active_zones--;
}
- spin_unlock(&fs_info->zone_active_bgs_lock);
/* Successfully activated all the zones */
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
@@ -2108,8 +2115,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
/* For the active block group list */
btrfs_get_block_group(block_group);
-
- spin_lock(&fs_info->zone_active_bgs_lock);
list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
spin_unlock(&fs_info->zone_active_bgs_lock);
@@ -2117,6 +2122,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
out_unlock:
spin_unlock(&block_group->lock);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
return ret;
}
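
[Editorial sketch] btrfs_zone_activate() above is reworked so zone_active_bgs_lock is always taken before the per-block-group lock, removing the chance of two paths acquiring the pair in opposite orders. The rule in miniature with pthreads:

	#include <pthread.h>

	static pthread_mutex_t toy_list_lock  = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t toy_group_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Every path takes the list lock first and the group lock second,
	 * so an ABBA deadlock between two such paths is impossible. */
	static void toy_activate(void)
	{
		pthread_mutex_lock(&toy_list_lock);
		pthread_mutex_lock(&toy_group_lock);
		/* mutate group state and the active list together */
		pthread_mutex_unlock(&toy_group_lock);
		pthread_mutex_unlock(&toy_list_lock);
	}
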
diff --git a/fs/cachefiles/Kconfig b/fs/cachefiles/Kconfig
index 8df715640a48..c5a070550ee3 100644
--- a/fs/cachefiles/Kconfig
+++ b/fs/cachefiles/Kconfig
@@ -2,7 +2,7 @@
config CACHEFILES
tristate "Filesystem caching on files"
- depends on FSCACHE && BLOCK
+ depends on NETFS_SUPPORT && FSCACHE && BLOCK
help
This permits use of a mounted filesystem as a cache for other
filesystems - primarily networking filesystems - thus allowing fast
diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
index 7077f72e6f47..f449f7340aad 100644
--- a/fs/cachefiles/cache.c
+++ b/fs/cachefiles/cache.c
@@ -168,6 +168,8 @@ error_unsupported:
dput(root);
error_open_root:
cachefiles_end_secure(cache, saved_cred);
+ put_cred(cache->cache_cred);
+ cache->cache_cred = NULL;
error_getsec:
fscache_relinquish_cache(cache_cookie);
cache->cache = NULL;
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 3f24905f4066..6465e2574230 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -816,6 +816,7 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
cachefiles_put_directory(cache->graveyard);
cachefiles_put_directory(cache->store);
mntput(cache->mnt);
+ put_cred(cache->cache_cred);
kfree(cache->rootdirname);
kfree(cache->secctx);
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 4a87c9d714a9..d33169f0018b 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -246,7 +246,7 @@ extern bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
enum fscache_want_state want_state);
extern int __cachefiles_prepare_write(struct cachefiles_object *object,
struct file *file,
- loff_t *_start, size_t *_len,
+ loff_t *_start, size_t *_len, size_t upper_len,
bool no_space_allocated_yet);
extern int __cachefiles_write(struct cachefiles_object *object,
struct file *file,
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 5857241c5918..1d685357e67f 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -517,18 +517,26 @@ cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
*/
int __cachefiles_prepare_write(struct cachefiles_object *object,
struct file *file,
- loff_t *_start, size_t *_len,
+ loff_t *_start, size_t *_len, size_t upper_len,
bool no_space_allocated_yet)
{
struct cachefiles_cache *cache = object->volume->cache;
loff_t start = *_start, pos;
- size_t len = *_len, down;
+ size_t len = *_len;
int ret;
/* Round to DIO size */
- down = start - round_down(start, PAGE_SIZE);
- *_start = start - down;
- *_len = round_up(down + len, PAGE_SIZE);
+ start = round_down(*_start, PAGE_SIZE);
+ if (start != *_start || *_len > upper_len) {
+ /* Probably asked to cache a streaming write written into the
+ * pagecache when the cookie was temporarily out of service due
+ * to culling.
+ */
+ fscache_count_dio_misfit();
+ return -ENOBUFS;
+ }
+
+ *_len = round_up(len, PAGE_SIZE);
/* We need to work out whether there's sufficient disk space to perform
* the write - but we can skip that check if we have space already
@@ -539,7 +547,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
pos = cachefiles_inject_read_error();
if (pos == 0)
- pos = vfs_llseek(file, *_start, SEEK_DATA);
+ pos = vfs_llseek(file, start, SEEK_DATA);
if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
if (pos == -ENXIO)
goto check_space; /* Unallocated tail */
@@ -547,7 +555,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
cachefiles_trace_seek_error);
return pos;
}
- if ((u64)pos >= (u64)*_start + *_len)
+ if ((u64)pos >= (u64)start + *_len)
goto check_space; /* Unallocated region */
/* We have a block that's at least partially filled - if we're low on
@@ -560,13 +568,13 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
pos = cachefiles_inject_read_error();
if (pos == 0)
- pos = vfs_llseek(file, *_start, SEEK_HOLE);
+ pos = vfs_llseek(file, start, SEEK_HOLE);
if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
trace_cachefiles_io_error(object, file_inode(file), pos,
cachefiles_trace_seek_error);
return pos;
}
- if ((u64)pos >= (u64)*_start + *_len)
+ if ((u64)pos >= (u64)start + *_len)
return 0; /* Fully allocated */
/* Partially allocated, but insufficient space: cull. */
@@ -574,7 +582,7 @@ int __cachefiles_prepare_write(struct cachefiles_object *object,
ret = cachefiles_inject_remove_error();
if (ret == 0)
ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- *_start, *_len);
+ start, *_len);
if (ret < 0) {
trace_cachefiles_io_error(object, file_inode(file), ret,
cachefiles_trace_fallocate_error);
@@ -591,8 +599,8 @@ check_space:
}
static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size,
- bool no_space_allocated_yet)
+ loff_t *_start, size_t *_len, size_t upper_len,
+ loff_t i_size, bool no_space_allocated_yet)
{
struct cachefiles_object *object = cachefiles_cres_object(cres);
struct cachefiles_cache *cache = object->volume->cache;
@@ -608,7 +616,7 @@ static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
cachefiles_begin_secure(cache, &saved_cred);
ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
- _start, _len,
+ _start, _len, upper_len,
no_space_allocated_yet);
cachefiles_end_secure(cache, saved_cred);
return ret;
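
[Editorial sketch] After the change above, __cachefiles_prepare_write() no longer expands a misaligned request itself; an unaligned start or a length beyond upper_len is rejected as a DIO misfit. The acceptance test alone, sketched in user space with a fixed 4 KiB page size:

	#include <stddef.h>

	#define TOY_PAGE_SIZE 4096UL

	/* Returns nonzero when the request cannot be written with DIO as-is
	 * (the kernel code returns -ENOBUFS and counts a misfit). */
	static int toy_dio_misfit(unsigned long start, size_t len,
				  size_t upper_len)
	{
		unsigned long aligned = start & ~(TOY_PAGE_SIZE - 1);

		return aligned != start || len > upper_len;
	}
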
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index b8fbbb1961bb..4ba42f1fa3b4 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -50,7 +50,7 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
return -ENOBUFS;
cachefiles_begin_secure(cache, &saved_cred);
- ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
+ ret = __cachefiles_prepare_write(object, file, &pos, &len, len, true);
cachefiles_end_secure(cache, saved_cred);
if (ret < 0)
return ret;
@@ -539,6 +539,9 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
struct fscache_volume *volume = object->volume->vcookie;
size_t volume_key_size, cookie_key_size, data_len;
+ if (!object->ondemand)
+ return 0;
+
/*
* CacheFiles will firstly check the cache file under the root cache
* directory. If the coherency check failed, it will fallback to
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 94df854147d3..7249d70e1a43 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -7,6 +7,7 @@ config CEPH_FS
select CRYPTO_AES
select CRYPTO
select NETFS_SUPPORT
+ select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
default n
help
Choose Y or M here to include support for mounting the
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 13af429ab030..1340d77124ae 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -159,27 +159,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
ceph_put_snap_context(snapc);
}
- folio_wait_fscache(folio);
-}
-
-static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
-{
- struct inode *inode = folio->mapping->host;
- struct ceph_client *cl = ceph_inode_to_client(inode);
-
- doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
- folio->index, folio_test_dirty(folio) ? "" : "not ");
-
- if (folio_test_private(folio))
- return false;
-
- if (folio_test_fscache(folio)) {
- if (current_is_kswapd() || !(gfp & __GFP_FS))
- return false;
- folio_wait_fscache(folio);
- }
- ceph_fscache_note_page_release(inode);
- return true;
+ netfs_invalidate_folio(folio, offset, length);
}
static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
@@ -357,6 +337,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
u64 len = subreq->len;
bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
u64 off = subreq->start;
+ int extent_cnt;
if (ceph_inode_is_shutdown(inode)) {
err = -EIO;
@@ -370,8 +351,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
- CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
- NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
+ CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
if (IS_ERR(req)) {
err = PTR_ERR(req);
req = NULL;
@@ -379,7 +360,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
}
if (sparse) {
- err = ceph_alloc_sparse_ext_map(&req->r_ops[0]);
+ extent_cnt = __ceph_sparse_read_ext_count(inode, len);
+ err = ceph_alloc_sparse_ext_map(&req->r_ops[0], extent_cnt);
if (err)
goto out;
}
@@ -509,7 +491,6 @@ static void ceph_netfs_free_request(struct netfs_io_request *rreq)
const struct netfs_request_ops ceph_netfs_ops = {
.init_request = ceph_init_request,
.free_request = ceph_netfs_free_request,
- .begin_cache_operation = ceph_begin_cache_operation,
.issue_read = ceph_netfs_issue_read,
.expand_readahead = ceph_netfs_expand_readahead,
.clamp_length = ceph_netfs_clamp_length,
@@ -1586,7 +1567,7 @@ const struct address_space_operations ceph_aops = {
.write_end = ceph_write_end,
.dirty_folio = ceph_dirty_folio,
.invalidate_folio = ceph_invalidate_folio,
- .release_folio = ceph_release_folio,
+ .release_folio = netfs_release_folio,
.direct_IO = noop_direct_IO,
};
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index dc502daac49a..20efac020394 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -43,38 +43,19 @@ static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
}
}
-static inline void ceph_fscache_unpin_writeback(struct inode *inode,
+static inline int ceph_fscache_unpin_writeback(struct inode *inode,
struct writeback_control *wbc)
{
- fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode)));
+ return netfs_unpin_writeback(inode, wbc);
}
-static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
- struct folio *folio)
-{
- struct ceph_inode_info *ci = ceph_inode(mapping->host);
-
- return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
-}
-
-static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
-{
- struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
-
- return fscache_begin_read_operation(&rreq->cache_resources, cookie);
-}
+#define ceph_fscache_dirty_folio netfs_dirty_folio
static inline bool ceph_is_cache_enabled(struct inode *inode)
{
return fscache_cookie_enabled(ceph_fscache_cookie(ceph_inode(inode)));
}
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
-
- fscache_note_page_release(ceph_fscache_cookie(ci));
-}
#else /* CONFIG_CEPH_FSCACHE */
static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
struct fs_context *fc)
@@ -119,30 +100,18 @@ static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
{
}
-static inline void ceph_fscache_unpin_writeback(struct inode *inode,
- struct writeback_control *wbc)
+static inline int ceph_fscache_unpin_writeback(struct inode *inode,
+ struct writeback_control *wbc)
{
+ return 0;
}
-static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
- struct folio *folio)
-{
- return filemap_dirty_folio(mapping, folio);
-}
+#define ceph_fscache_dirty_folio filemap_dirty_folio
static inline bool ceph_is_cache_enabled(struct inode *inode)
{
return false;
}
-
-static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
-{
- return -ENOBUFS;
-}
-
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-}
#endif /* CONFIG_CEPH_FSCACHE */
#endif
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 2c0b8dc3dd0d..7fb4aae97412 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1452,7 +1452,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
if (flushing & CEPH_CAP_XATTR_EXCL) {
arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
arg->xattr_version = ci->i_xattrs.version;
- arg->xattr_buf = ci->i_xattrs.blob;
+ arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob);
} else {
arg->xattr_buf = NULL;
arg->old_xattr_buf = NULL;
@@ -1553,6 +1553,7 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
encode_cap_msg(msg, arg);
ceph_con_send(&arg->session->s_con, msg);
ceph_buffer_put(arg->old_xattr_buf);
+ ceph_buffer_put(arg->xattr_buf);
if (arg->wake)
wake_up_all(&ci->i_cap_wq);
}
@@ -2155,6 +2156,30 @@ retry:
ceph_cap_string(cap->implemented),
ceph_cap_string(revoking));
+ /* completed revocation? going down and there are no caps? */
+ if (revoking) {
+ if ((revoking & cap_used) == 0) {
+ doutc(cl, "completed revocation of %s\n",
+ ceph_cap_string(cap->implemented & ~cap->issued));
+ goto ack;
+ }
+
+		/*
+		 * If "i_wrbuffer_ref" was increased by mmap or a generic
+		 * cache write just before ceph_check_caps() was called,
+		 * revoking the Fb capability will fail this time. Then we
+		 * must wait for the BDI's delayed work to flush the dirty
+		 * pages and release "i_wrbuffer_ref", which can take up to
+		 * 5 seconds. That means the MDS may need to wait up to
+		 * 5 seconds for the Fb capability revocation to finish.
+		 *
+		 * Let's queue a writeback for it.
+		 */
+ if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
+ (revoking & CEPH_CAP_FILE_BUFFER))
+ queue_writeback = true;
+ }
+
if (cap == ci->i_auth_cap &&
(cap->issued & CEPH_CAP_FILE_WR)) {
/* request larger max_size from MDS? */
@@ -2182,30 +2207,6 @@ retry:
}
}
- /* completed revocation? going down and there are no caps? */
- if (revoking) {
- if ((revoking & cap_used) == 0) {
- doutc(cl, "completed revocation of %s\n",
- ceph_cap_string(cap->implemented & ~cap->issued));
- goto ack;
- }
-
- /*
- * If the "i_wrbuffer_ref" was increased by mmap or generic
- * cache write just before the ceph_check_caps() is called,
- * the Fb capability revoking will fail this time. Then we
- * must wait for the BDI's delayed work to flush the dirty
- * pages and to release the "i_wrbuffer_ref", which will cost
- * at most 5 seconds. That means the MDS needs to wait at
- * most 5 seconds to finished the Fb capability's revocation.
- *
- * Let's queue a writeback for it.
- */
- if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
- (revoking & CEPH_CAP_FILE_BUFFER))
- queue_writeback = true;
- }
-
/* want more caps from mds? */
if (want & ~cap->mds_wanted) {
if (want & ~(cap->mds_wanted | cap->issued))
@@ -3215,7 +3216,6 @@ static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
enum put_cap_refs_mode {
PUT_CAP_REFS_SYNC = 0,
- PUT_CAP_REFS_NO_CHECK,
PUT_CAP_REFS_ASYNC,
};
@@ -3331,11 +3331,6 @@ void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had)
__ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC);
}
-void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
-{
- __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK);
-}
-
/*
* Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
* context. Adjust per-snap dirty page accounting as appropriate.
@@ -4777,7 +4772,22 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
if (__ceph_caps_dirty(ci)) {
struct ceph_mds_client *mdsc =
ceph_inode_to_fs_client(inode)->mdsc;
- __cap_delay_requeue_front(mdsc, ci);
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ spin_lock(&mdsc->cap_unlink_delay_lock);
+ ci->i_ceph_flags |= CEPH_I_FLUSH;
+ if (!list_empty(&ci->i_cap_delay_list))
+ list_del_init(&ci->i_cap_delay_list);
+ list_add_tail(&ci->i_cap_delay_list,
+ &mdsc->cap_unlink_delay_list);
+ spin_unlock(&mdsc->cap_unlink_delay_lock);
+
+ /*
+			 * Fire the work immediately, because the MDS may be
+			 * waiting for the caps release.
+ */
+ ceph_queue_cap_unlink_work(mdsc);
}
}
spin_unlock(&ci->i_ceph_lock);
@@ -4887,13 +4897,15 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
struct inode *dir,
int mds, int drop, int unless)
{
- struct dentry *parent = NULL;
struct ceph_mds_request_release *rel = *p;
struct ceph_dentry_info *di = ceph_dentry(dentry);
struct ceph_client *cl;
int force = 0;
int ret;
+ /* This shouldn't happen */
+ BUG_ON(!dir);
+
/*
	 * force a record for the directory caps if we have a dentry lease.
* this is racy (can't take i_ceph_lock and d_lock together), but it
@@ -4903,14 +4915,9 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
spin_lock(&dentry->d_lock);
if (di->lease_session && di->lease_session->s_mds == mds)
force = 1;
- if (!dir) {
- parent = dget(dentry->d_parent);
- dir = d_inode(parent);
- }
spin_unlock(&dentry->d_lock);
ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
- dput(parent);
cl = ceph_inode_to_client(dir);
spin_lock(&dentry->d_lock);
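
The revocation block that moves earlier in ceph_check_caps() reduces to an early ack plus one predicate. A condensed, illustrative helper (the function name is ours, not the kernel's):

	/*
	 * Illustrative reduction of the moved hunk: ack immediately once
	 * nothing in-use is still being revoked; otherwise, if Fb is being
	 * revoked while dirty write buffers hold "i_wrbuffer_ref", queue a
	 * writeback rather than waiting up to ~5s for the BDI delayed flush.
	 */
	static bool ceph_revoke_needs_writeback(struct inode *inode,
						struct ceph_inode_info *ci,
						int revoking)
	{
		return S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
		       (revoking & CEPH_CAP_FILE_BUFFER);
	}
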
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 678596684596..0e9f56eaba1e 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1593,10 +1593,12 @@ struct ceph_lease_walk_control {
unsigned long dir_lease_ttl;
};
+static int __dir_lease_check(const struct dentry *, struct ceph_lease_walk_control *);
+static int __dentry_lease_check(const struct dentry *);
+
static unsigned long
__dentry_leases_walk(struct ceph_mds_client *mdsc,
- struct ceph_lease_walk_control *lwc,
- int (*check)(struct dentry*, void*))
+ struct ceph_lease_walk_control *lwc)
{
struct ceph_dentry_info *di, *tmp;
struct dentry *dentry, *last = NULL;
@@ -1624,7 +1626,10 @@ __dentry_leases_walk(struct ceph_mds_client *mdsc,
goto next;
}
- ret = check(dentry, lwc);
+ if (lwc->dir_lease)
+ ret = __dir_lease_check(dentry, lwc);
+ else
+ ret = __dentry_lease_check(dentry);
if (ret & TOUCH) {
/* move it into tail of dir lease list */
__dentry_dir_lease_touch(mdsc, di);
@@ -1681,7 +1686,7 @@ next:
return freed;
}
-static int __dentry_lease_check(struct dentry *dentry, void *arg)
+static int __dentry_lease_check(const struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
int ret;
@@ -1696,9 +1701,9 @@ static int __dentry_lease_check(struct dentry *dentry, void *arg)
return DELETE;
}
-static int __dir_lease_check(struct dentry *dentry, void *arg)
+static int __dir_lease_check(const struct dentry *dentry,
+ struct ceph_lease_walk_control *lwc)
{
- struct ceph_lease_walk_control *lwc = arg;
struct ceph_dentry_info *di = ceph_dentry(dentry);
int ret = __dir_lease_try_check(dentry);
@@ -1737,7 +1742,7 @@ int ceph_trim_dentries(struct ceph_mds_client *mdsc)
lwc.dir_lease = false;
lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE * 2;
- freed = __dentry_leases_walk(mdsc, &lwc, __dentry_lease_check);
+ freed = __dentry_leases_walk(mdsc, &lwc);
if (!lwc.nr_to_scan) /* more invalid leases */
return -EAGAIN;
@@ -1747,7 +1752,7 @@ int ceph_trim_dentries(struct ceph_mds_client *mdsc)
lwc.dir_lease = true;
lwc.expire_dir_lease = freed < count;
lwc.dir_lease_ttl = mdsc->fsc->mount_options->caps_wanted_delay_max * HZ;
- freed +=__dentry_leases_walk(mdsc, &lwc, __dir_lease_check);
+	freed += __dentry_leases_walk(mdsc, &lwc);
if (!lwc.nr_to_scan) /* more to check */
return -EAGAIN;
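
Replacing the check callback with a flag test removes an indirect call from the walk; with only two checkers, a direct branch is simpler and friendlier to control-flow-integrity builds. The dispatch now has this shape (illustrative reduction):

	/* Illustrative reduction of the new flag-based dispatch. */
	static int lease_check_dispatch(const struct dentry *dentry,
					struct ceph_lease_walk_control *lwc)
	{
		return lwc->dir_lease ? __dir_lease_check(dentry, lwc)
				      : __dentry_lease_check(dentry);
	}
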
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 726af69d4d62..a79f163ae4ed 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -286,8 +286,6 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
doutc(cl, "%llx.%llx parent %llx hash %x err=%d", vino.ino,
vino.snap, sfh->parent_ino, sfh->hash, err);
}
- if (IS_ERR(inode))
- return ERR_CAST(inode);
/* see comments in ceph_get_parent() */
return unlinked ? d_obtain_root(inode) : d_obtain_alias(inode);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d380d9dad0e0..abe8028d95bf 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1029,6 +1029,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
struct ceph_osd_req_op *op;
u64 read_off = off;
u64 read_len = len;
+ int extent_cnt;
/* determine new offset/length if encrypted */
ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
@@ -1068,7 +1069,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
op = &req->r_ops[0];
if (sparse) {
- ret = ceph_alloc_sparse_ext_map(op);
+ extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
if (ret) {
ceph_osdc_put_request(req);
break;
@@ -1465,6 +1467,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
ssize_t len;
struct ceph_osd_req_op *op;
int readop = sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ;
+ int extent_cnt;
if (write)
size = min_t(u64, size, fsc->mount_options->wsize);
@@ -1528,7 +1531,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
op = &req->r_ops[0];
if (sparse) {
- ret = ceph_alloc_sparse_ext_map(op);
+ extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
if (ret) {
ceph_osdc_put_request(req);
break;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 0679240f06db..7b2e77517f23 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -78,6 +78,8 @@ struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
if (!inode)
return ERR_PTR(-ENOMEM);
+ inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
+
if (!S_ISLNK(*mode)) {
err = ceph_pre_init_acls(dir, mode, as_ctx);
if (err < 0)
@@ -574,7 +576,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
doutc(fsc->client, "%p\n", &ci->netfs.inode);
/* Set parameters for the netfs library */
- netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
+ netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
spin_lock_init(&ci->i_ceph_lock);
@@ -694,7 +696,7 @@ void ceph_evict_inode(struct inode *inode)
percpu_counter_dec(&mdsc->metric.total_inodes);
truncate_inode_pages_final(&inode->i_data);
- if (inode->i_state & I_PINNING_FSCACHE_WB)
+ if (inode->i_state & I_PINNING_NETFS_WB)
ceph_fscache_unuse_cookie(inode, true);
clear_inode(inode);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 02ebfabfc8ee..3ab9c268a8bb 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1089,7 +1089,7 @@ void ceph_mdsc_release_request(struct kref *kref)
struct ceph_mds_request *req = container_of(kref,
struct ceph_mds_request,
r_kref);
- ceph_mdsc_release_dir_caps_no_check(req);
+ ceph_mdsc_release_dir_caps_async(req);
destroy_reply_info(&req->r_reply_info);
if (req->r_request)
ceph_msg_put(req->r_request);
@@ -1534,7 +1534,8 @@ static int encode_metric_spec(void **p, void *end)
* session message, specialization for CEPH_SESSION_REQUEST_OPEN
* to include additional client metadata fields.
*/
-static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
+static struct ceph_msg *
+create_session_full_msg(struct ceph_mds_client *mdsc, int op, u64 seq)
{
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
@@ -1578,6 +1579,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
size = METRIC_BYTES(count);
extra_bytes += 2 + 4 + 4 + size;
+ /* flags, mds auth caps and oldest_client_tid */
+ extra_bytes += 4 + 4 + 8;
+
/* Allocate the message */
msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
GFP_NOFS, false);
@@ -1589,16 +1593,16 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
end = p + msg->front.iov_len;
h = p;
- h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
+ h->op = cpu_to_le32(op);
h->seq = cpu_to_le64(seq);
/*
* Serialize client metadata into waiting buffer space, using
* the format that userspace expects for map<string, string>
*
- * ClientSession messages with metadata are v4
+ * ClientSession messages with metadata are v7
*/
- msg->hdr.version = cpu_to_le16(4);
+ msg->hdr.version = cpu_to_le16(7);
msg->hdr.compat_version = cpu_to_le16(1);
/* The write pointer, following the session_head structure */
@@ -1634,6 +1638,15 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
return ERR_PTR(ret);
}
+ /* version == 5, flags */
+ ceph_encode_32(&p, 0);
+
+ /* version == 6, mds auth caps */
+ ceph_encode_32(&p, 0);
+
+ /* version == 7, oldest_client_tid */
+ ceph_encode_64(&p, mdsc->oldest_tid);
+
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -1663,7 +1676,8 @@ static int __open_session(struct ceph_mds_client *mdsc,
session->s_renew_requested = jiffies;
/* send connect message */
- msg = create_session_open_msg(mdsc, session->s_seq);
+ msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_OPEN,
+ session->s_seq);
if (IS_ERR(msg))
return PTR_ERR(msg);
ceph_con_send(&session->s_con, msg);
@@ -2028,10 +2042,10 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
doutc(cl, "to mds%d (%s)\n", session->s_mds,
ceph_mds_state_name(state));
- msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
+ msg = create_session_full_msg(mdsc, CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
- if (!msg)
- return -ENOMEM;
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
ceph_con_send(&session->s_con, msg);
return 0;
}
@@ -2470,6 +2484,50 @@ void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
}
}
+void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
+{
+ struct ceph_client *cl = mdsc->fsc->client;
+ if (mdsc->stopping)
+ return;
+
+ if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work)) {
+ doutc(cl, "caps unlink work queued\n");
+ } else {
+ doutc(cl, "failed to queue caps unlink work\n");
+ }
+}
+
+static void ceph_cap_unlink_work(struct work_struct *work)
+{
+ struct ceph_mds_client *mdsc =
+ container_of(work, struct ceph_mds_client, cap_unlink_work);
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+ spin_lock(&mdsc->cap_unlink_delay_lock);
+ while (!list_empty(&mdsc->cap_unlink_delay_list)) {
+ struct ceph_inode_info *ci;
+ struct inode *inode;
+
+ ci = list_first_entry(&mdsc->cap_unlink_delay_list,
+ struct ceph_inode_info,
+ i_cap_delay_list);
+ list_del_init(&ci->i_cap_delay_list);
+
+ inode = igrab(&ci->netfs.inode);
+ if (inode) {
+ spin_unlock(&mdsc->cap_unlink_delay_lock);
+ doutc(cl, "on %p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
+ iput(inode);
+ spin_lock(&mdsc->cap_unlink_delay_lock);
+ }
+ }
+ spin_unlock(&mdsc->cap_unlink_delay_lock);
+ doutc(cl, "done\n");
+}
+
/*
* requests
*/
@@ -4128,12 +4186,12 @@ static void handle_session(struct ceph_mds_session *session,
pr_info_client(cl, "mds%d reconnect success\n",
session->s_mds);
+ session->s_features = features;
if (session->s_state == CEPH_MDS_SESSION_OPEN) {
pr_notice_client(cl, "mds%d is already opened\n",
session->s_mds);
} else {
session->s_state = CEPH_MDS_SESSION_OPEN;
- session->s_features = features;
renewed_caps(mdsc, session, 0);
if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT,
&session->s_features))
@@ -4247,7 +4305,7 @@ void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
}
}
-void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
+void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req)
{
struct ceph_client *cl = req->r_mdsc->fsc->client;
int dcaps;
@@ -4255,8 +4313,7 @@ void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
dcaps = xchg(&req->r_dir_caps, 0);
if (dcaps) {
doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
- ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
- dcaps);
+ ceph_put_cap_refs_async(ceph_inode(req->r_parent), dcaps);
}
}
@@ -4292,7 +4349,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
if (req->r_session->s_mds != session->s_mds)
continue;
- ceph_mdsc_release_dir_caps_no_check(req);
+ ceph_mdsc_release_dir_caps_async(req);
__send_request(session, req, true);
}
@@ -5346,6 +5403,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_LIST_HEAD(&mdsc->cap_delay_list);
INIT_LIST_HEAD(&mdsc->cap_wait_list);
spin_lock_init(&mdsc->cap_delay_lock);
+ INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
+ spin_lock_init(&mdsc->cap_unlink_delay_lock);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
@@ -5354,6 +5413,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
+ INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work);
err = ceph_metric_init(&mdsc->metric);
if (err)
goto err_mdsmap;
@@ -5627,6 +5687,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
ceph_cleanup_global_and_empty_realms(mdsc);
cancel_work_sync(&mdsc->cap_reclaim_work);
+ cancel_work_sync(&mdsc->cap_unlink_work);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
doutc(cl, "done\n");
@@ -5870,7 +5931,8 @@ static void mds_peer_reset(struct ceph_connection *con)
pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
s->s_mds);
- if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
+ if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO &&
+ ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) >= CEPH_MDS_STATE_RECONNECT)
send_mds_reconnect(mdsc, s);
}
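
ceph_cap_unlink_work() follows the usual drain pattern for a spinlock-protected inode list: pop under the lock, pin the inode with igrab(), drop the lock for the sleeping work, then retake it. A self-contained sketch of that pattern, assuming the same list/lock pair as above:

	/* Illustrative drain, mirroring ceph_cap_unlink_work() above. */
	static void drain_cap_unlink_list(struct ceph_mds_client *mdsc)
	{
		spin_lock(&mdsc->cap_unlink_delay_lock);
		while (!list_empty(&mdsc->cap_unlink_delay_list)) {
			struct ceph_inode_info *ci =
				list_first_entry(&mdsc->cap_unlink_delay_list,
						 struct ceph_inode_info,
						 i_cap_delay_list);
			struct inode *inode;

			list_del_init(&ci->i_cap_delay_list);
			inode = igrab(&ci->netfs.inode); /* NULL if being evicted */
			if (!inode)
				continue;
			spin_unlock(&mdsc->cap_unlink_delay_lock);
			ceph_check_caps(ci, CHECK_CAPS_FLUSH);	/* may sleep */
			iput(inode);
			spin_lock(&mdsc->cap_unlink_delay_lock);
		}
		spin_unlock(&mdsc->cap_unlink_delay_lock);
	}
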
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 2e6ddaa13d72..03f8ff00874f 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -462,6 +462,8 @@ struct ceph_mds_client {
unsigned long last_renew_caps; /* last time we renewed our caps */
struct list_head cap_delay_list; /* caps with delayed release */
spinlock_t cap_delay_lock; /* protects cap_delay_list */
+ struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
+ spinlock_t cap_unlink_delay_lock; /* protects cap_unlink_delay_list */
struct list_head snap_flush_list; /* cap_snaps ready to flush */
spinlock_t snap_flush_lock;
@@ -475,6 +477,8 @@ struct ceph_mds_client {
struct work_struct cap_reclaim_work;
atomic_t cap_reclaim_pending;
+ struct work_struct cap_unlink_work;
+
/*
* Cap reservations
*
@@ -552,7 +556,7 @@ extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req);
extern void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req);
-extern void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req);
+extern void ceph_mdsc_release_dir_caps_async(struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
kref_get(&req->r_kref);
@@ -574,6 +578,7 @@ extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
+extern void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc);
extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, int mds, void *),
void *arg);
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index fae97c25ce58..8109aba66e02 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -380,10 +380,11 @@ struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
ceph_decode_skip_8(p, end, bad_ext);
/* required_client_features */
ceph_decode_skip_set(p, end, 64, bad_ext);
+ /* bal_rank_mask */
+ ceph_decode_skip_string(p, end, bad_ext);
+ }
+ if (mdsmap_ev >= 18) {
ceph_decode_64_safe(p, end, m->m_max_xattr_size, bad_ext);
- } else {
- /* This forces the usage of the (sync) SETXATTR Op */
- m->m_max_xattr_size = 0;
}
bad_ext:
doutc(cl, "m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h
index 89f1931f1ba6..1f2171dd01bf 100644
--- a/fs/ceph/mdsmap.h
+++ b/fs/ceph/mdsmap.h
@@ -27,7 +27,11 @@ struct ceph_mdsmap {
u32 m_session_timeout; /* seconds */
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
- u64 m_max_xattr_size; /* maximum size for xattrs blob */
+ /*
+ * maximum size for xattrs blob.
+ * Zeroed by default to force the usage of the (sync) SETXATTR Op.
+ */
+ u64 m_max_xattr_size;
u32 m_max_mds; /* expected up:active mds number */
u32 m_num_active_mds; /* actual up:active mds number */
u32 possible_max_rank; /* possible max rank index */
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 9d36c3532de1..06ee397e0c3a 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -197,10 +197,10 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
}
/*
- * This function walks through the snaprealm for an inode and returns the
- * ceph_snap_realm for the first snaprealm that has quotas set (max_files,
+ * This function walks through the snaprealm for an inode and sets
+ * *realmp to the first snaprealm that has quotas set (max_files,
* max_bytes, or any, depending on the 'which_quota' argument). If the root is
- * reached, return the root ceph_snap_realm instead.
+ * reached, sets *realmp to the root ceph_snap_realm instead.
*
* Note that the caller is responsible for calling ceph_put_snap_realm() on the
* returned realm.
@@ -211,10 +211,9 @@ void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc)
* this function will return -EAGAIN; otherwise, the snaprealms walk-through
* will be restarted.
*/
-static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
- struct inode *inode,
- enum quota_get_realm which_quota,
- bool retry)
+static int get_quota_realm(struct ceph_mds_client *mdsc, struct inode *inode,
+ enum quota_get_realm which_quota,
+ struct ceph_snap_realm **realmp, bool retry)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci = NULL;
@@ -222,8 +221,10 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
struct inode *in;
bool has_quota;
+ if (realmp)
+ *realmp = NULL;
if (ceph_snap(inode) != CEPH_NOSNAP)
- return NULL;
+ return 0;
restart:
realm = ceph_inode(inode)->i_snap_realm;
@@ -250,7 +251,7 @@ restart:
break;
ceph_put_snap_realm(mdsc, realm);
if (!retry)
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
goto restart;
}
@@ -259,8 +260,11 @@ restart:
iput(in);
next = realm->parent;
- if (has_quota || !next)
- return realm;
+ if (has_quota || !next) {
+ if (realmp)
+ *realmp = realm;
+ return 0;
+ }
ceph_get_snap_realm(mdsc, next);
ceph_put_snap_realm(mdsc, realm);
@@ -269,7 +273,7 @@ restart:
if (realm)
ceph_put_snap_realm(mdsc, realm);
- return NULL;
+ return 0;
}
bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
@@ -277,6 +281,7 @@ bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old->i_sb);
struct ceph_snap_realm *old_realm, *new_realm;
bool is_same;
+ int ret;
restart:
/*
@@ -286,9 +291,9 @@ restart:
* dropped and we can then restart the whole operation.
*/
down_read(&mdsc->snap_rwsem);
- old_realm = get_quota_realm(mdsc, old, QUOTA_GET_ANY, true);
- new_realm = get_quota_realm(mdsc, new, QUOTA_GET_ANY, false);
- if (PTR_ERR(new_realm) == -EAGAIN) {
+ get_quota_realm(mdsc, old, QUOTA_GET_ANY, &old_realm, true);
+ ret = get_quota_realm(mdsc, new, QUOTA_GET_ANY, &new_realm, false);
+ if (ret == -EAGAIN) {
up_read(&mdsc->snap_rwsem);
if (old_realm)
ceph_put_snap_realm(mdsc, old_realm);
@@ -492,8 +497,8 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
bool is_updated = false;
down_read(&mdsc->snap_rwsem);
- realm = get_quota_realm(mdsc, d_inode(fsc->sb->s_root),
- QUOTA_GET_MAX_BYTES, true);
+ get_quota_realm(mdsc, d_inode(fsc->sb->s_root), QUOTA_GET_MAX_BYTES,
+ &realm, true);
up_read(&mdsc->snap_rwsem);
if (!realm)
return false;
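
With the out-parameter signature, callers can tell "no quota realm" (return 0 with *realmp left NULL) apart from a transient race (-EAGAIN). A sketch of the resulting calling convention, under the same snap_rwsem locking as above (the function name is ours):

	/* Sketch of the post-refactor calling convention (illustrative). */
	static int example_quota_lookup(struct ceph_mds_client *mdsc,
					struct inode *inode)
	{
		struct ceph_snap_realm *realm;
		int ret;

		down_read(&mdsc->snap_rwsem);
		ret = get_quota_realm(mdsc, inode, QUOTA_GET_MAX_BYTES, &realm, false);
		up_read(&mdsc->snap_rwsem);
		if (ret)
			return ret;	/* -EAGAIN: realm tree changed under us */
		if (!realm)
			return 0;	/* no realm with quotas set */
		/* ... use realm ... */
		ceph_put_snap_realm(mdsc, realm);	/* caller owns the reference */
		return 0;
	}
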
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index fe0f64a0acb2..b63b4cd9b5b6 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -3,6 +3,7 @@
#define _FS_CEPH_SUPER_H
#include <linux/ceph/ceph_debug.h>
+#include <linux/ceph/osd_client.h>
#include <asm/unaligned.h>
#include <linux/backing-dev.h>
@@ -1254,8 +1255,6 @@ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had);
-extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
- int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
extern void __ceph_remove_capsnap(struct inode *inode,
@@ -1407,6 +1406,19 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci,
ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota);
}
+static inline int __ceph_sparse_read_ext_count(struct inode *inode, u64 len)
+{
+ int cnt = 0;
+
+ if (IS_ENCRYPTED(inode)) {
+ cnt = len >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ if (cnt > CEPH_SPARSE_EXT_ARRAY_INITIAL)
+ cnt = 0;
+ }
+
+ return cnt;
+}
+
extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg);
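
A worked example of the new helper, assuming the usual 4 KiB fscrypt block size (CEPH_FSCRYPT_BLOCK_SHIFT == 12):

	/*
	 * A 32 KiB sparse read of an encrypted inode gives
	 * 32768 >> 12 == 8 extents, so the extent map can be sized
	 * exactly. An unencrypted inode yields 0, and a count above
	 * CEPH_SPARSE_EXT_ARRAY_INITIAL also yields 0, letting
	 * ceph_alloc_sparse_ext_map() fall back to its default
	 * initial size and grow on demand.
	 */
	int extent_cnt = __ceph_sparse_read_ext_count(inode, 32768);	/* -> 8 */
	int ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
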
diff --git a/fs/coredump.c b/fs/coredump.c
index f258c17c1841..be6403b4b14b 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -872,6 +872,9 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
loff_t pos;
ssize_t n;
+ if (!page)
+ return 0;
+
if (cprm->to_skip) {
if (!__dump_skip(cprm, cprm->to_skip))
return 0;
@@ -884,7 +887,6 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
pos = file->f_pos;
bvec_set_page(&bvec, page, PAGE_SIZE, 0);
iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
- iov_iter_set_copy_mc(&iter);
n = __kernel_write_iter(cprm->file, &iter, &pos);
if (n != PAGE_SIZE)
return 0;
@@ -895,10 +897,44 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
return 1;
}
+/*
+ * If we might get machine checks from kernel accesses during the
+ * core dump, let's get those errors early rather than during the
+ * IO. This is not performance-critical enough to warrant having
+ * all the machine check logic in the iovec paths.
+ */
+#ifdef copy_mc_to_kernel
+
+#define dump_page_alloc() alloc_page(GFP_KERNEL)
+#define dump_page_free(x) __free_page(x)
+static struct page *dump_page_copy(struct page *src, struct page *dst)
+{
+ void *buf = kmap_local_page(src);
+ size_t left = copy_mc_to_kernel(page_address(dst), buf, PAGE_SIZE);
+ kunmap_local(buf);
+ return left ? NULL : dst;
+}
+
+#else
+
+/* We just want to return non-NULL; it's never used. */
+#define dump_page_alloc() ERR_PTR(-EINVAL)
+#define dump_page_free(x) ((void)(x))
+static inline struct page *dump_page_copy(struct page *src, struct page *dst)
+{
+ return src;
+}
+#endif
+
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len)
{
unsigned long addr;
+ struct page *dump_page;
+
+ dump_page = dump_page_alloc();
+ if (!dump_page)
+ return 0;
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
struct page *page;
@@ -912,14 +948,17 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
*/
page = get_dump_page(addr);
if (page) {
- int stop = !dump_emit_page(cprm, page);
+ int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
put_page(page);
- if (stop)
+ if (stop) {
+ dump_page_free(dump_page);
return 0;
+ }
} else {
dump_skip(cprm, PAGE_SIZE);
}
}
+ dump_page_free(dump_page);
return 1;
}
#endif
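
copy_mc_to_kernel() returns the number of bytes it failed to copy, so bouncing each page through a scratch page surfaces poisoned memory before any I/O is issued. The probe, reduced to its essentials (the helper name is ours):

	/*
	 * Probe a page for poison by bouncing it through a scratch page.
	 * A zero return from copy_mc_to_kernel() means the whole page
	 * read back cleanly; nonzero means a machine check was consumed
	 * mid-copy.
	 */
	static bool page_reads_cleanly(struct page *src, struct page *scratch)
	{
		void *from = kmap_local_page(src);
		size_t left = copy_mc_to_kernel(page_address(scratch), from,
						PAGE_SIZE);
		kunmap_local(from);
		return left == 0;
	}
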
diff --git a/fs/dcache.c b/fs/dcache.c
index b813528fb147..6ebccba33336 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3061,7 +3061,10 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
if (d_unhashed(dentry) || !dentry->d_inode)
return D_WALK_SKIP;
- dentry->d_lockref.count--;
+ if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
+ dentry->d_flags |= DCACHE_GENOCIDE;
+ dentry->d_lockref.count--;
+ }
}
return D_WALK_CONTINUE;
}
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index 169252e6dc46..f7206158ee81 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -38,7 +38,7 @@ struct efivar_entry {
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
struct list_head *),
- void *data, bool duplicates, struct list_head *head);
+ void *data, struct list_head *head);
int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 6038dd39367a..bb14462f6d99 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -343,12 +343,7 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
- err = efivar_init(efivarfs_callback, (void *)sb, true,
- &sfi->efivarfs_list);
- if (err)
- efivar_entry_iter(efivarfs_destroy, &sfi->efivarfs_list, NULL);
-
- return err;
+ return efivar_init(efivarfs_callback, sb, &sfi->efivarfs_list);
}
static int efivarfs_get_tree(struct fs_context *fc)
diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
index 114ff0fd4e55..4d722af1014f 100644
--- a/fs/efivarfs/vars.c
+++ b/fs/efivarfs/vars.c
@@ -361,7 +361,6 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
* efivar_init - build the initial list of EFI variables
* @func: callback function to invoke for every variable
* @data: function-specific data to pass to @func
- * @duplicates: error if we encounter duplicates on @head?
* @head: initialised head of variable list
*
* Get every EFI variable from the firmware and invoke @func. @func
@@ -371,9 +370,9 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
*/
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
struct list_head *),
- void *data, bool duplicates, struct list_head *head)
+ void *data, struct list_head *head)
{
- unsigned long variable_name_size = 1024;
+ unsigned long variable_name_size = 512;
efi_char16_t *variable_name;
efi_status_t status;
efi_guid_t vendor_guid;
@@ -390,12 +389,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
goto free;
/*
- * Per EFI spec, the maximum storage allocated for both
- * the variable name and variable data is 1024 bytes.
+ * A small set of old UEFI implementations reject sizes
+	 * above a certain threshold; the lowest seen in the wild
+ * is 512.
*/
do {
- variable_name_size = 1024;
+ variable_name_size = 512;
status = efivar_get_next_variable(&variable_name_size,
variable_name,
@@ -413,8 +413,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
* we'll ever see a different variable name,
* and may end up looping here forever.
*/
- if (duplicates &&
- variable_is_present(variable_name, &vendor_guid,
+ if (variable_is_present(variable_name, &vendor_guid,
head)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
@@ -432,9 +431,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
break;
case EFI_NOT_FOUND:
break;
+ case EFI_BUFFER_TOO_SMALL:
+ pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
+ variable_name_size);
+ status = EFI_NOT_FOUND;
+ break;
default:
- printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
- status);
+ pr_warn("efivars: get_next_variable: status=%lx\n", status);
status = EFI_NOT_FOUND;
break;
}
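
The size argument of GetNextVariableName is in/out and is clobbered by the firmware on every call, which is why the reset to 512 sits inside the loop. A sketch of the enumeration contract using the same wrapper:

	/*
	 * Sketch: enumerate all variables. The buffer size must be
	 * reset before every call; with the fixed 512-byte buffer above,
	 * an oversized name now ends the walk with a warning instead of
	 * looping forever.
	 */
	efi_char16_t name[512 / sizeof(efi_char16_t)] = { 0 };	/* empty name starts the walk */
	efi_guid_t guid = {};
	unsigned long size;
	efi_status_t status;

	do {
		size = sizeof(name);	/* reset: firmware overwrites it */
		status = efivar_get_next_variable(&size, name, &guid);
	} while (status == EFI_SUCCESS);
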
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index 1d318f85232d..fffd3919343e 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -114,8 +114,11 @@ config EROFS_FS_ZIP_DEFLATE
config EROFS_FS_ONDEMAND
bool "EROFS fscache-based on-demand read support"
- depends on CACHEFILES_ONDEMAND && (EROFS_FS=m && FSCACHE || EROFS_FS=y && FSCACHE=y)
- default n
+ depends on EROFS_FS
+ select NETFS_SUPPORT
+ select FSCACHE
+ select CACHEFILES
+ select CACHEFILES_ONDEMAND
help
This permits EROFS to use fscache-backed data blobs with on-demand
read support.
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 279933e007d2..7cc5841577b2 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -11,13 +11,12 @@
struct z_erofs_decompress_req {
struct super_block *sb;
struct page **in, **out;
-
unsigned short pageofs_in, pageofs_out;
unsigned int inputsize, outputsize;
- /* indicate the algorithm will be used for decompression */
- unsigned int alg;
+ unsigned int alg; /* the algorithm for decompression */
bool inplace_io, partial_decoding, fillgaps;
+ gfp_t gfp; /* allocation flags for extra temporary buffers */
};
struct z_erofs_decompressor {
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 1d65b9f60a39..d4cee95af14c 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -111,8 +111,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
victim = availables[--top];
get_page(victim);
} else {
- victim = erofs_allocpage(pagepool,
- GFP_KERNEL | __GFP_NOFAIL);
+ victim = erofs_allocpage(pagepool, rq->gfp);
+ if (!victim)
+ return -ENOMEM;
set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
}
rq->out[i] = victim;
@@ -408,7 +409,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
int size, ret = 0;
if (!erofs_sb_has_compr_cfgs(sbi)) {
- sbi->available_compr_algs = Z_EROFS_COMPRESSION_LZ4;
+ sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
}
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
index 4a64a9c91dd3..b98872058abe 100644
--- a/fs/erofs/decompressor_deflate.c
+++ b/fs/erofs/decompressor_deflate.c
@@ -95,7 +95,7 @@ int z_erofs_load_deflate_config(struct super_block *sb,
}
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
+ struct page **pgpl)
{
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -158,8 +158,12 @@ again:
strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
outsz -= strm->z.avail_out;
if (!rq->out[no]) {
- rq->out[no] = erofs_allocpage(pagepool,
- GFP_KERNEL | __GFP_NOFAIL);
+ rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+ if (!rq->out[no]) {
+ kout = NULL;
+ err = -ENOMEM;
+ break;
+ }
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
@@ -211,8 +215,11 @@ again:
DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
rq->in[j]));
- tmppage = erofs_allocpage(pagepool,
- GFP_KERNEL | __GFP_NOFAIL);
+ tmppage = erofs_allocpage(pgpl, rq->gfp);
+ if (!tmppage) {
+ err = -ENOMEM;
+ goto failed;
+ }
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
@@ -230,7 +237,7 @@ again:
break;
}
}
-
+failed:
if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
err = -EIO;
if (kout)
diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
index 2dd14f99c1dc..6ca357d83cfa 100644
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -148,7 +148,7 @@ again:
}
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
+ struct page **pgpl)
{
const unsigned int nrpages_out =
PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -215,8 +215,11 @@ again:
PAGE_SIZE - pageofs);
outlen -= strm->buf.out_size;
if (!rq->out[no] && rq->fillgaps) { /* deduped */
- rq->out[no] = erofs_allocpage(pagepool,
- GFP_KERNEL | __GFP_NOFAIL);
+ rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+ if (!rq->out[no]) {
+ err = -ENOMEM;
+ break;
+ }
set_page_private(rq->out[no],
Z_EROFS_SHORTLIVED_PAGE);
}
@@ -258,8 +261,11 @@ again:
DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
rq->in[j]));
- tmppage = erofs_allocpage(pagepool,
- GFP_KERNEL | __GFP_NOFAIL);
+ tmppage = erofs_allocpage(pgpl, rq->gfp);
+ if (!tmppage) {
+ err = -ENOMEM;
+ goto failed;
+ }
set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
copy_highpage(tmppage, rq->in[j]);
rq->in[j] = tmppage;
@@ -277,6 +283,7 @@ again:
break;
}
}
+failed:
if (no < nrpages_out && strm->buf.out)
kunmap(rq->out[no]);
if (ni < nrpages_in)
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 87ff35bff8d5..89a7c2453aae 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -165,10 +165,10 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
int ret;
- struct erofs_fscache *ctx = folio_mapping(folio)->host->i_private;
+ struct erofs_fscache *ctx = folio->mapping->host->i_private;
struct erofs_fscache_request *req;
- req = erofs_fscache_req_alloc(folio_mapping(folio),
+ req = erofs_fscache_req_alloc(folio->mapping,
folio_pos(folio), folio_size(folio));
if (IS_ERR(req)) {
folio_unlock(folio);
@@ -276,7 +276,7 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
struct erofs_fscache_request *req;
int ret;
- req = erofs_fscache_req_alloc(folio_mapping(folio),
+ req = erofs_fscache_req_alloc(folio->mapping,
folio_pos(folio), folio_size(folio));
if (IS_ERR(req)) {
folio_unlock(folio);
@@ -381,11 +381,12 @@ static int erofs_fscache_init_domain(struct super_block *sb)
goto out;
if (!erofs_pseudo_mnt) {
- erofs_pseudo_mnt = kern_mount(&erofs_fs_type);
- if (IS_ERR(erofs_pseudo_mnt)) {
- err = PTR_ERR(erofs_pseudo_mnt);
+ struct vfsmount *mnt = kern_mount(&erofs_fs_type);
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
goto out;
}
+ erofs_pseudo_mnt = mnt;
}
domain->volume = sbi->volume;
@@ -459,7 +460,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
- mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
inode->i_blkbits = EROFS_SB(sb)->blkszbits;
inode->i_private = ctx;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 3d616dea55dc..36e638e8b53a 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -60,7 +60,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
} else {
const unsigned int gotten = sb->s_blocksize - *ofs;
- copied = kmalloc(vi->inode_isize, GFP_NOFS);
+ copied = kmalloc(vi->inode_isize, GFP_KERNEL);
if (!copied) {
err = -ENOMEM;
goto err_out;
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
index d4f631d39f0f..f0110a78acb2 100644
--- a/fs/erofs/namei.c
+++ b/fs/erofs/namei.c
@@ -130,24 +130,24 @@ static void *erofs_find_target_block(struct erofs_buf *target,
/* string comparison without already matched prefix */
diff = erofs_dirnamecmp(name, &dname, &matched);
- if (!diff) {
- *_ndirents = 0;
- goto out;
- } else if (diff > 0) {
- head = mid + 1;
- startprfx = matched;
-
- if (!IS_ERR(candidate))
- erofs_put_metabuf(target);
- *target = buf;
- candidate = de;
- *_ndirents = ndirents;
- } else {
+ if (diff < 0) {
erofs_put_metabuf(&buf);
-
back = mid - 1;
endprfx = matched;
+ continue;
+ }
+
+ if (!IS_ERR(candidate))
+ erofs_put_metabuf(target);
+ *target = buf;
+ if (!diff) {
+ *_ndirents = 0;
+ return de;
}
+ head = mid + 1;
+ startprfx = matched;
+ candidate = de;
+ *_ndirents = ndirents;
continue;
}
out: /* free if the candidate is valid */
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 5dea308764b4..e146d09151af 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -81,7 +81,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
repeat:
xa_lock(&sbi->managed_pslots);
pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
- NULL, grp, GFP_NOFS);
+ NULL, grp, GFP_KERNEL);
if (pre) {
if (xa_is_err(pre)) {
pre = ERR_PTR(xa_err(pre));
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 692c0c39be63..ff0aa72b0db3 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -82,6 +82,9 @@ struct z_erofs_pcluster {
/* L: indicate several pageofs_outs or not */
bool multibases;
+ /* L: whether extra buffer allocations are best-effort */
+ bool besteffort;
+
/* A: compressed bvecs (can be cached or inplaced pages) */
struct z_erofs_bvec compressed_bvecs[];
};
@@ -230,7 +233,7 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
struct page *nextpage = *candidate_bvpage;
if (!nextpage) {
- nextpage = erofs_allocpage(pagepool, GFP_NOFS);
+ nextpage = erofs_allocpage(pagepool, GFP_KERNEL);
if (!nextpage)
return -ENOMEM;
set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
@@ -302,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
if (nrpages > pcs->maxpages)
continue;
- pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
+ pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
if (!pcl)
return ERR_PTR(-ENOMEM);
pcl->pclustersize = size;
@@ -563,21 +566,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
unsigned int i;
- if (i_blocksize(fe->inode) != PAGE_SIZE)
- return;
- if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
+ if (i_blocksize(fe->inode) != PAGE_SIZE ||
+ fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
return;
for (i = 0; i < pclusterpages; ++i) {
struct page *page, *newpage;
void *t; /* mark pages just found for debugging */
- /* the compressed page was loaded before */
+ /* Inaccurate check w/o locking to avoid unneeded lookups */
if (READ_ONCE(pcl->compressed_bvecs[i].page))
continue;
page = find_get_page(mc, pcl->obj.index + i);
-
if (page) {
t = (void *)((unsigned long)page | 1);
newpage = NULL;
@@ -597,9 +598,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
t = (void *)((unsigned long)newpage | 1);
}
-
- if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
+ spin_lock(&pcl->obj.lockref.lock);
+ if (!pcl->compressed_bvecs[i].page) {
+ pcl->compressed_bvecs[i].page = t;
+ spin_unlock(&pcl->obj.lockref.lock);
continue;
+ }
+ spin_unlock(&pcl->obj.lockref.lock);
if (page)
put_page(page);
@@ -694,7 +699,7 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio,
DBG_BUGON(stop > folio_size(folio) || stop < length);
if (offset == 0 && stop == folio_size(folio))
- while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+ while (!z_erofs_cache_release_folio(folio, 0))
cond_resched();
}
@@ -713,36 +718,30 @@ int erofs_init_managed_cache(struct super_block *sb)
set_nlink(inode, 1);
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &z_erofs_cache_aops;
- mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
EROFS_SB(sb)->managed_cache = inode;
return 0;
}
-static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
- struct z_erofs_bvec *bvec)
-{
- struct z_erofs_pcluster *const pcl = fe->pcl;
-
- while (fe->icur > 0) {
- if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
- NULL, bvec->page)) {
- pcl->compressed_bvecs[fe->icur] = *bvec;
- return true;
- }
- }
- return false;
-}
-
 /* callers must hold the pcluster lock */
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
struct z_erofs_bvec *bvec, bool exclusive)
{
+ struct z_erofs_pcluster *pcl = fe->pcl;
int ret;
if (exclusive) {
/* give priority for inplaceio to use file pages first */
- if (z_erofs_try_inplace_io(fe, bvec))
+ spin_lock(&pcl->obj.lockref.lock);
+ while (fe->icur > 0) {
+ if (pcl->compressed_bvecs[--fe->icur].page)
+ continue;
+ pcl->compressed_bvecs[fe->icur] = *bvec;
+ spin_unlock(&pcl->obj.lockref.lock);
return 0;
+ }
+ spin_unlock(&pcl->obj.lockref.lock);
+
/* otherwise, check if it can be used as a bvpage */
if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
!fe->candidate_bvpage)
@@ -964,7 +963,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
}
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
- struct page *page)
+ struct page *page, bool ra)
{
struct inode *const inode = fe->inode;
struct erofs_map_blocks *const map = &fe->map;
@@ -1014,6 +1013,7 @@ repeat:
err = z_erofs_pcluster_begin(fe);
if (err)
goto out;
+ fe->pcl->besteffort |= !ra;
}
/*
@@ -1280,6 +1280,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
.inplace_io = overlapped,
.partial_decoding = pcl->partial,
.fillgaps = pcl->multibases,
+ .gfp = pcl->besteffort ?
+ GFP_KERNEL | __GFP_NOFAIL :
+ GFP_NOWAIT | __GFP_NORETRY
}, be->pagepool);
/* must handle all compressed pages before actual file pages */
@@ -1322,6 +1325,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
pcl->length = 0;
pcl->partial = true;
pcl->multibases = false;
+ pcl->besteffort = false;
pcl->bvset.nextpage = NULL;
pcl->vcnt = 0;
@@ -1423,23 +1427,26 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
{
gfp_t gfp = mapping_gfp_mask(mc);
bool tocache = false;
- struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr;
+ struct z_erofs_bvec zbv;
struct address_space *mapping;
- struct page *page, *oldpage;
+ struct page *page;
int justfound, bs = i_blocksize(f->inode);
/* Except for inplace pages, the entire page can be used for I/Os */
bvec->bv_offset = 0;
bvec->bv_len = PAGE_SIZE;
repeat:
- oldpage = READ_ONCE(zbv->page);
- if (!oldpage)
+ spin_lock(&pcl->obj.lockref.lock);
+ zbv = pcl->compressed_bvecs[nr];
+ page = zbv.page;
+ justfound = (unsigned long)page & 1UL;
+ page = (struct page *)((unsigned long)page & ~1UL);
+ pcl->compressed_bvecs[nr].page = page;
+ spin_unlock(&pcl->obj.lockref.lock);
+ if (!page)
goto out_allocpage;
- justfound = (unsigned long)oldpage & 1UL;
- page = (struct page *)((unsigned long)oldpage & ~1UL);
bvec->bv_page = page;
-
DBG_BUGON(z_erofs_is_shortlived_page(page));
/*
* Handle preallocated cached pages. We tried to allocate such pages
@@ -1448,7 +1455,6 @@ repeat:
*/
if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
set_page_private(page, 0);
- WRITE_ONCE(zbv->page, page);
tocache = true;
goto out_tocache;
}
@@ -1459,9 +1465,9 @@ repeat:
* therefore it is impossible for `mapping` to be NULL.
*/
if (mapping && mapping != mc) {
- if (zbv->offset < 0)
- bvec->bv_offset = round_up(-zbv->offset, bs);
- bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset;
+ if (zbv.offset < 0)
+ bvec->bv_offset = round_up(-zbv.offset, bs);
+ bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
return;
}
@@ -1471,7 +1477,6 @@ repeat:
/* the cached page is still in managed cache */
if (page->mapping == mc) {
- WRITE_ONCE(zbv->page, page);
/*
* The cached page is still available but without a valid
* `->private` pcluster hint. Let's reconnect them.
@@ -1503,11 +1508,15 @@ repeat:
put_page(page);
out_allocpage:
page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
- if (oldpage != cmpxchg(&zbv->page, oldpage, page)) {
+ spin_lock(&pcl->obj.lockref.lock);
+ if (pcl->compressed_bvecs[nr].page) {
erofs_pagepool_add(&f->pagepool, page);
+ spin_unlock(&pcl->obj.lockref.lock);
cond_resched();
goto repeat;
}
+ pcl->compressed_bvecs[nr].page = page;
+ spin_unlock(&pcl->obj.lockref.lock);
bvec->bv_page = page;
out_tocache:
if (!tocache || bs != PAGE_SIZE ||
@@ -1685,6 +1694,7 @@ submit_bio_retry:
if (cur + bvec.bv_len > end)
bvec.bv_len = end - cur;
+ DBG_BUGON(bvec.bv_len < sb->s_blocksize);
if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
bvec.bv_offset))
goto submit_bio_retry;
@@ -1785,7 +1795,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
if (PageUptodate(page))
unlock_page(page);
else
- (void)z_erofs_do_read_page(f, page);
+ (void)z_erofs_do_read_page(f, page, !!rac);
put_page(page);
}
@@ -1806,7 +1816,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
z_erofs_pcluster_readmore(&f, NULL, true);
- err = z_erofs_do_read_page(&f, &folio->page);
+ err = z_erofs_do_read_page(&f, &folio->page, false);
z_erofs_pcluster_readmore(&f, NULL, false);
z_erofs_pcluster_end(&f);
@@ -1847,7 +1857,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
folio = head;
head = folio_get_private(folio);
- err = z_erofs_do_read_page(&f, &folio->page);
+ err = z_erofs_do_read_page(&f, &folio->page, true);
if (err && err != -EINTR)
erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
folio->index, EROFS_I(inode)->nid);
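
The new gfp field encodes a per-pcluster allocation policy: pclusters touched by a synchronous read (marked besteffort) must make progress and keep __GFP_NOFAIL, while pure-readahead pclusters may fail fast and be retried later. Reduced to a helper (the name is ours):

	/* Illustrative helper: the policy behind the .gfp selection above. */
	static gfp_t z_erofs_rq_gfp(bool besteffort)
	{
		/* sync reads must make progress; readahead may fail fast */
		return besteffort ? GFP_KERNEL | __GFP_NOFAIL
				  : GFP_NOWAIT | __GFP_NORETRY;
	}
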
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 9753875e41cb..e313c936351d 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -454,7 +454,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
.map = map,
};
int err = 0;
- unsigned int lclusterbits, endoff;
+ unsigned int lclusterbits, endoff, afmt;
unsigned long initial_lcn;
unsigned long long ofs, end;
@@ -543,17 +543,20 @@ static int z_erofs_do_map_blocks(struct inode *inode,
err = -EFSCORRUPTED;
goto unmap_out;
}
- if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
- map->m_algorithmformat =
- Z_EROFS_COMPRESSION_INTERLACED;
- else
- map->m_algorithmformat =
- Z_EROFS_COMPRESSION_SHIFTED;
- } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
- map->m_algorithmformat = vi->z_algorithmtype[1];
+ afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
+ Z_EROFS_COMPRESSION_INTERLACED :
+ Z_EROFS_COMPRESSION_SHIFTED;
} else {
- map->m_algorithmformat = vi->z_algorithmtype[0];
+ afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
+ vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+ if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+ erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
+ afmt, vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
}
+ map->m_algorithmformat = afmt;
if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
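
Note how this validation pairs with the decompressor.c fix above: available_compr_algs is a bitmask indexed by algorithm format, and LZ4 is format 0, so the LZ4-only default must be 1 << Z_EROFS_COMPRESSION_LZ4 (0x1) rather than the raw enum value 0. The membership test then reads:

	/* Reject a map entry naming an algorithm this image never declared. */
	if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt)))
		return -EFSCORRUPTED;	/* on-disk metadata is inconsistent */
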
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3534d36a1474..df2ed3af486e 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -37,6 +37,7 @@
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
+#include <linux/capability.h>
#include <net/busy_poll.h>
/*
@@ -227,6 +228,11 @@ struct eventpoll {
#ifdef CONFIG_NET_RX_BUSY_POLL
/* used to track busy poll napi_id */
unsigned int napi_id;
+ /* busy poll timeout */
+ u32 busy_poll_usecs;
+ /* busy poll packet budget */
+ u16 busy_poll_budget;
+ bool prefer_busy_poll;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -387,11 +393,41 @@ static inline int ep_events_available(struct eventpoll *ep)
}
#ifdef CONFIG_NET_RX_BUSY_POLL
+/**
+ * busy_loop_ep_timeout - check if busy poll has timed out. The timeout value
+ * from the epoll instance ep is preferred, but if it is not set fall back to
+ * the system-wide global via busy_loop_timeout.
+ *
+ * @start_time: The start time used to compute the remaining time until timeout.
+ * @ep: Pointer to the eventpoll context.
+ *
+ * Return: true if the timeout has expired, false otherwise.
+ */
+static bool busy_loop_ep_timeout(unsigned long start_time,
+ struct eventpoll *ep)
+{
+ unsigned long bp_usec = READ_ONCE(ep->busy_poll_usecs);
+
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
+
+ return time_after(now, end_time);
+ } else {
+ return busy_loop_timeout(start_time);
+ }
+}
+
+static bool ep_busy_loop_on(struct eventpoll *ep)
+{
+ return !!ep->busy_poll_usecs || net_busy_loop_on();
+}
+
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
struct eventpoll *ep = p;
- return ep_events_available(ep) || busy_loop_timeout(start_time);
+ return ep_events_available(ep) || busy_loop_ep_timeout(start_time, ep);
}
/*
@@ -403,10 +439,15 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time)
static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
+ u16 budget = READ_ONCE(ep->busy_poll_budget);
+ bool prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
+
+ if (!budget)
+ budget = BUSY_POLL_BUDGET;
- if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) {
- napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false,
- BUSY_POLL_BUDGET);
+ if (napi_id >= MIN_NAPI_ID && ep_busy_loop_on(ep)) {
+ napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end,
+ ep, prefer_busy_poll, budget);
if (ep_events_available(ep))
return true;
/*
@@ -425,12 +466,12 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
*/
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
- struct eventpoll *ep;
+ struct eventpoll *ep = epi->ep;
unsigned int napi_id;
struct socket *sock;
struct sock *sk;
- if (!net_busy_loop_on())
+ if (!ep_busy_loop_on(ep))
return;
sock = sock_from_file(epi->ffd.file);
@@ -442,7 +483,6 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
return;
napi_id = READ_ONCE(sk->sk_napi_id);
- ep = epi->ep;
/* Non-NAPI IDs can be rejected
* or
@@ -455,6 +495,49 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
ep->napi_id = napi_id;
}
+static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct eventpoll *ep = file->private_data;
+ void __user *uarg = (void __user *)arg;
+ struct epoll_params epoll_params;
+
+ switch (cmd) {
+ case EPIOCSPARAMS:
+ if (copy_from_user(&epoll_params, uarg, sizeof(epoll_params)))
+ return -EFAULT;
+
+ /* pad byte must be zero */
+ if (epoll_params.__pad)
+ return -EINVAL;
+
+ if (epoll_params.busy_poll_usecs > S32_MAX)
+ return -EINVAL;
+
+ if (epoll_params.prefer_busy_poll > 1)
+ return -EINVAL;
+
+ if (epoll_params.busy_poll_budget > NAPI_POLL_WEIGHT &&
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ WRITE_ONCE(ep->busy_poll_usecs, epoll_params.busy_poll_usecs);
+ WRITE_ONCE(ep->busy_poll_budget, epoll_params.busy_poll_budget);
+ WRITE_ONCE(ep->prefer_busy_poll, epoll_params.prefer_busy_poll);
+ return 0;
+ case EPIOCGPARAMS:
+ memset(&epoll_params, 0, sizeof(epoll_params));
+ epoll_params.busy_poll_usecs = READ_ONCE(ep->busy_poll_usecs);
+ epoll_params.busy_poll_budget = READ_ONCE(ep->busy_poll_budget);
+ epoll_params.prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
+ if (copy_to_user(uarg, &epoll_params, sizeof(epoll_params)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
#else
static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
@@ -466,6 +549,12 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}
+static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
@@ -825,6 +914,27 @@ static void ep_clear_and_put(struct eventpoll *ep)
ep_free(ep);
}
+static long ep_eventpoll_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ if (!is_file_epoll(file))
+ return -EINVAL;
+
+ switch (cmd) {
+ case EPIOCSPARAMS:
+ case EPIOCGPARAMS:
+ ret = ep_eventpoll_bp_ioctl(file, cmd, arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
struct eventpoll *ep = file->private_data;
@@ -931,6 +1041,8 @@ static const struct file_operations eventpoll_fops = {
.release = ep_eventpoll_release,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
+ .unlocked_ioctl = ep_eventpoll_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/*
@@ -2058,6 +2170,11 @@ static int do_epoll_create(int flags)
error = PTR_ERR(file);
goto out_free_fd;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ep->busy_poll_usecs = 0;
+ ep->busy_poll_budget = 0;
+ ep->prefer_busy_poll = false;
+#endif
ep->file = file;
fd_install(fd, file);
return fd;
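
From userspace, the per-instance knobs are driven through the two new ioctls on an epoll fd (from epoll_create1()). A minimal sketch, assuming the struct epoll_params UAPI layout implied by the handler above (u32 usecs, u16 budget, u8 prefer flag, one zero pad byte, from <linux/eventpoll.h> on kernels carrying this patch):

	#include <sys/ioctl.h>
	#include <linux/eventpoll.h>

	static int set_busy_poll(int epfd)
	{
		struct epoll_params p = {
			.busy_poll_usecs  = 64,	/* per-instance timeout (<= S32_MAX) */
			.busy_poll_budget = 8,	/* > NAPI_POLL_WEIGHT needs CAP_NET_ADMIN */
			.prefer_busy_poll = 1,
		};

		return ioctl(epfd, EPIOCSPARAMS, &p);	/* 0 on success, -1 + errno on error */
	}
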
diff --git a/fs/exec.c b/fs/exec.c
index 73e4045df271..af4fbb61cd53 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -128,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
struct filename *tmp = getname(library);
int error = PTR_ERR(tmp);
static const struct open_flags uselib_flags = {
- .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
+ .open_flag = O_LARGEFILE | O_RDONLY,
.acc_mode = MAY_READ | MAY_EXEC,
.intent = LOOKUP_OPEN,
.lookup_flags = LOOKUP_FOLLOW,
@@ -904,6 +904,10 @@ EXPORT_SYMBOL(transfer_args_to_stack);
#endif /* CONFIG_MMU */
+/*
+ * On success, caller must call do_close_execat() on the returned
+ * struct file to close it.
+ */
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
struct file *file;
@@ -948,6 +952,17 @@ exit:
return ERR_PTR(err);
}
+/**
+ * open_exec - Open a path name for execution
+ *
+ * @name: path name to open with the intent of executing it.
+ *
+ * Returns ERR_PTR on failure or allocated struct file on success.
+ *
+ * As this is a wrapper for the internal do_open_execat(), callers
+ * must call allow_write_access() before fput() on release. Also see
+ * do_close_execat().
+ */
struct file *open_exec(const char *name)
{
struct filename *filename = getname_kernel(name);
@@ -1409,6 +1424,9 @@ int begin_new_exec(struct linux_binprm * bprm)
out_unlock:
up_write(&me->signal->exec_update_lock);
+ if (!bprm->cred)
+ mutex_unlock(&me->signal->cred_guard_mutex);
+
out:
return retval;
}
@@ -1484,6 +1502,15 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
return -ENOMEM;
}
+/* Matches do_open_execat() */
+static void do_close_execat(struct file *file)
+{
+ if (!file)
+ return;
+ allow_write_access(file);
+ fput(file);
+}
+
static void free_bprm(struct linux_binprm *bprm)
{
if (bprm->mm) {
@@ -1495,10 +1522,7 @@ static void free_bprm(struct linux_binprm *bprm)
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
- if (bprm->file) {
- allow_write_access(bprm->file);
- fput(bprm->file);
- }
+ do_close_execat(bprm->file);
if (bprm->executable)
fput(bprm->executable);
/* If a binfmt changed the interp, free it. */
@@ -1508,12 +1532,23 @@ static void free_bprm(struct linux_binprm *bprm)
kfree(bprm);
}
-static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
+static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int flags)
{
- struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+ struct linux_binprm *bprm;
+ struct file *file;
int retval = -ENOMEM;
- if (!bprm)
- goto out;
+
+ file = do_open_execat(fd, filename, flags);
+ if (IS_ERR(file))
+ return ERR_CAST(file);
+
+ bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
+ if (!bprm) {
+ do_close_execat(file);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bprm->file = file;
if (fd == AT_FDCWD || filename->name[0] == '/') {
bprm->filename = filename->name;
@@ -1526,18 +1561,28 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
if (!bprm->fdpath)
goto out_free;
+ /*
+ * Record that a name derived from an O_CLOEXEC fd will be
+ * inaccessible after exec. This allows the code in exec to
+ * choose to fail when the executable is not mmapped into the
+ * interpreter and an open file descriptor is not passed to
+ * the interpreter. This makes for a better user experience
+ * than having the interpreter start and then immediately fail
+ * when it finds the executable is inaccessible.
+ */
+ if (get_close_on_exec(fd))
+ bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
+
bprm->filename = bprm->fdpath;
}
bprm->interp = bprm->filename;
retval = bprm_mm_init(bprm);
- if (retval)
- goto out_free;
- return bprm;
+ if (!retval)
+ return bprm;
out_free:
free_bprm(bprm);
-out:
return ERR_PTR(retval);
}
@@ -1588,6 +1633,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
+ /* "users" and "in_exec" locked for copy_fs() */
if (p->fs->users > n_fs)
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
@@ -1804,13 +1850,8 @@ static int exec_binprm(struct linux_binprm *bprm)
return 0;
}
-/*
- * sys_execve() executes a new program.
- */
-static int bprm_execve(struct linux_binprm *bprm,
- int fd, struct filename *filename, int flags)
+static int bprm_execve(struct linux_binprm *bprm)
{
- struct file *file;
int retval;
retval = prepare_bprm_creds(bprm);
@@ -1826,26 +1867,8 @@ static int bprm_execve(struct linux_binprm *bprm,
current->in_execve = 1;
sched_mm_cid_before_execve(current);
- file = do_open_execat(fd, filename, flags);
- retval = PTR_ERR(file);
- if (IS_ERR(file))
- goto out_unmark;
-
sched_exec();
- bprm->file = file;
- /*
- * Record that a name derived from an O_CLOEXEC fd will be
- * inaccessible after exec. This allows the code in exec to
- * choose to fail when the executable is not mmaped into the
- * interpreter and an open file descriptor is not passed to
- * the interpreter. This makes for a better user experience
- * than having the interpreter start and then immediately fail
- * when it finds the executable is inaccessible.
- */
- if (bprm->fdpath && get_close_on_exec(fd))
- bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
-
/* Set the unchanging part of bprm->cred */
retval = security_bprm_creds_for_exec(bprm);
if (retval)
@@ -1875,7 +1898,6 @@ out:
if (bprm->point_of_no_return && !fatal_signal_pending(current))
force_fatal_sig(SIGSEGV);
-out_unmark:
sched_mm_cid_after_execve(current);
current->fs->in_exec = 0;
current->in_execve = 0;
@@ -1910,7 +1932,7 @@ static int do_execveat_common(int fd, struct filename *filename,
* further execve() calls fail. */
current->flags &= ~PF_NPROC_EXCEEDED;
- bprm = alloc_bprm(fd, filename);
+ bprm = alloc_bprm(fd, filename, flags);
if (IS_ERR(bprm)) {
retval = PTR_ERR(bprm);
goto out_ret;
@@ -1959,7 +1981,7 @@ static int do_execveat_common(int fd, struct filename *filename,
bprm->argc = 1;
}
- retval = bprm_execve(bprm, fd, filename, flags);
+ retval = bprm_execve(bprm);
out_free:
free_bprm(bprm);
@@ -1984,7 +2006,7 @@ int kernel_execve(const char *kernel_filename,
if (IS_ERR(filename))
return PTR_ERR(filename);
- bprm = alloc_bprm(fd, filename);
+ bprm = alloc_bprm(fd, filename, 0);
if (IS_ERR(bprm)) {
retval = PTR_ERR(bprm);
goto out_ret;
@@ -2019,7 +2041,7 @@ int kernel_execve(const char *kernel_filename,
if (retval < 0)
goto out_free;
- retval = bprm_execve(bprm, fd, filename, 0);
+ retval = bprm_execve(bprm);
out_free:
free_bprm(bprm);
out_ret:
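
The fs/exec.c changes above move the do_open_execat() call into alloc_bprm(), so the bprm owns its file from the moment it exists, and the new do_close_execat() helper is the single teardown that matches do_open_execat(). A rough userspace analogue of this constructor-owns-resource refactor (illustrative only, assumed names):

#include <stdio.h>
#include <stdlib.h>

struct bprm {
	FILE *file;
};

static void close_exec_file(FILE *f)	/* mirrors do_close_execat() */
{
	if (f)
		fclose(f);
}

static struct bprm *alloc_bprm(const char *path)
{
	FILE *f = fopen(path, "re");	/* 'e' = close-on-exec (glibc) */
	struct bprm *b;

	if (!f)
		return NULL;
	b = calloc(1, sizeof(*b));
	if (!b) {
		close_exec_file(f);	/* undo the open on failure */
		return NULL;
	}
	b->file = f;	/* the object now owns the file */
	return b;
}

static void free_bprm(struct bprm *b)
{
	close_exec_file(b->file);	/* single matching teardown */
	free(b);
}
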
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 9474cd50da6d..361595433480 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -275,6 +275,7 @@ struct exfat_sb_info {
spinlock_t inode_hash_lock;
struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
+ struct rcu_head rcu;
};
#define EXFAT_CACHE_VALID 0
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index d25a96a148af..cc00f1a7a1e1 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -35,13 +35,18 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
if (new_num_clusters == num_clusters)
goto out;
- exfat_chain_set(&clu, ei->start_clu, num_clusters, ei->flags);
- ret = exfat_find_last_cluster(sb, &clu, &last_clu);
- if (ret)
- return ret;
+ if (num_clusters) {
+ exfat_chain_set(&clu, ei->start_clu, num_clusters, ei->flags);
+ ret = exfat_find_last_cluster(sb, &clu, &last_clu);
+ if (ret)
+ return ret;
+
+ clu.dir = last_clu + 1;
+ } else {
+ last_clu = EXFAT_EOF_CLUSTER;
+ clu.dir = EXFAT_EOF_CLUSTER;
+ }
- clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ?
- EXFAT_EOF_CLUSTER : last_clu + 1;
clu.size = 0;
clu.flags = ei->flags;
@@ -51,17 +56,19 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
return ret;
/* Append new clusters to chain */
- if (clu.flags != ei->flags) {
- exfat_chain_cont_cluster(sb, ei->start_clu, num_clusters);
- ei->flags = ALLOC_FAT_CHAIN;
- }
- if (clu.flags == ALLOC_FAT_CHAIN)
- if (exfat_ent_set(sb, last_clu, clu.dir))
- goto free_clu;
-
- if (num_clusters == 0)
+ if (num_clusters) {
+ if (clu.flags != ei->flags)
+ if (exfat_chain_cont_cluster(sb, ei->start_clu, num_clusters))
+ goto free_clu;
+
+ if (clu.flags == ALLOC_FAT_CHAIN)
+ if (exfat_ent_set(sb, last_clu, clu.dir))
+ goto free_clu;
+ } else
ei->start_clu = clu.dir;
+ ei->flags = clu.flags;
+
out:
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
/* Expanded range not zeroed, do not update valid_size */
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 522edcbb2ce4..0687f952956c 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -501,7 +501,7 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
struct inode *inode = mapping->host;
struct exfat_inode_info *ei = EXFAT_I(inode);
loff_t pos = iocb->ki_pos;
- loff_t size = iocb->ki_pos + iov_iter_count(iter);
+ loff_t size = pos + iov_iter_count(iter);
int rw = iov_iter_rw(iter);
ssize_t ret;
@@ -525,11 +525,10 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
*/
ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
if (ret < 0) {
- if (rw == WRITE)
+ if (rw == WRITE && ret != -EIOCBQUEUED)
exfat_write_failed(mapping, size);
- if (ret != -EIOCBQUEUED)
- return ret;
+ return ret;
} else
size = pos + ret;
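
For context on the exfat_direct_IO() hunk above: -EIOCBQUEUED from blockdev_direct_IO() means the async I/O was queued for later completion, not that it failed, so the write-failure truncation must be skipped for it while the status is still propagated. A hedged sketch of that control flow (EIOCBQUEUED's value is kernel-internal; the cleanup helper is hypothetical):

#define EIOCBQUEUED 529	/* kernel-internal "async I/O queued" status */

static void truncate_failed_write(void)
{
	/* hypothetical stand-in for exfat_write_failed() */
}

static long handle_dio_result(long ret, int is_write)
{
	if (ret < 0) {
		/* queued async I/O is not a failure; skip write cleanup */
		if (is_write && ret != -EIOCBQUEUED)
			truncate_failed_write();
		return ret;	/* propagate error or queued status */
	}
	return ret;	/* bytes completed synchronously */
}
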
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index 705710f93e2d..afdf13c34ff5 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -655,7 +655,6 @@ static int exfat_load_upcase_table(struct super_block *sb,
unsigned int sect_size = sb->s_blocksize;
unsigned int i, index = 0;
u32 chksum = 0;
- int ret;
unsigned char skip = false;
unsigned short *upcase_table;
@@ -673,8 +672,7 @@ static int exfat_load_upcase_table(struct super_block *sb,
if (!bh) {
exfat_err(sb, "failed to read sector(0x%llx)",
(unsigned long long)sector);
- ret = -EIO;
- goto free_table;
+ return -EIO;
}
sector++;
for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) {
@@ -701,15 +699,12 @@ static int exfat_load_upcase_table(struct super_block *sb,
exfat_err(sb, "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)",
index, chksum, utbl_checksum);
- ret = -EINVAL;
-free_table:
- exfat_free_upcase_table(sbi);
- return ret;
+ return -EINVAL;
}
static int exfat_load_default_upcase_table(struct super_block *sb)
{
- int i, ret = -EIO;
+ int i;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
unsigned char skip = false;
unsigned short uni = 0, *upcase_table;
@@ -740,8 +735,7 @@ static int exfat_load_default_upcase_table(struct super_block *sb)
return 0;
/* FATAL error: default upcase table has error */
- exfat_free_upcase_table(sbi);
- return ret;
+ return -EIO;
}
int exfat_create_upcase_table(struct super_block *sb)
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index d9d4fa91010b..fcb658267765 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -39,9 +39,6 @@ static void exfat_put_super(struct super_block *sb)
exfat_free_bitmap(sbi);
brelse(sbi->boot_bh);
mutex_unlock(&sbi->s_lock);
-
- unload_nls(sbi->nls_io);
- exfat_free_upcase_table(sbi);
}
static int exfat_sync_fs(struct super_block *sb, int wait)
@@ -600,7 +597,7 @@ static int __exfat_fill_super(struct super_block *sb)
ret = exfat_load_bitmap(sb);
if (ret) {
exfat_err(sb, "failed to load alloc-bitmap");
- goto free_upcase_table;
+ goto free_bh;
}
ret = exfat_count_used_clusters(sb, &sbi->used_clusters);
@@ -613,8 +610,6 @@ static int __exfat_fill_super(struct super_block *sb)
free_alloc_bitmap:
exfat_free_bitmap(sbi);
-free_upcase_table:
- exfat_free_upcase_table(sbi);
free_bh:
brelse(sbi->boot_bh);
return ret;
@@ -701,12 +696,10 @@ put_inode:
sb->s_root = NULL;
free_table:
- exfat_free_upcase_table(sbi);
exfat_free_bitmap(sbi);
brelse(sbi->boot_bh);
check_nls_io:
- unload_nls(sbi->nls_io);
return err;
}
@@ -771,13 +764,22 @@ static int exfat_init_fs_context(struct fs_context *fc)
return 0;
}
+static void delayed_free(struct rcu_head *p)
+{
+ struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu);
+
+ unload_nls(sbi->nls_io);
+ exfat_free_upcase_table(sbi);
+ exfat_free_sbi(sbi);
+}
+
static void exfat_kill_sb(struct super_block *sb)
{
struct exfat_sb_info *sbi = sb->s_fs_info;
kill_block_super(sb);
if (sbi)
- exfat_free_sbi(sbi);
+ call_rcu(&sbi->rcu, delayed_free);
}
static struct file_system_type exfat_fs_type = {
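
The exfat super.c hunks above defer freeing of the sb_info, the NLS table, and the upcase table to an RCU callback, so lockless readers that may still dereference the superblock info cannot observe freed memory. A kernel-style sketch of the call_rcu() deferred-free pattern (assumes <linux/rcupdate.h> and <linux/slab.h>; struct and function names are invented):

struct my_sb_info {
	void *table;
	struct rcu_head rcu;	/* storage for the deferred free */
};

static void my_delayed_free(struct rcu_head *p)
{
	struct my_sb_info *sbi = container_of(p, struct my_sb_info, rcu);

	kfree(sbi->table);	/* safe: all RCU readers have finished */
	kfree(sbi);
}

static void my_kill_sb(struct my_sb_info *sbi)
{
	if (sbi)
		call_rcu(&sbi->rcu, my_delayed_free);	/* free after grace period */
}
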
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a5d784872303..023571f8dd1b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -252,8 +252,10 @@ struct ext4_allocation_request {
#define EXT4_MAP_MAPPED BIT(BH_Mapped)
#define EXT4_MAP_UNWRITTEN BIT(BH_Unwritten)
#define EXT4_MAP_BOUNDARY BIT(BH_Boundary)
+#define EXT4_MAP_DELAYED BIT(BH_Delay)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
- EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
+ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
+ EXT4_MAP_DELAYED)
struct ext4_map_blocks {
ext4_fsblk_t m_pblk;
@@ -2912,10 +2914,10 @@ extern const struct seq_operations ext4_mb_seq_groups_ops;
extern const struct seq_operations ext4_mb_seq_structs_summary_ops;
extern int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset);
extern int ext4_mb_init(struct super_block *);
-extern int ext4_mb_release(struct super_block *);
+extern void ext4_mb_release(struct super_block *);
extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
struct ext4_allocation_request *, int *);
-extern void ext4_discard_preallocations(struct inode *, unsigned int);
+extern void ext4_discard_preallocations(struct inode *);
extern int __init ext4_init_mballoc(void);
extern void ext4_exit_mballoc(void);
extern ext4_group_t ext4_mb_prefetch(struct super_block *sb,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 01299b55a567..7669d154c05e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -100,7 +100,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
* i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
@@ -2229,7 +2229,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
/*
- * ext4_ext_determine_hole - determine hole around given block
+ * ext4_ext_find_hole - find hole around given block according to the given path
* @inode: inode we lookup in
* @path: path in extent tree to @lblk
* @lblk: pointer to logical block around which we want to determine hole
@@ -2241,9 +2241,9 @@ static int ext4_fill_es_cache_info(struct inode *inode,
* The function returns the length of a hole starting at @lblk. We update @lblk
* to the beginning of the hole if we managed to find it.
*/
-static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
- struct ext4_ext_path *path,
- ext4_lblk_t *lblk)
+static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t *lblk)
{
int depth = ext_depth(inode);
struct ext4_extent *ex;
@@ -2271,30 +2271,6 @@ static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
}
/*
- * ext4_ext_put_gap_in_cache:
- * calculate boundaries of the gap that the requested block fits into
- * and cache this gap
- */
-static void
-ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
- ext4_lblk_t hole_len)
-{
- struct extent_status es;
-
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
- hole_start + hole_len - 1, &es);
- if (es.es_len) {
- /* There's delayed extent containing lblock? */
- if (es.es_lblk <= hole_start)
- return;
- hole_len = min(es.es_lblk - hole_start, hole_len);
- }
- ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
- ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
- EXTENT_STATUS_HOLE);
-}
-
-/*
* ext4_ext_rm_idx:
* removes index from the index block.
*/
@@ -4062,6 +4038,72 @@ static int get_implied_cluster_alloc(struct super_block *sb,
return 0;
}
+/*
+ * Determine the hole length around the given logical block: first try to
+ * locate and expand the hole from the given @path, then adjust it if it is
+ * partially or completely covered by delayed extents; insert it into the
+ * extent status tree if it is indeed a hole, and finally return the length
+ * of the determined extent.
+ */
+static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_lblk_t lblk)
+{
+ ext4_lblk_t hole_start, len;
+ struct extent_status es;
+
+ hole_start = lblk;
+ len = ext4_ext_find_hole(inode, path, &hole_start);
+again:
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+ hole_start + len - 1, &es);
+ if (!es.es_len)
+ goto insert_hole;
+
+ /*
+ * There's a delalloc extent in the hole; handle it whether it lies
+ * in front of, behind, or straddles the queried range.
+ */
+ if (lblk >= es.es_lblk + es.es_len) {
+ /*
+ * The delalloc extent is in front of the queried range,
+ * find again from the queried start block.
+ */
+ len -= lblk - hole_start;
+ hole_start = lblk;
+ goto again;
+ } else if (in_range(lblk, es.es_lblk, es.es_len)) {
+ /*
+ * The delalloc extent contains lblk; it must have been added
+ * after ext4_map_blocks() checked the extent status tree, as
+ * we are not holding i_rwsem and the delalloc info is only
+ * stabilized by the i_data_sem we are about to release. Don't
+ * modify the extent status tree or report the extent as a
+ * hole; just adjust the length to the remaining part of the
+ * delalloc extent after lblk.
+ */
+ len = es.es_lblk + es.es_len - lblk;
+ return len;
+ } else {
+ /*
+ * The delalloc extent is partially or completely behind
+ * the queried range, update hole length until the
+ * beginning of the delalloc extent.
+ */
+ len = min(es.es_lblk - hole_start, len);
+ }
+
+insert_hole:
+ /* Put just found gap into cache to speed up subsequent requests */
+ ext_debug(inode, " -> %u:%u\n", hole_start, len);
+ ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
+
+ /* Update len to reflect the hole size after lblk */
+ if (hole_start != lblk)
+ len -= lblk - hole_start;
+
+ return len;
+}
/*
* Block allocation/map/preallocation routine for extents based files
@@ -4179,22 +4221,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* we couldn't try to create block if create flag is zero
*/
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
- ext4_lblk_t hole_start, hole_len;
+ ext4_lblk_t len;
- hole_start = map->m_lblk;
- hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
- /*
- * put just found gap into cache to speed up
- * subsequent requests
- */
- ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
+ len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
- /* Update hole_len to reflect hole size after map->m_lblk */
- if (hole_start != map->m_lblk)
- hole_len -= map->m_lblk - hole_start;
map->m_pblk = 0;
- map->m_len = min_t(unsigned int, map->m_len, hole_len);
-
+ map->m_len = min_t(unsigned int, map->m_len, len);
goto out;
}
@@ -4313,7 +4345,7 @@ got_allocated_blocks:
* not a good idea to call discard here directly,
* but otherwise we'd need to call it every free().
*/
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
ext4_free_blocks(handle, inode, NULL, newblock,
@@ -5357,7 +5389,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
@@ -5365,7 +5397,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
ret = ext4_ext_shift_extents(inode, handle, punch_stop,
punch_stop - punch_start, SHIFT_LEFT);
@@ -5497,7 +5529,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
path = ext4_find_extent(inode, offset_lblk, NULL, 0);
if (IS_ERR(path)) {
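
In the new ext4_ext_determine_insert_hole() above, the interesting part is classifying the cached delalloc extent against the queried block: entirely in front (restart from lblk), covering lblk (report only the delalloc remainder), or behind (clip the hole). A plain-C sketch of that interval arithmetic, simplified to a single delalloc extent (the real code loops and re-queries the extent status tree):

/* Assumes es_start >= hole_start and lblk lies inside the hole. */
static unsigned int hole_after(unsigned int lblk, unsigned int hole_start,
			       unsigned int len, unsigned int es_start,
			       unsigned int es_len)
{
	if (lblk >= es_start + es_len) {
		/* delalloc entirely in front: restart the hole at lblk
		 * (the real code re-queries for further extents here) */
		len -= lblk - hole_start;
		hole_start = lblk;
	} else if (lblk >= es_start) {
		/* delalloc covers lblk: report up to its end, cache nothing */
		return es_start + es_len - lblk;
	} else if (es_start - hole_start < len) {
		/* delalloc behind: the hole ends where the delalloc begins */
		len = es_start - hole_start;
	}
	/* report only the part of the hole at and after lblk */
	if (hole_start != lblk)
		len -= lblk - hole_start;
	return len;
}
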
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6aa15dafc677..54d6ff22585c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -174,7 +174,7 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
(atomic_read(&inode->i_writecount) == 1) &&
!EXT4_I(inode)->i_reserved_data_blocks) {
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
up_write(&EXT4_I(inode)->i_data_sem);
}
if (is_dx(inode) && filp->private_data)
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index a9f3716119d3..d8ca7f64f952 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -714,7 +714,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
* i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
up_write(&EXT4_I(inode)->i_data_sem);
*dropped = 1;
return 0;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5af1b0b8680e..2ccf3b5e3a7c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -371,7 +371,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
*/
if ((ei->i_reserved_data_blocks == 0) &&
!inode_is_open_for_write(inode))
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
}
static int __check_block_validity(struct inode *inode, const char *func,
@@ -515,6 +515,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
map->m_len = retval;
} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
map->m_pblk = 0;
+ map->m_flags |= ext4_es_is_delayed(&es) ?
+ EXT4_MAP_DELAYED : 0;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
@@ -1703,11 +1705,8 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
- if (ext4_es_is_hole(&es)) {
- retval = 0;
- down_read(&EXT4_I(inode)->i_data_sem);
+ if (ext4_es_is_hole(&es))
goto add_delayed;
- }
/*
* Delayed extent could be allocated by fallocate.
@@ -1749,26 +1748,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
retval = ext4_ind_map_blocks(NULL, inode, map, 0);
-
-add_delayed:
- if (retval == 0) {
- int ret;
-
- /*
- * XXX: __block_prepare_write() unmaps passed block,
- * is it OK?
- */
-
- ret = ext4_insert_delayed_block(inode, map->m_lblk);
- if (ret != 0) {
- retval = ret;
- goto out_unlock;
- }
-
- map_bh(bh, inode->i_sb, invalid_block);
- set_buffer_new(bh);
- set_buffer_delay(bh);
- } else if (retval > 0) {
+ if (retval < 0) {
+ up_read(&EXT4_I(inode)->i_data_sem);
+ return retval;
+ }
+ if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
@@ -1783,11 +1767,21 @@ add_delayed:
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
+ up_read(&EXT4_I(inode)->i_data_sem);
+ return retval;
}
+ up_read(&EXT4_I(inode)->i_data_sem);
-out_unlock:
- up_read((&EXT4_I(inode)->i_data_sem));
+add_delayed:
+ down_write(&EXT4_I(inode)->i_data_sem);
+ retval = ext4_insert_delayed_block(inode, map->m_lblk);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (retval)
+ return retval;
+ map_bh(bh, inode->i_sb, invalid_block);
+ set_buffer_new(bh);
+ set_buffer_delay(bh);
return retval;
}
@@ -3268,6 +3262,9 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
iomap->addr = (u64) map->m_pblk << blkbits;
if (flags & IOMAP_DAX)
iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
+ } else if (map->m_flags & EXT4_MAP_DELAYED) {
+ iomap->type = IOMAP_DELALLOC;
+ iomap->addr = IOMAP_NULL_ADDR;
} else {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
@@ -3430,35 +3427,11 @@ const struct iomap_ops ext4_iomap_overwrite_ops = {
.iomap_end = ext4_iomap_end,
};
-static bool ext4_iomap_is_delalloc(struct inode *inode,
- struct ext4_map_blocks *map)
-{
- struct extent_status es;
- ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
-
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
- map->m_lblk, end, &es);
-
- if (!es.es_len || es.es_lblk > end)
- return false;
-
- if (es.es_lblk > map->m_lblk) {
- map->m_len = es.es_lblk - map->m_lblk;
- return false;
- }
-
- offset = map->m_lblk - es.es_lblk;
- map->m_len = es.es_len - offset;
-
- return true;
-}
-
static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
int ret;
- bool delalloc = false;
struct ext4_map_blocks map;
u8 blkbits = inode->i_blkbits;
@@ -3499,13 +3472,8 @@ static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret < 0)
return ret;
- if (ret == 0)
- delalloc = ext4_iomap_is_delalloc(inode, &map);
-
set_iomap:
ext4_set_iomap(inode, iomap, &map, offset, length, flags);
- if (delalloc && iomap->type == IOMAP_HOLE)
- iomap->type = IOMAP_DELALLOC;
return 0;
}
@@ -4015,12 +3983,12 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
/* If there are blocks to remove, do it */
if (stop_block > first_block) {
+ ext4_lblk_t hole_len = stop_block - first_block;
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
- ext4_es_remove_extent(inode, first_block,
- stop_block - first_block);
+ ext4_es_remove_extent(inode, first_block, hole_len);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
@@ -4029,6 +3997,8 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
+ ext4_es_insert_extent(inode, first_block, hole_len, ~0,
+ EXTENT_STATUS_HOLE);
up_write(&EXT4_I(inode)->i_data_sem);
}
ext4_fc_track_range(handle, inode, first_block, stop_block);
@@ -4170,7 +4140,7 @@ int ext4_truncate(struct inode *inode)
down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
err = ext4_ext_truncate(handle, inode);
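
The ext4_da_map_blocks() restructuring above splits the locking: lookups run under the read side of i_data_sem, and the delayed-block insertion now takes the write side instead of piggybacking on the read lock. A toy sketch of that read-lookup / write-insert split, with a pthread rwlock standing in for i_data_sem (helper names hypothetical; the real insert path revalidates internally):

#include <pthread.h>

static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;

static int lookup(void) { return 0; }	/* hypothetical: 1 if already mapped */
static void insert(void) { }		/* hypothetical: add delayed block */

static int map_or_insert(void)
{
	int found;

	pthread_rwlock_rdlock(&sem);	/* cheap shared lookup */
	found = lookup();
	pthread_rwlock_unlock(&sem);
	if (found)
		return found;

	pthread_rwlock_wrlock(&sem);	/* exclusive for the insertion */
	insert();			/* real code re-checks state inside */
	pthread_rwlock_unlock(&sem);
	return 0;
}
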
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index aa6be510eb8f..7160a71044c8 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -467,7 +467,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
ext4_reset_inode_seed(inode);
ext4_reset_inode_seed(inode_bl);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
err = ext4_mark_inode_dirty(handle, inode);
if (err < 0) {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f44f668e407f..e4f7cf9d89c4 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -564,14 +564,14 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(sb, e4b->bd_group,
inode ? inode->i_ino : 0,
blocknr,
"freeing block already freed "
"(bit %u)",
first + i);
- ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
}
mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
}
@@ -677,7 +677,7 @@ do { \
} \
} while (0)
-static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
+static void __mb_check_buddy(struct ext4_buddy *e4b, char *file,
const char *function, int line)
{
struct super_block *sb = e4b->bd_sb;
@@ -696,7 +696,7 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
void *buddy2;
if (e4b->bd_info->bb_check_counter++ % 10)
- return 0;
+ return;
while (order > 1) {
buddy = mb_find_buddy(e4b, order, &max);
@@ -758,7 +758,7 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
grp = ext4_get_group_info(sb, e4b->bd_group);
if (!grp)
- return NULL;
+ return;
list_for_each(cur, &grp->bb_prealloc_list) {
ext4_group_t groupnr;
struct ext4_prealloc_space *pa;
@@ -768,7 +768,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
for (i = 0; i < pa->pa_len; i++)
MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
}
- return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
@@ -842,7 +841,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
struct ext4_sb_info *sbi = EXT4_SB(sb);
int new_order;
- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
+ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
return;
new_order = mb_avg_fragment_size_order(sb,
@@ -871,7 +870,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
* cr level needs an update.
*/
static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac,
- enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+ enum criteria *new_cr, ext4_group_t *group)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_group_info *iter;
@@ -945,7 +944,7 @@ ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int o
* order. Updates *new_cr if cr level needs an update.
*/
static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac,
- enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+ enum criteria *new_cr, ext4_group_t *group)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_group_info *grp = NULL;
@@ -990,7 +989,7 @@ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *
* much and fall to CR_GOAL_LEN_SLOW in that case.
*/
static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac,
- enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
+ enum criteria *new_cr, ext4_group_t *group)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_group_info *grp = NULL;
@@ -1125,11 +1124,11 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
}
if (*new_cr == CR_POWER2_ALIGNED) {
- ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group, ngroups);
+ ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
} else if (*new_cr == CR_GOAL_LEN_FAST) {
- ext4_mb_choose_next_group_goal_fast(ac, new_cr, group, ngroups);
+ ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
} else if (*new_cr == CR_BEST_AVAIL_LEN) {
- ext4_mb_choose_next_group_best_avail(ac, new_cr, group, ngroups);
+ ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
} else {
/*
* TODO: For CR=2, we can arrange groups in an rb tree sorted by
@@ -1233,6 +1232,24 @@ void ext4_mb_generate_buddy(struct super_block *sb,
atomic64_add(period, &sbi->s_mb_generation_time);
}
+static void mb_regenerate_buddy(struct ext4_buddy *e4b)
+{
+ int count;
+ int order = 1;
+ void *buddy;
+
+ while ((buddy = mb_find_buddy(e4b, order++, &count)))
+ mb_set_bits(buddy, 0, count);
+
+ e4b->bd_info->bb_fragments = 0;
+ memset(e4b->bd_info->bb_counters, 0,
+ sizeof(*e4b->bd_info->bb_counters) *
+ (e4b->bd_sb->s_blocksize_bits + 2));
+
+ ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
+ e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
+}
+
/* The buddy information is attached to the buddy cache inode
* for convenience. The information regarding each group
* is loaded via ext4_mb_load_buddy. The information involves
@@ -1891,11 +1908,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
mb_check_buddy(e4b);
mb_free_blocks_double(inode, e4b, first, count);
- this_cpu_inc(discard_pa_seq);
- e4b->bd_info->bb_free += count;
- if (first < e4b->bd_info->bb_first_free)
- e4b->bd_info->bb_first_free = first;
-
/* access memory sequentially: check left neighbour,
* clear range and then check right neighbour
*/
@@ -1909,21 +1921,31 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t blocknr;
+ /*
+ * Fastcommit replay can free already freed blocks which
+ * corrupts allocation info. Regenerate it.
+ */
+ if (sbi->s_mount_state & EXT4_FC_REPLAY) {
+ mb_regenerate_buddy(e4b);
+ goto check;
+ }
+
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += EXT4_C2B(sbi, block);
- if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
- ext4_grp_locked_error(sb, e4b->bd_group,
- inode ? inode->i_ino : 0,
- blocknr,
- "freeing already freed block (bit %u); block bitmap corrupt.",
- block);
- ext4_mark_group_bitmap_corrupted(
- sb, e4b->bd_group,
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
- }
- goto done;
+ ext4_grp_locked_error(sb, e4b->bd_group,
+ inode ? inode->i_ino : 0, blocknr,
+ "freeing already freed block (bit %u); block bitmap corrupt.",
+ block);
+ return;
}
+ this_cpu_inc(discard_pa_seq);
+ e4b->bd_info->bb_free += count;
+ if (first < e4b->bd_info->bb_first_free)
+ e4b->bd_info->bb_first_free = first;
+
/* let's maintain fragments counter */
if (left_is_free && right_is_free)
e4b->bd_info->bb_fragments--;
@@ -1948,9 +1970,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
if (first <= last)
mb_buddy_mark_free(e4b, first >> 1, last >> 1);
-done:
mb_set_largest_free_order(sb, e4b->bd_info);
mb_update_avg_fragment_size(sb, e4b->bd_info);
+check:
mb_check_buddy(e4b);
}
@@ -2276,6 +2298,9 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
return;
ext4_lock_group(ac->ac_sb, group);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ goto out;
+
max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
if (max > 0) {
@@ -2283,6 +2308,7 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
ext4_mb_use_best_found(ac, e4b);
}
+out:
ext4_unlock_group(ac->ac_sb, group);
ext4_mb_unload_buddy(e4b);
}
@@ -2309,12 +2335,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
if (err)
return err;
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
- ext4_mb_unload_buddy(e4b);
- return 0;
- }
-
ext4_lock_group(ac->ac_sb, group);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ goto out;
+
max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
ac->ac_g_ex.fe_len, &ex);
ex.fe_logical = 0xDEADFA11; /* debug value */
@@ -2347,6 +2371,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ac->ac_b_ex = ex;
ext4_mb_use_best_found(ac, e4b);
}
+out:
ext4_unlock_group(ac->ac_sb, group);
ext4_mb_unload_buddy(e4b);
@@ -2380,12 +2405,12 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
k = mb_find_next_zero_bit(buddy, max, 0);
if (k >= max) {
+ ext4_mark_group_bitmap_corrupted(ac->ac_sb,
+ e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
"%d free clusters of order %d. But found 0",
grp->bb_counters[i], i);
- ext4_mark_group_bitmap_corrupted(ac->ac_sb,
- e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
break;
}
ac->ac_found++;
@@ -2436,12 +2461,12 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
* free blocks even though group info says we
* have free blocks
*/
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
"%d free clusters as per "
"group info. But bitmap says 0",
free);
- ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
break;
}
@@ -2467,12 +2492,12 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
if (WARN_ON(ex.fe_len <= 0))
break;
if (free < ex.fe_len) {
+ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
"%d free clusters as per "
"group info. But got %d blocks",
free, ex.fe_len);
- ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
- EXT4_GROUP_INFO_BBITMAP_CORRUPT);
/*
* The number of free blocks differs. This mostly
* indicates that the bitmap is corrupt. So exit
@@ -3725,7 +3750,7 @@ static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
return count;
}
-int ext4_mb_release(struct super_block *sb)
+void ext4_mb_release(struct super_block *sb)
{
ext4_group_t ngroups = ext4_get_groups_count(sb);
ext4_group_t i;
@@ -3801,8 +3826,6 @@ int ext4_mb_release(struct super_block *sb)
}
free_percpu(sbi->s_locality_groups);
-
- return 0;
}
static inline int ext4_issue_discard(struct super_block *sb,
@@ -5284,7 +5307,7 @@ static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
* the caller MUST hold group/inode locks.
* TODO: optimize the case when there are no in-core structures yet
*/
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
struct ext4_prealloc_space *pa)
{
@@ -5334,11 +5357,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
*/
}
atomic_add(free, &sbi->s_mb_discarded);
-
- return 0;
}
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
struct ext4_prealloc_space *pa)
{
@@ -5352,13 +5373,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
e4b->bd_group, group, pa->pa_pstart);
- return 0;
+ return;
}
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
-
- return 0;
}
/*
@@ -5479,7 +5498,7 @@ out_dbg:
*
* FIXME!! Make sure it is valid at all the call sites
*/
-void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
+void ext4_discard_preallocations(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct super_block *sb = inode->i_sb;
@@ -5491,9 +5510,8 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
struct rb_node *iter;
int err;
- if (!S_ISREG(inode->i_mode)) {
+ if (!S_ISREG(inode->i_mode))
return;
- }
if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
return;
@@ -5501,15 +5519,12 @@ void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
mb_debug(sb, "discard preallocation for inode %lu\n",
inode->i_ino);
trace_ext4_discard_preallocations(inode,
- atomic_read(&ei->i_prealloc_active), needed);
-
- if (needed == 0)
- needed = UINT_MAX;
+ atomic_read(&ei->i_prealloc_active));
repeat:
/* first, collect all pa's in the inode */
write_lock(&ei->i_prealloc_lock);
- for (iter = rb_first(&ei->i_prealloc_node); iter && needed;
+ for (iter = rb_first(&ei->i_prealloc_node); iter;
iter = rb_next(iter)) {
pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
@@ -5533,7 +5548,6 @@ repeat:
spin_unlock(&pa->pa_lock);
rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
list_add(&pa->u.pa_tmp_list, &list);
- needed--;
continue;
}
@@ -5943,7 +5957,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
/*
* release all resources we used in allocation
*/
-static int ext4_mb_release_context(struct ext4_allocation_context *ac)
+static void ext4_mb_release_context(struct ext4_allocation_context *ac)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_prealloc_space *pa = ac->ac_pa;
@@ -5980,7 +5994,6 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
mutex_unlock(&ac->ac_lg->lg_mutex);
ext4_mb_collect_stats(ac);
- return 0;
}
static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
@@ -6761,6 +6774,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
bool set_trimmed = false;
void *bitmap;
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ return 0;
+
last = ext4_last_grp_cluster(sb, e4b->bd_group);
bitmap = e4b->bd_bitmap;
if (start == 0 && max >= last)
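
A recurring pattern in the mballoc hunks above: the group's corrupt flag is now (re)checked under the group lock (ext4_mb_try_best_found(), ext4_mb_find_by_goal(), the trim path) rather than before taking it, since another CPU can mark the group corrupt in between. A small pthread-based sketch of checking a shared flag inside the critical section (assumes the lock is initialized elsewhere):

#include <pthread.h>
#include <stdbool.h>

struct group {
	pthread_mutex_t lock;
	bool corrupt;	/* set when bitmap inconsistencies are found */
};

static int try_alloc_from_group(struct group *g)
{
	int ret = -1;

	pthread_mutex_lock(&g->lock);
	if (g->corrupt)
		goto out;	/* never trust bitmaps of a corrupt group */
	ret = 0;		/* stand-in for the real extent scan */
out:
	pthread_mutex_unlock(&g->lock);
	return ret;
}
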
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index d7aeb5da7d86..56938532b4ce 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -192,7 +192,6 @@ struct ext4_allocation_context {
*/
ext4_grpblk_t ac_orig_goal_len;
- __u32 ac_groups_considered;
__u32 ac_flags; /* allocation hints */
__u16 ac_groups_scanned;
__u16 ac_groups_linear_remaining;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 3aa57376d9c2..7cd4afa4de1d 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -618,6 +618,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
goto out;
o_end = o_start + len;
+ *moved_len = 0;
while (o_start < o_end) {
struct ext4_extent *ex;
ext4_lblk_t cur_blk, next_blk;
@@ -672,7 +673,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
*/
ext4_double_up_write_data_sem(orig_inode, donor_inode);
/* Swap original branches with new branches */
- move_extent_per_page(o_filp, donor_inode,
+ *moved_len += move_extent_per_page(o_filp, donor_inode,
orig_page_index, donor_page_index,
offset_in_page, cur_len,
unwritten, &ret);
@@ -682,14 +683,11 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
o_start += cur_len;
d_start += cur_len;
}
- *moved_len = o_start - orig_blk;
- if (*moved_len > len)
- *moved_len = len;
out:
if (*moved_len) {
- ext4_discard_preallocations(orig_inode, 0);
- ext4_discard_preallocations(donor_inode, 0);
+ ext4_discard_preallocations(orig_inode);
+ ext4_discard_preallocations(donor_inode);
}
ext4_free_ext_path(path);
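
The ext4_move_extents() hunk above changes *moved_len from being derived after the loop to being accumulated per chunk, so a mid-loop failure reports exactly how much was actually swapped. A sketch of that accounting style (move_chunk() is a hypothetical stand-in for move_extent_per_page()):

/* Hypothetical helper: moves up to 'chunk' blocks, returns amount moved. */
static long move_chunk(unsigned long start, unsigned long chunk)
{
	(void)start;
	return (long)chunk;
}

static int move_range(unsigned long start, unsigned long end,
		      unsigned long *moved_len)
{
	*moved_len = 0;			/* initialize before the loop */
	while (start < end) {
		unsigned long chunk = end - start > 16 ? 16 : end - start;
		long done = move_chunk(start, chunk);

		if (done < 0)
			return (int)done;	/* *moved_len is still accurate */
		*moved_len += done;
		start += chunk;
	}
	return 0;
}
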
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index dcba0f85dfe2..0f931d0c227d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1525,7 +1525,7 @@ void ext4_clear_inode(struct inode *inode)
ext4_fc_del(inode);
invalidate_inode_buffers(inode);
clear_inode(inode);
- ext4_discard_preallocations(inode, 0);
+ ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
dquot_drop(inode);
if (EXT4_I(inode)->jinode) {
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 75bf1f88843c..645240cc0229 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -92,10 +92,12 @@ static const char *ext4_get_link(struct dentry *dentry, struct inode *inode,
if (!dentry) {
bh = ext4_getblk(NULL, inode, 0, EXT4_GET_BLOCKS_CACHED_NOWAIT);
- if (IS_ERR(bh))
- return ERR_CAST(bh);
- if (!bh || !ext4_buffer_uptodate(bh))
+ if (IS_ERR(bh) || !bh)
return ERR_PTR(-ECHILD);
+ if (!ext4_buffer_uptodate(bh)) {
+ brelse(bh);
+ return ERR_PTR(-ECHILD);
+ }
} else {
bh = ext4_bread(NULL, inode, 0, 0);
if (IS_ERR(bh))
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1767493dffda..3d84fcc471c6 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1675,11 +1675,11 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
inode->i_state |= I_DIRTY_PAGES;
- else if (unlikely(inode->i_state & I_PINNING_FSCACHE_WB)) {
+ else if (unlikely(inode->i_state & I_PINNING_NETFS_WB)) {
if (!(inode->i_state & I_DIRTY_PAGES)) {
- inode->i_state &= ~I_PINNING_FSCACHE_WB;
- wbc->unpinned_fscache_wb = true;
- dirty |= I_PINNING_FSCACHE_WB; /* Cause write_inode */
+ inode->i_state &= ~I_PINNING_NETFS_WB;
+ wbc->unpinned_netfs_wb = true;
+ dirty |= I_PINNING_NETFS_WB; /* Cause write_inode */
}
}
@@ -1691,7 +1691,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (ret == 0)
ret = err;
}
- wbc->unpinned_fscache_wb = false;
+ wbc->unpinned_netfs_wb = false;
trace_writeback_single_inode(inode, wbc, nr_to_write);
return ret;
}
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
deleted file mode 100644
index b313a978ae0a..000000000000
--- a/fs/fscache/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config FSCACHE
- tristate "General filesystem local caching manager"
- select NETFS_SUPPORT
- help
- This option enables a generic filesystem caching manager that can be
- used by various network and other filesystems to cache data locally.
- Different sorts of caches can be plugged in, depending on the
- resources available.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_STATS
- bool "Gather statistical information on local caching"
- depends on FSCACHE && PROC_FS
- select NETFS_STATS
- help
- This option causes statistical information to be gathered on local
- caching and exported through file:
-
- /proc/fs/fscache/stats
-
- The gathering of statistics adds a certain amount of overhead to
- execution as there are a quite a few stats gathered, and on a
- multi-CPU system these may be on cachelines that keep bouncing
- between CPUs. On the other hand, the stats are very useful for
- debugging purposes. Saying 'Y' here is recommended.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_DEBUG
- bool "Debug FS-Cache"
- depends on FSCACHE
- help
- This permits debugging to be dynamically enabled in the local caching
- management module. If this is set, the debugging output may be
- enabled by setting bits in /sys/modules/fscache/parameter/debug.
-
- See Documentation/filesystems/caching/fscache.rst for more information.
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
deleted file mode 100644
index afb090ea16c4..000000000000
--- a/fs/fscache/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for general filesystem caching code
-#
-
-fscache-y := \
- cache.o \
- cookie.o \
- io.o \
- main.o \
- volume.o
-
-fscache-$(CONFIG_PROC_FS) += proc.o
-fscache-$(CONFIG_FSCACHE_STATS) += stats.o
-
-obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
deleted file mode 100644
index 1336f517e9b1..000000000000
--- a/fs/fscache/internal.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Internal definitions for FS-Cache
- *
- * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) "FS-Cache: " fmt
-
-#include <linux/slab.h>
-#include <linux/fscache-cache.h>
-#include <trace/events/fscache.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-
-/*
- * cache.c
- */
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_caches_seq_ops;
-#endif
-bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
-void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
-struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
-void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
-
-static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
-{
- return smp_load_acquire(&cache->state);
-}
-
-static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
-{
- return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
-}
-
-static inline void fscache_set_cache_state(struct fscache_cache *cache,
- enum fscache_cache_state new_state)
-{
- smp_store_release(&cache->state, new_state);
-
-}
-
-static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
- enum fscache_cache_state old_state,
- enum fscache_cache_state new_state)
-{
- return try_cmpxchg_release(&cache->state, &old_state, new_state);
-}
-
-/*
- * cookie.c
- */
-extern struct kmem_cache *fscache_cookie_jar;
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_cookies_seq_ops;
-#endif
-extern struct timer_list fscache_cookie_lru_timer;
-
-extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
-extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
- enum fscache_access_trace why);
-
-static inline void fscache_see_cookie(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
-{
- trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
- where);
-}
-
-/*
- * main.c
- */
-extern unsigned fscache_debug;
-
-extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
-
-/*
- * proc.c
- */
-#ifdef CONFIG_PROC_FS
-extern int __init fscache_proc_init(void);
-extern void fscache_proc_cleanup(void);
-#else
-#define fscache_proc_init() (0)
-#define fscache_proc_cleanup() do {} while (0)
-#endif
-
-/*
- * stats.c
- */
-#ifdef CONFIG_FSCACHE_STATS
-extern atomic_t fscache_n_volumes;
-extern atomic_t fscache_n_volumes_collision;
-extern atomic_t fscache_n_volumes_nomem;
-extern atomic_t fscache_n_cookies;
-extern atomic_t fscache_n_cookies_lru;
-extern atomic_t fscache_n_cookies_lru_expired;
-extern atomic_t fscache_n_cookies_lru_removed;
-extern atomic_t fscache_n_cookies_lru_dropped;
-
-extern atomic_t fscache_n_acquires;
-extern atomic_t fscache_n_acquires_ok;
-extern atomic_t fscache_n_acquires_oom;
-
-extern atomic_t fscache_n_invalidates;
-
-extern atomic_t fscache_n_relinquishes;
-extern atomic_t fscache_n_relinquishes_retire;
-extern atomic_t fscache_n_relinquishes_dropped;
-
-extern atomic_t fscache_n_resizes;
-extern atomic_t fscache_n_resizes_null;
-
-static inline void fscache_stat(atomic_t *stat)
-{
- atomic_inc(stat);
-}
-
-static inline void fscache_stat_d(atomic_t *stat)
-{
- atomic_dec(stat);
-}
-
-#define __fscache_stat(stat) (stat)
-
-int fscache_stats_show(struct seq_file *m, void *v);
-#else
-
-#define __fscache_stat(stat) (NULL)
-#define fscache_stat(stat) do {} while (0)
-#define fscache_stat_d(stat) do {} while (0)
-#endif
-
-/*
- * volume.c
- */
-#ifdef CONFIG_PROC_FS
-extern const struct seq_operations fscache_volumes_seq_ops;
-#endif
-
-struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where);
-void fscache_put_volume(struct fscache_volume *volume,
- enum fscache_volume_trace where);
-bool fscache_begin_volume_access(struct fscache_volume *volume,
- struct fscache_cookie *cookie,
- enum fscache_access_trace why);
-void fscache_create_volume(struct fscache_volume *volume, bool wait);
-
-
-/*****************************************************************************/
-/*
- * debug tracing
- */
-#define dbgprintk(FMT, ...) \
- printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
-
-#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
-
-#define kjournal(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-
-#ifdef __KDEBUG
-#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
-#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
-#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
-
-#elif defined(CONFIG_FSCACHE_DEBUG)
-#define _enter(FMT, ...) \
-do { \
- if (__do_kdebug(ENTER)) \
- kenter(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _leave(FMT, ...) \
-do { \
- if (__do_kdebug(LEAVE)) \
- kleave(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#define _debug(FMT, ...) \
-do { \
- if (__do_kdebug(DEBUG)) \
- kdebug(FMT, ##__VA_ARGS__); \
-} while (0)
-
-#else
-#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-#endif
-
-/*
- * determine whether a particular optional debugging point should be logged
- * - we need to go through three steps to persuade cpp to correctly join the
- * shorthand in FSCACHE_DEBUG_LEVEL with its prefix
- */
-#define ____do_kdebug(LEVEL, POINT) \
- unlikely((fscache_debug & \
- (FSCACHE_POINT_##POINT << (FSCACHE_DEBUG_ ## LEVEL * 3))))
-#define ___do_kdebug(LEVEL, POINT) \
- ____do_kdebug(LEVEL, POINT)
-#define __do_kdebug(POINT) \
- ___do_kdebug(FSCACHE_DEBUG_LEVEL, POINT)
-
-#define FSCACHE_DEBUG_CACHE 0
-#define FSCACHE_DEBUG_COOKIE 1
-#define FSCACHE_DEBUG_OBJECT 2
-#define FSCACHE_DEBUG_OPERATION 3
-
-#define FSCACHE_POINT_ENTER 1
-#define FSCACHE_POINT_LEAVE 2
-#define FSCACHE_POINT_DEBUG 4
-
-#ifndef FSCACHE_DEBUG_LEVEL
-#define FSCACHE_DEBUG_LEVEL CACHE
-#endif
-
-/*
- * assertions
- */
-#if 1 /* defined(__KDEBUGALL) */
-
-#define ASSERT(X) \
-do { \
- if (unlikely(!(X))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTCMP(X, OP, Y) \
-do { \
- if (unlikely(!((X) OP (Y)))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- pr_err("%lx " #OP " %lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTIF(C, X) \
-do { \
- if (unlikely((C) && !(X))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- BUG(); \
- } \
-} while (0)
-
-#define ASSERTIFCMP(C, X, OP, Y) \
-do { \
- if (unlikely((C) && !((X) OP (Y)))) { \
- pr_err("\n"); \
- pr_err("Assertion failed\n"); \
- pr_err("%lx " #OP " %lx is false\n", \
- (unsigned long)(X), (unsigned long)(Y)); \
- BUG(); \
- } \
-} while (0)
-
-#else
-
-#define ASSERT(X) do {} while (0)
-#define ASSERTCMP(X, OP, Y) do {} while (0)
-#define ASSERTIF(C, X) do {} while (0)
-#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
-
-#endif /* assert or not */
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 91e89e68177e..b6cad106c37e 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -474,8 +474,7 @@ err:
static void cuse_fc_release(struct fuse_conn *fc)
{
- struct cuse_conn *cc = fc_to_cc(fc);
- kfree_rcu(cc, fc.rcu);
+ kfree(fc_to_cc(fc));
}
/**
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1df83eebda92..bcbe34488862 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -888,6 +888,7 @@ struct fuse_mount {
/* Entry on fc->mounts */
struct list_head fc_entry;
+ struct rcu_head rcu;
};
static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 2a6d44f91729..516ea2979a90 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -930,6 +930,14 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
+static void delayed_release(struct rcu_head *p)
+{
+ struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu);
+
+ put_user_ns(fc->user_ns);
+ fc->release(fc);
+}
+
void fuse_conn_put(struct fuse_conn *fc)
{
if (refcount_dec_and_test(&fc->count)) {
@@ -941,13 +949,12 @@ void fuse_conn_put(struct fuse_conn *fc)
if (fiq->ops->release)
fiq->ops->release(fiq);
put_pid_ns(fc->pid_ns);
- put_user_ns(fc->user_ns);
bucket = rcu_dereference_protected(fc->curr_bucket, 1);
if (bucket) {
WARN_ON(atomic_read(&bucket->count) != 1);
kfree(bucket);
}
- fc->release(fc);
+ call_rcu(&fc->rcu, delayed_release);
}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
@@ -1366,7 +1373,7 @@ EXPORT_SYMBOL_GPL(fuse_send_init);
void fuse_free_conn(struct fuse_conn *fc)
{
WARN_ON(!list_empty(&fc->devices));
- kfree_rcu(fc, rcu);
+ kfree(fc);
}
EXPORT_SYMBOL_GPL(fuse_free_conn);
@@ -1902,7 +1909,7 @@ static void fuse_sb_destroy(struct super_block *sb)
void fuse_mount_destroy(struct fuse_mount *fm)
{
fuse_conn_put(fm->fc);
- kfree(fm);
+ kfree_rcu(fm, rcu);
}
EXPORT_SYMBOL(fuse_mount_destroy);
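
The fuse changes above close a use-after-free window: put_user_ns() and fc->release() move into an RCU callback, fuse_free_conn() drops its own kfree_rcu() (the deferral now happens once in fuse_conn_put()), and fuse_mount gains an rcu_head so it can be freed with kfree_rcu(). A kernel-style sketch of kfree_rcu() on an embedded rcu_head (assumes <linux/rcupdate.h>; names invented):

struct conn {
	int id;
	struct rcu_head rcu;	/* storage for the deferred free */
};

static void conn_destroy(struct conn *c)
{
	/* readers under rcu_read_lock() may still hold 'c', so defer */
	kfree_rcu(c, rcu);
}
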
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 177f1f41f225..2e215e8c3c88 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -32,25 +32,21 @@
static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
{
- struct dentry *parent = NULL;
+ struct dentry *parent;
struct gfs2_sbd *sdp;
struct gfs2_inode *dip;
- struct inode *dinode, *inode;
+ struct inode *inode;
struct gfs2_holder d_gh;
struct gfs2_inode *ip = NULL;
int error, valid = 0;
int had_lock = 0;
- if (flags & LOOKUP_RCU) {
- dinode = d_inode_rcu(READ_ONCE(dentry->d_parent));
- if (!dinode)
- return -ECHILD;
- } else {
- parent = dget_parent(dentry);
- dinode = d_inode(parent);
- }
- sdp = GFS2_SB(dinode);
- dip = GFS2_I(dinode);
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ parent = dget_parent(dentry);
+ sdp = GFS2_SB(d_inode(parent));
+ dip = GFS2_I(d_inode(parent));
inode = d_inode(dentry);
if (inode) {
@@ -66,8 +62,7 @@ static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
if (!had_lock) {
- error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED,
- flags & LOOKUP_RCU ? GL_NOBLOCK : 0, &d_gh);
+ error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
if (error)
goto out;
}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6bfc9383b7b8..1b95db2c3aac 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1882,10 +1882,10 @@ int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
WARN_ON_ONCE(!may_not_block);
return -ECHILD;
}
- if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
- int noblock = may_not_block ? GL_NOBLOCK : 0;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
- LM_FLAG_ANY | noblock, &i_gh);
+ if (gfs2_glock_is_locked_by_me(gl) == NULL) {
+ if (may_not_block)
+ return -ECHILD;
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (error)
return error;
}
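
The gfs2 hunks above revert the RCU-walk (LOOKUP_RCU / GL_NOBLOCK) support: under RCU walk, gfs2_drevalidate() and gfs2_permission() now bail out with -ECHILD so the VFS retries in ref-walk mode, rather than attempting a cluster lock that might block. Schematically (names hypothetical):

#include <errno.h>

static int revalidate(int rcu_walk)
{
	if (rcu_walk)
		return -ECHILD;	/* caller drops out of RCU mode and retries */
	/* ... take the glock and validate the dentry ... */
	return 1;
}
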
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 7ededcb720c1..012a3d003fbe 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -190,6 +190,7 @@ struct hfsplus_sb_info {
int work_queued; /* non-zero delayed work is queued */
struct delayed_work sync_work; /* FS sync delayed work */
spinlock_t work_lock; /* protects sync_work and work_queued */
+ struct rcu_head rcu;
};
#define HFSPLUS_SB_WRITEBACKUP 0
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 1986b4f18a90..97920202790f 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -277,6 +277,14 @@ void hfsplus_mark_mdb_dirty(struct super_block *sb)
spin_unlock(&sbi->work_lock);
}
+static void delayed_free(struct rcu_head *p)
+{
+ struct hfsplus_sb_info *sbi = container_of(p, struct hfsplus_sb_info, rcu);
+
+ unload_nls(sbi->nls);
+ kfree(sbi);
+}
+
static void hfsplus_put_super(struct super_block *sb)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
@@ -302,9 +310,7 @@ static void hfsplus_put_super(struct super_block *sb)
hfs_btree_close(sbi->ext_tree);
kfree(sbi->s_vhdr_buf);
kfree(sbi->s_backup_vhdr_buf);
- unload_nls(sbi->nls);
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
+ call_rcu(&sbi->rcu, delayed_free);
}
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ea5b8e57d904..d746866ae3b6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -100,6 +100,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
+ vm_flags_t vm_flags;
/*
* vma address alignment (but not the pgoff alignment) has
@@ -141,10 +142,20 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
ret = -ENOMEM;
+
+ vm_flags = vma->vm_flags;
+ /*
+	 * For SHM_HUGETLB, the pages are reserved in the shmget() call, so
+	 * skip reserving them here. Note: the inode flag S_PRIVATE is set
+	 * only for SHM hugetlbfs files.
+ */
+ if (inode->i_flags & S_PRIVATE)
+ vm_flags |= VM_NORESERVE;
+
if (!hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
- vma->vm_flags))
+ vm_flags))
goto out;
ret = 0;
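
For context on the comment above: with SysV shared memory the huge pages are reserved when the segment is created, not when it is mapped, so the later mmap of the S_PRIVATE hugetlbfs inode must not reserve the range a second time. A minimal userspace illustration (error handling elided; assumes huge pages are provisioned):

#include <stddef.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#define SEG_SIZE (2UL * 1024 * 1024)	/* one 2 MiB huge page */

int main(void)
{
	/* The huge-page reservation is made here, inside shmget()... */
	int id = shmget(IPC_PRIVATE, SEG_SIZE,
			IPC_CREAT | SHM_HUGETLB | 0600);

	/* ...so attaching must not reserve again (hence VM_NORESERVE). */
	void *p = shmat(id, NULL, 0);

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}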
@@ -340,7 +351,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
} else {
folio_unlock(folio);
- if (!folio_test_has_hwpoisoned(folio))
+ if (!folio_test_hwpoison(folio))
want = nr;
else {
/*
@@ -1354,6 +1365,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
{
struct hugetlbfs_fs_context *ctx = fc->fs_private;
struct fs_parse_result result;
+ struct hstate *h;
char *rest;
unsigned long ps;
int opt;
@@ -1398,11 +1410,12 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par
case Opt_pagesize:
ps = memparse(param->string, &rest);
- ctx->hstate = size_to_hstate(ps);
- if (!ctx->hstate) {
+ h = size_to_hstate(ps);
+ if (!h) {
pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
return -EINVAL;
}
+ ctx->hstate = h;
return 0;
case Opt_min_size:
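
The Opt_pagesize hunk above is the validate-then-commit idiom: parse into a local variable and only overwrite the previously-good value once validation succeeds, so a rejected option cannot leave ctx->hstate pointing at NULL. Side by side (condensed from the hunk):

/* Before: failure clobbered ctx->hstate with NULL. */
ctx->hstate = size_to_hstate(ps);
if (!ctx->hstate)
	return -EINVAL;

/* After: ctx->hstate is only written on success. */
h = size_to_hstate(ps);
if (!h)
	return -EINVAL;
ctx->hstate = h;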
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 8eec84c651bf..cb3cda1390ad 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -2763,9 +2763,7 @@ static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
* leafno - the number of the leaf to be updated.
* newval - the new value for the leaf.
*
- * RETURN VALUES:
- * 0 - success
- * -EIO - i/o error
+ * RETURN VALUES: none
*/
static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
{
@@ -2792,10 +2790,6 @@ static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
* get the buddy size (number of words covered) of
* the new value.
*/
-
- if ((newval - tp->dmt_budmin) > BUDMIN)
- return -EIO;
-
budsz = BUDSIZE(newval, tp->dmt_budmin);
/* try to join.
diff --git a/fs/namei.c b/fs/namei.c
index 4e0de939fea1..9342fa6a38c2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1717,7 +1717,11 @@ static inline int may_lookup(struct mnt_idmap *idmap,
{
if (nd->flags & LOOKUP_RCU) {
int err = inode_permission(idmap, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
- if (err != -ECHILD || !try_to_unlazy(nd))
+ if (!err) // success, keep going
+ return 0;
+ if (!try_to_unlazy(nd))
+ return -ECHILD; // redo it all non-lazy
+ if (err != -ECHILD) // hard error
return err;
}
return inode_permission(idmap, nd->inode, MAY_EXEC);
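
The MAY_NOT_BLOCK mask that may_lookup() passes along is how the VFS tells a filesystem's ->permission method that it is being called in RCU-walk mode. A sketch of what honouring it looks like on the filesystem side (myfs_acl_cached is hypothetical):

static int myfs_permission(struct mnt_idmap *idmap, struct inode *inode,
			   int mask)
{
	if (mask & MAY_NOT_BLOCK) {
		/* RCU-walk: we may not sleep. If the answer is not
		 * available locklessly, ask the VFS to retry us in
		 * ref-walk mode instead.
		 */
		if (!myfs_acl_cached(inode))
			return -ECHILD;
	}
	return generic_permission(idmap, inode, mask);
}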
diff --git a/fs/namespace.c b/fs/namespace.c
index 437f60e96d40..5a51315c6678 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4472,10 +4472,15 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
/*
* If this is an attached mount make sure it's located in the callers
* mount namespace. If it's not don't let the caller interact with it.
- * If this is a detached mount make sure it has an anonymous mount
- * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE.
+ *
+	 * If this mount doesn't have a parent, it's most often simply a
+ * detached mount with an anonymous mount namespace. IOW, something
+ * that's simply not attached yet. But there are apparently also users
+ * that do change mount properties on the rootfs itself. That obviously
+	 * neither has a parent nor is it a detached mount, so we cannot
+ * unconditionally check for detached mounts.
*/
- if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
+ if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
goto out;
/*
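
The rewritten condition widens the old check just enough to admit the rootfs case. Enumerating the three situations (sketch; check_mnt() means "the mount is in the caller's namespace"):

/*  has parent?  anon ns?   old check rejects...    new check rejects...
 *  yes          -          iff !check_mnt(mnt)     iff !check_mnt(mnt)
 *  no           yes        never (detached mount)  never (detached mount)
 *  no           no         always                  iff !check_mnt(mnt),
 *  (e.g. rootfs)                                   so rootfs in the caller's
 *                                                  namespace is now allowed
 */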
diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig
index b4db21022cb4..bec805e0c44c 100644
--- a/fs/netfs/Kconfig
+++ b/fs/netfs/Kconfig
@@ -21,3 +21,42 @@ config NETFS_STATS
multi-CPU system these may be on cachelines that keep bouncing
between CPUs. On the other hand, the stats are very useful for
debugging purposes. Saying 'Y' here is recommended.
+
+config FSCACHE
+ bool "General filesystem local caching manager"
+ depends on NETFS_SUPPORT
+ help
+ This option enables a generic filesystem caching manager that can be
+ used by various network and other filesystems to cache data locally.
+ Different sorts of caches can be plugged in, depending on the
+ resources available.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_STATS
+ bool "Gather statistical information on local caching"
+ depends on FSCACHE && PROC_FS
+ select NETFS_STATS
+ help
+ This option causes statistical information to be gathered on local
+	  caching and exported through the file:
+
+ /proc/fs/fscache/stats
+
+ The gathering of statistics adds a certain amount of overhead to
+	  execution as there are quite a few stats gathered, and on a
+ multi-CPU system these may be on cachelines that keep bouncing
+ between CPUs. On the other hand, the stats are very useful for
+ debugging purposes. Saying 'Y' here is recommended.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_DEBUG
+ bool "Debug FS-Cache"
+ depends on FSCACHE
+ help
+ This permits debugging to be dynamically enabled in the local caching
+ management module. If this is set, the debugging output may be
+	  enabled by setting bits in /sys/module/fscache/parameters/debug.
+
+ See Documentation/filesystems/caching/fscache.rst for more information.
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
index 386d6fb92793..d4d1d799819e 100644
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -2,11 +2,29 @@
netfs-y := \
buffered_read.o \
+ buffered_write.o \
+ direct_read.o \
+ direct_write.o \
io.o \
iterator.o \
+ locking.o \
main.o \
- objects.o
+ misc.o \
+ objects.o \
+ output.o
netfs-$(CONFIG_NETFS_STATS) += stats.o
-obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
+netfs-$(CONFIG_FSCACHE) += \
+ fscache_cache.o \
+ fscache_cookie.o \
+ fscache_io.o \
+ fscache_main.o \
+ fscache_volume.o
+
+ifeq ($(CONFIG_PROC_FS),y)
+netfs-$(CONFIG_FSCACHE) += fscache_proc.o
+endif
+netfs-$(CONFIG_FSCACHE_STATS) += fscache_stats.o
+
+obj-$(CONFIG_NETFS_SUPPORT) += netfs.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 2cd3ccf4c439..3298c29b5548 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -16,6 +16,7 @@
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
+ struct netfs_folio *finfo;
struct folio *folio;
pgoff_t start_page = rreq->start / PAGE_SIZE;
pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
@@ -63,6 +64,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
break;
}
if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
folio_start_fscache(folio);
folio_started = true;
}
@@ -86,11 +88,20 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
if (!pg_failed) {
flush_dcache_folio(folio);
+ finfo = netfs_folio_info(folio);
+ if (finfo) {
+ trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
+ if (finfo->netfs_group)
+ folio_change_private(folio, finfo->netfs_group);
+ else
+ folio_detach_private(folio);
+ kfree(finfo);
+ }
folio_mark_uptodate(folio);
}
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
- if (folio_index(folio) == rreq->no_unlock_folio &&
+ if (folio->index == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
_debug("no unlock");
else
@@ -147,6 +158,15 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
}
}
+/*
+ * Begin an operation, and fetch the stored zero point value from the cookie if
+ * available.
+ */
+static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+ return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
+
/**
* netfs_readahead - Helper to manage a read request
* @ractl: The description of the readahead request
@@ -180,11 +200,9 @@ void netfs_readahead(struct readahead_control *ractl)
if (IS_ERR(rreq))
return;
- if (ctx->ops->begin_cache_operation) {
- ret = ctx->ops->begin_cache_operation(rreq);
- if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
- goto cleanup_free;
- }
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto cleanup_free;
netfs_stat(&netfs_n_rh_readahead);
trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
@@ -192,6 +210,10 @@ void netfs_readahead(struct readahead_control *ractl)
netfs_rreq_expand(rreq, ractl);
+ /* Set up the output buffer */
+ iov_iter_xarray(&rreq->iter, ITER_DEST, &ractl->mapping->i_pages,
+ rreq->start, rreq->len);
+
/* Drop the refs on the folios here rather than in the cache or
* filesystem. The locks will be dropped in netfs_rreq_unlock().
*/
@@ -199,6 +221,7 @@ void netfs_readahead(struct readahead_control *ractl)
;
netfs_begin_read(rreq, false);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
return;
cleanup_free:
@@ -223,12 +246,13 @@ EXPORT_SYMBOL(netfs_readahead);
*/
int netfs_read_folio(struct file *file, struct folio *folio)
{
- struct address_space *mapping = folio_file_mapping(folio);
+ struct address_space *mapping = folio->mapping;
struct netfs_io_request *rreq;
struct netfs_inode *ctx = netfs_inode(mapping->host);
+ struct folio *sink = NULL;
int ret;
- _enter("%lx", folio_index(folio));
+ _enter("%lx", folio->index);
rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
@@ -238,15 +262,64 @@ int netfs_read_folio(struct file *file, struct folio *folio)
goto alloc_error;
}
- if (ctx->ops->begin_cache_operation) {
- ret = ctx->ops->begin_cache_operation(rreq);
- if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
- goto discard;
- }
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto discard;
netfs_stat(&netfs_n_rh_readpage);
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
- return netfs_begin_read(rreq, true);
+
+ /* Set up the output buffer */
+ if (folio_test_dirty(folio)) {
+ /* Handle someone trying to read from an unflushed streaming
+ * write. We fiddle the buffer so that a gap at the beginning
+ * and/or a gap at the end get copied to, but the middle is
+ * discarded.
+ */
+ struct netfs_folio *finfo = netfs_folio_info(folio);
+ struct bio_vec *bvec;
+ unsigned int from = finfo->dirty_offset;
+ unsigned int to = from + finfo->dirty_len;
+ unsigned int off = 0, i = 0;
+ size_t flen = folio_size(folio);
+ size_t nr_bvec = flen / PAGE_SIZE + 2;
+ size_t part;
+
+ ret = -ENOMEM;
+ bvec = kmalloc_array(nr_bvec, sizeof(*bvec), GFP_KERNEL);
+ if (!bvec)
+ goto discard;
+
+ sink = folio_alloc(GFP_KERNEL, 0);
+ if (!sink)
+ goto discard;
+
+ trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
+
+ rreq->direct_bv = bvec;
+ rreq->direct_bv_count = nr_bvec;
+ if (from > 0) {
+ bvec_set_folio(&bvec[i++], folio, from, 0);
+ off = from;
+ }
+ while (off < to) {
+ part = min_t(size_t, to - off, PAGE_SIZE);
+ bvec_set_folio(&bvec[i++], sink, part, 0);
+ off += part;
+ }
+ if (to < flen)
+ bvec_set_folio(&bvec[i++], folio, flen - to, to);
+ iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
+ } else {
+ iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+ rreq->start, rreq->len);
+ }
+
+ ret = netfs_begin_read(rreq, true);
+ if (sink)
+ folio_put(sink);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ return ret < 0 ? ret : 0;
discard:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
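
The dirty-folio branch above builds a three-part destination buffer: server data that lands over the still-clean head and tail of the folio is kept, while the bytes that would overwrite the unflushed streaming write in the middle are diverted into a throwaway sink folio. Schematically, for a folio of size flen with dirty bytes [from, to):

/*  folio:   |--- head ---|==== dirty [from, to) ====|--- tail ---|
 *
 *  bvec[0]:    folio, len = from, offset 0     - head gap is filled
 *  bvec[1..n]: sink, PAGE_SIZE chunks          - middle is read but discarded
 *  bvec[n+1]:  folio, len = flen - to, at to   - tail gap is filled
 */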
@@ -387,14 +460,12 @@ retry:
ret = PTR_ERR(rreq);
goto error;
}
- rreq->no_unlock_folio = folio_index(folio);
+ rreq->no_unlock_folio = folio->index;
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
- if (ctx->ops->begin_cache_operation) {
- ret = ctx->ops->begin_cache_operation(rreq);
- if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
- goto error_put;
- }
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto error_put;
netfs_stat(&netfs_n_rh_write_begin);
trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
@@ -405,6 +476,10 @@ retry:
ractl._nr_pages = folio_nr_pages(folio);
netfs_rreq_expand(rreq, &ractl);
+ /* Set up the output buffer */
+ iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+ rreq->start, rreq->len);
+
/* We hold the folio locks, so we can drop the references */
folio_get(folio);
while (readahead_folio(&ractl))
@@ -413,6 +488,7 @@ retry:
ret = netfs_begin_read(rreq, true);
if (ret < 0)
goto error;
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
have_folio:
ret = folio_wait_fscache_killable(folio);
@@ -434,3 +510,124 @@ error:
return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
+
+/*
+ * Preload the data into a page we're proposing to write into.
+ */
+int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+ size_t offset, size_t len)
+{
+ struct netfs_io_request *rreq;
+ struct address_space *mapping = folio->mapping;
+ struct netfs_inode *ctx = netfs_inode(mapping->host);
+ unsigned long long start = folio_pos(folio);
+ size_t flen = folio_size(folio);
+ int ret;
+
+ _enter("%zx @%llx", flen, start);
+
+ ret = -ENOMEM;
+
+ rreq = netfs_alloc_request(mapping, file, start, flen,
+ NETFS_READ_FOR_WRITE);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto error;
+ }
+
+ rreq->no_unlock_folio = folio->index;
+ __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+ ret = netfs_begin_cache_read(rreq, ctx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto error_put;
+
+ netfs_stat(&netfs_n_rh_write_begin);
+ trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
+
+ /* Set up the output buffer */
+ iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+ rreq->start, rreq->len);
+
+ ret = netfs_begin_read(rreq, true);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ return ret;
+
+error_put:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+error:
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/**
+ * netfs_buffered_read_iter - Filesystem buffered I/O read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead. When no data can be read,
+ * -EAGAIN shall be returned. When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+
+ if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) ||
+ test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)))
+ return -EINVAL;
+
+ ret = netfs_start_io_read(inode);
+ if (ret == 0) {
+ ret = filemap_read(iocb, iter, 0);
+ netfs_end_io_read(inode);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(netfs_buffered_read_iter);
+
+/**
+ * netfs_file_read_iter - Generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead. When no data can be read,
+ * -EAGAIN shall be returned. When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host);
+
+ if ((iocb->ki_flags & IOCB_DIRECT) ||
+ test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
+ return netfs_unbuffered_read_iter(iocb, iter);
+
+ return netfs_buffered_read_iter(iocb, iter);
+}
+EXPORT_SYMBOL(netfs_file_read_iter);
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
new file mode 100644
index 000000000000..9a0d32e4b422
--- /dev/null
+++ b/fs/netfs/buffered_write.c
@@ -0,0 +1,1257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem high-level write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/pagevec.h>
+#include "internal.h"
+
+/*
+ * Determined write method. Adjust netfs_folio_traces if this is changed.
+ */
+enum netfs_how_to_modify {
+ NETFS_FOLIO_IS_UPTODATE, /* Folio is uptodate already */
+ NETFS_JUST_PREFETCH, /* We have to read the folio anyway */
+ NETFS_WHOLE_FOLIO_MODIFY, /* We're going to overwrite the whole folio */
+ NETFS_MODIFY_AND_CLEAR, /* We can assume there is no data to be downloaded. */
+ NETFS_STREAMING_WRITE, /* Store incomplete data in non-uptodate page. */
+ NETFS_STREAMING_WRITE_CONT, /* Continue streaming write. */
+ NETFS_FLUSH_CONTENT, /* Flush incompatible content. */
+};
+
+static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
+
+static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
+{
+ if (netfs_group && !folio_get_private(folio))
+ folio_attach_private(folio, netfs_get_group(netfs_group));
+}
+
+#if IS_ENABLED(CONFIG_FSCACHE)
+static void netfs_folio_start_fscache(bool caching, struct folio *folio)
+{
+ if (caching)
+ folio_start_fscache(folio);
+}
+#else
+static void netfs_folio_start_fscache(bool caching, struct folio *folio)
+{
+}
+#endif
+
+/*
+ * Decide how we should modify a folio. We might be attempting to do
+ * write-streaming, in which case we don't want to do a local RMW cycle if we can
+ * avoid it. If we're doing local caching or content crypto, we award that
+ * priority over avoiding RMW. If the file is open readably, then we also
+ * assume that we may want to read what we wrote.
+ */
+static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
+ struct file *file,
+ struct folio *folio,
+ void *netfs_group,
+ size_t flen,
+ size_t offset,
+ size_t len,
+ bool maybe_trouble)
+{
+ struct netfs_folio *finfo = netfs_folio_info(folio);
+ loff_t pos = folio_file_pos(folio);
+
+ _enter("");
+
+ if (netfs_folio_group(folio) != netfs_group)
+ return NETFS_FLUSH_CONTENT;
+
+ if (folio_test_uptodate(folio))
+ return NETFS_FOLIO_IS_UPTODATE;
+
+ if (pos >= ctx->zero_point)
+ return NETFS_MODIFY_AND_CLEAR;
+
+ if (!maybe_trouble && offset == 0 && len >= flen)
+ return NETFS_WHOLE_FOLIO_MODIFY;
+
+ if (file->f_mode & FMODE_READ)
+ goto no_write_streaming;
+ if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
+ goto no_write_streaming;
+
+ if (netfs_is_cache_enabled(ctx)) {
+ /* We don't want to get a streaming write on a file that loses
+ * caching service temporarily because the backing store got
+ * culled.
+ */
+ if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
+ set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags);
+ goto no_write_streaming;
+ }
+
+ if (!finfo)
+ return NETFS_STREAMING_WRITE;
+
+ /* We can continue a streaming write only if it continues on from the
+ * previous. If it overlaps, we must flush lest we suffer a partial
+ * copy and disjoint dirty regions.
+ */
+ if (offset == finfo->dirty_offset + finfo->dirty_len)
+ return NETFS_STREAMING_WRITE_CONT;
+ return NETFS_FLUSH_CONTENT;
+
+no_write_streaming:
+ if (finfo) {
+ netfs_stat(&netfs_n_wh_wstream_conflict);
+ return NETFS_FLUSH_CONTENT;
+ }
+ return NETFS_JUST_PREFETCH;
+}
+
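
A few concrete outcomes of the decision above, read off the checks in order (illustrative summary, not exhaustive):

/* - group mismatch on the folio          -> NETFS_FLUSH_CONTENT
 * - folio already uptodate               -> NETFS_FOLIO_IS_UPTODATE
 * - write starts at/after zero_point     -> NETFS_MODIFY_AND_CLEAR
 *   (nothing on the server to read back; zero the rest of the folio)
 * - copy covers the whole folio          -> NETFS_WHOLE_FOLIO_MODIFY
 * - file readable, or cache in use       -> NETFS_JUST_PREFETCH (do the RMW),
 *   unless a streaming record exists     -> NETFS_FLUSH_CONTENT
 * - otherwise                            -> NETFS_STREAMING_WRITE, extended by
 *   NETFS_STREAMING_WRITE_CONT only when the new range abuts the recorded
 *   dirty region; a disjoint range       -> NETFS_FLUSH_CONTENT
 */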
+/*
+ * Grab a folio for writing and lock it. Attempt to allocate as large a folio
+ * as possible to hold as much of the remaining length as possible in one go.
+ */
+static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
+ loff_t pos, size_t part)
+{
+ pgoff_t index = pos / PAGE_SIZE;
+ fgf_t fgp_flags = FGP_WRITEBEGIN;
+
+ if (mapping_large_folio_support(mapping))
+ fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);
+
+ return __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+}
+
+/**
+ * netfs_perform_write - Copy data into the pagecache.
+ * @iocb: The operation parameters
+ * @iter: The source buffer
+ * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ *
+ * Copy data into pagecache pages attached to the inode specified by @iocb.
+ * The caller must hold appropriate inode locks.
+ *
+ * Dirty pages are tagged with a netfs_folio struct if they're not up to date
+ * to indicate the range modified. Dirty pages may also be tagged with a
+ * netfs-specific grouping such that data from an old group gets flushed before
+ * a new one is started.
+ */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = inode->i_mapping;
+ struct netfs_inode *ctx = netfs_inode(inode);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .for_sync = true,
+ .nr_to_write = LONG_MAX,
+ .range_start = iocb->ki_pos,
+ .range_end = iocb->ki_pos + iter->count,
+ };
+ struct netfs_io_request *wreq = NULL;
+ struct netfs_folio *finfo;
+ struct folio *folio;
+ enum netfs_how_to_modify howto;
+ enum netfs_folio_trace trace;
+ unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
+ ssize_t written = 0, ret;
+ loff_t i_size, pos = iocb->ki_pos, from, to;
+ size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
+ bool maybe_trouble = false;
+
+ if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
+ iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
+ ) {
+ if (pos < i_size_read(inode)) {
+ ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
+ if (ret < 0) {
+ goto out;
+ }
+ }
+
+ wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+
+ wreq = netfs_begin_writethrough(iocb, iter->count);
+ if (IS_ERR(wreq)) {
+ wbc_detach_inode(&wbc);
+ ret = PTR_ERR(wreq);
+ wreq = NULL;
+ goto out;
+ }
+ if (!is_sync_kiocb(iocb))
+ wreq->iocb = iocb;
+ wreq->cleanup = netfs_cleanup_buffered_write;
+ }
+
+ do {
+ size_t flen;
+ size_t offset; /* Offset into pagecache folio */
+ size_t part; /* Bytes to write to folio */
+ size_t copied; /* Bytes copied from user */
+
+ ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
+ if (unlikely(ret < 0))
+ break;
+
+ offset = pos & (max_chunk - 1);
+ part = min(max_chunk - offset, iov_iter_count(iter));
+
+ /* Bring in the user pages that we will copy from _first_ lest
+ * we hit a nasty deadlock on copying from the same page as
+ * we're writing to, without it being marked uptodate.
+ *
+ * Not only is this an optimisation, but it is also required to
+ * check that the address is actually valid, when atomic
+ * usercopies are used below.
+ *
+ * We rely on the page being held onto long enough by the LRU
+ * that we can grab it below if this causes it to be read.
+ */
+ ret = -EFAULT;
+ if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
+ break;
+
+ folio = netfs_grab_folio_for_write(mapping, pos, part);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
+ break;
+ }
+
+ flen = folio_size(folio);
+ offset = pos & (flen - 1);
+ part = min_t(size_t, flen - offset, part);
+
+ if (signal_pending(current)) {
+ ret = written ? -EINTR : -ERESTARTSYS;
+ goto error_folio_unlock;
+ }
+
+ /* See if we need to prefetch the area we're going to modify.
+ * We need to do this before we get a lock on the folio in case
+ * there's more than one writer competing for the same cache
+ * block.
+ */
+ howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
+ flen, offset, part, maybe_trouble);
+ _debug("howto %u", howto);
+ switch (howto) {
+ case NETFS_JUST_PREFETCH:
+ ret = netfs_prefetch_for_write(file, folio, offset, part);
+ if (ret < 0) {
+ _debug("prefetch = %zd", ret);
+ goto error_folio_unlock;
+ }
+ break;
+ case NETFS_FOLIO_IS_UPTODATE:
+ case NETFS_WHOLE_FOLIO_MODIFY:
+ case NETFS_STREAMING_WRITE_CONT:
+ break;
+ case NETFS_MODIFY_AND_CLEAR:
+ zero_user_segment(&folio->page, 0, offset);
+ break;
+ case NETFS_STREAMING_WRITE:
+ ret = -EIO;
+ if (WARN_ON(folio_get_private(folio)))
+ goto error_folio_unlock;
+ break;
+ case NETFS_FLUSH_CONTENT:
+ trace_netfs_folio(folio, netfs_flush_content);
+ from = folio_pos(folio);
+ to = from + folio_size(folio) - 1;
+ folio_unlock(folio);
+ folio_put(folio);
+ ret = filemap_write_and_wait_range(mapping, from, to);
+ if (ret < 0)
+ goto error_folio_unlock;
+ continue;
+ }
+
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_folio(folio);
+
+ copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
+
+ flush_dcache_folio(folio);
+
+ /* Deal with a (partially) failed copy */
+ if (copied == 0) {
+ ret = -EFAULT;
+ goto error_folio_unlock;
+ }
+
+ trace = (enum netfs_folio_trace)howto;
+ switch (howto) {
+ case NETFS_FOLIO_IS_UPTODATE:
+ case NETFS_JUST_PREFETCH:
+ netfs_set_group(folio, netfs_group);
+ break;
+ case NETFS_MODIFY_AND_CLEAR:
+ zero_user_segment(&folio->page, offset + copied, flen);
+ netfs_set_group(folio, netfs_group);
+ folio_mark_uptodate(folio);
+ break;
+ case NETFS_WHOLE_FOLIO_MODIFY:
+ if (unlikely(copied < part)) {
+ maybe_trouble = true;
+ iov_iter_revert(iter, copied);
+ copied = 0;
+ goto retry;
+ }
+ netfs_set_group(folio, netfs_group);
+ folio_mark_uptodate(folio);
+ break;
+ case NETFS_STREAMING_WRITE:
+ if (offset == 0 && copied == flen) {
+ netfs_set_group(folio, netfs_group);
+ folio_mark_uptodate(folio);
+ trace = netfs_streaming_filled_page;
+ break;
+ }
+ finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ if (!finfo) {
+ iov_iter_revert(iter, copied);
+ ret = -ENOMEM;
+ goto error_folio_unlock;
+ }
+ finfo->netfs_group = netfs_get_group(netfs_group);
+ finfo->dirty_offset = offset;
+ finfo->dirty_len = copied;
+ folio_attach_private(folio, (void *)((unsigned long)finfo |
+ NETFS_FOLIO_INFO));
+ break;
+ case NETFS_STREAMING_WRITE_CONT:
+ finfo = netfs_folio_info(folio);
+ finfo->dirty_len += copied;
+ if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
+ if (finfo->netfs_group)
+ folio_change_private(folio, finfo->netfs_group);
+ else
+ folio_detach_private(folio);
+ folio_mark_uptodate(folio);
+ kfree(finfo);
+ trace = netfs_streaming_cont_filled_page;
+ }
+ break;
+ default:
+ WARN(true, "Unexpected modify type %u ix=%lx\n",
+ howto, folio->index);
+ ret = -EIO;
+ goto error_folio_unlock;
+ }
+
+ trace_netfs_folio(folio, trace);
+
+ /* Update the inode size if we moved the EOF marker */
+ i_size = i_size_read(inode);
+ pos += copied;
+ if (pos > i_size) {
+ if (ctx->ops->update_i_size) {
+ ctx->ops->update_i_size(inode, pos);
+ } else {
+ i_size_write(inode, pos);
+#if IS_ENABLED(CONFIG_FSCACHE)
+ fscache_update_cookie(ctx->cache, NULL, &pos);
+#endif
+ }
+ }
+ written += copied;
+
+ if (likely(!wreq)) {
+ folio_mark_dirty(folio);
+ } else {
+ if (folio_test_dirty(folio))
+ /* Sigh. mmap. */
+ folio_clear_dirty_for_io(folio);
+ /* We make multiple writes to the folio... */
+ if (!folio_test_writeback(folio)) {
+ folio_wait_fscache(folio);
+ folio_start_writeback(folio);
+ folio_start_fscache(folio);
+ if (wreq->iter.count == 0)
+ trace_netfs_folio(folio, netfs_folio_trace_wthru);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
+ }
+ netfs_advance_writethrough(wreq, copied,
+ offset + copied == flen);
+ }
+ retry:
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
+
+ cond_resched();
+ } while (iov_iter_count(iter));
+
+out:
+ if (unlikely(wreq)) {
+ ret = netfs_end_writethrough(wreq, iocb);
+ wbc_detach_inode(&wbc);
+ if (ret == -EIOCBQUEUED)
+ return ret;
+ }
+
+ iocb->ki_pos += written;
+ _leave(" = %zd [%zd]", written, ret);
+ return written ? written : ret;
+
+error_folio_unlock:
+ folio_unlock(folio);
+ folio_put(folio);
+ goto out;
+}
+EXPORT_SYMBOL(netfs_perform_write);
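
One subtlety in the copy loop above: copy_folio_from_iter_atomic() can return short of the requested length (the source user page may have been reclaimed between the fault-in and the copy). For the whole-folio case the code rewinds the iterator and retries rather than marking a half-filled folio uptodate. The rewind step in isolation (sketch):

copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
if (copied < part) {
	/* Undo the partial consumption so the next attempt sees the
	 * full source range again from the same starting position.
	 */
	iov_iter_revert(iter, copied);
	copied = 0;
}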
+
+/**
+ * netfs_buffered_write_iter_locked - write data to a file
+ * @iocb: IO state structure (file, offset, etc.)
+ * @from: iov_iter with data to write
+ * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ *
+ * This function does all the work needed for actually writing data to a
+ * file. It does all basic checks, removes SUID from the file, updates
+ * modification times and calls proper subroutines depending on whether we
+ * do direct IO or a standard buffered write.
+ *
+ * The caller must hold appropriate locks around this function and have called
+ * generic_write_checks() already. The caller is also responsible for doing
+ * any necessary syncing afterwards.
+ *
+ * This function does *not* take care of syncing data in case of O_SYNC write.
+ * A caller has to handle it. This is mainly due to the fact that we want to
+ * avoid syncing under i_rwsem.
+ *
+ * Return:
+ * * number of bytes written, even for truncated writes
+ * * negative error code if no data has been written at all
+ */
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t ret;
+
+ trace_netfs_write_iter(iocb, from);
+
+ ret = file_remove_privs(file);
+ if (ret)
+ return ret;
+
+ ret = file_update_time(file);
+ if (ret)
+ return ret;
+
+ return netfs_perform_write(iocb, from, netfs_group);
+}
+EXPORT_SYMBOL(netfs_buffered_write_iter_locked);
+
+/**
+ * netfs_file_write_iter - write data to a file
+ * @iocb: IO state structure
+ * @from: iov_iter with data to write
+ *
+ * Perform a write to a file, writing into the pagecache if possible and doing
+ * an unbuffered write instead if not.
+ *
+ * Return:
+ * * Negative error code if no data has been written at all or
+ * vfs_fsync_range() failed for a synchronous write
+ * * Number of bytes written, even for truncated writes
+ */
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+
+ _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if ((iocb->ki_flags & IOCB_DIRECT) ||
+ test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
+ return netfs_unbuffered_write_iter(iocb, from);
+
+ ret = netfs_start_io_write(inode);
+ if (ret < 0)
+ return ret;
+
+ ret = generic_write_checks(iocb, from);
+ if (ret > 0)
+ ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
+ netfs_end_io_write(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_file_write_iter);
+
+/*
+ * Notification that a previously read-only page is about to become writable.
+ * Note that the caller indicates a single page of a multipage folio.
+ */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
+{
+ struct folio *folio = page_folio(vmf->page);
+ struct file *file = vmf->vma->vm_file;
+ struct inode *inode = file_inode(file);
+ vm_fault_t ret = VM_FAULT_RETRY;
+ int err;
+
+ _enter("%lx", folio->index);
+
+ sb_start_pagefault(inode->i_sb);
+
+ if (folio_wait_writeback_killable(folio))
+ goto out;
+
+ if (folio_lock_killable(folio) < 0)
+ goto out;
+
+ /* Can we see a streaming write here? */
+ if (WARN_ON(!folio_test_uptodate(folio))) {
+ ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
+ goto out;
+ }
+
+ if (netfs_folio_group(folio) != netfs_group) {
+ folio_unlock(folio);
+ err = filemap_fdatawait_range(inode->i_mapping,
+ folio_pos(folio),
+ folio_pos(folio) + folio_size(folio));
+ switch (err) {
+ case 0:
+ ret = VM_FAULT_RETRY;
+ goto out;
+ case -ENOMEM:
+ ret = VM_FAULT_OOM;
+ goto out;
+ default:
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+ }
+
+ if (folio_test_dirty(folio))
+ trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
+ netfs_set_group(folio, netfs_group);
+ file_update_time(file);
+ ret = VM_FAULT_LOCKED;
+out:
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_page_mkwrite);
+
+/*
+ * Kill all the pages in the given range
+ */
+static void netfs_kill_pages(struct address_space *mapping,
+ loff_t start, loff_t len)
+{
+ struct folio *folio;
+ pgoff_t index = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
+
+ _enter("%llx-%llx", start, start + len - 1);
+
+ do {
+ _debug("kill %lx (to %lx)", index, last);
+
+ folio = filemap_get_folio(mapping, index);
+ if (IS_ERR(folio)) {
+ next = index + 1;
+ continue;
+ }
+
+ next = folio_next_index(folio);
+
+ trace_netfs_folio(folio, netfs_folio_trace_kill);
+ folio_clear_uptodate(folio);
+ if (folio_test_fscache(folio))
+ folio_end_fscache(folio);
+ folio_end_writeback(folio);
+ folio_lock(folio);
+ generic_error_remove_folio(mapping, folio);
+ folio_unlock(folio);
+ folio_put(folio);
+
+ } while (index = next, index <= last);
+
+ _leave("");
+}
+
+/*
+ * Redirty all the pages in a given range.
+ */
+static void netfs_redirty_pages(struct address_space *mapping,
+ loff_t start, loff_t len)
+{
+ struct folio *folio;
+ pgoff_t index = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
+
+ _enter("%llx-%llx", start, start + len - 1);
+
+ do {
+ _debug("redirty %llx @%llx", len, start);
+
+ folio = filemap_get_folio(mapping, index);
+ if (IS_ERR(folio)) {
+ next = index + 1;
+ continue;
+ }
+
+ next = folio_next_index(folio);
+ trace_netfs_folio(folio, netfs_folio_trace_redirty);
+ filemap_dirty_folio(mapping, folio);
+ if (folio_test_fscache(folio))
+ folio_end_fscache(folio);
+ folio_end_writeback(folio);
+ folio_put(folio);
+ } while (index = next, index <= last);
+
+ balance_dirty_pages_ratelimited(mapping);
+
+ _leave("");
+}
+
+/*
+ * Completion of write to server
+ */
+static void netfs_pages_written_back(struct netfs_io_request *wreq)
+{
+ struct address_space *mapping = wreq->mapping;
+ struct netfs_folio *finfo;
+ struct netfs_group *group = NULL;
+ struct folio *folio;
+ pgoff_t last;
+ int gcount = 0;
+
+ XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE);
+
+ _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+
+ rcu_read_lock();
+
+ last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
+ xas_for_each(&xas, folio, last) {
+ WARN(!folio_test_writeback(folio),
+ "bad %zx @%llx page %lx %lx\n",
+ wreq->len, wreq->start, folio->index, last);
+
+ if ((finfo = netfs_folio_info(folio))) {
+ /* Streaming writes cannot be redirtied whilst under
+ * writeback, so discard the streaming record.
+ */
+ folio_detach_private(folio);
+ group = finfo->netfs_group;
+ gcount++;
+ trace_netfs_folio(folio, netfs_folio_trace_clear_s);
+ kfree(finfo);
+ } else if ((group = netfs_folio_group(folio))) {
+ /* Need to detach the group pointer if the page didn't
+ * get redirtied. If it has been redirtied, then it
+ * must be within the same group.
+ */
+ if (folio_test_dirty(folio)) {
+ trace_netfs_folio(folio, netfs_folio_trace_redirtied);
+ goto end_wb;
+ }
+ if (folio_trylock(folio)) {
+ if (!folio_test_dirty(folio)) {
+ folio_detach_private(folio);
+ gcount++;
+ trace_netfs_folio(folio, netfs_folio_trace_clear_g);
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_redirtied);
+ }
+ folio_unlock(folio);
+ goto end_wb;
+ }
+
+ xas_pause(&xas);
+ rcu_read_unlock();
+ folio_lock(folio);
+ if (!folio_test_dirty(folio)) {
+ folio_detach_private(folio);
+ gcount++;
+ trace_netfs_folio(folio, netfs_folio_trace_clear_g);
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_redirtied);
+ }
+ folio_unlock(folio);
+ rcu_read_lock();
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_clear);
+ }
+ end_wb:
+ if (folio_test_fscache(folio))
+ folio_end_fscache(folio);
+ xas_advance(&xas, folio_next_index(folio) - 1);
+ folio_end_writeback(folio);
+ }
+
+ rcu_read_unlock();
+ netfs_put_group_many(group, gcount);
+ _leave("");
+}
+
+/*
+ * Deal with the disposition of the folios that are under writeback to close
+ * out the operation.
+ */
+static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq)
+{
+ struct address_space *mapping = wreq->mapping;
+
+ _enter("");
+
+ switch (wreq->error) {
+ case 0:
+ netfs_pages_written_back(wreq);
+ break;
+
+ default:
+ pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error);
+ fallthrough;
+ case -EACCES:
+ case -EPERM:
+ case -ENOKEY:
+ case -EKEYEXPIRED:
+ case -EKEYREJECTED:
+ case -EKEYREVOKED:
+ case -ENETRESET:
+ case -EDQUOT:
+ case -ENOSPC:
+ netfs_redirty_pages(mapping, wreq->start, wreq->len);
+ break;
+
+ case -EROFS:
+ case -EIO:
+ case -EREMOTEIO:
+ case -EFBIG:
+ case -ENOENT:
+ case -ENOMEDIUM:
+ case -ENXIO:
+ netfs_kill_pages(mapping, wreq->start, wreq->len);
+ break;
+ }
+
+ if (wreq->error)
+ mapping_set_error(mapping, wreq->error);
+ if (wreq->netfs_ops->done)
+ wreq->netfs_ops->done(wreq);
+}
+
+/*
+ * Extend the region to be written back to include subsequent contiguously
+ * dirty pages if possible, but don't sleep while doing so.
+ *
+ * If this page holds new content, then we can include filler zeros in the
+ * writeback.
+ */
+static void netfs_extend_writeback(struct address_space *mapping,
+ struct netfs_group *group,
+ struct xa_state *xas,
+ long *_count,
+ loff_t start,
+ loff_t max_len,
+ bool caching,
+ size_t *_len,
+ size_t *_top)
+{
+ struct netfs_folio *finfo;
+ struct folio_batch fbatch;
+ struct folio *folio;
+ unsigned int i;
+ pgoff_t index = (start + *_len) / PAGE_SIZE;
+ size_t len;
+ void *priv;
+ bool stop = true;
+
+ folio_batch_init(&fbatch);
+
+ do {
+ /* Firstly, we gather up a batch of contiguous dirty pages
+ * under the RCU read lock - but we can't clear the dirty flags
+ * there if any of those pages are mapped.
+ */
+ rcu_read_lock();
+
+ xas_for_each(xas, folio, ULONG_MAX) {
+ stop = true;
+ if (xas_retry(xas, folio))
+ continue;
+ if (xa_is_value(folio))
+ break;
+ if (folio->index != index) {
+ xas_reset(xas);
+ break;
+ }
+
+ if (!folio_try_get_rcu(folio)) {
+ xas_reset(xas);
+ continue;
+ }
+
+ /* Has the folio moved or been split? */
+ if (unlikely(folio != xas_reload(xas))) {
+ folio_put(folio);
+ xas_reset(xas);
+ break;
+ }
+
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
+ xas_reset(xas);
+ break;
+ }
+ if (!folio_test_dirty(folio) ||
+ folio_test_writeback(folio) ||
+ folio_test_fscache(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
+ xas_reset(xas);
+ break;
+ }
+
+ stop = false;
+ len = folio_size(folio);
+ priv = folio_get_private(folio);
+ if ((const struct netfs_group *)priv != group) {
+ stop = true;
+ finfo = netfs_folio_info(folio);
+ if (finfo->netfs_group != group ||
+ finfo->dirty_offset > 0) {
+ folio_unlock(folio);
+ folio_put(folio);
+ xas_reset(xas);
+ break;
+ }
+ len = finfo->dirty_len;
+ }
+
+ *_top += folio_size(folio);
+ index += folio_nr_pages(folio);
+ *_count -= folio_nr_pages(folio);
+ *_len += len;
+ if (*_len >= max_len || *_count <= 0)
+ stop = true;
+
+ if (!folio_batch_add(&fbatch, folio))
+ break;
+ if (stop)
+ break;
+ }
+
+ xas_pause(xas);
+ rcu_read_unlock();
+
+ /* Now, if we obtained any folios, we can shift them to being
+ * writable and mark them for caching.
+ */
+ if (!folio_batch_count(&fbatch))
+ break;
+
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ folio = fbatch.folios[i];
+ trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+
+ if (!folio_clear_dirty_for_io(folio))
+ BUG();
+ folio_start_writeback(folio);
+ netfs_folio_start_fscache(caching, folio);
+ folio_unlock(folio);
+ }
+
+ folio_batch_release(&fbatch);
+ cond_resched();
+ } while (!stop);
+}
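
The gather loop above is the standard two-phase xarray walk: collect folio references under rcu_read_lock(), where sleeping is forbidden, then drop out of RCU to do the sleepable work on the batch. Its skeleton, simplified (mapping and index assumed to be in scope):

XA_STATE(xas, &mapping->i_pages, index);
struct folio_batch fbatch;
struct folio *folio;

folio_batch_init(&fbatch);
for (;;) {
	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		/* Folios can vanish under us: take a ref, then recheck. */
		if (!folio_try_get_rcu(folio)) {
			xas_reset(&xas);
			continue;
		}
		if (unlikely(folio != xas_reload(&xas))) {
			folio_put(folio);	/* moved or split under us */
			xas_reset(&xas);
			break;
		}
		if (!folio_batch_add(&fbatch, folio))
			break;			/* batch is full */
	}
	xas_pause(&xas);	/* make it legal to resume the walk later */
	rcu_read_unlock();

	if (!folio_batch_count(&fbatch))
		break;

	/* Sleepable per-folio work goes here. */

	folio_batch_release(&fbatch);	/* drops the gathered references */
}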
+
+/*
+ * Synchronously write back the locked page and any subsequent non-locked dirty
+ * pages.
+ */
+static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct netfs_group *group,
+ struct xa_state *xas,
+ struct folio *folio,
+ unsigned long long start,
+ unsigned long long end)
+{
+ struct netfs_io_request *wreq;
+ struct netfs_folio *finfo;
+ struct netfs_inode *ctx = netfs_inode(mapping->host);
+ unsigned long long i_size = i_size_read(&ctx->inode);
+ size_t len, max_len;
+ bool caching = netfs_is_cache_enabled(ctx);
+ long count = wbc->nr_to_write;
+ int ret;
+
+ _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
+
+ wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
+ NETFS_WRITEBACK);
+ if (IS_ERR(wreq)) {
+ folio_unlock(folio);
+ return PTR_ERR(wreq);
+ }
+
+ if (!folio_clear_dirty_for_io(folio))
+ BUG();
+ folio_start_writeback(folio);
+ netfs_folio_start_fscache(caching, folio);
+
+ count -= folio_nr_pages(folio);
+
+ /* Find all consecutive lockable dirty pages that have contiguous
+ * written regions, stopping when we find a page that is not
+ * immediately lockable, is not dirty or is missing, or we reach the
+ * end of the range.
+ */
+ trace_netfs_folio(folio, netfs_folio_trace_store);
+
+ len = wreq->len;
+ finfo = netfs_folio_info(folio);
+ if (finfo) {
+ start += finfo->dirty_offset;
+ if (finfo->dirty_offset + finfo->dirty_len != len) {
+ len = finfo->dirty_len;
+ goto cant_expand;
+ }
+ len = finfo->dirty_len;
+ }
+
+ if (start < i_size) {
+ /* Trim the write to the EOF; the extra data is ignored. Also
+ * put an upper limit on the size of a single storedata op.
+ */
+ max_len = 65536 * 4096;
+ max_len = min_t(unsigned long long, max_len, end - start + 1);
+ max_len = min_t(unsigned long long, max_len, i_size - start);
+
+ if (len < max_len)
+ netfs_extend_writeback(mapping, group, xas, &count, start,
+ max_len, caching, &len, &wreq->upper_len);
+ }
+
+cant_expand:
+ len = min_t(unsigned long long, len, i_size - start);
+
+ /* We now have a contiguous set of dirty pages, each with writeback
+ * set; the first page is still locked at this point, but all the rest
+ * have been unlocked.
+ */
+ folio_unlock(folio);
+ wreq->start = start;
+ wreq->len = len;
+
+ if (start < i_size) {
+ _debug("write back %zx @%llx [%llx]", len, start, i_size);
+
+ /* Speculatively write to the cache. We have to fix this up
+ * later if the store fails.
+ */
+ wreq->cleanup = netfs_cleanup_buffered_write;
+
+ iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
+ wreq->upper_len);
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
+ if (ret == 0 || ret == -EIOCBQUEUED)
+ wbc->nr_to_write -= len / PAGE_SIZE;
+ } else {
+ _debug("write discard %zx @%llx [%llx]", len, start, i_size);
+
+ /* The dirty region was entirely beyond the EOF. */
+ fscache_clear_page_bits(mapping, start, len, caching);
+ netfs_pages_written_back(wreq);
+ ret = 0;
+ }
+
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ _leave(" = 1");
+ return 1;
+}
+
+/*
+ * Write a region of pages back to the server
+ */
+static ssize_t netfs_writepages_begin(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct netfs_group *group,
+ struct xa_state *xas,
+ unsigned long long *_start,
+ unsigned long long end)
+{
+ const struct netfs_folio *finfo;
+ struct folio *folio;
+ unsigned long long start = *_start;
+ ssize_t ret;
+ void *priv;
+ int skips = 0;
+
+ _enter("%llx,%llx,", start, end);
+
+search_again:
+ /* Find the first dirty page in the group. */
+ rcu_read_lock();
+
+ for (;;) {
+ folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
+ if (xas_retry(xas, folio) || xa_is_value(folio))
+ continue;
+ if (!folio)
+ break;
+
+ if (!folio_try_get_rcu(folio)) {
+ xas_reset(xas);
+ continue;
+ }
+
+ if (unlikely(folio != xas_reload(xas))) {
+ folio_put(folio);
+ xas_reset(xas);
+ continue;
+ }
+
+ /* Skip any dirty folio that's not in the group of interest. */
+ priv = folio_get_private(folio);
+ if ((const struct netfs_group *)priv != group) {
+ finfo = netfs_folio_info(folio);
+ if (finfo->netfs_group != group) {
+ folio_put(folio);
+ continue;
+ }
+ }
+
+ xas_pause(xas);
+ break;
+ }
+ rcu_read_unlock();
+ if (!folio)
+ return 0;
+
+ start = folio_pos(folio); /* May regress with THPs */
+
+ _debug("wback %lx", folio->index);
+
+ /* At this point we hold neither the i_pages lock nor the page lock:
+ * the page may be truncated or invalidated (changing page->mapping to
+ * NULL), or even swizzled back from swapper_space to tmpfs file
+ * mapping
+ */
+lock_again:
+ if (wbc->sync_mode != WB_SYNC_NONE) {
+ ret = folio_lock_killable(folio);
+ if (ret < 0)
+ return ret;
+ } else {
+ if (!folio_trylock(folio))
+ goto search_again;
+ }
+
+ if (folio->mapping != mapping ||
+ !folio_test_dirty(folio)) {
+ start += folio_size(folio);
+ folio_unlock(folio);
+ goto search_again;
+ }
+
+ if (folio_test_writeback(folio) ||
+ folio_test_fscache(folio)) {
+ folio_unlock(folio);
+ if (wbc->sync_mode != WB_SYNC_NONE) {
+ folio_wait_writeback(folio);
+#ifdef CONFIG_FSCACHE
+ folio_wait_fscache(folio);
+#endif
+ goto lock_again;
+ }
+
+ start += folio_size(folio);
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ if (skips >= 5 || need_resched()) {
+ ret = 0;
+ goto out;
+ }
+ skips++;
+ }
+ goto search_again;
+ }
+
+ ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas,
+ folio, start, end);
+out:
+ if (ret > 0)
+ *_start = start + ret;
+ _leave(" = %zd [%llx]", ret, *_start);
+ return ret;
+}
+
+/*
+ * Write a region of pages back to the server
+ */
+static int netfs_writepages_region(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct netfs_group *group,
+ unsigned long long *_start,
+ unsigned long long end)
+{
+ ssize_t ret;
+
+ XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
+
+ do {
+ ret = netfs_writepages_begin(mapping, wbc, group, &xas,
+ _start, end);
+ if (ret > 0 && wbc->nr_to_write > 0)
+ cond_resched();
+ } while (ret > 0 && wbc->nr_to_write > 0);
+
+ return ret > 0 ? 0 : ret;
+}
+
+/*
+ * write some of the pending data back to the server
+ */
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct netfs_group *group = NULL;
+ loff_t start, end;
+ int ret;
+
+ _enter("");
+
+ /* We have to be careful as we can end up racing with setattr()
+ * truncating the pagecache since the caller doesn't take a lock here
+ * to prevent it.
+ */
+
+ if (wbc->range_cyclic && mapping->writeback_index) {
+ start = mapping->writeback_index * PAGE_SIZE;
+ ret = netfs_writepages_region(mapping, wbc, group,
+ &start, LLONG_MAX);
+ if (ret < 0)
+ goto out;
+
+ if (wbc->nr_to_write <= 0) {
+ mapping->writeback_index = start / PAGE_SIZE;
+ goto out;
+ }
+
+ start = 0;
+ end = mapping->writeback_index * PAGE_SIZE;
+ mapping->writeback_index = 0;
+ ret = netfs_writepages_region(mapping, wbc, group, &start, end);
+ if (ret == 0)
+ mapping->writeback_index = start / PAGE_SIZE;
+ } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
+ start = 0;
+ ret = netfs_writepages_region(mapping, wbc, group,
+ &start, LLONG_MAX);
+ if (wbc->nr_to_write > 0 && ret == 0)
+ mapping->writeback_index = start / PAGE_SIZE;
+ } else {
+ start = wbc->range_start;
+ ret = netfs_writepages_region(mapping, wbc, group,
+ &start, wbc->range_end);
+ }
+
+out:
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_writepages);
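
With this in place, a netfslib-backed filesystem can point its address_space operations straight at the library. A hypothetical wiring (myfs is illustrative; real users such as afs or 9p also set invalidate/release hooks of their own):

static const struct address_space_operations myfs_aops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.dirty_folio	= filemap_dirty_folio,
	.writepages	= netfs_writepages,
	.launder_folio	= netfs_launder_folio,
};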
+
+/*
+ * Deal with the disposition of a laundered folio.
+ */
+static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
+{
+ if (wreq->error) {
+ pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
+ mapping_set_error(wreq->mapping, wreq->error);
+ }
+}
+
+/**
+ * netfs_launder_folio - Clean up a dirty folio that's being invalidated
+ * @folio: The folio to clean
+ *
+ * This is called to write back a folio that's being invalidated when an inode
+ * is getting torn down. Ideally, writepages would be used instead.
+ */
+int netfs_launder_folio(struct folio *folio)
+{
+ struct netfs_io_request *wreq;
+ struct address_space *mapping = folio->mapping;
+ struct netfs_folio *finfo = netfs_folio_info(folio);
+ struct netfs_group *group = netfs_folio_group(folio);
+ struct bio_vec bvec;
+ unsigned long long i_size = i_size_read(mapping->host);
+ unsigned long long start = folio_pos(folio);
+ size_t offset = 0, len;
+ int ret = 0;
+
+ if (finfo) {
+ offset = finfo->dirty_offset;
+ start += offset;
+ len = finfo->dirty_len;
+ } else {
+ len = folio_size(folio);
+ }
+ len = min_t(unsigned long long, len, i_size - start);
+
+ wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
+ if (IS_ERR(wreq)) {
+ ret = PTR_ERR(wreq);
+ goto out;
+ }
+
+ if (!folio_clear_dirty_for_io(folio))
+ goto out_put;
+
+ trace_netfs_folio(folio, netfs_folio_trace_launder);
+
+ _debug("launder %llx-%llx", start, start + len - 1);
+
+ /* Speculatively write to the cache. We have to fix this up later if
+ * the store fails.
+ */
+ wreq->cleanup = netfs_cleanup_launder_folio;
+
+ bvec_set_folio(&bvec, folio, len, offset);
+ iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
+
+out_put:
+ folio_detach_private(folio);
+ netfs_put_group(group);
+ kfree(finfo);
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+out:
+ folio_wait_fscache(folio);
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_launder_folio);
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
new file mode 100644
index 000000000000..ad4370b3935d
--- /dev/null
+++ b/fs/netfs/direct_read.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Direct I/O support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/**
+ * netfs_unbuffered_read_iter_locked - Perform an unbuffered or direct I/O read
+ * @iocb: The I/O control descriptor describing the read
+ * @iter: The output buffer (also specifies read length)
+ *
+ * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
+ * output buffer. No use is made of the pagecache.
+ *
+ * The caller must hold any appropriate locks.
+ */
+static ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct netfs_io_request *rreq;
+ ssize_t ret;
+ size_t orig_count = iov_iter_count(iter);
+ bool async = !is_sync_kiocb(iocb);
+
+ _enter("");
+
+ if (!orig_count)
+ return 0; /* Don't update atime */
+
+ ret = kiocb_write_and_wait(iocb, orig_count);
+ if (ret < 0)
+ return ret;
+ file_accessed(iocb->ki_filp);
+
+ rreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
+ iocb->ki_pos, orig_count,
+ NETFS_DIO_READ);
+ if (IS_ERR(rreq))
+ return PTR_ERR(rreq);
+
+ netfs_stat(&netfs_n_rh_dio_read);
+ trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_dio_read);
+
+ /* If this is an async op, we have to keep track of the destination
+ * buffer for ourselves as the caller's iterator will be trashed when
+ * we return.
+ *
+	 * In such a case, extract an iterator to represent as much of the
+ * output buffer as we can manage. Note that the extraction might not
+ * be able to allocate a sufficiently large bvec array and may shorten
+ * the request.
+ */
+ if (user_backed_iter(iter)) {
+ ret = netfs_extract_user_iter(iter, rreq->len, &rreq->iter, 0);
+ if (ret < 0)
+ goto out;
+ rreq->direct_bv = (struct bio_vec *)rreq->iter.bvec;
+ rreq->direct_bv_count = ret;
+ rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+ rreq->len = iov_iter_count(&rreq->iter);
+ } else {
+ rreq->iter = *iter;
+ rreq->len = orig_count;
+ rreq->direct_bv_unpin = false;
+ iov_iter_advance(iter, orig_count);
+ }
+
+ // TODO: Set up bounce buffer if needed
+
+ if (async)
+ rreq->iocb = iocb;
+
+ ret = netfs_begin_read(rreq, is_sync_kiocb(iocb));
+ if (ret < 0)
+ goto out; /* May be -EIOCBQUEUED */
+ if (!async) {
+ // TODO: Copy from bounce buffer
+ iocb->ki_pos += rreq->transferred;
+ ret = rreq->transferred;
+ }
+
+out:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+ if (ret > 0)
+ orig_count -= ret;
+ if (ret != -EIOCBQUEUED)
+ iov_iter_revert(iter, orig_count - iov_iter_count(iter));
+ return ret;
+}
+
+/**
+ * netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
+ * @iocb: The I/O control descriptor describing the read
+ * @iter: The output buffer (also specifies read length)
+ *
+ * Perform an unbuffered I/O or direct I/O from the file in @iocb to the
+ * output buffer. No use is made of the pagecache.
+ */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ if (!iter->count)
+ return 0; /* Don't update atime */
+
+ ret = netfs_start_io_direct(inode);
+ if (ret == 0) {
+ ret = netfs_unbuffered_read_iter_locked(iocb, iter);
+ netfs_end_io_direct(inode);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_read_iter);
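
Correspondingly, the read and write entry points slot into file_operations; netfs_file_read_iter() and netfs_file_write_iter() route between the buffered and unbuffered/direct paths based on IOCB_DIRECT and NETFS_ICTX_UNBUFFERED. A hypothetical sketch (myfs_fsync stands in for the filesystem's own flush method):

static const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= netfs_file_read_iter,
	.write_iter	= netfs_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= myfs_fsync,
};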
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
new file mode 100644
index 000000000000..bee047e20f5d
--- /dev/null
+++ b/fs/netfs/direct_write.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Unbuffered and direct write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/uio.h>
+#include "internal.h"
+
+static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
+{
+ struct inode *inode = wreq->inode;
+ unsigned long long end = wreq->start + wreq->len;
+
+ if (!wreq->error &&
+ i_size_read(inode) < end) {
+ if (wreq->netfs_ops->update_i_size)
+ wreq->netfs_ops->update_i_size(inode, end);
+ else
+ i_size_write(inode, end);
+ }
+}
+
+/*
+ * Perform an unbuffered write where we may have to do an RMW operation on an
+ * encrypted file. This can also be used for direct I/O writes.
+ */
+static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group)
+{
+ struct netfs_io_request *wreq;
+ unsigned long long start = iocb->ki_pos;
+ unsigned long long end = start + iov_iter_count(iter);
+ ssize_t ret, n;
+ bool async = !is_sync_kiocb(iocb);
+
+ _enter("");
+
+ /* We're going to need a bounce buffer if what we transmit is going to
+ * be different in some way to the source buffer, e.g. because it gets
+ * encrypted/compressed or because it needs expanding to a block size.
+ */
+ // TODO
+
+ _debug("uw %llx-%llx", start, end);
+
+ wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
+ start, end - start,
+ iocb->ki_flags & IOCB_DIRECT ?
+ NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+ if (IS_ERR(wreq))
+ return PTR_ERR(wreq);
+
+ {
+ /* If this is an async op and we're not using a bounce buffer,
+ * we have to save the source buffer as the iterator is only
+ * good until we return. In such a case, extract an iterator
+	 * to represent as much of the output buffer as we can
+ * manage. Note that the extraction might not be able to
+ * allocate a sufficiently large bvec array and may shorten the
+ * request.
+ */
+ if (async || user_backed_iter(iter)) {
+ n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
+ if (n < 0) {
+ ret = n;
+ goto out;
+ }
+ wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
+ wreq->direct_bv_count = n;
+ wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
+ wreq->len = iov_iter_count(&wreq->iter);
+ } else {
+ wreq->iter = *iter;
+ }
+
+ wreq->io_iter = wreq->iter;
+ }
+
+ /* Copy the data into the bounce buffer and encrypt it. */
+ // TODO
+
+ /* Dispatch the write. */
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ if (async)
+ wreq->iocb = iocb;
+ wreq->cleanup = netfs_cleanup_dio_write;
+ ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
+ iocb->ki_flags & IOCB_DIRECT ?
+ netfs_write_trace_dio_write :
+ netfs_write_trace_unbuffered_write);
+ if (ret < 0) {
+ _debug("begin = %zd", ret);
+ goto out;
+ }
+
+ if (!async) {
+ trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
+ wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
+
+ ret = wreq->error;
+ _debug("waited = %zd", ret);
+ if (ret == 0) {
+ ret = wreq->transferred;
+ iocb->ki_pos += ret;
+ }
+ } else {
+ ret = -EIOCBQUEUED;
+ }
+
+out:
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ return ret;
+}
+
+/**
+ * netfs_unbuffered_write_iter - Unbuffered write to a file
+ * @iocb: IO state structure
+ * @from: iov_iter with data to write
+ *
+ * Do an unbuffered write to a file, writing the data directly to the server
+ * and not lodging the data in the pagecache.
+ *
+ * Return:
+ * * Negative error code if no data has been written at all or
+ * vfs_fsync_range() failed for a synchronous write
+ * * Number of bytes written, even for truncated writes
+ */
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ unsigned long long end;
+ ssize_t ret;
+
+ _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ trace_netfs_write_iter(iocb, from);
+ netfs_stat(&netfs_n_rh_dio_write);
+
+ ret = netfs_start_io_direct(inode);
+ if (ret < 0)
+ return ret;
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out;
+ ret = file_remove_privs(file);
+ if (ret < 0)
+ goto out;
+ ret = file_update_time(file);
+ if (ret < 0)
+ goto out;
+ ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
+ if (ret < 0)
+ goto out;
+ end = iocb->ki_pos + iov_iter_count(from);
+ if (end > ictx->zero_point)
+ ictx->zero_point = end;
+
+ fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
+ FSCACHE_INVAL_DIO_WRITE);
+ ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
+out:
+ netfs_end_io_direct(inode);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_unbuffered_write_iter);
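The write side pairs with the read helper above. A hedged sketch of how a
hypothetical filesystem might wire it up (generic_file_write_iter() stands in
for whatever buffered path the filesystem actually uses):

	/* Hypothetical ->write_iter: O_DIRECT writes go straight to the
	 * server via netfs_unbuffered_write_iter().
	 */
	static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	{
		if (iocb->ki_flags & IOCB_DIRECT)
			return netfs_unbuffered_write_iter(iocb, from);
		return generic_file_write_iter(iocb, from);
	}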
diff --git a/fs/fscache/cache.c b/fs/netfs/fscache_cache.c
index d645f8b302a2..9397ed39b0b4 100644
--- a/fs/fscache/cache.c
+++ b/fs/netfs/fscache_cache.c
@@ -179,13 +179,14 @@ EXPORT_SYMBOL(fscache_acquire_cache);
void fscache_put_cache(struct fscache_cache *cache,
enum fscache_cache_trace where)
{
- unsigned int debug_id = cache->debug_id;
+ unsigned int debug_id;
bool zero;
int ref;
if (IS_ERR_OR_NULL(cache))
return;
+ debug_id = cache->debug_id;
zero = __refcount_dec_and_test(&cache->ref, &ref);
trace_fscache_cache(debug_id, ref - 1, where);
diff --git a/fs/fscache/cookie.c b/fs/netfs/fscache_cookie.c
index bce2492186d0..bce2492186d0 100644
--- a/fs/fscache/cookie.c
+++ b/fs/netfs/fscache_cookie.c
diff --git a/fs/netfs/fscache_internal.h b/fs/netfs/fscache_internal.h
new file mode 100644
index 000000000000..a09b948fcef2
--- /dev/null
+++ b/fs/netfs/fscache_internal.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Internal definitions for FS-Cache
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include "internal.h"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "FS-Cache: " fmt
diff --git a/fs/fscache/io.c b/fs/netfs/fscache_io.c
index 0d2b8dec8f82..ad572f7ee897 100644
--- a/fs/fscache/io.c
+++ b/fs/netfs/fscache_io.c
@@ -158,46 +158,6 @@ int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
}
EXPORT_SYMBOL(__fscache_begin_write_operation);
-/**
- * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
- * @mapping: The mapping the folio belongs to.
- * @folio: The folio being dirtied.
- * @cookie: The cookie referring to the cache object
- *
- * Set the dirty flag on a folio and pin an in-use cache object in memory
- * so that writeback can later write to it. This is intended
- * to be called from the filesystem's ->dirty_folio() method.
- *
- * Return: true if the dirty flag was set on the folio, false otherwise.
- */
-bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
- struct fscache_cookie *cookie)
-{
- struct inode *inode = mapping->host;
- bool need_use = false;
-
- _enter("");
-
- if (!filemap_dirty_folio(mapping, folio))
- return false;
- if (!fscache_cookie_valid(cookie))
- return true;
-
- if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
- spin_lock(&inode->i_lock);
- if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
- inode->i_state |= I_PINNING_FSCACHE_WB;
- need_use = true;
- }
- spin_unlock(&inode->i_lock);
-
- if (need_use)
- fscache_use_cookie(cookie, true);
- }
- return true;
-}
-EXPORT_SYMBOL(fscache_dirty_folio);
-
struct fscache_write_request {
struct netfs_cache_resources cache_resources;
struct address_space *mapping;
@@ -277,7 +237,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
fscache_access_io_write) < 0)
goto abandon_free;
- ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
+ ret = cres->ops->prepare_write(cres, &start, &len, len, i_size, false);
if (ret < 0)
goto abandon_end;
diff --git a/fs/fscache/main.c b/fs/netfs/fscache_main.c
index dad85fd84f6f..42e98bb523e3 100644
--- a/fs/fscache/main.c
+++ b/fs/netfs/fscache_main.c
@@ -8,18 +8,9 @@
#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/init.h>
-#define CREATE_TRACE_POINTS
#include "internal.h"
-
-MODULE_DESCRIPTION("FS Cache Manager");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
-unsigned fscache_debug;
-module_param_named(debug, fscache_debug, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_debug,
- "FS-Cache debugging mask");
+#define CREATE_TRACE_POINTS
+#include <trace/events/fscache.h>
EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache);
EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume);
@@ -71,7 +62,7 @@ unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
/*
* initialise the fs caching module
*/
-static int __init fscache_init(void)
+int __init fscache_init(void)
{
int ret = -ENOMEM;
@@ -92,7 +83,7 @@ static int __init fscache_init(void)
goto error_cookie_jar;
}
- pr_notice("Loaded\n");
+ pr_notice("FS-Cache loaded\n");
return 0;
error_cookie_jar:
@@ -103,19 +94,15 @@ error_wq:
return ret;
}
-fs_initcall(fscache_init);
-
/*
* clean up on module removal
*/
-static void __exit fscache_exit(void)
+void __exit fscache_exit(void)
{
_enter("");
kmem_cache_destroy(fscache_cookie_jar);
fscache_proc_cleanup();
destroy_workqueue(fscache_wq);
- pr_notice("Unloaded\n");
+ pr_notice("FS-Cache unloaded\n");
}
-
-module_exit(fscache_exit);
diff --git a/fs/fscache/proc.c b/fs/netfs/fscache_proc.c
index dc3b0e9c8cce..874d951bc390 100644
--- a/fs/fscache/proc.c
+++ b/fs/netfs/fscache_proc.c
@@ -12,41 +12,34 @@
#include "internal.h"
/*
- * initialise the /proc/fs/fscache/ directory
+ * Add files to /proc/fs/netfs/.
*/
int __init fscache_proc_init(void)
{
- if (!proc_mkdir("fs/fscache", NULL))
- goto error_dir;
+ if (!proc_symlink("fs/fscache", NULL, "netfs"))
+ goto error_sym;
- if (!proc_create_seq("fs/fscache/caches", S_IFREG | 0444, NULL,
+ if (!proc_create_seq("fs/netfs/caches", S_IFREG | 0444, NULL,
&fscache_caches_seq_ops))
goto error;
- if (!proc_create_seq("fs/fscache/volumes", S_IFREG | 0444, NULL,
+ if (!proc_create_seq("fs/netfs/volumes", S_IFREG | 0444, NULL,
&fscache_volumes_seq_ops))
goto error;
- if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
+ if (!proc_create_seq("fs/netfs/cookies", S_IFREG | 0444, NULL,
&fscache_cookies_seq_ops))
goto error;
-
-#ifdef CONFIG_FSCACHE_STATS
- if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
- fscache_stats_show))
- goto error;
-#endif
-
return 0;
error:
remove_proc_entry("fs/fscache", NULL);
-error_dir:
+error_sym:
return -ENOMEM;
}
/*
- * clean up the /proc/fs/fscache/ directory
+ * Clean up the /proc/fs/fscache symlink.
*/
void fscache_proc_cleanup(void)
{
diff --git a/fs/fscache/stats.c b/fs/netfs/fscache_stats.c
index fc94e5e79f1c..add21abdf713 100644
--- a/fs/fscache/stats.c
+++ b/fs/netfs/fscache_stats.c
@@ -48,13 +48,15 @@ atomic_t fscache_n_no_create_space;
EXPORT_SYMBOL(fscache_n_no_create_space);
atomic_t fscache_n_culled;
EXPORT_SYMBOL(fscache_n_culled);
+atomic_t fscache_n_dio_misfit;
+EXPORT_SYMBOL(fscache_n_dio_misfit);
/*
* display the general statistics
*/
-int fscache_stats_show(struct seq_file *m, void *v)
+int fscache_stats_show(struct seq_file *m)
{
- seq_puts(m, "FS-Cache statistics\n");
+ seq_puts(m, "-- FS-Cache statistics --\n");
seq_printf(m, "Cookies: n=%d v=%d vcol=%u voom=%u\n",
atomic_read(&fscache_n_cookies),
atomic_read(&fscache_n_volumes),
@@ -93,10 +95,9 @@ int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_no_create_space),
atomic_read(&fscache_n_culled));
- seq_printf(m, "IO : rd=%u wr=%u\n",
+ seq_printf(m, "IO : rd=%u wr=%u mis=%u\n",
atomic_read(&fscache_n_read),
- atomic_read(&fscache_n_write));
-
- netfs_stats_show(m);
+ atomic_read(&fscache_n_write),
+ atomic_read(&fscache_n_dio_misfit));
return 0;
}
diff --git a/fs/fscache/volume.c b/fs/netfs/fscache_volume.c
index cdf991bdd9de..cdf991bdd9de 100644
--- a/fs/fscache/volume.c
+++ b/fs/netfs/fscache_volume.c
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 43fac1b14e40..ec7045d24400 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -5,9 +5,13 @@
* Written by David Howells (dhowells@redhat.com)
*/
+#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <linux/netfs.h>
#include <linux/fscache.h>
+#include <linux/fscache-cache.h>
#include <trace/events/netfs.h>
+#include <trace/events/fscache.h>
#ifdef pr_fmt
#undef pr_fmt
@@ -19,6 +23,8 @@
* buffered_read.c
*/
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
+int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+ size_t offset, size_t len);
/*
* io.c
@@ -29,6 +35,41 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
* main.c
*/
extern unsigned int netfs_debug;
+extern struct list_head netfs_io_requests;
+extern spinlock_t netfs_proc_lock;
+
+#ifdef CONFIG_PROC_FS
+static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
+{
+ spin_lock(&netfs_proc_lock);
+ list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
+ spin_unlock(&netfs_proc_lock);
+}
+static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
+{
+ if (!list_empty(&rreq->proc_link)) {
+ spin_lock(&netfs_proc_lock);
+ list_del_rcu(&rreq->proc_link);
+ spin_unlock(&netfs_proc_lock);
+ }
+}
+#else
+static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
+static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
+#endif
+
+/*
+ * misc.c
+ */
+#define NETFS_FLAG_PUT_MARK BIT(0)
+#define NETFS_FLAG_PAGECACHE_MARK BIT(1)
+int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
+ struct folio *folio, unsigned int flags,
+ gfp_t gfp_mask);
+int netfs_add_folios_to_buffer(struct xarray *buffer,
+ struct address_space *mapping,
+ pgoff_t index, pgoff_t to, gfp_t gfp_mask);
+void netfs_clear_buffer(struct xarray *buffer);
/*
* objects.c
@@ -50,9 +91,20 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
}
/*
+ * output.c
+ */
+int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
+ enum netfs_write_trace what);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end);
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb);
+
+/*
* stats.c
*/
#ifdef CONFIG_NETFS_STATS
+extern atomic_t netfs_n_rh_dio_read;
+extern atomic_t netfs_n_rh_dio_write;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_readpage;
extern atomic_t netfs_n_rh_rreq;
@@ -71,7 +123,15 @@ extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
+extern atomic_t netfs_n_wh_wstream_conflict;
+extern atomic_t netfs_n_wh_upload;
+extern atomic_t netfs_n_wh_upload_done;
+extern atomic_t netfs_n_wh_upload_failed;
+extern atomic_t netfs_n_wh_write;
+extern atomic_t netfs_n_wh_write_done;
+extern atomic_t netfs_n_wh_write_failed;
+int netfs_stats_show(struct seq_file *m, void *v);
static inline void netfs_stat(atomic_t *stat)
{
@@ -103,6 +163,176 @@ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
#endif
}
+/*
+ * Get a ref on a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
+{
+ if (netfs_group)
+ refcount_inc(&netfs_group->ref);
+ return netfs_group;
+}
+
+/*
+ * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline void netfs_put_group(struct netfs_group *netfs_group)
+{
+ if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
+ netfs_group->free(netfs_group);
+}
+
+/*
+ * Dispose of a netfs group attached to a dirty page (e.g. a ceph snap).
+ */
+static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
+{
+ if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
+ netfs_group->free(netfs_group);
+}
+
+/*
+ * fscache-cache.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_caches_seq_ops;
+#endif
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
+void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
+
+static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
+{
+ return smp_load_acquire(&cache->state);
+}
+
+static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
+{
+ return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
+}
+
+static inline void fscache_set_cache_state(struct fscache_cache *cache,
+ enum fscache_cache_state new_state)
+{
+ smp_store_release(&cache->state, new_state);
+
+}
+
+static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
+ enum fscache_cache_state old_state,
+ enum fscache_cache_state new_state)
+{
+ return try_cmpxchg_release(&cache->state, &old_state, new_state);
+}
+
+/*
+ * fscache-cookie.c
+ */
+extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
+extern struct timer_list fscache_cookie_lru_timer;
+
+extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
+extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+static inline void fscache_see_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
+ where);
+}
+
+/*
+ * fscache-main.c
+ */
+extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
+#ifdef CONFIG_FSCACHE
+int __init fscache_init(void);
+void __exit fscache_exit(void);
+#else
+static inline int fscache_init(void) { return 0; }
+static inline void fscache_exit(void) {}
+#endif
+
+/*
+ * fscache-proc.c
+ */
+#ifdef CONFIG_PROC_FS
+extern int __init fscache_proc_init(void);
+extern void fscache_proc_cleanup(void);
+#else
+#define fscache_proc_init() (0)
+#define fscache_proc_cleanup() do {} while (0)
+#endif
+
+/*
+ * fscache-stats.c
+ */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_volumes;
+extern atomic_t fscache_n_volumes_collision;
+extern atomic_t fscache_n_volumes_nomem;
+extern atomic_t fscache_n_cookies;
+extern atomic_t fscache_n_cookies_lru;
+extern atomic_t fscache_n_cookies_lru_expired;
+extern atomic_t fscache_n_cookies_lru_removed;
+extern atomic_t fscache_n_cookies_lru_dropped;
+
+extern atomic_t fscache_n_acquires;
+extern atomic_t fscache_n_acquires_ok;
+extern atomic_t fscache_n_acquires_oom;
+
+extern atomic_t fscache_n_invalidates;
+
+extern atomic_t fscache_n_relinquishes;
+extern atomic_t fscache_n_relinquishes_retire;
+extern atomic_t fscache_n_relinquishes_dropped;
+
+extern atomic_t fscache_n_resizes;
+extern atomic_t fscache_n_resizes_null;
+
+static inline void fscache_stat(atomic_t *stat)
+{
+ atomic_inc(stat);
+}
+
+static inline void fscache_stat_d(atomic_t *stat)
+{
+ atomic_dec(stat);
+}
+
+#define __fscache_stat(stat) (stat)
+
+int fscache_stats_show(struct seq_file *m);
+#else
+
+#define __fscache_stat(stat) (NULL)
+#define fscache_stat(stat) do {} while (0)
+#define fscache_stat_d(stat) do {} while (0)
+
+static inline int fscache_stats_show(struct seq_file *m) { return 0; }
+#endif
+
+/*
+ * fscache-volume.c
+ */
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
+
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+void fscache_create_volume(struct fscache_volume *volume, bool wait);
+
/*****************************************************************************/
/*
* debug tracing
@@ -143,3 +373,57 @@ do { \
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
+
+/*
+ * assertions
+ */
+#if 1 /* defined(__KDEBUGALL) */
+
+#define ASSERT(X) \
+do { \
+ if (unlikely(!(X))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTCMP(X, OP, Y) \
+do { \
+ if (unlikely(!((X) OP (Y)))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ pr_err("%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIF(C, X) \
+do { \
+ if (unlikely((C) && !(X))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIFCMP(C, X, OP, Y) \
+do { \
+ if (unlikely((C) && !((X) OP (Y)))) { \
+ pr_err("\n"); \
+ pr_err("Assertion failed\n"); \
+ pr_err("%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#else
+
+#define ASSERT(X) do {} while (0)
+#define ASSERTCMP(X, OP, Y) do {} while (0)
+#define ASSERTIF(C, X) do {} while (0)
+#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
+
+#endif /* assert or not */
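For context on the netfs_get_group()/netfs_put_group() helpers above: struct
netfs_group carries only the refcount and a ->free() callback, so a filesystem
embeds it in its own grouping object. A hypothetical sketch (the snap wrapper
is invented):

	/* Hypothetical wrapper: the embedded group is what the helpers
	 * refcount; ->free() is called when the last ref goes away.
	 */
	struct myfs_snap_group {
		struct netfs_group group;
		u64 snap_id;
	};

	static void myfs_free_snap_group(struct netfs_group *ng)
	{
		kfree(container_of(ng, struct myfs_snap_group, group));
	}

At allocation time the filesystem would point group.free at the callback and
prime group.ref to 1 before attaching the group to dirty folios.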
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 7f753380e047..4261ad6c55b6 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -21,12 +21,7 @@
*/
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
- struct iov_iter iter;
-
- iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
- subreq->start + subreq->transferred,
- subreq->len - subreq->transferred);
- iov_iter_zero(iov_iter_count(&iter), &iter);
+ iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
}
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
@@ -46,14 +41,9 @@ static void netfs_read_from_cache(struct netfs_io_request *rreq,
enum netfs_read_from_hole read_hole)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
- struct iov_iter iter;
netfs_stat(&netfs_n_rh_read);
- iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
- subreq->start + subreq->transferred,
- subreq->len - subreq->transferred);
-
- cres->ops->read(cres, subreq->start, &iter, read_hole,
+ cres->ops->read(cres, subreq->start, &subreq->io_iter, read_hole,
netfs_cache_read_terminated, subreq);
}
@@ -88,6 +78,13 @@ static void netfs_read_from_server(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
netfs_stat(&netfs_n_rh_download);
+
+ if (rreq->origin != NETFS_DIO_READ &&
+ iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
+ pr_warn("R=%08x[%u] ITER PRE-MISMATCH %zx != %zx-%zx %lx\n",
+ rreq->debug_id, subreq->debug_index,
+ iov_iter_count(&subreq->io_iter), subreq->len,
+ subreq->transferred, subreq->flags);
rreq->netfs_ops->issue_read(subreq);
}
@@ -127,9 +124,10 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
/* We might have multiple writes from the same huge
* folio, but we mustn't unlock a folio more than once.
*/
- if (have_unlocked && folio_index(folio) <= unlocked)
+ if (have_unlocked && folio->index <= unlocked)
continue;
- unlocked = folio_index(folio);
+ unlocked = folio_next_index(folio) - 1;
+ trace_netfs_folio(folio, netfs_folio_trace_end_copy);
folio_end_fscache(folio);
have_unlocked = true;
}
@@ -201,7 +199,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
}
ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
- rreq->i_size, true);
+ subreq->len, rreq->i_size, true);
if (ret < 0) {
trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
@@ -260,6 +258,30 @@ static void netfs_rreq_short_read(struct netfs_io_request *rreq,
}
/*
+ * Reset the subrequest iterator prior to resubmission.
+ */
+static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ size_t remaining = subreq->len - subreq->transferred;
+ size_t count = iov_iter_count(&subreq->io_iter);
+
+ if (count == remaining)
+ return;
+
+ _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+ rreq->debug_id, subreq->debug_index,
+ iov_iter_count(&subreq->io_iter), subreq->transferred,
+ subreq->len, rreq->i_size,
+ subreq->io_iter.iter_type);
+
+ if (count < remaining)
+ iov_iter_revert(&subreq->io_iter, remaining - count);
+ else
+ iov_iter_advance(&subreq->io_iter, count - remaining);
+}
+
+/*
* Resubmit any short or failed operations. Returns true if we got the rreq
* ref back.
*/
@@ -287,6 +309,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
atomic_inc(&rreq->nr_outstanding);
+ netfs_reset_subreq_iter(rreq, subreq);
netfs_read_from_server(rreq, subreq);
} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
netfs_rreq_short_read(rreq, subreq);
@@ -321,6 +344,43 @@ static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
}
/*
+ * Determine how much we can admit to having read from a DIO read.
+ */
+static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ unsigned int i;
+ size_t transferred = 0;
+
+ for (i = 0; i < rreq->direct_bv_count; i++)
+ flush_dcache_page(rreq->direct_bv[i].bv_page);
+
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ if (subreq->error || subreq->transferred == 0)
+ break;
+ transferred += subreq->transferred;
+ if (subreq->transferred < subreq->len)
+ break;
+ }
+
+ rreq->transferred = transferred;
+ task_io_account_read(transferred);
+
+ if (rreq->iocb) {
+ rreq->iocb->ki_pos += transferred;
+ if (rreq->iocb->ki_complete)
+ rreq->iocb->ki_complete(
+ rreq->iocb, rreq->error ? rreq->error : transferred);
+ }
+ if (rreq->netfs_ops->done)
+ rreq->netfs_ops->done(rreq);
+ inode_dio_end(rreq->inode);
+}
+
+/*
* Assess the state of a read request and decide what to do next.
*
* Note that we could be in an ordinary kernel thread, on a workqueue or in
@@ -340,8 +400,12 @@ again:
return;
}
- netfs_rreq_unlock_folios(rreq);
+ if (rreq->origin != NETFS_DIO_READ)
+ netfs_rreq_unlock_folios(rreq);
+ else
+ netfs_rreq_assess_dio(rreq);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
@@ -399,9 +463,9 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
struct netfs_io_request *rreq = subreq->rreq;
int u;
- _enter("[%u]{%llx,%lx},%zd",
- subreq->debug_index, subreq->start, subreq->flags,
- transferred_or_error);
+ _enter("R=%x[%x]{%llx,%lx},%zd",
+ rreq->debug_id, subreq->debug_index,
+ subreq->start, subreq->flags, transferred_or_error);
switch (subreq->source) {
case NETFS_READ_FROM_CACHE:
@@ -501,15 +565,20 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest
*/
static enum netfs_io_source
netfs_rreq_prepare_read(struct netfs_io_request *rreq,
- struct netfs_io_subrequest *subreq)
+ struct netfs_io_subrequest *subreq,
+ struct iov_iter *io_iter)
{
- enum netfs_io_source source;
+ enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
+ struct netfs_inode *ictx = netfs_inode(rreq->inode);
+ size_t lsize;
_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
- source = netfs_cache_prepare_read(subreq, rreq->i_size);
- if (source == NETFS_INVALID_READ)
- goto out;
+ if (rreq->origin != NETFS_DIO_READ) {
+ source = netfs_cache_prepare_read(subreq, rreq->i_size);
+ if (source == NETFS_INVALID_READ)
+ goto out;
+ }
if (source == NETFS_DOWNLOAD_FROM_SERVER) {
/* Call out to the netfs to let it shrink the request to fit
@@ -518,19 +587,52 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
* to make serial calls, it can indicate a short read and then
* we will call it again.
*/
+ if (rreq->origin != NETFS_DIO_READ) {
+ if (subreq->start >= ictx->zero_point) {
+ source = NETFS_FILL_WITH_ZEROES;
+ goto set;
+ }
+ if (subreq->len > ictx->zero_point - subreq->start)
+ subreq->len = ictx->zero_point - subreq->start;
+ }
if (subreq->len > rreq->i_size - subreq->start)
subreq->len = rreq->i_size - subreq->start;
+ if (rreq->rsize && subreq->len > rreq->rsize)
+ subreq->len = rreq->rsize;
if (rreq->netfs_ops->clamp_length &&
!rreq->netfs_ops->clamp_length(subreq)) {
source = NETFS_INVALID_READ;
goto out;
}
+
+ if (subreq->max_nr_segs) {
+ lsize = netfs_limit_iter(io_iter, 0, subreq->len,
+ subreq->max_nr_segs);
+ if (subreq->len > lsize) {
+ subreq->len = lsize;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_limited);
+ }
+ }
}
- if (WARN_ON(subreq->len == 0))
+set:
+ if (subreq->len > rreq->len)
+ pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n",
+ rreq->debug_id, subreq->debug_index,
+ subreq->len, rreq->len);
+
+ if (WARN_ON(subreq->len == 0)) {
source = NETFS_INVALID_READ;
+ goto out;
+ }
+ subreq->source = source;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+ subreq->io_iter = *io_iter;
+ iov_iter_truncate(&subreq->io_iter, subreq->len);
+ iov_iter_advance(io_iter, subreq->len);
out:
subreq->source = source;
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
@@ -541,6 +643,7 @@ out:
* Slice off a piece of a read request and submit an I/O request for it.
*/
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
+ struct iov_iter *io_iter,
unsigned int *_debug_index)
{
struct netfs_io_subrequest *subreq;
@@ -552,7 +655,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
subreq->debug_index = (*_debug_index)++;
subreq->start = rreq->start + rreq->submitted;
- subreq->len = rreq->len - rreq->submitted;
+ subreq->len = io_iter->count;
_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
@@ -565,7 +668,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
* (the starts must coincide), in which case, we go around the loop
* again and ask it to download the next piece.
*/
- source = netfs_rreq_prepare_read(rreq, subreq);
+ source = netfs_rreq_prepare_read(rreq, subreq, io_iter);
if (source == NETFS_INVALID_READ)
goto subreq_failed;
@@ -603,6 +706,7 @@ subreq_failed:
*/
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
+ struct iov_iter io_iter;
unsigned int debug_index = 0;
int ret;
@@ -611,50 +715,73 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
if (rreq->len == 0) {
pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
- netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
return -EIO;
}
- INIT_WORK(&rreq->work, netfs_rreq_work);
+ if (rreq->origin == NETFS_DIO_READ)
+ inode_dio_begin(rreq->inode);
- if (sync)
- netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+ // TODO: Use bounce buffer if requested
+ rreq->io_iter = rreq->iter;
+
+ INIT_WORK(&rreq->work, netfs_rreq_work);
/* Chop the read into slices according to what the cache and the netfs
* want and submit each one.
*/
+ netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
atomic_set(&rreq->nr_outstanding, 1);
+ io_iter = rreq->io_iter;
do {
- if (!netfs_rreq_submit_slice(rreq, &debug_index))
+ _debug("submit %llx + %zx >= %llx",
+ rreq->start, rreq->submitted, rreq->i_size);
+ if (rreq->origin == NETFS_DIO_READ &&
+ rreq->start + rreq->submitted >= rreq->i_size)
+ break;
+ if (!netfs_rreq_submit_slice(rreq, &io_iter, &debug_index))
+ break;
+ if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
+ test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
break;
} while (rreq->submitted < rreq->len);
+ if (!rreq->submitted) {
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_no_submit);
+ if (rreq->origin == NETFS_DIO_READ)
+ inode_dio_end(rreq->inode);
+ ret = 0;
+ goto out;
+ }
+
if (sync) {
- /* Keep nr_outstanding incremented so that the ref always belongs to
- * us, and the service code isn't punted off to a random thread pool to
- * process.
+ /* Keep nr_outstanding incremented so that the ref always
+ * belongs to us, and the service code isn't punted off to a
+ * random thread pool to process. Note that this might start
+ * further work, such as writing to the cache.
*/
- for (;;) {
- wait_var_event(&rreq->nr_outstanding,
- atomic_read(&rreq->nr_outstanding) == 1);
+ wait_var_event(&rreq->nr_outstanding,
+ atomic_read(&rreq->nr_outstanding) == 1);
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
netfs_rreq_assess(rreq, false);
- if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
- break;
- cond_resched();
- }
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+ wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len) {
+ if (ret == 0 && rreq->submitted < rreq->len &&
+ rreq->origin != NETFS_DIO_READ) {
trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
ret = -EIO;
}
- netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
} else {
/* If we decrement nr_outstanding to 0, the ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_outstanding))
netfs_rreq_assess(rreq, false);
- ret = 0;
+ ret = -EIOCBQUEUED;
}
+
+out:
return ret;
}
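Since the core now primes subreq->io_iter before issuing, a filesystem's
->issue_read() no longer needs to construct its own xarray iterator. A hedged
sketch (myfs_fetch_data() is an invented transport call returning the number
of bytes read or a negative error):

	/* Hypothetical ->issue_read() consuming the pre-set iterator. */
	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
	{
		ssize_t ret;

		ret = myfs_fetch_data(subreq->rreq->netfs_priv,
				      subreq->start + subreq->transferred,
				      &subreq->io_iter);
		netfs_subreq_terminated(subreq, ret, false);
	}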
diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
index 2ff07ba655a0..b781bbbf1d8d 100644
--- a/fs/netfs/iterator.c
+++ b/fs/netfs/iterator.c
@@ -101,3 +101,100 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
return npages;
}
EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
+
+/*
+ * Select the span of a bvec iterator we're going to use. Limit it by both maximum
+ * size and maximum number of segments. Returns the size of the span in bytes.
+ */
+static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ const struct bio_vec *bvecs = iter->bvec;
+ unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
+ size_t len, span = 0, n = iter->count;
+ size_t skip = iter->iov_offset + start_offset;
+
+ if (WARN_ON(!iov_iter_is_bvec(iter)) ||
+ WARN_ON(start_offset > n) ||
+ n == 0)
+ return 0;
+
+ while (n && ix < nbv && skip) {
+ len = bvecs[ix].bv_len;
+ if (skip < len)
+ break;
+ skip -= len;
+ n -= len;
+ ix++;
+ }
+
+ while (n && ix < nbv) {
+ len = min3(n, bvecs[ix].bv_len - skip, max_size);
+ span += len;
+ nsegs++;
+ ix++;
+ if (span >= max_size || nsegs >= max_segs)
+ break;
+ skip = 0;
+ n -= len;
+ }
+
+ return min(span, max_size);
+}
+
+/*
+ * Select the span of an xarray iterator we're going to use. Limit it by both
+ * maximum size and maximum number of segments. It is assumed that segments
+ * can be larger than a page in size, provided they're physically contiguous.
+ * Returns the size of the span in bytes.
+ */
+static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ struct folio *folio;
+ unsigned int nsegs = 0;
+ loff_t pos = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = pos / PAGE_SIZE;
+ size_t span = 0, n = iter->count;
+
+ XA_STATE(xas, iter->xarray, index);
+
+ if (WARN_ON(!iov_iter_is_xarray(iter)) ||
+ WARN_ON(start_offset > n) ||
+ n == 0)
+ return 0;
+ max_size = min(max_size, n - start_offset);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t offset, flen, len;
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ flen = folio_size(folio);
+ offset = offset_in_folio(folio, pos);
+ len = min(max_size, flen - offset);
+ span += len;
+ nsegs++;
+ if (span >= max_size || nsegs >= max_segs)
+ break;
+ }
+
+ rcu_read_unlock();
+ return min(span, max_size);
+}
+
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs)
+{
+ if (iov_iter_is_bvec(iter))
+ return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
+ if (iov_iter_is_xarray(iter))
+ return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
+ BUG();
+}
+EXPORT_SYMBOL(netfs_limit_iter);
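As used in netfs_rreq_prepare_read() above, this lets a caller clamp a span
before building a wire operation. A trivial hedged example with invented
limits:

	/* Hypothetical clamp: at most 1MiB and 16 segments per RPC. */
	static size_t myfs_clamp_span(const struct iov_iter *iter)
	{
		return netfs_limit_iter(iter, 0, 1024 * 1024, 16);
	}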
diff --git a/fs/netfs/locking.c b/fs/netfs/locking.c
new file mode 100644
index 000000000000..75dc52a49b3a
--- /dev/null
+++ b/fs/netfs/locking.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I/O and data path helper functionality.
+ *
+ * Borrowed from NFS; Copyright (c) 2016 Trond Myklebust
+ */
+
+#include <linux/kernel.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/*
+ * inode_dio_wait_interruptible - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually inode->i_rwsem.
+ */
+static int inode_dio_wait_interruptible(struct inode *inode)
+{
+ if (!atomic_read(&inode->i_dio_count))
+ return 0;
+
+ wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
+ DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
+
+ for (;;) {
+ prepare_to_wait(wq, &q.wq_entry, TASK_INTERRUPTIBLE);
+ if (!atomic_read(&inode->i_dio_count))
+ break;
+ if (signal_pending(current))
+ break;
+ schedule();
+ }
+ finish_wait(wq, &q.wq_entry);
+
+ return atomic_read(&inode->i_dio_count) ? -ERESTARTSYS : 0;
+}
+
+/* Call with exclusively locked inode->i_rwsem */
+static int netfs_block_o_direct(struct netfs_inode *ictx)
+{
+ if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags))
+ return 0;
+ clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+ return inode_dio_wait_interruptible(&ictx->inode);
+}
+
+/**
+ * netfs_start_io_read - declare the file is being used for buffered reads
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is about to start, and ensure
+ * that we block all direct I/O.
+ * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is unset,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that buffered read operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas direct I/O
+ * operations need to wait to grab an exclusive lock in order to set
+ * NETFS_ICTX_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+ */
+int netfs_start_io_read(struct inode *inode)
+ __acquires(inode->i_rwsem)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ /* Be an optimist! */
+ if (down_read_interruptible(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) == 0)
+ return 0;
+ up_read(&inode->i_rwsem);
+
+ /* Slow path.... */
+ if (down_write_killable(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (netfs_block_o_direct(ictx) < 0) {
+ up_write(&inode->i_rwsem);
+ return -ERESTARTSYS;
+ }
+ downgrade_write(&inode->i_rwsem);
+ return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_read);
+
+/**
+ * netfs_end_io_read - declare that the buffered read operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered read operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_read(struct inode *inode)
+ __releases(inode->i_rwsem)
+{
+ up_read(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_read);
+
+/**
+ * netfs_start_io_write - declare the file is being used for buffered writes
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is about to start, and ensure
+ * that we block all direct I/O.
+ */
+int netfs_start_io_write(struct inode *inode)
+ __acquires(inode->i_rwsem)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ if (down_write_killable(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (netfs_block_o_direct(ictx) < 0) {
+ up_write(&inode->i_rwsem);
+ return -ERESTARTSYS;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_write);
+
+/**
+ * netfs_end_io_write - declare that the buffered write operation is done
+ * @inode: file inode
+ *
+ * Declare that a buffered write operation is done, and release the
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_write(struct inode *inode)
+ __releases(inode->i_rwsem)
+{
+ up_write(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_write);
+
+/* Call with exclusively locked inode->i_rwsem */
+static int netfs_block_buffered(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+ int ret;
+
+ if (!test_bit(NETFS_ICTX_ODIRECT, &ictx->flags)) {
+ set_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+ if (inode->i_mapping->nrpages != 0) {
+ unmap_mapping_range(inode->i_mapping, 0, 0, 0);
+ ret = filemap_fdatawait(inode->i_mapping);
+ if (ret < 0) {
+ clear_bit(NETFS_ICTX_ODIRECT, &ictx->flags);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * netfs_start_io_direct - declare the file is being used for direct i/o
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is about to start, and ensure
+ * that we block all buffered I/O.
+ * On exit, the function ensures that the NETFS_ICTX_ODIRECT flag is set,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that direct I/O operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas buffered I/O
+ * operations need to wait to grab an exclusive lock in order to clear
+ * NETFS_ICTX_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+ */
+int netfs_start_io_direct(struct inode *inode)
+ __acquires(inode->i_rwsem)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+ int ret;
+
+ /* Be an optimist! */
+ if (down_read_interruptible(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ if (test_bit(NETFS_ICTX_ODIRECT, &ictx->flags) != 0)
+ return 0;
+ up_read(&inode->i_rwsem);
+
+ /* Slow path.... */
+ if (down_write_killable(&inode->i_rwsem) < 0)
+ return -ERESTARTSYS;
+ ret = netfs_block_buffered(inode);
+ if (ret < 0) {
+ up_write(&inode->i_rwsem);
+ return ret;
+ }
+ downgrade_write(&inode->i_rwsem);
+ return 0;
+}
+EXPORT_SYMBOL(netfs_start_io_direct);
+
+/**
+ * netfs_end_io_direct - declare that the direct i/o operation is done
+ * @inode: file inode
+ *
+ * Declare that a direct I/O operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void netfs_end_io_direct(struct inode *inode)
+ __releases(inode->i_rwsem)
+{
+ up_read(&inode->i_rwsem);
+}
+EXPORT_SYMBOL(netfs_end_io_direct);
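The start/end pairs are meant to bracket whole I/O operations, as
netfs_unbuffered_read_iter() does earlier in this patch. A hedged sketch of
the direct-I/O pairing (myfs_transfer() is invented):

	/* Hypothetical DIO path: exclude buffered I/O for the duration. */
	static ssize_t myfs_dio(struct kiocb *iocb, struct iov_iter *iter)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		ret = netfs_start_io_direct(inode);
		if (ret < 0)
			return ret;
		ret = myfs_transfer(iocb, iter);
		netfs_end_io_direct(inode);
		return ret;
	}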
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 068568702957..5e77618a7940 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/export.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>
@@ -15,6 +17,113 @@ MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
+EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
+
unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
+
+#ifdef CONFIG_PROC_FS
+LIST_HEAD(netfs_io_requests);
+DEFINE_SPINLOCK(netfs_proc_lock);
+
+static const char *netfs_origins[nr__netfs_io_origin] = {
+ [NETFS_READAHEAD] = "RA",
+ [NETFS_READPAGE] = "RP",
+ [NETFS_READ_FOR_WRITE] = "RW",
+ [NETFS_WRITEBACK] = "WB",
+ [NETFS_WRITETHROUGH] = "WT",
+ [NETFS_LAUNDER_WRITE] = "LW",
+ [NETFS_UNBUFFERED_WRITE] = "UW",
+ [NETFS_DIO_READ] = "DR",
+ [NETFS_DIO_WRITE] = "DW",
+};
+
+/*
+ * Generate a list of I/O requests in /proc/fs/netfs/requests
+ */
+static int netfs_requests_seq_show(struct seq_file *m, void *v)
+{
+ struct netfs_io_request *rreq;
+
+ if (v == &netfs_io_requests) {
+ seq_puts(m,
+ "REQUEST OR REF FL ERR OPS COVERAGE\n"
+ "======== == === == ==== === =========\n"
+ );
+ return 0;
+ }
+
+ rreq = list_entry(v, struct netfs_io_request, proc_link);
+ seq_printf(m,
+ "%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx",
+ rreq->debug_id,
+ netfs_origins[rreq->origin],
+ refcount_read(&rreq->ref),
+ rreq->flags,
+ rreq->error,
+ atomic_read(&rreq->nr_outstanding),
+ rreq->start, rreq->submitted, rreq->len);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static void *netfs_requests_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(rcu)
+{
+ rcu_read_lock();
+ return seq_list_start_head(&netfs_io_requests, *_pos);
+}
+
+static void *netfs_requests_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &netfs_io_requests, _pos);
+}
+
+static void netfs_requests_seq_stop(struct seq_file *m, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+static const struct seq_operations netfs_requests_seq_ops = {
+ .start = netfs_requests_seq_start,
+ .next = netfs_requests_seq_next,
+ .stop = netfs_requests_seq_stop,
+ .show = netfs_requests_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
+
+static int __init netfs_init(void)
+{
+ int ret = -ENOMEM;
+
+ if (!proc_mkdir("fs/netfs", NULL))
+ goto error;
+ if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
+ &netfs_requests_seq_ops))
+ goto error_proc;
+#ifdef CONFIG_FSCACHE_STATS
+ if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
+ netfs_stats_show))
+ goto error_proc;
+#endif
+
+ ret = fscache_init();
+ if (ret < 0)
+ goto error_proc;
+ return 0;
+
+error_proc:
+ remove_proc_entry("fs/netfs", NULL);
+error:
+ return ret;
+}
+fs_initcall(netfs_init);
+
+static void __exit netfs_exit(void)
+{
+ fscache_exit();
+ remove_proc_entry("fs/netfs", NULL);
+}
+module_exit(netfs_exit);
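Given the format strings above, a populated /proc/fs/netfs/requests would
render roughly as follows (the values are invented for illustration):

	REQUEST  OR REF FL ERR  OPS COVERAGE
	======== == === == ==== === =========
	00000012 DR   3 20    0   2 @0000 0/8000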
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
new file mode 100644
index 000000000000..90051ced8e2a
--- /dev/null
+++ b/fs/netfs/misc.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Miscellaneous routines.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/swap.h>
+#include "internal.h"
+
+/*
+ * Attach a folio to the buffer and maybe set marks on it to say that we need
+ * to put the folio later and twiddle the pagecache flags.
+ */
+int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index,
+ struct folio *folio, unsigned int flags,
+ gfp_t gfp_mask)
+{
+ XA_STATE_ORDER(xas, xa, index, folio_order(folio));
+
+retry:
+ xas_lock(&xas);
+ for (;;) {
+ xas_store(&xas, folio);
+ if (!xas_error(&xas))
+ break;
+ xas_unlock(&xas);
+ if (!xas_nomem(&xas, gfp_mask))
+ return xas_error(&xas);
+ goto retry;
+ }
+
+ if (flags & NETFS_FLAG_PUT_MARK)
+ xas_set_mark(&xas, NETFS_BUF_PUT_MARK);
+ if (flags & NETFS_FLAG_PAGECACHE_MARK)
+ xas_set_mark(&xas, NETFS_BUF_PAGECACHE_MARK);
+ xas_unlock(&xas);
+ return xas_error(&xas);
+}
+
+/*
+ * Create the specified range of folios in the buffer attached to the read
+ * request. The folios are marked with NETFS_BUF_PUT_MARK so that we know that
+ * these need freeing later.
+ */
+int netfs_add_folios_to_buffer(struct xarray *buffer,
+ struct address_space *mapping,
+ pgoff_t index, pgoff_t to, gfp_t gfp_mask)
+{
+ struct folio *folio;
+ int ret;
+
+ if (to + 1 == index) /* Page range is inclusive */
+ return 0;
+
+ do {
+ /* TODO: Figure out what order folio can be allocated here */
+ folio = filemap_alloc_folio(readahead_gfp_mask(mapping), 0);
+ if (!folio)
+ return -ENOMEM;
+ folio->index = index;
+ ret = netfs_xa_store_and_mark(buffer, index, folio,
+ NETFS_FLAG_PUT_MARK, gfp_mask);
+ if (ret < 0) {
+ folio_put(folio);
+ return ret;
+ }
+
+ index += folio_nr_pages(folio);
+ } while (index <= to && index != 0);
+
+ return 0;
+}
+
+/*
+ * Clear an xarray buffer, putting a ref on the folios that have
+ * NETFS_BUF_PUT_MARK set.
+ */
+void netfs_clear_buffer(struct xarray *buffer)
+{
+ struct folio *folio;
+ XA_STATE(xas, buffer, 0);
+
+ rcu_read_lock();
+ xas_for_each_marked(&xas, folio, ULONG_MAX, NETFS_BUF_PUT_MARK) {
+ folio_put(folio);
+ }
+ rcu_read_unlock();
+ xa_destroy(buffer);
+}
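These three helpers form a small lifecycle: populate a marked xarray buffer,
use it for I/O, then tear it down. A hedged sketch using only functions from
this file:

	/* Hypothetical buffer setup; on failure, puts any folios already
	 * marked NETFS_BUF_PUT_MARK.
	 */
	static int myfs_fill_buffer(struct xarray *buffer,
				    struct address_space *mapping,
				    pgoff_t first, pgoff_t last)
	{
		int ret;

		ret = netfs_add_folios_to_buffer(buffer, mapping, first,
						 last, GFP_NOFS);
		if (ret < 0)
			netfs_clear_buffer(buffer);
		return ret;
	}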
+
+/**
+ * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
+ * @mapping: The mapping the folio belongs to.
+ * @folio: The folio being dirtied.
+ *
+ * Set the dirty flag on a folio and pin an in-use cache object in memory so
+ * that writeback can later write to it. This is intended to be called from
+ * the filesystem's ->dirty_folio() method.
+ *
+ * Return: true if the dirty flag was set on the folio, false otherwise.
+ */
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ struct fscache_cookie *cookie = netfs_i_cookie(ictx);
+ bool need_use = false;
+
+ _enter("");
+
+ if (!filemap_dirty_folio(mapping, folio))
+ return false;
+ if (!fscache_cookie_valid(cookie))
+ return true;
+
+ if (!(inode->i_state & I_PINNING_NETFS_WB)) {
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_state & I_PINNING_NETFS_WB)) {
+ inode->i_state |= I_PINNING_NETFS_WB;
+ need_use = true;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (need_use)
+ fscache_use_cookie(cookie, true);
+ }
+ return true;
+}
+EXPORT_SYMBOL(netfs_dirty_folio);
+
+/**
+ * netfs_unpin_writeback - Unpin writeback resources
+ * @inode: The inode on which the cookie resides
+ * @wbc: The writeback control
+ *
+ * Unpin the writeback resources pinned by netfs_dirty_folio(). This is
+ * intended to be called as/by the netfs's ->write_inode() method.
+ */
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc)
+{
+ struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+
+ if (wbc->unpinned_netfs_wb)
+ fscache_unuse_cookie(cookie, NULL, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(netfs_unpin_writeback);
+
+/**
+ * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode
+ * @inode: The inode to clean up
+ * @aux: Auxiliary data to apply to the inode
+ *
+ * Clear any writeback resources held by an inode when the inode is evicted.
+ * This must be called before clear_inode() is called.
+ */
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
+{
+ struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
+
+ if (inode->i_state & I_PINNING_NETFS_WB) {
+ loff_t i_size = i_size_read(inode);
+ fscache_unuse_cookie(cookie, aux, &i_size);
+ }
+}
+EXPORT_SYMBOL(netfs_clear_inode_writeback);
+
+/**
+ * netfs_invalidate_folio - Invalidate or partially invalidate a folio
+ * @folio: Folio proposed for release
+ * @offset: Offset of the invalidated region
+ * @length: Length of the invalidated region
+ *
+ * Invalidate part or all of a folio for a network filesystem. The folio will
+ * be removed afterwards if the invalidated region covers the entire folio.
+ */
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+ struct netfs_folio *finfo = NULL;
+ size_t flen = folio_size(folio);
+
+ _enter("{%lx},%zx,%zx", folio->index, offset, length);
+
+ folio_wait_fscache(folio);
+
+ if (!folio_test_private(folio))
+ return;
+
+ finfo = netfs_folio_info(folio);
+
+ if (offset == 0 && length >= flen)
+ goto erase_completely;
+
+ if (finfo) {
+ /* We have a partially uptodate page from a streaming write. */
+ unsigned int fstart = finfo->dirty_offset;
+ unsigned int fend = fstart + finfo->dirty_len;
+ unsigned int end = offset + length;
+
+ if (offset >= fend)
+ return;
+ if (end <= fstart)
+ return;
+ if (offset <= fstart && end >= fend)
+ goto erase_completely;
+ if (offset <= fstart && end > fstart)
+ goto move_start;
+ if (offset > fstart && end >= fend)
+ goto reduce_len;
+ /* A partial write was split. The caller has already zeroed
+ * it, so just absorb the hole.
+ */
+ }
+ return;
+
+erase_completely:
+ netfs_put_group(netfs_folio_group(folio));
+ folio_detach_private(folio);
+ folio_clear_uptodate(folio);
+ kfree(finfo);
+ return;
+reduce_len:
+ finfo->dirty_len = offset - finfo->dirty_offset;
+ return;
+move_start:
+ finfo->dirty_len -= offset + length - finfo->dirty_offset;
+ finfo->dirty_offset = offset + length;
+}
+EXPORT_SYMBOL(netfs_invalidate_folio);
+
+/**
+ * netfs_release_folio - Try to release a folio
+ * @folio: Folio proposed for release
+ * @gfp: Flags qualifying the release
+ *
+ * Request release of a folio and clean up its private state if it's not busy.
+ * Returns true if the folio can now be released, false if not
+ */
+bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+{
+ struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+ unsigned long long end;
+
+ end = folio_pos(folio) + folio_size(folio);
+ if (end > ctx->zero_point)
+ ctx->zero_point = end;
+
+ if (folio_test_private(folio))
+ return false;
+ if (folio_test_fscache(folio)) {
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ folio_wait_fscache(folio);
+ }
+
+ fscache_note_page_release(netfs_i_cookie(ctx));
+ return true;
+}
+EXPORT_SYMBOL(netfs_release_folio);
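Taken together, the folio helpers in this file slot directly into a
filesystem's address_space_operations; a hypothetical wiring (other methods
omitted):

	static const struct address_space_operations myfs_aops = {
		.dirty_folio		= netfs_dirty_folio,
		.invalidate_folio	= netfs_invalidate_folio,
		.release_folio		= netfs_release_folio,
	};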
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index e17cdf53f6a7..610ceb5bd86c 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -20,14 +20,20 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct inode *inode = file ? file_inode(file) : mapping->host;
struct netfs_inode *ctx = netfs_inode(inode);
struct netfs_io_request *rreq;
+ bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
+ origin == NETFS_DIO_READ ||
+ origin == NETFS_DIO_WRITE);
+ bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
int ret;
- rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+ rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
+ GFP_KERNEL);
if (!rreq)
return ERR_PTR(-ENOMEM);
rreq->start = start;
rreq->len = len;
+ rreq->upper_len = len;
rreq->origin = origin;
rreq->netfs_ops = ctx->ops;
rreq->mapping = mapping;
@@ -35,8 +41,14 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
rreq->i_size = i_size_read(inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
INIT_LIST_HEAD(&rreq->subrequests);
+ INIT_WORK(&rreq->work, NULL);
refcount_set(&rreq->ref, 1);
+
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+ if (cached)
+ __set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+ if (file && file->f_flags & O_NONBLOCK)
+ __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
if (rreq->netfs_ops->init_request) {
ret = rreq->netfs_ops->init_request(rreq, file);
if (ret < 0) {
@@ -45,6 +57,8 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
}
}
+ trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
+ netfs_proc_add_rreq(rreq);
netfs_stat(&netfs_n_rh_rreq);
return rreq;
}
@@ -74,33 +88,47 @@ static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
+ unsigned int i;
trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+ netfs_proc_del_rreq(rreq);
netfs_clear_subrequests(rreq, false);
if (rreq->netfs_ops->free_request)
rreq->netfs_ops->free_request(rreq);
if (rreq->cache_resources.ops)
rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
- kfree(rreq);
+ if (rreq->direct_bv) {
+ for (i = 0; i < rreq->direct_bv_count; i++) {
+ if (rreq->direct_bv[i].bv_page) {
+ if (rreq->direct_bv_unpin)
+ unpin_user_page(rreq->direct_bv[i].bv_page);
+ }
+ }
+ kvfree(rreq->direct_bv);
+ }
+ kfree_rcu(rreq, rcu);
netfs_stat_d(&netfs_n_rh_rreq);
}
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
enum netfs_rreq_ref_trace what)
{
- unsigned int debug_id = rreq->debug_id;
+ unsigned int debug_id;
bool dead;
int r;
- dead = __refcount_dec_and_test(&rreq->ref, &r);
- trace_netfs_rreq_ref(debug_id, r - 1, what);
- if (dead) {
- if (was_async) {
- rreq->work.func = netfs_free_request;
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
- } else {
- netfs_free_request(&rreq->work);
+ if (rreq) {
+ debug_id = rreq->debug_id;
+ dead = __refcount_dec_and_test(&rreq->ref, &r);
+ trace_netfs_rreq_ref(debug_id, r - 1, what);
+ if (dead) {
+ if (was_async) {
+ rreq->work.func = netfs_free_request;
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ BUG();
+ } else {
+ netfs_free_request(&rreq->work);
+ }
}
}
}
@@ -112,8 +140,11 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq
{
struct netfs_io_subrequest *subreq;
- subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
+ subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
+ sizeof(struct netfs_io_subrequest),
+ GFP_KERNEL);
if (subreq) {
+ INIT_WORK(&subreq->work, NULL);
INIT_LIST_HEAD(&subreq->rreq_link);
refcount_set(&subreq->ref, 2);
subreq->rreq = rreq;
@@ -140,6 +171,8 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
struct netfs_io_request *rreq = subreq->rreq;
trace_netfs_sreq(subreq, netfs_sreq_trace_free);
+ if (rreq->netfs_ops->free_subrequest)
+ rreq->netfs_ops->free_subrequest(subreq);
kfree(subreq);
netfs_stat_d(&netfs_n_rh_sreq);
netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
diff --git a/fs/netfs/output.c b/fs/netfs/output.c
new file mode 100644
index 000000000000..625eb68f3e5a
--- /dev/null
+++ b/fs/netfs/output.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem high-level write support.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include "internal.h"
+
+/**
+ * netfs_create_write_request - Create a write operation.
+ * @wreq: The write request this is storing from.
+ * @dest: The destination type
+ * @start: Start of the region this write will modify
+ * @len: Length of the modification
+ * @worker: The worker function to handle the write(s)
+ *
+ * Allocate a write operation, set it up and add it to the list on a write
+ * request.
+ */
+struct netfs_io_subrequest *netfs_create_write_request(struct netfs_io_request *wreq,
+ enum netfs_io_source dest,
+ loff_t start, size_t len,
+ work_func_t worker)
+{
+ struct netfs_io_subrequest *subreq;
+
+ subreq = netfs_alloc_subrequest(wreq);
+ if (subreq) {
+ INIT_WORK(&subreq->work, worker);
+ subreq->source = dest;
+ subreq->start = start;
+ subreq->len = len;
+ subreq->debug_index = wreq->subreq_counter++;
+
+ switch (subreq->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write);
+ break;
+ default:
+ BUG();
+ }
+
+ subreq->io_iter = wreq->io_iter;
+ iov_iter_advance(&subreq->io_iter, subreq->start - wreq->start);
+ iov_iter_truncate(&subreq->io_iter, subreq->len);
+
+ trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+ atomic_inc(&wreq->nr_outstanding);
+ list_add_tail(&subreq->rreq_link, &wreq->subrequests);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ }
+
+ return subreq;
+}
+EXPORT_SYMBOL(netfs_create_write_request);
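The worker function handed in here is what later performs the transfer and
signals completion. A hedged skeleton for an upload subrequest
(myfs_send_data() is invented; netfs_write_subrequest_terminated() is defined
below):

	/* Hypothetical work_func_t for a NETFS_UPLOAD_TO_SERVER subrequest. */
	static void myfs_upload_worker(struct work_struct *work)
	{
		struct netfs_io_subrequest *subreq =
			container_of(work, struct netfs_io_subrequest, work);
		ssize_t ret;

		ret = myfs_send_data(subreq->rreq->netfs_priv, subreq->start,
				     &subreq->io_iter);
		netfs_write_subrequest_terminated(subreq, ret, false);
	}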
+
+/*
+ * Process a completed write request once all the component operations have
+ * been completed.
+ */
+static void netfs_write_terminated(struct netfs_io_request *wreq, bool was_async)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_inode *ctx = netfs_inode(wreq->inode);
+ size_t transferred = 0;
+
+ _enter("R=%x[]", wreq->debug_id);
+
+ trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+
+ list_for_each_entry(subreq, &wreq->subrequests, rreq_link) {
+ if (subreq->error || subreq->transferred == 0)
+ break;
+ transferred += subreq->transferred;
+ if (subreq->transferred < subreq->len)
+ break;
+ }
+ wreq->transferred = transferred;
+
+ list_for_each_entry(subreq, &wreq->subrequests, rreq_link) {
+ if (!subreq->error)
+ continue;
+ switch (subreq->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ /* Depending on the type of failure, this may prevent
+ * writeback completion unless we're in disconnected
+ * mode.
+ */
+ if (!wreq->error)
+ wreq->error = subreq->error;
+ break;
+
+ case NETFS_WRITE_TO_CACHE:
+ /* Failure doesn't prevent writeback completion unless
+ * we're in disconnected mode.
+ */
+ if (subreq->error != -ENOBUFS)
+ ctx->ops->invalidate_cache(wreq);
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ if (!wreq->error)
+ wreq->error = -EIO;
+ return;
+ }
+ }
+
+ wreq->cleanup(wreq);
+
+ if (wreq->origin == NETFS_DIO_WRITE &&
+ wreq->mapping->nrpages) {
+ pgoff_t first = wreq->start >> PAGE_SHIFT;
+ pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+ invalidate_inode_pages2_range(wreq->mapping, first, last);
+ }
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_end(wreq->inode);
+
+ _debug("finished");
+ trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
+ clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
+ wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+ if (wreq->iocb) {
+ wreq->iocb->ki_pos += transferred;
+ if (wreq->iocb->ki_complete)
+ wreq->iocb->ki_complete(
+ wreq->iocb, wreq->error ? wreq->error : transferred);
+ }
+
+ netfs_clear_subrequests(wreq, was_async);
+ netfs_put_request(wreq, was_async, netfs_rreq_trace_put_complete);
+}
+
+/*
+ * Deal with the completion of a write to the server or the cache.
+ */
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct netfs_io_subrequest *subreq = _op;
+ struct netfs_io_request *wreq = subreq->rreq;
+ unsigned int u;
+
+ _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+
+ switch (subreq->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload_done);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write_done);
+ break;
+ case NETFS_INVALID_WRITE:
+ break;
+ default:
+ BUG();
+ }
+
+ if (IS_ERR_VALUE(transferred_or_error)) {
+ subreq->error = transferred_or_error;
+ trace_netfs_failure(wreq, subreq, transferred_or_error,
+ netfs_fail_write);
+ goto failed;
+ }
+
+ if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+ "Subreq excess write: R%x[%x] %zd > %zu - %zu",
+ wreq->debug_id, subreq->debug_index,
+ transferred_or_error, subreq->len, subreq->transferred))
+ transferred_or_error = subreq->len - subreq->transferred;
+
+ subreq->error = 0;
+ subreq->transferred += transferred_or_error;
+
+ if (iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
+ pr_warn("R=%08x[%u] ITER POST-MISMATCH %zx != %zx-%zx %x\n",
+ wreq->debug_id, subreq->debug_index,
+ iov_iter_count(&subreq->io_iter), subreq->len,
+ subreq->transferred, subreq->io_iter.iter_type);
+
+ if (subreq->transferred < subreq->len)
+ goto incomplete;
+
+ __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+out:
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+ /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+ u = atomic_dec_return(&wreq->nr_outstanding);
+ if (u == 0)
+ netfs_write_terminated(wreq, was_async);
+ else if (u == 1)
+ wake_up_var(&wreq->nr_outstanding);
+
+ netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+ return;
+
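+/* Two-strikes rule for stalled writes, summarising the logic below: the
+ * first zero-byte completion sets NETFS_SREQ_NO_PROGRESS and marks the
+ * subrequest as a short write; a second consecutive zero-byte completion
+ * finds the flag already set and fails with -ENODATA rather than spinning
+ * forever. Any completion that makes partial progress clears the flag.
+ */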
+incomplete:
+ if (transferred_or_error == 0) {
+ if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+ subreq->error = -ENODATA;
+ goto failed;
+ }
+ } else {
+ __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ }
+
+ __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+ set_bit(NETFS_RREQ_INCOMPLETE_IO, &wreq->flags);
+ goto out;
+
+failed:
+ switch (subreq->source) {
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write_failed);
+ set_bit(NETFS_RREQ_INCOMPLETE_IO, &wreq->flags);
+ break;
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload_failed);
+ set_bit(NETFS_RREQ_FAILED, &wreq->flags);
+ wreq->error = subreq->error;
+ break;
+ default:
+ break;
+ }
+ goto out;
+}
+EXPORT_SYMBOL(netfs_write_subrequest_terminated);
+
+static void netfs_write_to_cache_op(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+ struct netfs_cache_resources *cres = &wreq->cache_resources;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+
+ cres->ops->write(cres, subreq->start, &subreq->io_iter,
+ netfs_write_subrequest_terminated, subreq);
+}
+
+static void netfs_write_to_cache_op_worker(struct work_struct *work)
+{
+ struct netfs_io_subrequest *subreq =
+ container_of(work, struct netfs_io_subrequest, work);
+
+ netfs_write_to_cache_op(subreq);
+}
+
+/**
+ * netfs_queue_write_request - Queue a write request for attention
+ * @subreq: The write request to be queued
+ *
+ * Queue the specified write request for processing by a worker thread. We
+ * pass the caller's ref on the request to the worker thread.
+ */
+void netfs_queue_write_request(struct netfs_io_subrequest *subreq)
+{
+ if (!queue_work(system_unbound_wq, &subreq->work))
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_wip);
+}
+EXPORT_SYMBOL(netfs_queue_write_request);
+
+/*
+ * Set up an op for writing to the cache.
+ */
+static void netfs_set_up_write_to_cache(struct netfs_io_request *wreq)
+{
+ struct netfs_cache_resources *cres = &wreq->cache_resources;
+ struct netfs_io_subrequest *subreq;
+ struct netfs_inode *ctx = netfs_inode(wreq->inode);
+ struct fscache_cookie *cookie = netfs_i_cookie(ctx);
+ loff_t start = wreq->start;
+ size_t len = wreq->len;
+ int ret;
+
+ if (!fscache_cookie_enabled(cookie)) {
+ clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags);
+ return;
+ }
+
+ _debug("write to cache");
+ ret = fscache_begin_write_operation(cres, cookie);
+ if (ret < 0)
+ return;
+
+ ret = cres->ops->prepare_write(cres, &start, &len, wreq->upper_len,
+ i_size_read(wreq->inode), true);
+ if (ret < 0)
+ return;
+
+ subreq = netfs_create_write_request(wreq, NETFS_WRITE_TO_CACHE, start, len,
+ netfs_write_to_cache_op_worker);
+ if (!subreq)
+ return;
+
+ netfs_write_to_cache_op(subreq);
+}
+
+/*
+ * Begin the process of writing out a chunk of data.
+ *
+ * We are given a write request that holds a series of dirty regions and
+ * (partially) covers a sequence of folios, all of which are present. The
+ * pages must have been marked as writeback as appropriate.
+ *
+ * We need to perform the following steps:
+ *
+ * (1) If encrypting, create an output buffer and encrypt each block of the
+ * data into it; otherwise the output buffer will point to the original
+ * folios.
+ *
+ * (2) If the data is to be cached, set up a write op for the entire output
+ * buffer to the cache, if the cache wants to accept it.
+ *
+ * (3) If the data is to be uploaded (ie. not merely cached):
+ *
+ * (a) If the data is to be compressed, create a compression buffer and
+ * compress the data into it.
+ *
+ * (b) For each destination we want to upload to, set up write ops to write
+ * to that destination. We may need multiple writes if the data is not
+ * contiguous or the span exceeds wsize for a server.
+ */
+int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
+ enum netfs_write_trace what)
+{
+ struct netfs_inode *ctx = netfs_inode(wreq->inode);
+
+ _enter("R=%x %llx-%llx f=%lx",
+ wreq->debug_id, wreq->start, wreq->start + wreq->len - 1,
+ wreq->flags);
+
+ trace_netfs_write(wreq, what);
+ if (wreq->len == 0 || wreq->iter.count == 0) {
+ pr_err("Zero-sized write [R=%x]\n", wreq->debug_id);
+ return -EIO;
+ }
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_begin(wreq->inode);
+
+ wreq->io_iter = wreq->iter;
+
+ /* ->outstanding > 0 carries a ref */
+ netfs_get_request(wreq, netfs_rreq_trace_get_for_outstanding);
+ atomic_set(&wreq->nr_outstanding, 1);
+
+ /* Start the encryption/compression going. We can do that in the
+ * background whilst we generate a list of write ops that we want to
+ * perform.
+ */
+ // TODO: Encrypt or compress the region as appropriate
+
+ /* We need to write all of the region to the cache */
+ if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
+ netfs_set_up_write_to_cache(wreq);
+
+ /* However, we don't necessarily write all of the region to the server.
+ * Caching of reads is also managed this way.
+ */
+ if (test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+ ctx->ops->create_write_requests(wreq, wreq->start, wreq->len);
+
+ if (atomic_dec_and_test(&wreq->nr_outstanding))
+ netfs_write_terminated(wreq, false);
+
+ if (!may_wait)
+ return -EIOCBQUEUED;
+
+ wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
+ return wreq->error;
+}
+
+/*
+ * Begin a write operation for writing through the pagecache.
+ */
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
+{
+ struct netfs_io_request *wreq;
+ struct file *file = iocb->ki_filp;
+
+ wreq = netfs_alloc_request(file->f_mapping, file, iocb->ki_pos, len,
+ NETFS_WRITETHROUGH);
+ if (IS_ERR(wreq))
+ return wreq;
+
+ trace_netfs_write(wreq, netfs_write_trace_writethrough);
+
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ iov_iter_xarray(&wreq->iter, ITER_SOURCE, &wreq->mapping->i_pages, wreq->start, 0);
+ wreq->io_iter = wreq->iter;
+
+ /* ->outstanding > 0 carries a ref */
+ netfs_get_request(wreq, netfs_rreq_trace_get_for_outstanding);
+ atomic_set(&wreq->nr_outstanding, 1);
+ return wreq;
+}
+
+static void netfs_submit_writethrough(struct netfs_io_request *wreq, bool final)
+{
+ struct netfs_inode *ictx = netfs_inode(wreq->inode);
+ unsigned long long start;
+ size_t len;
+
+ if (!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+ return;
+
+ start = wreq->start + wreq->submitted;
+ len = wreq->iter.count - wreq->submitted;
+ if (!final) {
+ len /= wreq->wsize; /* Round to number of maximum packets */
+ len *= wreq->wsize;
+ }
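+ /* Worked example of the rounding above: with wsize = 65536 and
+ * 150000 bytes not yet submitted, a non-final pass submits
+ * 2 * 65536 = 131072 bytes and leaves 18928 buffered; only the
+ * final pass skips the rounding and submits the tail.
+ */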
+
+ ictx->ops->create_write_requests(wreq, start, len);
+ wreq->submitted += len;
+}
+
+/*
+ * Advance the state of the write operation used when writing through the
+ * pagecache. Data has been copied into the pagecache that we need to append
+ * to the request. If we've added more than wsize then we need to create a new
+ * subrequest.
+ */
+int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end)
+{
+ _enter("ic=%zu sb=%zu ws=%u cp=%zu tp=%u",
+ wreq->iter.count, wreq->submitted, wreq->wsize, copied, to_page_end);
+
+ wreq->iter.count += copied;
+ wreq->io_iter.count += copied;
+ if (to_page_end && wreq->io_iter.count - wreq->submitted >= wreq->wsize)
+ netfs_submit_writethrough(wreq, false);
+
+ return wreq->error;
+}
+
+/*
+ * End a write operation used when writing through the pagecache.
+ */
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb)
+{
+ int ret = -EIOCBQUEUED;
+
+ _enter("ic=%zu sb=%zu ws=%u",
+ wreq->iter.count, wreq->submitted, wreq->wsize);
+
+ if (wreq->submitted < wreq->io_iter.count)
+ netfs_submit_writethrough(wreq, true);
+
+ if (atomic_dec_and_test(&wreq->nr_outstanding))
+ netfs_write_terminated(wreq, false);
+
+ if (is_sync_kiocb(iocb)) {
+ wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
+ ret = wreq->error;
+ }
+
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ return ret;
+}
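
The write path in output.c above relies on a bias-counted completion scheme.
A condensed sketch of the pattern, assembled from the functions in this file
(a simplification, not new kernel code):

	/* The submitter holds a bias of 1 plus a ref on the request, so a
	 * completion racing with submission can never see the counter hit
	 * zero early.
	 */
	netfs_get_request(wreq, netfs_rreq_trace_get_for_outstanding);
	atomic_set(&wreq->nr_outstanding, 1);

	/* Each subrequest created adds one ... */
	atomic_inc(&wreq->nr_outstanding);

	/* ... each completion removes one ... */
	if (atomic_dec_return(&wreq->nr_outstanding) == 0)
		netfs_write_terminated(wreq, was_async);

	/* ... and the submitter finally drops its bias. Whichever
	 * decrement reaches zero runs netfs_write_terminated() exactly
	 * once.
	 */
	if (atomic_dec_and_test(&wreq->nr_outstanding))
		netfs_write_terminated(wreq, false);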
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index 5510a7a14a40..deeba9f9dcf5 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -9,6 +9,8 @@
#include <linux/seq_file.h>
#include "internal.h"
+atomic_t netfs_n_rh_dio_read;
+atomic_t netfs_n_rh_dio_write;
atomic_t netfs_n_rh_readahead;
atomic_t netfs_n_rh_readpage;
atomic_t netfs_n_rh_rreq;
@@ -27,32 +29,48 @@ atomic_t netfs_n_rh_write_begin;
atomic_t netfs_n_rh_write_done;
atomic_t netfs_n_rh_write_failed;
atomic_t netfs_n_rh_write_zskip;
+atomic_t netfs_n_wh_wstream_conflict;
+atomic_t netfs_n_wh_upload;
+atomic_t netfs_n_wh_upload_done;
+atomic_t netfs_n_wh_upload_failed;
+atomic_t netfs_n_wh_write;
+atomic_t netfs_n_wh_write_done;
+atomic_t netfs_n_wh_write_failed;
-void netfs_stats_show(struct seq_file *m)
+int netfs_stats_show(struct seq_file *m, void *v)
{
- seq_printf(m, "RdHelp : RA=%u RP=%u WB=%u WBZ=%u rr=%u sr=%u\n",
+ seq_printf(m, "Netfs : DR=%u DW=%u RA=%u RP=%u WB=%u WBZ=%u\n",
+ atomic_read(&netfs_n_rh_dio_read),
+ atomic_read(&netfs_n_rh_dio_write),
atomic_read(&netfs_n_rh_readahead),
atomic_read(&netfs_n_rh_readpage),
atomic_read(&netfs_n_rh_write_begin),
- atomic_read(&netfs_n_rh_write_zskip),
- atomic_read(&netfs_n_rh_rreq),
- atomic_read(&netfs_n_rh_sreq));
- seq_printf(m, "RdHelp : ZR=%u sh=%u sk=%u\n",
+ atomic_read(&netfs_n_rh_write_zskip));
+ seq_printf(m, "Netfs : ZR=%u sh=%u sk=%u\n",
atomic_read(&netfs_n_rh_zero),
atomic_read(&netfs_n_rh_short_read),
atomic_read(&netfs_n_rh_write_zskip));
- seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n",
+ seq_printf(m, "Netfs : DL=%u ds=%u df=%u di=%u\n",
atomic_read(&netfs_n_rh_download),
atomic_read(&netfs_n_rh_download_done),
atomic_read(&netfs_n_rh_download_failed),
atomic_read(&netfs_n_rh_download_instead));
- seq_printf(m, "RdHelp : RD=%u rs=%u rf=%u\n",
+ seq_printf(m, "Netfs : RD=%u rs=%u rf=%u\n",
atomic_read(&netfs_n_rh_read),
atomic_read(&netfs_n_rh_read_done),
atomic_read(&netfs_n_rh_read_failed));
- seq_printf(m, "RdHelp : WR=%u ws=%u wf=%u\n",
- atomic_read(&netfs_n_rh_write),
- atomic_read(&netfs_n_rh_write_done),
- atomic_read(&netfs_n_rh_write_failed));
+ seq_printf(m, "Netfs : UL=%u us=%u uf=%u\n",
+ atomic_read(&netfs_n_wh_upload),
+ atomic_read(&netfs_n_wh_upload_done),
+ atomic_read(&netfs_n_wh_upload_failed));
+ seq_printf(m, "Netfs : WR=%u ws=%u wf=%u\n",
+ atomic_read(&netfs_n_wh_write),
+ atomic_read(&netfs_n_wh_write_done),
+ atomic_read(&netfs_n_wh_write_failed));
+ seq_printf(m, "Netfs : rr=%u sr=%u wsc=%u\n",
+ atomic_read(&netfs_n_rh_rreq),
+ atomic_read(&netfs_n_rh_sreq),
+ atomic_read(&netfs_n_wh_wstream_conflict));
+ return fscache_stats_show(m);
}
EXPORT_SYMBOL(netfs_stats_show);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 01ac733a6320..f7e32d76e34d 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -169,8 +169,8 @@ config ROOT_NFS
config NFS_FSCACHE
bool "Provide NFS client caching support"
- depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
- select NETFS_SUPPORT
+ depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y
+ select FSCACHE
help
Say Y here if you want NFS data to be cached locally on disc through
the general filesystem cache manager
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 44eca51b2808..fbdc9ca80f71 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -246,7 +246,7 @@ void nfs_free_client(struct nfs_client *clp)
put_nfs_version(clp->cl_nfs_mod);
kfree(clp->cl_hostname);
kfree(clp->cl_acceptor);
- kfree(clp);
+ kfree_rcu(clp, rcu);
}
EXPORT_SYMBOL_GPL(nfs_free_client);
@@ -1006,6 +1006,14 @@ struct nfs_server *nfs_alloc_server(void)
}
EXPORT_SYMBOL_GPL(nfs_alloc_server);
+static void delayed_free(struct rcu_head *p)
+{
+ struct nfs_server *server = container_of(p, struct nfs_server, rcu);
+
+ nfs_free_iostats(server->io_stats);
+ kfree(server);
+}
+
/*
* Free up a server record
*/
@@ -1031,10 +1039,9 @@ void nfs_free_server(struct nfs_server *server)
ida_destroy(&server->lockowner_id);
ida_destroy(&server->openowner_id);
- nfs_free_iostats(server->io_stats);
put_cred(server->cred);
- kfree(server);
nfs_release_automount_timer();
+ call_rcu(&server->rcu, delayed_free);
}
EXPORT_SYMBOL_GPL(nfs_free_server);
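
Both hunks above defer the final kfree() past an RCU grace period: kfree_rcu()
for the nfs_client, and call_rcu() with delayed_free() for the nfs_server. The
generic shape of the pattern, as a hedged illustration (struct foo and its
helpers are hypothetical):

	struct foo {
		struct rcu_head rcu;
		int payload;
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_release(struct foo *p)
	{
		/* Readers under rcu_read_lock() may still hold a pointer
		 * to *p, so defer the free until a grace period elapses.
		 */
		call_rcu(&p->rcu, foo_free_rcu);
	}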
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c8ecbe999059..ac505671efbd 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1431,9 +1431,9 @@ static bool nfs_verifier_is_delegated(struct dentry *dentry)
static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
{
struct inode *inode = d_inode(dentry);
- struct inode *dir = d_inode(dentry->d_parent);
+ struct inode *dir = d_inode_rcu(dentry->d_parent);
- if (!nfs_verify_change_attribute(dir, verf))
+ if (!dir || !nfs_verify_change_attribute(dir, verf))
return;
if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
nfs_set_verifier_delegated(&verf);
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index b05717fe0d4e..2d1bfee225c3 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -274,12 +274,6 @@ static void nfs_netfs_free_request(struct netfs_io_request *rreq)
put_nfs_open_context(rreq->netfs_priv);
}
-static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
-{
- return fscache_begin_read_operation(&rreq->cache_resources,
- netfs_i_cookie(netfs_inode(rreq->inode)));
-}
-
static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
struct nfs_netfs_io_data *netfs;
@@ -387,7 +381,6 @@ void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
const struct netfs_request_ops nfs_netfs_ops = {
.init_request = nfs_netfs_init_request,
.free_request = nfs_netfs_free_request,
- .begin_cache_operation = nfs_netfs_begin_cache_operation,
.issue_read = nfs_netfs_issue_read,
.clamp_length = nfs_netfs_clamp_length
};
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 5407ab8c8783..e3cb4923316b 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -80,7 +80,7 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
}
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
- netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops);
+ netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
}
extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2fa54cfd4882..7d6c657e0409 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4945,10 +4945,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
*/
fl->fl_break_time = 0;
- spin_lock(&fp->fi_lock);
fp->fi_had_conflict = true;
nfsd_break_one_deleg(dp);
- spin_unlock(&fp->fi_lock);
return false;
}
@@ -5557,12 +5555,13 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (status)
goto out_unlock;
+ status = -EAGAIN;
+ if (fp->fi_had_conflict)
+ goto out_unlock;
+
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
- if (fp->fi_had_conflict)
- status = -EAGAIN;
- else
- status = hash_delegation_locked(dp, fp);
+ status = hash_delegation_locked(dp, fp);
spin_unlock(&fp->fi_lock);
spin_unlock(&state_lock);
@@ -7911,14 +7910,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
struct file_lock *fl;
int status = false;
- struct nfsd_file *nf = find_any_file(fp);
+ struct nfsd_file *nf;
struct inode *inode;
struct file_lock_context *flctx;
+ spin_lock(&fp->fi_lock);
+ nf = find_any_file_locked(fp);
if (!nf) {
/* Any valid lock stateid should have some sort of access */
WARN_ON_ONCE(1);
- return status;
+ goto out;
}
inode = file_inode(nf->nf_file);
@@ -7934,7 +7935,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
}
spin_unlock(&flctx->flc_lock);
}
- nfsd_file_put(nf);
+out:
+ spin_unlock(&fp->fi_lock);
return status;
}
@@ -7944,10 +7946,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
* @cstate: NFSv4 COMPOUND state
* @u: RELEASE_LOCKOWNER arguments
*
- * The lockowner's so_count is bumped when a lock record is added
- * or when copying a conflicting lock. The latter case is brief,
- * but can lead to fleeting false positives when looking for
- * locks-in-use.
+ * Check if there are any locks still held and, if not, free the
+ * lockowner and any lock state that is owned.
*
* Return values:
* %nfs_ok: lockowner released or not found
@@ -7983,10 +7983,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
spin_unlock(&clp->cl_lock);
return nfs_ok;
}
- if (atomic_read(&lo->lo_owner.so_count) != 2) {
- spin_unlock(&clp->cl_lock);
- nfs4_put_stateowner(&lo->lo_owner);
- return nfserr_locks_held;
+
+ list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
+ if (check_for_locks(stp->st_stid.sc_file, lo)) {
+ spin_unlock(&clp->cl_lock);
+ nfs4_put_stateowner(&lo->lo_owner);
+ return nfserr_locks_held;
+ }
}
unhash_lockowner_locked(lo);
while (!list_empty(&lo->lo_owner.so_stateids)) {
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index bec33b89a075..0e3fc5ba33c7 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -107,7 +107,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
nilfs_transaction_commit(inode->i_sb);
mapped:
- folio_wait_stable(folio);
+ /*
+ * Since checksumming including data blocks is performed to determine
+ * the validity of the log to be written and used for recovery, it is
+ * necessary to wait for writeback to finish here, regardless of the
+ * stable write requirement of the backing device.
+ */
+ folio_wait_writeback(folio);
out:
sb_end_pagefault(inode->i_sb);
return vmf_fs_error(ret);
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 0955b657938f..a9b8d77c8c1d 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -472,9 +472,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
struct nilfs_recovery_block *rb,
- struct page *page)
+ loff_t pos, struct page *page)
{
struct buffer_head *bh_org;
+ size_t from = pos & ~PAGE_MASK;
void *kaddr;
bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
@@ -482,7 +483,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
return -EIO;
kaddr = kmap_atomic(page);
- memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
+ memcpy(kaddr + from, bh_org->b_data, bh_org->b_size);
kunmap_atomic(kaddr);
brelse(bh_org);
return 0;
@@ -521,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
goto failed_inode;
}
- err = nilfs_recovery_copy_block(nilfs, rb, page);
+ err = nilfs_recovery_copy_block(nilfs, rb, pos, page);
if (unlikely(err))
goto failed_page;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 2590a0860eab..2bfb08052d39 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1703,7 +1703,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- set_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
if (bh->b_folio != bd_folio) {
folio_lock(bd_folio);
@@ -1714,6 +1713,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
}
break;
}
+ set_buffer_async_write(bh);
if (bh->b_folio != fs_folio) {
nilfs_begin_folio_io(fs_folio);
fs_folio = bh->b_folio;
@@ -1800,7 +1800,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- clear_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
clear_buffer_uptodate(bh);
if (bh->b_folio != bd_folio) {
@@ -1809,6 +1808,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
}
break;
}
+ clear_buffer_async_write(bh);
if (bh->b_folio != fs_folio) {
nilfs_end_folio_io(fs_folio, err);
fs_folio = bh->b_folio;
@@ -1896,8 +1896,9 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
BIT(BH_NILFS_Redirected));
- set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh == segbuf->sb_super_root) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
if (bh->b_folio != bd_folio) {
folio_end_writeback(bd_folio);
bd_folio = bh->b_folio;
@@ -1905,6 +1906,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
update_sr = true;
break;
}
+ set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh->b_folio != fs_folio) {
nilfs_end_folio_io(fs_folio, 0);
fs_folio = bh->b_folio;
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 63f70259edc0..7aadf5010999 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -886,7 +886,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
struct runs_tree *run = &ni->file.run;
struct ntfs_sb_info *sbi;
u8 cluster_bits;
- struct ATTRIB *attr = NULL, *attr_b;
+ struct ATTRIB *attr, *attr_b;
struct ATTR_LIST_ENTRY *le, *le_b;
struct mft_inode *mi, *mi_b;
CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
@@ -904,12 +904,8 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
*len = 0;
up_read(&ni->file.run_lock);
- if (*len) {
- if (*lcn != SPARSE_LCN || !new)
- return 0; /* Fast normal way without allocation. */
- else if (clen > *len)
- clen = *len;
- }
+ if (*len && (*lcn != SPARSE_LCN || !new))
+ return 0; /* Fast normal way without allocation. */
/* No cluster in cache or we need to allocate cluster in hole. */
sbi = ni->mi.sbi;
@@ -918,6 +914,17 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
ni_lock(ni);
down_write(&ni->file.run_lock);
+ /* Repeat the code above (under write lock). */
+ if (!run_lookup_entry(run, vcn, lcn, len, NULL))
+ *len = 0;
+
+ if (*len) {
+ if (*lcn != SPARSE_LCN || !new)
+ goto out; /* normal way without allocation. */
+ if (clen > *len)
+ clen = *len;
+ }
+
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
if (!attr_b) {
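
The repeated run_lookup_entry() above is the usual optimistic
read-lock/write-lock pattern. A condensed view of the control flow in
attr_data_get_block() (paraphrased from the surrounding code, not a new
implementation):

	down_read(&ni->file.run_lock);
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
	up_read(&ni->file.run_lock);
	if (ok && (*lcn != SPARSE_LCN || !new))
		return 0;		/* fast path, no allocation */

	ni_lock(ni);
	down_write(&ni->file.run_lock);
	/* The run may have changed while no lock was held, so repeat the
	 * lookup before deciding to allocate clusters in the hole.
	 */
	ok = run_lookup_entry(run, vcn, lcn, len, NULL);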
@@ -1736,8 +1743,10 @@ repack:
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
0, NULL, &mi_b);
- if (!attr_b)
- return -ENOENT;
+ if (!attr_b) {
+ err = -ENOENT;
+ goto out;
+ }
attr = attr_b;
le = le_b;
@@ -1818,13 +1827,15 @@ ins_ext:
ok:
run_truncate_around(run, vcn);
out:
- if (new_valid > data_size)
- new_valid = data_size;
+ if (attr_b) {
+ if (new_valid > data_size)
+ new_valid = data_size;
- valid_size = le64_to_cpu(attr_b->nres.valid_size);
- if (new_valid != valid_size) {
- attr_b->nres.valid_size = cpu_to_le64(valid_size);
- mi_b->dirty = true;
+ valid_size = le64_to_cpu(attr_b->nres.valid_size);
+ if (new_valid != valid_size) {
+ attr_b->nres.valid_size = cpu_to_le64(valid_size);
+ mi_b->dirty = true;
+ }
}
return err;
@@ -2073,7 +2084,7 @@ next_attr:
/* Update inode size. */
ni->i_valid = valid_size;
- ni->vfs_inode.i_size = data_size;
+ i_size_write(&ni->vfs_inode, data_size);
inode_set_bytes(&ni->vfs_inode, total_size);
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
mark_inode_dirty(&ni->vfs_inode);
@@ -2488,7 +2499,7 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
mi_b->dirty = true;
done:
- ni->vfs_inode.i_size += bytes;
+ i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
mark_inode_dirty(&ni->vfs_inode);
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index 7c01735d1219..9f4bd8d26090 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -29,7 +29,7 @@ static inline bool al_is_valid_le(const struct ntfs_inode *ni,
void al_destroy(struct ntfs_inode *ni)
{
run_close(&ni->attr_list.run);
- kfree(ni->attr_list.le);
+ kvfree(ni->attr_list.le);
ni->attr_list.le = NULL;
ni->attr_list.size = 0;
ni->attr_list.dirty = false;
@@ -127,12 +127,13 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
{
size_t off;
u16 sz;
+ const unsigned le_min_size = le_size(0);
if (!le) {
le = ni->attr_list.le;
} else {
sz = le16_to_cpu(le->size);
- if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
+ if (sz < le_min_size) {
/* Impossible 'cause we should not return such le. */
return NULL;
}
@@ -141,7 +142,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
/* Check boundary. */
off = PtrOffset(ni->attr_list.le, le);
- if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
+ if (off + le_min_size > ni->attr_list.size) {
/* The regular end of list. */
return NULL;
}
@@ -149,8 +150,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
sz = le16_to_cpu(le->size);
/* Check le for errors. */
- if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
- off + sz > ni->attr_list.size ||
+ if (sz < le_min_size || off + sz > ni->attr_list.size ||
sz < le->name_off + le->name_len * sizeof(short)) {
return NULL;
}
@@ -318,7 +318,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
memcpy(ptr, al->le, off);
memcpy(Add2Ptr(ptr, off + sz), le, old_size - off);
le = Add2Ptr(ptr, off);
- kfree(al->le);
+ kvfree(al->le);
al->le = ptr;
} else {
memmove(Add2Ptr(le, sz), le, old_size - off);
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 63f14a0232f6..845f9b22deef 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -124,7 +124,7 @@ void wnd_close(struct wnd_bitmap *wnd)
{
struct rb_node *node, *next;
- kfree(wnd->free_bits);
+ kvfree(wnd->free_bits);
wnd->free_bits = NULL;
run_close(&wnd->run);
@@ -1360,7 +1360,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
memset(new_free + wnd->nwnd, 0,
(new_wnd - wnd->nwnd) * sizeof(short));
- kfree(wnd->free_bits);
+ kvfree(wnd->free_bits);
wnd->free_bits = new_free;
}
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index ec0566b322d5..5cf3d9decf64 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -309,11 +309,31 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
return 0;
}
- /* NTFS: symlinks are "dir + reparse" or "file + reparse" */
- if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
- dt_type = DT_LNK;
- else
- dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+ /*
+ * NTFS: symlinks are "dir + reparse" or "file + reparse".
+ * Unfortunately the reparse attribute is used for many purposes
+ * (several dozen). It is not possible here to know whether this
+ * name is a symlink or not. To get the exact type of the name we
+ * would have to open the inode (read the MFT). getattr on an
+ * opened file (fstat) does return the symlink type correctly.
+ */
+ dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+
+ /*
+ * It is not reliable to detect the type of a name from the duplicated
+ * information stored in the parent directory. The only correct way to
+ * get the type of a name is to read the MFT record and find ATTR_STD.
+ * The code below is not a good idea: it takes additional locks/reads
+ * just to get the type of the name. Should we add a mount option to
+ * enable this branch?
+ */
+ if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) &&
+ ino != ni->mi.rno) {
+ struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
+ if (!IS_ERR_OR_NULL(inode)) {
+ dt_type = fs_umode_to_dtype(inode->i_mode);
+ iput(inode);
+ }
+ }
return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
}
@@ -495,11 +515,9 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
struct INDEX_HDR *hdr;
const struct ATTR_FILE_NAME *fname;
u32 e_size, off, end;
- u64 vbo = 0;
size_t drs = 0, fles = 0, bit = 0;
- loff_t i_size = ni->vfs_inode.i_size;
struct indx_node *node = NULL;
- u8 index_bits = ni->dir.index_bits;
+ size_t max_indx = i_size_read(&ni->vfs_inode) >> ni->dir.index_bits;
if (is_empty)
*is_empty = true;
@@ -518,8 +536,10 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
e = Add2Ptr(hdr, off);
e_size = le16_to_cpu(e->size);
if (e_size < sizeof(struct NTFS_DE) ||
- off + e_size > end)
+ off + e_size > end) {
+ /* Looks like corruption. */
break;
+ }
if (de_is_last(e))
break;
@@ -543,7 +563,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
fles += 1;
}
- if (vbo >= i_size)
+ if (bit >= max_indx)
goto out;
err = indx_used_bit(&ni->dir, ni, &bit);
@@ -553,8 +573,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
if (bit == MINUS_ONE_T)
goto out;
- vbo = (u64)bit << index_bits;
- if (vbo >= i_size)
+ if (bit >= max_indx)
goto out;
err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
@@ -564,7 +583,6 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
hdr = &node->index->ihdr;
bit += 1;
- vbo = (u64)bit << ni->dir.idx2vbn_bits;
}
out:
@@ -593,5 +611,9 @@ const struct file_operations ntfs_dir_operations = {
.iterate_shared = ntfs_readdir,
.fsync = generic_file_fsync,
.open = ntfs_file_open,
+ .unlocked_ioctl = ntfs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ntfs_compat_ioctl,
+#endif
};
// clang-format on
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index a5a30a24ce5d..5418662c80d8 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -48,7 +48,7 @@ static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
return 0;
}
-static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
@@ -61,7 +61,7 @@ static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
-static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
+long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
@@ -188,6 +188,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
u32 bh_next, bh_off, to;
sector_t iblock;
struct folio *folio;
+ bool dirty = false;
for (; idx < idx_end; idx += 1, from = 0) {
page_off = (loff_t)idx << PAGE_SHIFT;
@@ -223,29 +224,27 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
/* Ok, it's mapped. Make sure it's up-to-date. */
if (folio_test_uptodate(folio))
set_buffer_uptodate(bh);
-
- if (!buffer_uptodate(bh)) {
- err = bh_read(bh, 0);
- if (err < 0) {
- folio_unlock(folio);
- folio_put(folio);
- goto out;
- }
+ else if (bh_read(bh, 0) < 0) {
+ err = -EIO;
+ folio_unlock(folio);
+ folio_put(folio);
+ goto out;
}
mark_buffer_dirty(bh);
-
} while (bh_off = bh_next, iblock += 1,
head != (bh = bh->b_this_page));
folio_zero_segment(folio, from, to);
+ dirty = true;
folio_unlock(folio);
folio_put(folio);
cond_resched();
}
out:
- mark_inode_dirty(inode);
+ if (dirty)
+ mark_inode_dirty(inode);
return err;
}
@@ -261,6 +260,9 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
bool rw = vma->vm_flags & VM_WRITE;
int err;
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if (is_encrypted(ni)) {
ntfs_inode_warn(inode, "mmap encrypted not supported");
return -EOPNOTSUPP;
@@ -499,10 +501,14 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
ni_lock(ni);
err = attr_punch_hole(ni, vbo, len, &frame_size);
ni_unlock(ni);
+ if (!err)
+ goto ok;
+
if (err != E_NTFS_NOTALIGNED)
goto out;
/* Process not aligned punch. */
+ err = 0;
mask = frame_size - 1;
vbo_a = (vbo + mask) & ~mask;
end_a = end & ~mask;
@@ -525,6 +531,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
ni_lock(ni);
err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
ni_unlock(ni);
+ if (err)
+ goto out;
}
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
/*
@@ -564,6 +572,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
ni_lock(ni);
err = attr_insert_range(ni, vbo, len);
ni_unlock(ni);
+ if (err)
+ goto out;
} else {
/* Check new size. */
u8 cluster_bits = sbi->cluster_bits;
@@ -633,11 +643,18 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
&ni->file.run, i_size, &ni->i_valid,
true, NULL);
ni_unlock(ni);
+ if (err)
+ goto out;
} else if (new_size > i_size) {
- inode->i_size = new_size;
+ i_size_write(inode, new_size);
}
}
+ok:
+ err = file_modified(file);
+ if (err)
+ goto out;
+
out:
if (map_locked)
filemap_invalidate_unlock(mapping);
@@ -663,6 +680,9 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
umode_t mode = inode->i_mode;
int err;
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
err = setattr_prepare(idmap, dentry, attr);
if (err)
goto out;
@@ -676,7 +696,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
goto out;
}
inode_dio_wait(inode);
- oldsize = inode->i_size;
+ oldsize = i_size_read(inode);
newsize = attr->ia_size;
if (newsize <= oldsize)
@@ -688,7 +708,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
goto out;
ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
- inode->i_size = newsize;
+ i_size_write(inode, newsize);
}
setattr_copy(idmap, inode, attr);
@@ -718,6 +738,9 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
struct inode *inode = file->f_mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if (is_encrypted(ni)) {
ntfs_inode_warn(inode, "encrypted i/o not supported");
return -EOPNOTSUPP;
@@ -752,6 +775,9 @@ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
struct inode *inode = in->f_mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if (is_encrypted(ni)) {
ntfs_inode_warn(inode, "encrypted i/o not supported");
return -EOPNOTSUPP;
@@ -821,7 +847,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
size_t count = iov_iter_count(from);
loff_t pos = iocb->ki_pos;
struct inode *inode = file_inode(file);
- loff_t i_size = inode->i_size;
+ loff_t i_size = i_size_read(inode);
struct address_space *mapping = inode->i_mapping;
struct ntfs_inode *ni = ntfs_i(inode);
u64 valid = ni->i_valid;
@@ -1028,6 +1054,8 @@ out:
iocb->ki_pos += written;
if (iocb->ki_pos > ni->i_valid)
ni->i_valid = iocb->ki_pos;
+ if (iocb->ki_pos > i_size)
+ i_size_write(inode, iocb->ki_pos);
return written;
}
@@ -1041,8 +1069,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t ret;
+ int err;
struct ntfs_inode *ni = ntfs_i(inode);
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if (is_encrypted(ni)) {
ntfs_inode_warn(inode, "encrypted i/o not supported");
return -EOPNOTSUPP;
@@ -1068,6 +1100,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret <= 0)
goto out;
+ err = file_modified(iocb->ki_filp);
+ if (err) {
+ ret = err;
+ goto out;
+ }
+
if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
/* Should never be here, see ntfs_file_open(). */
ret = -EOPNOTSUPP;
@@ -1097,6 +1135,9 @@ int ntfs_file_open(struct inode *inode, struct file *file)
{
struct ntfs_inode *ni = ntfs_i(inode);
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
(file->f_flags & O_DIRECT))) {
return -EOPNOTSUPP;
@@ -1138,7 +1179,8 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
down_write(&ni->file.run_lock);
err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
- inode->i_size, &ni->i_valid, false, NULL);
+ i_size_read(inode), &ni->i_valid, false,
+ NULL);
up_write(&ni->file.run_lock);
ni_unlock(ni);
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 3df2d9e34b91..7f27382e0ce2 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -778,7 +778,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
run_deallocate(sbi, &ni->attr_list.run, true);
run_close(&ni->attr_list.run);
ni->attr_list.size = 0;
- kfree(ni->attr_list.le);
+ kvfree(ni->attr_list.le);
ni->attr_list.le = NULL;
ni->attr_list.dirty = false;
@@ -927,7 +927,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
return 0;
out:
- kfree(ni->attr_list.le);
+ kvfree(ni->attr_list.le);
ni->attr_list.le = NULL;
ni->attr_list.size = 0;
return err;
@@ -2099,7 +2099,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
gfp_t gfp_mask;
struct page *pg;
- if (vbo >= ni->vfs_inode.i_size) {
+ if (vbo >= i_size_read(&ni->vfs_inode)) {
SetPageUptodate(page);
err = 0;
goto out;
@@ -2173,7 +2173,7 @@ int ni_decompress_file(struct ntfs_inode *ni)
{
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct inode *inode = &ni->vfs_inode;
- loff_t i_size = inode->i_size;
+ loff_t i_size = i_size_read(inode);
struct address_space *mapping = inode->i_mapping;
gfp_t gfp_mask = mapping_gfp_mask(mapping);
struct page **pages = NULL;
@@ -2508,6 +2508,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
err = -EOPNOTSUPP;
goto out1;
#else
+ loff_t i_size = i_size_read(&ni->vfs_inode);
u32 frame_bits = ni_ext_compress_bits(ni);
u64 frame64 = frame_vbo >> frame_bits;
u64 frames, vbo_data;
@@ -2548,7 +2549,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
}
}
- frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
+ frames = (i_size - 1) >> frame_bits;
err = attr_wof_frame_info(ni, attr, run, frame64, frames,
frame_bits, &ondisk_size, &vbo_data);
@@ -2556,8 +2557,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
goto out2;
if (frame64 == frames) {
- unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
- (frame_size - 1));
+ unc_size = 1 + ((i_size - 1) & (frame_size - 1));
ondisk_size = attr_size(attr) - vbo_data;
} else {
unc_size = frame_size;
@@ -3259,6 +3259,9 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
if (is_bad_inode(inode) || sb_rdonly(sb))
return 0;
+ if (unlikely(ntfs3_forced_shutdown(sb)))
+ return -EIO;
+
if (!ni_trylock(ni)) {
/* 'ni' is under modification, skip for now. */
mark_inode_dirty_sync(inode);
@@ -3288,7 +3291,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
modified = true;
}
- ts = inode_get_mtime(inode);
+ ts = inode_get_ctime(inode);
dup.c_time = kernel2nt(&ts);
if (std->c_time != dup.c_time) {
std->c_time = dup.c_time;
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 98ccb6650858..855519713bf7 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -465,7 +465,7 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
{
const struct RESTART_AREA *ra;
u16 cl, fl, ul;
- u32 off, l_size, file_dat_bits, file_size_round;
+ u32 off, l_size, seq_bits;
u16 ro = le16_to_cpu(rhdr->ra_off);
u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
@@ -511,13 +511,15 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
/* Make sure the sequence number bits match the log file size. */
l_size = le64_to_cpu(ra->l_size);
- file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
- file_size_round = 1u << (file_dat_bits + 3);
- if (file_size_round != l_size &&
- (file_size_round < l_size || (file_size_round / 2) > l_size)) {
- return false;
+ seq_bits = sizeof(u64) * 8 + 3;
+ while (l_size) {
+ l_size >>= 1;
+ seq_bits -= 1;
}
+ if (seq_bits != ra->seq_num_bits)
+ return false;
+
/* The log page data offset and record header length must be quad-aligned. */
if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
!IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
@@ -974,6 +976,16 @@ skip_looking:
return e;
}
+struct restart_info {
+ u64 last_lsn;
+ struct RESTART_HDR *r_page;
+ u32 vbo;
+ bool chkdsk_was_run;
+ bool valid_page;
+ bool initialized;
+ bool restart;
+};
+
#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
#define NTFSLOG_WRAPPED 0x00000001
@@ -987,6 +999,7 @@ struct ntfs_log {
struct ntfs_inode *ni;
u32 l_size;
+ u32 orig_file_size;
u32 sys_page_size;
u32 sys_page_mask;
u32 page_size;
@@ -1040,6 +1053,8 @@ struct ntfs_log {
struct CLIENT_ID client_id;
u32 client_undo_commit;
+
+ struct restart_info rst_info, rst_info2;
};
static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
@@ -1105,16 +1120,6 @@ static inline bool verify_client_lsn(struct ntfs_log *log,
lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
}
-struct restart_info {
- u64 last_lsn;
- struct RESTART_HDR *r_page;
- u32 vbo;
- bool chkdsk_was_run;
- bool valid_page;
- bool initialized;
- bool restart;
-};
-
static int read_log_page(struct ntfs_log *log, u32 vbo,
struct RECORD_PAGE_HDR **buffer, bool *usa_error)
{
@@ -1176,7 +1181,7 @@ out:
* restart page header. It will stop the first time we find a
* valid page header.
*/
-static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+static int log_read_rst(struct ntfs_log *log, bool first,
struct restart_info *info)
{
u32 skip, vbo;
@@ -1192,7 +1197,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
}
/* Loop continuously until we succeed. */
- for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
+ for (; vbo < log->l_size; vbo = 2 * vbo + skip, skip = 0) {
bool usa_error;
bool brst, bchk;
struct RESTART_AREA *ra;
@@ -1285,22 +1290,17 @@ check_result:
/*
* log_init_pg_hdr - Init @log from restart page header.
*/
-static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
- u32 page_size, u16 major_ver, u16 minor_ver)
+static void log_init_pg_hdr(struct ntfs_log *log, u16 major_ver, u16 minor_ver)
{
- log->sys_page_size = sys_page_size;
- log->sys_page_mask = sys_page_size - 1;
- log->page_size = page_size;
- log->page_mask = page_size - 1;
- log->page_bits = blksize_bits(page_size);
+ log->sys_page_size = log->page_size;
+ log->sys_page_mask = log->page_mask;
log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
if (!log->clst_per_page)
log->clst_per_page = 1;
- log->first_page = major_ver >= 2 ?
- 0x22 * page_size :
- ((sys_page_size << 1) + (page_size << 1));
+ log->first_page = major_ver >= 2 ? 0x22 * log->page_size :
+ 4 * log->page_size;
log->major_ver = major_ver;
log->minor_ver = minor_ver;
}
@@ -1308,12 +1308,11 @@ static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
/*
* log_create - Init @log in cases when we don't have a restart area to use.
*/
-static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
+static void log_create(struct ntfs_log *log, const u64 last_lsn,
u32 open_log_count, bool wrapped, bool use_multi_page)
{
- log->l_size = l_size;
/* All file offsets must be quadword aligned. */
- log->file_data_bits = blksize_bits(l_size) - 3;
+ log->file_data_bits = blksize_bits(log->l_size) - 3;
log->seq_num_mask = (8 << log->file_data_bits) - 1;
log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
log->seq_num = (last_lsn >> log->file_data_bits) + 2;
@@ -3720,10 +3719,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct ntfs_log *log;
- struct restart_info rst_info, rst_info2;
- u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
+ u64 rec_lsn, checkpt_lsn = 0, rlsn = 0;
struct ATTR_NAME_ENTRY *attr_names = NULL;
- struct ATTR_NAME_ENTRY *ane;
struct RESTART_TABLE *dptbl = NULL;
struct RESTART_TABLE *trtbl = NULL;
const struct RESTART_TABLE *rt;
@@ -3741,9 +3738,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
struct TRANSACTION_ENTRY *tr;
struct DIR_PAGE_ENTRY *dp;
u32 i, bytes_per_attr_entry;
- u32 l_size = ni->vfs_inode.i_size;
- u32 orig_file_size = l_size;
- u32 page_size, vbo, tail, off, dlen;
+ u32 vbo, tail, off, dlen;
u32 saved_len, rec_len, transact_id;
bool use_second_page;
struct RESTART_AREA *ra2, *ra = NULL;
@@ -3758,52 +3753,50 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
u16 t16;
u32 t32;
- /* Get the size of page. NOTE: To replay we can use default page. */
-#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
- page_size = norm_file_page(PAGE_SIZE, &l_size, true);
-#else
- page_size = norm_file_page(PAGE_SIZE, &l_size, false);
-#endif
- if (!page_size)
- return -EINVAL;
-
log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS);
if (!log)
return -ENOMEM;
log->ni = ni;
- log->l_size = l_size;
- log->one_page_buf = kmalloc(page_size, GFP_NOFS);
+ log->l_size = log->orig_file_size = ni->vfs_inode.i_size;
+ /* Get the size of page. NOTE: To replay we can use default page. */
+#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
+ log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, true);
+#else
+ log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, false);
+#endif
+ if (!log->page_size) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ log->one_page_buf = kmalloc(log->page_size, GFP_NOFS);
if (!log->one_page_buf) {
err = -ENOMEM;
goto out;
}
- log->page_size = page_size;
- log->page_mask = page_size - 1;
- log->page_bits = blksize_bits(page_size);
+ log->page_mask = log->page_size - 1;
+ log->page_bits = blksize_bits(log->page_size);
/* Look for a restart area on the disk. */
- memset(&rst_info, 0, sizeof(struct restart_info));
- err = log_read_rst(log, l_size, true, &rst_info);
+ err = log_read_rst(log, true, &log->rst_info);
if (err)
goto out;
/* remember 'initialized' */
- *initialized = rst_info.initialized;
+ *initialized = log->rst_info.initialized;
- if (!rst_info.restart) {
- if (rst_info.initialized) {
+ if (!log->rst_info.restart) {
+ if (log->rst_info.initialized) {
/* No restart area but the file is not initialized. */
err = -EINVAL;
goto out;
}
- log_init_pg_hdr(log, page_size, page_size, 1, 1);
- log_create(log, l_size, 0, get_random_u32(), false, false);
-
- log->ra = ra;
+ log_init_pg_hdr(log, 1, 1);
+ log_create(log, 0, get_random_u32(), false, false);
ra = log_create_ra(log);
if (!ra) {
@@ -3820,25 +3813,26 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
* If the restart offset above wasn't zero then we won't
* look for a second restart.
*/
- if (rst_info.vbo)
+ if (log->rst_info.vbo)
goto check_restart_area;
- memset(&rst_info2, 0, sizeof(struct restart_info));
- err = log_read_rst(log, l_size, false, &rst_info2);
+ err = log_read_rst(log, false, &log->rst_info2);
if (err)
goto out;
/* Determine which restart area to use. */
- if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
+ if (!log->rst_info2.restart ||
+ log->rst_info2.last_lsn <= log->rst_info.last_lsn)
goto use_first_page;
use_second_page = true;
- if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
+ if (log->rst_info.chkdsk_was_run &&
+ log->page_size != log->rst_info.vbo) {
struct RECORD_PAGE_HDR *sp = NULL;
bool usa_error;
- if (!read_log_page(log, page_size, &sp, &usa_error) &&
+ if (!read_log_page(log, log->page_size, &sp, &usa_error) &&
sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
use_second_page = false;
}
@@ -3846,52 +3840,43 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
}
if (use_second_page) {
- kfree(rst_info.r_page);
- memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
- rst_info2.r_page = NULL;
+ kfree(log->rst_info.r_page);
+ memcpy(&log->rst_info, &log->rst_info2,
+ sizeof(struct restart_info));
+ log->rst_info2.r_page = NULL;
}
use_first_page:
- kfree(rst_info2.r_page);
+ kfree(log->rst_info2.r_page);
check_restart_area:
/*
* If the restart area is at offset 0, we want
* to write the second restart area first.
*/
- log->init_ra = !!rst_info.vbo;
+ log->init_ra = !!log->rst_info.vbo;
/* If we have a valid page then grab a pointer to the restart area. */
- ra2 = rst_info.valid_page ?
- Add2Ptr(rst_info.r_page,
- le16_to_cpu(rst_info.r_page->ra_off)) :
+ ra2 = log->rst_info.valid_page ?
+ Add2Ptr(log->rst_info.r_page,
+ le16_to_cpu(log->rst_info.r_page->ra_off)) :
NULL;
- if (rst_info.chkdsk_was_run ||
+ if (log->rst_info.chkdsk_was_run ||
(ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
bool wrapped = false;
bool use_multi_page = false;
u32 open_log_count;
/* Do some checks based on whether we have a valid log page. */
- if (!rst_info.valid_page) {
- open_log_count = get_random_u32();
- goto init_log_instance;
- }
- open_log_count = le32_to_cpu(ra2->open_log_count);
-
- /*
- * If the restart page size isn't changing then we want to
- * check how much work we need to do.
- */
- if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
- goto init_log_instance;
+ open_log_count = log->rst_info.valid_page ?
+ le32_to_cpu(ra2->open_log_count) :
+ get_random_u32();
-init_log_instance:
- log_init_pg_hdr(log, page_size, page_size, 1, 1);
+ log_init_pg_hdr(log, 1, 1);
- log_create(log, l_size, rst_info.last_lsn, open_log_count,
- wrapped, use_multi_page);
+ log_create(log, log->rst_info.last_lsn, open_log_count, wrapped,
+ use_multi_page);
ra = log_create_ra(log);
if (!ra) {
@@ -3916,28 +3901,27 @@ init_log_instance:
* use the log file. We must use the system page size instead of the
* default size if there is not a clean shutdown.
*/
- t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
- if (page_size != t32) {
- l_size = orig_file_size;
- page_size =
- norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
+ t32 = le32_to_cpu(log->rst_info.r_page->sys_page_size);
+ if (log->page_size != t32) {
+ log->l_size = log->orig_file_size;
+ log->page_size = norm_file_page(t32, &log->l_size,
+ t32 == DefaultLogPageSize);
}
- if (page_size != t32 ||
- page_size != le32_to_cpu(rst_info.r_page->page_size)) {
+ if (log->page_size != t32 ||
+ log->page_size != le32_to_cpu(log->rst_info.r_page->page_size)) {
err = -EINVAL;
goto out;
}
/* If the file size has shrunk then we won't mount it. */
- if (l_size < le64_to_cpu(ra2->l_size)) {
+ if (log->l_size < le64_to_cpu(ra2->l_size)) {
err = -EINVAL;
goto out;
}
- log_init_pg_hdr(log, page_size, page_size,
- le16_to_cpu(rst_info.r_page->major_ver),
- le16_to_cpu(rst_info.r_page->minor_ver));
+ log_init_pg_hdr(log, le16_to_cpu(log->rst_info.r_page->major_ver),
+ le16_to_cpu(log->rst_info.r_page->minor_ver));
log->l_size = le64_to_cpu(ra2->l_size);
log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
@@ -3945,7 +3929,7 @@ init_log_instance:
log->seq_num_mask = (8 << log->file_data_bits) - 1;
log->last_lsn = le64_to_cpu(ra2->current_lsn);
log->seq_num = log->last_lsn >> log->file_data_bits;
- log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
+ log->ra_off = le16_to_cpu(log->rst_info.r_page->ra_off);
log->restart_size = log->sys_page_size - log->ra_off;
log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
log->ra_size = le16_to_cpu(ra2->ra_len);
@@ -4045,7 +4029,7 @@ find_oldest:
log->current_avail = current_log_avail(log);
/* Remember which restart area to write first. */
- log->init_ra = rst_info.vbo;
+ log->init_ra = log->rst_info.vbo;
process_log:
/* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values. */
@@ -4105,7 +4089,7 @@ process_log:
log->client_id.seq_num = cr->seq_num;
log->client_id.client_idx = client;
- err = read_rst_area(log, &rst, &ra_lsn);
+ err = read_rst_area(log, &rst, &checkpt_lsn);
if (err)
goto out;
@@ -4114,9 +4098,8 @@ process_log:
bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
- checkpt_lsn = le64_to_cpu(rst->check_point_start);
- if (!checkpt_lsn)
- checkpt_lsn = ra_lsn;
+ if (rst->check_point_start)
+ checkpt_lsn = le64_to_cpu(rst->check_point_start);
/* Allocate and Read the Transaction Table. */
if (!rst->transact_table_len)
@@ -4330,23 +4313,20 @@ check_attr_table:
lcb = NULL;
check_attribute_names2:
- if (!rst->attr_names_len)
- goto trace_attribute_table;
-
- ane = attr_names;
- if (!oatbl)
- goto trace_attribute_table;
- while (ane->off) {
- /* TODO: Clear table on exit! */
- oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
- t16 = le16_to_cpu(ane->name_bytes);
- oe->name_len = t16 / sizeof(short);
- oe->ptr = ane->name;
- oe->is_attr_name = 2;
- ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
- }
-
-trace_attribute_table:
+ if (rst->attr_names_len && oatbl) {
+ struct ATTR_NAME_ENTRY *ane = attr_names;
+ while (ane->off) {
+ /* TODO: Clear table on exit! */
+ oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
+ t16 = le16_to_cpu(ane->name_bytes);
+ oe->name_len = t16 / sizeof(short);
+ oe->ptr = ane->name;
+ oe->is_attr_name = 2;
+ ane = Add2Ptr(ane,
+ sizeof(struct ATTR_NAME_ENTRY) + t16);
+ }
+ }
+
/*
* If the checkpt_lsn is zero, then this is a freshly
* formatted disk and we have no work to do.
@@ -5189,7 +5169,7 @@ out:
kfree(oatbl);
kfree(dptbl);
kfree(attr_names);
- kfree(rst_info.r_page);
+ kfree(log->rst_info.r_page);
kfree(ra);
kfree(log->one_page_buf);
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index fbfe21dbb425..ae2ef5c11868 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -853,7 +853,8 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
/*
* sb can be NULL here. In this case sbi->flags should be 0 too.
*/
- if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
+ if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
+ unlikely(ntfs3_forced_shutdown(sb)))
return;
blocksize = sb->s_blocksize;
@@ -1006,6 +1007,30 @@ static inline __le32 security_hash(const void *sd, size_t bytes)
return cpu_to_le32(hash);
}
+/*
+ * Simple wrapper for sb_bread_unmovable().
+ */
+struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
+{
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+ struct buffer_head *bh;
+
+ if (unlikely(block >= sbi->volume.blocks)) {
+ /* prevent generic message "attempt to access beyond end of device" */
+ ntfs_err(sb, "try to read out of volume at offset 0x%llx",
+ (u64)block << sb->s_blocksize_bits);
+ return NULL;
+ }
+
+ bh = sb_bread_unmovable(sb, block);
+ if (bh)
+ return bh;
+
+ ntfs_err(sb, "failed to read volume at offset 0x%llx",
+ (u64)block << sb->s_blocksize_bits);
+ return NULL;
+}
+
int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
{
struct block_device *bdev = sb->s_bdev;
@@ -2128,8 +2153,8 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
if (le32_to_cpu(d_security->size) == new_sec_size &&
d_security->key.hash == hash_key.hash &&
!memcmp(d_security + 1, sd, size_sd)) {
- *security_id = d_security->key.sec_id;
/* Such security already exists. */
+ *security_id = d_security->key.sec_id;
err = 0;
goto out;
}
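
The ntfs_bread() wrapper added above turns an out-of-range block number into a filesystem-level error instead of letting the block layer log "attempt to access beyond end of device". A minimal userspace sketch of the same bounds-check-before-I/O pattern; the struct and helper names here are illustrative, not kernel APIs:

#include <stdint.h>
#include <stdio.h>

struct volume {
	uint64_t blocks;		/* device size in filesystem blocks */
	unsigned int blocksize_bits;	/* log2(block size) */
};

/* Reject the read up front rather than letting lower layers fail noisily. */
static int checked_read(const struct volume *v, uint64_t block)
{
	if (block >= v->blocks) {
		fprintf(stderr, "read beyond volume at offset 0x%llx\n",
			(unsigned long long)block << v->blocksize_bits);
		return -1;
	}
	/* ... submit the real read here ... */
	return 0;
}
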
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index cf92b2433f7a..daabaad63aaf 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1462,7 +1462,7 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
goto out2;
if (in->name == I30_NAME) {
- ni->vfs_inode.i_size = data_size;
+ i_size_write(&ni->vfs_inode, data_size);
inode_set_bytes(&ni->vfs_inode, alloc_size);
}
@@ -1544,7 +1544,7 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
}
if (in->name == I30_NAME)
- ni->vfs_inode.i_size = data_size;
+ i_size_write(&ni->vfs_inode, data_size);
*vbn = bit << indx->idx2vbn_bits;
@@ -2090,7 +2090,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
return err;
if (in->name == I30_NAME)
- ni->vfs_inode.i_size = new_data;
+ i_size_write(&ni->vfs_inode, new_data);
bpb = bitmap_size(bit);
if (bpb * 8 == nbits)
@@ -2576,7 +2576,7 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
&indx->alloc_run, 0, NULL, false, NULL);
if (in->name == I30_NAME)
- ni->vfs_inode.i_size = 0;
+ i_size_write(&ni->vfs_inode, 0);
err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
false, NULL);
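
These index.c hunks replace direct stores to vfs_inode.i_size with i_size_write(). On 32-bit SMP kernels the size is a 64-bit quantity that cannot be stored atomically, so i_size_write()/i_size_read() bracket it with a sequence counter to keep readers from seeing a torn value. A rough userspace analogue of that seqcount idiom (the plain store of size is intentionally simplified; the kernel version adds the required barriers and lock annotations):

#include <stdatomic.h>
#include <stdint.h>

struct sized {
	atomic_uint seq;	/* odd while a write is in flight */
	uint64_t size;
};

static void size_write(struct sized *s, uint64_t v)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* now odd */
	s->size = v;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* even again */
}

static uint64_t size_read(struct sized *s)
{
	unsigned int begin;
	uint64_t v;

	do {
		begin = atomic_load_explicit(&s->seq, memory_order_acquire);
		v = s->size;
	} while ((begin & 1) ||
		 begin != atomic_load_explicit(&s->seq, memory_order_acquire));
	return v;
}
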
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 5e3d71374918..eb7a8c9fba01 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -345,9 +345,7 @@ next_attr:
inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
.PrintNameLength) /
sizeof(u16);
-
ni->i_valid = inode->i_size;
-
/* Clear directory bit. */
if (ni->ni_flags & NI_FLAG_DIR) {
indx_clear(&ni->dir);
@@ -412,7 +410,6 @@ end_enum:
goto out;
if (!is_match && name) {
- /* Reuse rec as buffer for ascii name. */
err = -ENOENT;
goto out;
}
@@ -427,6 +424,7 @@ end_enum:
if (names != le16_to_cpu(rec->hard_links)) {
/* Correct minor error on the fly. Do not mark inode as dirty. */
+ ntfs_inode_warn(inode, "Correct links count -> %u.", names);
rec->hard_links = cpu_to_le16(names);
ni->mi.dirty = true;
}
@@ -653,9 +651,10 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
off = vbo & (PAGE_SIZE - 1);
folio_set_bh(bh, folio, off);
- err = bh_read(bh, 0);
- if (err < 0)
+ if (bh_read(bh, 0) < 0) {
+ err = -EIO;
goto out;
+ }
folio_zero_segment(folio, off + voff, off + block_size);
}
}
@@ -853,9 +852,13 @@ static int ntfs_resident_writepage(struct folio *folio,
struct writeback_control *wbc, void *data)
{
struct address_space *mapping = data;
- struct ntfs_inode *ni = ntfs_i(mapping->host);
+ struct inode *inode = mapping->host;
+ struct ntfs_inode *ni = ntfs_i(inode);
int ret;
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
ni_lock(ni);
ret = attr_data_write_resident(ni, &folio->page);
ni_unlock(ni);
@@ -869,7 +872,12 @@ static int ntfs_resident_writepage(struct folio *folio,
static int ntfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- if (is_resident(ntfs_i(mapping->host)))
+ struct inode *inode = mapping->host;
+
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
+ if (is_resident(ntfs_i(inode)))
return write_cache_pages(mapping, wbc, ntfs_resident_writepage,
mapping);
return mpage_writepages(mapping, wbc, ntfs_get_block);
@@ -889,6 +897,9 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
*pagep = NULL;
if (is_resident(ni)) {
struct page *page =
@@ -974,7 +985,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
}
if (pos + err > inode->i_size) {
- inode->i_size = pos + err;
+ i_size_write(inode, pos + err);
dirty = true;
}
@@ -1306,6 +1317,11 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
goto out1;
}
+ if (unlikely(ntfs3_forced_shutdown(sb))) {
+ err = -EIO;
+ goto out2;
+ }
+
	/* Mark rw ntfs as dirty. It will be cleared at umount. */
ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index ee3093be5170..cae41db0aaa7 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -181,6 +181,9 @@ static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
struct ntfs_inode *ni = ntfs_i(dir);
int err;
+ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
+ return -EIO;
+
ni_lock_dir(ni);
err = ntfs_unlink_inode(dir, dentry);
@@ -199,6 +202,9 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
u32 size = strlen(symname);
struct inode *inode;
+ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
+ return -EIO;
+
inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777, 0,
symname, size, NULL);
@@ -227,6 +233,9 @@ static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
struct ntfs_inode *ni = ntfs_i(dir);
int err;
+ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
+ return -EIO;
+
ni_lock_dir(ni);
err = ntfs_unlink_inode(dir, dentry);
@@ -264,6 +273,9 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
1024);
static_assert(PATH_MAX >= 4 * 1024);
+ if (unlikely(ntfs3_forced_shutdown(sb)))
+ return -EIO;
+
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 86aecbb01a92..9c7478150a03 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -523,12 +523,10 @@ struct ATTR_LIST_ENTRY {
__le64 vcn; // 0x08: Starting VCN of this attribute.
struct MFT_REF ref; // 0x10: MFT record number with attribute.
__le16 id; // 0x18: struct ATTRIB ID.
- __le16 name[3]; // 0x1A: Just to align. To get real name can use bNameOffset.
+ __le16 name[]; // 0x1A: To get real name use name_off.
}; // sizeof(0x20)
-static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
-
static inline u32 le_size(u8 name_len)
{
return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) +
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index f6706143d14b..79356fd29a14 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -61,6 +61,8 @@ enum utf16_endian;
/* sbi->flags */
#define NTFS_FLAGS_NODISCARD 0x00000001
+/* ntfs in shutdown state. */
+#define NTFS_FLAGS_SHUTDOWN_BIT 0x00000002 /* bit number 2, so test_bit() mask == 4 */
/* Set when LogFile is replaying. */
#define NTFS_FLAGS_LOG_REPLAYING 0x00000008
/* Set when we changed first MFT's which copy must be updated in $MftMirr. */
@@ -226,7 +228,7 @@ struct ntfs_sb_info {
u64 maxbytes; // Maximum size for normal files.
u64 maxbytes_sparse; // Maximum size for sparse file.
- u32 flags; // See NTFS_FLAGS_XXX.
+ unsigned long flags; // See NTFS_FLAGS_
CLST zone_max; // Maximum MFT zone length in clusters
CLST bad_clusters; // The count of marked bad clusters.
@@ -473,7 +475,7 @@ bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
int al_update(struct ntfs_inode *ni, int sync);
static inline size_t al_aligned(size_t size)
{
- return (size + 1023) & ~(size_t)1023;
+ return size_add(size, 1023) & ~(size_t)1023;
}
/* Globals from bitfunc.c */
@@ -500,6 +502,8 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
int ntfs_file_open(struct inode *inode, struct file *file);
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
+long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg);
+long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg);
extern const struct inode_operations ntfs_special_inode_operations;
extern const struct inode_operations ntfs_file_inode_operations;
extern const struct file_operations ntfs_file_operations;
@@ -584,6 +588,7 @@ bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
int log_replay(struct ntfs_inode *ni, bool *initialized);
/* Globals from fsntfs.c */
+struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block);
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
bool simple);
@@ -872,7 +877,7 @@ int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode,
int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry);
ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
-extern const struct xattr_handler * const ntfs_xattr_handlers[];
+extern const struct xattr_handler *const ntfs_xattr_handlers[];
int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size);
void ntfs_get_wsl_perm(struct inode *inode);
@@ -999,6 +1004,11 @@ static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
return sb->s_fs_info;
}
+static inline int ntfs3_forced_shutdown(struct super_block *sb)
+{
+ return test_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
+}
+
/*
* ntfs_up_cluster - Align up on cluster boundary.
*/
@@ -1025,19 +1035,6 @@ static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
}
-static inline struct buffer_head *ntfs_bread(struct super_block *sb,
- sector_t block)
-{
- struct buffer_head *bh = sb_bread(sb, block);
-
- if (bh)
- return bh;
-
- ntfs_err(sb, "failed to read volume at offset 0x%llx",
- (u64)block << sb->s_blocksize_bits);
- return NULL;
-}
-
static inline struct ntfs_inode *ntfs_i(struct inode *inode)
{
return container_of(inode, struct ntfs_inode, vfs_inode);
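
Two details of the ntfs_fs.h hunk work together: sbi->flags widens from u32 to unsigned long because the atomic bitops set_bit()/test_bit() operate on longs, and NTFS_FLAGS_SHUTDOWN_BIT is used as a bit number, so the mask actually tested is 1UL << 2 == 4. A compressed userspace sketch of the shutdown gate, using compiler atomics in place of the kernel bitops:

static unsigned long fs_flags;	/* one word of write-once state bits */
#define FS_SHUTDOWN_BIT 2	/* bit number, i.e. mask 1UL << 2 */

static void fs_shutdown(void)
{
	__atomic_fetch_or(&fs_flags, 1UL << FS_SHUTDOWN_BIT, __ATOMIC_RELAXED);
}

static int fs_forced_shutdown(void)
{
	return !!(__atomic_load_n(&fs_flags, __ATOMIC_RELAXED) &
		  (1UL << FS_SHUTDOWN_BIT));
}

/*
 * Every modifying entry point (write, unlink, rename, ...) then starts with:
 *	if (fs_forced_shutdown())
 *		return -EIO;
 */
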
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 53629b1f65e9..6aa3a9d44df1 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -279,7 +279,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
if (t16 > asize)
return NULL;
- if (t16 + le32_to_cpu(attr->res.data_size) > asize)
+ if (le32_to_cpu(attr->res.data_size) > asize - t16)
return NULL;
t32 = sizeof(short) * attr->name_len;
@@ -535,8 +535,20 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
return false;
if (ni && is_attr_indexed(attr)) {
- le16_add_cpu(&ni->mi.mrec->hard_links, -1);
- ni->mi.dirty = true;
+ u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
+ struct ATTR_FILE_NAME *fname =
+ attr->type != ATTR_NAME ?
+ NULL :
+ resident_data_ex(attr,
+ SIZEOF_ATTRIBUTE_FILENAME);
+ if (fname && fname->type == FILE_NAME_DOS) {
+ /* Do not decrease links count deleting DOS name. */
+ } else if (!links) {
+ /* minor error. Not critical. */
+ } else {
+ ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
+ ni->mi.dirty = true;
+ }
}
used -= asize;
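
The mi_remove_attr() change above replaces an unconditional le16_add_cpu(..., -1) with a guarded decrement: deleting a DOS short name must not touch the link count, and a count that is already zero (minor on-disk corruption) must not wrap around to 0xffff. The decision in isolation, as a sketch:

/* Returns the new link count for a removed, indexed attribute. */
static unsigned short dec_links(unsigned short links, int deleting_dos_name)
{
	if (deleting_dos_name)
		return links;	/* DOS name shares its link with the long name */
	if (links == 0)
		return 0;	/* already zero: tolerate, do not underflow */
	return links - 1;
}
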
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 9153dffde950..cef5467fd928 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -122,13 +122,12 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
if (name) {
struct dentry *de = d_find_alias(inode);
- const u32 name_len = ARRAY_SIZE(s_name_buf) - 1;
if (de) {
spin_lock(&de->d_lock);
- snprintf(name, name_len, " \"%s\"", de->d_name.name);
+ snprintf(name, sizeof(s_name_buf), " \"%s\"",
+ de->d_name.name);
spin_unlock(&de->d_lock);
- name[name_len] = 0; /* To be sure. */
} else {
name[0] = 0;
}
@@ -625,7 +624,7 @@ static void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
{
kfree(sbi->new_rec);
kvfree(ntfs_put_shared(sbi->upcase));
- kfree(sbi->def_table);
+ kvfree(sbi->def_table);
kfree(sbi->compress.lznt);
#ifdef CONFIG_NTFS3_LZX_XPRESS
xpress_free_decompressor(sbi->compress.xpress);
@@ -715,6 +714,14 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
}
/*
+ * ntfs_shutdown - super_operations::shutdown
+ */
+static void ntfs_shutdown(struct super_block *sb)
+{
+ set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
+}
+
+/*
* ntfs_sync_fs - super_operations::sync_fs
*/
static int ntfs_sync_fs(struct super_block *sb, int wait)
@@ -724,6 +731,9 @@ static int ntfs_sync_fs(struct super_block *sb, int wait)
struct ntfs_inode *ni;
struct inode *inode;
+ if (unlikely(ntfs3_forced_shutdown(sb)))
+ return -EIO;
+
ni = sbi->security.ni;
if (ni) {
inode = &ni->vfs_inode;
@@ -763,6 +773,7 @@ static const struct super_operations ntfs_sops = {
.put_super = ntfs_put_super,
.statfs = ntfs_statfs,
.show_options = ntfs_show_options,
+ .shutdown = ntfs_shutdown,
.sync_fs = ntfs_sync_fs,
.write_inode = ntfs3_write_inode,
};
@@ -866,6 +877,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
u16 fn, ao;
u8 cluster_bits;
u32 boot_off = 0;
+ sector_t boot_block = 0;
const char *hint = "Primary boot";
/* Save original dev_size. Used with alternative boot. */
@@ -873,11 +885,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
- bh = ntfs_bread(sb, 0);
+read_boot:
+ bh = ntfs_bread(sb, boot_block);
if (!bh)
- return -EIO;
+ return boot_block ? -EINVAL : -EIO;
-check_boot:
err = -EINVAL;
/* Corrupted image; do not read OOB */
@@ -1108,26 +1120,24 @@ check_boot:
}
out:
- if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
+ brelse(bh);
+
+ if (err == -EINVAL && !boot_block && dev_size0 > PAGE_SHIFT) {
u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
u64 lbo = dev_size0 - sizeof(*boot);
- /*
- * Try alternative boot (last sector)
- */
- brelse(bh);
-
- sb_set_blocksize(sb, block_size);
- bh = ntfs_bread(sb, lbo >> blksize_bits(block_size));
- if (!bh)
- return -EINVAL;
-
+ boot_block = lbo >> blksize_bits(block_size);
boot_off = lbo & (block_size - 1);
- hint = "Alternative boot";
- dev_size = dev_size0; /* restore original size. */
- goto check_boot;
+ if (boot_block && block_size >= boot_off + sizeof(*boot)) {
+ /*
+ * Try alternative boot (last sector)
+ */
+ sb_set_blocksize(sb, block_size);
+ hint = "Alternative boot";
+ dev_size = dev_size0; /* restore original size. */
+ goto read_boot;
+ }
}
- brelse(bh);
return err;
}
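
The super.c rework folds the duplicated alternative-boot read into a single read_boot: loop: one code path reads either copy, and the retry fires only once, when the primary copy at block 0 fails validation. The control flow reduced to a standalone sketch; read_and_check() and last_sector() are stand-in stubs, not kernel functions:

#include <errno.h>

static int read_and_check(unsigned long long block)
{
	/* Stub: pretend the primary copy is corrupt and the backup is fine. */
	return block == 0 ? -EINVAL : 0;
}

static unsigned long long last_sector(void)
{
	return 1023;	/* stub: block number of the backup boot sector */
}

int read_boot_record(void)
{
	unsigned long long boot_block = 0;	/* 0 = primary boot sector */
	int err;

read_boot:
	err = read_and_check(boot_block);
	if (err == -EINVAL && boot_block == 0) {
		/* Primary copy invalid: retry exactly once with the backup. */
		boot_block = last_sector();
		goto read_boot;
	}
	return err;
}
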
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index 4274b6f31cfa..53e7d1fa036a 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -219,6 +219,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
if (!ea->name_len)
break;
+ if (ea->name_len > ea_size)
+ break;
+
if (buffer) {
/* Check if we can use field ea->name */
if (off + ea_size > size)
@@ -744,6 +747,9 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
int err;
struct ntfs_inode *ni = ntfs_i(inode);
+ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
/* Dispatch request. */
if (!strcmp(name, SYSTEM_DOS_ATTRIB)) {
/* system.dos_attrib */
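
The added name_len check in ntfs_list_ea() above stops a malformed extended-attribute list from walking past the end of its buffer. The defensive shape of such an iteration over variable-length records, with illustrative types:

#include <stddef.h>
#include <stdint.h>

struct ea_rec {
	uint8_t name_len;
	uint8_t name[];		/* name_len bytes follow */
};

static size_t walk_ea_list(const uint8_t *buf, size_t total)
{
	size_t off = 0, count = 0;

	while (off + sizeof(struct ea_rec) <= total) {
		const struct ea_rec *e = (const void *)(buf + off);
		size_t rec = sizeof(struct ea_rec) + e->name_len;

		if (!e->name_len || rec > total - off)
			break;	/* terminator, or record overruns the buffer */
		count++;
		off += rec;
	}
	return count;
}
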
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index b8e25ca51016..8586e2f5d243 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -265,20 +265,18 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
if (IS_ERR(old_file))
return PTR_ERR(old_file);
+ /* Try to use clone_file_range to clone up within the same fs */
+ cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0);
+ if (cloned == len)
+ goto out_fput;
+
+ /* Couldn't clone, so now we try to copy the data */
error = rw_verify_area(READ, old_file, &old_pos, len);
if (!error)
error = rw_verify_area(WRITE, new_file, &new_pos, len);
if (error)
goto out_fput;
- /* Try to use clone_file_range to clone up within the same fs */
- ovl_start_write(dentry);
- cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
- ovl_end_write(dentry);
- if (cloned == len)
- goto out_fput;
- /* Couldn't clone, so now we try to copy the data */
-
/* Check if lower fs supports seek operation */
if (old_file->f_mode & FMODE_LSEEK)
skip_hole = true;
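
The copy_up reorder tries the whole-file reflink first and only runs the byte-range verification the copy fallback needs; the ovl_start_write/ovl_end_write bracket disappears because vfs_clone_file_range() now takes the freeze protection itself (see the remap_range.c hunk later in this series, where file_start_write() moved into it). The fallback structure, with stubs standing in for vfs_clone_file_range() and the copy loop:

#include <stdio.h>

static long long try_clone(long long len)
{
	(void)len;
	return -1;	/* stub: pretend the filesystem cannot reflink */
}

static int copy_bytes(long long len)
{
	printf("copying %lld bytes\n", len);	/* stub for the data-copy loop */
	return 0;
}

static int copy_up_data(long long len)
{
	/* Cheap path first: a full reflink makes the copy loop unnecessary. */
	if (try_clone(len) == len)
		return 0;
	return copy_bytes(len);
}
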
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 984ffdaeed6c..5764f91d283e 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -18,10 +18,11 @@
struct ovl_lookup_data {
struct super_block *sb;
- struct vfsmount *mnt;
+ const struct ovl_layer *layer;
struct qstr name;
bool is_dir;
bool opaque;
+ bool xwhiteouts;
bool stop;
bool last;
char *redirect;
@@ -201,17 +202,13 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
return real;
}
-static bool ovl_is_opaquedir(struct ovl_fs *ofs, const struct path *path)
-{
- return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE);
-}
-
static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
const char *name,
struct dentry *base, int len,
bool drop_negative)
{
- struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->mnt), name, base, len);
+ struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt), name,
+ base, len);
if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
if (drop_negative && ret->d_lockref.count == 1) {
@@ -232,10 +229,13 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
size_t prelen, const char *post,
struct dentry **ret, bool drop_negative)
{
+ struct ovl_fs *ofs = OVL_FS(d->sb);
struct dentry *this;
struct path path;
int err;
bool last_element = !post[0];
+ bool is_upper = d->layer->idx == 0;
+ char val;
this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
if (IS_ERR(this)) {
@@ -253,8 +253,8 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
}
path.dentry = this;
- path.mnt = d->mnt;
- if (ovl_path_is_whiteout(OVL_FS(d->sb), &path)) {
+ path.mnt = d->layer->mnt;
+ if (ovl_path_is_whiteout(ofs, &path)) {
d->stop = d->opaque = true;
goto put_and_out;
}
@@ -272,7 +272,7 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
d->stop = true;
goto put_and_out;
}
- err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path, NULL);
+ err = ovl_check_metacopy_xattr(ofs, &path, NULL);
if (err < 0)
goto out_err;
@@ -292,7 +292,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
if (d->last)
goto out;
- if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) {
+ /* overlay.opaque=x means xwhiteouts directory */
+ val = ovl_get_opaquedir_val(ofs, &path);
+ if (last_element && !is_upper && val == 'x') {
+ d->xwhiteouts = true;
+ ovl_layer_set_xwhiteouts(ofs, d->layer);
+ } else if (val == 'y') {
d->stop = true;
if (last_element)
d->opaque = true;
@@ -863,7 +868,8 @@ fail:
* Returns next layer in stack starting from top.
* Returns -1 if this is the last layer.
*/
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
+ const struct ovl_layer **layer)
{
struct ovl_entry *oe = OVL_E(dentry);
struct ovl_path *lowerstack = ovl_lowerstack(oe);
@@ -871,13 +877,16 @@ int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
BUG_ON(idx < 0);
if (idx == 0) {
ovl_path_upper(dentry, path);
- if (path->dentry)
+ if (path->dentry) {
+ *layer = &OVL_FS(dentry->d_sb)->layers[0];
return ovl_numlower(oe) ? 1 : -1;
+ }
idx++;
}
BUG_ON(idx > ovl_numlower(oe));
path->dentry = lowerstack[idx - 1].dentry;
- path->mnt = lowerstack[idx - 1].layer->mnt;
+ *layer = lowerstack[idx - 1].layer;
+ path->mnt = (*layer)->mnt;
return (idx < ovl_numlower(oe)) ? idx + 1 : -1;
}
@@ -1055,7 +1064,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
old_cred = ovl_override_creds(dentry->d_sb);
upperdir = ovl_dentry_upper(dentry->d_parent);
if (upperdir) {
- d.mnt = ovl_upper_mnt(ofs);
+ d.layer = &ofs->layers[0];
err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
if (err)
goto out;
@@ -1111,7 +1120,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
else if (d.is_dir || !ofs->numdatalayer)
d.last = lower.layer->idx == ovl_numlower(roe);
- d.mnt = lower.layer->mnt;
+ d.layer = lower.layer;
err = ovl_lookup_layer(lower.dentry, &d, &this, false);
if (err)
goto out_put;
@@ -1278,6 +1287,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (upperopaque)
ovl_dentry_set_opaque(dentry);
+ if (d.xwhiteouts)
+ ovl_dentry_set_xwhiteouts(dentry);
if (upperdentry)
ovl_dentry_set_upper_alias(dentry);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 5ba11eb43767..ee949f3e7c77 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -50,7 +50,6 @@ enum ovl_xattr {
OVL_XATTR_METACOPY,
OVL_XATTR_PROTATTR,
OVL_XATTR_XWHITEOUT,
- OVL_XATTR_XWHITEOUTS,
};
enum ovl_inode_flag {
@@ -70,6 +69,8 @@ enum ovl_entry_flag {
OVL_E_UPPER_ALIAS,
OVL_E_OPAQUE,
OVL_E_CONNECTED,
+ /* Lower stack may contain xwhiteout entries */
+ OVL_E_XWHITEOUTS,
};
enum {
@@ -477,6 +478,10 @@ bool ovl_dentry_test_flag(unsigned long flag, struct dentry *dentry);
bool ovl_dentry_is_opaque(struct dentry *dentry);
bool ovl_dentry_is_whiteout(struct dentry *dentry);
void ovl_dentry_set_opaque(struct dentry *dentry);
+bool ovl_dentry_has_xwhiteouts(struct dentry *dentry);
+void ovl_dentry_set_xwhiteouts(struct dentry *dentry);
+void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
+ const struct ovl_layer *layer);
bool ovl_dentry_has_upper_alias(struct dentry *dentry);
void ovl_dentry_set_upper_alias(struct dentry *dentry);
bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags);
@@ -494,11 +499,10 @@ struct file *ovl_path_open(const struct path *path, int flags);
int ovl_copy_up_start(struct dentry *dentry, int flags);
void ovl_copy_up_end(struct dentry *dentry);
bool ovl_already_copied_up(struct dentry *dentry, int flags);
-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
- enum ovl_xattr ox);
+char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
+ enum ovl_xattr ox);
bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path);
bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path);
-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path);
bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs,
const struct path *upperpath);
@@ -573,7 +577,13 @@ static inline bool ovl_is_impuredir(struct super_block *sb,
.mnt = ovl_upper_mnt(ofs),
};
- return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
+ return ovl_get_dir_xattr_val(ofs, &upperpath, OVL_XATTR_IMPURE) == 'y';
+}
+
+static inline char ovl_get_opaquedir_val(struct ovl_fs *ofs,
+ const struct path *path)
+{
+ return ovl_get_dir_xattr_val(ofs, path, OVL_XATTR_OPAQUE);
}
static inline bool ovl_redirect_follow(struct ovl_fs *ofs)
@@ -680,7 +690,8 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
struct dentry *origin, bool verify);
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
+ const struct ovl_layer **layer);
int ovl_verify_lowerdata(struct dentry *dentry);
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 5fa9c58af65f..cb449ab310a7 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -40,6 +40,8 @@ struct ovl_layer {
int idx;
/* One fsid per unique underlying sb (upper fsid == 0) */
int fsid;
+ /* xwhiteouts were found on this layer */
+ bool has_xwhiteouts;
};
struct ovl_path {
@@ -59,7 +61,7 @@ struct ovl_fs {
unsigned int numfs;
/* Number of data-only lower layers */
unsigned int numdatalayer;
- const struct ovl_layer *layers;
+ struct ovl_layer *layers;
struct ovl_sb *fs;
/* workbasedir is the path at workdir= mount option */
struct dentry *workbasedir;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index e71156baa7bc..0ca8af060b0c 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -305,8 +305,6 @@ static inline int ovl_dir_read(const struct path *realpath,
if (IS_ERR(realfile))
return PTR_ERR(realfile);
- rdd->in_xwhiteouts_dir = rdd->dentry &&
- ovl_path_check_xwhiteouts_xattr(OVL_FS(rdd->dentry->d_sb), realpath);
rdd->first_maybe_whiteout = NULL;
rdd->ctx.pos = 0;
do {
@@ -359,10 +357,13 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
.is_lowest = false,
};
int idx, next;
+ const struct ovl_layer *layer;
for (idx = 0; idx != -1; idx = next) {
- next = ovl_path_next(idx, dentry, &realpath);
+ next = ovl_path_next(idx, dentry, &realpath, &layer);
rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;
+ rdd.in_xwhiteouts_dir = layer->has_xwhiteouts &&
+ ovl_dentry_has_xwhiteouts(dentry);
if (next != -1) {
err = ovl_dir_read(&realpath, &rdd);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 4ab66e3d4cff..2eef6c70b2ae 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1249,6 +1249,7 @@ static struct dentry *ovl_get_root(struct super_block *sb,
struct ovl_entry *oe)
{
struct dentry *root;
+ struct ovl_fs *ofs = OVL_FS(sb);
struct ovl_path *lowerpath = ovl_lowerstack(oe);
unsigned long ino = d_inode(lowerpath->dentry)->i_ino;
int fsid = lowerpath->layer->fsid;
@@ -1270,6 +1271,20 @@ static struct dentry *ovl_get_root(struct super_block *sb,
ovl_set_flag(OVL_IMPURE, d_inode(root));
}
+ /* Look for xwhiteouts marker except in the lowermost layer */
+ for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) {
+ struct path path = {
+ .mnt = lowerpath->layer->mnt,
+ .dentry = lowerpath->dentry,
+ };
+
+ /* overlay.opaque=x means xwhiteouts directory */
+ if (ovl_get_opaquedir_val(ofs, &path) == 'x') {
+ ovl_layer_set_xwhiteouts(ofs, lowerpath->layer);
+ ovl_dentry_set_xwhiteouts(root);
+ }
+ }
+
/* Root is always merge -> can have whiteouts */
ovl_set_flag(OVL_WHITEOUTS, d_inode(root));
ovl_dentry_set_flag(OVL_E_CONNECTED, root);
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 0217094c23ea..a8e17f14d7a2 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -461,6 +461,33 @@ void ovl_dentry_set_opaque(struct dentry *dentry)
ovl_dentry_set_flag(OVL_E_OPAQUE, dentry);
}
+bool ovl_dentry_has_xwhiteouts(struct dentry *dentry)
+{
+ return ovl_dentry_test_flag(OVL_E_XWHITEOUTS, dentry);
+}
+
+void ovl_dentry_set_xwhiteouts(struct dentry *dentry)
+{
+ ovl_dentry_set_flag(OVL_E_XWHITEOUTS, dentry);
+}
+
+/*
+ * ovl_layer_set_xwhiteouts() is called before adding the overlay dir
+ * dentry to dcache, while readdir of that same directory happens after
+ * the overlay dir dentry is in dcache, so if some cpu observes that
+ * ovl_dentry_has_xwhiteouts(), it will also observe layer->has_xwhiteouts
+ * for the layers where xwhiteouts marker was found in that merge dir.
+ */
+void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
+ const struct ovl_layer *layer)
+{
+ if (layer->has_xwhiteouts)
+ return;
+
+ /* Write once to read-mostly layer properties */
+ ofs->layers[layer->idx].has_xwhiteouts = true;
+}
+
/*
* For hard links and decoded file handles, it's possible for ovl_dentry_upper()
* to return positive, while there's no actual upper alias for the inode.
@@ -739,19 +766,6 @@ bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path)
return res >= 0;
}
-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path)
-{
- struct dentry *dentry = path->dentry;
- int res;
-
- /* xattr.whiteouts must be a directory */
- if (!d_is_dir(dentry))
- return false;
-
- res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUTS, NULL, 0);
- return res >= 0;
-}
-
/*
* Load persistent uuid from xattr into s_uuid if found, or store a new
 * randomly generated value in s_uuid and in xattr.
@@ -811,20 +825,17 @@ fail:
return false;
}
-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
- enum ovl_xattr ox)
+char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
+ enum ovl_xattr ox)
{
int res;
char val;
if (!d_is_dir(path->dentry))
- return false;
+ return 0;
res = ovl_path_getxattr(ofs, path, ox, &val, 1);
- if (res == 1 && val == 'y')
- return true;
-
- return false;
+ return res == 1 ? val : 0;
}
#define OVL_XATTR_OPAQUE_POSTFIX "opaque"
@@ -837,7 +848,6 @@ bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
#define OVL_XATTR_METACOPY_POSTFIX "metacopy"
#define OVL_XATTR_PROTATTR_POSTFIX "protattr"
#define OVL_XATTR_XWHITEOUT_POSTFIX "whiteout"
-#define OVL_XATTR_XWHITEOUTS_POSTFIX "whiteouts"
#define OVL_XATTR_TAB_ENTRY(x) \
[x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \
@@ -854,7 +864,6 @@ const char *const ovl_xattr_table[][2] = {
OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY),
OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR),
OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUT),
- OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUTS),
};
int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
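
ovl_get_dir_xattr_val() now hands callers the raw one-byte value of a directory xattr, so overlay.opaque can carry more than a boolean: 'y' keeps its old opaque meaning, while the new 'x' marks a directory whose entries may be xwhiteouts. A decision-table sketch of how lookup interprets the byte (enum names are illustrative):

enum dir_kind { DIR_PLAIN, DIR_OPAQUE, DIR_XWHITEOUTS };

static enum dir_kind classify_dir(char val)
{
	switch (val) {
	case 'y':
		return DIR_OPAQUE;	/* overlay.opaque=y: stop lookup here */
	case 'x':
		return DIR_XWHITEOUTS;	/* overlay.opaque=x: may hold xwhiteouts */
	default:
		return DIR_PLAIN;	/* xattr absent or unrecognized */
	}
}
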
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ff08a8957552..34a47fb0c57f 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -477,13 +477,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
int permitted;
struct mm_struct *mm;
unsigned long long start_time;
- unsigned long cmin_flt = 0, cmaj_flt = 0;
- unsigned long min_flt = 0, maj_flt = 0;
- u64 cutime, cstime, utime, stime;
- u64 cgtime, gtime;
+ unsigned long cmin_flt, cmaj_flt, min_flt, maj_flt;
+ u64 cutime, cstime, cgtime, utime, stime, gtime;
unsigned long rsslim = 0;
unsigned long flags;
int exit_code = task->exit_code;
+ struct signal_struct *sig = task->signal;
+ unsigned int seq = 1;
state = *get_task_state(task);
vsize = eip = esp = 0;
@@ -511,12 +511,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
sigemptyset(&sigign);
sigemptyset(&sigcatch);
- cutime = cstime = utime = stime = 0;
- cgtime = gtime = 0;
if (lock_task_sighand(task, &flags)) {
- struct signal_struct *sig = task->signal;
-
if (sig->tty) {
struct pid *pgrp = tty_get_pgrp(sig->tty);
tty_pgrp = pid_nr_ns(pgrp, ns);
@@ -527,28 +523,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
num_threads = get_nr_threads(task);
collect_sigign_sigcatch(task, &sigign, &sigcatch);
- cmin_flt = sig->cmin_flt;
- cmaj_flt = sig->cmaj_flt;
- cutime = sig->cutime;
- cstime = sig->cstime;
- cgtime = sig->cgtime;
rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
- /* add up live thread stats at the group level */
if (whole) {
- struct task_struct *t;
-
- __for_each_thread(sig, t) {
- min_flt += t->min_flt;
- maj_flt += t->maj_flt;
- gtime += task_gtime(t);
- }
-
- min_flt += sig->min_flt;
- maj_flt += sig->maj_flt;
- thread_group_cputime_adjusted(task, &utime, &stime);
- gtime += sig->gtime;
-
if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
exit_code = sig->group_exit_code;
}
@@ -562,10 +539,41 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
if (permitted && (!whole || num_threads < 2))
wchan = !task_is_running(task);
- if (!whole) {
+
+ do {
+ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+
+ cmin_flt = sig->cmin_flt;
+ cmaj_flt = sig->cmaj_flt;
+ cutime = sig->cutime;
+ cstime = sig->cstime;
+ cgtime = sig->cgtime;
+
+ if (whole) {
+ struct task_struct *t;
+
+ min_flt = sig->min_flt;
+ maj_flt = sig->maj_flt;
+ gtime = sig->gtime;
+
+ rcu_read_lock();
+ __for_each_thread(sig, t) {
+ min_flt += t->min_flt;
+ maj_flt += t->maj_flt;
+ gtime += task_gtime(t);
+ }
+ rcu_read_unlock();
+ }
+ } while (need_seqretry(&sig->stats_lock, seq));
+ done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+
+ if (whole) {
+ thread_group_cputime_adjusted(task, &utime, &stime);
+ } else {
+ task_cputime_adjusted(task, &utime, &stime);
min_flt = task->min_flt;
maj_flt = task->maj_flt;
- task_cputime_adjusted(task, &utime, &stime);
gtime = task_gtime(task);
}
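
do_task_stat() now samples the signal-struct counters inside a "seqbegin or lock" retry loop: the first pass reads locklessly under sig->stats_lock's sequence count, and only if a writer raced does it retry with the lock held. A compressed userspace analogue of the reader side, with a pthread mutex in place of the kernel seqlock; the writer is assumed to bump seq to odd, update, then bump it back to even, all under the same mutex:

#include <pthread.h>
#include <stdatomic.h>

struct stats {
	atomic_uint seq;	/* odd while a writer is mid-update */
	pthread_mutex_t lock;
	unsigned long min_flt, maj_flt;
};

static void stats_read(struct stats *s, unsigned long *min, unsigned long *maj)
{
	int attempts = 0;

	for (;;) {
		int locked = attempts++ > 0;	/* lockless first, then lock */
		unsigned int begin;

		if (locked)
			pthread_mutex_lock(&s->lock);
		begin = atomic_load_explicit(&s->seq, memory_order_acquire);
		*min = s->min_flt;
		*maj = s->maj_flt;
		if (locked) {
			pthread_mutex_unlock(&s->lock);
			return;	/* the lock serialized us against writers */
		}
		if (!(begin & 1) &&
		    begin == atomic_load_explicit(&s->seq, memory_order_acquire))
			return;	/* clean lockless snapshot */
	}
}
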
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 98a031ac2648..18550c071d71 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1878,8 +1878,6 @@ void proc_pid_evict_inode(struct proc_inode *ei)
hlist_del_init_rcu(&ei->sibling_inodes);
spin_unlock(&pid->lock);
}
-
- put_pid(pid);
}
struct inode *proc_pid_make_inode(struct super_block *sb,
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index b33e490e3fd9..05350f3c2812 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -30,7 +30,6 @@
static void proc_evict_inode(struct inode *inode)
{
- struct proc_dir_entry *de;
struct ctl_table_header *head;
struct proc_inode *ei = PROC_I(inode);
@@ -38,17 +37,8 @@ static void proc_evict_inode(struct inode *inode)
clear_inode(inode);
/* Stop tracking associated processes */
- if (ei->pid) {
+ if (ei->pid)
proc_pid_evict_inode(ei);
- ei->pid = NULL;
- }
-
- /* Let go of any associated proc directory entry */
- de = ei->pde;
- if (de) {
- pde_put(de);
- ei->pde = NULL;
- }
head = ei->sysctl;
if (head) {
@@ -80,6 +70,13 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
static void proc_free_inode(struct inode *inode)
{
+ struct proc_inode *ei = PROC_I(inode);
+
+ if (ei->pid)
+ put_pid(ei->pid);
+ /* Let go of any associated proc directory entry */
+ if (ei->pde)
+ pde_put(ei->pde);
kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}
diff --git a/fs/proc/root.c b/fs/proc/root.c
index b55dbc70287b..06a297a27ba3 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -271,7 +271,7 @@ static void proc_kill_sb(struct super_block *sb)
kill_anon_super(sb);
put_pid_ns(fs_info->pid_ns);
- kfree(fs_info);
+ kfree_rcu(fs_info, rcu);
}
static struct file_system_type proc_fs_type = {
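
proc_kill_sb() switching from kfree() to kfree_rcu() defers the free of fs_info until every in-flight RCU reader is done with it. The idiom requires an rcu_head member in the freed structure; the hunk's kfree_rcu(fs_info, rcu) implies one named rcu was added elsewhere in the series. A kernel-style sketch of the pattern:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct example_info {
	int data;
	struct rcu_head rcu;	/* storage kfree_rcu() needs for deferral */
};

static void example_release(struct example_info *info)
{
	/*
	 * Readers may still dereference info under rcu_read_lock();
	 * defer the actual kfree() until a grace period has elapsed.
	 */
	kfree_rcu(info, rcu);
}
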
diff --git a/fs/remap_range.c b/fs/remap_range.c
index f8c1120b8311..de07f978ce3e 100644
--- a/fs/remap_range.c
+++ b/fs/remap_range.c
@@ -373,9 +373,9 @@ int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
}
EXPORT_SYMBOL(generic_remap_file_range_prep);
-loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t len, unsigned int remap_flags)
+loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags)
{
loff_t ret;
@@ -391,23 +391,6 @@ loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
if (!file_in->f_op->remap_file_range)
return -EOPNOTSUPP;
- ret = file_in->f_op->remap_file_range(file_in, pos_in,
- file_out, pos_out, len, remap_flags);
- if (ret < 0)
- return ret;
-
- fsnotify_access(file_in);
- fsnotify_modify(file_out);
- return ret;
-}
-EXPORT_SYMBOL(do_clone_file_range);
-
-loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t len, unsigned int remap_flags)
-{
- loff_t ret;
-
ret = remap_verify_area(file_in, pos_in, len, false);
if (ret)
return ret;
@@ -417,10 +400,14 @@ loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
return ret;
file_start_write(file_out);
- ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
- remap_flags);
+ ret = file_in->f_op->remap_file_range(file_in, pos_in,
+ file_out, pos_out, len, remap_flags);
file_end_write(file_out);
+ if (ret < 0)
+ return ret;
+ fsnotify_access(file_in);
+ fsnotify_modify(file_out);
return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index d64a306a414b..3de5047a7ff9 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -145,21 +145,27 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
struct cached_fid *cfid;
struct cached_fids *cfids;
const char *npath;
+ int retries = 0, cur_sleep = 1;
if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
return -EOPNOTSUPP;
ses = tcon->ses;
- server = ses->server;
cfids = tcon->cfids;
- if (!server->ops->new_lease_key)
- return -EIO;
-
if (cifs_sb->root == NULL)
return -ENOENT;
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ oplock = SMB2_OPLOCK_LEVEL_II;
+ server = cifs_pick_channel(ses);
+
+ if (!server->ops->new_lease_key)
+ return -EIO;
+
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
return -ENOMEM;
@@ -236,6 +242,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
.disposition = FILE_OPEN,
.fid = pfid,
+ .replay = !!(retries),
};
rc = SMB2_open_init(tcon, server,
@@ -268,6 +275,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
*/
cfid->has_lease = true;
+ if (retries) {
+ smb2_set_replay(server, &rqst[0]);
+ smb2_set_replay(server, &rqst[1]);
+ }
+
rc = compound_send_recv(xid, ses, server,
flags, 2, rqst,
resp_buftype, rsp_iov);
@@ -367,6 +379,11 @@ out:
atomic_inc(&tcon->num_remote_opens);
}
kfree(utf16_path);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
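
open_cached_dir() gains the same replay scheme as other SMB2 compound users: on a replayable transport error the compound is rebuilt (fresh channel, replay flag in the open parms, smb2_set_replay() on each request) and resent after a backoff. A standalone approximation of the retry helper; the real smb2_should_replay() also bounds the retries with the server's retrans setting:

#include <unistd.h>

#define MAX_SLEEP_MS 2000	/* mirrors CIFS_MAX_SLEEP */

static int should_replay(int *retries, int *cur_sleep_ms, int max_retries)
{
	if (++(*retries) > max_retries)
		return 0;			/* give up */
	usleep((useconds_t)*cur_sleep_ms * 1000); /* back off before replaying */
	if (*cur_sleep_ms < MAX_SLEEP_MS)
		*cur_sleep_ms *= 2;		/* exponential, capped */
	return 1;
}

/*
 * Caller pattern, matching the hunk above:
 *	int retries = 0, cur_sleep = 1;
 * replay_again:
 *	rc = send_request();
 *	if (is_replayable_error(rc) && should_replay(&retries, &cur_sleep, 3))
 *		goto replay_again;
 */
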
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 60027f5aebe8..3e4209f41c18 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -659,6 +659,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
spin_lock(&tcon->stat_lock);
tcon->bytes_read = 0;
tcon->bytes_written = 0;
+ tcon->stats_from_time = ktime_get_real_seconds();
spin_unlock(&tcon->stat_lock);
if (server->ops->clear_stats)
server->ops->clear_stats(tcon);
@@ -737,8 +738,9 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n%d) %s", i, tcon->tree_name);
if (tcon->need_reconnect)
seq_puts(m, "\tDISCONNECTED ");
- seq_printf(m, "\nSMBs: %d",
- atomic_read(&tcon->num_smbs_sent));
+ seq_printf(m, "\nSMBs: %d since %ptTs UTC",
+ atomic_read(&tcon->num_smbs_sent),
+ &tcon->stats_from_time);
if (server->ops->print_stats)
server->ops->print_stats(m, tcon);
}
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index ef4c2e3c9fa6..6322f0f68a17 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -572,7 +572,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
UniStrupr(user);
} else {
- memset(user, '\0', 2);
+ *(u16 *)user = 0;
}
rc = crypto_shash_update(ses->server->secmech.hmacmd5,
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 99b0ade833aa..0c269396ae15 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -396,7 +396,7 @@ cifs_alloc_inode(struct super_block *sb)
spin_lock_init(&cifs_inode->writers_lock);
cifs_inode->writers = 0;
cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
- cifs_inode->server_eof = 0;
+ cifs_inode->netfs.remote_i_size = 0;
cifs_inode->uniqueid = 0;
cifs_inode->createtime = 0;
cifs_inode->epoch = 0;
@@ -430,7 +430,7 @@ static void
cifs_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
- if (inode->i_state & I_PINNING_FSCACHE_WB)
+ if (inode->i_state & I_PINNING_NETFS_WB)
cifs_fscache_unuse_inode_cookie(inode, true);
cifs_fscache_release_inode_cookie(inode);
clear_inode(inode);
@@ -681,6 +681,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
if (tcon->ses->server->min_offload)
seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
+ if (tcon->ses->server->retrans)
+ seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
seq_printf(s, ",echo_interval=%lu",
tcon->ses->server->echo_interval / HZ);
@@ -793,8 +795,7 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
- return 0;
+ return netfs_unpin_writeback(inode, wbc);
}
static int cifs_drop_inode(struct inode *inode)
@@ -1171,6 +1172,9 @@ const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
{
char *target_path;
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
target_path = kmalloc(PATH_MAX, GFP_KERNEL);
if (!target_path)
return ERR_PTR(-ENOMEM);
@@ -1222,7 +1226,7 @@ static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *s
if (rc < 0)
goto set_failed;
- netfs_resize_file(&src_cifsi->netfs, src_end);
+ netfs_resize_file(&src_cifsi->netfs, src_end, true);
fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
return 0;
@@ -1353,7 +1357,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
smb_file_src, smb_file_target, off, len, destoff);
if (rc == 0 && new_size > i_size_read(target_inode)) {
truncate_setsize(target_inode, new_size);
- netfs_resize_file(&target_cifsi->netfs, new_size);
+ netfs_resize_file(&target_cifsi->netfs, new_size, true);
fscache_resize_cookie(cifs_inode_cookie(target_inode),
new_size);
}
@@ -1379,6 +1383,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
struct inode *src_inode = file_inode(src_file);
struct inode *target_inode = file_inode(dst_file);
struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+ struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
struct cifsFileInfo *smb_file_src;
struct cifsFileInfo *smb_file_target;
struct cifs_tcon *src_tcon;
@@ -1427,7 +1432,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
* Advance the EOF marker after the flush above to the end of the range
* if it's short of that.
*/
- if (src_cifsi->server_eof < off + len) {
+ if (src_cifsi->netfs.remote_i_size < off + len) {
rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
if (rc < 0)
goto unlock;
@@ -1451,12 +1456,22 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
/* Discard all the folios that overlap the destination region. */
truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
+ fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
+ i_size_read(target_inode), 0);
+
rc = file_modified(dst_file);
if (!rc) {
rc = target_tcon->ses->server->ops->copychunk_range(xid,
smb_file_src, smb_file_target, off, len, destoff);
- if (rc > 0 && destoff + rc > i_size_read(target_inode))
+ if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
truncate_setsize(target_inode, destoff + rc);
+ netfs_resize_file(&target_cifsi->netfs,
+ i_size_read(target_inode), true);
+ fscache_resize_cookie(cifs_inode_cookie(target_inode),
+ i_size_read(target_inode));
+ }
+ if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
+ target_cifsi->netfs.zero_point = destoff + rc;
}
file_accessed(src_file);
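
The cifsfs.c hunks retire the client-private server_eof field in favour of the netfs library's remote_i_size (updated through netfs_resize_file()), and push netfs.zero_point out past any range a server-side copy just wrote, so later reads there go to the server instead of being assumed zero. The bookkeeping reduced to a sketch with plain fields (illustrative, not the netfs API):

struct eof_state {
	long long local_i_size;		/* what stat() reports */
	long long remote_i_size;	/* last size the server confirmed */
	long long zero_point;		/* data at/after this may be assumed absent */
};

static void after_remote_write(struct eof_state *s, long long end_of_write)
{
	if (end_of_write > s->remote_i_size)
		s->remote_i_size = end_of_write;	/* server EOF grew */
	if (s->remote_i_size > s->local_i_size)
		s->local_i_size = s->remote_i_size;	/* expose the new size */
	if (end_of_write > s->zero_point)
		s->zero_point = end_of_write;		/* range now holds real data */
}
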
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 879d5ef8a66e..53c75cfb33ab 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -50,6 +50,11 @@
#define CIFS_DEF_ACTIMEO (1 * HZ)
/*
+ * max sleep time before retry to server
+ */
+#define CIFS_MAX_SLEEP 2000
+
+/*
* max attribute cache timeout (jiffies) - 2^30
*/
#define CIFS_MAX_ACTIMEO (1 << 30)
@@ -82,7 +87,7 @@
#define SMB_INTERFACE_POLL_INTERVAL 600
/* maximum number of PDUs in one compound */
-#define MAX_COMPOUND 5
+#define MAX_COMPOUND 7
/*
* Default number of credits to keep available for SMB3.
@@ -204,6 +209,8 @@ struct cifs_open_info_data {
};
} reparse;
char *symlink_target;
+ struct cifs_sid posix_owner;
+ struct cifs_sid posix_group;
union {
struct smb2_file_all_info fi;
struct smb311_posix_qinfo posix_fi;
@@ -751,6 +758,7 @@ struct TCP_Server_Info {
unsigned int max_read;
unsigned int max_write;
unsigned int min_offload;
+ unsigned int retrans;
__le16 compress_algorithm;
__u16 signing_algorithm;
__le16 cipher_type;
@@ -1024,6 +1032,8 @@ struct cifs_chan {
__u8 signkey[SMB3_SIGN_KEY_SIZE];
};
+#define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
+
/*
* Session structure. One of these for each uid session with a particular host
*/
@@ -1056,6 +1066,7 @@ struct cifs_ses {
enum securityEnum sectype; /* what security flavor was specified? */
bool sign; /* is signing required? */
bool domainAuto:1;
+ unsigned int flags;
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
@@ -1207,6 +1218,7 @@ struct cifs_tcon {
__u64 bytes_read;
__u64 bytes_written;
spinlock_t stat_lock; /* protects the two fields above */
+ time64_t stats_from_time;
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
@@ -1366,6 +1378,7 @@ struct cifs_open_parms {
struct cifs_fid *fid;
umode_t mode;
bool reconnect:1;
+ bool replay:1; /* indicates that this open is for a replay */
};
struct cifs_fid {
@@ -1497,6 +1510,7 @@ struct cifs_writedata {
struct smbd_mr *mr;
#endif
struct cifs_credits credits;
+ bool replay;
};
/*
@@ -1557,7 +1571,6 @@ struct cifsInodeInfo {
spinlock_t writers_lock;
unsigned int writers; /* Number of writers on this inode */
unsigned long time; /* jiffies of last update of inode */
- u64 server_eof; /* current file size on server -- protected by i_lock */
u64 uniqueid; /* server inode number */
u64 createtime; /* creation time on server */
__u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for this inode */
@@ -1827,6 +1840,13 @@ static inline bool is_retryable_error(int error)
return false;
}
+static inline bool is_replayable_error(int error)
+{
+ if (error == -EAGAIN || error == -ECONNABORTED)
+ return true;
+ return false;
+}
+
/* cifs_get_writable_file() flags */
#define FIND_WR_ANY 0
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 3052a208c6ca..ac9595504f4b 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -233,6 +233,12 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
/* check if iface is still active */
spin_lock(&ses->chan_lock);
+ if (cifs_ses_get_chan_index(ses, server) ==
+ CIFS_INVAL_CHAN_INDEX) {
+ spin_unlock(&ses->chan_lock);
+ continue;
+ }
+
if (!cifs_chan_is_iface_active(ses, server)) {
spin_unlock(&ses->chan_lock);
cifs_chan_update_iface(ses, server);
@@ -1574,6 +1580,9 @@ static int match_server(struct TCP_Server_Info *server,
if (server->min_offload != ctx->min_offload)
return 0;
+ if (server->retrans != ctx->retrans)
+ return 0;
+
return 1;
}
@@ -1798,6 +1807,7 @@ smbd_connected:
goto out_err_crypto_release;
}
tcp_ses->min_offload = ctx->min_offload;
+ tcp_ses->retrans = ctx->retrans;
/*
* at this point we are the only ones with the pointer
* to the struct since the kernel thread not created yet
@@ -3434,8 +3444,18 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
* the user on mount
*/
if ((cifs_sb->ctx->wsize == 0) ||
- (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
- cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
+ (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) {
+ cifs_sb->ctx->wsize =
+ round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
+ /*
+ * in the very unlikely event that the server sent a max write size under PAGE_SIZE,
+ * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096
+ */
+ if (cifs_sb->ctx->wsize == 0) {
+ cifs_sb->ctx->wsize = PAGE_SIZE;
+ cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n");
+ }
+ }
if ((cifs_sb->ctx->rsize == 0) ||
(cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
@@ -4224,6 +4244,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
/* only send once per connect */
spin_lock(&tcon->tc_lock);
+
+ /* if tcon is marked for needing reconnect, update state */
+ if (tcon->need_reconnect)
+ tcon->status = TID_NEED_TCON;
+
if (tcon->status == TID_GOOD) {
spin_unlock(&tcon->tc_lock);
return 0;
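
Both this mount-time clamp and the Opt_wsize parsing in fs_context.c below apply the same rule: the effective write size must be a whole multiple of PAGE_SIZE, and a server offering less than one page gets bumped up to exactly one. In isolation, with a fixed stand-in for PAGE_SIZE:

#include <stdio.h>

#define MY_PAGE_SIZE 4096UL	/* stand-in for the kernel's PAGE_SIZE */

static unsigned long clamp_wsize(unsigned long negotiated)
{
	unsigned long w = negotiated & ~(MY_PAGE_SIZE - 1);	/* round_down */

	if (w == 0)		/* server offered less than one page */
		w = MY_PAGE_SIZE;
	return w;
}

int main(void)
{
	/* 65536 stays, 5000 rounds down to 4096, 100 bumps to the 4096 floor. */
	printf("%lu %lu %lu\n",
	       clamp_wsize(65536), clamp_wsize(5000), clamp_wsize(100));
	return 0;
}
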
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index a8a1d386da65..449c59830039 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -565,6 +565,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
/* only send once per connect */
spin_lock(&tcon->tc_lock);
+
+ /* if tcon is marked for needing reconnect, update state */
+ if (tcon->need_reconnect)
+ tcon->status = TID_NEED_TCON;
+
if (tcon->status == TID_GOOD) {
spin_unlock(&tcon->tc_lock);
return 0;
@@ -625,8 +630,8 @@ out:
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_TCON)
tcon->status = TID_GOOD;
- spin_unlock(&tcon->tc_lock);
tcon->need_reconnect = false;
+ spin_unlock(&tcon->tc_lock);
}
return rc;
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 1b4262aff8fa..f391c9b803d8 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -87,7 +87,7 @@ void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len
continue;
if (!folio_test_writeback(folio)) {
WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio_index(folio), end);
+ len, start, folio->index, end);
continue;
}
@@ -120,7 +120,7 @@ void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len
continue;
if (!folio_test_writeback(folio)) {
WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio_index(folio), end);
+ len, start, folio->index, end);
continue;
}
@@ -151,7 +151,7 @@ void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int le
xas_for_each(&xas, folio, end) {
if (!folio_test_writeback(folio)) {
WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio_index(folio), end);
+ len, start, folio->index, end);
continue;
}
@@ -175,6 +175,9 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
/* only send once per connect */
spin_lock(&tcon->tc_lock);
+ if (tcon->need_reconnect)
+ tcon->status = TID_NEED_RECON;
+
if (tcon->status != TID_NEED_RECON) {
spin_unlock(&tcon->tc_lock);
return;
@@ -2120,8 +2123,8 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
{
loff_t end_of_write = offset + bytes_written;
- if (end_of_write > cifsi->server_eof)
- cifsi->server_eof = end_of_write;
+ if (end_of_write > cifsi->netfs.remote_i_size)
+ netfs_resize_file(&cifsi->netfs, end_of_write, true);
}
static ssize_t
@@ -2651,7 +2654,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
continue;
if (xa_is_value(folio))
break;
- if (folio_index(folio) != index)
+ if (folio->index != index)
break;
if (!folio_try_get_rcu(folio)) {
xas_reset(&xas);
@@ -2899,7 +2902,7 @@ redo_folio:
goto skip_write;
}
- if (folio_mapping(folio) != mapping ||
+ if (folio->mapping != mapping ||
!folio_test_dirty(folio)) {
start += folio_size(folio);
folio_unlock(folio);
@@ -3247,8 +3250,8 @@ cifs_uncached_writev_complete(struct work_struct *work)
spin_lock(&inode->i_lock);
cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
- if (cifsi->server_eof > inode->i_size)
- i_size_write(inode, cifsi->server_eof);
+ if (cifsi->netfs.remote_i_size > inode->i_size)
+ i_size_write(inode, cifsi->netfs.remote_i_size);
spin_unlock(&inode->i_lock);
complete(&wdata->done);
@@ -3300,6 +3303,7 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
else {
+ wdata->replay = true;
#ifdef CONFIG_CIFS_SMB_DIRECT
if (wdata->mr) {
wdata->mr->need_invalidate = true;
@@ -5043,27 +5047,13 @@ static void cifs_swap_deactivate(struct file *file)
/* do we need to unpin (or unlock) the file */
}
-/*
- * Mark a page as having been made dirty and thus needing writeback. We also
- * need to pin the cache object to write back to.
- */
-#ifdef CONFIG_CIFS_FSCACHE
-static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
-{
- return fscache_dirty_folio(mapping, folio,
- cifs_inode_cookie(mapping->host));
-}
-#else
-#define cifs_dirty_folio filemap_dirty_folio
-#endif
-
const struct address_space_operations cifs_addr_ops = {
.read_folio = cifs_read_folio,
.readahead = cifs_readahead,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
- .dirty_folio = cifs_dirty_folio,
+ .dirty_folio = netfs_dirty_folio,
.release_folio = cifs_release_folio,
.direct_IO = cifs_direct_io,
.invalidate_folio = cifs_invalidate_folio,
@@ -5087,7 +5077,7 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
- .dirty_folio = cifs_dirty_folio,
+ .dirty_folio = netfs_dirty_folio,
.release_folio = cifs_release_folio,
.invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio,
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index a3493da12ad1..4b2f5aa2ea0e 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -139,6 +139,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_u32("dir_mode", Opt_dirmode),
fsparam_u32("port", Opt_port),
fsparam_u32("min_enc_offload", Opt_min_enc_offload),
+ fsparam_u32("retrans", Opt_retrans),
fsparam_u32("esize", Opt_min_enc_offload),
fsparam_u32("bsize", Opt_blocksize),
fsparam_u32("rasize", Opt_rasize),
@@ -210,7 +211,7 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
switch (match_token(value, cifs_secflavor_tokens, args)) {
case Opt_sec_krb5p:
- cifs_errorf(fc, "sec=krb5p is not supported!\n");
+ cifs_errorf(fc, "sec=krb5p is not supported. Use sec=krb5,seal instead\n");
return 1;
case Opt_sec_krb5i:
ctx->sign = true;
@@ -1064,6 +1065,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
case Opt_min_enc_offload:
ctx->min_offload = result.uint_32;
break;
+ case Opt_retrans:
+ ctx->retrans = result.uint_32;
+ break;
case Opt_blocksize:
/*
* inode blocksize realistically should never need to be
@@ -1107,6 +1111,17 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
case Opt_wsize:
ctx->wsize = result.uint_32;
ctx->got_wsize = true;
+ if (ctx->wsize % PAGE_SIZE != 0) {
+ ctx->wsize = round_down(ctx->wsize, PAGE_SIZE);
+ if (ctx->wsize == 0) {
+ ctx->wsize = PAGE_SIZE;
+ cifs_dbg(VFS, "wsize too small, reset to minimum %ld\n", PAGE_SIZE);
+ } else {
+ cifs_dbg(VFS,
+ "wsize rounded down to %d to multiple of PAGE_SIZE %ld\n",
+ ctx->wsize, PAGE_SIZE);
+ }
+ }
break;
case Opt_acregmax:
ctx->acregmax = HZ * result.uint_32;
@@ -1619,6 +1634,8 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->backupuid_specified = false; /* no backup intent for a user */
ctx->backupgid_specified = false; /* no backup intent for a group */
+ ctx->retrans = 1;
+
/*
* short int override_uid = -1;
* short int override_gid = -1;
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index cf46916286d0..182ce11cbe93 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -118,6 +118,7 @@ enum cifs_param {
Opt_file_mode,
Opt_dirmode,
Opt_min_enc_offload,
+ Opt_retrans,
Opt_blocksize,
Opt_rasize,
Opt_rsize,
@@ -245,6 +246,7 @@ struct smb3_fs_context {
unsigned int rsize;
unsigned int wsize;
unsigned int min_offload;
+ unsigned int retrans;
bool sockopt_tcp_nodelay:1;
/* attribute cache timemout for files and directories in jiffies */
unsigned long acregmax;
diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
index e5cad149f5a2..c4a3cb736881 100644
--- a/fs/smb/client/fscache.c
+++ b/fs/smb/client/fscache.c
@@ -180,7 +180,7 @@ static int fscache_fallback_write_pages(struct inode *inode, loff_t start, size_
if (ret < 0)
return ret;
- ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+ ret = cres.ops->prepare_write(&cres, &start, &len, len, i_size_read(inode),
no_space_allocated_yet);
if (ret == 0)
ret = fscache_write(&cres, start, &iter, NULL, NULL);
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 9f37c1758f73..d02f8ba29cb5 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -104,7 +104,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
mtime = inode_get_mtime(inode);
if (timespec64_equal(&mtime, &fattr->cf_mtime) &&
- cifs_i->server_eof == fattr->cf_eof) {
+ cifs_i->netfs.remote_i_size == fattr->cf_eof) {
cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
__func__, cifs_i->uniqueid);
return;
@@ -194,7 +194,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
else
clear_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags);
- cifs_i->server_eof = fattr->cf_eof;
+ cifs_i->netfs.remote_i_size = fattr->cf_eof;
/*
* Can't safely change the file size here if the client is writing to
* it due to potential races.
@@ -665,8 +665,6 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
/* Fill a cifs_fattr struct with info from POSIX info struct */
static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr,
struct cifs_open_info_data *data,
- struct cifs_sid *owner,
- struct cifs_sid *group,
struct super_block *sb)
{
struct smb311_posix_qinfo *info = &data->posix_fi;
@@ -722,8 +720,8 @@ out_reparse:
fattr->cf_symlink_target = data->symlink_target;
data->symlink_target = NULL;
}
- sid_to_id(cifs_sb, owner, fattr, SIDOWNER);
- sid_to_id(cifs_sb, group, fattr, SIDGROUP);
+ sid_to_id(cifs_sb, &data->posix_owner, fattr, SIDOWNER);
+ sid_to_id(cifs_sb, &data->posix_group, fattr, SIDGROUP);
cifs_dbg(FYI, "POSIX query info: mode 0x%x uniqueid 0x%llx nlink %d\n",
fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink);
@@ -1070,9 +1068,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
const unsigned int xid,
struct cifs_tcon *tcon,
const char *full_path,
- struct cifs_fattr *fattr,
- struct cifs_sid *owner,
- struct cifs_sid *group)
+ struct cifs_fattr *fattr)
{
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -1117,7 +1113,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
}
if (tcon->posix_extensions)
- smb311_posix_info_to_fattr(fattr, data, owner, group, sb);
+ smb311_posix_info_to_fattr(fattr, data, sb);
else
cifs_open_info_to_fattr(fattr, data, sb);
out:
@@ -1171,8 +1167,7 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
*/
if (cifs_open_data_reparse(data)) {
rc = reparse_info_to_fattr(data, sb, xid, tcon,
- full_path, fattr,
- NULL, NULL);
+ full_path, fattr);
} else {
cifs_open_info_to_fattr(fattr, data, sb);
}
@@ -1317,10 +1312,10 @@ static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
const unsigned int xid)
{
struct cifs_open_info_data tmp_data = {};
+ struct TCP_Server_Info *server;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_tcon *tcon;
struct tcon_link *tlink;
- struct cifs_sid owner, group;
int tmprc;
int rc = 0;
@@ -1328,14 +1323,14 @@ static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
if (IS_ERR(tlink))
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
+ server = tcon->ses->server;
/*
* 1. Fetch file metadata if not provided (data)
*/
if (!data) {
- rc = smb311_posix_query_path_info(xid, tcon, cifs_sb,
- full_path, &tmp_data,
- &owner, &group);
+ rc = server->ops->query_path_info(xid, tcon, cifs_sb,
+ full_path, &tmp_data);
data = &tmp_data;
}
@@ -1347,11 +1342,9 @@ static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
case 0:
if (cifs_open_data_reparse(data)) {
rc = reparse_info_to_fattr(data, sb, xid, tcon,
- full_path, fattr,
- &owner, &group);
+ full_path, fattr);
} else {
- smb311_posix_info_to_fattr(fattr, data,
- &owner, &group, sb);
+ smb311_posix_info_to_fattr(fattr, data, sb);
}
break;
case -EREMOTE:
@@ -2865,7 +2858,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
set_size_out:
if (rc == 0) {
- cifsInode->server_eof = attrs->ia_size;
+ netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
cifs_setsize(inode, attrs->ia_size);
/*
* i_blocks is not related to (i_size / i_blksize), but instead
@@ -3018,6 +3011,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
if ((attrs->ia_valid & ATTR_SIZE) &&
attrs->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attrs->ia_size);
+ netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
}
@@ -3217,6 +3211,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if ((attrs->ia_valid & ATTR_SIZE) &&
attrs->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attrs->ia_size);
+ netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
}
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index c2137ea3c253..0748d7b757b9 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -140,6 +140,7 @@ tcon_info_alloc(bool dir_leases_enabled)
spin_lock_init(&ret_buf->stat_lock);
atomic_set(&ret_buf->num_local_opens, 0);
atomic_set(&ret_buf->num_remote_opens, 0);
+ ret_buf->stats_from_time = ktime_get_real_seconds();
#ifdef CONFIG_CIFS_DFS_UPCALL
INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index a6968573b775..4a517b280f2b 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -168,6 +168,21 @@ static char *automount_fullpath(struct dentry *dentry, void *page)
return s;
}
+static void fs_context_set_ids(struct smb3_fs_context *ctx)
+{
+ kuid_t uid = current_fsuid();
+ kgid_t gid = current_fsgid();
+
+ if (ctx->multiuser) {
+ if (!ctx->uid_specified)
+ ctx->linux_uid = uid;
+ if (!ctx->gid_specified)
+ ctx->linux_gid = gid;
+ }
+ if (!ctx->cruid_specified)
+ ctx->cred_uid = uid;
+}
+
/*
* Create a vfsmount that we can automount
*/
@@ -205,6 +220,7 @@ static struct vfsmount *cifs_do_automount(struct path *path)
tmp.leaf_fullpath = NULL;
tmp.UNC = tmp.prepath = NULL;
tmp.dfs_root_ses = NULL;
+ fs_context_set_ids(&tmp);
rc = smb3_fs_context_dup(ctx, &tmp);
if (rc) {
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 056cae1ddcce..b520eea7bfce 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -133,15 +133,15 @@ retry:
* Query dir responses don't provide enough
* information about reparse points other than
* their reparse tags. Save an invalidation by
- * not clobbering the existing mode, size and
- * symlink target (if any) when reparse tag and
- * ctime haven't changed.
+ * not clobbering some existing attributes when
+ * reparse tag and ctime haven't changed.
*/
rc = 0;
if (fattr->cf_cifsattrs & ATTR_REPARSE) {
if (likely(reparse_inode_match(inode, fattr))) {
fattr->cf_mode = inode->i_mode;
- fattr->cf_eof = CIFS_I(inode)->server_eof;
+ fattr->cf_rdev = inode->i_rdev;
+ fattr->cf_eof = CIFS_I(inode)->netfs.remote_i_size;
fattr->cf_symlink_target = NULL;
} else {
CIFS_I(inode)->time = 0;
@@ -307,14 +307,16 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
}
static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
- SEARCH_ID_FULL_DIR_INFO *info,
+ const void *info,
struct cifs_sb_info *cifs_sb)
{
+ const FILE_FULL_DIRECTORY_INFO *di = info;
+
__dir_info_to_fattr(fattr, info);
- /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
+ /* See MS-FSCC 2.4.14, 2.4.19 */
if (fattr->cf_cifsattrs & ATTR_REPARSE)
- fattr->cf_cifstag = le32_to_cpu(info->EaSize);
+ fattr->cf_cifstag = le32_to_cpu(di->EaSize);
cifs_fill_common_info(fattr, cifs_sb);
}
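
The switch to a const void * parameter works because, per MS-FSCC, the info levels this helper now serves share a common layout up to and including EaSize; a non-normative sketch of that shared prefix:

	/*
	 * Shared prefix of FILE_FULL_DIRECTORY_INFO (MS-FSCC 2.4.14) and
	 * SEARCH_ID_FULL_DIR_INFO / FileIdFullDirectoryInformation (2.4.19):
	 *
	 *   NextEntryOffset, FileIndex,
	 *   CreationTime, LastAccessTime, LastWriteTime, ChangeTime,
	 *   EndOfFile, AllocationSize, ExtFileAttributes,
	 *   FileNameLength, EaSize
	 *
	 * The ID variant appends Reserved + FileId before FileName; since
	 * this helper only dereferences fields up through EaSize, a single
	 * cast covers both levels.
	 */
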
@@ -396,7 +398,7 @@ ffirst_retry:
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
} else /* not srvinos - BB fixme add check for backlevel? */ {
- cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
}
search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
@@ -645,10 +647,10 @@ static int cifs_entry_is_dot(struct cifs_dirent *de, bool is_unicode)
static int is_dir_changed(struct file *file)
{
struct inode *inode = file_inode(file);
- struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
+ struct cifsInodeInfo *cifs_inode_info = CIFS_I(inode);
- if (cifsInfo->time == 0)
- return 1; /* directory was changed, perhaps due to unlink */
+ if (cifs_inode_info->time == 0)
+ return 1; /* directory was changed, e.g. unlink or new file */
else
return 0;
@@ -987,10 +989,9 @@ static int cifs_filldir(char *find_entry, struct file *file,
(FIND_FILE_STANDARD_INFO *)find_entry,
cifs_sb);
break;
+ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
- cifs_fulldir_info_to_fattr(&fattr,
- (SEARCH_ID_FULL_DIR_INFO *)find_entry,
- cifs_sb);
+ cifs_fulldir_info_to_fattr(&fattr, find_entry, cifs_sb);
break;
default:
cifs_dir_info_to_fattr(&fattr,
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index cde81042bebd..8f37373fd333 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -75,6 +75,10 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
{
unsigned int i;
+ /* if the channel is waiting for termination, treat it as already gone */
+ if (server && server->terminate)
+ return CIFS_INVAL_CHAN_INDEX;
+
for (i = 0; i < ses->chan_count; i++) {
if (ses->chans[i].server == server)
return i;
@@ -84,7 +88,6 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
if (server)
cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
server->conn_id);
- WARN_ON(1);
return CIFS_INVAL_CHAN_INDEX;
}
@@ -269,6 +272,8 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
&iface->sockaddr,
rc);
kref_put(&iface->refcount, release_iface);
+ /* failure to add a channel should still count against the iface's weight */
+ iface->weight_fulfilled++;
continue;
}
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 5053a5550abe..05818cd6d932 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -56,6 +56,35 @@ static inline __u32 file_create_options(struct dentry *dentry)
return 0;
}
+/* Parse owner and group from SMB3.1.1 POSIX query info */
+static int parse_posix_sids(struct cifs_open_info_data *data,
+ struct kvec *rsp_iov)
+{
+ struct smb2_query_info_rsp *qi = rsp_iov->iov_base;
+ unsigned int out_len = le32_to_cpu(qi->OutputBufferLength);
+ unsigned int qi_len = sizeof(data->posix_fi);
+ int owner_len, group_len;
+ u8 *sidsbuf, *sidsbuf_end;
+
+ if (out_len <= qi_len)
+ return -EINVAL;
+
+ sidsbuf = (u8 *)qi + le16_to_cpu(qi->OutputBufferOffset) + qi_len;
+ sidsbuf_end = sidsbuf + out_len - qi_len;
+
+ owner_len = posix_info_sid_size(sidsbuf, sidsbuf_end);
+ if (owner_len == -1)
+ return -EINVAL;
+
+ memcpy(&data->posix_owner, sidsbuf, owner_len);
+ group_len = posix_info_sid_size(sidsbuf + owner_len, sidsbuf_end);
+ if (group_len == -1)
+ return -EINVAL;
+
+ memcpy(&data->posix_group, sidsbuf + owner_len, group_len);
+ return 0;
+}
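
For readers following the pointer arithmetic in parse_posix_sids(), the wire layout it assumes is roughly the following (a sketch, not a normative MS-SMB2 figure):

	/*
	 *   base = (u8 *)qi + OutputBufferOffset
	 *
	 *   base                      +------------------------------+
	 *                             | smb311_posix_qinfo (qi_len)  |
	 *   sidsbuf = base + qi_len   +------------------------------+
	 *                             | owner SID (variable length)  |
	 *   sidsbuf + owner_len       +------------------------------+
	 *                             | group SID (variable length)  |
	 *   sidsbuf_end               +------------------------------+
	 *
	 * OutputBufferLength covers qi_len plus both SIDs, which is why
	 * the out_len <= qi_len check above rejects responses carrying
	 * no SIDs.
	 */
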
+
/*
* note: If cfile is passed, the reference to it is dropped here.
* So make sure that you do not reuse cfile after return from this func.
@@ -69,7 +98,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
__u32 desired_access, __u32 create_disposition,
__u32 create_options, umode_t mode, struct kvec *in_iov,
int *cmds, int num_cmds, struct cifsFileInfo *cfile,
- __u8 **extbuf, size_t *extbuflen,
struct kvec *out_iov, int *out_buftype)
{
@@ -92,6 +120,14 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int size[2];
void *data[2];
int len;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ oplock = SMB2_OPLOCK_LEVEL_NONE;
+ num_rqst = 0;
+ server = cifs_pick_channel(ses);
vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
if (vars == NULL)
@@ -99,8 +135,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
rqst = &vars->rqst[0];
rsp_iov = &vars->rsp_iov[0];
- server = cifs_pick_channel(ses);
-
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -435,15 +469,24 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
num_rqst++;
if (cfile) {
+ if (retries)
+ for (i = 1; i < num_rqst - 2; i++)
+ smb2_set_replay(server, &rqst[i]);
+
rc = compound_send_recv(xid, ses, server,
flags, num_rqst - 2,
&rqst[1], &resp_buftype[1],
&rsp_iov[1]);
- } else
+ } else {
+ if (retries)
+ for (i = 0; i < num_rqst; i++)
+ smb2_set_replay(server, &rqst[i]);
+
rc = compound_send_recv(xid, ses, server,
flags, num_rqst,
rqst, resp_buftype,
rsp_iov);
+ }
finished:
num_rqst = 0;
@@ -494,21 +537,9 @@ finished:
&rsp_iov[i + 1], sizeof(idata->posix_fi) /* add SIDs */,
(char *)&idata->posix_fi);
}
- if (rc == 0) {
- unsigned int length = le32_to_cpu(qi_rsp->OutputBufferLength);
-
- if (length > sizeof(idata->posix_fi)) {
- char *base = (char *)rsp_iov[i + 1].iov_base +
- le16_to_cpu(qi_rsp->OutputBufferOffset) +
- sizeof(idata->posix_fi);
- *extbuflen = length - sizeof(idata->posix_fi);
- *extbuf = kmemdup(base, *extbuflen, GFP_KERNEL);
- if (!*extbuf)
- rc = -ENOMEM;
- } else {
- rc = -EINVAL;
- }
- }
+ if (rc == 0)
+ rc = parse_posix_sids(idata, &rsp_iov[i + 1]);
+
SMB2_query_info_free(&rqst[num_rqst++]);
if (rc)
trace_smb3_posix_query_info_compound_err(xid, ses->Suid,
@@ -604,9 +635,6 @@ finished:
}
SMB2_close_free(&rqst[num_rqst]);
- if (cfile)
- cifsFileInfo_put(cfile);
-
num_cmds += 2;
if (out_iov && out_buftype) {
memcpy(out_iov, rsp_iov, num_cmds * sizeof(*out_iov));
@@ -616,7 +644,16 @@ finished:
for (i = 0; i < num_cmds; i++)
free_rsp_buf(resp_buftype[i], rsp_iov[i].iov_base);
}
+ num_cmds -= 2; /* undo the += 2 above so a replay recounts the compound */
kfree(vars);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
+ if (cfile)
+ cifsFileInfo_put(cfile);
+
return rc;
}
@@ -662,7 +699,7 @@ int smb2_query_path_info(const unsigned int xid,
struct smb2_hdr *hdr;
struct kvec in_iov[2], out_iov[3] = {};
int out_buftype[3] = {};
- int cmds[2] = { SMB2_OP_QUERY_INFO, };
+ int cmds[2];
bool islink;
int i, num_cmds;
int rc, rc2;
@@ -670,20 +707,36 @@ int smb2_query_path_info(const unsigned int xid,
data->adjust_tz = false;
data->reparse_point = false;
- if (strcmp(full_path, ""))
- rc = -ENOENT;
- else
- rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid);
- /* If it is a root and its handle is cached then use it */
- if (!rc) {
- if (cfid->file_all_info_is_valid) {
- memcpy(&data->fi, &cfid->file_all_info, sizeof(data->fi));
+ /*
+ * BB TODO: Add support for using cached root handle in SMB3.1.1 POSIX.
+ * Create SMB2_query_posix_info worker function to do non-compounded
+ * query when we already have an open file handle for this. For now this
+ * is fast enough (always using the compounded version).
+ */
+ if (!tcon->posix_extensions) {
+ if (*full_path) {
+ rc = -ENOENT;
} else {
- rc = SMB2_query_info(xid, tcon, cfid->fid.persistent_fid,
- cfid->fid.volatile_fid, &data->fi);
+ rc = open_cached_dir(xid, tcon, full_path,
+ cifs_sb, false, &cfid);
+ }
+ /* If it is a root and its handle is cached then use it */
+ if (!rc) {
+ if (cfid->file_all_info_is_valid) {
+ memcpy(&data->fi, &cfid->file_all_info,
+ sizeof(data->fi));
+ } else {
+ rc = SMB2_query_info(xid, tcon,
+ cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid,
+ &data->fi);
+ }
+ close_cached_dir(cfid);
+ return rc;
}
- close_cached_dir(cfid);
- return rc;
+ cmds[0] = SMB2_OP_QUERY_INFO;
+ } else {
+ cmds[0] = SMB2_OP_POSIX_QUERY_INFO;
}
in_iov[0].iov_base = data;
@@ -693,9 +746,8 @@ int smb2_query_path_info(const unsigned int xid,
cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE,
- in_iov, cmds, 1, cfile,
- NULL, NULL, out_iov, out_buftype);
+ create_options, ACL_NO_MODE, in_iov,
+ cmds, 1, cfile, out_iov, out_buftype);
hdr = out_iov[0].iov_base;
/*
* If first iov is unset, then SMB session was dropped or we've got a
@@ -707,6 +759,10 @@ int smb2_query_path_info(const unsigned int xid,
switch (rc) {
case 0:
case -EOPNOTSUPP:
+ /*
+ * BB TODO: When support for special files is added to Samba,
+ * re-verify this path.
+ */
rc = parse_create_response(data, cifs_sb, &out_iov[0]);
if (rc || !data->reparse_point)
goto out;
@@ -722,8 +778,8 @@ int smb2_query_path_info(const unsigned int xid,
cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE, in_iov, cmds,
- num_cmds, cfile, NULL, NULL, NULL, NULL);
+ create_options, ACL_NO_MODE, in_iov,
+ cmds, num_cmds, cfile, NULL, NULL);
break;
case -EREMOTE:
break;
@@ -746,101 +802,6 @@ out:
return rc;
}
-int smb311_posix_query_path_info(const unsigned int xid,
- struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct cifs_open_info_data *data,
- struct cifs_sid *owner,
- struct cifs_sid *group)
-{
- int rc;
- __u32 create_options = 0;
- struct cifsFileInfo *cfile;
- struct kvec in_iov[2], out_iov[3] = {};
- int out_buftype[3] = {};
- __u8 *sidsbuf = NULL;
- __u8 *sidsbuf_end = NULL;
- size_t sidsbuflen = 0;
- size_t owner_len, group_len;
- int cmds[2] = { SMB2_OP_POSIX_QUERY_INFO, };
- int i, num_cmds;
-
- data->adjust_tz = false;
- data->reparse_point = false;
-
- /*
- * BB TODO: Add support for using the cached root handle.
- * Create SMB2_query_posix_info worker function to do non-compounded query
- * when we already have an open file handle for this. For now this is fast enough
- * (always using the compounded version).
- */
- in_iov[0].iov_base = data;
- in_iov[0].iov_len = sizeof(*data);
- in_iov[1] = in_iov[0];
-
- cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE, in_iov, cmds, 1,
- cfile, &sidsbuf, &sidsbuflen, out_iov, out_buftype);
- /*
- * If first iov is unset, then SMB session was dropped or we've got a
- * cached open file (@cfile).
- */
- if (!out_iov[0].iov_base || out_buftype[0] == CIFS_NO_BUFFER)
- goto out;
-
- switch (rc) {
- case 0:
- case -EOPNOTSUPP:
- /* BB TODO: When support for special files added to Samba re-verify this path */
- rc = parse_create_response(data, cifs_sb, &out_iov[0]);
- if (rc || !data->reparse_point)
- goto out;
-
- if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK) {
- /* symlink already parsed in create response */
- num_cmds = 1;
- } else {
- cmds[1] = SMB2_OP_GET_REPARSE;
- num_cmds = 2;
- }
- create_options |= OPEN_REPARSE_POINT;
- cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE, in_iov, cmds,
- num_cmds, cfile, &sidsbuf, &sidsbuflen, NULL, NULL);
- break;
- }
-
-out:
- if (rc == 0) {
- sidsbuf_end = sidsbuf + sidsbuflen;
-
- owner_len = posix_info_sid_size(sidsbuf, sidsbuf_end);
- if (owner_len == -1) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(owner, sidsbuf, owner_len);
-
- group_len = posix_info_sid_size(
- sidsbuf + owner_len, sidsbuf_end);
- if (group_len == -1) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(group, sidsbuf + owner_len, group_len);
- }
-
- kfree(sidsbuf);
- for (i = 0; i < ARRAY_SIZE(out_buftype); i++)
- free_rsp_buf(out_buftype[i], out_iov[i].iov_base);
- return rc;
-}
-
int
smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
struct cifs_tcon *tcon, const char *name,
@@ -848,9 +809,9 @@ smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
{
return smb2_compound_op(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, mode, NULL,
- &(int){SMB2_OP_MKDIR}, 1,
- NULL, NULL, NULL, NULL, NULL);
+ CREATE_NOT_FILE, mode,
+ NULL, &(int){SMB2_OP_MKDIR}, 1,
+ NULL, NULL, NULL);
}
void
@@ -875,7 +836,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
CREATE_NOT_FILE, ACL_NO_MODE, &in_iov,
&(int){SMB2_OP_SET_INFO}, 1,
- cfile, NULL, NULL, NULL, NULL);
+ cfile, NULL, NULL);
if (tmprc == 0)
cifs_i->cifsAttrs = dosattrs;
}
@@ -887,8 +848,9 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
drop_cached_dir_by_name(xid, tcon, name, cifs_sb);
return smb2_compound_op(xid, tcon, cifs_sb, name,
DELETE, FILE_OPEN, CREATE_NOT_FILE,
- ACL_NO_MODE, NULL, &(int){SMB2_OP_RMDIR}, 1,
- NULL, NULL, NULL, NULL, NULL);
+ ACL_NO_MODE, NULL,
+ &(int){SMB2_OP_RMDIR}, 1,
+ NULL, NULL, NULL);
}
int
@@ -897,8 +859,9 @@ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
{
return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
- ACL_NO_MODE, NULL, &(int){SMB2_OP_DELETE}, 1,
- NULL, NULL, NULL, NULL, NULL);
+ ACL_NO_MODE, NULL,
+ &(int){SMB2_OP_DELETE}, 1,
+ NULL, NULL, NULL);
}
static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
@@ -919,8 +882,8 @@ static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
in_iov.iov_base = smb2_to_name;
in_iov.iov_len = 2 * UniStrnlen((wchar_t *)smb2_to_name, PATH_MAX);
rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
- FILE_OPEN, create_options, ACL_NO_MODE, &in_iov,
- &command, 1, cfile, NULL, NULL, NULL, NULL);
+ FILE_OPEN, create_options, ACL_NO_MODE,
+ &in_iov, &command, 1, cfile, NULL, NULL);
smb2_rename_path:
kfree(smb2_to_name);
return rc;
@@ -971,7 +934,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
FILE_WRITE_DATA, FILE_OPEN,
0, ACL_NO_MODE, &in_iov,
&(int){SMB2_OP_SET_EOF}, 1,
- cfile, NULL, NULL, NULL, NULL);
+ cfile, NULL, NULL);
}
int
@@ -999,8 +962,8 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN,
0, ACL_NO_MODE, &in_iov,
- &(int){SMB2_OP_SET_INFO}, 1, cfile,
- NULL, NULL, NULL, NULL);
+ &(int){SMB2_OP_SET_INFO}, 1,
+ cfile, NULL, NULL);
cifs_put_tlink(tlink);
return rc;
}
@@ -1035,7 +998,7 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
da, cd, co, ACL_NO_MODE, in_iov,
- cmds, 2, cfile, NULL, NULL, NULL, NULL);
+ cmds, 2, cfile, NULL, NULL);
if (!rc) {
rc = smb311_posix_get_inode_info(&new, full_path,
data, sb, xid);
@@ -1045,7 +1008,7 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
da, cd, co, ACL_NO_MODE, in_iov,
- cmds, 2, cfile, NULL, NULL, NULL, NULL);
+ cmds, 2, cfile, NULL, NULL);
if (!rc) {
rc = cifs_get_inode_info(&new, full_path,
data, sb, xid, NULL);
@@ -1072,8 +1035,8 @@ int smb2_query_reparse_point(const unsigned int xid,
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
OPEN_REPARSE_POINT, ACL_NO_MODE, &in_iov,
- &(int){SMB2_OP_GET_REPARSE}, 1, cfile,
- NULL, NULL, NULL, NULL);
+ &(int){SMB2_OP_GET_REPARSE}, 1,
+ cfile, NULL, NULL);
if (rc)
goto out;
diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
index 1a90dd78b238..ac1895358908 100644
--- a/fs/smb/client/smb2maperror.c
+++ b/fs/smb/client/smb2maperror.c
@@ -1210,6 +1210,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_INVALID_TASK_INDEX, -EIO, "STATUS_INVALID_TASK_INDEX"},
{STATUS_THREAD_ALREADY_IN_TASK, -EIO, "STATUS_THREAD_ALREADY_IN_TASK"},
{STATUS_CALLBACK_BYPASS, -EIO, "STATUS_CALLBACK_BYPASS"},
+ {STATUS_SERVER_UNAVAILABLE, -EAGAIN, "STATUS_SERVER_UNAVAILABLE"},
+ {STATUS_FILE_NOT_AVAILABLE, -EAGAIN, "STATUS_FILE_NOT_AVAILABLE"},
{STATUS_PORT_CLOSED, -EIO, "STATUS_PORT_CLOSED"},
{STATUS_MESSAGE_LOST, -EIO, "STATUS_MESSAGE_LOST"},
{STATUS_INVALID_MESSAGE, -EIO, "STATUS_INVALID_MESSAGE"},
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 01a5bd7e6a30..4695433fcf39 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -614,11 +614,12 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
"multichannel not available\n"
"Empty network interface list returned by server %s\n",
ses->server->hostname);
- rc = -EINVAL;
+ rc = -EOPNOTSUPP;
+ ses->iface_last_update = jiffies;
goto out;
}
- while (bytes_left >= sizeof(*p)) {
+ while (bytes_left >= (ssize_t)sizeof(*p)) {
memset(&tmp_iface, 0, sizeof(tmp_iface));
tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
@@ -712,7 +713,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
ses->iface_count++;
spin_unlock(&ses->iface_lock);
- ses->iface_last_update = jiffies;
next_iface:
nb_iface++;
next = le32_to_cpu(p->Next);
@@ -734,11 +734,7 @@ next_iface:
if ((bytes_left > 8) || p->Next)
cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
-
- if (!ses->iface_count) {
- rc = -EINVAL;
- goto out;
- }
+ ses->iface_last_update = jiffies;
out:
/*
@@ -1112,7 +1108,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
{
struct smb2_compound_vars *vars;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
struct smb_rqst *rqst;
struct kvec *rsp_iov;
__le16 *utf16_path = NULL;
@@ -1128,6 +1124,13 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_file_full_ea_info *ea = NULL;
struct smb2_query_info_rsp *rsp;
int rc, used_len = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = CIFS_CP_CREATE_CLOSE_OP;
+ oplock = SMB2_OPLOCK_LEVEL_NONE;
+ server = cifs_pick_channel(ses);
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -1201,6 +1204,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
.fid = &fid,
+ .replay = !!(retries),
};
rc = SMB2_open_init(tcon, server,
@@ -1248,6 +1252,12 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
goto sea_exit;
smb2_set_related(&rqst[2]);
+ if (retries) {
+ smb2_set_replay(server, &rqst[0]);
+ smb2_set_replay(server, &rqst[1]);
+ smb2_set_replay(server, &rqst[2]);
+ }
+
rc = compound_send_recv(xid, ses, server,
flags, 3, rqst,
resp_buftype, rsp_iov);
@@ -1264,6 +1274,11 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
kfree(vars);
out_free_path:
kfree(utf16_path);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
#endif
@@ -1488,7 +1503,7 @@ smb2_ioctl_query_info(const unsigned int xid,
struct smb_rqst *rqst;
struct kvec *rsp_iov;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
char __user *arg = (char __user *)p;
struct smb_query_info qi;
struct smb_query_info __user *pqi;
@@ -1505,6 +1520,13 @@ smb2_ioctl_query_info(const unsigned int xid,
void *data[2];
int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
void (*free_req1_func)(struct smb_rqst *r);
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = CIFS_CP_CREATE_CLOSE_OP;
+ oplock = SMB2_OPLOCK_LEVEL_NONE;
+ server = cifs_pick_channel(ses);
vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
if (vars == NULL)
@@ -1548,6 +1570,7 @@ smb2_ioctl_query_info(const unsigned int xid,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, create_options),
.fid = &fid,
+ .replay = !!(retries),
};
if (qi.flags & PASSTHRU_FSCTL) {
@@ -1645,6 +1668,12 @@ smb2_ioctl_query_info(const unsigned int xid,
goto free_req_1;
smb2_set_related(&rqst[2]);
+ if (retries) {
+ smb2_set_replay(server, &rqst[0]);
+ smb2_set_replay(server, &rqst[1]);
+ smb2_set_replay(server, &rqst[2]);
+ }
+
rc = compound_send_recv(xid, ses, server,
flags, 3, rqst,
resp_buftype, rsp_iov);
@@ -1705,6 +1734,11 @@ free_output_buffer:
kfree(buffer);
free_vars:
kfree(vars);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -2231,8 +2265,14 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct smb2_query_directory_rsp *qd_rsp = NULL;
struct smb2_create_rsp *op_rsp = NULL;
- struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
- int retry_count = 0;
+ struct TCP_Server_Info *server;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ oplock = SMB2_OPLOCK_LEVEL_NONE;
+ server = cifs_pick_channel(tcon->ses);
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
@@ -2257,6 +2297,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
.fid = fid,
+ .replay = !!(retries),
};
rc = SMB2_open_init(tcon, server,
@@ -2282,14 +2323,15 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
smb2_set_related(&rqst[1]);
-again:
+ if (retries) {
+ smb2_set_replay(server, &rqst[0]);
+ smb2_set_replay(server, &rqst[1]);
+ }
+
rc = compound_send_recv(xid, tcon->ses, server,
flags, 2, rqst,
resp_buftype, rsp_iov);
- if (rc == -EAGAIN && retry_count++ < 10)
- goto again;
-
/* If the open failed there is nothing to do */
op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
@@ -2337,6 +2379,11 @@ again:
SMB2_query_directory_free(&rqst[1]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -2462,6 +2509,22 @@ smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
}
void
+smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+ struct smb2_hdr *shdr;
+
+ if (server->dialect < SMB30_PROT_ID)
+ return;
+
+ shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
+ if (shdr == NULL) {
+ cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
+ return;
+ }
+ shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION;
+}
+
+void
smb2_set_related(struct smb_rqst *rqst)
{
struct smb2_hdr *shdr;
@@ -2534,6 +2597,27 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
}
/*
+ * helper to decide whether a failed request should be replayed,
+ * applying exponential backoff between attempts
+ */
+bool smb2_should_replay(struct cifs_tcon *tcon,
+ int *pretries,
+ int *pcur_sleep)
+{
+ if (!pretries || !pcur_sleep)
+ return false;
+
+ if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
+ msleep(*pcur_sleep);
+ (*pcur_sleep) = ((*pcur_sleep) << 1);
+ if ((*pcur_sleep) > CIFS_MAX_SLEEP)
+ (*pcur_sleep) = CIFS_MAX_SLEEP;
+ return true;
+ }
+
+ return false;
+}
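
Combined with is_replayable_error() and the replay_again labels added throughout this patch, the control flow reduces to the loop in the toy below. It is deliberately simplified: the tcon->retry hard-mount case is dropped, CIFS_MAX_SLEEP is stubbed at 32 ms, and it prints instead of sleeping. (Note also that the replay flag set by smb2_set_replay() is only meaningful on SMB 3.x, hence that helper's dialect check.)

	#include <stdio.h>
	#include <stdbool.h>

	#define EAGAIN       11
	#define MAX_SLEEP_MS 32   /* stand-in for CIFS_MAX_SLEEP */

	static bool is_replayable_error(int rc)
	{
		return rc == -EAGAIN; /* simplified; the kernel checks more codes */
	}

	static bool should_replay(int retrans, int *retries, int *cur_sleep)
	{
		if ((*retries)++ >= retrans)
			return false;
		printf("backing off %d ms before replay\n", *cur_sleep);
		*cur_sleep <<= 1;                  /* exponential backoff ... */
		if (*cur_sleep > MAX_SLEEP_MS)
			*cur_sleep = MAX_SLEEP_MS; /* ... capped at the maximum */
		return true;
	}

	static int send_once(int attempt)
	{
		return attempt < 2 ? -EAGAIN : 0; /* fail twice, then succeed */
	}

	int main(void)
	{
		int retries = 0, cur_sleep = 1, attempt = 0, rc;

	replay_again:
		rc = send_once(attempt++);
		if (is_replayable_error(rc) && should_replay(3, &retries, &cur_sleep))
			goto replay_again;
		printf("rc=%d after %d attempt(s)\n", rc, attempt);
		return 0;
	}
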
+
+/*
* Passes the query info response back to the caller on success.
* Caller need to free this with free_rsp_buf().
*/
@@ -2546,7 +2630,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
{
struct smb2_compound_vars *vars;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
int flags = CIFS_CP_CREATE_CLOSE_OP;
struct smb_rqst *rqst;
int resp_buftype[3];
@@ -2557,6 +2641,13 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
__le16 *utf16_path;
struct cached_fid *cfid = NULL;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = CIFS_CP_CREATE_CLOSE_OP;
+ oplock = SMB2_OPLOCK_LEVEL_NONE;
+ server = cifs_pick_channel(ses);
if (!path)
path = "";
@@ -2593,6 +2684,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
.disposition = FILE_OPEN,
.create_options = cifs_create_options(cifs_sb, 0),
.fid = &fid,
+ .replay = !!(retries),
};
rc = SMB2_open_init(tcon, server,
@@ -2637,6 +2729,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
goto qic_exit;
smb2_set_related(&rqst[2]);
+ if (retries) {
+ if (!cfid) {
+ smb2_set_replay(server, &rqst[0]);
+ smb2_set_replay(server, &rqst[2]);
+ }
+ smb2_set_replay(server, &rqst[1]);
+ }
+
if (cfid) {
rc = compound_send_recv(xid, ses, server,
flags, 1, &rqst[1],
@@ -2669,6 +2769,11 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
kfree(vars);
out_free_path:
kfree(utf16_path);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -3217,6 +3322,9 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
cfile->fid.volatile_fid, cfile->pid, new_size);
if (rc >= 0) {
truncate_setsize(inode, new_size);
+ netfs_resize_file(&cifsi->netfs, new_size, true);
+ if (offset < cifsi->netfs.zero_point)
+ cifsi->netfs.zero_point = offset;
fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
}
}
@@ -3440,7 +3548,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid, new_eof);
if (rc == 0) {
- cifsi->server_eof = new_eof;
+ netfs_resize_file(&cifsi->netfs, new_eof, true);
cifs_setsize(inode, new_eof);
cifs_truncate_page(inode->i_mapping, inode->i_size);
truncate_setsize(inode, new_eof);
@@ -3532,8 +3640,9 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
int rc;
unsigned int xid;
struct inode *inode = file_inode(file);
- struct cifsFileInfo *cfile = file->private_data;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsFileInfo *cfile = file->private_data;
+ struct netfs_inode *ictx = &cifsi->netfs;
loff_t old_eof, new_eof;
xid = get_xid();
@@ -3553,6 +3662,7 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
goto out_2;
truncate_pagecache_range(inode, off, old_eof);
+ ictx->zero_point = old_eof;
rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
old_eof - off - len, off);
@@ -3567,9 +3677,10 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
rc = 0;
- cifsi->server_eof = i_size_read(inode) - len;
- truncate_setsize(inode, cifsi->server_eof);
- fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
+ truncate_setsize(inode, new_eof);
+ netfs_resize_file(&cifsi->netfs, new_eof, true);
+ ictx->zero_point = new_eof;
+ fscache_resize_cookie(cifs_inode_cookie(inode), new_eof);
out_2:
filemap_invalidate_unlock(inode->i_mapping);
out:
@@ -3585,6 +3696,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
unsigned int xid;
struct cifsFileInfo *cfile = file->private_data;
struct inode *inode = file_inode(file);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
__u64 count, old_eof, new_eof;
xid = get_xid();
@@ -3612,6 +3724,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
goto out_2;
truncate_setsize(inode, new_eof);
+ netfs_resize_file(&cifsi->netfs, i_size_read(inode), true);
fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
@@ -5104,7 +5217,7 @@ static int smb2_create_reparse_symlink(const unsigned int xid,
struct inode *new;
struct kvec iov;
__le16 *path;
- char *sym;
+ char *sym, sep = CIFS_DIR_SEP(cifs_sb);
u16 len, plen;
int rc = 0;
@@ -5118,7 +5231,8 @@ static int smb2_create_reparse_symlink(const unsigned int xid,
.symlink_target = sym,
};
- path = cifs_convert_path_to_utf16(symname, cifs_sb);
+ convert_delimiter(sym, sep);
+ path = cifs_convert_path_to_utf16(sym, cifs_sb);
if (!path) {
rc = -ENOMEM;
goto out;
@@ -5141,7 +5255,10 @@ static int smb2_create_reparse_symlink(const unsigned int xid,
buf->PrintNameLength = cpu_to_le16(plen);
memcpy(buf->PathBuffer, path, plen);
buf->Flags = cpu_to_le32(*symname != '/' ? SYMLINK_FLAG_RELATIVE : 0);
+ if (*sym != sep)
+ buf->Flags = cpu_to_le32(SYMLINK_FLAG_RELATIVE);
+ convert_delimiter(sym, '/');
iov.iov_base = buf;
iov.iov_len = len;
new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index bd25c34dc398..608ee05491e2 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -156,6 +156,56 @@ out:
return;
}
+/*
+ * helper to skip (terminate) a secondary channel, or to disable all
+ * secondary channels when the primary server no longer supports them
+ */
+static int
+cifs_chan_skip_or_disable(struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
+ bool from_reconnect)
+{
+ struct TCP_Server_Info *pserver;
+ unsigned int chan_index;
+
+ if (SERVER_IS_CHAN(server)) {
+ cifs_dbg(VFS,
+ "server %s does not support multichannel anymore. Skip secondary channel\n",
+ ses->server->hostname);
+
+ spin_lock(&ses->chan_lock);
+ chan_index = cifs_ses_get_chan_index(ses, server);
+ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
+ spin_unlock(&ses->chan_lock);
+ goto skip_terminate;
+ }
+
+ ses->chans[chan_index].server = NULL;
+ server->terminate = true;
+ spin_unlock(&ses->chan_lock);
+
+ /*
+ * the above reference of server by channel
+ * needs to be dropped without holding chan_lock
+ * as cifs_put_tcp_session takes a higher lock
+ * i.e. cifs_tcp_ses_lock
+ */
+ cifs_put_tcp_session(server, from_reconnect);
+
+ cifs_signal_cifsd_for_reconnect(server, false);
+
+ /* mark primary server as needing reconnect */
+ pserver = server->primary_server;
+ cifs_signal_cifsd_for_reconnect(pserver, false);
+skip_terminate:
+ return -EHOSTDOWN;
+ }
+
+ cifs_server_dbg(VFS,
+ "server does not support multichannel anymore. Disable all other channels\n");
+ cifs_disable_secondary_channels(ses);
+
+ return 0;
+}
+
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
struct TCP_Server_Info *server, bool from_reconnect)
@@ -164,8 +214,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
struct nls_table *nls_codepage = NULL;
struct cifs_ses *ses;
int xid;
- struct TCP_Server_Info *pserver;
- unsigned int chan_index;
/*
* SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
@@ -310,44 +358,11 @@ again:
*/
if (ses->chan_count > 1 &&
!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
- if (SERVER_IS_CHAN(server)) {
- cifs_dbg(VFS, "server %s does not support " \
- "multichannel anymore. skipping secondary channel\n",
- ses->server->hostname);
-
- spin_lock(&ses->chan_lock);
- chan_index = cifs_ses_get_chan_index(ses, server);
- if (chan_index == CIFS_INVAL_CHAN_INDEX) {
- spin_unlock(&ses->chan_lock);
- goto skip_terminate;
- }
-
- ses->chans[chan_index].server = NULL;
- spin_unlock(&ses->chan_lock);
-
- /*
- * the above reference of server by channel
- * needs to be dropped without holding chan_lock
- * as cifs_put_tcp_session takes a higher lock
- * i.e. cifs_tcp_ses_lock
- */
- cifs_put_tcp_session(server, from_reconnect);
-
- server->terminate = true;
- cifs_signal_cifsd_for_reconnect(server, false);
-
- /* mark primary server as needing reconnect */
- pserver = server->primary_server;
- cifs_signal_cifsd_for_reconnect(pserver, false);
-
-skip_terminate:
+ rc = cifs_chan_skip_or_disable(ses, server,
+ from_reconnect);
+ if (rc) {
mutex_unlock(&ses->session_mutex);
- rc = -EHOSTDOWN;
goto out;
- } else {
- cifs_server_dbg(VFS, "does not support " \
- "multichannel anymore. disabling all other channels\n");
- cifs_disable_secondary_channels(ses);
}
}
@@ -384,6 +399,15 @@ skip_sess_setup:
goto out;
}
+ spin_lock(&ses->ses_lock);
+ if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
+ spin_unlock(&ses->ses_lock);
+ mutex_unlock(&ses->session_mutex);
+ goto skip_add_channels;
+ }
+ ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
+ spin_unlock(&ses->ses_lock);
+
if (!rc &&
(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
mutex_unlock(&ses->session_mutex);
@@ -395,14 +419,29 @@ skip_sess_setup:
rc = SMB3_request_interfaces(xid, tcon, false);
free_xid(xid);
- if (rc)
+ if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
+ /*
+ * Some servers (e.g. Azure SMB server) do not advertise that
+ * multichannel has been disabled via server capabilities; instead
+ * they return STATUS_NOT_IMPLEMENTED. Treat this as the server
+ * not supporting multichannel.
+ */
+
+ rc = cifs_chan_skip_or_disable(ses, server,
+ from_reconnect);
+ goto skip_add_channels;
+ } else if (rc)
cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
__func__, rc);
if (ses->chan_max > ses->chan_count &&
+ ses->iface_count &&
!SERVER_IS_CHAN(server)) {
- if (ses->chan_count == 1)
+ if (ses->chan_count == 1) {
cifs_server_dbg(VFS, "supports multichannel now\n");
+ queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+ (SMB_INTERFACE_POLL_INTERVAL * HZ));
+ }
cifs_try_adding_channels(ses);
}
@@ -410,6 +449,11 @@ skip_sess_setup:
mutex_unlock(&ses->session_mutex);
}
+skip_add_channels:
+ spin_lock(&ses->ses_lock);
+ ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
+ spin_unlock(&ses->ses_lock);
+
if (smb2_command != SMB2_INTERNAL_CMD)
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
@@ -1958,10 +2002,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
__le16 *unc_path = NULL;
int flags = 0;
unsigned int total_len;
- struct TCP_Server_Info *server;
-
- /* always use master channel */
- server = ses->server;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
cifs_dbg(FYI, "TCON\n");
@@ -2094,6 +2135,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
struct smb2_tree_disconnect_req *req; /* response is trivial */
int rc = 0;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = 0;
unsigned int total_len;
struct kvec iov[1];
@@ -2116,7 +2158,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
invalidate_all_cached_dirs(tcon);
- rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
+ rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
(void **) &req,
&total_len);
if (rc)
@@ -2134,7 +2176,7 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, ses->server,
+ rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -2279,7 +2321,7 @@ int smb2_parse_contexts(struct TCP_Server_Info *server,
noff = le16_to_cpu(cc->NameOffset);
nlen = le16_to_cpu(cc->NameLength);
- if (noff + nlen >= doff)
+ if (noff + nlen > doff)
return -EINVAL;
name = (char *)cc + noff;
@@ -2362,8 +2404,13 @@ create_durable_v2_buf(struct cifs_open_parms *oparms)
*/
buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
- generate_random_uuid(buf->dcontext.CreateGuid);
- memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
+
+ /* for replay, we should not overwrite the existing create guid */
+ if (!oparms->replay) {
+ generate_random_uuid(buf->dcontext.CreateGuid);
+ memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
+ } else
+ memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16);
/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
buf->Name[0] = 'D';
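
The replay branch a few lines up is what lets the server de-duplicate a resent create; a non-normative summary of the rule (per MS-SMB2's durable handle v2 handling):

	/*
	 * Durable handle v2 replay, roughly: a resend must carry the SAME
	 * CreateGuid, with SMB2_FLAGS_REPLAY_OPERATION set in the header,
	 * so the server can match it to the original (possibly already
	 * completed) create and return that handle instead of opening the
	 * file a second time. Hence: generate the GUID once, cache it in
	 * pfid->create_guid, and copy the cached value on every replay.
	 */
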
@@ -2736,7 +2783,14 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
int flags = 0;
unsigned int total_len;
__le16 *utf16_path = NULL;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ n_iov = 2;
+ server = cifs_pick_channel(ses);
cifs_dbg(FYI, "mkdir\n");
@@ -2840,6 +2894,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
/* no need to inc num_remote_opens because we close it just below */
trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
+
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
/* resource #4: response buffer */
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@@ -2877,6 +2935,11 @@ err_free_req:
cifs_small_buf_release(req);
err_free_path:
kfree(utf16_path);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -3072,12 +3135,19 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
struct smb2_create_rsp *rsp = NULL;
struct cifs_tcon *tcon = oparms->tcon;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
struct kvec iov[SMB2_CREATE_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
+ oparms->replay = !!(retries);
cifs_dbg(FYI, "create/open\n");
if (!ses || !server)
@@ -3099,6 +3169,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
oparms->create_options, oparms->desired_access);
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags,
&rsp_iov);
@@ -3152,6 +3225,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
creat_exit:
SMB2_open_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -3276,15 +3354,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
-
- cifs_dbg(FYI, "SMB2 IOCTL\n");
-
- if (out_data != NULL)
- *out_data = NULL;
-
- /* zero out returned data len, in case of error */
- if (plen)
- *plen = 0;
+ int retries = 0, cur_sleep = 1;
if (!tcon)
return -EIO;
@@ -3293,10 +3363,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (!ses)
return -EIO;
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
server = cifs_pick_channel(ses);
+
if (!server)
return -EIO;
+ cifs_dbg(FYI, "SMB2 IOCTL\n");
+
+ if (out_data != NULL)
+ *out_data = NULL;
+
+ /* zero out returned data len, in case of error */
+ if (plen)
+ *plen = 0;
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -3311,6 +3394,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (rc)
goto ioctl_exit;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags,
&rsp_iov);
@@ -3380,6 +3466,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
ioctl_exit:
SMB2_ioctl_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -3451,13 +3542,20 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_rqst rqst;
struct smb2_close_rsp *rsp = NULL;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
bool query_attrs = false;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ query_attrs = false;
+ server = cifs_pick_channel(ses);
cifs_dbg(FYI, "Close\n");
@@ -3483,6 +3581,9 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
if (rc)
goto close_exit;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
@@ -3516,6 +3617,11 @@ close_exit:
cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
persistent_fid, tmp_rc);
}
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -3646,12 +3752,19 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
struct TCP_Server_Info *server;
int flags = 0;
bool allocated = false;
+ int retries = 0, cur_sleep = 1;
cifs_dbg(FYI, "Query Info\n");
if (!ses)
return -EIO;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ allocated = false;
server = cifs_pick_channel(ses);
+
if (!server)
return -EIO;
@@ -3673,6 +3786,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type);
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -3715,6 +3831,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
qinf_exit:
SMB2_query_info_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -3815,7 +3936,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
u32 *plen /* returned data len */)
{
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
struct smb_rqst rqst;
struct smb2_change_notify_rsp *smb_rsp;
struct kvec iov[1];
@@ -3823,6 +3944,12 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buftype = CIFS_NO_BUFFER;
int flags = 0;
int rc = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
cifs_dbg(FYI, "change notify\n");
if (!ses || !server)
@@ -3847,6 +3974,10 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
(u8)watch_tree, completion_filter);
+
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@@ -3881,6 +4012,11 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
if (rqst.rq_iov)
cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -3918,7 +4054,7 @@ void smb2_reconnect_server(struct work_struct *work)
struct cifs_ses *ses, *ses2;
struct cifs_tcon *tcon, *tcon2;
struct list_head tmp_list, tmp_ses_list;
- bool tcon_exist = false, ses_exist = false;
+ bool ses_exist = false;
bool tcon_selected = false;
int rc;
bool resched = false;
@@ -3964,7 +4100,7 @@ void smb2_reconnect_server(struct work_struct *work)
if (tcon->need_reconnect || tcon->need_reopen_files) {
tcon->tc_count++;
list_add_tail(&tcon->rlist, &tmp_list);
- tcon_selected = tcon_exist = true;
+ tcon_selected = true;
}
}
/*
@@ -3973,7 +4109,7 @@ void smb2_reconnect_server(struct work_struct *work)
*/
if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
- tcon_selected = tcon_exist = true;
+ tcon_selected = true;
cifs_smb_ses_inc_refcount(ses);
}
/*
@@ -4123,10 +4259,16 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
struct smb_rqst rqst;
struct kvec iov[1];
struct kvec rsp_iov = {NULL, 0};
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
int resp_buftype = CIFS_NO_BUFFER;
int flags = 0;
int rc = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
cifs_dbg(FYI, "flush\n");
if (!ses || !(ses->server))
@@ -4146,6 +4288,10 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
goto flush_exit;
trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
+
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@@ -4160,6 +4306,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
flush_exit:
SMB2_flush_free(&rqst);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -4639,7 +4790,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
struct cifs_io_parms *io_parms = NULL;
int credit_request;
- if (!wdata->server)
+ if (!wdata->server || wdata->replay)
server = wdata->server = cifs_pick_channel(tcon->ses);
/*
@@ -4724,6 +4875,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
rqst.rq_nvec = 1;
rqst.rq_iter = wdata->iter;
rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
+ if (wdata->replay)
+ smb2_set_replay(server, &rqst);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (wdata->mr)
iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
@@ -4797,18 +4950,21 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
int flags = 0;
unsigned int total_len;
struct TCP_Server_Info *server;
+ int retries = 0, cur_sleep = 1;
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
*nbytes = 0;
-
- if (n_vec < 1)
- return rc;
-
if (!io_parms->server)
io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
server = io_parms->server;
if (server == NULL)
return -ECONNABORTED;
+ if (n_vec < 1)
+ return rc;
+
rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
(void **) &req, &total_len);
if (rc)
@@ -4842,6 +4998,9 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
rqst.rq_iov = iov;
rqst.rq_nvec = n_vec + 1;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
&rqst,
&resp_buftype, flags, &rsp_iov);
@@ -4866,6 +5025,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
cifs_small_buf_release(req);
free_rsp_buf(resp_buftype, rsp);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -5048,6 +5212,9 @@ int SMB2_query_directory_init(const unsigned int xid,
case SMB_FIND_FILE_POSIX_INFO:
req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
break;
+ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+ req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
+ break;
default:
cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
info_level);
@@ -5117,6 +5284,9 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
/* note that posix payload are variable size */
info_buf_size = sizeof(struct smb2_posix_info);
break;
+ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+ info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
+ break;
default:
cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
@@ -5177,8 +5347,14 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec rsp_iov;
int rc = 0;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
int flags = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
if (!ses || !(ses->server))
return -EIO;
@@ -5198,6 +5374,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
if (rc)
goto qdir_exit;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -5232,6 +5411,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
qdir_exit:
SMB2_query_directory_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -5298,8 +5482,14 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
int flags = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
if (!ses || !server)
return -EIO;
@@ -5327,6 +5517,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
+ if (retries)
+ smb2_set_replay(server, &rqst);
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags,
@@ -5342,6 +5534,11 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
free_rsp_buf(resp_buftype, rsp);
kfree(iov);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -5394,12 +5591,18 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct smb2_oplock_break *req = NULL;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
int flags = CIFS_OBREAK_OP;
unsigned int total_len;
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = CIFS_OBREAK_OP;
+ server = cifs_pick_channel(ses);
cifs_dbg(FYI, "SMB2_oplock_break\n");
rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
@@ -5424,15 +5627,21 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
-
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
}
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -5518,9 +5727,15 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
FILE_SYSTEM_POSIX_INFO *info = NULL;
int flags = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
rc = build_qfs_info_req(&iov, tcon, server,
FS_POSIX_INFORMATION,
@@ -5536,6 +5751,9 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
free_qfs_info_req(&iov);
@@ -5555,6 +5773,11 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
posix_qfsinf_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -5569,9 +5792,15 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
struct smb2_fs_full_size_info *info = NULL;
int flags = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
rc = build_qfs_info_req(&iov, tcon, server,
FS_FULL_SIZE_INFORMATION,
@@ -5587,6 +5816,9 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
free_qfs_info_req(&iov);
@@ -5606,6 +5838,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
qfsinf_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -5620,9 +5857,15 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype, max_len, min_len;
struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server = cifs_pick_channel(ses);
+ struct TCP_Server_Info *server;
unsigned int rsp_len, offset;
int flags = 0;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = 0;
+ server = cifs_pick_channel(ses);
if (level == FS_DEVICE_INFORMATION) {
max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
@@ -5654,6 +5897,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
free_qfs_info_req(&iov);
@@ -5691,6 +5937,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
qfsattr_exit:
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
@@ -5708,7 +5959,13 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int count;
int flags = CIFS_NO_RSP_BUF;
unsigned int total_len;
- struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+ struct TCP_Server_Info *server;
+ int retries = 0, cur_sleep = 1;
+
+replay_again:
+ /* reinitialize for possible replay */
+ flags = CIFS_NO_RSP_BUF;
+ server = cifs_pick_channel(tcon->ses);
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
@@ -5739,6 +5996,9 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
+ if (retries)
+ smb2_set_replay(server, &rqst);
+
rc = cifs_send_recv(xid, tcon->ses, server,
&rqst, &resp_buf_type, flags,
&rsp_iov);
@@ -5750,6 +6010,10 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
tcon->ses->Suid, rc);
}
+ if (is_replayable_error(rc) &&
+ smb2_should_replay(tcon, &retries, &cur_sleep))
+ goto replay_again;
+
return rc;
}
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 343ada691e76..b3069911e9dd 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -122,6 +122,11 @@ extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
extern void smb2_set_next_command(struct cifs_tcon *tcon,
struct smb_rqst *rqst);
extern void smb2_set_related(struct smb_rqst *rqst);
+extern void smb2_set_replay(struct TCP_Server_Info *server,
+ struct smb_rqst *rqst);
+extern bool smb2_should_replay(struct cifs_tcon *tcon,
+ int *pretries,
+ int *pcur_sleep);
/*
* SMB2 Worker functions - most of protocol specific implementation details
@@ -299,9 +304,7 @@ int smb311_posix_query_path_info(const unsigned int xid,
struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const char *full_path,
- struct cifs_open_info_data *data,
- struct cifs_sid *owner,
- struct cifs_sid *group);
+ struct cifs_open_info_data *data);
int posix_info_parse(const void *beg, const void *end,
struct smb2_posix_info_parsed *out);
int posix_info_sid_size(const void *beg, const void *end);
diff --git a/fs/smb/client/smb2status.h b/fs/smb/client/smb2status.h
index a9e958166fc5..9c6d79b0bd49 100644
--- a/fs/smb/client/smb2status.h
+++ b/fs/smb/client/smb2status.h
@@ -982,6 +982,8 @@ struct ntstatus {
#define STATUS_INVALID_TASK_INDEX cpu_to_le32(0xC0000501)
#define STATUS_THREAD_ALREADY_IN_TASK cpu_to_le32(0xC0000502)
#define STATUS_CALLBACK_BYPASS cpu_to_le32(0xC0000503)
+#define STATUS_SERVER_UNAVAILABLE cpu_to_le32(0xC0000466)
+#define STATUS_FILE_NOT_AVAILABLE cpu_to_le32(0xC0000467)
#define STATUS_PORT_CLOSED cpu_to_le32(0xC0000700)
#define STATUS_MESSAGE_LOST cpu_to_le32(0xC0000701)
#define STATUS_INVALID_MESSAGE cpu_to_le32(0xC0000702)
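
The two status codes added here, STATUS_SERVER_UNAVAILABLE and STATUS_FILE_NOT_AVAILABLE, name exactly the transient server-side conditions the new replay loops are meant to absorb. A plausible sketch of the classifier those loops call, assuming the transport maps such transient failures to -EAGAIN and -ECONNABORTED (the exact errno set is an assumption; the transport.c hunk below does collapse socket errors to -ECONNABORTED):

/* Sketch only: the real is_replayable_error() may test a different set. */
static inline bool is_replayable_error(int error)
{
	switch (error) {
	case -EAGAIN:		/* transient send failure */
	case -ECONNABORTED:	/* socket error collapsed by the transport */
		return true;
	default:
		return false;
	}
}
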
diff --git a/fs/smb/client/smbencrypt.c b/fs/smb/client/smbencrypt.c
index f0ce26414f17..1d1ee9f18f37 100644
--- a/fs/smb/client/smbencrypt.c
+++ b/fs/smb/client/smbencrypt.c
@@ -26,13 +26,6 @@
#include "cifsproto.h"
#include "../common/md4.h"
-#ifndef false
-#define false 0
-#endif
-#ifndef true
-#define true 1
-#endif
-
/* following came from the other byteorder.h to avoid include conflicts */
#define CVAL(buf,pos) (((unsigned char *)(buf))[pos])
#define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 4f717ad7c21b..994d70193432 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -400,10 +400,17 @@ unmask:
server->conn_id, server->hostname);
}
smbd_done:
- if (rc < 0 && rc != -EINTR)
+ /*
+ * There's hardly any use for the layers above to know the
+ * actual error code here. All they should do at this point is
+ * retry the connection and hope the error goes away.
+ */
+ if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
- else if (rc > 0)
+ rc = -ECONNABORTED;
+ cifs_signal_cifsd_for_reconnect(server, false);
+ } else if (rc > 0)
rc = 0;
out:
cifs_in_send_dec(server);
@@ -428,8 +435,8 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
if (!(flags & CIFS_TRANSFORM_REQ))
return __smb_send_rqst(server, num_rqst, rqst);
- if (num_rqst > MAX_COMPOUND - 1)
- return -ENOMEM;
+ if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
+ return -EIO;
if (!server->ops->init_transform_rq) {
cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
@@ -1026,6 +1033,9 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
if (!server || server->terminate)
continue;
+ if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
+ continue;
+
/*
* strictly speaking, we should pick up req_lock to read
* server->in_flight. But it shouldn't matter much here if we
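
The cifs_pick_channel() change makes channel selection skip any channel flagged for reconnect, so replayed requests land on a healthy session channel instead of racing the reconnect logic. A standalone analogue of the selection policy (the struct and field names are illustrative, not the cifs structures, and the least-loaded bookkeeping is simplified):

#include <stdbool.h>
#include <stddef.h>

struct chan {
	bool alive;
	bool needs_reconnect;
	unsigned int in_flight;
};

static struct chan *pick_channel(struct chan *chans, size_t n)
{
	struct chan *best = NULL;

	for (size_t i = 0; i < n; i++) {
		struct chan *c = &chans[i];

		if (!c->alive || c->needs_reconnect)
			continue;	/* mirrors the CIFS_CHAN_NEEDS_RECONNECT skip */
		if (!best || c->in_flight < best->in_flight)
			best = c;	/* prefer the least-loaded survivor */
	}
	return best;	/* NULL: no usable channel, caller must reconnect */
}
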
diff --git a/fs/smb/server/asn1.c b/fs/smb/server/asn1.c
index 4a4b2b03ff33..b931a99ab9c8 100644
--- a/fs/smb/server/asn1.c
+++ b/fs/smb/server/asn1.c
@@ -214,10 +214,15 @@ static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
{
struct ksmbd_conn *conn = context;
+ if (!vlen)
+ return -EINVAL;
+
conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
if (!conn->mechToken)
return -ENOMEM;
+ conn->mechTokenLen = (unsigned int)vlen;
+
return 0;
}
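
The negotiate-token fix is a small instance of a general rule: validate the client-supplied length before copying, and store the length next to the copied buffer so later parsing never re-trusts the on-the-wire length field (the session-setup hunks below switch to conn->mechTokenLen for exactly that reason). A userspace analogue of the validate-then-copy idiom, with invented names:

#include <stdlib.h>
#include <string.h>

/* Analogue of kmemdup_nul() plus the zero-length guard: copy 'len' bytes
 * and append a NUL so string parsers cannot run off the end. */
static char *dup_token(const void *src, size_t len, size_t *out_len)
{
	char *buf;

	if (!len)
		return NULL;		/* reject empty tokens up front */

	buf = malloc(len + 1);
	if (!buf)
		return NULL;
	memcpy(buf, src, len);
	buf[len] = '\0';
	*out_len = len;			/* keep the length paired with the copy */
	return buf;
}
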
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index d311c2ee10bd..09e1e7771592 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -416,13 +416,7 @@ static void stop_sessions(void)
again:
down_read(&conn_list_lock);
list_for_each_entry(conn, &conn_list, conns_list) {
- struct task_struct *task;
-
t = conn->transport;
- task = t->handler;
- if (task)
- ksmbd_debug(CONN, "Stop session handler %s/%d\n",
- task->comm, task_pid_nr(task));
ksmbd_conn_set_exiting(conn);
if (t->ops->shutdown) {
up_read(&conn_list_lock);
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 3c005246a32e..0e04cf8b1d89 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -88,6 +88,7 @@ struct ksmbd_conn {
__u16 dialect;
char *mechToken;
+ unsigned int mechTokenLen;
struct ksmbd_conn_ops *conn_ops;
@@ -134,7 +135,6 @@ struct ksmbd_transport_ops {
struct ksmbd_transport {
struct ksmbd_conn *conn;
struct ksmbd_transport_ops *ops;
- struct task_struct *handler;
};
#define KSMBD_TCP_RECV_TIMEOUT (7 * HZ)
diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
index b7521e41402e..0ebf91ffa236 100644
--- a/fs/smb/server/ksmbd_netlink.h
+++ b/fs/smb/server/ksmbd_netlink.h
@@ -304,7 +304,8 @@ enum ksmbd_event {
KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE = 15,
- KSMBD_EVENT_MAX
+ __KSMBD_EVENT_MAX,
+ KSMBD_EVENT_MAX = __KSMBD_EVENT_MAX - 1
};
/*
diff --git a/fs/smb/server/misc.c b/fs/smb/server/misc.c
index 9e8afaa686e3..1a5faa6f6e7b 100644
--- a/fs/smb/server/misc.c
+++ b/fs/smb/server/misc.c
@@ -261,6 +261,7 @@ out_ascii:
/**
* ksmbd_extract_sharename() - get share name from tree connect request
+ * @um: pointer to a unicode_map structure for character encoding handling
* @treename: buffer containing tree name and share name
*
* Return: share name on success, otherwise error
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 001926d3b348..53dfaac425c6 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -1197,6 +1197,12 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
bool prev_op_has_lease;
__le32 prev_op_state = 0;
+ /* Only v2 leases support directories */
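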
+ if (S_ISDIR(file_inode(fp->filp)->i_mode)) {
+ if (!lctx || lctx->version != 2)
+ return 0;
+ }
+
opinfo = alloc_opinfo(work, pid, tid);
if (!opinfo)
return -ENOMEM;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 3143819935dc..0c97d3c86072 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1414,7 +1414,10 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
char *name;
unsigned int name_off, name_len, secbuf_len;
- secbuf_len = le16_to_cpu(req->SecurityBufferLength);
+ if (conn->use_spnego && conn->mechToken)
+ secbuf_len = conn->mechTokenLen;
+ else
+ secbuf_len = le16_to_cpu(req->SecurityBufferLength);
if (secbuf_len < sizeof(struct authenticate_message)) {
ksmbd_debug(SMB, "blob len %d too small\n", secbuf_len);
return NULL;
@@ -1505,7 +1508,10 @@ static int ntlm_authenticate(struct ksmbd_work *work,
struct authenticate_message *authblob;
authblob = user_authblob(conn, req);
- sz = le16_to_cpu(req->SecurityBufferLength);
+ if (conn->use_spnego && conn->mechToken)
+ sz = conn->mechTokenLen;
+ else
+ sz = le16_to_cpu(req->SecurityBufferLength);
rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
if (rc) {
set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
@@ -1778,8 +1784,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
negblob_off = le16_to_cpu(req->SecurityBufferOffset);
negblob_len = le16_to_cpu(req->SecurityBufferLength);
- if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
- negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+ if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer)) {
rc = -EINVAL;
goto out_err;
}
@@ -1788,8 +1793,15 @@ int smb2_sess_setup(struct ksmbd_work *work)
negblob_off);
if (decode_negotiation_token(conn, negblob, negblob_len) == 0) {
- if (conn->mechToken)
+ if (conn->mechToken) {
negblob = (struct negotiate_message *)conn->mechToken;
+ negblob_len = conn->mechTokenLen;
+ }
+ }
+
+ if (negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+ rc = -EINVAL;
+ goto out_err;
}
if (server_conf.auth_mechs & conn->auth_mechs) {
@@ -6161,8 +6173,10 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
offsetof(struct smb2_read_rsp, Buffer),
aux_payload_buf, nbytes);
- if (err)
+ if (err) {
+ kvfree(aux_payload_buf);
goto out;
+ }
kvfree(rpc_resp);
} else {
err = ksmbd_iov_pin_rsp(work, (void *)rsp,
@@ -6372,8 +6386,10 @@ int smb2_read(struct ksmbd_work *work)
err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
offsetof(struct smb2_read_rsp, Buffer),
aux_payload_buf, nbytes);
- if (err)
+ if (err) {
+ kvfree(aux_payload_buf);
goto out;
+ }
ksmbd_fd_put(work, fp);
return 0;
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index b49d47bdafc9..f29bb03f0dc4 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -74,7 +74,7 @@ static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
static int ksmbd_ipc_heartbeat_request(void);
-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
+static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
[KSMBD_EVENT_UNSPEC] = {
.len = 0,
},
@@ -403,7 +403,7 @@ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
return -EPERM;
#endif
- if (type >= KSMBD_EVENT_MAX) {
+ if (type > KSMBD_EVENT_MAX) {
WARN_ON(1);
return -EINVAL;
}
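
The two hunks above are one coordinated fix: with the sentinel pattern, KSMBD_EVENT_MAX names the last valid event, so the nla_policy array needs KSMBD_EVENT_MAX + 1 slots and the bounds check becomes type > KSMBD_EVENT_MAX; previously the policy array had no slot for the highest event type. The idiom in miniature, with invented event names:

#include <assert.h>

enum event {
	EVENT_UNSPEC,
	EVENT_STARTING_UP,
	EVENT_SHUTTING_DOWN,
	__EVENT_MAX,			/* counts the members */
	EVENT_MAX = __EVENT_MAX - 1	/* names the last valid value */
};

static const char *const event_name[EVENT_MAX + 1] = {
	[EVENT_UNSPEC]        = "unspec",
	[EVENT_STARTING_UP]   = "starting-up",
	[EVENT_SHUTTING_DOWN] = "shutting-down",
};

int main(void)
{
	int type = EVENT_SHUTTING_DOWN;

	assert(!(type > EVENT_MAX));	/* in range: safe to index */
	return event_name[type] ? 0 : 1;
}
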
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index c5629a68c8b7..8faa25c6e129 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -2039,6 +2039,7 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
struct smb_direct_transport *t;
+ struct task_struct *handler;
int ret;
if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
@@ -2056,11 +2057,11 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
if (ret)
goto out_err;
- KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
- KSMBD_TRANS(t)->conn, "ksmbd:r%u",
- smb_direct_port);
- if (IS_ERR(KSMBD_TRANS(t)->handler)) {
- ret = PTR_ERR(KSMBD_TRANS(t)->handler);
+ handler = kthread_run(ksmbd_conn_handler_loop,
+ KSMBD_TRANS(t)->conn, "ksmbd:r%u",
+ smb_direct_port);
+ if (IS_ERR(handler)) {
+ ret = PTR_ERR(handler);
pr_err("Can't start thread\n");
goto out_err;
}
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index eff7a1d793f0..002a3f0dc7c5 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -185,6 +185,7 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
struct sockaddr *csin;
int rc = 0;
struct tcp_transport *t;
+ struct task_struct *handler;
t = alloc_transport(client_sk);
if (!t) {
@@ -199,13 +200,13 @@ static int ksmbd_tcp_new_connection(struct socket *client_sk)
goto out_error;
}
- KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
- KSMBD_TRANS(t)->conn,
- "ksmbd:%u",
- ksmbd_tcp_get_port(csin));
- if (IS_ERR(KSMBD_TRANS(t)->handler)) {
+ handler = kthread_run(ksmbd_conn_handler_loop,
+ KSMBD_TRANS(t)->conn,
+ "ksmbd:%u",
+ ksmbd_tcp_get_port(csin));
+ if (IS_ERR(handler)) {
pr_err("cannot start conn thread\n");
- rc = PTR_ERR(KSMBD_TRANS(t)->handler);
+ rc = PTR_ERR(handler);
free_transport(t);
}
return rc;
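
Both the RDMA and TCP transports get the same fix: the kthread_run() result is kept in a local task_struct pointer instead of being stored in the transport, because on failure free_transport() releases the transport and the old code then read PTR_ERR() out of freed memory. The shape of the corrected error path; kthread_run(), IS_ERR() and PTR_ERR() are the real <linux/kthread.h> and <linux/err.h> APIs, while transport_like, conn_handler_fn and free_transport_like are hypothetical stand-ins:

static int start_conn_handler(struct transport_like *t, void *conn,
			      unsigned int port)
{
	struct task_struct *handler;

	handler = kthread_run(conn_handler_fn, conn, "worker:%u", port);
	if (IS_ERR(handler)) {
		int rc = PTR_ERR(handler);	/* capture before freeing */

		free_transport_like(t);		/* 't' is dead past this point */
		return rc;
	}
	return 0;
}
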
@@ -364,6 +365,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
* @t: TCP transport instance
* @buf: buffer to store read data from socket
* @to_read: number of bytes to read from socket
+ * @max_retries: number of retries if reading from socket fails
*
* Return: on success return number of bytes read from socket,
* otherwise return error number
@@ -415,6 +417,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
/**
* create_socket - create socket for ksmbd/0
+ * @iface: interface to bind the created socket to
*
* Return: 0 on success, error number otherwise
*/
diff --git a/fs/super.c b/fs/super.c
index d35e85295489..d6efeba0d0ce 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -274,9 +274,10 @@ static void destroy_super_work(struct work_struct *work)
{
struct super_block *s = container_of(work, struct super_block,
destroy_work);
- int i;
-
- for (i = 0; i < SB_FREEZE_LEVELS; i++)
+ security_sb_free(s);
+ put_user_ns(s->s_user_ns);
+ kfree(s->s_subtype);
+ for (int i = 0; i < SB_FREEZE_LEVELS; i++)
percpu_free_rwsem(&s->s_writers.rw_sem[i]);
kfree(s);
}
@@ -296,9 +297,6 @@ static void destroy_unused_super(struct super_block *s)
super_unlock_excl(s);
list_lru_destroy(&s->s_dentry_lru);
list_lru_destroy(&s->s_inode_lru);
- security_sb_free(s);
- put_user_ns(s->s_user_ns);
- kfree(s->s_subtype);
shrinker_free(s->s_shrink);
/* no delays needed */
destroy_super_work(&s->destroy_work);
@@ -409,9 +407,6 @@ static void __put_super(struct super_block *s)
WARN_ON(s->s_dentry_lru.node);
WARN_ON(s->s_inode_lru.node);
WARN_ON(!list_empty(&s->s_mounts));
- security_sb_free(s);
- put_user_ns(s->s_user_ns);
- kfree(s->s_subtype);
call_rcu(&s->rcu, destroy_super_rcu);
}
}
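
The super.c change is a consolidation: security_sb_free(), put_user_ns() and the s_subtype free used to be duplicated in destroy_unused_super() and __put_super(); moving them into destroy_super_work() gives both teardown paths one shared release point, so the two paths can no longer drift apart. The idiom in standalone form, with an invented struct:

#include <stdlib.h>

struct resource {
	char *label;
	void *table;
};

/* single release point: every teardown path funnels through here instead
 * of each path freeing its own subset of the members */
static void resource_destroy(struct resource *r)
{
	free(r->label);
	free(r->table);
	free(r);
}
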
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index 6795fda2af19..110e8a272189 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -34,7 +34,15 @@ static DEFINE_MUTEX(eventfs_mutex);
/* Choose something "unique" ;-) */
#define EVENTFS_FILE_INODE_INO 0x12c4e37
-#define EVENTFS_DIR_INODE_INO 0x134b2f5
+
+/* Just try to make something consistent and unique */
+static int eventfs_dir_ino(struct eventfs_inode *ei)
+{
+ if (!ei->ino)
+ ei->ino = get_next_ino();
+
+ return ei->ino;
+}
/*
* The eventfs_inode (ei) itself is protected by SRCU. It is released from
@@ -54,6 +62,46 @@ enum {
#define EVENTFS_MODE_MASK (EVENTFS_SAVE_MODE - 1)
+/*
+ * eventfs_inode reference count management.
+ *
+ * NOTE! We count only references from dentries, in the
+ * form 'dentry->d_fsdata'. There are also references from
+ * directory inodes ('ti->private'), but the dentry reference
+ * count is always a superset of the inode reference count.
+ */
+static void release_ei(struct kref *ref)
+{
+ struct eventfs_inode *ei = container_of(ref, struct eventfs_inode, kref);
+
+ WARN_ON_ONCE(!ei->is_freed);
+
+ kfree(ei->entry_attrs);
+ kfree_const(ei->name);
+ kfree_rcu(ei, rcu);
+}
+
+static inline void put_ei(struct eventfs_inode *ei)
+{
+ if (ei)
+ kref_put(&ei->kref, release_ei);
+}
+
+static inline void free_ei(struct eventfs_inode *ei)
+{
+ if (ei) {
+ ei->is_freed = 1;
+ put_ei(ei);
+ }
+}
+
+static inline struct eventfs_inode *get_ei(struct eventfs_inode *ei)
+{
+ if (ei)
+ kref_get(&ei->kref);
+ return ei;
+}
+
static struct dentry *eventfs_root_lookup(struct inode *dir,
struct dentry *dentry,
unsigned int flags);
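
The helpers above replace open-coded dentry bookkeeping with a kref on the eventfs_inode: get_ei() pairs each dentry->d_fsdata assignment with a reference, put_ei() drops it, and free_ei() marks the node dead before the last put so release_ei() can assert is_freed. A usage sketch of the same idiom; 'node' and its fields are illustrative, and only the kref calls are the real <linux/kref.h> API:

#include <linux/kref.h>
#include <linux/slab.h>

struct node {
	struct kref kref;
	bool dead;
};

static void node_release(struct kref *ref)
{
	struct node *n = container_of(ref, struct node, kref);

	WARN_ON_ONCE(!n->dead);		/* only objects marked dead get here */
	kfree(n);
}

static struct node *node_get(struct node *n)
{
	if (n)
		kref_get(&n->kref);	/* one reference per d_fsdata holder */
	return n;
}

static void node_kill(struct node *n)
{
	n->dead = true;			/* set before the final put ... */
	kref_put(&n->kref, node_release);	/* ... so release can assert it */
}
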
@@ -148,33 +196,30 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
return ret;
}
-static void update_top_events_attr(struct eventfs_inode *ei, struct dentry *dentry)
+static void update_top_events_attr(struct eventfs_inode *ei, struct super_block *sb)
{
- struct inode *inode;
+ struct inode *root;
/* Only update if the "events" was on the top level */
if (!ei || !(ei->attr.mode & EVENTFS_TOPLEVEL))
return;
/* Get the tracefs root inode. */
- inode = d_inode(dentry->d_sb->s_root);
- ei->attr.uid = inode->i_uid;
- ei->attr.gid = inode->i_gid;
+ root = d_inode(sb->s_root);
+ ei->attr.uid = root->i_uid;
+ ei->attr.gid = root->i_gid;
}
static void set_top_events_ownership(struct inode *inode)
{
struct tracefs_inode *ti = get_tracefs(inode);
struct eventfs_inode *ei = ti->private;
- struct dentry *dentry;
/* The top events directory doesn't get automatically updated */
if (!ei || !ei->is_events || !(ei->attr.mode & EVENTFS_TOPLEVEL))
return;
- dentry = ei->dentry;
-
- update_top_events_attr(ei, dentry);
+ update_top_events_attr(ei, inode->i_sb);
if (!(ei->attr.mode & EVENTFS_SAVE_UID))
inode->i_uid = ei->attr.uid;
@@ -225,10 +270,11 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
{
struct eventfs_inode *ei;
- mutex_lock(&eventfs_mutex);
do {
- /* The parent always has an ei, except for events itself */
- ei = dentry->d_parent->d_fsdata;
+ // The parent is stable because we do not do renames
+ dentry = dentry->d_parent;
+ // ... and directories always have d_fsdata
+ ei = dentry->d_fsdata;
/*
* If the ei is being freed, the ownership of the children
@@ -238,12 +284,10 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
ei = NULL;
break;
}
-
- dentry = ei->dentry;
+ // Walk upwards until you find the events inode
} while (!ei->is_events);
- mutex_unlock(&eventfs_mutex);
- update_top_events_attr(ei, dentry);
+ update_top_events_attr(ei, dentry->d_sb);
return ei;
}
@@ -273,50 +317,11 @@ static void update_inode_attr(struct dentry *dentry, struct inode *inode,
inode->i_gid = attr->gid;
}
-static void update_gid(struct eventfs_inode *ei, kgid_t gid, int level)
-{
- struct eventfs_inode *ei_child;
-
- /* at most we have events/system/event */
- if (WARN_ON_ONCE(level > 3))
- return;
-
- ei->attr.gid = gid;
-
- if (ei->entry_attrs) {
- for (int i = 0; i < ei->nr_entries; i++) {
- ei->entry_attrs[i].gid = gid;
- }
- }
-
- /*
- * Only eventfs_inode with dentries are updated, make sure
- * all eventfs_inodes are updated. If one of the children
- * do not have a dentry, this function must traverse it.
- */
- list_for_each_entry_srcu(ei_child, &ei->children, list,
- srcu_read_lock_held(&eventfs_srcu)) {
- if (!ei_child->dentry)
- update_gid(ei_child, gid, level + 1);
- }
-}
-
-void eventfs_update_gid(struct dentry *dentry, kgid_t gid)
-{
- struct eventfs_inode *ei = dentry->d_fsdata;
- int idx;
-
- idx = srcu_read_lock(&eventfs_srcu);
- update_gid(ei, gid, 0);
- srcu_read_unlock(&eventfs_srcu, idx);
-}
-
/**
- * create_file - create a file in the tracefs filesystem
- * @name: the name of the file to create.
+ * lookup_file - look up a file in the tracefs filesystem
+ * @dentry: the dentry to look up
* @mode: the permission that the file should have.
* @attr: saved attributes changed by user
- * @parent: parent dentry for this file.
* @data: something that the caller will want to get to later on.
* @fop: struct file_operations that should be used for this file.
*
@@ -324,30 +329,25 @@ void eventfs_update_gid(struct dentry *dentry, kgid_t gid)
* directory. The inode.i_private pointer will point to @data in the open()
* call.
*/
-static struct dentry *create_file(const char *name, umode_t mode,
+static struct dentry *lookup_file(struct eventfs_inode *parent_ei,
+ struct dentry *dentry,
+ umode_t mode,
struct eventfs_attr *attr,
- struct dentry *parent, void *data,
+ void *data,
const struct file_operations *fop)
{
struct tracefs_inode *ti;
- struct dentry *dentry;
struct inode *inode;
if (!(mode & S_IFMT))
mode |= S_IFREG;
if (WARN_ON_ONCE(!S_ISREG(mode)))
- return NULL;
-
- WARN_ON_ONCE(!parent);
- dentry = eventfs_start_creating(name, parent);
-
- if (IS_ERR(dentry))
- return dentry;
+ return ERR_PTR(-EIO);
inode = tracefs_get_inode(dentry->d_sb);
if (unlikely(!inode))
- return eventfs_failed_creating(dentry);
+ return ERR_PTR(-ENOMEM);
/* If the user updated the directory's attributes, use them */
update_inode_attr(dentry, inode, attr, mode);
@@ -361,32 +361,31 @@ static struct dentry *create_file(const char *name, umode_t mode,
ti = get_tracefs(inode);
ti->flags |= TRACEFS_EVENT_INODE;
- d_instantiate(dentry, inode);
- fsnotify_create(dentry->d_parent->d_inode, dentry);
- return eventfs_end_creating(dentry);
+
+ // Files have their parent's ei as their fsdata
+ dentry->d_fsdata = get_ei(parent_ei);
+
+ d_add(dentry, inode);
+ return NULL;
};
/**
- * create_dir - create a dir in the tracefs filesystem
+ * lookup_dir_entry - look up a dir in the tracefs filesystem
+ * @dentry: the directory to look up
* @ei: the eventfs_inode that represents the directory to create
- * @parent: parent dentry for this file.
*
- * This function will create a dentry for a directory represented by
+ * This function will look up a dentry for a directory represented by
* a eventfs_inode.
*/
-static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent)
+static struct dentry *lookup_dir_entry(struct dentry *dentry,
+ struct eventfs_inode *pei, struct eventfs_inode *ei)
{
struct tracefs_inode *ti;
- struct dentry *dentry;
struct inode *inode;
- dentry = eventfs_start_creating(ei->name, parent);
- if (IS_ERR(dentry))
- return dentry;
-
inode = tracefs_get_inode(dentry->d_sb);
if (unlikely(!inode))
- return eventfs_failed_creating(dentry);
+ return ERR_PTR(-ENOMEM);
/* If the user updated the directory's attributes, use them */
update_inode_attr(dentry, inode, &ei->attr,
@@ -396,68 +395,50 @@ static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent
inode->i_fop = &eventfs_file_operations;
/* All directories will have the same inode number */
- inode->i_ino = EVENTFS_DIR_INODE_INO;
+ inode->i_ino = eventfs_dir_ino(ei);
ti = get_tracefs(inode);
ti->flags |= TRACEFS_EVENT_INODE;
+ /* Only directories have ti->private set to an ei, not files */
+ ti->private = ei;
- inc_nlink(inode);
- d_instantiate(dentry, inode);
- inc_nlink(dentry->d_parent->d_inode);
- fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
- return eventfs_end_creating(dentry);
+ dentry->d_fsdata = get_ei(ei);
+
+ d_add(dentry, inode);
+ return NULL;
}
-static void free_ei(struct eventfs_inode *ei)
+static inline struct eventfs_inode *alloc_ei(const char *name)
{
- kfree_const(ei->name);
- kfree(ei->d_children);
- kfree(ei->entry_attrs);
- kfree(ei);
+ struct eventfs_inode *ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+
+ if (!ei)
+ return NULL;
+
+ ei->name = kstrdup_const(name, GFP_KERNEL);
+ if (!ei->name) {
+ kfree(ei);
+ return NULL;
+ }
+ kref_init(&ei->kref);
+ return ei;
}
/**
- * eventfs_set_ei_status_free - remove the dentry reference from an eventfs_inode
- * @ti: the tracefs_inode of the dentry
+ * eventfs_d_release - dentry is going away
* @dentry: dentry which has the reference to remove.
*
* Remove the association between a dentry from an eventfs_inode.
*/
-void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry)
+void eventfs_d_release(struct dentry *dentry)
{
- struct eventfs_inode *ei;
- int i;
-
- mutex_lock(&eventfs_mutex);
-
- ei = dentry->d_fsdata;
- if (!ei)
- goto out;
-
- /* This could belong to one of the files of the ei */
- if (ei->dentry != dentry) {
- for (i = 0; i < ei->nr_entries; i++) {
- if (ei->d_children[i] == dentry)
- break;
- }
- if (WARN_ON_ONCE(i == ei->nr_entries))
- goto out;
- ei->d_children[i] = NULL;
- } else if (ei->is_freed) {
- free_ei(ei);
- } else {
- ei->dentry = NULL;
- }
-
- dentry->d_fsdata = NULL;
- out:
- mutex_unlock(&eventfs_mutex);
+ put_ei(dentry->d_fsdata);
}
/**
- * create_file_dentry - create a dentry for a file of an eventfs_inode
+ * lookup_file_dentry - create a dentry for a file of an eventfs_inode
* @ei: the eventfs_inode that the file will be created under
- * @idx: the index into the d_children[] of the @ei
+ * @idx: the index into the entry_attrs[] of the @ei
* @parent: The parent dentry of the created file.
* @name: The name of the file to create
* @mode: The mode of the file.
@@ -468,163 +449,17 @@ void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry)
* address located at @e_dentry.
*/
static struct dentry *
-create_file_dentry(struct eventfs_inode *ei, int idx,
- struct dentry *parent, const char *name, umode_t mode, void *data,
+lookup_file_dentry(struct dentry *dentry,
+ struct eventfs_inode *ei, int idx,
+ umode_t mode, void *data,
const struct file_operations *fops)
{
struct eventfs_attr *attr = NULL;
- struct dentry **e_dentry = &ei->d_children[idx];
- struct dentry *dentry;
-
- WARN_ON_ONCE(!inode_is_locked(parent->d_inode));
- mutex_lock(&eventfs_mutex);
- if (ei->is_freed) {
- mutex_unlock(&eventfs_mutex);
- return NULL;
- }
- /* If the e_dentry already has a dentry, use it */
- if (*e_dentry) {
- dget(*e_dentry);
- mutex_unlock(&eventfs_mutex);
- return *e_dentry;
- }
-
- /* ei->entry_attrs are protected by SRCU */
if (ei->entry_attrs)
attr = &ei->entry_attrs[idx];
- mutex_unlock(&eventfs_mutex);
-
- dentry = create_file(name, mode, attr, parent, data, fops);
-
- mutex_lock(&eventfs_mutex);
-
- if (IS_ERR_OR_NULL(dentry)) {
- /*
- * When the mutex was released, something else could have
- * created the dentry for this e_dentry. In which case
- * use that one.
- *
- * If ei->is_freed is set, the e_dentry is currently on its
- * way to being freed, don't return it. If e_dentry is NULL
- * it means it was already freed.
- */
- if (ei->is_freed) {
- dentry = NULL;
- } else {
- dentry = *e_dentry;
- dget(dentry);
- }
- mutex_unlock(&eventfs_mutex);
- return dentry;
- }
-
- if (!*e_dentry && !ei->is_freed) {
- *e_dentry = dentry;
- dentry->d_fsdata = ei;
- } else {
- /*
- * Should never happen unless we get here due to being freed.
- * Otherwise it means two dentries exist with the same name.
- */
- WARN_ON_ONCE(!ei->is_freed);
- dentry = NULL;
- }
- mutex_unlock(&eventfs_mutex);
-
- return dentry;
-}
-
-/**
- * eventfs_post_create_dir - post create dir routine
- * @ei: eventfs_inode of recently created dir
- *
- * Map the meta-data of files within an eventfs dir to their parent dentry
- */
-static void eventfs_post_create_dir(struct eventfs_inode *ei)
-{
- struct eventfs_inode *ei_child;
- struct tracefs_inode *ti;
-
- lockdep_assert_held(&eventfs_mutex);
-
- /* srcu lock already held */
- /* fill parent-child relation */
- list_for_each_entry_srcu(ei_child, &ei->children, list,
- srcu_read_lock_held(&eventfs_srcu)) {
- ei_child->d_parent = ei->dentry;
- }
-
- ti = get_tracefs(ei->dentry->d_inode);
- ti->private = ei;
-}
-
-/**
- * create_dir_dentry - Create a directory dentry for the eventfs_inode
- * @pei: The eventfs_inode parent of ei.
- * @ei: The eventfs_inode to create the directory for
- * @parent: The dentry of the parent of this directory
- *
- * This creates and attaches a directory dentry to the eventfs_inode @ei.
- */
-static struct dentry *
-create_dir_dentry(struct eventfs_inode *pei, struct eventfs_inode *ei,
- struct dentry *parent)
-{
- struct dentry *dentry = NULL;
-
- WARN_ON_ONCE(!inode_is_locked(parent->d_inode));
-
- mutex_lock(&eventfs_mutex);
- if (pei->is_freed || ei->is_freed) {
- mutex_unlock(&eventfs_mutex);
- return NULL;
- }
- if (ei->dentry) {
- /* If the eventfs_inode already has a dentry, use it */
- dentry = ei->dentry;
- dget(dentry);
- mutex_unlock(&eventfs_mutex);
- return dentry;
- }
- mutex_unlock(&eventfs_mutex);
-
- dentry = create_dir(ei, parent);
-
- mutex_lock(&eventfs_mutex);
-
- if (IS_ERR_OR_NULL(dentry) && !ei->is_freed) {
- /*
- * When the mutex was released, something else could have
- * created the dentry for this e_dentry. In which case
- * use that one.
- *
- * If ei->is_freed is set, the e_dentry is currently on its
- * way to being freed.
- */
- dentry = ei->dentry;
- if (dentry)
- dget(dentry);
- mutex_unlock(&eventfs_mutex);
- return dentry;
- }
-
- if (!ei->dentry && !ei->is_freed) {
- ei->dentry = dentry;
- eventfs_post_create_dir(ei);
- dentry->d_fsdata = ei;
- } else {
- /*
- * Should never happen unless we get here due to being freed.
- * Otherwise it means two dentries exist with the same name.
- */
- WARN_ON_ONCE(!ei->is_freed);
- dentry = NULL;
- }
- mutex_unlock(&eventfs_mutex);
-
- return dentry;
+ return lookup_file(ei, dentry, mode, attr, data, fops);
}
/**
@@ -641,79 +476,50 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
struct dentry *dentry,
unsigned int flags)
{
- const struct file_operations *fops;
- const struct eventfs_entry *entry;
struct eventfs_inode *ei_child;
struct tracefs_inode *ti;
struct eventfs_inode *ei;
- struct dentry *ei_dentry = NULL;
- struct dentry *ret = NULL;
- struct dentry *d;
const char *name = dentry->d_name.name;
- umode_t mode;
- void *data;
- int idx;
- int i;
- int r;
+ struct dentry *result = NULL;
ti = get_tracefs(dir);
if (!(ti->flags & TRACEFS_EVENT_INODE))
- return NULL;
-
- /* Grab srcu to prevent the ei from going away */
- idx = srcu_read_lock(&eventfs_srcu);
+ return ERR_PTR(-EIO);
- /*
- * Grab the eventfs_mutex to consistent value from ti->private.
- * This s
- */
mutex_lock(&eventfs_mutex);
- ei = READ_ONCE(ti->private);
- if (ei && !ei->is_freed)
- ei_dentry = READ_ONCE(ei->dentry);
- mutex_unlock(&eventfs_mutex);
- if (!ei || !ei_dentry)
+ ei = ti->private;
+ if (!ei || ei->is_freed)
goto out;
- data = ei->data;
-
- list_for_each_entry_srcu(ei_child, &ei->children, list,
- srcu_read_lock_held(&eventfs_srcu)) {
+ list_for_each_entry(ei_child, &ei->children, list) {
if (strcmp(ei_child->name, name) != 0)
continue;
- ret = simple_lookup(dir, dentry, flags);
- if (IS_ERR(ret))
+ if (ei_child->is_freed)
goto out;
- d = create_dir_dentry(ei, ei_child, ei_dentry);
- dput(d);
+ result = lookup_dir_entry(dentry, ei, ei_child);
goto out;
}
- for (i = 0; i < ei->nr_entries; i++) {
- entry = &ei->entries[i];
- if (strcmp(name, entry->name) == 0) {
- void *cdata = data;
- mutex_lock(&eventfs_mutex);
- /* If ei->is_freed, then the event itself may be too */
- if (!ei->is_freed)
- r = entry->callback(name, &mode, &cdata, &fops);
- else
- r = -1;
- mutex_unlock(&eventfs_mutex);
- if (r <= 0)
- continue;
- ret = simple_lookup(dir, dentry, flags);
- if (IS_ERR(ret))
- goto out;
- d = create_file_dentry(ei, i, ei_dentry, name, mode, cdata, fops);
- dput(d);
- break;
- }
+ for (int i = 0; i < ei->nr_entries; i++) {
+ void *data;
+ umode_t mode;
+ const struct file_operations *fops;
+ const struct eventfs_entry *entry = &ei->entries[i];
+
+ if (strcmp(name, entry->name) != 0)
+ continue;
+
+ data = ei->data;
+ if (entry->callback(name, &mode, &data, &fops) <= 0)
+ goto out;
+
+ result = lookup_file_dentry(dentry, ei, i, mode, data, fops);
+ goto out;
}
out:
- srcu_read_unlock(&eventfs_srcu, idx);
- return ret;
+ mutex_unlock(&eventfs_mutex);
+ return result;
}
/*
@@ -802,7 +608,7 @@ static int eventfs_iterate(struct file *file, struct dir_context *ctx)
name = ei_child->name;
- ino = EVENTFS_DIR_INODE_INO;
+ ino = eventfs_dir_ino(ei_child);
if (!dir_emit(ctx, name, strlen(name), ino, DT_DIR))
goto out_dec;
@@ -863,25 +669,10 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode
if (!parent)
return ERR_PTR(-EINVAL);
- ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+ ei = alloc_ei(name);
if (!ei)
return ERR_PTR(-ENOMEM);
- ei->name = kstrdup_const(name, GFP_KERNEL);
- if (!ei->name) {
- kfree(ei);
- return ERR_PTR(-ENOMEM);
- }
-
- if (size) {
- ei->d_children = kcalloc(size, sizeof(*ei->d_children), GFP_KERNEL);
- if (!ei->d_children) {
- kfree_const(ei->name);
- kfree(ei);
- return ERR_PTR(-ENOMEM);
- }
- }
-
ei->entries = entries;
ei->nr_entries = size;
ei->data = data;
@@ -889,10 +680,8 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode
INIT_LIST_HEAD(&ei->list);
mutex_lock(&eventfs_mutex);
- if (!parent->is_freed) {
+ if (!parent->is_freed)
list_add_tail(&ei->list, &parent->children);
- ei->d_parent = parent->dentry;
- }
mutex_unlock(&eventfs_mutex);
/* Was the parent freed? */
@@ -932,28 +721,20 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
if (IS_ERR(dentry))
return ERR_CAST(dentry);
- ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+ ei = alloc_ei(name);
if (!ei)
- goto fail_ei;
+ goto fail;
inode = tracefs_get_inode(dentry->d_sb);
if (unlikely(!inode))
goto fail;
- if (size) {
- ei->d_children = kcalloc(size, sizeof(*ei->d_children), GFP_KERNEL);
- if (!ei->d_children)
- goto fail;
- }
-
- ei->dentry = dentry;
+ // Note: we have a ref to the dentry from tracefs_start_creating()
+ ei->events_dir = dentry;
ei->entries = entries;
ei->nr_entries = size;
ei->is_events = 1;
ei->data = data;
- ei->name = kstrdup_const(name, GFP_KERNEL);
- if (!ei->name)
- goto fail;
/* Save the ownership of this directory */
uid = d_inode(dentry->d_parent)->i_uid;
@@ -984,11 +765,19 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
inode->i_op = &eventfs_root_dir_inode_operations;
inode->i_fop = &eventfs_file_operations;
- dentry->d_fsdata = ei;
+ dentry->d_fsdata = get_ei(ei);
- /* directory inodes start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
+ /*
+ * Keep all eventfs directories with i_nlink == 1.
+ * Because dentries are created dynamically and we do not want to
+ * add a pointer to the parent eventfs_inode into the eventfs_inode
+ * structure, keeping i_nlink in sync with the number of directories
+ * would add too much complexity for too little benefit. Keeping
+ * directory links at 1 tells userspace not to trust the link count.
+ */
d_instantiate(dentry, inode);
+ /* The dentry of the "events" parent does keep track though */
inc_nlink(dentry->d_parent->d_inode);
fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
tracefs_end_creating(dentry);
@@ -996,72 +785,11 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
return ei;
fail:
- kfree(ei->d_children);
- kfree(ei);
- fail_ei:
+ free_ei(ei);
tracefs_failed_creating(dentry);
return ERR_PTR(-ENOMEM);
}
-static LLIST_HEAD(free_list);
-
-static void eventfs_workfn(struct work_struct *work)
-{
- struct eventfs_inode *ei, *tmp;
- struct llist_node *llnode;
-
- llnode = llist_del_all(&free_list);
- llist_for_each_entry_safe(ei, tmp, llnode, llist) {
- /* This dput() matches the dget() from unhook_dentry() */
- for (int i = 0; i < ei->nr_entries; i++) {
- if (ei->d_children[i])
- dput(ei->d_children[i]);
- }
- /* This should only get here if it had a dentry */
- if (!WARN_ON_ONCE(!ei->dentry))
- dput(ei->dentry);
- }
-}
-
-static DECLARE_WORK(eventfs_work, eventfs_workfn);
-
-static void free_rcu_ei(struct rcu_head *head)
-{
- struct eventfs_inode *ei = container_of(head, struct eventfs_inode, rcu);
-
- if (ei->dentry) {
- /* Do not free the ei until all references of dentry are gone */
- if (llist_add(&ei->llist, &free_list))
- queue_work(system_unbound_wq, &eventfs_work);
- return;
- }
-
- /* If the ei doesn't have a dentry, neither should its children */
- for (int i = 0; i < ei->nr_entries; i++) {
- WARN_ON_ONCE(ei->d_children[i]);
- }
-
- free_ei(ei);
-}
-
-static void unhook_dentry(struct dentry *dentry)
-{
- if (!dentry)
- return;
- /*
- * Need to add a reference to the dentry that is expected by
- * simple_recursive_removal(), which will include a dput().
- */
- dget(dentry);
-
- /*
- * Also add a reference for the dput() in eventfs_workfn().
- * That is required as that dput() will free the ei after
- * the SRCU grace period is over.
- */
- dget(dentry);
-}
-
/**
* eventfs_remove_rec - remove eventfs dir or file from list
* @ei: eventfs_inode to be removed.
@@ -1074,8 +802,6 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
{
struct eventfs_inode *ei_child;
- if (!ei)
- return;
/*
* Check recursion depth. It should never be greater than 3:
* 0 - events/
@@ -1087,28 +813,11 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
return;
/* search for nested folders or files */
- list_for_each_entry_srcu(ei_child, &ei->children, list,
- lockdep_is_held(&eventfs_mutex)) {
- /* Children only have dentry if parent does */
- WARN_ON_ONCE(ei_child->dentry && !ei->dentry);
+ list_for_each_entry(ei_child, &ei->children, list)
eventfs_remove_rec(ei_child, level + 1);
- }
-
-
- ei->is_freed = 1;
- for (int i = 0; i < ei->nr_entries; i++) {
- if (ei->d_children[i]) {
- /* Children only have dentry if parent does */
- WARN_ON_ONCE(!ei->dentry);
- unhook_dentry(ei->d_children[i]);
- }
- }
-
- unhook_dentry(ei->dentry);
-
- list_del_rcu(&ei->list);
- call_srcu(&eventfs_srcu, &ei->rcu, free_rcu_ei);
+ list_del(&ei->list);
+ free_ei(ei);
}
/**
@@ -1119,22 +828,12 @@ static void eventfs_remove_rec(struct eventfs_inode *ei, int level)
*/
void eventfs_remove_dir(struct eventfs_inode *ei)
{
- struct dentry *dentry;
-
if (!ei)
return;
mutex_lock(&eventfs_mutex);
- dentry = ei->dentry;
eventfs_remove_rec(ei, 0);
mutex_unlock(&eventfs_mutex);
-
- /*
- * If any of the ei children has a dentry, then the ei itself
- * must have a dentry.
- */
- if (dentry)
- simple_recursive_removal(dentry, NULL);
}
/**
@@ -1147,7 +846,11 @@ void eventfs_remove_events_dir(struct eventfs_inode *ei)
{
struct dentry *dentry;
- dentry = ei->dentry;
+ dentry = ei->events_dir;
+ if (!dentry)
+ return;
+
+ ei->events_dir = NULL;
eventfs_remove_dir(ei);
/*
@@ -1157,5 +860,6 @@ void eventfs_remove_events_dir(struct eventfs_inode *ei)
* sticks around while the other ei->dentry are created
* and destroyed dynamically.
*/
+ d_invalidate(dentry);
dput(dentry);
}
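
Teardown follows from the reference scheme: eventfs_remove_rec() now only unlinks the eventfs_inode and marks it freed, and the lone dentry dismantled by hand is the pinned events root, via d_invalidate() plus the final dput(). Every other cached dentry dies lazily the next time d_revalidate sees is_freed, as the tracefs hunk below wires up. The lazy-invalidation design reduces to a flag check; a standalone analogue:

#include <stdbool.h>

struct entry {
	bool is_freed;		/* set by the remover; never cleared */
};

/* analogue of tracefs_d_revalidate(): deletion just flips the flag, and
 * cached handles are rechecked and dropped on their next use */
static bool handle_still_valid(const struct entry *e)
{
	return e && !e->is_freed;
}
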
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index e1b172c0e091..d65ffad4c327 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -38,8 +38,6 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb)
if (!ti)
return NULL;
- ti->flags = 0;
-
return &ti->vfs_inode;
}
@@ -379,21 +377,30 @@ static const struct super_operations tracefs_super_operations = {
.show_options = tracefs_show_options,
};
-static void tracefs_dentry_iput(struct dentry *dentry, struct inode *inode)
+/*
+ * It would be cleaner if eventfs had its own dentry ops.
+ *
+ * Note that d_revalidate is potentially called under RCU,
+ * so it can't take the eventfs mutex etc. That is fine: if
+ * we open a file just as it is marked dead, things still
+ * work and we simply see the old, stale state.
+ */
+static void tracefs_d_release(struct dentry *dentry)
{
- struct tracefs_inode *ti;
+ if (dentry->d_fsdata)
+ eventfs_d_release(dentry);
+}
- if (!dentry || !inode)
- return;
+static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct eventfs_inode *ei = dentry->d_fsdata;
- ti = get_tracefs(inode);
- if (ti && ti->flags & TRACEFS_EVENT_INODE)
- eventfs_set_ei_status_free(ti, dentry);
- iput(inode);
+ return !(ei && ei->is_freed);
}
static const struct dentry_operations tracefs_dentry_operations = {
- .d_iput = tracefs_dentry_iput,
+ .d_revalidate = tracefs_d_revalidate,
+ .d_release = tracefs_d_release,
};
static int trace_fill_super(struct super_block *sb, void *data, int silent)
@@ -497,75 +504,6 @@ struct dentry *tracefs_end_creating(struct dentry *dentry)
return dentry;
}
-/**
- * eventfs_start_creating - start the process of creating a dentry
- * @name: Name of the file created for the dentry
- * @parent: The parent dentry where this dentry will be created
- *
- * This is a simple helper function for the dynamically created eventfs
- * files. When the directory of the eventfs files are accessed, their
- * dentries are created on the fly. This function is used to start that
- * process.
- */
-struct dentry *eventfs_start_creating(const char *name, struct dentry *parent)
-{
- struct dentry *dentry;
- int error;
-
- /* Must always have a parent. */
- if (WARN_ON_ONCE(!parent))
- return ERR_PTR(-EINVAL);
-
- error = simple_pin_fs(&trace_fs_type, &tracefs_mount,
- &tracefs_mount_count);
- if (error)
- return ERR_PTR(error);
-
- if (unlikely(IS_DEADDIR(parent->d_inode)))
- dentry = ERR_PTR(-ENOENT);
- else
- dentry = lookup_one_len(name, parent, strlen(name));
-
- if (!IS_ERR(dentry) && dentry->d_inode) {
- dput(dentry);
- dentry = ERR_PTR(-EEXIST);
- }
-
- if (IS_ERR(dentry))
- simple_release_fs(&tracefs_mount, &tracefs_mount_count);
-
- return dentry;
-}
-
-/**
- * eventfs_failed_creating - clean up a failed eventfs dentry creation
- * @dentry: The dentry to clean up
- *
- * If after calling eventfs_start_creating(), a failure is detected, the
- * resources created by eventfs_start_creating() needs to be cleaned up. In
- * that case, this function should be called to perform that clean up.
- */
-struct dentry *eventfs_failed_creating(struct dentry *dentry)
-{
- dput(dentry);
- simple_release_fs(&tracefs_mount, &tracefs_mount_count);
- return NULL;
-}
-
-/**
- * eventfs_end_creating - Finish the process of creating a eventfs dentry
- * @dentry: The dentry that has successfully been created.
- *
- * This function is currently just a place holder to match
- * eventfs_start_creating(). In case any synchronization needs to be added,
- * this function will be used to implement that without having to modify
- * the callers of eventfs_start_creating().
- */
-struct dentry *eventfs_end_creating(struct dentry *dentry)
-{
- return dentry;
-}
-
/* Find the inode that this will use for default */
static struct inode *instance_inode(struct dentry *parent, struct inode *inode)
{
@@ -779,7 +717,11 @@ static void init_once(void *foo)
{
struct tracefs_inode *ti = (struct tracefs_inode *) foo;
+ /* inode_init_once() calls memset() on the vfs_inode portion */
inode_init_once(&ti->vfs_inode);
+
+ /* Zero out the rest */
+ memset_after(ti, 0, vfs_inode);
}
static int __init tracefs_init(void)
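
init_once() now pairs inode_init_once() with memset_after(), a real helper from <linux/string.h> that zeroes everything from just past the named member to the end of the object; that is why the internal.h hunk below moves vfs_inode to the top of struct tracefs_inode. A hand-rolled equivalent, using an illustrative struct with the same layout:

#include <linux/fs.h>
#include <linux/stddef.h>	/* offsetofend() */
#include <linux/string.h>

struct tracefs_inode_like {
	struct inode vfs_inode;	/* must come first: it is the preserved part */
	unsigned long flags;	/* zeroed */
	void *private;		/* zeroed */
};

/* equivalent of memset_after(ti, 0, vfs_inode), for illustration */
static void wipe_after_vfs_inode(struct tracefs_inode_like *ti)
{
	size_t off = offsetofend(struct tracefs_inode_like, vfs_inode);

	memset((char *)ti + off, 0, sizeof(*ti) - off);
}
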
diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
index 12b7d0150ae9..beb3dcd0e434 100644
--- a/fs/tracefs/internal.h
+++ b/fs/tracefs/internal.h
@@ -11,9 +11,10 @@ enum {
};
struct tracefs_inode {
+ struct inode vfs_inode;
+ /* The below gets initialized with memset_after(ti, 0, vfs_inode) */
unsigned long flags;
void *private;
- struct inode vfs_inode;
};
/*
@@ -31,42 +32,37 @@ struct eventfs_attr {
/*
* struct eventfs_inode - hold the properties of the eventfs directories.
* @list: link list into the parent directory
+ * @rcu: Union with @list for freeing
+ * @children: linked list of child eventfs_inodes
* @entries: the array of entries representing the files in the directory
* @name: the name of the directory to create
- * @children: link list into the child eventfs_inode
- * @dentry: the dentry of the directory
- * @d_parent: pointer to the parent's dentry
- * @d_children: The array of dentries to represent the files when created
+ * @events_dir: the dentry of the events directory
* @entry_attrs: Saved mode and ownership of the @d_children
- * @attr: Saved mode and ownership of eventfs_inode itself
* @data: The private data to pass to the callbacks
+ * @attr: Saved mode and ownership of eventfs_inode itself
* @is_freed: Flag set if the eventfs is on its way to be freed
* Note if is_freed is set, then dentry is corrupted.
+ * @is_events: Flag set for only the top level "events" directory
* @nr_entries: The number of items in @entries
+ * @ino: The saved inode number
*/
struct eventfs_inode {
- struct list_head list;
+ union {
+ struct list_head list;
+ struct rcu_head rcu;
+ };
+ struct list_head children;
const struct eventfs_entry *entries;
const char *name;
- struct list_head children;
- struct dentry *dentry; /* Check is_freed to access */
- struct dentry *d_parent;
- struct dentry **d_children;
+ struct dentry *events_dir;
struct eventfs_attr *entry_attrs;
- struct eventfs_attr attr;
void *data;
- /*
- * Union - used for deletion
- * @llist: for calling dput() if needed after RCU
- * @rcu: eventfs_inode to delete in RCU
- */
- union {
- struct llist_node llist;
- struct rcu_head rcu;
- };
+ struct eventfs_attr attr;
+ struct kref kref;
unsigned int is_freed:1;
unsigned int is_events:1;
unsigned int nr_entries:30;
+ unsigned int ino;
};
static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
@@ -78,10 +74,7 @@ struct dentry *tracefs_start_creating(const char *name, struct dentry *parent);
struct dentry *tracefs_end_creating(struct dentry *dentry);
struct dentry *tracefs_failed_creating(struct dentry *dentry);
struct inode *tracefs_get_inode(struct super_block *sb);
-struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
-struct dentry *eventfs_failed_creating(struct dentry *dentry);
-struct dentry *eventfs_end_creating(struct dentry *dentry);
-void eventfs_update_gid(struct dentry *dentry, kgid_t gid);
-void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry);
+
+void eventfs_d_release(struct dentry *dentry);
#endif /* _TRACEFS_INTERNAL_H */
diff --git a/fs/verity/measure.c b/fs/verity/measure.c
index bf7a5f4cccaf..3969d54158d1 100644
--- a/fs/verity/measure.c
+++ b/fs/verity/measure.c
@@ -159,9 +159,9 @@ __bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_ker
__bpf_kfunc_end_defs();
-BTF_SET8_START(fsverity_set_ids)
+BTF_KFUNCS_START(fsverity_set_ids)
BTF_ID_FLAGS(func, bpf_get_fsverity_digest, KF_TRUSTED_ARGS)
-BTF_SET8_END(fsverity_set_ids)
+BTF_KFUNCS_END(fsverity_set_ids)
static int bpf_get_fsverity_digest_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 9976a00a73f9..e965a48e7db9 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -421,10 +421,10 @@ xfs_attr_complete_op(
bool do_replace = args->op_flags & XFS_DA_OP_REPLACE;
args->op_flags &= ~XFS_DA_OP_REPLACE;
- if (do_replace) {
- args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
+ args->attr_filter &= ~XFS_ATTR_INCOMPLETE;
+ if (do_replace)
return replace_state;
- }
+
return XFS_DAS_DONE;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 98aaca933bdd..f362345467fa 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3277,7 +3277,7 @@ xfs_bmap_alloc_account(
struct xfs_bmalloca *ap)
{
bool isrt = XFS_IS_REALTIME_INODE(ap->ip) &&
- (ap->flags & XFS_BMAPI_ATTRFORK);
+ !(ap->flags & XFS_BMAPI_ATTRFORK);
uint fld;
if (ap->flags & XFS_BMAPI_COWFORK) {
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 31100120b2c5..e31663cb7b43 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1119,20 +1119,6 @@ xfs_rtbitmap_blockcount(
}
/*
- * Compute the maximum level number of the realtime summary file, as defined by
- * mkfs. The historic use of highbit32 on a 64-bit quantity prohibited correct
- * use of rt volumes with more than 2^32 extents.
- */
-uint8_t
-xfs_compute_rextslog(
- xfs_rtbxlen_t rtextents)
-{
- if (!rtextents)
- return 0;
- return xfs_highbit64(rtextents);
-}
-
-/*
* Compute the number of rtbitmap words needed to populate every block of a
* bitmap that is large enough to track the given number of rt extents.
*/
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h
index 274dc7dae1fa..152a66750af5 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.h
+++ b/fs/xfs/libxfs/xfs_rtbitmap.h
@@ -351,20 +351,6 @@ xfs_rtfree_extent(
int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno,
xfs_filblks_t rtlen);
-uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
-
-/* Do we support an rt volume having this number of rtextents? */
-static inline bool
-xfs_validate_rtextents(
- xfs_rtbxlen_t rtextents)
-{
- /* No runt rt volumes */
- if (rtextents == 0)
- return false;
-
- return true;
-}
-
xfs_filblks_t xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t
rtextents);
unsigned long long xfs_rtbitmap_wordcount(struct xfs_mount *mp,
@@ -383,8 +369,6 @@ unsigned long long xfs_rtsummary_wordcount(struct xfs_mount *mp,
# define xfs_rtsummary_read_buf(a,b) (-ENOSYS)
# define xfs_rtbuf_cache_relse(a) (0)
# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (-ENOSYS)
-# define xfs_compute_rextslog(rtx) (0)
-# define xfs_validate_rtextents(rtx) (false)
static inline xfs_filblks_t
xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
{
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 4a9e8588f4c9..5bb6e2bd6dee 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1377,3 +1377,17 @@ xfs_validate_stripe_geometry(
}
return true;
}
+
+/*
+ * Compute the maximum level number of the realtime summary file, as defined by
+ * mkfs. The historic use of highbit32 on a 64-bit quantity prohibited correct
+ * use of rt volumes with more than 2^32 extents.
+ */
+uint8_t
+xfs_compute_rextslog(
+ xfs_rtbxlen_t rtextents)
+{
+ if (!rtextents)
+ return 0;
+ return xfs_highbit64(rtextents);
+}
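
Moving xfs_compute_rextslog() next to the other superblock helpers doesn't change its math: the summary-file level is just the index of the highest set bit of the extent count (xfs_highbit64()), so the log grows by one each time the volume crosses a power-of-two extent count, and counts above 2^32 now work where the old highbit32-based version could not. A standalone check of that arithmetic (__builtin_clzll standing in for xfs_highbit64()):

#include <assert.h>
#include <stdint.h>

/* index of the highest set bit; 0 for an empty rt volume */
static uint8_t compute_rextslog(uint64_t rtextents)
{
	if (!rtextents)
		return 0;
	return 63 - __builtin_clzll(rtextents);
}

int main(void)
{
	assert(compute_rextslog(0) == 0);
	assert(compute_rextslog(1) == 0);
	assert(compute_rextslog(1ULL << 33) == 33);	/* beyond highbit32 */
	return 0;
}
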
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 19134b23c10b..2e8e8d63d4eb 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -38,4 +38,6 @@ extern int xfs_sb_get_secondary(struct xfs_mount *mp,
extern bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
__s64 sunit, __s64 swidth, int sectorsize, bool silent);
+uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
+
#endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 20b5375f2d9c..62e02d5380ad 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -251,4 +251,16 @@ bool xfs_verify_fileoff(struct xfs_mount *mp, xfs_fileoff_t off);
bool xfs_verify_fileext(struct xfs_mount *mp, xfs_fileoff_t off,
xfs_fileoff_t len);
+/* Do we support an rt volume having this number of rtextents? */
+static inline bool
+xfs_validate_rtextents(
+ xfs_rtbxlen_t rtextents)
+{
+ /* No runt rt volumes */
+ if (rtextents == 0)
+ return false;
+
+ return true;
+}
+
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index 441ca9977652..46583517377f 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -15,6 +15,7 @@
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
+#include "xfs_sb.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/repair.h"
diff --git a/fs/xfs/scrub/rtsummary.c b/fs/xfs/scrub/rtsummary.c
index fabd0ed9dfa6..b1ff4f33324a 100644
--- a/fs/xfs/scrub/rtsummary.c
+++ b/fs/xfs/scrub/rtsummary.c
@@ -16,6 +16,7 @@
#include "xfs_rtbitmap.h"
#include "xfs_bit.h"
#include "xfs_bmap.h"
+#include "xfs_sb.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index aff20ddd4a9f..98401de832ee 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -350,7 +350,6 @@ xfs_setup_dax_always(
return -EINVAL;
}
- xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
return 0;
disable_dax:
@@ -1496,6 +1495,18 @@ xfs_fs_fill_super(
mp->m_super = sb;
+ /*
+ * Copy VFS mount flags from the context now that all parameter parsing
+ * is guaranteed to have been completed by either the old mount API or
+ * the newer fsopen/fsconfig API.
+ */
+ if (fc->sb_flags & SB_RDONLY)
+ set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+ if (fc->sb_flags & SB_DIRSYNC)
+ mp->m_features |= XFS_FEAT_DIRSYNC;
+ if (fc->sb_flags & SB_SYNCHRONOUS)
+ mp->m_features |= XFS_FEAT_WSYNC;
+
error = xfs_fs_validate_params(mp);
if (error)
return error;
@@ -1965,6 +1976,11 @@ static const struct fs_context_operations xfs_context_ops = {
.free = xfs_fs_free,
};
+/*
+ * WARNING: do not initialise any parameters in this function that depend on
+ * mount option parsing having already been performed, as this can be called from
+ * fsopen() before any parameters have been set.
+ */
static int xfs_init_fs_context(
struct fs_context *fc)
{
@@ -1996,16 +2012,6 @@ static int xfs_init_fs_context(
mp->m_logbsize = -1;
mp->m_allocsize_log = 16; /* 64k */
- /*
- * Copy binary VFS mount flags we are interested in.
- */
- if (fc->sb_flags & SB_RDONLY)
- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
- if (fc->sb_flags & SB_DIRSYNC)
- mp->m_features |= XFS_FEAT_DIRSYNC;
- if (fc->sb_flags & SB_SYNCHRONOUS)
- mp->m_features |= XFS_FEAT_WSYNC;
-
fc->s_fs_info = mp;
fc->ops = &xfs_context_ops;
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 6ab2318a9c8e..dba5dcb62bef 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -348,7 +348,12 @@ static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
struct zonefs_inode_info *zi = ZONEFS_I(inode);
if (error) {
- zonefs_io_error(inode, true);
+ /*
+ * For Sync IOs, error recovery is called from
+ * zonefs_file_dio_write().
+ */
+ if (!is_sync_kiocb(iocb))
+ zonefs_io_error(inode, true);
return error;
}
@@ -491,6 +496,14 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
ret = -EINVAL;
goto inode_unlock;
}
+ /*
+ * Advance the zone write pointer offset. This assumes that the
+ * IO will succeed, which is OK to do because we do not allow
+	 * partial writes (IOMAP_DIO_PARTIAL is not set), and if the IO
+ * fails, the error path will correct the write pointer offset.
+ */
+ z->z_wpoffset += count;
+ zonefs_inode_account_active(inode);
mutex_unlock(&zi->i_truncate_mutex);
}
@@ -504,20 +517,19 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
if (ret == -ENOTBLK)
ret = -EBUSY;
- if (zonefs_zone_is_seq(z) &&
- (ret > 0 || ret == -EIOCBQUEUED)) {
- if (ret > 0)
- count = ret;
-
- /*
- * Update the zone write pointer offset assuming the write
- * operation succeeded. If it did not, the error recovery path
- * will correct it. Also do active seq file accounting.
- */
- mutex_lock(&zi->i_truncate_mutex);
- z->z_wpoffset += count;
- zonefs_inode_account_active(inode);
- mutex_unlock(&zi->i_truncate_mutex);
+ /*
+ * For a failed IO or partial completion, trigger error recovery
+ * to update the zone write pointer offset to a correct value.
+	 * For asynchronous IOs, zonefs_file_write_dio_end_io() may already
+	 * have executed error recovery if the IO completed before we reached
+	 * this point. However, we cannot know whether it did, so we execute
+	 * error recovery again; running it a second time changes nothing.
+ */
+ if (zonefs_zone_is_seq(z)) {
+ if (ret > 0 && ret != count)
+ ret = -EIO;
+ if (ret < 0 && ret != -EIOCBQUEUED)
+ zonefs_io_error(inode, true);
}
inode_unlock:
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 93971742613a..b6e8e7c96251 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -246,16 +246,18 @@ static void zonefs_inode_update_mode(struct inode *inode)
z->z_mode = inode->i_mode;
}
-struct zonefs_ioerr_data {
- struct inode *inode;
- bool write;
-};
-
static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
void *data)
{
- struct zonefs_ioerr_data *err = data;
- struct inode *inode = err->inode;
+ struct blk_zone *z = data;
+
+ *z = *zone;
+ return 0;
+}
+
+static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone,
+ bool write)
+{
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
@@ -270,8 +272,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
data_size = zonefs_check_zone_condition(sb, z, zone);
isize = i_size_read(inode);
if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
- !err->write && isize == data_size)
- return 0;
+ !write && isize == data_size)
+ return;
/*
* At this point, we detected either a bad zone or an inconsistency
@@ -292,7 +294,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
* In all cases, warn about inode size inconsistency and handle the
* IO error according to the zone condition and to the mount options.
*/
- if (zonefs_zone_is_seq(z) && isize != data_size)
+ if (isize != data_size)
zonefs_warn(sb,
"inode %lu: invalid size %lld (should be %lld)\n",
inode->i_ino, isize, data_size);
@@ -352,8 +354,6 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
zonefs_i_size_write(inode, data_size);
z->z_wpoffset = data_size;
zonefs_inode_account_active(inode);
-
- return 0;
}
/*
@@ -367,23 +367,25 @@ void __zonefs_io_error(struct inode *inode, bool write)
{
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
- struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
unsigned int noio_flag;
- unsigned int nr_zones = 1;
- struct zonefs_ioerr_data err = {
- .inode = inode,
- .write = write,
- };
+ struct blk_zone zone;
int ret;
/*
- * The only files that have more than one zone are conventional zone
- * files with aggregated conventional zones, for which the inode zone
- * size is always larger than the device zone size.
+	 * Conventional zones have no write pointer and cannot become read-only
+ * or offline. So simply fake a report for a single or aggregated zone
+ * and let zonefs_handle_io_error() correct the zone inode information
+ * according to the mount options.
*/
- if (z->z_size > bdev_zone_sectors(sb->s_bdev))
- nr_zones = z->z_size >>
- (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+ if (!zonefs_zone_is_seq(z)) {
+ zone.start = z->z_sector;
+ zone.len = z->z_size >> SECTOR_SHIFT;
+ zone.wp = zone.start + zone.len;
+ zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
+ zone.cond = BLK_ZONE_COND_NOT_WP;
+ zone.capacity = zone.len;
+ goto handle_io_error;
+ }
/*
* Memory allocations in blkdev_report_zones() can trigger a memory
@@ -394,12 +396,20 @@ void __zonefs_io_error(struct inode *inode, bool write)
* the GFP_NOIO context avoids both problems.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
- zonefs_io_error_cb, &err);
- if (ret != nr_zones)
+ ret = blkdev_report_zones(sb->s_bdev, z->z_sector, 1,
+ zonefs_io_error_cb, &zone);
+ memalloc_noio_restore(noio_flag);
+
+ if (ret != 1) {
zonefs_err(sb, "Get inode %lu zone information failed %d\n",
inode->i_ino, ret);
- memalloc_noio_restore(noio_flag);
+ zonefs_warn(sb, "remounting filesystem read-only\n");
+ sb->s_flags |= SB_RDONLY;
+ return;
+ }
+
+handle_io_error:
+ zonefs_handle_io_error(inode, &zone, write);
}
static struct kmem_cache *zonefs_inode_cachep;
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 43e18db89c14..ad928cce268b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -2,6 +2,8 @@
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H
+#include <linux/bitops.h>
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -31,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (__force __sum16)~sum;
+ return (__force __sum16)((~sum - ror32(sum, 16)) >> 16);
}
#endif
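The rewritten csum_fold() above replaces the classic double-fold with a single subtract-and-shift. The two forms are equivalent for every 32-bit input; a small stand-alone check, assuming only standard C and a ror32() matching the kernel helper:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t ror32(uint32_t w, unsigned int s)
	{
		return (w >> s) | (w << (32 - s));
	}

	static uint16_t fold_old(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	static uint16_t fold_new(uint32_t sum)
	{
		return (uint16_t)((~sum - ror32(sum, 16)) >> 16);
	}

	int main(void)
	{
		/* sample the full 32-bit space in coprime steps */
		for (uint64_t s = 0; s <= 0xffffffffull; s += 0x10001)
			assert(fold_old((uint32_t)s) == fold_new((uint32_t)s));
		return 0;
	}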
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
index c4c423e97f06..4453906105ca 100644
--- a/include/drm/bridge/aux-bridge.h
+++ b/include/drm/bridge/aux-bridge.h
@@ -9,6 +9,8 @@
#include <drm/drm_connector.h>
+struct auxiliary_device;
+
#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
int drm_aux_bridge_register(struct device *parent);
#else
@@ -19,10 +21,23 @@ static inline int drm_aux_bridge_register(struct device *parent)
#endif
#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np);
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev);
struct device *drm_dp_hpd_bridge_register(struct device *parent,
struct device_node *np);
void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
#else
+static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline int devm_drm_dp_hpd_bridge_add(struct device *dev,
+					     struct auxiliary_device *adev)
+{
+ return 0;
+}
+
static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
struct device_node *np)
{
diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h
new file mode 100644
index 000000000000..fd11478cfe9c
--- /dev/null
+++ b/include/dt-bindings/dma/fsl-edma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _FSL_EDMA_DT_BINDING_H_
+#define _FSL_EDMA_DT_BINDING_H_
+
+/* Receive Channel */
+#define FSL_EDMA_RX 0x1
+
+/* iMX8 audio remote DMA */
+#define FSL_EDMA_REMOTE 0x2
+
+/* FIFOs occupy a contiguous memory region */
+#define FSL_EDMA_MULTI_FIFO 0x4
+
+/* Channel must be an even-numbered channel */
+#define FSL_EDMA_EVEN_CH 0x8
+
+/* Channel must be an odd-numbered channel */
+#define FSL_EDMA_ODD_CH 0x10
+
+#endif
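The flags above are OR'd together in a consumer's DMA specifier and decoded by the driver. An illustrative decode of the channel parity constraints — the function and parameter names are made up, only the FSL_EDMA_* flags are real:

	#include <dt-bindings/dma/fsl-edma.h>

	static int check_channel_parity(unsigned int flags, unsigned int chan_id)
	{
		if ((flags & FSL_EDMA_EVEN_CH) && (chan_id & 1))
			return -1;	/* even-only flag, odd channel */
		if ((flags & FSL_EDMA_ODD_CH) && !(chan_id & 1))
			return -1;	/* odd-only flag, even channel */
		return 0;
	}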
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index ae12696ec492..2ad261082bba 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -141,8 +141,6 @@ struct bdi_writeback {
struct delayed_work dwork; /* work item used for writeback */
struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
- unsigned long dirty_sleep; /* last wait */
-
struct list_head bdi_node; /* anchored at bdi->wb_list */
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -179,6 +177,11 @@ struct backing_dev_info {
* any dirty wbs, which is depended upon by bdi_has_dirty().
*/
atomic_long_t tot_write_bandwidth;
+ /*
+ * Jiffies when last process was dirty throttled on this bdi. Used by
+ * blk-wbt.
+ */
+ unsigned long last_bdp_sleep;
struct bdi_writeback wb; /* the root writeback info for this bdi */
struct list_head wb_list; /* list of all wbs */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ec4db73e5f4e..875d792bffff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -286,6 +286,11 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
{
struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+ if (unlikely(i >= bio->bi_vcnt)) {
+ fi->folio = NULL;
+ return;
+ }
+
fi->folio = page_folio(bvec->bv_page);
fi->offset = bvec->bv_offset +
PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
@@ -303,10 +308,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
fi->offset = 0;
fi->length = min(folio_size(fi->folio), fi->_seg_count);
fi->_next = folio_next(fi->folio);
- } else if (fi->_i + 1 < bio->bi_vcnt) {
- bio_first_folio(fi, bio, fi->_i + 1);
} else {
- fi->folio = NULL;
+ bio_first_folio(fi, bio, fi->_i + 1);
}
}
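For reference, the NULL-folio sentinel that bio_first_folio() now sets past bi_vcnt is what terminates the existing iterator; a usage sketch, assuming a bio in scope:

	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		pr_debug("folio: %zu bytes at intra-folio offset %zu\n",
			 fi.length, fi.offset);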
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 99451431e4d6..5814e9ee40ba 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -54,6 +54,7 @@ struct device;
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
+ * bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
@@ -169,6 +170,8 @@ bool __bitmap_subset(const unsigned long *bitmap1,
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@ -425,6 +428,15 @@ unsigned long bitmap_weight_and(const unsigned long *src1,
return __bitmap_weight_and(src1, src2, nbits);
}
+static __always_inline
+unsigned long bitmap_weight_andnot(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_andnot(src1, src2, nbits);
+}
+
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a676e116085f..7a8150a5f051 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -391,9 +391,6 @@ struct blk_mq_hw_ctx {
*/
struct blk_mq_tags *sched_tags;
- /** @run: Number of dispatched requests. */
- unsigned long run;
-
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
/** @queue_num: Index of this hardware queue. */
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a789266feac3..fb3c3e7181e6 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -196,7 +196,8 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
- cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk && \
+ sk_fullsock(sk)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
CGROUP_INET_INGRESS); \
\
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e30100597d0a..814dc913a968 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -52,6 +52,10 @@ struct module;
struct bpf_func_state;
struct ftrace_ops;
struct cgroup;
+struct bpf_token;
+struct user_namespace;
+struct super_block;
+struct inode;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -247,10 +251,7 @@ struct bpf_list_node_kern {
} __attribute__((aligned(8)));
struct bpf_map {
- /* The first two cachelines with read-mostly members of which some
- * are also accessed in fast-path (e.g. ops, max_entries).
- */
- const struct bpf_map_ops *ops ____cacheline_aligned;
+ const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
void *security;
@@ -272,17 +273,14 @@ struct bpf_map {
struct obj_cgroup *objcg;
#endif
char name[BPF_OBJ_NAME_LEN];
- /* The 3rd and 4th cacheline with misc members to avoid false sharing
- * particularly with refcounting.
- */
- atomic64_t refcnt ____cacheline_aligned;
+ struct mutex freeze_mutex;
+ atomic64_t refcnt;
atomic64_t usercnt;
/* rcu is used before freeing and work is only used during freeing */
union {
struct work_struct work;
struct rcu_head rcu;
};
- struct mutex freeze_mutex;
atomic64_t writecnt;
/* 'Ownership' of program-containing map is claimed by the first program
* that is going to use this map or by the first program which FD is
@@ -1185,7 +1183,6 @@ struct bpf_trampoline {
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
struct bpf_tramp_image *cur_image;
- struct module *mod;
};
struct bpf_attach_target_info {
@@ -1412,6 +1409,7 @@ struct bpf_jit_poke_descriptor {
struct bpf_ctx_arg_aux {
u32 offset;
enum bpf_reg_type reg_type;
+ struct btf *btf;
u32 btf_id;
};
@@ -1485,6 +1483,7 @@ struct bpf_prog_aux {
#ifdef CONFIG_SECURITY
void *security;
#endif
+ struct bpf_token *token;
struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
@@ -1609,6 +1608,31 @@ struct bpf_link_primer {
u32 id;
};
+struct bpf_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+
+ /* BPF token-related delegation options */
+ u64 delegate_cmds;
+ u64 delegate_maps;
+ u64 delegate_progs;
+ u64 delegate_attachs;
+};
+
+struct bpf_token {
+ struct work_struct work;
+ atomic64_t refcnt;
+ struct user_namespace *userns;
+ u64 allowed_cmds;
+ u64 allowed_maps;
+ u64 allowed_progs;
+ u64 allowed_attachs;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+};
+
struct bpf_struct_ops_value;
struct btf_member;
@@ -1673,19 +1697,64 @@ struct bpf_struct_ops {
void (*unreg)(void *kdata);
int (*update)(void *kdata, void *old_kdata);
int (*validate)(void *kdata);
- const struct btf_type *type;
- const struct btf_type *value_type;
+ void *cfi_stubs;
+ struct module *owner;
const char *name;
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+};
+
+/* Every member of a struct_ops type has an entry here, even when the
+ * member is not an operator (function pointer). The "info" field will
+ * be assigned to prog->aux->ctx_arg_info of BPF struct_ops programs to
+ * provide the argument information required by the verifier to verify
+ * the program.
+ *
+ * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
+ * corresponding entry for a given argument.
+ */
+struct bpf_struct_ops_arg_info {
+ struct bpf_ctx_arg_aux *info;
+ u32 cnt;
+};
+
+struct bpf_struct_ops_desc {
+ struct bpf_struct_ops *st_ops;
+
+ const struct btf_type *type;
+ const struct btf_type *value_type;
u32 type_id;
u32 value_id;
- void *cfi_stubs;
+
+ /* Collection of argument information for each member */
+ struct bpf_struct_ops_arg_info *arg_info;
+};
+
+enum bpf_struct_ops_state {
+ BPF_STRUCT_OPS_STATE_INIT,
+ BPF_STRUCT_OPS_STATE_INUSE,
+ BPF_STRUCT_OPS_STATE_TOBEFREE,
+ BPF_STRUCT_OPS_STATE_READY,
+};
+
+struct bpf_struct_ops_common_value {
+ refcount_t refcnt;
+ enum bpf_struct_ops_state state;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+/* This macro helps developers register a struct_ops type and generate
+ * its BTF type information correctly. Use it instead of calling
+ * __register_bpf_struct_ops() directly.
+ */
+#define register_bpf_struct_ops(st_ops, type) \
+ ({ \
+ struct bpf_struct_ops_##type { \
+ struct bpf_struct_ops_common_value common; \
+ struct type data ____cacheline_aligned_in_smp; \
+ }; \
+ BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \
+ __register_bpf_struct_ops(st_ops); \
+ })
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
-const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
-void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
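For context, a subsystem would now register its struct_ops type roughly as below. This is a hypothetical sketch of the pattern the macro enables — "my_ops" stands in for a real ops struct (e.g. tcp_congestion_ops), and only the fields visible in this series are filled in:

	static struct bpf_struct_ops bpf_my_ops = {
		.name		= "my_ops",
		.owner		= THIS_MODULE,
		.cfi_stubs	= &my_ops_cfi_stubs,	/* hypothetical */
	};

	static int __init my_ops_init(void)
	{
		/* Emits BTF for struct bpf_struct_ops_my_ops { common; data; }
		 * and hands the descriptor to the registry. */
		return register_bpf_struct_ops(&bpf_my_ops, my_ops);
	}
	late_initcall(my_ops_init);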
@@ -1727,15 +1796,13 @@ struct bpf_dummy_ops {
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
#endif
+int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ struct btf *btf,
+ struct bpf_verifier_log *log);
+void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
#else
-static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
-{
- return NULL;
-}
-static inline void bpf_struct_ops_init(struct btf *btf,
- struct bpf_verifier_log *log)
-{
-}
+#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
return try_module_get(owner);
@@ -1754,6 +1821,13 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
{
return -EOPNOTSUPP;
}
+static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+}
+
+static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+{
+}
#endif
@@ -2068,6 +2142,7 @@ static inline void bpf_enable_instrumentation(void)
migrate_enable();
}
+extern const struct super_operations bpf_super_ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;
@@ -2202,24 +2277,26 @@ static inline void bpf_map_dec_elem_count(struct bpf_map *map)
extern int sysctl_unprivileged_bpf_disabled;
-static inline bool bpf_allow_ptr_leaks(void)
+bool bpf_token_capable(const struct bpf_token *token, int cap);
+
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_allow_uninit_stack(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
- return cpu_mitigations_off() || perfmon_capable();
+ return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
- return cpu_mitigations_off() || perfmon_capable();
+ return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
@@ -2236,8 +2313,21 @@ int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
+void bpf_token_inc(struct bpf_token *token);
+void bpf_token_put(struct bpf_token *token);
+int bpf_token_create(union bpf_attr *attr);
+struct bpf_token *bpf_token_get_from_fd(u32 ufd);
+
+bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
+bool bpf_token_allow_prog_type(const struct bpf_token *token,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type attach_type);
+
int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
+struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
+ umode_t mode);
#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...) \
@@ -2472,11 +2562,14 @@ int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *pr
struct btf *btf, const struct btf_type *t);
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
int comp_idx, const char *tag_key);
+int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key, int last_id);
struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
-const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
void bpf_task_storage_free(struct task_struct *task);
void bpf_cgrp_storage_free(struct cgroup *cgroup);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
@@ -2595,6 +2688,24 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
return -EOPNOTSUPP;
}
+static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
+}
+
+static inline void bpf_token_inc(struct bpf_token *token)
+{
+}
+
+static inline void bpf_token_put(struct bpf_token *token)
+{
+}
+
+static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline void __dev_flush(void)
{
}
@@ -2718,7 +2829,7 @@ static inline int btf_struct_access(struct bpf_verifier_log *log,
}
static inline const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
return NULL;
}
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 173ec7f43ed1..dcddb0aef7d8 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -129,10 +129,36 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
struct bpf_local_storage_cache *cache,
bool bpf_ma);
-struct bpf_local_storage_data *
+void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+/* If cacheit_lockit is false, this lookup function is lockless */
+static inline struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
struct bpf_local_storage_map *smap,
- bool cacheit_lockit);
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage_data *sdata;
+ struct bpf_local_storage_elem *selem;
+
+ /* Fast path (cache hit) */
+ sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
+ bpf_rcu_lock_held());
+ if (sdata && rcu_access_pointer(sdata->smap) == smap)
+ return sdata;
+
+ /* Slow path (cache miss) */
+ hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
+ rcu_read_lock_trace_held())
+ if (rcu_access_pointer(SDATA(selem)->smap) == smap)
+ break;
+
+ if (!selem)
+ return NULL;
+ if (cacheit_lockit)
+ __bpf_local_storage_insert_cache(local_storage, smap, selem);
+ return SDATA(selem);
+}
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d07d857ca67f..84365e6dd85d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -453,7 +453,7 @@ struct bpf_verifier_state {
#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
- ((1 << frame->stack[slot].slot_type[0]) & (mask))) \
+ ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
@@ -610,6 +610,7 @@ struct bpf_subprog_arg_info {
enum bpf_arg_type arg_type;
union {
u32 mem_size;
+ u32 btf_id;
};
};
@@ -662,6 +663,7 @@ struct bpf_verifier_env {
u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
const struct bpf_verifier_ops *ops;
+ struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
@@ -917,6 +919,15 @@ static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
env->scratched_stack_slots = ~0ULL;
}
+static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
+{
+#ifdef __BIG_ENDIAN
+ off -= spill_size - fill_size;
+#endif
+
+ return !(off % BPF_REG_SIZE);
+}
+
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
diff --git a/include/linux/btf.h b/include/linux/btf.h
index cf5c6ff48981..f9e56fd12a9f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -137,6 +137,7 @@ struct btf_struct_metas {
extern const struct file_operations btf_fops;
+const char *btf_get_name(const struct btf *btf);
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
@@ -494,8 +495,26 @@ static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
}
+bool btf_param_match_suffix(const struct btf *btf,
+ const struct btf_param *arg,
+ const char *suffix);
+int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ u32 arg_no);
+
struct bpf_verifier_log;
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_struct_ops;
+int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find_value(struct btf *btf, u32 value_id);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id);
+#else
+static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
@@ -512,10 +531,9 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
struct module *owner);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
-const struct btf_type *
-btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
- const struct btf_type *t, enum bpf_prog_type prog_type,
- int arg);
+bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg);
int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
bool btf_types_are_same(const struct btf *btf1, u32 id1,
const struct btf *btf2, u32 id2);
@@ -555,12 +573,12 @@ static inline struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf
{
return NULL;
}
-static inline const struct btf_member *
-btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
- const struct btf_type *t, enum bpf_prog_type prog_type,
- int arg)
+static inline bool
+btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
{
- return NULL;
+ return false;
}
static inline int get_kern_ctx_btf_id(struct bpf_verifier_log *log,
enum bpf_prog_type prog_type) {
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index a9cb10b0e2e9..e24aabfe8ecc 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -8,6 +8,9 @@ struct btf_id_set {
u32 ids[];
};
+/* This flag implies BTF_SET8 holds kfunc(s) */
+#define BTF_SET8_KFUNCS (1 << 0)
+
struct btf_id_set8 {
u32 cnt;
u32 flags;
@@ -21,6 +24,7 @@ struct btf_id_set8 {
#include <linux/compiler.h> /* for __PASTE */
#include <linux/compiler_attributes.h> /* for __maybe_unused */
+#include <linux/stringify.h>
/*
* Following macros help to define lists of BTF IDs placed
@@ -183,17 +187,18 @@ extern struct btf_id_set name;
* .word (1 << 3) | (1 << 1) | (1 << 2)
*
*/
-#define __BTF_SET8_START(name, scope) \
+#define __BTF_SET8_START(name, scope, flags) \
+__BTF_ID_LIST(name, local) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " __BTF_ID__set8__" #name "; \n" \
"__BTF_ID__set8__" #name ":; \n" \
-".zero 8 \n" \
+".zero 4 \n" \
+".long " __stringify(flags) "\n" \
".popsection; \n");
#define BTF_SET8_START(name) \
-__BTF_ID_LIST(name, local) \
-__BTF_SET8_START(name, local)
+__BTF_SET8_START(name, local, 0)
#define BTF_SET8_END(name) \
asm( \
@@ -202,6 +207,12 @@ asm( \
".popsection; \n"); \
extern struct btf_id_set8 name;
+#define BTF_KFUNCS_START(name) \
+__BTF_SET8_START(name, local, BTF_SET8_KFUNCS)
+
+#define BTF_KFUNCS_END(name) \
+BTF_SET8_END(name)
+
#else
#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
@@ -216,6 +227,8 @@ extern struct btf_id_set8 name;
#define BTF_SET_END(name)
#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
#define BTF_SET8_END(name)
+#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS };
+#define BTF_KFUNCS_END(name)
#endif /* CONFIG_DEBUG_INFO_BTF */
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 555aae5448ae..bd1e361b351c 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -83,7 +83,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
-} __packed;
+} __packed __aligned(4);
struct bvec_iter_all {
struct bio_vec bv;
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 2eaaabbe98cb..1717cc57cdac 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -283,7 +283,7 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
- bool sparse_read;
+ u64 sparse_read_total;
int front_alloc_len;
struct ceph_msgpool *pool;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index b8610e9d2471..f66f6aac74f6 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -45,6 +45,7 @@ enum ceph_sparse_read_state {
CEPH_SPARSE_READ_HDR = 0,
CEPH_SPARSE_READ_EXTENTS,
CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA_PRE,
CEPH_SPARSE_READ_DATA,
};
@@ -64,7 +65,7 @@ struct ceph_sparse_read {
u64 sr_req_len; /* orig request length */
u64 sr_pos; /* current pos in buffer */
int sr_index; /* current extent index */
- __le32 sr_datalen; /* length of actual data */
+ u32 sr_datalen; /* length of actual data */
u32 sr_count; /* extent count in reply */
int sr_ext_len; /* length of extent array */
struct ceph_sparse_extent *sr_extent; /* extent array */
@@ -572,9 +573,12 @@ int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
*/
#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
-static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op)
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
{
- return __ceph_alloc_sparse_ext_map(op, CEPH_SPARSE_EXT_ARRAY_INITIAL);
+ if (!cnt)
+ cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL;
+
+ return __ceph_alloc_sparse_ext_map(op, cnt);
}
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
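The extra count argument lets callers that already parsed the extent count out of a reply size the map exactly, while passing 0 keeps the previous default. A sketch, assuming op, known_cnt and ret are in scope:

	ret = ceph_alloc_sparse_ext_map(op, known_cnt);	/* exact sizing */
	ret = ceph_alloc_sparse_ext_map(op, 0);	/* CEPH_SPARSE_EXT_ARRAY_INITIAL */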
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index aebb65bf95a7..75bd1692d2e3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -64,6 +64,26 @@
__builtin_unreachable(); \
} while (0)
+/*
+ * GCC 'asm goto' with outputs miscompiles certain code sequences:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
+ *
+ * Work around it via the same compiler barrier quirk that we used
+ * to use for the old 'asm goto' workaround.
+ *
+ * Also, always mark such 'asm goto' statements as volatile: all
+ * asm goto statements are supposed to be volatile as per the
+ * documentation, but some versions of gcc didn't actually do
+ * that for asms with outputs:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619
+ */
+#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND
+#define asm_goto_output(x...) \
+ do { asm volatile goto(x); asm (""); } while (0)
+#endif
+
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
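To make the workaround concrete, an 'asm goto' with an output operand looks like the sketch below (x86 AT&T syntax, illustrative only; the asm here never actually jumps). On affected compilers the CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND definition above appends an empty asm barrier after such a statement:

	static inline bool read_flag(int *res)
	{
		asm_goto_output("movl $1, %0"
				: "=r" (*res)
				: /* no inputs */
				: /* no clobbers */
				: failed);
		return true;	/* fall-through: *res was written */
	failed:
		return false;	/* jump target: *res unspecified */
	}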
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6f1ca49306d2..0caf354cb94b 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -362,8 +362,15 @@ struct ftrace_likely_data {
#define __member_size(p) __builtin_object_size(p, 1)
#endif
-#ifndef asm_volatile_goto
-#define asm_volatile_goto(x...) asm goto(x)
+/*
+ * Some versions of gcc do not mark 'asm goto' volatile:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
+ *
+ * We do it here by hand, because it doesn't hurt.
+ */
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm volatile goto(x)
#endif
#ifdef CONFIG_CC_HAS_ASM_INLINE
diff --git a/include/linux/cper.h b/include/linux/cper.h
index c1a7dc325121..265b0f8fc0b3 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -90,6 +90,29 @@ enum {
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL event record UUIDs are formatted as GUIDs and reported in the section type */
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CPER_SEC_CXL_GEN_MEDIA_GUID \
+ GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CPER_SEC_CXL_DRAM_GUID \
+ GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
+ 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CPER_SEC_CXL_MEM_MODULE_GUID \
+ GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
+ 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
+
/*
* Flags bits definitions for flags in struct cper_record_header
* If set, the error has been recovered
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index cfb545841a2c..1c29947db848 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -7,6 +7,7 @@
* set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
@@ -720,6 +721,19 @@ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
}
/**
+ * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask whose set bits are excluded from the count.
+ *
+ * Return: count of bits set in *srcp1 and cleared in *srcp2
+ */
+static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
* cpumask_shift_right - *dstp = *srcp >> n
* @dstp: the cpumask result
* @srcp: the input to shift
@@ -977,6 +991,8 @@ static inline bool cpumask_available(cpumask_var_t mask)
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
+DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
+
/* It's common to want to use cpu_all_mask in struct member initializers,
* so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
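Together with cpumask_weight_andnot() above, the new DEFINE_FREE() guard allows scope-based cleanup of cpumask allocations. A sketch, assuming CONFIG_CPUMASK_OFFSTACK=y (where cpumask_var_t is a struct cpumask pointer); the function itself is made up:

	static int weigh_online_not_isolated(const struct cpumask *isolated)
	{
		struct cpumask *tmp __free(free_cpumask_var) = NULL;

		if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(tmp, cpu_online_mask);
		/* bits set in tmp but not in isolated */
		return cpumask_weight_andnot(tmp, isolated);
		/* tmp is freed automatically when it goes out of scope */
	}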
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
index 91125eca4c8a..03fa6d50d46f 100644
--- a/include/linux/cxl-event.h
+++ b/include/linux/cxl-event.h
@@ -140,22 +140,4 @@ struct cxl_cper_event_rec {
union cxl_event event;
} __packed;
-typedef void (*cxl_cper_callback)(enum cxl_event_type type,
- struct cxl_cper_event_rec *rec);
-
-#ifdef CONFIG_ACPI_APEI_GHES
-int cxl_cper_register_callback(cxl_cper_callback callback);
-int cxl_cper_unregister_callback(cxl_cper_callback callback);
-#else
-static inline int cxl_cper_register_callback(cxl_cper_callback callback)
-{
- return 0;
-}
-
-static inline int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
- return 0;
-}
-#endif
-
#endif /* _LINUX_CXL_EVENT_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 1666c387861f..d07cf2f1bb7d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -173,6 +173,7 @@ struct dentry_operations {
#define DCACHE_DONTCACHE BIT(7) /* Purge from memory on final dput() */
#define DCACHE_CANT_MOUNT BIT(8)
+#define DCACHE_GENOCIDE BIT(9)
#define DCACHE_SHRINK_LIST BIT(10)
#define DCACHE_OP_WEAK_REVALIDATE BIT(11)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3df70d6131c8..752dbde4cec1 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -953,7 +953,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
+ (direction == DMA_DEV_TO_DEV);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 9cf896ea1d41..d275736230b3 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -10,6 +10,8 @@
#include <uapi/linux/dpll.h>
#include <linux/device.h>
#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
struct dpll_device;
struct dpll_pin;
@@ -19,6 +21,7 @@ struct dpll_device_ops {
enum dpll_mode *mode, struct netlink_ext_ack *extack);
int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
struct netlink_ext_ack *extack);
int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
s32 *temp, struct netlink_ext_ack *extack);
@@ -120,15 +123,24 @@ struct dpll_pin_properties {
};
#if IS_ENABLED(CONFIG_DPLL)
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin);
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin);
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void dpll_netdev_pin_clear(struct net_device *dev);
+
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev);
#else
-static inline size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
+static inline void
+dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
+static inline void dpll_netdev_pin_clear(struct net_device *dev) { }
+
+static inline size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
{
return 0;
}
-static inline int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static inline int
+dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
{
return 0;
}
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 407c2f281b64..5693a4be0d9a 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,14 +38,22 @@
#ifdef __KERNEL__
+#include <linux/bitops.h>
#include <asm/bug.h>
+#define DQL_HIST_LEN 4
+#define DQL_HIST_ENT(dql, idx) ((dql)->history[(idx) % DQL_HIST_LEN])
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
unsigned int adj_limit; /* limit + num_completed */
unsigned int last_obj_cnt; /* Count at last queuing */
+ unsigned long history_head; /* top 58 bits of jiffies */
+ /* stall entries, a bit per entry */
+ unsigned long history[DQL_HIST_LEN];
+
/* Fields accessed only by completion path (dql_completed) */
unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
@@ -62,6 +70,13 @@ struct dql {
unsigned int max_limit; /* Max limit */
unsigned int min_limit; /* Minimum limit */
unsigned int slack_hold_time; /* Time to measure slack */
+
+ /* Stall threshold (in jiffies), defined by user */
+ unsigned short stall_thrs;
+ /* Longest stall detected, reported to user */
+ unsigned short stall_max;
+ unsigned long last_reap; /* Last reap (in jiffies) */
+ unsigned long stall_cnt; /* Number of stalls */
};
/* Set some static maximums */
@@ -74,6 +89,8 @@ struct dql {
*/
static inline void dql_queued(struct dql *dql, unsigned int count)
{
+ unsigned long map, now, now_hi, i;
+
BUG_ON(count > DQL_MAX_OBJECT);
dql->last_obj_cnt = count;
@@ -86,6 +103,34 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
barrier();
dql->num_queued += count;
+
+ now = jiffies;
+ now_hi = now / BITS_PER_LONG;
+
+	/* The following code sets a bit in the ring buffer, where each
+	 * bit tracks the time at which a packet was queued. The
+	 * dql->history buffer covers DQL_HIST_LEN * BITS_PER_LONG time
+	 * (jiffies) slots.
+	 */
+ if (unlikely(now_hi != dql->history_head)) {
+ /* About to reuse slots, clear them */
+ for (i = 0; i < DQL_HIST_LEN; i++) {
+ /* Multiplication masks high bits */
+ if (now_hi * BITS_PER_LONG ==
+ (dql->history_head + i) * BITS_PER_LONG)
+ break;
+ DQL_HIST_ENT(dql, dql->history_head + i + 1) = 0;
+ }
+ /* pairs with smp_rmb() in dql_check_stall() */
+ smp_wmb();
+ WRITE_ONCE(dql->history_head, now_hi);
+ }
+
+ /* __set_bit() does not guarantee WRITE_ONCE() semantics */
+ map = DQL_HIST_ENT(dql, now_hi);
+
+	/* Populate the history with an entry (bit) per queued packet */
+ if (!(map & BIT_MASK(now)))
+ WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now));
}
/* Returns how many objects can be queued, < 0 indicates over limit. */
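The ring-buffer indexing above reduces to: each jiffy maps to one bit, BITS_PER_LONG jiffies fill one history word, and DQL_HIST_LEN words rotate. A plain-C sketch of the mapping, mirroring DQL_HIST_ENT() and BIT_MASK():

	/* For a timestamp 'now' in jiffies:
	 *   word = (now / BITS_PER_LONG) % DQL_HIST_LEN  -- which slot
	 *   bit  = now % BITS_PER_LONG                   -- which bit
	 * so the buffer covers DQL_HIST_LEN * BITS_PER_LONG jiffies
	 * before a slot is reused.
	 */
	static void mark_queued(unsigned long history[DQL_HIST_LEN],
				unsigned long now)
	{
		unsigned long word = (now / BITS_PER_LONG) % DQL_HIST_LEN;

		history[word] |= 1UL << (now % BITS_PER_LONG);
	}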
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 325e0778e937..9901e563f706 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -222,6 +222,16 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct ethtool_keee {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_active;
+ bool eee_enabled;
+};
+
struct kernel_ethtool_coalesce {
u8 use_cqe_mode_tx;
u8 use_cqe_mode_rx;
@@ -892,8 +902,8 @@ struct ethtool_ops {
struct ethtool_modinfo *);
int (*get_module_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_eee)(struct net_device *, struct ethtool_eee *);
- int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_eee)(struct net_device *dev, struct ethtool_keee *eee);
+ int (*set_eee)(struct net_device *dev, struct ethtool_keee *eee);
int (*get_tunable)(struct net_device *,
const struct ethtool_tunable *, void *);
int (*set_tunable)(struct net_device *,
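Drivers converting to the new EEE callbacks fill the link-mode masks in struct ethtool_keee with the linkmode helpers instead of the old u32 bitmaps. A hypothetical callback — the driver name and advertised mode are made up:

	static int foo_get_eee(struct net_device *dev, struct ethtool_keee *eee)
	{
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 eee->supported);
		linkmode_copy(eee->advertised, eee->supported);
		eee->eee_enabled = true;
		eee->tx_lpi_enabled = true;
		return 0;
	}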
diff --git a/include/linux/export.h b/include/linux/export.h
index 9911508a9604..0bbd02fd351d 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -7,15 +7,6 @@
#include <linux/stringify.h>
/*
- * Export symbols from the kernel to modules. Forked from module.h
- * to reduce the amount of pointless cruft we feed to gcc when only
- * exporting a simple symbol or two.
- *
- * Try not to add #includes here. It slows compilation and makes kernel
- * hackers place grumpy comments in header files.
- */
-
-/*
* This comment block is used by fixdep. Please do not remove.
*
* When CONFIG_MODVERSIONS is changed from n to y, all source files having
@@ -23,15 +14,6 @@
* side effect of the *.o build rule.
*/
-#ifndef __ASSEMBLY__
-#ifdef MODULE
-extern struct module __this_module;
-#define THIS_MODULE (&__this_module)
-#else
-#define THIS_MODULE ((struct module *)0)
-#endif
-#endif /* __ASSEMBLY__ */
-
#ifdef CONFIG_64BIT
#define __EXPORT_SYMBOL_REF(sym) \
.balign 8 ASM_NL \
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 68fb6c8142fe..36cc29a2934c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -547,24 +547,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
u64, __ur_3, u64, __ur_4, u64, __ur_5)
-#define BPF_CALL_x(x, name, ...) \
+#define BPF_CALL_x(x, attr, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
@@ -955,6 +958,7 @@ bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_jit_supports_exceptions(void);
+bool bpf_jit_supports_ptr_xchg(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
@@ -1139,7 +1143,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && bpf_capable())
+ if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
return true;
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 79ef6ac4c021..89a6888f2f9e 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -214,51 +214,6 @@ __kernel_size_t __fortify_strlen(const char * const POS p)
return ret;
}
-/* Defined after fortified strlen() to reuse it. */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-/**
- * strlcpy - Copy a string into another string buffer
- *
- * @p: pointer to destination of copy
- * @q: pointer to NUL-terminated source string to copy
- * @size: maximum number of bytes to write at @p
- *
- * If strlen(@q) >= @size, the copy of @q will be truncated at
- * @size - 1 bytes. @p will always be NUL-terminated.
- *
- * Do not use this function. While FORTIFY_SOURCE tries to avoid
- * over-reads when calculating strlen(@q), it is still possible.
- * Prefer strscpy(), though note its different return values for
- * detecting truncation.
- *
- * Returns total number of bytes written to @p, including terminating NUL.
- *
- */
-__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
-{
- const size_t p_size = __member_size(p);
- const size_t q_size = __member_size(q);
- size_t q_len; /* Full count of source string length. */
- size_t len; /* Count of characters going into destination. */
-
- if (p_size == SIZE_MAX && q_size == SIZE_MAX)
- return __real_strlcpy(p, q, size);
- q_len = strlen(q);
- len = (q_len >= size) ? size - 1 : q_len;
- if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) {
- /* Write size is always larger than destination. */
- if (len >= p_size)
- __write_overflow();
- }
- if (size) {
- if (len >= p_size)
- fortify_panic(__func__);
- __underlying_memcpy(p, q, len);
- p[len] = '\0';
- }
- return q_len;
-}
-
/* Defined after fortified strnlen() to reuse it. */
extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
/**
@@ -272,12 +227,6 @@ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
* @p buffer. The behavior is undefined if the string buffers overlap. The
* destination @p buffer is always NUL terminated, unless it's zero-sized.
*
- * Preferred to strlcpy() since the API doesn't require reading memory
- * from the source @q string beyond the specified @size bytes, and since
- * the return value is easier to error-check than strlcpy()'s.
- * In addition, the implementation is robust to the string changing out
- * from underneath it, unlike the current strlcpy() implementation.
- *
* Preferred to strncpy() since it always returns a valid string, and
* doesn't unnecessarily force the tail of the destination buffer to be
* zero padded. If padding is desired please use strscpy_pad().
diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h
index 782cd5fc83d5..9724d4b44b9c 100644
--- a/include/linux/framer/framer-provider.h
+++ b/include/linux/framer/framer-provider.h
@@ -83,7 +83,6 @@ struct framer_ops {
/**
* struct framer_provider - represents the framer provider
* @dev: framer provider device
- * @children: can be used to override the default (dev->of_node) child node
* @owner: the module owner having of_xlate
* @list: to maintain a linked list of framer providers
* @of_xlate: function pointer to obtain framer instance from framer pointer
@@ -93,7 +92,7 @@ struct framer_provider {
struct module *owner;
struct list_head list;
struct framer * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
};
static inline void framer_set_drvdata(struct framer *framer, void *data)
@@ -118,19 +117,19 @@ struct framer *devm_framer_create(struct device *dev, struct device_node *node,
const struct framer_ops *ops);
struct framer *framer_provider_simple_of_xlate(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
void framer_provider_of_unregister(struct framer_provider *framer_provider);
struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
void framer_notify_status_change(struct framer *framer);
@@ -154,7 +153,7 @@ static inline struct framer *devm_framer_create(struct device *dev, struct devic
}
static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
return ERR_PTR(-ENOSYS);
}
@@ -162,7 +161,7 @@ static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
static inline struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
@@ -174,7 +173,7 @@ void framer_provider_of_unregister(struct framer_provider *framer_provider)
static inline struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e6ba0cc6f2ee..1fbc72c5f112 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -352,6 +352,8 @@ enum rw_hint {
* unrelated IO (like cache flushing, new IO generation, etc).
*/
#define IOCB_DIO_CALLER_COMP (1 << 22)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW (1 << 23)
/* for use in trace events */
#define TRACE_IOCB_STRINGS \
@@ -2101,9 +2103,6 @@ int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *count, unsigned int remap_flags);
-extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t len, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
@@ -2371,7 +2370,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
#define I_CREATING (1 << 15)
#define I_DONTCACHE (1 << 16)
#define I_SYNC_QUEUED (1 << 17)
-#define I_PINNING_FSCACHE_WB (1 << 18)
+#define I_PINNING_NETFS_WB (1 << 18)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index a174cedf4d90..bdf7f3eddf0a 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -189,17 +189,20 @@ extern atomic_t fscache_n_write;
extern atomic_t fscache_n_no_write_space;
extern atomic_t fscache_n_no_create_space;
extern atomic_t fscache_n_culled;
+extern atomic_t fscache_n_dio_misfit;
#define fscache_count_read() atomic_inc(&fscache_n_read)
#define fscache_count_write() atomic_inc(&fscache_n_write)
#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit)
#else
#define fscache_count_read() do {} while(0)
#define fscache_count_write() do {} while(0)
#define fscache_count_no_write_space() do {} while(0)
#define fscache_count_no_create_space() do {} while(0)
#define fscache_count_culled() do {} while(0)
+#define fscache_count_dio_misfit() do {} while(0)
#endif
#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 8e312c8323a8..6e8562cbcc43 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -437,9 +437,6 @@ const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_r
* indicates the cache resources to which the operation state should be
* attached; @cookie indicates the cache object that will be accessed.
*
- * This is intended to be called from the ->begin_cache_operation() netfs lib
- * operation as implemented by the network filesystem.
- *
* @cres->inval_counter is set from @cookie->inval_counter for comparison at
* the end of the operation. This allows invalidation during the operation to
* be detected by the caller.
@@ -629,48 +626,6 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
}
-#if __fscache_available
-bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
- struct fscache_cookie *cookie);
-#else
-#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
- filemap_dirty_folio(MAPPING, FOLIO)
-#endif
-
-/**
- * fscache_unpin_writeback - Unpin writeback resources
- * @wbc: The writeback control
- * @cookie: The cookie referring to the cache object
- *
- * Unpin the writeback resources pinned by fscache_dirty_folio(). This is
- * intended to be called by the netfs's ->write_inode() method.
- */
-static inline void fscache_unpin_writeback(struct writeback_control *wbc,
- struct fscache_cookie *cookie)
-{
- if (wbc->unpinned_fscache_wb)
- fscache_unuse_cookie(cookie, NULL, NULL);
-}
-
-/**
- * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode
- * @cookie: The cookie referring to the cache object
- * @inode: The inode to clean up
- * @aux: Auxiliary data to apply to the inode
- *
- * Clear any writeback resources held by an inode when the inode is evicted.
- * This must be called before clear_inode() is called.
- */
-static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
- struct inode *inode,
- const void *aux)
-{
- if (inode->i_state & I_PINNING_FSCACHE_WB) {
- loff_t i_size = i_size_read(inode);
- fscache_unuse_cookie(cookie, aux, &i_size);
- }
-}
-
/**
* fscache_note_page_release - Note that a netfs page got released
* @cookie: The cookie corresponding to the file
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index de292a007138..6cef1c241180 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -311,15 +311,23 @@ extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
struct page_frag_cache;
+void page_frag_cache_drain(struct page_frag_cache *nc);
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc_align(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+ gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
static inline void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask)
{
- return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
}
extern void page_frag_free(void *addr);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 9a5c6c76e653..7f75c9a51874 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -819,6 +819,24 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
+static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int gpio_device_get_base(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return -ENODEV;
+}
+
+static inline const char *gpio_device_get_label(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index 840cd254172d..7118ac28d468 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -77,17 +77,6 @@ enum hid_bpf_attach_flags {
int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
-/* Following functions are kfunc that we export to BPF programs */
-/* available everywhere in HID-BPF */
-__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t __sz);
-
-/* only available in syscall */
-int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags);
-int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
- enum hid_report_type rtype, enum hid_class_request reqtype);
-struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id);
-void hid_bpf_release_context(struct hid_bpf_ctx *ctx);
-
/*
* Below is HID internal
*/
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 87e3bedf8eb0..641c4567cfa7 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -157,6 +157,7 @@ enum hrtimer_base_type {
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
* expired
+ * @online: CPU is online from an hrtimers point of view
* @timer_waiters: A hrtimer_cancel() invocation waits for the timer
* callback to finish.
* @expires_next: absolute time of the next event, is required for remote
@@ -179,7 +180,8 @@ struct hrtimer_cpu_base {
unsigned int hres_active : 1,
in_hrtirq : 1,
hang_detected : 1,
- softirq_activated : 1;
+ softirq_activated : 1,
+ online : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int nr_events;
unsigned short nr_retries;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 2b00faf98017..6ef0557b4bff 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -164,8 +164,28 @@ struct hv_ring_buffer {
u8 buffer[];
} __packed;
+
+/*
+ * If the requested ring buffer size is at least 8 times the size of the
+ * header, steal space from the ring buffer for the header. Otherwise, add
+ * space for the header so that it doesn't take too much of the ring buffer
+ * space.
+ *
+ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
+ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
+ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
+ * large allocation that will be almost half wasted. As a contrasting example,
+ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
+ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
+ * In this latter case, we must add 64 Kbytes for the header and not worry
+ * about what's wasted.
+ */
+#define VMBUS_HEADER_ADJ(payload_sz) \
+ ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
+ 0 : sizeof(struct hv_ring_buffer))
+
/* Calculate the proper size of a ringbuffer, it must be page-aligned */
-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
(payload_sz))
struct hv_ring_buffer_info {
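
For illustration, a minimal userspace sketch of the sizing rule above, assuming a
4 KiB page and a 4 KiB struct hv_ring_buffer header (the x86 case); PAGE_ALIGN is
re-implemented locally:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define HDR_SIZE  4096UL	/* stand-in for sizeof(struct hv_ring_buffer) */

#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VMBUS_HEADER_ADJ(payload_sz) \
	((payload_sz) >= 8 * HDR_SIZE ? 0 : HDR_SIZE)
#define VMBUS_RING_SIZE(payload_sz) \
	PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + (payload_sz))

int main(void)
{
	/* Small ring: header space is added on top (16 KiB -> 20 KiB). */
	printf("%lu\n", VMBUS_RING_SIZE(16 * 1024UL));
	/* Large ring: header is stolen from the ring (128 KiB stays 128 KiB). */
	printf("%lu\n", VMBUS_RING_SIZE(128 * 1024UL));
	return 0;
}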
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 83c4d060a559..3385a2cc5b09 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2023 Intel Corporation
+ * Copyright (c) 2018 - 2024 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -191,6 +191,11 @@ static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
}
+static inline bool ieee80211_sn_less_eq(u16 sn1, u16 sn2)
+{
+ return ((sn2 - sn1) & IEEE80211_SN_MASK) <= (IEEE80211_SN_MODULO >> 1);
+}
+
static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2)
{
return (sn1 + sn2) & IEEE80211_SN_MASK;
@@ -808,6 +813,11 @@ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
}
+static inline u16 ieee80211_get_sn(struct ieee80211_hdr *hdr)
+{
+ return le16_get_bits(hdr->seq_ctrl, IEEE80211_SCTL_SEQ);
+}
+
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
@@ -1454,6 +1464,20 @@ struct ieee80211_mgmt {
u8 max_tod_error;
u8 max_toa_error;
} __packed wnm_timing_msr;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ttlm_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[];
+ } __packed ttlm_res;
+ struct {
+ u8 action_code;
+ } __packed ttlm_tear_down;
} u;
} __packed action;
DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
@@ -3036,6 +3060,9 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
@@ -3175,6 +3202,22 @@ ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
return len >= needed;
}
+/* must validate ieee80211_eht_oper_size_ok() first */
+static inline u16
+ieee80211_eht_oper_dis_subchan_bitmap(const struct ieee80211_eht_operation *eht_oper)
+{
+ const struct ieee80211_eht_operation_info *info =
+ (const void *)eht_oper->optional;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT))
+ return 0;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
+ return 0;
+
+ return get_unaligned_le16(info->optional);
+}
+
#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
struct ieee80211_bandwidth_indication {
@@ -3357,6 +3400,8 @@ enum ieee80211_statuscode {
WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
WLAN_STATUS_SAE_PK = 127,
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133,
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED = 134,
};
@@ -3682,6 +3727,7 @@ enum ieee80211_category {
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
WLAN_CATEGORY_S1G = 22,
+ WLAN_CATEGORY_PROTECTED_EHT = 37,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -3745,6 +3791,13 @@ enum ieee80211_unprotected_wnm_actioncode {
WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
};
+/* Protected EHT action codes */
+enum ieee80211_protected_eht_actioncode {
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+};
+
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
@@ -4845,6 +4898,10 @@ struct ieee80211_multi_link_elem {
#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_NO_SUPP 0
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_RESERVED 2
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
@@ -4908,18 +4965,43 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
}
/**
+ * ieee80211_mle_get_link_id - returns the link ID
+ * @data: the basic multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the BSS link ID can't be found, -1 will be returned.
+ */
+static inline int ieee80211_mle_get_link_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common now points at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_LINK_ID))
+ return -1;
+
+ return *common;
+}
+
+/**
* ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
- * @mle: the basic multi link element
+ * @data: pointer to the basic multi link element
*
* The element is assumed to be of the correct type (BASIC) and big enough,
* this must be checked using ieee80211_mle_type_ok().
*
* If the BSS parameter change count value can't be found (the presence bit
- * for it is clear), 0 will be returned.
+ * for it is clear), -1 will be returned.
*/
-static inline u8
-ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle)
+static inline int
+ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
u16 control = le16_to_cpu(mle->control);
const u8 *common = mle->variable;
@@ -4927,7 +5009,7 @@ ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle)
common += sizeof(struct ieee80211_mle_basic_common_info);
if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
- return 0;
+ return -1;
if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
common += 1;
@@ -4997,6 +5079,81 @@ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
}
/**
+ * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations.
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the MLD capabilities and operations field is not present, 0 will be
+ * returned.
+ */
+static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common now points at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_id - returns the MLD ID
+ * @data: pointer to the multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the MLD ID is not present, 0 will be returned.
+ */
+static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common now points at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_ID))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+
+ return *common;
+}
+
+/**
* ieee80211_mle_size_ok - validate multi-link element size
* @data: pointer to the element data
* @len: length of the containing element
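
A standalone check of the circular sequence-number comparison performed by the new
ieee80211_sn_less_eq() above; the 12-bit SN constants match the header's:

#include <stdio.h>
#include <stdbool.h>

#define IEEE80211_SN_MASK   0x0fff
#define IEEE80211_SN_MODULO (IEEE80211_SN_MASK + 1)

static bool sn_less_eq(unsigned short sn1, unsigned short sn2)
{
	return ((sn2 - sn1) & IEEE80211_SN_MASK) <= (IEEE80211_SN_MODULO >> 1);
}

int main(void)
{
	printf("%d\n", sn_less_eq(10, 10));	/* 1: equal counts as less-eq */
	printf("%d\n", sn_less_eq(4090, 5));	/* 1: 4090 precedes 5 after wrap */
	printf("%d\n", sn_less_eq(5, 4090));	/* 0: more than half the SN space apart */
	return 0;
}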
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 2a7660843444..043d442994b0 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -27,44 +27,54 @@ struct tun_xdp_hdr {
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
+
static inline bool tun_is_xdp_frame(void *ptr)
{
- return (unsigned long)ptr & TUN_XDP_FLAG;
+ return (unsigned long)ptr & TUN_XDP_FLAG;
}
+
static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
- return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
}
+
static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
- return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
+
void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
struct file;
struct socket;
+
static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
+
static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
return NULL;
}
+
static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
+
static inline void tun_ptr_free(void *ptr)
{
}
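
A minimal sketch of the pointer-tagging idiom these helpers implement: a low bit
that is always clear in aligned pointers marks XDP frames, so skbs and XDP frames
can share one ptr_ring. The TUN_XDP_FLAG value of 0x1UL is assumed here to match
the mainline header:

#include <stdio.h>
#include <stdbool.h>

#define TUN_XDP_FLAG 0x1UL

struct xdp_frame { int dummy; };

static bool is_xdp(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}

static void *tag(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
}

static struct xdp_frame *untag(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}

int main(void)
{
	struct xdp_frame frame;
	void *ptr = tag(&frame);

	printf("tagged: %d, round-trip ok: %d\n",
	       is_xdp(ptr), untag(ptr) == &frame);
	return 0;
}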
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 7852f6c9a714..719cf9cc6e1a 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -8,6 +8,8 @@
#ifndef __AD_SIGMA_DELTA_H__
#define __AD_SIGMA_DELTA_H__
+#include <linux/iio/iio.h>
+
enum ad_sigma_delta_mode {
AD_SD_MODE_CONTINUOUS = 0,
AD_SD_MODE_SINGLE = 1,
@@ -99,7 +101,7 @@ struct ad_sigma_delta {
* 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
* rounded to 16 bytes to take into account padding.
*/
- uint8_t tx_buf[4] ____cacheline_aligned;
+ uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN);
uint8_t rx_buf[16] __aligned(8);
};
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 607c3a89a647..f9ae5cdd884f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -258,9 +258,9 @@ struct st_sensor_data {
bool hw_irq_trigger;
s64 hw_timestamp;
- char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
-
struct mutex odr_lock;
+
+ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index dc9ea299e088..8898966bc0f0 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -11,6 +11,7 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
@@ -131,7 +132,7 @@ struct adis {
unsigned long irq_flag;
void *buffer;
- u8 tx[10] ____cacheline_aligned;
+ u8 tx[10] __aligned(IIO_DMA_MINALIGN);
u8 rx[4];
};
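
The three IIO hunks above converge on the same layout rule: DMA-visible buffers go
at the end of the state struct, aligned to IIO_DMA_MINALIGN, so no earlier field
shares a cacheline with device-written data. A userspace sketch of the layout; the
128-byte value is an assumption, the real constant is architecture-derived and
comes from <linux/iio/iio.h>:

#include <stddef.h>
#include <stdio.h>

#define IIO_DMA_MINALIGN 128	/* assumed; really ARCH_DMA_MINALIGN-based */

struct demo_state {
	int odr;		/* ordinary fields first ... */
	/* ... DMA buffers last, aligned so nothing before them can land
	 * in the same cacheline the device may write into. */
	unsigned char tx[10] __attribute__((aligned(IIO_DMA_MINALIGN)));
	unsigned char rx[4];
};

int main(void)
{
	printf("tx offset: %zu\n", offsetof(struct demo_state, tx));
	return 0;
}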
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ddb27fc0ee8c..cb5280e6cc21 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -53,13 +53,15 @@ struct in_device {
};
#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
+#define IPV4_DEVCONF_RO(cnf, attr) READ_ONCE(IPV4_DEVCONF(cnf, attr))
#define IPV4_DEVCONF_ALL(net, attr) \
IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
+#define IPV4_DEVCONF_ALL_RO(net, attr) READ_ONCE(IPV4_DEVCONF_ALL(net, attr))
-static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
+static inline int ipv4_devconf_get(const struct in_device *in_dev, int index)
{
index--;
- return in_dev->cnf.data[index];
+ return READ_ONCE(in_dev->cnf.data[index]);
}
static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
@@ -67,7 +69,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
{
index--;
set_bit(index, in_dev->cnf.state);
- in_dev->cnf.data[index] = val;
+ WRITE_ONCE(in_dev->cnf.data[index], val);
}
static inline void ipv4_devconf_setall(struct in_device *in_dev)
@@ -81,18 +83,18 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
#define IN_DEV_ANDCONF(in_dev, attr) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr) && \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
- (IPV4_DEVCONF_ALL(net, attr) || \
+ (IPV4_DEVCONF_ALL_RO(net, attr) || \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_ORCONF(in_dev, attr) \
IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
#define IN_DEV_MAXCONF(in_dev, attr) \
- (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
+ (max(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr), \
IN_DEV_CONF_GET((in_dev), attr)))
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
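
A compact sketch of the annotation pattern inetdevice.h adopts above: once the
writer uses WRITE_ONCE() and readers use READ_ONCE(), a devconf word may be read
without holding RTNL. The macro bodies below are simplified stand-ins for the
kernel's, which additionally carry KCSAN semantics:

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile const __typeof__(x) *)&(x))

static int devconf_data[32];

static void devconf_set(int index, int val)
{
	WRITE_ONCE(devconf_data[index - 1], val);	/* writer side */
}

static int devconf_get(int index)
{
	return READ_ONCE(devconf_data[index - 1]);	/* lockless reader */
}

int main(void)
{
	devconf_set(1, 1);
	printf("%d\n", devconf_get(1));
	return 0;
}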
diff --git a/include/linux/init.h b/include/linux/init.h
index 01b52c9c7526..3fa3f6241350 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -179,6 +179,13 @@ extern void (*late_time_init)(void);
extern bool initcall_debug;
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
#endif
#ifndef MODULE
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 1ea2a820e1eb..5e27cb3a3be9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -892,11 +892,14 @@ struct iommu_fwspec {
struct iommu_sva {
struct device *dev;
struct iommu_domain *domain;
+ struct list_head handle_item;
+ refcount_t users;
};
struct iommu_mm_data {
u32 pasid;
struct list_head sva_domains;
+ struct list_head sva_handles;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 7578d4f6a969..db1249cd9692 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -47,7 +47,30 @@ static inline int task_nice_ioclass(struct task_struct *task)
}
#ifdef CONFIG_BLOCK
-int __get_task_ioprio(struct task_struct *p);
+/*
+ * If the task has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ *
+ * Expected to be called for current task or with task_lock() held to keep
+ * io_context stable.
+ */
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ struct io_context *ioc = p->io_context;
+ int prio;
+
+ if (!ioc)
+ return IOPRIO_DEFAULT;
+
+ if (p != current)
+ lockdep_assert_held(&p->alloc_lock);
+
+ prio = ioc->ioprio;
+ if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+ prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+ return prio;
+}
#else
static inline int __get_task_ioprio(struct task_struct *p)
{
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5e605e384aac..383a0ea2ab91 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -3,6 +3,7 @@
#define _IPV6_H
#include <uapi/linux/ipv6.h>
+#include <linux/cache.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
@@ -10,9 +11,16 @@
* This structure contains configuration options per IPv6 link.
*/
struct ipv6_devconf {
- __s32 forwarding;
+ /* RX & TX fastpath fields. */
+ __cacheline_group_begin(ipv6_devconf_read_txrx);
+ __s32 disable_ipv6;
__s32 hop_limit;
__s32 mtu6;
+ __s32 forwarding;
+ __s32 disable_policy;
+ __s32 proxy_ndp;
+ __cacheline_group_end(ipv6_devconf_read_txrx);
+
__s32 accept_ra;
__s32 accept_redirects;
__s32 autoconf;
@@ -27,6 +35,7 @@ struct ipv6_devconf {
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
+ __s32 regen_min_advance;
__s32 regen_max_retry;
__s32 max_desync_factor;
__s32 max_addresses;
@@ -44,7 +53,6 @@ struct ipv6_devconf {
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
- __s32 proxy_ndp;
__s32 accept_source_route;
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -54,7 +62,6 @@ struct ipv6_devconf {
#ifdef CONFIG_IPV6_MROUTE
atomic_t mc_forwarding;
#endif
- __s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
@@ -75,7 +82,6 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
- __s32 disable_policy;
__s32 ndisc_tclass;
__s32 rpl_seg_enabled;
__u32 ioam6_id;
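
A userspace analogue of the __cacheline_group_begin()/__cacheline_group_end()
markers used in ipv6_devconf above: GNU C zero-length arrays bracket the hot
fields so the group's span can be measured (or prefetched) as a unit. The macro
bodies are a simplified guess at the kernel's <linux/cache.h> versions:

#include <stddef.h>
#include <stdio.h>

#define cacheline_group_begin(g) unsigned char __cacheline_group_begin_##g[0]
#define cacheline_group_end(g)   unsigned char __cacheline_group_end_##g[0]

struct demo_devconf {
	cacheline_group_begin(read_txrx);
	int disable_ipv6;	/* RX & TX fastpath fields */
	int hop_limit;
	int mtu6;
	cacheline_group_end(read_txrx);
	int rarely_used[40];	/* cold fields stay outside the group */
};

int main(void)
{
	/* span of the hot group, e.g. for a single prefetch */
	printf("%zu bytes\n",
	       offsetof(struct demo_devconf, __cacheline_group_end_read_txrx) -
	       offsetof(struct demo_devconf, __cacheline_group_begin_read_txrx));
	return 0;
}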
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 1dbb14daccfa..26d68115afb8 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -471,7 +471,7 @@ enum ata_completion_errors {
/*
* Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * alter libata-sata.c (for the ascii descriptions)
*/
enum ata_lpm_policy {
ATA_LPM_UNKNOWN,
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 185924c56378..642272576582 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -315,9 +315,9 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
-LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
sockptr_t optval, sockptr_t optlen, unsigned int len)
-LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
struct sk_buff *skb, u32 *secid)
LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
@@ -404,10 +404,17 @@ LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
-LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
-LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
-LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
-LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
+LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
+LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
+LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
#endif /* CONFIG_BPF_SYSCALL */
LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 9b54c4f0677f..693eba9869e4 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -26,6 +26,7 @@
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
#define MARVELL_PHY_ID_88Q2110 0x002b0980
+#define MARVELL_PHY_ID_88Q2220 0x002b0b20
/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 79ceee3c8673..68f8d2e970d4 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -373,6 +373,10 @@ static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 l
{
linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_100BT1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_1000BT1);
}
/**
@@ -409,6 +413,10 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
result |= MDIO_AN_T1_ADV_M_B10L;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_100BT1;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_1000BT1;
return result;
}
@@ -440,6 +448,42 @@ static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
}
/**
+ * mii_eee_cap2_mod_linkmode_sup_t()
+ * @adv: target the linkmode settings
+ * @val: register value
+ *
+ * A function that translates value of following registers to the linkmode:
+ * IEEE 802.3-2022 45.2.3.11 "EEE control and capability 2" register (3.21)
+ */
+static inline void mii_eee_cap2_mod_linkmode_sup_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_adv_t()
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates value of following registers to the linkmode:
+ * IEEE 802.3-2022 45.2.7.16 "EEE advertisement 2" register (7.62)
+ * IEEE 802.3-2022 45.2.7.17 "EEE link partner ability 2" register (7.63)
+ * Note: Currently this function is identical to mii_eee_cap2_mod_linkmode_sup_t().
+ * The bits differ for certain modes that are not yet supported, however,
+ * so separate functions are provided from the start.
+ */
+static inline void mii_eee_cap2_mod_linkmode_adv_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
* linkmode_to_mii_eee_cap1_t()
* @adv: the linkmode advertisement settings
*
@@ -467,6 +511,25 @@ static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
}
/**
+ * linkmode_to_mii_eee_cap2_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates linkmode to value for IEEE 802.3-2022 45.2.7.16
+ * "EEE advertisement 2" register (7.62)
+ */
+static inline u32 linkmode_to_mii_eee_cap2_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, adv))
+ result |= MDIO_EEE_2_5GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_5GT;
+
+ return result;
+}
+
+/**
* mii_10base_t1_adv_mod_linkmode_t()
* @adv: linkmode advertisement settings
* @val: register value
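
The new EEE cap2 helpers above are plain bit translations between register
7.62/3.21 and linkmode bits. A self-contained model of the round trip; the
MDIO_EEE_* bit values below are assumptions, the real ones live in
<uapi/linux/mdio.h>:

#include <stdio.h>
#include <stdbool.h>

#define MDIO_EEE_2_5GT 0x0001	/* assumed bit positions for register 7.62 */
#define MDIO_EEE_5GT   0x0002

struct linkmodes { bool adv_2500full, adv_5000full; };

/* register -> linkmode, mirroring mii_eee_cap2_mod_linkmode_adv_t() */
static void cap2_to_linkmode(struct linkmodes *lm, unsigned int val)
{
	lm->adv_2500full = val & MDIO_EEE_2_5GT;
	lm->adv_5000full = val & MDIO_EEE_5GT;
}

/* linkmode -> register, mirroring linkmode_to_mii_eee_cap2_t() */
static unsigned int linkmode_to_cap2(const struct linkmodes *lm)
{
	unsigned int val = 0;

	if (lm->adv_2500full)
		val |= MDIO_EEE_2_5GT;
	if (lm->adv_5000full)
		val |= MDIO_EEE_5GT;
	return val;
}

int main(void)
{
	struct linkmodes lm;

	cap2_to_linkmode(&lm, MDIO_EEE_5GT);
	printf("round trip: 0x%x\n", linkmode_to_cap2(&lm));
	return 0;
}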
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b695f9e946da..e2082240586d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -121,6 +121,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
+unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+ phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
diff --git a/include/linux/mfd/idtRC38xxx_reg.h b/include/linux/mfd/idtRC38xxx_reg.h
new file mode 100644
index 000000000000..ec11872f51ad
--- /dev/null
+++ b/include/linux/mfd/idtRC38xxx_reg.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on PolarBear_CSRs.RevA.xlsx (2023-04-21)
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef MFD_IDTRC38XXX_REG
+#define MFD_IDTRC38XXX_REG
+
+/* GLOBAL */
+#define SOFT_RESET_CTRL (0x15) /* Specific to FC3W */
+#define MISC_CTRL (0x14) /* Specific to FC3A */
+#define APLL_REINIT BIT(1)
+#define APLL_REINIT_VFC3A BIT(2)
+
+#define DEVICE_ID (0x2)
+#define DEVICE_ID_MASK (0x1000) /* Bit 12 is 1 if FC3W and 0 if FC3A */
+#define DEVICE_ID_SHIFT (12)
+
+/* FOD */
+#define FOD_0 (0x300)
+#define FOD_0_VFC3A (0x400)
+#define FOD_1 (0x340)
+#define FOD_1_VFC3A (0x440)
+#define FOD_2 (0x380)
+#define FOD_2_VFC3A (0x480)
+
+/* TDCAPLL */
+#define TDC_CTRL (0x44a) /* Specific to FC3W */
+#define TDC_ENABLE_CTRL (0x169) /* Specific to FC3A */
+#define TDC_DAC_CAL_CTRL (0x16a) /* Specific to FC3A */
+#define TDC_EN BIT(0)
+#define TDC_DAC_RECAL_REQ BIT(1)
+#define TDC_DAC_RECAL_REQ_VFC3A BIT(0)
+
+#define TDC_FB_DIV_INT_CNFG (0x442)
+#define TDC_FB_DIV_INT_CNFG_VFC3A (0x162)
+#define TDC_FB_DIV_INT_MASK GENMASK(7, 0)
+#define TDC_REF_DIV_CNFG (0x443)
+#define TDC_REF_DIV_CNFG_VFC3A (0x163)
+#define TDC_REF_DIV_CONFIG_MASK GENMASK(2, 0)
+
+/* TIME SYNC CHANNEL */
+#define TIME_CLOCK_SRC (0xa01) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT (0xa00) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT_MASK GENMASK(5, 0)
+
+#define SUB_SYNC_GEN_CNFG (0xa04)
+
+#define TOD_COUNTER_READ_REQ (0xa5f)
+#define TOD_COUNTER_READ_REQ_VFC3A (0x6df)
+#define TOD_SYNC_LOAD_VAL_CTRL (0xa10)
+#define TOD_SYNC_LOAD_VAL_CTRL_VFC3A (0x690)
+#define SYNC_COUNTER_MASK GENMASK_ULL(51, 0)
+#define SUB_SYNC_COUNTER_MASK GENMASK(30, 0)
+#define TOD_SYNC_LOAD_REQ_CTRL (0xa21)
+#define TOD_SYNC_LOAD_REQ_CTRL_VFC3A (0x6a1)
+#define SYNC_LOAD_ENABLE BIT(1)
+#define SUB_SYNC_LOAD_ENABLE BIT(0)
+#define SYNC_LOAD_REQ BIT(0)
+
+#define LPF_MODE_CNFG (0xa80)
+#define LPF_MODE_CNFG_VFC3A (0x700)
+enum lpf_mode {
+ LPF_DISABLED = 0,
+ LPF_WP = 1,
+ LPF_HOLDOVER = 2,
+ LPF_WF = 3,
+ LPF_INVALID = 4
+};
+#define LPF_CTRL (0xa98)
+#define LPF_CTRL_VFC3A (0x718)
+#define LPF_EN BIT(0)
+
+#define LPF_BW_CNFG (0xa81)
+#define LPF_BW_SHIFT GENMASK(7, 3)
+#define LPF_BW_MULT GENMASK(2, 0)
+#define LPF_BW_SHIFT_DEFAULT (0xb)
+#define LPF_BW_MULT_DEFAULT (0x0)
+#define LPF_BW_SHIFT_1PPS (0x5)
+
+#define LPF_WR_PHASE_CTRL (0xaa8)
+#define LPF_WR_PHASE_CTRL_VFC3A (0x728)
+#define LPF_WR_FREQ_CTRL (0xab0)
+#define LPF_WR_FREQ_CTRL_VFC3A (0x730)
+
+#define TIME_CLOCK_TDC_FANOUT_CNFG (0xB00)
+#define TIME_SYNC_TO_TDC_EN BIT(0)
+#define SIG1_MUX_SEL_MASK GENMASK(7, 4)
+#define SIG2_MUX_SEL_MASK GENMASK(11, 8)
+enum tdc_mux_sel {
+ REF0 = 0,
+ REF1 = 1,
+ REF2 = 2,
+ REF3 = 3,
+ REF_CLK5 = 4,
+ REF_CLK6 = 5,
+ DPLL_FB_TO_TDC = 6,
+ DPLL_FB_DIVIDED_TO_TDC = 7,
+ TIME_CLK_DIVIDED = 8,
+ TIME_SYNC = 9,
+};
+
+#define TIME_CLOCK_MEAS_CNFG (0xB04)
+#define TDC_MEAS_MODE BIT(0)
+enum tdc_meas_mode {
+ CONTINUOUS = 0,
+ ONE_SHOT = 1,
+ MEAS_MODE_INVALID = 2,
+};
+
+#define TIME_CLOCK_MEAS_DIV_CNFG (0xB08)
+#define TIME_REF_DIV_MASK GENMASK(29, 24)
+
+#define TIME_CLOCK_MEAS_CTRL (0xB10)
+#define TDC_MEAS_EN BIT(0)
+#define TDC_MEAS_START BIT(1)
+
+#define TDC_FIFO_READ_REQ (0xB2F)
+#define TDC_FIFO_READ (0xB30)
+#define COARSE_MEAS_MASK GENMASK_ULL(39, 13)
+#define FINE_MEAS_MASK GENMASK(12, 0)
+
+#define TDC_FIFO_CTRL (0xB12)
+#define FIFO_CLEAR BIT(0)
+#define TDC_FIFO_STS (0xB38)
+#define FIFO_FULL BIT(1)
+#define FIFO_EMPTY BIT(0)
+#define TDC_FIFO_EVENT (0xB39)
+#define FIFO_OVERRUN BIT(1)
+
+/* DPLL */
+#define MAX_REFERENCE_INDEX (3)
+#define MAX_NUM_REF_PRIORITY (4)
+
+#define MAX_DPLL_INDEX (2)
+
+#define DPLL_STS (0x580)
+#define DPLL_STS_VFC3A (0x571)
+#define DPLL_STATE_STS_MASK (0x70)
+#define DPLL_STATE_STS_SHIFT (4)
+#define DPLL_REF_SEL_STS_MASK (0x6)
+#define DPLL_REF_SEL_STS_SHIFT (1)
+
+#define DPLL_REF_PRIORITY_CNFG (0x502)
+#define DPLL_REFX_PRIORITY_DISABLE_MASK (0xf)
+#define DPLL_REF0_PRIORITY_ENABLE_AND_SET_MASK (0x31)
+#define DPLL_REF1_PRIORITY_ENABLE_AND_SET_MASK (0xc2)
+#define DPLL_REF2_PRIORITY_ENABLE_AND_SET_MASK (0x304)
+#define DPLL_REF3_PRIORITY_ENABLE_AND_SET_MASK (0xc08)
+#define DPLL_REF0_PRIORITY_SHIFT (4)
+#define DPLL_REF1_PRIORITY_SHIFT (6)
+#define DPLL_REF2_PRIORITY_SHIFT (8)
+#define DPLL_REF3_PRIORITY_SHIFT (10)
+
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKED = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_WRITE_FREQUENCY = 3,
+ DPLL_STATE_ACQUIRE = 4,
+ DPLL_STATE_HITLESS_SWITCH = 5,
+ DPLL_STATE_MAX = DPLL_STATE_HITLESS_SWITCH
+};
+
+/* REFMON */
+#define LOSMON_STS_0 (0x81e)
+#define LOSMON_STS_0_VFC3A (0x18e)
+#define LOSMON_STS_1 (0x82e)
+#define LOSMON_STS_1_VFC3A (0x19e)
+#define LOSMON_STS_2 (0x83e)
+#define LOSMON_STS_2_VFC3A (0x1ae)
+#define LOSMON_STS_3 (0x84e)
+#define LOSMON_STS_3_VFC3A (0x1be)
+#define LOS_STS_MASK (0x1)
+
+#define FREQMON_STS_0 (0x874)
+#define FREQMON_STS_0_VFC3A (0x1d4)
+#define FREQMON_STS_1 (0x894)
+#define FREQMON_STS_1_VFC3A (0x1f4)
+#define FREQMON_STS_2 (0x8b4)
+#define FREQMON_STS_2_VFC3A (0x214)
+#define FREQMON_STS_3 (0x8d4)
+#define FREQMON_STS_3_VFC3A (0x234)
+#define FREQ_FAIL_STS_SHIFT (31)
+
+/* Firmware interface */
+#define TIME_CLK_FREQ_ADDR (0xffa0)
+#define XTAL_FREQ_ADDR (0xffa1)
+
+/*
+ * Return register address and field mask based on the passed-in firmware version
+ */
+#define IDTFC3_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+#define IDTFC3_FW_FIELD(FW, VER, FIELD) (((FW) < (VER)) ? (FIELD) : (FIELD##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ VFC3W = 1,
+ VFC3A = 2
+};
+
+/* XTAL_FREQ_ADDR/TIME_CLK_FREQ_ADDR */
+enum {
+ FREQ_MIN = 0,
+ FREQ_25M = 1,
+ FREQ_49_152M = 2,
+ FREQ_50M = 3,
+ FREQ_100M = 4,
+ FREQ_125M = 5,
+ FREQ_250M = 6,
+ FREQ_MAX
+};
+
+struct idtfc3_hw_param {
+ u32 xtal_freq;
+ u32 time_clk_freq;
+};
+
+struct idtfc3_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+static inline void idtfc3_default_hw_param(struct idtfc3_hw_param *hw_param)
+{
+ hw_param->xtal_freq = 49152000;
+ hw_param->time_clk_freq = 25000000;
+}
+
+static inline int idtfc3_set_hw_param(struct idtfc3_hw_param *hw_param,
+ u16 addr, u8 val)
+{
+ if (addr == XTAL_FREQ_ADDR)
+ switch (val) {
+ case FREQ_49_152M:
+ hw_param->xtal_freq = 49152000;
+ break;
+ case FREQ_50M:
+ hw_param->xtal_freq = 50000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else if (addr == TIME_CLK_FREQ_ADDR)
+ switch (val) {
+ case FREQ_25M:
+ hw_param->time_clk_freq = 25000000;
+ break;
+ case FREQ_50M:
+ hw_param->time_clk_freq = 50000000;
+ break;
+ case FREQ_100M:
+ hw_param->time_clk_freq = 100000000;
+ break;
+ case FREQ_125M:
+ hw_param->time_clk_freq = 125000000;
+ break;
+ case FREQ_250M:
+ hw_param->time_clk_freq = 250000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EFAULT;
+
+ return 0;
+}
+
+#endif
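
A hypothetical probe-time usage sketch for the helpers this new header defines;
demo_apply_fw_config() is invented for illustration and uses only the inlines
above:

static int demo_apply_fw_config(struct idtfc3_hw_param *hw)
{
	int err;

	idtfc3_default_hw_param(hw);	/* 49.152 MHz xtal, 25 MHz time clock */

	err = idtfc3_set_hw_param(hw, XTAL_FREQ_ADDR, FREQ_50M);
	if (err)
		return err;		/* -EINVAL for an unsupported value */

	return idtfc3_set_hw_param(hw, TIME_CLK_FREQ_ADDR, FREQ_125M);
}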
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 8c55ff351e5f..41f03b352401 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -681,6 +681,7 @@ struct mlx5e_resources {
struct mlx5_sq_bfreg bfreg;
#define MLX5_MAX_NUM_TC 8
u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+ bool tisn_valid;
} hw_objs;
struct net_device *uplink_netdev;
struct mutex uplink_netdev_lock;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 6f7725238abc..3fb428ce7d1c 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -132,6 +132,7 @@ struct mlx5_flow_handle;
enum {
FLOW_CONTEXT_HAS_TAG = BIT(0),
+ FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
};
struct mlx5_flow_context {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index bf5320b28b8b..3a8a9e57a51d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1103,7 +1103,7 @@ struct mlx5_ifc_roce_cap_bits {
u8 sw_r_roce_src_udp_port[0x1];
u8 fl_rc_qp_when_roce_disabled[0x1];
u8 fl_rc_qp_when_roce_enabled[0x1];
- u8 reserved_at_7[0x1];
+ u8 roce_cc_general[0x1];
u8 qp_ooo_transmit_default[0x1];
u8 reserved_at_9[0x15];
u8 qp_ts_format[0x2];
@@ -3576,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits {
u8 action[0x10];
u8 extended_destination[0x1];
- u8 reserved_at_81[0x1];
+ u8 uplink_hairpin_en[0x1];
u8 flow_source[0x2];
u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
@@ -4036,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10];
- u8 reserved_at_60[0xd0];
+ u8 reserved_at_60[0xa0];
+
+ u8 reserved_at_100[0x1];
+ u8 sd_group[0x3];
+ u8 reserved_at_104[0x1c];
+ u8 reserved_at_120[0x10];
u8 mtu[0x10];
u8 system_image_guid[0x40];
@@ -10122,8 +10127,7 @@ struct mlx5_ifc_mpir_reg_bits {
u8 reserved_at_20[0x20];
u8 local_port[0x8];
- u8 reserved_at_28[0x15];
- u8 sd_group[0x3];
+ u8 reserved_at_28[0x18];
u8 reserved_at_60[0x20];
};
@@ -10257,7 +10261,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_63_to_46[0x12];
u8 mrtc[0x1];
- u8 regs_44_to_32[0xd];
+ u8 regs_44_to_41[0x4];
+ u8 mfrl[0x1];
+ u8 regs_39_to_32[0x8];
u8 regs_31_to_10[0x16];
u8 mtmp[0x1];
@@ -10657,6 +10663,7 @@ enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET = 0x7,
};
enum {
@@ -12701,6 +12708,14 @@ enum mlx5_msees_oper_status {
MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
};
+enum mlx5_msees_failure_reason {
+ MLX5_MSEES_FAILURE_REASON_UNDEFINED_ERROR = 0x0,
+ MLX5_MSEES_FAILURE_REASON_PORT_DOWN = 0x1,
+ MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF = 0x2,
+ MLX5_MSEES_FAILURE_REASON_NET_SYNCHRONIZER_DEVICE_ERROR = 0x3,
+ MLX5_MSEES_FAILURE_REASON_LACK_OF_RESOURCES = 0x4,
+};
+
struct mlx5_ifc_msees_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index bd53cf4be7bd..f0e55bf3ec8b 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -269,7 +269,10 @@ struct mlx5_wqe_eth_seg {
union {
struct {
__be16 sz;
- u8 start[2];
+ union {
+ u8 start[2];
+ DECLARE_FLEX_ARRAY(u8, data);
+ };
} inline_hdr;
struct {
__be16 type;
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index fbb9bf447889..c36cc6d82926 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 40d94411d492..dc7048824be8 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -156,6 +156,7 @@ calc_vm_flag_bits(unsigned long flags)
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+ _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
arch_calc_vm_flag_bits(flags);
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4ed33b127821..a497f189d988 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2013,9 +2013,9 @@ static inline int pfn_valid(unsigned long pfn)
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __pfn_to_section(pfn);
- rcu_read_lock();
+ rcu_read_lock_sched();
if (!valid_section(ms)) {
- rcu_read_unlock();
+ rcu_read_unlock_sched();
return 0;
}
/*
@@ -2023,7 +2023,7 @@ static inline int pfn_valid(unsigned long pfn)
* the entire section-sized span.
*/
ret = early_section(ms) || pfn_section_valid(ms, pfn);
- rcu_read_unlock();
+ rcu_read_unlock_sched();
return ret;
}
diff --git a/include/linux/net.h b/include/linux/net.h
index c9b4a63791a4..15df6d5f27a7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -299,10 +299,7 @@ do { \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
#define net_dbg_ratelimited(fmt, ...) \
- do { \
- if (0) \
- no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
- } while (0)
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
#define net_get_random_once(buf, nbytes) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 118c40258d07..4230c7f3b959 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -79,8 +79,6 @@ struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
-/* DPLL specific */
-struct dpll_pin;
typedef u32 xdp_features_t;
@@ -227,12 +225,6 @@ struct net_device_core_stats {
#include <linux/cache.h>
#include <linux/skbuff.h>
-#ifdef CONFIG_RPS
-#include <linux/static_key.h>
-extern struct static_key_false rps_needed;
-extern struct static_key_false rfs_needed;
-#endif
-
struct neighbour;
struct neigh_parms;
struct sk_buff;
@@ -732,86 +724,10 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
#endif
}
-#ifdef CONFIG_RPS
-/*
- * This structure holds an RPS map which can be of variable length. The
- * map is an array of CPUs.
- */
-struct rps_map {
- unsigned int len;
- struct rcu_head rcu;
- u16 cpus[];
-};
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
-
-/*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
- * tail pointer for that CPU's input queue at the time of last enqueue, and
- * a hardware filter index.
- */
-struct rps_dev_flow {
- u16 cpu;
- u16 filter;
- unsigned int last_qtail;
-};
-#define RPS_NO_FILTER 0xffff
-
-/*
- * The rps_dev_flow_table structure contains a table of flow mappings.
- */
-struct rps_dev_flow_table {
- unsigned int mask;
- struct rcu_head rcu;
- struct rps_dev_flow flows[];
-};
-#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
- ((_num) * sizeof(struct rps_dev_flow)))
-
-/*
- * The rps_sock_flow_table contains mappings of flows to the last CPU
- * on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high-order bits
- * of flow hash, lower part is CPU number.
- * rps_cpu_mask is used to partition the space, depending on number of
- * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
- * meaning we use 32-6=26 bits for the hash.
- */
-struct rps_sock_flow_table {
- u32 mask;
-
- u32 ents[] ____cacheline_aligned_in_smp;
-};
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
-
-#define RPS_NO_CPU 0xffff
-
-extern u32 rps_cpu_mask;
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
-static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
- u32 hash)
-{
- if (table && hash) {
- unsigned int index = hash & table->mask;
- u32 val = hash & ~rps_cpu_mask;
-
- /* We only give a hint, preemption can change CPU under us */
- val |= raw_smp_processor_id();
-
- /* The following WRITE_ONCE() is paired with the READ_ONCE()
- * here, and another one in get_rps_cpu().
- */
- if (READ_ONCE(table->ents[index]) != val)
- WRITE_ONCE(table->ents[index], val);
- }
-}
-
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
#endif
-#endif /* CONFIG_RPS */
/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
@@ -1062,7 +978,7 @@ struct xfrmdev_ops {
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
- void (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
void (*xdo_dev_policy_free) (struct xfrm_policy *x);
@@ -1815,6 +1731,15 @@ enum netdev_stat_type {
NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
};
+enum netdev_reg_state {
+ NETREG_UNINITIALIZED = 0,
+ NETREG_REGISTERED, /* completed register_netdevice */
+ NETREG_UNREGISTERING, /* called unregister_netdevice */
+ NETREG_UNREGISTERED, /* completed unregister todo */
+ NETREG_RELEASED, /* called free_netdev */
+ NETREG_DUMMY, /* dummy device for NAPI poll */
+};
+
/**
* struct net_device - The DEVICE structure.
*
@@ -2030,6 +1955,7 @@ enum netdev_stat_type {
*
* @sysfs_rx_queue_group: Space for optional per-rx queue attributes
* @rtnl_link_ops: Rtnl_link_ops
+ * @stat_ops: Optional ops for queue-aware statistics
*
* @gso_max_size: Maximum size of generic segmentation offload
* @tso_max_size: Device (as in HW) limit on the max TSO request size
@@ -2141,6 +2067,11 @@ struct net_device {
/* TXRX read-mostly hotpath */
__cacheline_group_begin(net_device_read_txrx);
+ union {
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+ struct pcpu_dstats __percpu *dstats;
+ };
unsigned int flags;
unsigned short hard_header_len;
netdev_features_t features;
@@ -2249,7 +2180,7 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
- unsigned char operstate;
+ unsigned int operstate;
unsigned char link_mode;
unsigned char if_port;
@@ -2372,13 +2303,7 @@ struct net_device {
struct list_head link_watch_list;
- enum { NETREG_UNINITIALIZED=0,
- NETREG_REGISTERED, /* completed register_netdevice */
- NETREG_UNREGISTERING, /* called unregister_netdevice */
- NETREG_UNREGISTERED, /* completed unregister todo */
- NETREG_RELEASED, /* called free_netdev */
- NETREG_DUMMY, /* dummy device for NAPI poll */
- } reg_state:8;
+ u8 reg_state;
bool dismantle;
@@ -2395,11 +2320,6 @@ struct net_device {
enum netdev_ml_priv_type ml_priv_type;
enum netdev_stat_type pcpu_stat_type:8;
- union {
- struct pcpu_lstats __percpu *lstats;
- struct pcpu_sw_netstats __percpu *tstats;
- struct pcpu_dstats __percpu *dstats;
- };
#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
@@ -2416,6 +2336,8 @@ struct net_device {
const struct rtnl_link_ops *rtnl_link_ops;
+ const struct netdev_stat_ops *stat_ops;
+
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SEGS 65535u
#define GSO_LEGACY_MAX_SIZE 65536u
@@ -2469,7 +2391,7 @@ struct net_device {
struct devlink_port *devlink_port;
#if IS_ENABLED(CONFIG_DPLL)
- struct dpll_pin *dpll_pin;
+ struct dpll_pin __rcu *dpll_pin;
#endif
#if IS_ENABLED(CONFIG_PAGE_POOL)
/** @page_pools: page pools created for this netdevice */
@@ -3074,8 +2996,6 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info);
-extern rwlock_t dev_base_lock; /* Device list lock */
-
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
@@ -3198,7 +3118,7 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
-int init_dummy_netdev(struct net_device *dev);
+void init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
@@ -3499,6 +3419,16 @@ static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue
#endif
}
+static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Drivers not migrated to BQL will also return 0. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
@@ -3958,7 +3888,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
@@ -4032,17 +3962,6 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
-void netdev_dpll_pin_clear(struct net_device *dev);
-
-static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev)
-{
-#if IS_ENABLED(CONFIG_DPLL)
- return dev->dpll_pin;
-#else
- return NULL;
-#endif
-}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -4353,8 +4272,10 @@ static inline bool netif_testing(const struct net_device *dev)
*/
static inline bool netif_oper_up(const struct net_device *dev)
{
- return (dev->operstate == IF_OPER_UP ||
- dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+ unsigned int operstate = READ_ONCE(dev->operstate);
+
+ return operstate == IF_OPER_UP ||
+ operstate == IF_OPER_UNKNOWN /* backward compat */;
}
/**
@@ -4793,11 +4714,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats);
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
-extern int netdev_max_backlog;
-extern int dev_rx_weight;
-extern int dev_tx_weight;
-extern int gro_normal_batch;
-
enum {
NESTED_SYNC_IMM_BIT,
NESTED_SYNC_TODO_BIT,
@@ -5254,7 +5170,9 @@ static inline const char *netdev_name(const struct net_device *dev)
static inline const char *netdev_reg_state(const struct net_device *dev)
{
- switch (dev->reg_state) {
+ u8 reg_state = READ_ONCE(dev->reg_state);
+
+ switch (reg_state) {
case NETREG_UNINITIALIZED: return " (uninitialized)";
case NETREG_REGISTERED: return "";
case NETREG_UNREGISTERING: return " (unregistering)";
@@ -5263,7 +5181,7 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
case NETREG_DUMMY: return " (dummy)";
}
- WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
+ WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
return " (unknown)";
}
@@ -5305,7 +5223,6 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
-extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
extern struct net_device *blackhole_netdev;
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 80900d910992..2683b2b77612 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -370,7 +370,6 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict, unsigned short family);
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
#include <net/flow.h>
@@ -474,6 +473,7 @@ struct nf_ct_hook {
const struct sk_buff *);
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
void (*set_closing)(struct nf_conntrack *nfct);
+ int (*confirm)(struct sk_buff *skb);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index e8c350a3ade1..e9f4f845d760 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -186,6 +186,8 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+ /* Cancel ongoing garbage collectors before destroying the set */
+ void (*cancel_gc)(struct ip_set *set);
/* Region-locking is used */
bool region_lock;
};
@@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
/* A generic IP set */
struct ip_set {
+ /* For call_rcu in destroy */
+ struct rcu_head rcu;
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index b11a84f6c32b..100cbb261269 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -109,11 +109,18 @@ static inline int wait_on_page_fscache_killable(struct page *page)
return folio_wait_private_2_killable(page_folio(page));
}
+/* Marks used on xarray-based buffers */
+#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
+#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
+
enum netfs_io_source {
NETFS_FILL_WITH_ZEROES,
NETFS_DOWNLOAD_FROM_SERVER,
NETFS_READ_FROM_CACHE,
NETFS_INVALID_READ,
+ NETFS_UPLOAD_TO_SERVER,
+ NETFS_WRITE_TO_CACHE,
+ NETFS_INVALID_WRITE,
} __mode(byte);
typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
@@ -129,9 +136,57 @@ struct netfs_inode {
struct fscache_cookie *cache;
#endif
loff_t remote_i_size; /* Size of the remote file */
+ loff_t zero_point; /* Size after which we assume there's no data
+ * on the server */
+ unsigned long flags;
+#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
+#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
+#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
+#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
+};
+
+/*
+ * A netfs group - for instance a ceph snap. This is marked on dirty pages,
+ * and pages marked with a group must be flushed before they can be written
+ * under the domain of another group.
+ */
+struct netfs_group {
+ refcount_t ref;
+ void (*free)(struct netfs_group *netfs_group);
};
/*
+ * Information about a dirty page (attached only if necessary).
+ * folio->private
+ */
+struct netfs_folio {
+ struct netfs_group *netfs_group; /* Filesystem's grouping marker (or NULL). */
+ unsigned int dirty_offset; /* Write-streaming dirty data offset */
+ unsigned int dirty_len; /* Write-streaming dirty data length */
+};
+#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+ void *priv = folio_get_private(folio);
+
+ if ((unsigned long)priv & NETFS_FOLIO_INFO)
+ return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
+ return NULL;
+}
+
+static inline struct netfs_group *netfs_folio_group(struct folio *folio)
+{
+ struct netfs_folio *finfo;
+ void *priv = folio_get_private(folio);
+
+ finfo = netfs_folio_info(folio);
+ if (finfo)
+ return finfo->netfs_group;
+ return priv;
+}
+
+/*
* Resources required to do operations on a cache.
*/
struct netfs_cache_resources {
@@ -143,17 +198,24 @@ struct netfs_cache_resources {
};
/*
- * Descriptor for a single component subrequest.
+ * Descriptor for a single component subrequest. Each operation represents an
+ * individual read/write from/to a server, a cache, a journal, etc.
+ *
+ * The buffer iterator is persistent for the life of the subrequest struct and
+ * the pages it points to can be relied on to exist for the duration.
*/
struct netfs_io_subrequest {
struct netfs_io_request *rreq; /* Supervising I/O request */
+ struct work_struct work;
struct list_head rreq_link; /* Link in rreq->subrequests */
+ struct iov_iter io_iter; /* Iterator for this subrequest */
loff_t start; /* Where to start the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
+ unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
enum netfs_io_source source; /* Where to read from/write to */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
@@ -168,6 +230,13 @@ enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_WRITEBACK, /* This write was triggered by writepages */
+ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
+ NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
+ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
+ NETFS_DIO_READ, /* This is a direct I/O read */
+ NETFS_DIO_WRITE, /* This is a direct I/O write */
+ nr__netfs_io_origin
} __mode(byte);
/*
@@ -175,19 +244,34 @@ enum netfs_io_origin {
* operations to a variety of data stores and then stitch the result together.
*/
struct netfs_io_request {
- struct work_struct work;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
struct inode *inode; /* The file being accessed */
struct address_space *mapping; /* The mapping being accessed */
+ struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
+ struct list_head proc_link; /* Link in netfs_iorequests */
struct list_head subrequests; /* Contributory I/O operations */
+ struct iov_iter iter; /* Unencrypted-side iterator */
+ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
+ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
+ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
+ unsigned int rsize; /* Maximum read size (0 for none) */
+ unsigned int wsize; /* Maximum write size (0 for none) */
+ unsigned int subreq_counter; /* Next subreq->debug_index */
atomic_t nr_outstanding; /* Number of ops in progress */
atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
size_t submitted; /* Amount submitted for I/O so far */
size_t len; /* Length of the request */
+ size_t upper_len; /* Length can be extended to here */
+ size_t transferred; /* Amount to be indicated as transferred */
short error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
+ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
loff_t i_size; /* Size of the file */
loff_t start; /* Start position */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
@@ -199,17 +283,25 @@ struct netfs_io_request {
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+#define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */
+#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
+#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
+#define NETFS_RREQ_BLOCKED 10 /* We blocked */
const struct netfs_request_ops *netfs_ops;
+ void (*cleanup)(struct netfs_io_request *req);
};
/*
* Operations the network filesystem can/must provide to the helpers.
*/
struct netfs_request_ops {
+ unsigned int io_request_size; /* Alloc size for netfs_io_request struct */
+ unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */
int (*init_request)(struct netfs_io_request *rreq, struct file *file);
void (*free_request)(struct netfs_io_request *rreq);
- int (*begin_cache_operation)(struct netfs_io_request *rreq);
+ void (*free_subrequest)(struct netfs_io_subrequest *rreq);
+ /* Read request handling */
void (*expand_readahead)(struct netfs_io_request *rreq);
bool (*clamp_length)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -217,6 +309,14 @@ struct netfs_request_ops {
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct folio **foliop, void **_fsdata);
void (*done)(struct netfs_io_request *rreq);
+
+ /* Modification handling */
+ void (*update_i_size)(struct inode *inode, loff_t i_size);
+
+ /* Write request handling */
+ void (*create_write_requests)(struct netfs_io_request *wreq,
+ loff_t start, size_t len);
+ void (*invalidate_cache)(struct netfs_io_request *wreq);
};
/*
@@ -229,8 +329,7 @@ enum netfs_read_from_hole {
};
/*
- * Table of operations for access to a cache. This is obtained by
- * rreq->ops->begin_cache_operation().
+ * Table of operations for access to a cache.
*/
struct netfs_cache_ops {
/* End an operation */
@@ -265,8 +364,8 @@ struct netfs_cache_ops {
* actually do.
*/
int (*prepare_write)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size,
- bool no_space_allocated_yet);
+ loff_t *_start, size_t *_len, size_t upper_len,
+ loff_t i_size, bool no_space_allocated_yet);
/* Prepare an on-demand read operation, shortening it to a cached/uncached
* boundary as appropriate.
@@ -284,22 +383,62 @@ struct netfs_cache_ops {
loff_t *_data_start, size_t *_data_len);
};
+/* High-level read API. */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+
+/* High-level write API */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group);
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+/* Address operations API */
struct readahead_control;
void netfs_readahead(struct readahead_control *);
int netfs_read_folio(struct file *, struct folio *);
int netfs_write_begin(struct netfs_inode *, struct file *,
- struct address_space *, loff_t pos, unsigned int len,
- struct folio **, void **fsdata);
-
+ struct address_space *, loff_t pos, unsigned int len,
+ struct folio **, void **fsdata);
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+int netfs_launder_folio(struct folio *folio);
+
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+/* (Sub)request management API. */
void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
bool was_async, enum netfs_sreq_ref_trace what);
-void netfs_stats_show(struct seq_file *);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
struct iov_iter *new,
iov_iter_extraction_t extraction_flags);
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs);
+struct netfs_io_subrequest *netfs_create_write_request(
+ struct netfs_io_request *wreq, enum netfs_io_source dest,
+ loff_t start, size_t len, work_func_t worker);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+ bool was_async);
+void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
+
+int netfs_start_io_read(struct inode *inode);
+void netfs_end_io_read(struct inode *inode);
+int netfs_start_io_write(struct inode *inode);
+void netfs_end_io_write(struct inode *inode);
+int netfs_start_io_direct(struct inode *inode);
+void netfs_end_io_direct(struct inode *inode);
/**
* netfs_inode - Get the netfs inode context from the inode
@@ -317,30 +456,44 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode)
* netfs_inode_init - Initialise a netfslib inode context
* @ctx: The netfs inode to initialise
* @ops: The netfs's operations list
+ * @use_zero_point: True to use the zero_point read optimisation
*
* Initialise the netfs library context struct. This is expected to follow on
* directly from the VFS inode struct.
*/
static inline void netfs_inode_init(struct netfs_inode *ctx,
- const struct netfs_request_ops *ops)
+ const struct netfs_request_ops *ops,
+ bool use_zero_point)
{
ctx->ops = ops;
ctx->remote_i_size = i_size_read(&ctx->inode);
+ ctx->zero_point = LLONG_MAX;
+ ctx->flags = 0;
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
+ /* ->releasepage() drives zero_point */
+ if (use_zero_point) {
+ ctx->zero_point = ctx->remote_i_size;
+ mapping_set_release_always(ctx->inode.i_mapping);
+ }
}
/**
* netfs_resize_file - Note that a file got resized
* @ctx: The netfs inode being resized
* @new_i_size: The new file size
+ * @changed_on_server: The change was applied to the server
*
* Inform the netfs lib that a file got resized so that it can adjust its state.
*/
-static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
+ bool changed_on_server)
{
- ctx->remote_i_size = new_i_size;
+ if (changed_on_server)
+ ctx->remote_i_size = new_i_size;
+ if (new_i_size < ctx->zero_point)
+ ctx->zero_point = new_i_size;
}
/**
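Caller-side sketch, illustrative only: the two signature changes above reach filesystems in a mechanical way; init gains a flag selecting the zero_point optimisation, and resize must now state whether the server already knows the new size. myfs_req_ops is a placeholder name:

	static void myfs_init_netfs(struct inode *inode)
	{
		struct netfs_inode *ictx = netfs_inode(inode);

		netfs_inode_init(ictx, &myfs_req_ops, true /* use zero_point */);
	}

	/* after a truncate that has already been applied on the server */
	netfs_resize_file(ictx, new_size, true);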
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 1a4445bf2ab9..5df7340d4dab 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -291,6 +291,7 @@ struct netlink_callback {
u16 answer_flags;
u32 min_dump_alloc;
unsigned int prev_seq, seq;
+ int flags;
bool strict_check;
union {
u8 ctx[48];
@@ -323,6 +324,7 @@ struct netlink_dump_control {
void *data;
struct module *module;
u32 min_dump_alloc;
+ int flags;
};
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
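Sketch with a made-up flag name: the new flags field in netlink_dump_control is presumably carried over into the netlink_callback, letting a dump handler observe options chosen at dump-start time:

	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		if (cb->flags & MY_DUMP_FLAG)	/* MY_DUMP_FLAG is hypothetical */
			/* take the variant code path */;
		return 0;	/* 0 tells netlink the dump is complete */
	}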
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index cd797e00fe35..92de074e63b9 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -124,6 +124,7 @@ struct nfs_client {
char cl_ipaddr[48];
struct net *cl_net;
struct list_head pending_cb_stateids;
+ struct rcu_head rcu;
};
/*
@@ -265,6 +266,7 @@ struct nfs_server {
const struct cred *cred;
bool has_sec_mnt_opts;
struct kobject kobj;
+ struct rcu_head rcu;
};
/* Server capabilities */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 44325c068b6a..3ef4053ea950 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -20,7 +20,6 @@
#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256
-#define NVMF_AUTH_HASH_LEN 64
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
@@ -647,6 +646,7 @@ enum {
NVME_CMD_EFFECTS_NCC = 1 << 2,
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
+ NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
@@ -817,12 +817,6 @@ struct nvme_reservation_status_ext {
struct nvme_registered_ctrl_ext regctl_eds[];
};
-enum nvme_async_event_type {
- NVME_AER_TYPE_ERROR = 0,
- NVME_AER_TYPE_SMART = 1,
- NVME_AER_TYPE_NOTICE = 2,
-};
-
/* I/O commands */
enum nvme_opcode {
@@ -1819,7 +1813,7 @@ struct nvme_command {
};
};
-static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
return cmd->common.opcode == nvme_fabrics_command;
}
@@ -1838,7 +1832,7 @@ struct nvme_error_slot {
__u8 resv2[24];
};
-static inline bool nvme_is_write(struct nvme_command *cmd)
+static inline bool nvme_is_write(const struct nvme_command *cmd)
{
/*
* What a mess...
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index a72661e47faa..9042bca5bb84 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -2,10 +2,7 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
-#include <linux/platform_device.h>
-#include <linux/of_platform.h> /* temporary until merge */
-
-#include <linux/of.h>
+#include <linux/device/driver.h>
struct device;
struct of_device_id;
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index fadfea575485..a2ff1ad48f7f 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -7,11 +7,11 @@
*/
#include <linux/mod_devicetable.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
struct device;
+struct device_node;
struct of_device_id;
+struct platform_device;
/**
* struct of_dev_auxdata - lookup table entry for device names & platform_data
diff --git a/include/linux/pci.h b/include/linux/pci.h
index add9368e6314..7ab0d13672da 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1422,6 +1422,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
@@ -1625,6 +1626,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata);
int pci_cfg_space_size(struct pci_dev *dev);
unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
@@ -2025,6 +2028,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ return 0; }
+static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{ return 0; }
static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{ return 0; }
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 684efaeca07c..3f68b8239bb1 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -30,6 +30,7 @@
#include <linux/refcount.h>
#include <linux/atomic.h>
+#include <net/eee.h>
#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
@@ -54,6 +55,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
@@ -65,6 +67,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
+#define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features)
extern const int phy_basic_ports_array[3];
extern const int phy_fibre_port_array[1];
@@ -329,6 +332,7 @@ struct mdio_bus_stats {
* struct phy_package_shared - Shared information in PHY packages
* @base_addr: Base PHY address of PHY package used to combine PHYs
* in one package and for offset calculation of phy_package_read/write
* @np: Pointer to the device node if the PHY package is defined in DT
* @refcnt: Number of PHYs connected to this shared data
* @flags: Initialization of PHY package
* @priv_size: Size of the shared private data @priv
@@ -340,6 +344,8 @@ struct mdio_bus_stats {
*/
struct phy_package_shared {
u8 base_addr;
+ /* With the PHY package defined in DT, this points to the PHY package node */
+ struct device_node *np;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;
@@ -589,6 +595,8 @@ struct macsec_ops;
* @supported_eee: supported PHY EEE linkmodes
* @advertising_eee: Currently advertised EEE linkmodes
* @eee_enabled: Flag indicating whether the EEE feature is enabled
+ * @enable_tx_lpi: When True, MAC should transmit LPI to PHY
+ * @eee_cfg: User configuration of EEE
* @lp_advertising: Current link partner advertised linkmodes
* @host_interfaces: PHY interface modes supported by host
* @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
@@ -638,7 +646,7 @@ struct phy_device {
/* Information about the PHY type */
/* And management functions */
- struct phy_driver *drv;
+ const struct phy_driver *drv;
struct device_link *devlink;
@@ -698,7 +706,7 @@ struct phy_device {
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
- /* used for eee validation */
+ /* used for eee validation and configuration */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
bool eee_enabled;
@@ -708,6 +716,8 @@ struct phy_device {
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
+ bool enable_tx_lpi;
+ struct eee_config eee_cfg;
#ifdef CONFIG_LED_TRIGGER_PHY
struct phy_led_trigger *phy_led_triggers;
@@ -852,6 +862,15 @@ struct phy_plca_status {
bool pst;
};
+/* Modes for PHY LED configuration */
+enum phy_led_modes {
+ PHY_LED_ACTIVE_LOW = 0,
+ PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1,
+
+ /* keep it last */
+ __PHY_LED_MODES_NUM,
+};
+
/**
* struct phy_led: An LED driven by the PHY
*
@@ -1145,6 +1164,19 @@ struct phy_driver {
int (*led_hw_control_get)(struct phy_device *dev, u8 index,
unsigned long *rules);
+ /**
+ * @led_polarity_set: Set the LED polarity modes
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+ * @modes: bitmap of LED polarity modes
+ *
+ * Configure LED with all the required polarity modes in @modes
+ * to make it correctly turn ON or OFF.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*led_polarity_set)(struct phy_device *dev, int index,
+ unsigned long modes);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
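Driver-side sketch, illustrative: a PHY driver implementing the new callback translates the generic polarity bits into device-specific register writes; myphy_write_led_reg() is a stand-in for the real accessor:

	static int myphy_led_polarity_set(struct phy_device *dev, int index,
					  unsigned long modes)
	{
		bool active_low = false;
		u32 mode;

		for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
			switch (mode) {
			case PHY_LED_ACTIVE_LOW:
				active_low = true;
				break;
			default:
				return -EINVAL;
			}
		}

		return myphy_write_led_reg(dev, index, active_low);
	}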
@@ -1851,7 +1883,7 @@ int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
/* Clause 37 */
int genphy_c37_config_aneg(struct phy_device *phydev);
-int genphy_c37_read_status(struct phy_device *phydev);
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed);
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -1886,9 +1918,9 @@ int genphy_c45_plca_get_status(struct phy_device *phydev,
int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
unsigned long *lp, bool *is_enabled);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
- struct ethtool_eee *data);
+ struct ethtool_keee *data);
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
- struct ethtool_eee *data);
+ struct ethtool_keee *data);
int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
@@ -1938,8 +1970,10 @@ int phy_get_rate_matching(struct phy_device *phydev,
void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
void phy_advertise_supported(struct phy_device *phydev);
+void phy_advertise_eee_all(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
void phy_support_asym_pause(struct phy_device *phydev);
+void phy_support_eee(struct phy_device *phydev);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg);
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
@@ -1966,8 +2000,8 @@ int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data);
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
void phy_ethtool_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
@@ -1977,9 +2011,12 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
void phy_package_leave(struct phy_device *phydev);
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
int base_addr, size_t priv_size);
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
@@ -2100,7 +2137,7 @@ static inline bool phy_package_probe_once(struct phy_device *phydev)
return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
}
-extern struct bus_type mdio_bus_type;
+extern const struct bus_type mdio_bus_type;
struct mdio_board_info {
const char *bus_id;
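Sketch for the new OF-aware package-join helpers declared above, illustrative: unlike phy_package_join(), the base address is taken from the PHY package node in the device tree rather than passed by the caller. struct my_shared is a placeholder:

	/* in a PHY driver probe() */
	ret = devm_of_phy_package_join(&phydev->mdio.dev, phydev,
				       sizeof(struct my_shared));
	if (ret)
		return ret;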
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index d589f89c612c..9a57deefcb07 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -480,9 +480,6 @@ void pcs_disable(struct phylink_pcs *pcs);
* negotiation completion state in @state->an_complete, and link up state
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
- *
- * When present, this overrides pcs_get_state() in &struct
- * phylink_pcs_ops.
*/
void pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state);
@@ -584,8 +581,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_get_eee_err(struct phylink *);
int phylink_init_eee(struct phylink *, bool);
-int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
-int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
+int phylink_ethtool_get_eee(struct phylink *link, struct ethtool_keee *eee);
+int phylink_ethtool_set_eee(struct phylink *link, struct ethtool_keee *eee);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
int phylink_speed_down(struct phylink *pl, bool sync);
int phylink_speed_up(struct phylink *pl);
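MAC-driver sketch, illustrative: the ethtool_eee to ethtool_keee migration is mechanical at the phylink boundary, since only the argument type changes:

	static int my_get_eee(struct net_device *ndev, struct ethtool_keee *eee)
	{
		struct my_priv *priv = netdev_priv(ndev);	/* hypothetical priv */

		return phylink_ethtool_get_eee(priv->phylink, eee);
	}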
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index f922a192fe58..ec99b7b73d1d 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 201 Broadcom Corporation
+ * Copyright (c) 2016 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h
index 8a5f9f0b2c52..724e1f57b81f 100644
--- a/include/linux/platform_data/mdio-bcm-unimac.h
+++ b/include/linux/platform_data/mdio-bcm-unimac.h
@@ -1,11 +1,14 @@
#ifndef __MDIO_BCM_UNIMAC_PDATA_H
#define __MDIO_BCM_UNIMAC_PDATA_H
+struct clk;
+
struct unimac_mdio_pdata {
u32 phy_mask;
int (*wait_func)(void *data);
void *wait_func_data;
const char *bus_name;
+ struct clk *clk;
};
#define UNIMAC_MDIO_DRV_NAME "unimac-mdio"
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index f177416635a2..8c659db4da6b 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -33,6 +33,7 @@ enum ksz_chip_id {
KSZ9897_CHIP_ID = 0x00989700,
KSZ9893_CHIP_ID = 0x00989300,
KSZ9563_CHIP_ID = 0x00956300,
+ KSZ8567_CHIP_ID = 0x00856700,
KSZ9567_CHIP_ID = 0x00956700,
LAN9370_CHIP_ID = 0x00937000,
LAN9371_CHIP_ID = 0x00937100,
diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h
index c510734405bb..89d0ec6f7d46 100644
--- a/include/linux/platform_data/net-cw1200.h
+++ b/include/linux/platform_data/net-cw1200.h
@@ -14,8 +14,6 @@ struct cw1200_platform_data_spi {
/* All others are optional */
bool have_5ghz;
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata,
@@ -30,8 +28,6 @@ struct cw1200_platform_data_sdio {
/* All others are optional */
bool have_5ghz;
bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int irq; /* IRQ line or 0 to use SDIO IRQ */
int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 27a7dad17eef..1f0ee2459f2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -92,4 +92,7 @@
/********** VFS **********/
#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+
#endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 7c8d65414a70..7d8025fb74b7 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -83,5 +83,6 @@ struct bq27xxx_device_info {
void bq27xxx_battery_update(struct bq27xxx_device_info *di);
int bq27xxx_battery_setup(struct bq27xxx_device_info *di);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di);
+extern const struct dev_pm_ops bq27xxx_battery_battery_pm_ops;
#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index de407e7c3b55..0b2a89854440 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -65,6 +65,7 @@ struct proc_fs_info {
kgid_t pid_gid;
enum proc_hidepid hide_pid;
enum proc_pidonly pidonly;
+ struct rcu_head rcu;
};
static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 1ef4e0f9bd2a..6e4b8206c7d0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -200,6 +200,7 @@ struct ptp_clock;
enum ptp_clock_events {
PTP_CLOCK_ALARM,
PTP_CLOCK_EXTTS,
+ PTP_CLOCK_EXTOFF,
PTP_CLOCK_PPS,
PTP_CLOCK_PPSUSR,
};
@@ -210,6 +211,7 @@ enum ptp_clock_events {
* @type: One of the ptp_clock_events enumeration values.
* @index: Identifies the source of the event.
* @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
+ * @offset: When the event occurred (%PTP_CLOCK_EXTOFF only).
* @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
*/
@@ -218,6 +220,7 @@ struct ptp_clock_event {
int index;
union {
u64 timestamp;
+ s64 offset;
struct pps_event_time pps_times;
};
};
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index eaaef3ffec22..90507d4afcd6 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -393,6 +393,10 @@ static inline void user_single_step_report(struct pt_regs *regs)
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
+#ifndef exception_ip
+#define exception_ip(x) instruction_pointer(x)
+#endif
+
extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
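Sketch, illustrative: exception_ip() lets an architecture report the true faulting instruction address where it can differ from the saved instruction pointer (branch delay slots being the classic case); generic code may call it unconditionally and gets the instruction_pointer() fallback everywhere else:

	unsigned long ip = exception_ip(regs);	/* in a fault-reporting path */

	/* an arch override would live in its asm/ptrace.h, e.g. (hypothetical): */
	#define exception_ip(regs) my_arch_exception_epc(regs)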
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 410529fca18b..cdfc897f1e3c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -47,6 +47,7 @@ extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
+extern atomic_t dev_unreg_count;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;
@@ -171,4 +172,6 @@ rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
}
+void netdev_set_operstate(struct net_device *dev, int newstate);
+
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cdb8ea53c365..ffe8f618ab86 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -920,7 +920,7 @@ struct task_struct {
unsigned sched_rt_mutex:1;
#endif
- /* Bit to tell LSMs we're in execve(): */
+ /* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
diff --git a/include/linux/security.h b/include/linux/security.h
index d0eb20f90b26..15804af54f37 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
+#include <linux/bpf.h>
#include <uapi/linux/lsm.h>
struct linux_binprm;
@@ -2064,15 +2065,22 @@ static inline void securityfs_remove(struct dentry *dentry)
union bpf_attr;
struct bpf_map;
struct bpf_prog;
-struct bpf_prog_aux;
+struct bpf_token;
#ifdef CONFIG_SECURITY
extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
extern int security_bpf_prog(struct bpf_prog *prog);
-extern int security_bpf_map_alloc(struct bpf_map *map);
+extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token);
extern void security_bpf_map_free(struct bpf_map *map);
-extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux);
-extern void security_bpf_prog_free(struct bpf_prog_aux *aux);
+extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token);
+extern void security_bpf_prog_free(struct bpf_prog *prog);
+extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path);
+extern void security_bpf_token_free(struct bpf_token *token);
+extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
#else
static inline int security_bpf(int cmd, union bpf_attr *attr,
unsigned int size)
@@ -2090,7 +2098,8 @@ static inline int security_bpf_prog(struct bpf_prog *prog)
return 0;
}
-static inline int security_bpf_map_alloc(struct bpf_map *map)
+static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
{
return 0;
}
@@ -2098,13 +2107,33 @@ static inline int security_bpf_map_alloc(struct bpf_map *map)
static inline void security_bpf_map_free(struct bpf_map *map)
{ }
-static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
+static inline int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
{
return 0;
}
-static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
+static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
+
+static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+{
+ return 0;
+}
+
+static inline void security_bpf_token_free(struct bpf_token *token)
+{ }
+
+static inline int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
+{
+ return 0;
+}
+
+static inline int security_bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
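Call-shape sketch, following the standard LSM check-then-bail pattern rather than quoting actual call sites: the new token hooks slot into the BPF syscall paths the same way the existing map and prog hooks do:

	/* inside a simplified, hypothetical token creation path */
	err = security_bpf_token_create(token, attr, &path);
	if (err)
		return err;	/* an LSM vetoed token creation */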
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index c44f4b47b945..fe41da005970 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -2,7 +2,10 @@
#ifndef _LINUX_SEQ_BUF_H
#define _LINUX_SEQ_BUF_H
-#include <linux/fs.h>
+#include <linux/bug.h>
+#include <linux/minmax.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
/*
* Trace sequences are used to allow a function to call several other functions
@@ -10,7 +13,7 @@
*/
/**
- * seq_buf - seq buffer structure
+ * struct seq_buf - seq buffer structure
* @buffer: pointer to the buffer
* @size: size of the buffer
* @len: the amount of data inside the buffer
@@ -77,10 +80,10 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
- * seq_buf_str - get %NUL-terminated C string from seq_buf
+ * seq_buf_str - get NUL-terminated C string from seq_buf
* @s: the seq_buf handle
*
- * This makes sure that the buffer in @s is nul terminated and
+ * This makes sure that the buffer in @s is NUL-terminated and
* safe to read as a string.
*
* Note, if this is called when the buffer has overflowed, then
@@ -90,7 +93,7 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
* After this function is called, s->buffer is safe to use
* in string operations.
*
- * Returns @s->buf after making sure it is terminated.
+ * Returns: @s->buf after making sure it is terminated.
*/
static inline const char *seq_buf_str(struct seq_buf *s)
{
@@ -110,7 +113,7 @@ static inline const char *seq_buf_str(struct seq_buf *s)
* @s: the seq_buf handle
* @bufp: the beginning of the buffer is stored here
*
- * Return the number of bytes available in the buffer, or zero if
+ * Returns: the number of bytes available in the buffer, or zero if
* there's no space.
*/
static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
@@ -132,7 +135,7 @@ static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
* @num: the number of bytes to commit
*
* Commit @num bytes of data written to a buffer previously acquired
- * by seq_buf_get. To signal an error condition, or that the data
+ * by seq_buf_get_buf(). To signal an error condition, or that the data
* didn't fit in the available space, pass a negative @num value.
*/
static inline void seq_buf_commit(struct seq_buf *s, int num)
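Usage sketch pairing the two helpers as the corrected kernel-doc describes, illustrative: acquire the raw buffer, write into it, then commit the byte count, or a negative value when the data did not fit:

	char *p;
	size_t avail = seq_buf_get_buf(&s, &p);
	int n = snprintf(p, avail, "cpu=%d\n", cpu);

	/* snprintf() returns the would-be length; n >= avail means truncation */
	seq_buf_commit(&s, (size_t)n < avail ? n : -1);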
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 536b2581d3e2..55b1f3ba48ac 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -748,8 +748,17 @@ struct uart_driver {
void uart_write_wakeup(struct uart_port *port);
-#define __uart_port_tx(uport, ch, tx_ready, put_char, tx_done, for_test, \
- for_post) \
+/**
+ * enum UART_TX_FLAGS -- flags for uart_port_tx_flags()
+ *
+ * @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer
+ */
+enum UART_TX_FLAGS {
+ UART_TX_NOSTOP = BIT(0),
+};
+
+#define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \
+ for_test, for_post) \
({ \
struct uart_port *__port = (uport); \
struct circ_buf *xmit = &__port->state->xmit; \
@@ -777,7 +786,7 @@ void uart_write_wakeup(struct uart_port *port);
if (pending < WAKEUP_CHARS) { \
uart_write_wakeup(__port); \
\
- if (pending == 0) \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
__port->ops->stop_tx(__port); \
} \
\
@@ -812,7 +821,7 @@ void uart_write_wakeup(struct uart_port *port);
*/
#define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \
unsigned int __count = (count); \
- __uart_port_tx(port, ch, tx_ready, put_char, tx_done, __count, \
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \
__count--); \
})
@@ -826,8 +835,21 @@ void uart_write_wakeup(struct uart_port *port);
* See uart_port_tx_limited() for more details.
*/
#define uart_port_tx(port, ch, tx_ready, put_char) \
- __uart_port_tx(port, ch, tx_ready, put_char, ({}), true, ({}))
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({}))
+
+/**
+ * uart_port_tx_flags -- transmit helper for uart_port with flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({}))
/*
* Baud rate helpers.
*/
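Driver-side sketch, illustrative: a UART driver that must keep the transmitter running on an empty buffer (for instance when stop_tx() is handled elsewhere) can now opt out of the automatic stop. mychip_tx_ready() and mychip_putc() stand in for real hardware accessors:

	static void mychip_tx_chars(struct uart_port *port)
	{
		u8 ch;

		uart_port_tx_flags(port, ch, UART_TX_NOSTOP,
				   mychip_tx_ready(port),
				   mychip_putc(port, ch));
	}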
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2dde34c29203..d0508f90bed5 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
#endif
#include <net/net_debug.h>
#include <net/dropreason-core.h>
+#include <net/netmem.h>
/**
* DOC: skb checksums
@@ -359,7 +360,11 @@ extern int sysctl_max_skb_frags;
*/
#define GSO_BY_FRAGS 0xFFFF
-typedef struct bio_vec skb_frag_t;
+typedef struct skb_frag {
+ netmem_ref netmem;
+ unsigned int len;
+ unsigned int offset;
+} skb_frag_t;
/**
* skb_frag_size() - Returns the size of a skb fragment
@@ -367,7 +372,7 @@ typedef struct bio_vec skb_frag_t;
*/
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
- return frag->bv_len;
+ return frag->len;
}
/**
@@ -377,7 +382,7 @@ static inline unsigned int skb_frag_size(const skb_frag_t *frag)
*/
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
- frag->bv_len = size;
+ frag->len = size;
}
/**
@@ -387,7 +392,7 @@ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
*/
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
- frag->bv_len += delta;
+ frag->len += delta;
}
/**
@@ -397,7 +402,7 @@ static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
- frag->bv_len -= delta;
+ frag->len -= delta;
}
/**
@@ -417,7 +422,7 @@ static inline bool skb_frag_must_loop(struct page *p)
* skb_frag_foreach_page - loop over pages in a fragment
*
* @f: skb frag to operate on
- * @f_off: offset from start of f->bv_page
+ * @f_off: offset from start of f->netmem
* @f_len: length from f_off to loop over
* @p: (temp var) current page
* @p_off: (temp var) offset from start of current page,
@@ -817,9 +822,9 @@ typedef unsigned char *sk_buff_data_t;
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
* @mono_delivery_time: When set, skb->tstamp has the
- * delivery_time in mono clock base (i.e. EDT). Otherwise, the
- * skb->tstamp has the (rcv) timestamp at ingress and
- * delivery_time at egress.
+ * delivery_time in mono clock base (i.e., EDT) or a clock base chosen
+ * by SO_TXTIME. If zero, skb->tstamp has the (rcv) timestamp at
+ * ingress.
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
* @alloc_cpu: CPU which did the skb allocation.
@@ -1266,7 +1271,6 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_cache;
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
@@ -2429,22 +2433,37 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}
+static inline void skb_frag_fill_netmem_desc(skb_frag_t *frag,
+ netmem_ref netmem, int off,
+ int size)
+{
+ frag->netmem = netmem;
+ frag->offset = off;
+ skb_frag_size_set(frag, size);
+}
+
static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
struct page *page,
int off, int size)
{
- frag->bv_page = page;
- frag->bv_offset = off;
- skb_frag_size_set(frag, size);
+ skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size);
+}
+
+static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo,
+ int i, netmem_ref netmem,
+ int off, int size)
+{
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, netmem, off, size);
}
static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
int i, struct page *page,
int off, int size)
{
- skb_frag_t *frag = &shinfo->frags[i];
-
- skb_frag_fill_page_desc(frag, page, off, size);
+ __skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,
+ size);
}
/**
@@ -2460,10 +2479,10 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
}
/**
- * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * __skb_fill_netmem_desc - initialise a fragment in an skb
* @skb: buffer containing fragment to be initialised
- * @i: paged fragment index to initialise
- * @page: the page to use for this fragment
+ * @i: fragment index to initialise
+ * @netmem: the netmem to use for this fragment
* @off: the offset to the data with @page
* @size: the length of the data
*
@@ -2472,10 +2491,12 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
*
* Does not take any additional reference on the fragment.
*/
-static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
+static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
{
- __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
+ struct page *page = netmem_to_page(netmem);
+
+ __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
/* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
@@ -2483,7 +2504,20 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
*/
page = compound_head(page);
if (page_is_pfmemalloc(page))
- skb->pfmemalloc = true;
+ skb->pfmemalloc = true;
+}
+
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
+}
+
+static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, netmem, off, size);
+ skb_shinfo(skb)->nr_frags = i + 1;
}
/**
@@ -2503,8 +2537,7 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
- __skb_fill_page_desc(skb, i, page, off, size);
- skb_shinfo(skb)->nr_frags = i + 1;
+ skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
}
/**
@@ -2528,8 +2561,16 @@ static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
shinfo->nr_frags = i + 1;
}
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize);
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize);
+
+static inline void skb_add_rx_frag(struct sk_buff *skb, int i,
+ struct page *page, int off, int size,
+ unsigned int truesize)
+{
+ skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size,
+ truesize);
+}
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize);
@@ -2642,6 +2683,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->data -= len;
skb->len += len;
return skb->data;
@@ -2650,6 +2693,8 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->len -= len;
if (unlikely(skb->len < skb->data_len)) {
#if defined(CONFIG_DEBUG_NET)
@@ -2674,6 +2719,8 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline enum skb_drop_reason
pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
if (likely(len <= skb_headlen(skb)))
return SKB_NOT_DROPPED_YET;
@@ -2846,6 +2893,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header += offset;
}
+static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
+{
+ return skb->inner_network_header > 0;
+}
+
static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
@@ -2983,6 +3035,7 @@ static inline int skb_transport_offset(const struct sk_buff *skb)
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->transport_header - skb->network_header;
}
@@ -3378,7 +3431,7 @@ static inline void skb_propagate_pfmemalloc(const struct page *page,
*/
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
- return frag->bv_offset;
+ return frag->offset;
}
/**
@@ -3388,7 +3441,7 @@ static inline unsigned int skb_frag_off(const skb_frag_t *frag)
*/
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
- frag->bv_offset += delta;
+ frag->offset += delta;
}
/**
@@ -3398,7 +3451,7 @@ static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
- frag->bv_offset = offset;
+ frag->offset = offset;
}
/**
@@ -3409,7 +3462,7 @@ static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
static inline void skb_frag_off_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_offset = fragfrom->bv_offset;
+ fragto->offset = fragfrom->offset;
}
/**
@@ -3420,7 +3473,7 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto,
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
- return frag->bv_page;
+ return netmem_to_page(frag->netmem);
}
/**
@@ -3446,6 +3499,10 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom);
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog);
bool napi_pp_put_page(struct page *page, bool napi_safe);
static inline void
@@ -3524,7 +3581,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
static inline void skb_frag_page_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_page = fragfrom->bv_page;
+ fragto->netmem = fragfrom->netmem;
}
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
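Conversion sketch, illustrative: with skb_frag_t now carrying a netmem_ref instead of a bio_vec, page-based callers keep working through the wrappers above; the two fills below are equivalent:

	skb_frag_t frag;

	/* legacy page-based fill, unchanged for callers */
	skb_frag_fill_page_desc(&frag, page, 0, len);

	/* what it becomes on the netmem side */
	skb_frag_fill_netmem_desc(&frag, page_to_netmem(page), 0, len);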
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 888a4b217829..e65ec3fd2799 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -505,12 +505,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
-static inline bool sk_is_udp(const struct sock *sk)
-{
- return sk->sk_type == SOCK_DGRAM &&
- sk->sk_protocol == IPPROTO_UDP;
-}
-
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
#define BPF_F_STRPARSER (1UL << 1)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 471fe2ff9066..600fbd5daf68 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -21,7 +21,7 @@
#include <uapi/linux/spi/spi.h>
/* Max no. of CS supported per spi device */
-#define SPI_CS_CNT_MAX 4
+#define SPI_CS_CNT_MAX 16
struct dma_chan;
struct software_node;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index eaac8b0da25b..3fcd20de6ca8 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -449,6 +449,12 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
return raw_spin_is_contended(&lock->rlock);
}
+#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+
+#else /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPTION,
@@ -480,12 +486,6 @@ static inline int rwlock_needbreak(rwlock_t *lock)
#endif
}
-#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
-
-#else /* !CONFIG_PREEMPT_RT */
-# include <linux/spinlock_rt.h>
-#endif /* CONFIG_PREEMPT_RT */
-
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index dee5ad6e48c5..dfa1828cd756 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -127,6 +127,7 @@ struct stmmac_est {
u32 gcl_unaligned[EST_GCL];
u32 gcl[EST_GCL];
u32 gcl_size;
+ u32 max_sdu[MTL_MAX_TX_QUEUES];
};
struct stmmac_rxq_cfg {
diff --git a/include/linux/string.h b/include/linux/string.h
index ce137830a0b9..ab148d8dbfc1 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -66,9 +66,6 @@ extern char * strcpy(char *,const char *);
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *, const char *, size_t);
-#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *, const char *, size_t);
#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4db00ddad261..8d28f6091a32 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -549,6 +549,11 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
+static inline int swapcache_prepare(swp_entry_t swp)
+{
+ return 0;
+}
+
static inline void swap_free(swp_entry_t swp)
{
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index cdba4d0c6d4a..77eb9b0e7685 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -128,6 +128,7 @@ struct mnt_id_req;
#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
#define __SC_CAST(t, a) (__force t) a
+#define __SC_TYPE(t, a) t
#define __SC_ARGS(t, a) a
#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 89b290d8c8dc..55399ee2a57e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -221,8 +221,10 @@ struct tcp_sock {
u32 lost_out; /* Lost packets */
u32 sacked_out; /* SACK'd packets */
u16 tcp_header_len; /* Bytes of tcp header to send */
+ u8 scaling_ratio; /* see tcp_win_from_space() */
u8 chrono_type : 2, /* current chronograph type */
repair : 1,
+ tcp_usec_ts : 1, /* TSval values in usec */
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
__cacheline_group_end(tcp_sock_read_txrx);
@@ -262,10 +264,10 @@ struct tcp_sock {
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
u32 lsndtime;
u32 mdev_us; /* medium deviation */
+ u32 rtt_seq; /* sequence number to update rttvar */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
u64 tcp_mstamp; /* most recent packet received/sent */
- u32 rtt_seq; /* sequence number to update rttvar */
struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
struct sk_buff *highest_sack; /* skb just after the highest
* skb with SACKed bit set
@@ -302,7 +304,7 @@ struct tcp_sock {
__cacheline_group_end(tcp_sock_write_txrx);
/* RX read-write hotpath cache lines */
- __cacheline_group_begin(tcp_sock_write_rx);
+ __cacheline_group_begin(tcp_sock_write_rx) __aligned(8);
u64 bytes_received;
/* RFC4898 tcpEStatsAppHCThruOctetsReceived
* sum(delta(rcv_nxt)), or how many bytes
@@ -348,11 +350,9 @@ struct tcp_sock {
u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
* total number of DSACK blocks received
*/
- u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 compressed_ack_rcv_nxt;
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- u8 scaling_ratio; /* see tcp_win_from_space() */
/* Information of the most recently (s)acked skb */
struct tcp_rack {
u64 mstamp; /* (Re)sent time of the skb */
@@ -368,8 +368,7 @@ struct tcp_sock {
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
- tcp_usec_ts:1, /* TSval values in usec */
- unused:4;
+ unused:5;
u8 thin_lto : 1,/* Use linear timeouts for thin streams */
recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
@@ -384,12 +383,12 @@ struct tcp_sock {
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
+ u8 keepalive_probes; /* num of allowed keep alive probes */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
/* RTT measurement */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
- u8 keepalive_probes; /* num of allowed keep alive probes */
u32 reord_seen; /* number of data packet reordering events */
/*
@@ -402,6 +401,7 @@ struct tcp_sock {
u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
+ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
struct hrtimer pacing_timer;
struct hrtimer compressed_ack_timer;
@@ -477,8 +477,8 @@ struct tcp_sock {
bool is_mptcp;
#endif
#if IS_ENABLED(CONFIG_SMC)
- bool (*smc_hs_congested)(const struct sock *sk);
bool syn_smc; /* SYN includes SMC */
+ bool (*smc_hs_congested)(const struct sock *sk);
#endif
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index d04188714dca..3748e82b627b 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -92,6 +92,9 @@ struct udp_sock {
/* This field follows the rcvbuf value, and is touched by udp_recvmsg */
int forward_threshold;
+
+ /* Cache friendly copy of sk->sk_peek_off >= 0 */
+ bool peeking_with_offset;
};
#define udp_test_bit(nr, sk) \
@@ -109,6 +112,13 @@ struct udp_sock {
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
+static inline int udp_set_peek_off(struct sock *sk, int val)
+{
+ sk_set_peek_off(sk, val);
+ WRITE_ONCE(udp_sk(sk)->peeking_with_offset, val >= 0);
+ return 0;
+}
+
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
udp_assign_bit(NO_CHECK6_TX, sk, val);
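A minimal sketch of how a receive fast path might consult the new cached flag instead of reading sk->sk_peek_off directly; udp_example_peek_off() is invented for illustration and is not part of the kernel API:

#include <linux/udp.h>
#include <net/sock.h>

static int udp_example_peek_off(struct sock *sk)
{
	/* Hot path: one cache-friendly boolean load; the setsockopt
	 * path keeps it in sync via udp_set_peek_off() above.
	 */
	if (READ_ONCE(udp_sk(sk)->peeking_with_offset))
		return sk_peek_offset(sk, MSG_PEEK);
	return 0;
}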
diff --git a/include/linux/uio.h b/include/linux/uio.h
index bea9c89922d9..00cebe2b70de 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -40,7 +40,6 @@ struct iov_iter_state {
struct iov_iter {
u8 iter_type;
- bool copy_mc;
bool nofault;
bool data_source;
size_t iov_offset;
@@ -248,22 +247,8 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
-static inline void iov_iter_set_copy_mc(struct iov_iter *i)
-{
- i->copy_mc = true;
-}
-
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
- return i->copy_mc;
-}
#else
#define _copy_mc_to_iter _copy_to_iter
-static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
- return false;
-}
#endif
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
@@ -355,7 +340,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_UBUF,
- .copy_mc = false,
.data_source = direction,
.ubuf = buf,
.count = count,
diff --git a/include/linux/units.h b/include/linux/units.h
index 45110daaf8d3..00e15de33eca 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -24,10 +24,13 @@
#define NANOHZ_PER_HZ 1000000000UL
#define MICROHZ_PER_HZ 1000000UL
#define MILLIHZ_PER_HZ 1000UL
+
#define HZ_PER_KHZ 1000UL
-#define KHZ_PER_MHZ 1000UL
#define HZ_PER_MHZ 1000000UL
+#define KHZ_PER_MHZ 1000UL
+#define KHZ_PER_GHZ 1000000UL
+
#define MILLIWATT_PER_WATT 1000UL
#define MICROWATT_PER_MILLIWATT 1000UL
#define MICROWATT_PER_WATT 1000000UL
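An illustrative conversion using the reordered frequency helpers; the function below is invented for this sketch:

#include <linux/units.h>

static inline unsigned long example_ghz_to_hz(unsigned long ghz)
{
	/* 1 GHz = 10^6 kHz = 10^9 Hz */
	return ghz * KHZ_PER_GHZ * HZ_PER_KHZ;
}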
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index a771ccc038ac..6532beb587b1 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -236,7 +236,6 @@ struct usb_ep {
unsigned max_streams:16;
unsigned mult:2;
unsigned maxburst:5;
- unsigned fifo_mode:1;
u8 address;
const struct usb_endpoint_descriptor *desc;
const struct usb_ss_ep_comp_descriptor *comp_desc;
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 6d0a14f7019d..453736fd1d23 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -60,7 +60,7 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
- unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */
+ unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */
/*
* When writeback IOs are bounced through async layers, only the
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 01fa15506286..170fdee6339c 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -16,6 +16,7 @@
* @WWAN_PORT_QCDM: Qcom Modem diagnostic interface
* @WWAN_PORT_FIREHOSE: XML based command protocol
* @WWAN_PORT_XMMRPC: Control protocol for Intel XMM modems
+ * @WWAN_PORT_FASTBOOT: Fastboot protocol control
*
* @WWAN_PORT_MAX: Highest supported port type
* @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
@@ -28,6 +29,7 @@ enum wwan_port_type {
WWAN_PORT_QCDM,
WWAN_PORT_FIREHOSE,
WWAN_PORT_XMMRPC,
+ WWAN_PORT_FASTBOOT,
/* Add new port types above this line */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index e1e5e72b901e..77ee0c657e2c 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -201,6 +201,8 @@ int tcf_idr_release(struct tc_action *a, bool bind);
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
+#define NET_ACT_ALIAS_PREFIX "net-act-"
+#define MODULE_ALIAS_NET_ACT(kind) MODULE_ALIAS(NET_ACT_ALIAS_PREFIX kind)
int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 61ebe723ee4d..9d06eb945509 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -8,8 +8,9 @@
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
-#define TEMP_VALID_LIFETIME (7*86400)
-#define TEMP_PREFERRED_LIFETIME (86400)
+#define TEMP_VALID_LIFETIME (7*86400) /* 1 week */
+#define TEMP_PREFERRED_LIFETIME (86400) /* 24 hours */
+#define REGEN_MIN_ADVANCE (2) /* 2 seconds */
#define REGEN_MAX_RETRY (3)
#define MAX_DESYNC_FACTOR (600)
@@ -416,7 +417,7 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
if (unlikely(!idev))
return true;
- return !!idev->cnf.ignore_routes_with_linkdown;
+ return !!READ_ONCE(idev->cnf.ignore_routes_with_linkdown);
}
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 49c4640027d8..627ea8e2d915 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -8,21 +8,29 @@
#include <linux/refcount.h>
#include <net/sock.h>
+#if IS_ENABLED(CONFIG_UNIX)
+struct unix_sock *unix_get_socket(struct file *filp);
+#else
+static inline struct unix_sock *unix_get_socket(struct file *filp)
+{
+ return NULL;
+}
+#endif
+
+extern spinlock_t unix_gc_lock;
+extern unsigned int unix_tot_inflight;
+
void unix_inflight(struct user_struct *user, struct file *fp);
void unix_notinflight(struct user_struct *user, struct file *fp);
-void unix_destruct_scm(struct sk_buff *skb);
-void io_uring_destruct_scm(struct sk_buff *skb);
void unix_gc(void);
-void wait_for_unix_gc(void);
-struct sock *unix_get_socket(struct file *filp);
+void wait_for_unix_gc(struct scm_fp_list *fpl);
+
struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_MOD (256 - 1)
#define UNIX_HASH_SIZE (256 * 2)
#define UNIX_HASH_BITS 8
-extern unsigned int unix_tot_inflight;
-
struct unix_address {
refcount_t refcnt;
int len;
@@ -46,12 +54,6 @@ struct scm_stat {
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
-#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
-#define unix_state_lock_nested(s) \
- spin_lock_nested(&unix_sk(s)->lock, \
- SINGLE_DEPTH_NESTING)
-
/* The AF_UNIX socket */
struct unix_sock {
/* WARNING: sk has to be the first member */
@@ -61,7 +63,7 @@ struct unix_sock {
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
- atomic_long_t inflight;
+ unsigned long inflight;
spinlock_t lock;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
@@ -77,6 +79,20 @@ struct unix_sock {
#define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
#define unix_peer(sk) (unix_sk(sk)->peer)
+#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
+#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
+enum unix_socket_lock_class {
+ U_LOCK_NORMAL,
+ U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
+ U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+};
+
+static inline void unix_state_lock_nested(struct sock *sk,
+ enum unix_socket_lock_class subclass)
+{
+ spin_lock_nested(&unix_sk(sk)->lock, subclass);
+}
+
#define peer_wait peer_wq.wait
long unix_inq_len(struct sock *sk);
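A sketch of the double-lock pattern the new subclass enum supports, loosely modeled on unix_state_double_lock(); example_double_lock() is illustrative only:

static void example_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (sk1 == sk2) {
		unix_state_lock(sk1);
		return;
	}
	/* Impose a stable order so the two acquisitions cannot invert. */
	if (sk1 > sk2)
		swap(sk1, sk2);
	unix_state_lock(sk1);
	/* The second lock needs its own lockdep subclass to avoid a
	 * false deadlock report.
	 */
	unix_state_lock_nested(sk2, U_LOCK_SECOND);
}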
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index c5e57c6bd873..9ce5ac2bfbad 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -54,6 +54,8 @@ typedef enum {
AD_MUX_DETACHED, /* mux machine */
AD_MUX_WAITING, /* mux machine */
AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING, /* mux machine */
+ AD_MUX_DISTRIBUTING, /* mux machine */
AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 69292ecc0325..473a0147769e 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -76,6 +76,7 @@ enum {
BOND_OPT_MISSED_MAX,
BOND_OPT_NS_TARGETS,
BOND_OPT_PRIO,
+ BOND_OPT_COUPLED_CONTROL,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 5b8b1b644a2d..b61fb1aa3a56 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -148,6 +148,7 @@ struct bond_params {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
#endif
+ int coupled_control;
/* 2 bytes of padding : see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
@@ -167,6 +168,7 @@ struct slave {
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1, /* indicates inactive slave */
+ rx_disabled:1, /* indicates whether slave's Rx is disabled */
should_notify:1, /* indicates whether the state changed */
should_notify_link:1; /* indicates whether the link changed */
u8 duplex;
@@ -568,6 +570,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave,
bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 1;
+}
+
+static inline void bond_set_slave_tx_disabled_flags(struct slave *slave,
+ bool notify)
+{
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
}
static inline void bond_set_slave_active_flags(struct slave *slave,
@@ -575,6 +585,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave,
{
bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
+ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
+ slave->rx_disabled = 0;
+}
+
+static inline void bond_set_slave_rx_enabled_flags(struct slave *slave,
+ bool notify)
+{
+ slave->rx_disabled = 0;
}
static inline bool bond_is_slave_inactive(struct slave *slave)
@@ -582,6 +600,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+static inline bool bond_is_slave_rx_disabled(struct slave *slave)
+{
+ return slave->rx_disabled;
+}
+
static inline void bond_propose_link_state(struct slave *slave, int state)
{
slave->link_new_state = state;
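A hedged sketch of how the new Rx/Tx flag helpers could be driven from the decoupled 802.3ad mux states; this is illustrative, not the actual bond_3ad state machine:

static void example_mux_update(struct slave *slave, mux_states_t state,
			       bool notify)
{
	switch (state) {
	case AD_MUX_COLLECTING:
		/* accept traffic, but do not transmit yet */
		bond_set_slave_rx_enabled_flags(slave, notify);
		bond_set_slave_tx_disabled_flags(slave, notify);
		break;
	case AD_MUX_DISTRIBUTING:
		bond_set_slave_active_flags(slave, notify);
		break;
	default:
		break;
	}
}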
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 4dabeb6c76d3..9b09acac538e 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -48,6 +48,10 @@ void napi_busy_loop(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+void napi_busy_loop_rcu(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cf79656ce09c..f9eada2a26ec 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -118,10 +118,13 @@ struct wiphy;
* restrictions.
* @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
* @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
- * @IEEE80211_CHAN_NO_UHB_VLP_CLIENT: Client connection with VLP AP
+ * @IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT: Client connection with VLP AP
* not permitted using this channel
- * @IEEE80211_CHAN_NO_UHB_AFC_CLIENT: Client connection with AFC AP
+ * @IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT: Client connection with AFC AP
* not permitted using this channel
+ * @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
+ * mode despite other (regulatory) restrictions, even if it is
+ * otherwise disabled.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -146,8 +149,9 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_320MHZ = 1<<19,
IEEE80211_CHAN_NO_EHT = 1<<20,
IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
- IEEE80211_CHAN_NO_UHB_VLP_CLIENT= 1<<22,
- IEEE80211_CHAN_NO_UHB_AFC_CLIENT= 1<<23,
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22,
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23,
+ IEEE80211_CHAN_CAN_MONITOR = 1<<24,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -808,6 +812,9 @@ struct key_params {
* chan will define the primary channel and all other
* parameters are ignored.
* @freq1_offset: offset from @center_freq1, in KHz
+ * @punctured: mask of the punctured 20 MHz subchannels, with
+ * bits turned on being disabled (punctured); numbered
+ * from lower to higher frequency (like in the spec)
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
@@ -816,6 +823,7 @@ struct cfg80211_chan_def {
u32 center_freq2;
struct ieee80211_edmg edmg;
u16 freq1_offset;
+ u16 punctured;
};
/*
@@ -956,7 +964,8 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
chandef1->width == chandef2->width &&
chandef1->center_freq1 == chandef2->center_freq1 &&
chandef1->freq1_offset == chandef2->freq1_offset &&
- chandef1->center_freq2 == chandef2->center_freq2);
+ chandef1->center_freq2 == chandef2->center_freq2 &&
+ chandef1->punctured == chandef2->punctured);
}
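A tiny sketch of the @punctured convention documented above: bit N covers the Nth 20 MHz subchannel counting up in frequency, and a set bit means that subchannel is disabled; the helper is invented:

static bool example_subchan_punctured(const struct cfg80211_chan_def *chandef,
				      unsigned int subchan_idx)
{
	return chandef->punctured & BIT(subchan_idx);
}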
/**
@@ -1048,6 +1057,20 @@ cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef);
/**
+ * cfg80211_chandef_primary - calculate primary 40/80/160 MHz freq
+ * @chandef: chandef to calculate for
+ * @primary_chan_width: primary channel width to calculate center for
+ * @punctured: punctured sub-channel bitmap, will be recalculated
+ * according to the new bandwidth, can be %NULL
+ *
+ * Returns: the primary 40/80/160 MHz channel center frequency, or -1
+ * on error; the punctured bitmap is updated accordingly
+ */
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured);
+
+/**
* nl80211_send_chandef - sends the channel definition.
* @msg: the msg to send channel definition
* @chandef: the channel definition to check
@@ -1457,9 +1480,6 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
- * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
- * a 20 MHz channel, lowest bit corresponding to the lowest channel.
- * Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1494,7 +1514,6 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
- u16 punct_bitmap;
};
@@ -1528,9 +1547,8 @@ struct cfg80211_ap_update {
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
- * @punct_bitmap: Preamble puncturing bitmap. Each bit represents
- * a 20 MHz channel, lowest bit corresponding to the lowest channel.
- * Bit set to 1 indicates that the channel is punctured.
+ * @link_id: defines the link on which channel switch is expected during
+ * MLO. 0 in case of non-MLO.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
@@ -1543,7 +1561,7 @@ struct cfg80211_csa_settings {
bool radar_required;
bool block_tx;
u8 count;
- u16 punct_bitmap;
+ u8 link_id;
};
/**
@@ -1766,11 +1784,15 @@ struct station_parameters {
* @subtype: Management frame subtype to use for indicating removal
* (10 = Disassociation, 12 = Deauthentication)
* @reason_code: Reason code for the Disassociation/Deauthentication frame
+ * @link_id: Link ID indicating the link that stations to be flushed must be
+ * using; valid only for MLO, but can also be -1 in MLO to remove all
+ * stations regardless of link.
*/
struct station_del_parameters {
const u8 *mac;
u8 subtype;
u16 reason_code;
+ int link_id;
};
/**
@@ -2695,19 +2717,11 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
* @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
* or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
- * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied
- * for filtering out scan results received. Drivers advertise this support
- * of band specific rssi based filtering through the feature capability
- * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band
- * specific rssi thresholds take precedence over rssi_thold, if specified.
- * If not specified for any band, it will be assigned with rssi_thold of
- * corresponding matchset.
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
s32 rssi_thold;
- s32 per_band_rssi_thold[NUM_NL80211_BANDS];
};
/**
@@ -2910,6 +2924,8 @@ struct cfg80211_bss_ies {
* own the beacon_ies, but they're just pointers to the ones from the
* @hidden_beacon_bss struct)
* @proberesp_ies: the information elements from the last Probe Response frame
+ * @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame,
+ * cannot rely on it having valid data
* @hidden_beacon_bss: in case this BSS struct represents a probe response from
* a BSS that hides the SSID in its beacon, this points to the BSS struct
* that holds the beacon data. @beacon_ies is still valid, of course, and
@@ -2950,6 +2966,8 @@ struct cfg80211_bss {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 proberesp_ecsa_stuck:1;
+
u8 bssid_index;
u8 max_bssid_indicator;
@@ -3059,6 +3077,7 @@ struct cfg80211_assoc_link {
* @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links.
* Drivers shall disable MLO features for the current association if this
* flag is not set.
+ * @ASSOC_REQ_SPP_AMSDU: SPP A-MSDUs will be used on this connection (if any)
*/
enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HT = BIT(0),
@@ -3068,6 +3087,7 @@ enum cfg80211_assoc_req_flags {
ASSOC_REQ_DISABLE_HE = BIT(4),
ASSOC_REQ_DISABLE_EHT = BIT(5),
CONNECT_REQ_MLO_SUPPORT = BIT(6),
+ ASSOC_REQ_SPP_AMSDU = BIT(7),
};
/**
@@ -3592,12 +3612,15 @@ struct cfg80211_wowlan_nd_info {
* @tcp_connlost: TCP connection lost or failed to establish
* @tcp_nomoretokens: TCP data ran out of tokens
* @net_detect: if not %NULL, woke up because of net detect
+ * @unprot_deauth_disassoc: woke up due to unprotected deauth or
+ * disassoc frame (in MFP).
*/
struct cfg80211_wowlan_wakeup {
bool disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release, packet_80211,
- tcp_match, tcp_connlost, tcp_nomoretokens;
+ tcp_match, tcp_connlost, tcp_nomoretokens,
+ unprot_deauth_disassoc;
s32 pattern_idx;
u32 packet_present_len, packet_len;
const void *packet;
@@ -4919,7 +4942,7 @@ struct cfg80211_ops {
* enum wiphy_flags - wiphy capability flags
*
* @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
- * into two, first for legacy bands and second for UHB.
+ * into two, first for legacy bands and second for 6 GHz.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -6200,7 +6223,7 @@ struct wireless_dev {
int beacon_interval;
struct cfg80211_chan_def preset_chandef;
struct cfg80211_chan_def chandef;
- u8 id[IEEE80211_MAX_SSID_LEN];
+ u8 id[IEEE80211_MAX_MESH_ID_LEN];
u8 id_len, id_up_len;
} mesh;
struct {
@@ -7152,11 +7175,13 @@ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
* from a beacon or probe response
* @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon
* @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response
+ * @CFG80211_BSS_FTYPE_S1G_BEACON: data comes from an S1G beacon
*/
enum cfg80211_bss_frame_type {
CFG80211_BSS_FTYPE_UNKNOWN,
CFG80211_BSS_FTYPE_BEACON,
CFG80211_BSS_FTYPE_PRESP,
+ CFG80211_BSS_FTYPE_S1G_BEACON,
};
/**
@@ -8729,14 +8754,13 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* @dev: the device which switched channels
* @chandef: the new channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
- * @punct_bitmap: the new puncturing bitmap
*
* Caller must hold wiphy mutex, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id, u16 punct_bitmap);
+ unsigned int link_id);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
@@ -8745,7 +8769,6 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
- * @punct_bitmap: the future puncturing bitmap
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -8754,7 +8777,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet, u16 punct_bitmap);
+ bool quiet);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -8768,6 +8791,19 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
enum nl80211_band *band);
/**
+ * ieee80211_operating_class_to_chandef - convert operating class to chandef
+ *
+ * @operating_class: the operating class to convert
+ * @chan: the ieee80211_channel to convert
+ * @chandef: a pointer to the resulting chandef
+ *
+ * Returns %true if the conversion was successful, %false otherwise.
+ */
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef);
+
+/**
* ieee80211_chandef_to_operating_class - convert chandef to operation class
*
* @chandef: the chandef to convert
@@ -9373,18 +9409,6 @@ static inline int cfg80211_color_change_notify(struct net_device *dev)
}
/**
- * cfg80211_valid_disable_subchannel_bitmap - validate puncturing bitmap
- * @bitmap: bitmap to be validated
- * @chandef: channel definition
- *
- * Validate the puncturing bitmap.
- *
- * Return: %true if the bitmap is valid. %false otherwise.
- */
-bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
- const struct cfg80211_chan_def *chandef);
-
-/**
* cfg80211_links_removed - Notify about removed STA MLD setup links.
* @dev: network device.
* @link_mask: BIT mask of removed STA MLD setup link IDs.
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 6d3a20163260..9707ab54fdd5 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -30,6 +30,7 @@
FN(TCP_AOFAILURE) \
FN(SOCKET_BACKLOG) \
FN(TCP_FLAGS) \
+ FN(TCP_ABORT_ON_DATA) \
FN(TCP_ZEROWINDOW) \
FN(TCP_OLD_DATA) \
FN(TCP_OVERWINDOW) \
@@ -37,6 +38,7 @@
FN(TCP_RFC7323_PAWS) \
FN(TCP_OLD_SEQUENCE) \
FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_INVALID_ACK_SEQUENCE) \
FN(TCP_RESET) \
FN(TCP_INVALID_SYN) \
FN(TCP_CLOSE) \
@@ -54,6 +56,7 @@
FN(NEIGH_QUEUEFULL) \
FN(NEIGH_DEAD) \
FN(TC_EGRESS) \
+ FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
FN(CPU_BACKLOG) \
FN(XDP) \
@@ -105,7 +108,13 @@ enum skb_drop_reason {
SKB_CONSUMED,
/** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */
SKB_DROP_REASON_NOT_SPECIFIED,
- /** @SKB_DROP_REASON_NO_SOCKET: socket not found */
+ /**
+ * @SKB_DROP_REASON_NO_SOCKET: no valid socket that can be used.
+ * Reason could be one of three cases:
+ * 1) no established/listening socket found during lookup process
+ * 2) no valid request socket during 3WHS process
+ * 3) no valid child socket during 3WHS process
+ */
SKB_DROP_REASON_NO_SOCKET,
/** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
SKB_DROP_REASON_PKT_TOO_SMALL,
@@ -198,6 +207,11 @@ enum skb_drop_reason {
/** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */
SKB_DROP_REASON_TCP_FLAGS,
/**
+ * @SKB_DROP_REASON_TCP_ABORT_ON_DATA: abort on data, corresponding to
+ * LINUX_MIB_TCPABORTONDATA
+ */
+ SKB_DROP_REASON_TCP_ABORT_ON_DATA,
+ /**
* @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero,
* see LINUX_MIB_TCPZEROWINDOWDROP
*/
@@ -221,13 +235,19 @@ enum skb_drop_reason {
SKB_DROP_REASON_TCP_OFOMERGE,
/**
* @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to
- * LINUX_MIB_PAWSESTABREJECTED
+ * LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED
*/
SKB_DROP_REASON_TCP_RFC7323_PAWS,
/** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
SKB_DROP_REASON_TCP_OLD_SEQUENCE,
/** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
+ /**
+ * @SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ
+ * field because ack sequence is not in the window between snd_una
+ * and snd_nxt
+ */
+ SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE,
/** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
SKB_DROP_REASON_TCP_RESET,
/**
@@ -271,6 +291,8 @@ enum skb_drop_reason {
SKB_DROP_REASON_NEIGH_DEAD,
/** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
SKB_DROP_REASON_TC_EGRESS,
+ /** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */
+ SKB_DROP_REASON_SECURITY_HOOK,
/**
* @SKB_DROP_REASON_QDISC_DROP: dropped by qdisc when packet outputting (
* failed to enqueue to current qdisc)
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 82135fbdb1e6..7c0da9effe4e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -991,9 +991,9 @@ struct dsa_switch_ops {
* Port's MAC EEE settings
*/
int (*set_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
int (*get_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_eee *e);
+ struct ethtool_keee *e);
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
diff --git a/include/net/dst.h b/include/net/dst.h
index f5dfc8fb7b37..0aa331bd2fdb 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -390,7 +390,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
struct net_device *dev, int initial_obsolete,
unsigned short flags);
-struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);
static inline void dst_confirm(struct dst_entry *dst)
diff --git a/include/net/eee.h b/include/net/eee.h
new file mode 100644
index 000000000000..84837aba3cd9
--- /dev/null
+++ b/include/net/eee.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _EEE_H
+#define _EEE_H
+
+#include <linux/types.h>
+
+struct eee_config {
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_enabled;
+};
+
+static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
+{
+ /* eee_enabled is the master on/off */
+ if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
+ return false;
+
+ return true;
+}
+
+static inline void eeecfg_to_eee(struct ethtool_keee *eee,
+ const struct eee_config *eeecfg)
+{
+ eee->tx_lpi_timer = eeecfg->tx_lpi_timer;
+ eee->tx_lpi_enabled = eeecfg->tx_lpi_enabled;
+ eee->eee_enabled = eeecfg->eee_enabled;
+}
+
+static inline void eee_to_eeecfg(struct eee_config *eeecfg,
+ const struct ethtool_keee *eee)
+{
+ eeecfg->tx_lpi_timer = eee->tx_lpi_timer;
+ eeecfg->tx_lpi_enabled = eee->tx_lpi_enabled;
+ eeecfg->eee_enabled = eee->eee_enabled;
+}
+
+#endif
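A sketch of how a MAC driver might use these helpers from its ethtool set_eee path; struct example_priv and example_mac_set_lpi() are invented:

#include <linux/ethtool.h>
#include <net/eee.h>

struct example_priv {
	struct eee_config eeecfg;
};

static void example_mac_set_lpi(struct example_priv *priv, u32 timer)
{
	/* program the hardware LPI timer (hypothetical hook) */
}

static int example_set_eee(struct example_priv *priv,
			   const struct ethtool_keee *eee)
{
	eee_to_eeecfg(&priv->eeecfg, eee);

	/* Arm Tx LPI only when both the master and Tx knobs agree. */
	if (eeecfg_mac_can_tx_lpi(&priv->eeecfg))
		example_mac_set_lpi(priv, priv->eeecfg.tx_lpi_timer);
	return 0;
}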
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index e61469129402..9ece6e5a3ea8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -41,6 +41,8 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @bind: called when family multicast group is added to a netlink socket
+ * @unbind: called when family multicast group is removed from a netlink socket
* @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
@@ -84,6 +86,8 @@ struct genl_family {
void (*post_doit)(const struct genl_split_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*bind)(int mcgrp);
+ void (*unbind)(int mcgrp);
const struct genl_ops * ops;
const struct genl_small_ops *small_ops;
const struct genl_split_ops *split_ops;
@@ -149,7 +153,7 @@ static inline void *genl_info_userhdr(const struct genl_info *info)
/* Report that a root attribute is missing */
#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
- struct genl_info *__info = (info); \
+ const struct genl_info *__info = (info); \
\
NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
})
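A sketch of a family wiring up the new per-family bind()/unbind() hooks to track multicast-group listeners; all names are invented:

static int example_bind(int mcgrp)
{
	/* a socket joined multicast group @mcgrp of this family */
	return 0;
}

static void example_unbind(int mcgrp)
{
	/* a socket left multicast group @mcgrp */
}

static struct genl_family example_family = {
	.name	= "example",
	.bind	= example_bind,
	.unbind	= example_unbind,
};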
diff --git a/include/net/gro.h b/include/net/gro.h
index b435f0ddbf64..d6fc8fbd3730 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -9,6 +9,7 @@
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>
+#include <net/hotdata.h>
struct napi_gro_cb {
union {
@@ -139,21 +140,16 @@ static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
NAPI_GRO_CB(skb)->data_offset += len;
}
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
+static inline void *skb_gro_header_fast(const struct sk_buff *skb,
unsigned int offset)
{
return NAPI_GRO_CB(skb)->frag0 + offset;
}
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+static inline bool skb_gro_may_pull(const struct sk_buff *skb,
+ unsigned int hlen)
{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len);
}
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
@@ -162,28 +158,30 @@ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
if (!pskb_may_pull(skb, hlen))
return NULL;
- skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}
-static inline void *skb_gro_header(struct sk_buff *skb,
- unsigned int hlen, unsigned int offset)
+static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
{
void *ptr;
ptr = skb_gro_header_fast(skb, offset);
- if (skb_gro_header_hard(skb, hlen))
+ if (!skb_gro_may_pull(skb, hlen))
ptr = skb_gro_header_slow(skb, hlen, offset);
return ptr;
}
-static inline void *skb_gro_network_header(struct sk_buff *skb)
+static inline void *skb_gro_network_header(const struct sk_buff *skb)
{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
+ if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
+ return skb_gro_header_fast(skb, skb_network_offset(skb));
+
+ return skb_network_header(skb);
}
-static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
+static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
{
const struct iphdr *iph = skb_gro_network_header(skb);
@@ -421,7 +419,8 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
return uh;
}
-static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
+static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
+ int proto)
{
const struct ipv6hdr *iph = skb_gro_network_header(skb);
@@ -448,7 +447,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
{
list_add_tail(&skb->list, &napi->rx_list);
napi->rx_count += segs;
- if (napi->rx_count >= READ_ONCE(gro_normal_batch))
+ if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
gro_normal_list(napi);
}
@@ -495,6 +494,4 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *
#endif
}
-extern struct list_head offload_base;
-
#endif /* _NET_IPV6_GRO_H */
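A sketch of the renamed fast/slow header access as a GRO receive callback would use it: skb_gro_may_pull() gates the frag0 fast path, and skb_gro_header() falls back to pskb_may_pull() when needed; the callback below is illustrative, not a real offload:

#include <linux/tcp.h>
#include <net/gro.h>

static struct sk_buff *example_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct tcphdr);
	const struct tcphdr *th;

	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;	/* header not reachable; skip GRO */
	/* ... match @th against held packets on @head ... */
	return NULL;
}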
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
new file mode 100644
index 000000000000..003667a1efd6
--- /dev/null
+++ b/include/net/hotdata.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_HOTDATA_H
+#define _NET_HOTDATA_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <net/protocol.h>
+
+/* Read mostly data used in network fast paths. */
+struct net_hotdata {
+#if IS_ENABLED(CONFIG_INET)
+ struct packet_offload ip_packet_offload;
+ struct net_offload tcpv4_offload;
+ struct net_protocol tcp_protocol;
+ struct net_offload udpv4_offload;
+ struct net_protocol udp_protocol;
+ struct packet_offload ipv6_packet_offload;
+ struct net_offload tcpv6_offload;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_protocol tcpv6_protocol;
+ struct inet6_protocol udpv6_protocol;
+#endif
+ struct net_offload udpv6_offload;
+#endif
+ struct list_head offload_base;
+ struct list_head ptype_all;
+ struct kmem_cache *skbuff_cache;
+ struct kmem_cache *skbuff_fclone_cache;
+ struct kmem_cache *skb_small_head_cache;
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+ u32 rps_cpu_mask;
+#endif
+ int gro_normal_batch;
+ int netdev_budget;
+ int netdev_budget_usecs;
+ int tstamp_prequeue;
+ int max_backlog;
+ int dev_tx_weight;
+ int dev_rx_weight;
+};
+
+#define inet_ehash_secret net_hotdata.tcp_protocol.secret
+#define udp_ehash_secret net_hotdata.udp_protocol.secret
+#define inet6_ehash_secret net_hotdata.tcpv6_protocol.secret
+#define tcp_ipv6_hash_secret net_hotdata.tcpv6_offload.secret
+#define udp6_ehash_secret net_hotdata.udpv6_protocol.secret
+#define udp_ipv6_hash_secret net_hotdata.udpv6_offload.secret
+
+extern struct net_hotdata net_hotdata;
+
+#endif /* _NET_HOTDATA_H */
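A sketch of a fast-path consumer reading a tunable through the single cache-hot structure instead of a scattered global; the helper is invented:

#include <net/hotdata.h>

static bool example_backlog_full(unsigned int qlen)
{
	/* sysctl writers update the field; readers pair with READ_ONCE() */
	return qlen >= READ_ONCE(net_hotdata.max_backlog);
}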
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index f07642264c1e..238ad3349456 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -144,7 +144,7 @@ struct ipv6_ac_socklist {
struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
- struct ifacaddr6 *aca_next;
+ struct ifacaddr6 __rcu *aca_next;
struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
@@ -196,7 +196,7 @@ struct inet6_dev {
spinlock_t mc_report_lock; /* mld query report lock */
struct mutex mc_lock; /* mld global lock */
- struct ifacaddr6 *ac_list;
+ struct ifacaddr6 __rcu *ac_list;
rwlock_t lock;
refcount_t refcnt;
__u32 if_flags;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index d0a2f827d5f2..9ab4bf704e86 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -357,4 +357,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk)
return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
}
+static inline void inet_init_csk_locks(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
+ spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
+}
+
#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index aa86453f6b9b..f9ddd47dc4f8 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -274,6 +274,7 @@ enum {
INET_FLAGS_REPFLOW = 27,
INET_FLAGS_RTALERT_ISOLATE = 28,
INET_FLAGS_SNDFLOW = 29,
+ INET_FLAGS_RTALERT = 30,
};
/* cmsg flags for inet */
@@ -307,11 +308,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
#define inet_assign_bit(nr, sk, val) \
assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
-static inline bool sk_is_inet(struct sock *sk)
-{
- return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
-}
-
/**
* sk_to_full_sk - Access to a full socket
* @sk: pointer to a socket
diff --git a/include/net/ioam6.h b/include/net/ioam6.h
index 781d2d8b2f29..2cbbee6e806a 100644
--- a/include/net/ioam6.h
+++ b/include/net/ioam6.h
@@ -12,6 +12,7 @@
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
#include <linux/rhashtable-types.h>
struct ioam6_namespace {
@@ -65,4 +66,7 @@ void ioam6_exit(void);
int ioam6_iptunnel_init(void);
void ioam6_iptunnel_exit(void);
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len);
+
#endif /* _NET_IOAM6_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index de0c69c57e3c..25cb688bdc62 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -767,7 +767,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
* Functions provided by ip_sockglue.c
*/
-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 360b12e61850..323c94f1845b 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -173,6 +173,9 @@ struct fib6_info {
refcount_t fib6_ref;
unsigned long expires;
+
+ struct hlist_node gc_link;
+
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -241,12 +244,18 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
return rt->fib6_src.plen > 0;
}
+/* Callers should hold f6i->fib6_table->tb6_lock if the route has ever
+ * been added to a table.
+ */
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->fib6_flags &= ~RTF_EXPIRES;
f6i->expires = 0;
}
+/* Callers should hold f6i->fib6_table->tb6_lock if the route has ever
+ * been added to a table.
+ */
static inline void fib6_set_expires(struct fib6_info *f6i,
unsigned long expires)
{
@@ -327,8 +336,10 @@ static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
static inline void fib6_info_release(struct fib6_info *f6i)
{
- if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
+ if (f6i && refcount_dec_and_test(&f6i->fib6_ref)) {
+ DEBUG_NET_WARN_ON_ONCE(!hlist_unhashed(&f6i->gc_link));
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
+ }
}
enum fib6_walk_state {
@@ -382,6 +393,7 @@ struct fib6_table {
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;
+ struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -498,6 +510,38 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
+/* Add the route to the gc list if it is not already there
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_add_gc_list(struct fib6_info *f6i)
+{
+ /* If fib6_node is null, the f6i is not in (or removed from) the
+ * table.
+ *
+ * There is a gap between finding the f6i from the table and
+ * calling this function without the protection of the tb6_lock.
+ * This check makes sure the f6i is not added to the gc list when
+ * it is not on the table.
+ */
+ if (!rcu_dereference_protected(f6i->fib6_node,
+ lockdep_is_held(&f6i->fib6_table->tb6_lock)))
+ return;
+
+ if (hlist_unhashed(&f6i->gc_link))
+ hlist_add_head(&f6i->gc_link, &f6i->fib6_table->tb6_gc_hlist);
+}
+
+/* Remove the route from the gc list if it is on the list.
+ *
+ * The callers should hold f6i->fib6_table->tb6_lock.
+ */
+static inline void fib6_remove_gc_list(struct fib6_info *f6i)
+{
+ if (!hlist_unhashed(&f6i->gc_link))
+ hlist_del_init(&f6i->gc_link);
+}
+
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
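A sketch of the intended locking pattern for the new GC list: expiry and list membership change together under tb6_lock; the helper is invented:

static void example_set_route_lifetime(struct fib6_info *f6i,
				       unsigned long expires)
{
	spin_lock_bh(&f6i->fib6_table->tb6_lock);
	fib6_set_expires(f6i, expires);
	fib6_add_gc_list(f6i);	/* route is now a GC candidate */
	spin_unlock_bh(&f6i->fib6_table->tb6_lock);
}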
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 28b065790261..a30c6aa9e5cf 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -170,7 +170,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev, unsigned int pref,
- u32 defrtr_usr_metric);
+ u32 defrtr_usr_metric,
+ int lifetime);
void rt6_purge_dflt_routers(struct net *net);
@@ -331,7 +332,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
out:
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index d4667b7797e3..9b2f69ba5e49 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -264,6 +264,7 @@ struct fib_dump_filter {
bool filter_set;
bool dump_routes;
bool dump_exceptions;
+ bool rtnl_held;
unsigned char protocol;
unsigned char rt_type;
unsigned int flags;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 2d746f4c9a0a..5cd64bb2104d 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -284,7 +284,8 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
- struct rtnl_link_ops *ops);
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index cf25ea21d770..88a8e554f7a1 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -534,13 +534,15 @@ static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb)
return 0;
}
-static inline bool ipv6_accept_ra(struct inet6_dev *idev)
+static inline bool ipv6_accept_ra(const struct inet6_dev *idev)
{
+ s32 accept_ra = READ_ONCE(idev->cnf.accept_ra);
+
/* If forwarding is enabled, RA are not accepted unless the special
* hybrid mode (accept_ra=2) is enabled.
*/
- return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
- idev->cnf.accept_ra;
+ return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 :
+ accept_ra;
}
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 7e73f8e5e497..1d55ba7c45be 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
*/
static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
}
/**
@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
*/
static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
}
/**
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index d400fe2e8668..56c6ecb2c10a 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#ifndef MAC80211_H
@@ -214,6 +214,10 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
* this is used only with channel switching with CSA
* @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider
+ * bandwidth) OFDMA settings need to be changed
+ * @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap
+ * was changed.
*/
enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
@@ -221,6 +225,19 @@ enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_AP = BIT(5),
+ IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6),
+};
+
+/**
+ * struct ieee80211_chan_req - A channel "request"
+ * @oper: channel definition to use for operation
+ * @ap: the channel definition of the AP, if any
+ * (otherwise the chan member is %NULL)
+ */
+struct ieee80211_chan_req {
+ struct cfg80211_chan_def oper;
+ struct cfg80211_chan_def ap;
};
/**
@@ -231,6 +248,8 @@ enum ieee80211_chanctx_change {
*
* @def: the channel definition
* @min_def: the minimum channel definition currently required.
+ * @ap: the channel definition the AP actually is operating as,
+ * for use with (wider bandwidth) OFDMA
* @rx_chains_static: The number of RX chains that must always be
* active on the channel to receive MIMO transmissions
* @rx_chains_dynamic: The number of RX chains that must be enabled
@@ -243,6 +262,7 @@ enum ieee80211_chanctx_change {
struct ieee80211_chanctx_conf {
struct cfg80211_chan_def def;
struct cfg80211_chan_def min_def;
+ struct cfg80211_chan_def ap;
u8 rx_chains_static, rx_chains_dynamic;
@@ -340,8 +360,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
* @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
* status changed.
- * @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed.
* @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
+ * @BSS_CHANGED_MLD_TTLM: TID to link mapping was changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -376,8 +396,8 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
- BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32),
BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
+ BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -537,6 +557,10 @@ struct ieee80211_fils_discovery {
* to that BSS) that can change during the lifetime of the BSS.
*
* @vif: reference to owning VIF
+ * @bss: the cfg80211 bss descriptor. Valid only for a station, and only
+ * when associated. Note: This contains information which is not
+ * necessarily authenticated. For example, information coming from probe
+ * responses.
* @addr: (link) address used locally
* @link_id: link ID, or 0 for non-MLO
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
@@ -581,7 +605,7 @@ struct ieee80211_fils_discovery {
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
* @bssid: The BSSID for this BSS
* @enable_beacon: whether beaconing should be enabled or not
- * @chandef: Channel definition for this BSS -- the hardware might be
+ * @chanreq: Channel request for this BSS -- the hardware might be
* configured for a higher bandwidth than this BSS uses, for example.
* @mu_group: VHT MU-MIMO group membership data
* @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
@@ -642,9 +666,7 @@ struct ieee80211_fils_discovery {
* @tx_pwr_env_num: number of @tx_pwr_env.
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
- * @eht_puncturing: bitmap to indicate which channels are punctured in this BSS
* @csa_active: marks whether a channel switch is going on.
- * @csa_punct_bitmap: new puncturing bitmap for channel switch
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
* when it is not assigned. This pointer is RCU-protected due to the TX
@@ -682,6 +704,7 @@ struct ieee80211_fils_discovery {
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
+ struct cfg80211_bss *bss;
const u8 *bssid;
unsigned int link_id;
@@ -714,7 +737,7 @@ struct ieee80211_bss_conf {
u32 cqm_rssi_hyst;
s32 cqm_rssi_low;
s32 cqm_rssi_high;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
struct ieee80211_mu_group_data mu_group;
bool qos;
bool hidden_ssid;
@@ -747,10 +770,8 @@ struct ieee80211_bss_conf {
u8 tx_pwr_env_num;
u8 pwr_reduction;
bool eht_support;
- u16 eht_puncturing;
bool csa_active;
- u16 csa_punct_bitmap;
bool mu_mimo_owner;
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
@@ -1772,6 +1793,10 @@ struct ieee80211_channel_switch {
* this is not pure P2P vif.
* @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is
* enabled for the interface.
+ * @IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW: Ignore wider bandwidth OFDMA
+ * operation on this interface and request a channel context without
+ * the AP definition. Use this e.g. because the device is able to
+ * handle OFDMA (downlink and trigger for uplink) on a per-AP basis.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
@@ -1779,6 +1804,7 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
IEEE80211_VIF_EML_ACTIVE = BIT(4),
+ IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5),
};
@@ -1808,9 +1834,11 @@ enum ieee80211_offload_flags {
* @ps: power-save mode (STA only). This flag is NOT affected by
* offchannel/dynamic_ps operations.
* @aid: association ID number, valid only when @assoc is true
- * @eml_cap: EML capabilities as described in P802.11be_D2.2 Figure 9-1002k.
+ * @eml_cap: EML capabilities as described in P802.11be_D4.1 Figure 9-1001j.
* @eml_med_sync_delay: Medium Synchronization delay as described in
- * P802.11be_D2.2 Figure 9-1002j.
+ * P802.11be_D4.1 Figure 9-1001i.
+ * @mld_capa_op: MLD Capabilities and Operations per P802.11be_D4.1
+ * Figure 9-1001k
* @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
* hardware may filter ARP queries targeted for other addresses than listed here.
* The driver must allow ARP queries targeted for all addresses listed here
@@ -1835,6 +1863,7 @@ struct ieee80211_vif_cfg {
u16 aid;
u16 eml_cap;
u16 eml_med_sync_delay;
+ u16 mld_capa_op;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
int arp_addr_cnt;
@@ -1845,6 +1874,35 @@ struct ieee80211_vif_cfg {
u8 ap_addr[ETH_ALEN] __aligned(2);
};
+#define IEEE80211_TTLM_NUM_TIDS 8
+
+/**
+ * struct ieee80211_neg_ttlm - negotiated TID to link map info
+ *
+ * @downlink: bitmap of active links per TID for downlink, or 0 if mapping for
+ * this TID is not included.
+ * @uplink: bitmap of active links per TID for uplink, or 0 if mapping for this
+ * TID is not included.
+ * @valid: info is valid or not.
+ */
+struct ieee80211_neg_ttlm {
+ u16 downlink[IEEE80211_TTLM_NUM_TIDS];
+ u16 uplink[IEEE80211_TTLM_NUM_TIDS];
+ bool valid;
+};
+
+/**
+ * enum ieee80211_neg_ttlm_res - return value for negotiated TTLM handling
+ * @NEG_TTLM_RES_ACCEPT: accept the request
+ * @NEG_TTLM_RES_REJECT: reject the request
+ * @NEG_TTLM_RES_SUGGEST_PREFERRED: reject and suggest a new mapping
+ */
+enum ieee80211_neg_ttlm_res {
+ NEG_TTLM_RES_ACCEPT,
+ NEG_TTLM_RES_REJECT,
+ NEG_TTLM_RES_SUGGEST_PREFERRED
+};
+
/**
* struct ieee80211_vif - per-interface data
*
@@ -1863,6 +1921,11 @@ struct ieee80211_vif_cfg {
* API calls meant for that purpose.
* @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO.
* Must be a subset of valid_links.
+ * @suspended_links: subset of dormant_links representing links that are
+ * suspended.
+ * 0 for non-MLO.
+ * @neg_ttlm: negotiated TID to link mapping info.
+ * see &struct ieee80211_neg_ttlm.
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
@@ -1900,7 +1963,8 @@ struct ieee80211_vif {
struct ieee80211_vif_cfg cfg;
struct ieee80211_bss_conf bss_conf;
struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
- u16 valid_links, active_links, dormant_links;
+ u16 valid_links, active_links, dormant_links, suspended_links;
+ struct ieee80211_neg_ttlm neg_ttlm;
u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
@@ -2041,6 +2105,8 @@ static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif)
* @IEEE80211_KEY_FLAG_GENERATE_MMIE: This flag should be set by the driver
* for a AES_CMAC key to indicate that it requires sequence number
* generation only
+ * @IEEE80211_KEY_FLAG_SPP_AMSDU: SPP A-MSDUs can be used with this key
+ * (set by mac80211 from the sta->spp_amsdu flag)
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -2054,6 +2120,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9),
IEEE80211_KEY_FLAG_GENERATE_MMIE = BIT(10),
+ IEEE80211_KEY_FLAG_SPP_AMSDU = BIT(11),
};
/**
@@ -2352,6 +2419,7 @@ struct ieee80211_link_sta {
* would be assigned to link[link_id] where link_id is the id assigned
* by the AP.
* @valid_links: bitmap of valid links, or 0 for non-MLO
+ * @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
*/
struct ieee80211_sta {
u8 addr[ETH_ALEN];
@@ -2365,6 +2433,7 @@ struct ieee80211_sta {
bool tdls_initiator;
bool mfp;
bool mlo;
+ bool spp_amsdu;
u8 max_amsdu_subframes;
struct ieee80211_sta_aggregates *cur;
@@ -4293,6 +4362,10 @@ struct ieee80211_prep_tx_info {
* flow offloading for flows originating from the vif.
* Note that the driver must not assume that the vif driver_data is valid
* at this point, since the callback can be called during netdev teardown.
+ * @can_neg_ttlm: for a managed interface, requests the driver to determine
+ * if the requested TID-To-Link mapping can be accepted or not.
+ * If it's not accepted the driver may suggest a preferred mapping and
+ * modify @ttlm parameter with the suggested TID-to-Link mapping.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4673,6 +4746,9 @@ struct ieee80211_ops {
struct net_device *dev,
enum tc_setup_type type,
void *type_data);
+ enum ieee80211_neg_ttlm_res
+ (*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *ttlm);
};
/**
@@ -5455,6 +5531,7 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
/**
* ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* The beacon counter should be updated after each beacon transmission.
* This function is called implicitly when
@@ -5464,7 +5541,8 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
*
* Return: new countdown value
*/
-u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif);
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
@@ -5482,20 +5560,23 @@ void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
/**
* ieee80211_csa_finish - notify mac80211 about channel switch
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* After a channel switch announcement was scheduled and the counter in this
* announcement hits 1, this function must be called by the driver to
* notify mac80211 that the channel can be changed.
*/
-void ieee80211_csa_finish(struct ieee80211_vif *vif);
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id);
/**
* ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: valid link_id during MLO or 0 for non-MLO
*
* This function returns whether the countdown reached zero.
*/
-bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif);
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id);
/**
* ieee80211_color_change_finish - notify mac80211 about color change
@@ -7416,11 +7497,10 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is
* aware of.
- * @gfp: allocation flags
*/
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp);
+ u64 color_bitmap);
/**
* ieee80211_is_tx_data - check if frame is a data frame
@@ -7480,4 +7560,17 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
u16 active_links);
+/* for older drivers - let's not document these ... */
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx);
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode);
+
#endif /* MAC80211_H */
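A sketch of how a legacy (non-chanctx) driver might wire the ieee80211_emulate_*() helpers above into its ops table; the mandatory callbacks are elided and the exact usage pattern is an assumption based on these declarations:

static const struct ieee80211_ops legacy_ops = {
	/* mandatory callbacks such as .tx, .start, .stop omitted */
	.add_chanctx		= ieee80211_emulate_add_chanctx,
	.remove_chanctx		= ieee80211_emulate_remove_chanctx,
	.change_chanctx		= ieee80211_emulate_change_chanctx,
	.switch_vif_chanctx	= ieee80211_emulate_switch_vif_chanctx,
};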
diff --git a/include/net/mctp.h b/include/net/mctp.h
index da86e106c91d..7b17c52e8ce2 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -87,7 +87,7 @@ struct mctp_sock {
};
/* Key for matching incoming packets to sockets or reassembly contexts.
- * Packets are matched on (src,dest,tag).
+ * Packets are matched on (peer EID, local EID, tag).
*
* Lifetime / locking requirements:
*
@@ -133,6 +133,7 @@ struct mctp_sock {
* - through an expiry timeout, on a per-socket timer
*/
struct mctp_sk_key {
+ unsigned int net;
mctp_eid_t peer_addr;
mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */
__u8 tag; /* incoming tag match; invert TO for local */
@@ -249,12 +250,14 @@ struct mctp_route {
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
mctp_eid_t daddr);
+/* always takes ownership of skb */
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
void mctp_key_unref(struct mctp_sk_key *key);
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
- mctp_eid_t daddr, mctp_eid_t saddr,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
bool manual, u8 *tagp);
/* routing <--> device interface */
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 13b3a4e29fdb..20c34bd7a077 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -67,8 +67,6 @@ struct net {
*/
spinlock_t rules_mod_lock;
- atomic_t dev_unreg_count;
-
unsigned int dev_base_seq; /* protected by rtnl_mutex */
u32 ifindex;
@@ -450,6 +448,9 @@ struct pernet_operations {
void (*pre_exit)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
+ /* The following method is called with RTNL held. */
+ void (*exit_batch_rtnl)(struct list_head *net_exit_list,
+ struct list_head *dev_kill_list);
unsigned int *id;
size_t size;
};
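A hypothetical user of the new callback: foo_link_ops is an assumed rtnl_link_ops instance, and queueing devices onto @dev_kill_list follows the usual unregister_netdevice_queue() pattern under RTNL:

static void foo_exit_batch_rtnl(struct list_head *net_exit_list,
				struct list_head *dev_kill_list)
{
	struct net_device *dev, *aux;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		for_each_netdev_safe(net, dev, aux)
			if (dev->rtnl_link_ops == &foo_link_ops)
				unregister_netdevice_queue(dev, dev_kill_list);
}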
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 8b8ed4e13d74..1ec408585373 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -4,6 +4,62 @@
#include <linux/netdevice.h>
+/* See the netdev.yaml spec for definition of each statistic */
+struct netdev_queue_stats_rx {
+ u64 bytes;
+ u64 packets;
+ u64 alloc_fail;
+};
+
+struct netdev_queue_stats_tx {
+ u64 bytes;
+ u64 packets;
+};
+
+/**
+ * struct netdev_stat_ops - netdev ops for fine grained stats
+ * @get_queue_stats_rx: get stats for a given Rx queue
+ * @get_queue_stats_tx: get stats for a given Tx queue
+ * @get_base_stats: get base stats (not belonging to any live instance)
+ *
+ * Query stats for a given object. The values of the statistics are undefined
+ * on entry (specifically they are *not* zero-initialized). Drivers should
+ * assign values only to the statistics they collect. Statistics which are not
+ * collected must be left undefined.
+ *
+ * Queue objects are not necessarily persistent, and only currently active
+ * queues are queried by the per-queue callbacks. This means that per-queue
+ * statistics will not generally add up to the total number of events for
+ * the device. The @get_base_stats callback allows filling in the delta
+ * between events for currently live queues and overall device history.
+ * When the statistics for the entire device are queried, first @get_base_stats
+ * is issued to collect the delta, and then a series of per-queue callbacks.
+ * Only statistics which are set in @get_base_stats will be reported
+ * at the device level, meaning that unlike in queue callbacks, setting
+ * a statistic to zero in @get_base_stats is a legitimate thing to do.
+ * This is because @get_base_stats has a second function of designating which
+ * statistics are in fact correct for the entire device (e.g. when history
+ * for some of the events is not maintained, and a reliable "total" cannot
+ * be provided).
+ *
+ * Device drivers can assume that when collecting total device stats,
+ * the @get_base_stats and subsequent per-queue calls are performed
+ * "atomically" (without releasing the rtnl_lock).
+ *
+ * Device drivers are encouraged to reset the per-queue statistics when
+ * the number of queues changes. This is because the primary use case for
+ * per-queue statistics is currently to detect traffic imbalance.
+ */
+struct netdev_stat_ops {
+ void (*get_queue_stats_rx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *stats);
+ void (*get_queue_stats_tx)(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *stats);
+ void (*get_base_stats)(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx);
+};
+
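A minimal sketch of a driver filling these ops. struct foo_priv and its ring counters are assumptions; only the contract (assign collected stats, leave the rest undefined, zero is meaningful in get_base_stats) comes from the documentation above:

static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Only assign what the device actually counts; alloc_fail is
	 * left undefined, as the contract above requires. */
	stats->packets = priv->rx_rings[idx].packets;
	stats->bytes = priv->rx_rings[idx].bytes;
}

static void foo_get_base_stats(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	/* History from queues that no longer exist; zero is meaningful
	 * here and designates the stat as valid device-wide. */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops foo_stat_ops = {
	.get_queue_stats_rx	= foo_get_queue_stats_rx,
	.get_base_stats		= foo_get_base_stats,
};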
/**
* DOC: Lockless queue stopping / waking helpers.
*
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 956c752ceb31..a763dd327c6e 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -276,7 +276,7 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
}
void flow_offload_route_init(struct flow_offload *flow,
- const struct nf_flow_route *route);
+ struct nf_flow_route *route);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index c81021ab07aa..4aeffddb7586 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -35,7 +35,6 @@ struct nf_queue_handler {
void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_free(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index b157c5cafd14..e27c28b612e4 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -205,6 +205,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
* @nla: netlink attributes
* @portid: netlink portID of the original message
* @seq: netlink sequence number
+ * @flags: modifiers to new request
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
@@ -282,6 +283,7 @@ struct nft_elem_priv { };
*
* @key: element key
* @key_end: closing element key
+ * @data: element data
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -325,10 +327,10 @@ struct nft_set_iter {
* @dtype: data type
* @dlen: data length
* @objtype: object type
- * @flags: flags
* @size: number of set elements
* @policy: set policy
* @gc_int: garbage collector interval
+ * @timeout: element timeout
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
* @expr: set must support expressions
@@ -351,9 +353,9 @@ struct nft_set_desc {
/**
* enum nft_set_class - performance class
*
- * @NFT_LOOKUP_O_1: constant, O(1)
- * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
- * @NFT_LOOKUP_O_N: linear, O(N)
+ * @NFT_SET_CLASS_O_1: constant, O(1)
+ * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
+ * @NFT_SET_CLASS_O_N: linear, O(N)
*/
enum nft_set_class {
NFT_SET_CLASS_O_1,
@@ -422,9 +424,13 @@ struct nft_set_ext;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
+ * @commit: commit set elements
+ * @abort: abort set elements
* @privsize: function to return size of set private data
+ * @estimate: estimate the required memory size and the lookup complexity class
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
+ * @gc_init: initialize garbage collection
* @elemsize: element private size
*
* Operations lookup, update and delete have simpler interfaces, are faster
@@ -540,13 +546,16 @@ struct nft_set_elem_expr {
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
* @udata: user data
- * @expr: stateful expression
+ * @pending_update: list of set elements pending an update
* @ops: set ops
* @flags: set flags
* @dead: set will be freed, never cleared
* @genmask: generation mask
* @klen: key length
* @dlen: data length
+ * @num_exprs: number of expressions
+ * @exprs: stateful expressions
+ * @catchall_list: list of catch-all set elements
* @data: private set data
*/
struct nft_set {
@@ -692,6 +701,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
*
* @len: length of extension area
* @offset: offsets of individual extension types
+ * @ext_len: length of the expected extension (used for sanity checks)
*/
struct nft_set_ext_tmpl {
u16 len;
@@ -798,10 +808,16 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex
return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
}
-static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
+ u64 tstamp)
{
return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
+ time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+ return __nft_set_elem_expired(ext, get_jiffies_64());
}
static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
@@ -840,6 +856,7 @@ struct nft_expr_ops;
* @select_ops: function to select nft_expr_ops
* @release_ops: release nft_expr_ops
* @ops: default ops, used when no select_ops functions is present
+ * @inner_ops: inner ops, used for inner packet operations
* @list: used internally
* @name: Identifier
* @owner: module reference
@@ -881,14 +898,22 @@ struct nft_offload_ctx;
* struct nft_expr_ops - nf_tables expression operations
*
* @eval: Expression evaluation function
+ * @clone: Expression clone function
* @size: full expression size, including private data size
* @init: initialization function
* @activate: activate expression in the next generation
* @deactivate: deactivate expression in next generation
* @destroy: destruction function, called after synchronize_rcu
+ * @destroy_clone: destruction clone function
* @dump: function to dump parameters
- * @type: expression type
* @validate: validate expression, called during loop detection
+ * @reduce: reduce expression
+ * @gc: garbage collection expression
+ * @offload: hardware offload expression
+ * @offload_action: function to report whether to allocate one slot in the
+ * flow offload array
+ * @offload_stats: function to synchronize hardware stats by updating the counter expression
+ * @type: expression type
* @data: extra data to attach to this expression operation
*/
struct nft_expr_ops {
@@ -1041,14 +1066,21 @@ struct nft_rule_blob {
/**
* struct nft_chain - nf_tables chain
*
+ * @blob_gen_0: rule blob pointer to the current generation
+ * @blob_gen_1: rule blob pointer to the future generation
* @rules: list of rules in the chain
* @list: used internally
* @rhlhead: used internally
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
- * @flags: bitmask of enum nft_chain_flags
+ * @flags: bitmask of enum NFTA_CHAIN_FLAGS
+ * @bound: whether the chain is bound
+ * @genmask: generation mask
* @name: name of the chain
+ * @udlen: user data length
+ * @udata: user data in the chain
+ * @blob_next: rule blob pointer to the next in the chain
*/
struct nft_chain {
struct nft_rule_blob __rcu *blob_gen_0;
@@ -1146,6 +1178,7 @@ struct nft_hook {
* @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
* @type: chain type
* @policy: default policy
+ * @flags: indicates whether the base chain is disabled
* @stats: per-cpu chain stats
* @chain: the chain
* @flow_block: flow block (for hardware offload)
@@ -1244,6 +1277,12 @@ static inline bool nft_table_has_owner(const struct nft_table *table)
return table->flags & NFT_TABLE_F_OWNER;
}
+static inline bool nft_table_is_orphan(const struct nft_table *table)
+{
+ return (table->flags & (NFT_TABLE_F_OWNER | NFT_TABLE_F_PERSIST)) ==
+ NFT_TABLE_F_PERSIST;
+}
+
static inline bool nft_base_chain_netdev(int family, u32 hooknum)
{
return family == NFPROTO_NETDEV ||
@@ -1274,11 +1313,13 @@ struct nft_object_hash_key {
* struct nft_object - nf_tables stateful object
*
* @list: table stateful object list node
- * @key: keys that identify this object
* @rhlhead: nft_objname_ht node
+ * @key: keys that identify this object
* @genmask: generation mask
* @use: number of references to this stateful object
* @handle: unique object handle
+ * @udlen: length of user data
+ * @udata: user data
* @ops: object operations
* @data: object data, layout depends on type
*/
@@ -1322,6 +1363,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
* @type: stateful object numeric type
* @owner: module owner
* @maxattr: maximum netlink attribute
+ * @family: address family for AF-specific object types
* @policy: netlink attribute policy
*/
struct nft_object_type {
@@ -1331,6 +1373,7 @@ struct nft_object_type {
struct list_head list;
u32 type;
unsigned int maxattr;
+ u8 family;
struct module *owner;
const struct nla_policy *policy;
};
@@ -1344,6 +1387,7 @@ struct nft_object_type {
* @destroy: release existing stateful object
* @dump: netlink dump stateful object
* @update: update stateful object
+ * @type: pointer to object type
*/
struct nft_object_ops {
void (*eval)(struct nft_object *obj,
@@ -1379,9 +1423,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @genmask: generation mask
* @use: number of references to this flow table
* @handle: unique object handle
- * @dev_name: array of device names
+ * @hook_list: list of hooks, one per net_device in the flowtable
* @data: rhashtable and garbage collector
- * @ops: array of hooks
*/
struct nft_flowtable {
struct list_head list;
@@ -1748,6 +1791,7 @@ struct nftables_pernet {
struct list_head notify_list;
struct mutex commit_mutex;
u64 table_handle;
+ u64 tstamp;
unsigned int base_seq;
unsigned int gc_seq;
u8 validate_state;
@@ -1760,6 +1804,11 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net)
return net_generic(net, nf_tables_net_id);
}
+static inline u64 nft_net_tstamp(const struct net *net)
+{
+ return nft_pernet(net)->tstamp;
+}
+
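The split into __nft_set_elem_expired() plus the per-net timestamp lets transaction processing judge every element in a batch against one snapshot instead of re-reading jiffies per element. A sketch of the assumed usage:

static bool foo_elem_gone(const struct net *net,
			  const struct nft_set_ext *ext)
{
	/* One snapshot per transaction keeps expiry decisions
	 * consistent across the whole batch. */
	return __nft_set_elem_expired(ext, nft_net_tstamp(net));
}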
#define __NFT_REDUCE_READONLY 1UL
#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 43ae50337685..f3ab0b8a4b18 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -145,15 +145,14 @@ struct netlbl_lsm_cache {
* processing.
*
*/
-#define NETLBL_CATMAP_MAPTYPE u64
#define NETLBL_CATMAP_MAPCNT 4
-#define NETLBL_CATMAP_MAPSIZE (sizeof(NETLBL_CATMAP_MAPTYPE) * 8)
+#define NETLBL_CATMAP_MAPSIZE (sizeof(u64) * 8)
#define NETLBL_CATMAP_SIZE (NETLBL_CATMAP_MAPSIZE * \
NETLBL_CATMAP_MAPCNT)
-#define NETLBL_CATMAP_BIT (NETLBL_CATMAP_MAPTYPE)0x01
+#define NETLBL_CATMAP_BIT ((u64)0x01)
struct netlbl_lsm_catmap {
u32 startbit;
- NETLBL_CATMAP_MAPTYPE bitmap[NETLBL_CATMAP_MAPCNT];
+ u64 bitmap[NETLBL_CATMAP_MAPCNT];
struct netlbl_lsm_catmap *next;
};
diff --git a/include/net/netmem.h b/include/net/netmem.h
new file mode 100644
index 000000000000..d8b810245c1d
--- /dev/null
+++ b/include/net/netmem.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Network memory
+ *
+ * Author: Mina Almasry <almasrymina@google.com>
+ */
+
+#ifndef _NET_NETMEM_H
+#define _NET_NETMEM_H
+
+/**
+ * typedef netmem_ref - a nonexistent type marking a reference to generic
+ * network memory.
+ *
+ * A netmem_ref currently is always a reference to a struct page. This
+ * abstraction is introduced so support for new memory types can be added.
+ *
+ * Use the supplied helpers to obtain the underlying memory pointer and fields.
+ */
+typedef unsigned long __bitwise netmem_ref;
+
+/* This conversion fails (returns NULL) if the netmem_ref is not struct page
+ * backed.
+ *
+ * Currently struct page is the only possible netmem, and this helper never
+ * fails.
+ */
+static inline struct page *netmem_to_page(netmem_ref netmem)
+{
+ return (__force struct page *)netmem;
+}
+
+/* Converting from page to netmem is always safe, because a page can always be
+ * a netmem.
+ */
+static inline netmem_ref page_to_netmem(struct page *page)
+{
+ return (__force netmem_ref)page;
+}
+
+#endif /* _NET_NETMEM_H */
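A short usage sketch of the round-trip helpers; the surrounding function is hypothetical, only the two conversions come from the header above:

static void foo_use_netmem(struct page *page)
{
	netmem_ref netmem = page_to_netmem(page);

	/* Pass netmem through generic paths, then recover the page
	 * when struct-page-specific fields are needed. */
	struct page *p = netmem_to_page(netmem);

	WARN_ON(p != page);
}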
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index d92046a4a078..7ec9cc80f11c 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -47,6 +47,8 @@ struct nh_config {
bool nh_grp_res_has_idle_timer;
bool nh_grp_res_has_unbalanced_timer;
+ bool nh_hw_stats;
+
struct nlattr *nh_encap;
u16 nh_encap_type;
@@ -95,8 +97,14 @@ struct nh_res_table {
struct nh_res_bucket nh_buckets[] __counted_by(num_nh_buckets);
};
+struct nh_grp_entry_stats {
+ u64_stats_t packets;
+ struct u64_stats_sync syncp;
+};
+
struct nh_grp_entry {
struct nexthop *nh;
+ struct nh_grp_entry_stats __percpu *stats;
u8 weight;
union {
@@ -114,6 +122,7 @@ struct nh_grp_entry {
struct list_head nh_list;
struct nexthop *nh_parent; /* nexthop of group with this entry */
+ u64 packets_hw;
};
struct nh_group {
@@ -124,6 +133,7 @@ struct nh_group {
bool resilient;
bool fdb_nh;
bool has_v4;
+ bool hw_stats;
struct nh_res_table __rcu *res_table;
struct nh_grp_entry nh_entries[] __counted_by(num_nh);
@@ -157,6 +167,7 @@ enum nexthop_event_type {
NEXTHOP_EVENT_REPLACE,
NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
NEXTHOP_EVENT_BUCKET_REPLACE,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
};
enum nh_notifier_info_type {
@@ -164,6 +175,7 @@ enum nh_notifier_info_type {
NH_NOTIFIER_INFO_TYPE_GRP,
NH_NOTIFIER_INFO_TYPE_RES_TABLE,
NH_NOTIFIER_INFO_TYPE_RES_BUCKET,
+ NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS,
};
struct nh_notifier_single_info {
@@ -187,6 +199,7 @@ struct nh_notifier_grp_entry_info {
struct nh_notifier_grp_info {
u16 num_nh;
bool is_fdb;
+ bool hw_stats;
struct nh_notifier_grp_entry_info nh_entries[] __counted_by(num_nh);
};
@@ -200,9 +213,21 @@ struct nh_notifier_res_bucket_info {
struct nh_notifier_res_table_info {
u16 num_nh_buckets;
+ bool hw_stats;
struct nh_notifier_single_info nhs[] __counted_by(num_nh_buckets);
};
+struct nh_notifier_grp_hw_stats_entry_info {
+ u32 id;
+ u64 packets;
+};
+
+struct nh_notifier_grp_hw_stats_info {
+ u16 num_nh;
+ bool hw_stats_used;
+ struct nh_notifier_grp_hw_stats_entry_info stats[] __counted_by(num_nh);
+};
+
struct nh_notifier_info {
struct net *net;
struct netlink_ext_ack *extack;
@@ -213,17 +238,22 @@ struct nh_notifier_info {
struct nh_notifier_grp_info *nh_grp;
struct nh_notifier_res_table_info *nh_res_table;
struct nh_notifier_res_bucket_info *nh_res_bucket;
+ struct nh_notifier_grp_hw_stats_info *nh_grp_hw_stats;
};
};
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap);
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
bool offload, bool trap);
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
unsigned long *activity);
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets);
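A hypothetical notifier snippet showing the intended flow: on NEXTHOP_EVENT_HW_STATS_REPORT_DELTA the driver walks the group and feeds per-entry packet deltas back through the helper. foo_hw_read_packets() is an assumed device accessor:

static void foo_report_nh_grp_stats(struct nh_notifier_grp_hw_stats_info *info)
{
	unsigned int i;

	for (i = 0; i < info->num_nh; i++)
		nh_grp_hw_stats_report_delta(info, i,
					     foo_hw_read_packets(info->stats[i].id));
}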
/* caller is holding rcu or rtnl; no reference taken to nexthop */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id);
@@ -316,7 +346,7 @@ static inline
int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
u8 rt_family)
{
- struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
int i;
for (i = 0; i < nhg->num_nh; i++) {
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 5dee575fbe86..3d07abacf08b 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -196,7 +196,7 @@ struct nfc_dev {
};
#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
-extern struct class nfc_class;
+extern const struct class nfc_class;
struct nfc_dev *nfc_allocate_device(const struct nfc_ops *ops,
u32 supported_protocols,
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 76481c465375..5e43a08d3231 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -18,8 +18,9 @@
* Please note DMA-sync-for-CPU is still
* device driver responsibility
*/
-#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
- PP_FLAG_DMA_SYNC_DEV)
+#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
+ PP_FLAG_SYSTEM_POOL)
/*
* Fast allocation side cache array/stack
@@ -128,6 +129,7 @@ struct page_pool_stats {
struct page_pool {
struct page_pool_params_fast p;
+ int cpuid;
bool has_init_callback;
long frag_users;
@@ -203,21 +205,18 @@ struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
+struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
+ int cpuid);
struct xdp_mem_info;
#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count);
#else
-static inline void page_pool_unlink_napi(struct page_pool *pool)
-{
-}
-
static inline void page_pool_destroy(struct page_pool *pool)
{
}
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index f308e8268651..a4ee43f493bb 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -24,6 +24,8 @@ struct tcf_walker {
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+#define NET_CLS_ALIAS_PREFIX "net-cls-"
+#define MODULE_ALIAS_NET_CLS(kind) MODULE_ALIAS(NET_CLS_ALIAS_PREFIX kind)
struct tcf_block_ext_info {
enum flow_block_binder_type binder_type;
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 1e200d9a066d..d7b7b6cd4aa1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -100,6 +100,8 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
int register_qdisc(struct Qdisc_ops *qops);
void unregister_qdisc(struct Qdisc_ops *qops);
+#define NET_SCH_ALIAS_PREFIX "net-sch-"
+#define MODULE_ALIAS_NET_SCH(id) MODULE_ALIAS(NET_SCH_ALIAS_PREFIX id)
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 6aef8cb11cc8..b2499f88f8f8 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -46,6 +46,7 @@ struct net_protocol {
* socket lookup?
*/
icmp_strict_tag_validation:1;
+ u32 secret;
};
#if IS_ENABLED(CONFIG_IPV6)
@@ -59,6 +60,7 @@ struct inet6_protocol {
__be32 info);
unsigned int flags; /* INET6_PROTO_xxx */
+ u32 secret;
};
#define INET6_PROTO_NOPOLICY 0x1
@@ -68,6 +70,7 @@ struct inet6_protocol {
struct net_offload {
struct offload_callbacks callbacks;
unsigned int flags; /* Flags used by IPv6 for now */
+ u32 secret;
};
/* This should be set for any extension header which is compatible with GSO. */
#define INET6_PROTO_GSO_EXTHDR 0x1
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 144c39db9898..8839133d6f6b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -83,6 +83,45 @@ static inline struct sock *req_to_sk(struct request_sock *req)
return (struct sock *)req;
}
+/**
+ * skb_steal_sock - steal a socket from an sk_buff
+ * @skb: sk_buff to steal the socket from
+ * @refcounted: is set to true if the socket is reference-counted
+ * @prefetched: is set to true if the socket was assigned from bpf
+ */
+static inline struct sock *skb_steal_sock(struct sk_buff *skb,
+ bool *refcounted, bool *prefetched)
+{
+ struct sock *sk = skb->sk;
+
+ if (!sk) {
+ *prefetched = false;
+ *refcounted = false;
+ return NULL;
+ }
+
+ *prefetched = skb_sk_is_prefetched(skb);
+ if (*prefetched) {
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ struct request_sock *req = inet_reqsk(sk);
+
+ *refcounted = false;
+ sk = req->rsk_listener;
+ req->rsk_listener = NULL;
+ return sk;
+ }
+#endif
+ *refcounted = sk_is_refcounted(sk);
+ } else {
+ *refcounted = true;
+ }
+
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ return sk;
+}
+
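The relocation from net/sock.h (see the removal below) also adds the syncookie branch. A sketch of the assumed call pattern in a receive path:

static void foo_rcv(struct sk_buff *skb)
{
	bool refcounted, prefetched;
	struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);

	if (sk) {
		/* ... deliver skb to sk ... */
		if (refcounted)
			sock_put(sk);
	}
}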
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
bool attach_listener)
diff --git a/include/net/route.h b/include/net/route.h
index 980ab474eabd..d4a0147942f1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -37,9 +37,6 @@
#define RTO_ONLINK 0x01
-#define RT_CONN_FLAGS(sk) (RT_TOS(READ_ONCE(inet_sk(sk)->tos)) | sock_flag(sk, SOCK_LOCALROUTE))
-#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
-
static inline __u8 ip_sock_rt_scope(const struct sock *sk)
{
if (sock_flag(sk, SOCK_LOCALROUTE))
@@ -163,8 +160,8 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
__u8 proto, __u8 tos, int oif)
{
flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
- RT_SCOPE_UNIVERSE, proto,
- sk ? inet_sk_flowi_flags(sk) : 0,
+ sk ? ip_sock_rt_scope(sk) : RT_SCOPE_UNIVERSE,
+ proto, sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
diff --git a/include/net/rps.h b/include/net/rps.h
new file mode 100644
index 000000000000..7660243e905b
--- /dev/null
+++ b/include/net/rps.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_RPS_H
+#define _NET_RPS_H
+
+#include <linux/types.h>
+#include <linux/static_key.h>
+#include <net/sock.h>
+#include <net/hotdata.h>
+
+#ifdef CONFIG_RPS
+
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
+
+/*
+ * This structure holds an RPS map which can be of variable length. The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+ unsigned int len;
+ struct rcu_head rcu;
+ u16 cpus[];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
+ */
+struct rps_dev_flow {
+ u16 cpu;
+ u16 filter;
+ unsigned int last_qtail;
+};
+#define RPS_NO_FILTER 0xffff
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+ unsigned int mask;
+ struct rcu_head rcu;
+ struct rps_dev_flow flows[];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+ ((_num) * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32-bit value. The upper part holds the high-order
+ * bits of the flow hash, the lower part the CPU number.
+ * rps_cpu_mask is used to partition the space, depending on the number
+ * of possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32 - 6 = 26 bits for the hash.
+ */
+struct rps_sock_flow_table {
+ u32 mask;
+
+ u32 ents[] ____cacheline_aligned_in_smp;
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
+
+#define RPS_NO_CPU 0xffff
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ u32 hash)
+{
+ unsigned int index = hash & table->mask;
+ u32 val = hash & ~net_hotdata.rps_cpu_mask;
+
+ /* We only give a hint, preemption can change CPU under us */
+ val |= raw_smp_processor_id();
+
+ /* The following WRITE_ONCE() is paired with the READ_ONCE()
+ * here, and another one in get_rps_cpu().
+ */
+ if (READ_ONCE(table->ents[index]) != val)
+ WRITE_ONCE(table->ents[index], val);
+}
+
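A worked instance of the encoding described above, assuming 64 possible CPUs (rps_cpu_mask == 0x3f):

/*
 *   hash  = 0xdeadbeef, recvmsg runs on CPU 5
 *   entry = (0xdeadbeef & ~0x3f) | 5 = 0xdeadbec5
 *
 * get_rps_cpu() later compares entry & ~rps_cpu_mask against the
 * packet hash's upper bits and, on a match, steers the flow to
 * entry & rps_cpu_mask == CPU 5.
 */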
+#endif /* CONFIG_RPS */
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+ struct rps_sock_flow_table *sock_flow_table;
+
+ if (!hash)
+ return;
+ rcu_read_lock();
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
+ if (sock_flow_table)
+ rps_record_sock_flow(sock_flow_table, hash);
+ rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (static_branch_unlikely(&rfs_needed)) {
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing :
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+ */
+ sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+ }
+ }
+#endif
+}
+
+#endif /* _NET_RPS_H */
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 6506221c5fe3..3bfb80bad173 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -12,6 +12,7 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
+ RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
};
enum rtnl_kinds {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index ba3e1b315de8..cefe0c4bdae3 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -238,12 +238,7 @@ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
-#ifdef CONFIG_BQL
- /* Non-BQL migrated drivers will return 0, too. */
- return dql_avail(&txq->dql);
-#else
- return 0;
-#endif
+ return netdev_queue_dql_avail(txq);
}
struct Qdisc_class_ops {
@@ -375,6 +370,10 @@ struct tcf_proto_ops {
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);
+ void (*tmplt_reoffload)(struct tcf_chain *chain,
+ bool add,
+ flow_setup_cb_t *cb,
+ void *cb_priv);
struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
u32 handle);
diff --git a/include/net/scm.h b/include/net/scm.h
index cf68acec4d70..92276a2c5543 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -25,6 +25,7 @@ struct scm_creds {
struct scm_fp_list {
short count;
+ short count_unix;
short max;
struct user_struct *user;
struct file *fp[SCM_MAX_FD];
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5a24d6d8522a..f24a1bbcb3ef 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -242,10 +242,7 @@ struct sctp_sock {
int do_auto_asconf;
};
-static inline struct sctp_sock *sctp_sk(const struct sock *sk)
-{
- return (struct sctp_sock *)sk;
-}
+#define sctp_sk(ptr) container_of_const(ptr, struct sctp_sock, inet.sk)
static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
{
diff --git a/include/net/sock.h b/include/net/sock.h
index a7f815c7cfdf..b5e00702acc1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -378,14 +378,10 @@ struct sock {
#define sk_flags __sk_common.skc_flags
#define sk_rxhash __sk_common.skc_rxhash
- /* early demux fields */
- struct dst_entry __rcu *sk_rx_dst;
- int sk_rx_dst_ifindex;
- u32 sk_rx_dst_cookie;
+ __cacheline_group_begin(sock_write_rx);
- socket_lock_t sk_lock;
atomic_t sk_drops;
- int sk_rcvlowat;
+ __s32 sk_peek_off;
struct sk_buff_head sk_error_queue;
struct sk_buff_head sk_receive_queue;
/*
@@ -402,18 +398,24 @@ struct sock {
struct sk_buff *head;
struct sk_buff *tail;
} sk_backlog;
-
#define sk_rmem_alloc sk_backlog.rmem_alloc
- int sk_forward_alloc;
- u32 sk_reserved_mem;
+ __cacheline_group_end(sock_write_rx);
+
+ __cacheline_group_begin(sock_read_rx);
+ /* early demux fields */
+ struct dst_entry __rcu *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
- /* ===== mostly read cache line ===== */
unsigned int sk_napi_id;
+ u16 sk_busy_poll_budget;
+ u8 sk_prefer_busy_poll;
#endif
+ u8 sk_userlocks;
int sk_rcvbuf;
- int sk_disconnects;
struct sk_filter __rcu *sk_filter;
union {
@@ -422,15 +424,33 @@ struct sock {
struct socket_wq *sk_wq_raw;
/* public: */
};
+
+ void (*sk_data_ready)(struct sock *sk);
+ long sk_rcvtimeo;
+ int sk_rcvlowat;
+ __cacheline_group_end(sock_read_rx);
+
+ __cacheline_group_begin(sock_read_rxtx);
+ int sk_err;
+ struct socket *sk_socket;
+ struct mem_cgroup *sk_memcg;
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
+ __cacheline_group_end(sock_read_rxtx);
- struct dst_entry __rcu *sk_dst_cache;
+ __cacheline_group_begin(sock_write_rxtx);
+ socket_lock_t sk_lock;
+ u32 sk_reserved_mem;
+ int sk_forward_alloc;
+ u32 sk_tsflags;
+ __cacheline_group_end(sock_write_rxtx);
+
+ __cacheline_group_begin(sock_write_tx);
+ int sk_write_pending;
atomic_t sk_omem_alloc;
int sk_sndbuf;
- /* ===== cache line for TX ===== */
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
unsigned long sk_tsq_flags;
@@ -439,22 +459,36 @@ struct sock {
struct rb_root tcp_rtx_queue;
};
struct sk_buff_head sk_write_queue;
- __s32 sk_peek_off;
- int sk_write_pending;
- __u32 sk_dst_pending_confirm;
+ u32 sk_dst_pending_confirm;
u32 sk_pacing_status; /* see enum sk_pacing */
- long sk_sndtimeo;
+ struct page_frag sk_frag;
struct timer_list sk_timer;
- __u32 sk_priority;
- __u32 sk_mark;
+
unsigned long sk_pacing_rate; /* bytes per second */
+ atomic_t sk_zckey;
+ atomic_t sk_tskey;
+ __cacheline_group_end(sock_write_tx);
+
+ __cacheline_group_begin(sock_read_tx);
unsigned long sk_max_pacing_rate;
- struct page_frag sk_frag;
+ long sk_sndtimeo;
+ u32 sk_priority;
+ u32 sk_mark;
+ struct dst_entry __rcu *sk_dst_cache;
netdev_features_t sk_route_caps;
- int sk_gso_type;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
+ u16 sk_gso_type;
+ u16 sk_gso_max_segs;
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
- __u32 sk_txhash;
+ u32 sk_txhash;
+ u8 sk_pacing_shift;
+ bool sk_use_task_frag;
+ __cacheline_group_end(sock_read_tx);
/*
* Because of non atomicity rules, all
@@ -463,64 +497,44 @@ struct sock {
u8 sk_gso_disabled : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
- sk_no_check_rx : 1,
- sk_userlocks : 4;
- u8 sk_pacing_shift;
+ sk_no_check_rx : 1;
+ u8 sk_shutdown;
u16 sk_type;
u16 sk_protocol;
- u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
- int sk_err,
- sk_err_soft;
+ int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
kuid_t sk_uid;
- u8 sk_txrehash;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- u8 sk_prefer_busy_poll;
- u16 sk_busy_poll_budget;
-#endif
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
- long sk_rcvtimeo;
ktime_t sk_stamp;
#if BITS_PER_LONG==32
seqlock_t sk_stamp_seq;
#endif
- atomic_t sk_tskey;
- atomic_t sk_zckey;
- u32 sk_tsflags;
- u8 sk_shutdown;
+ int sk_disconnects;
+ u8 sk_txrehash;
u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
sk_txtime_report_errors : 1,
sk_txtime_unused : 6;
- bool sk_use_task_frag;
- struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
void *sk_security;
#endif
struct sock_cgroup_data sk_cgrp_data;
- struct mem_cgroup *sk_memcg;
void (*sk_state_change)(struct sock *sk);
- void (*sk_data_ready)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-#endif
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
@@ -1103,41 +1117,6 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
WRITE_ONCE(sk->sk_incoming_cpu, cpu);
}
-static inline void sock_rps_record_flow_hash(__u32 hash)
-{
-#ifdef CONFIG_RPS
- struct rps_sock_flow_table *sock_flow_table;
-
- rcu_read_lock();
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
- rps_record_sock_flow(sock_flow_table, hash);
- rcu_read_unlock();
-#endif
-}
-
-static inline void sock_rps_record_flow(const struct sock *sk)
-{
-#ifdef CONFIG_RPS
- if (static_branch_unlikely(&rfs_needed)) {
- /* Reading sk->sk_rxhash might incur an expensive cache line
- * miss.
- *
- * TCP_ESTABLISHED does cover almost all states where RFS
- * might be useful, and is cheaper [1] than testing :
- * IPv4: inet_sk(sk)->inet_daddr
- * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
- * OR an additional socket flag
- * [1] : sk_state and sk_prot are in the same cache line.
- */
- if (sk->sk_state == TCP_ESTABLISHED) {
- /* This READ_ONCE() is paired with the WRITE_ONCE()
- * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
- */
- sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
- }
- }
-#endif
-}
static inline void sock_rps_save_rxhash(struct sock *sk,
const struct sk_buff *skb)
@@ -1429,6 +1408,7 @@ sk_memory_allocated(const struct sock *sk)
/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
static inline void
sk_memory_allocated_add(struct sock *sk, int amt)
@@ -1437,7 +1417,7 @@ sk_memory_allocated_add(struct sock *sk, int amt)
preempt_disable();
local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+ if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
@@ -1451,7 +1431,7 @@ sk_memory_allocated_sub(struct sock *sk, int amt)
preempt_disable();
local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+ if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
}
@@ -2765,9 +2745,25 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
+static inline bool sk_is_inet(const struct sock *sk)
+{
+ int family = READ_ONCE(sk->sk_family);
+
+ return family == AF_INET || family == AF_INET6;
+}
+
static inline bool sk_is_tcp(const struct sock *sk)
{
- return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
}
static inline bool sk_is_stream_unix(const struct sock *sk)
@@ -2814,31 +2810,6 @@ sk_is_refcounted(struct sock *sk)
return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}
-/**
- * skb_steal_sock - steal a socket from an sk_buff
- * @skb: sk_buff to steal the socket from
- * @refcounted: is set to true if the socket is reference-counted
- * @prefetched: is set to true if the socket was assigned from bpf
- */
-static inline struct sock *
-skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched)
-{
- if (skb->sk) {
- struct sock *sk = skb->sk;
-
- *refcounted = true;
- *prefetched = skb_sk_is_prefetched(skb);
- if (*prefetched)
- *refcounted = sk_is_refcounted(sk);
- skb->destructor = NULL;
- skb->sk = NULL;
- return sk;
- }
- *prefetched = false;
- *refcounted = false;
- return NULL;
-}
-
/* Checks if this SKB belongs to an HW offloaded socket
* and whether any SW fallbacks are required based on dev.
* Check decrypted mark in case skb_orphan() cleared socket.
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index a43062d4c734..8346b0d29542 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -308,6 +308,9 @@ void switchdev_deferred_process(void);
int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack);
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+ enum switchdev_notifier_type nt,
+ const struct switchdev_obj *obj);
int switchdev_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index dd78a1181031..6ae35199d3b3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -348,7 +348,7 @@ void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
+enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -396,8 +396,8 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
bool *lost_race);
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb);
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
@@ -498,6 +498,22 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct tcp_options_received *tcp_opt,
int mss, u32 tsoff);
+#if IS_ENABLED(CONFIG_BPF)
+struct bpf_tcp_req_attrs {
+ u32 rcv_tsval;
+ u32 rcv_tsecr;
+ u16 mss;
+ u8 rcv_wscale;
+ u8 snd_wscale;
+ u8 ecn_ok;
+ u8 wscale_ok;
+ u8 sack_ok;
+ u8 tstamp_ok;
+ u8 usec_ts_ok;
+ u8 reserved[3];
+};
+#endif
+
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -577,6 +593,15 @@ static inline u32 tcp_cookie_time(void)
return val;
}
+/* Convert a 64-bit nsec timestamp to a TCP timestamp (ms or usec resolution) */
+static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
+{
+ if (usec_ts)
+ return div_u64(val, NSEC_PER_USEC);
+
+ return div_u64(val, NSEC_PER_MSEC);
+}
+
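For instance, one second of clock:

/* tcp_ns_to_ts(false, NSEC_PER_SEC) == 1000     (ms ticks)
 * tcp_ns_to_ts(true,  NSEC_PER_SEC) == 1000000  (usec ticks)
 */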
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
@@ -590,6 +615,26 @@ static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *
dst_feature(dst, RTAX_FEATURE_ECN);
}
+#if IS_ENABLED(CONFIG_BPF)
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return skb->sk;
+}
+
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
+#else
+static inline bool cookie_bpf_ok(struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline struct request_sock *cookie_bpf_check(struct sock *sk,
+						     struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
+
/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
@@ -2506,7 +2551,7 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
- int (*get_info)(const struct sock *sk, struct sk_buff *skb);
+ int (*get_info)(struct sock *sk, struct sk_buff *skb);
size_t (*get_info_size)(const struct sock *sk);
/* clone ulp */
void (*clone)(const struct request_sock *req, struct sock *newsk,
diff --git a/include/net/tls.h b/include/net/tls.h
index 962f0c501111..340ad43971e4 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -97,9 +97,6 @@ struct tls_sw_context_tx {
struct tls_rec *open_rec;
struct list_head tx_list;
atomic_t encrypt_pending;
- /* protect crypto_wait with encrypt_pending */
- spinlock_t encrypt_compl_lock;
- int async_notify;
u8 async_capable:1;
#define BIT_TX_SCHEDULED 0
@@ -136,8 +133,6 @@ struct tls_sw_context_rx {
struct tls_strparser strp;
atomic_t decrypt_pending;
- /* protect crypto_wait with decrypt_pending*/
- spinlock_t decrypt_compl_lock;
struct sk_buff_head async_hold;
struct wait_queue_head wq;
};
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 526c1e7f505e..c9aec9ab6191 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -159,11 +159,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
return ret;
}
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+ list_del(&xskb->xskb_list_node);
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+ struct xdp_buff_xsk *frag;
+
+ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+ xskb_list_node);
+ return &frag->xdp;
+}
+
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
xdp->data_meta = xdp->data;
xdp->data_end = xdp->data + size;
+ xdp->flags = 0;
}
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
@@ -350,6 +368,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
return NULL;
}
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+ return NULL;
+}
+
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1d107241b901..57c743b7e4fe 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -51,8 +51,10 @@
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
+#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
+#define XFRM_ADD_STATS(net, field, val) ((void)(net))
#endif
@@ -1577,22 +1579,20 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_update_stats(struct net *net);
#ifdef CONFIG_XFRM_OFFLOAD
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
struct net_device *dev = xdo->dev;
- if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
- return;
-
if (dev && dev->xfrmdev_ops &&
- dev->xfrmdev_ops->xdo_dev_state_update_curlft)
- dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
+ dev->xfrmdev_ops->xdo_dev_state_update_stats)
+ dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
}
#else
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 5ec1e71a09de..c38f4fe5e64c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -100,10 +100,6 @@ struct scsi_vpd {
unsigned char data[];
};
-enum scsi_vpd_parameters {
- SCSI_VPD_HEADER_SIZE = 4,
-};
-
struct scsi_device {
struct Scsi_Host *host;
struct request_queue *request_queue;
@@ -208,6 +204,7 @@ struct scsi_device {
unsigned use_10_for_rw:1; /* first try 10-byte read / write */
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
+ unsigned read_before_ms:1; /* perform a READ before MODE SENSE */
unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
unsigned no_write_same:1; /* no WRITE SAME command */
unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 8c18e8b6d27d..b24716ab2750 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -75,6 +75,7 @@
#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_0 0x25E2040
#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_1 0x25E2044
#define CS35L56_DSP1_XMEM_UNPACKED24_0 0x2800000
+#define CS35L56_DSP1_FW_VER 0x2800010
#define CS35L56_DSP1_HALO_STATE_A1 0x2801E58
#define CS35L56_DSP1_HALO_STATE 0x28021E0
#define CS35L56_DSP1_PM_CUR_STATE_A1 0x2804000
@@ -241,7 +242,7 @@
#define CS35L56_CONTROL_PORT_READY_US 2200
#define CS35L56_HALO_STATE_POLL_US 1000
-#define CS35L56_HALO_STATE_TIMEOUT_US 50000
+#define CS35L56_HALO_STATE_TIMEOUT_US 250000
#define CS35L56_RESET_PULSE_MIN_US 1100
#define CS35L56_WAKE_HOLD_TIME_US 1000
@@ -272,6 +273,7 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
@@ -284,7 +286,10 @@ int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base);
int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
+int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
+ bool *fw_missing, unsigned int *fw_version);
int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
+int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq);
void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index ecc02e955279..1f4c39922d82 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -30,6 +30,8 @@ static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
+struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
+ const char *name);
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
struct snd_soc_jack *jack);
int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 0a86ab8d47b9..9aff384941de 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
//
-// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
//
// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
// https://www.ti.com
//
-// The TAS2781 driver implements a flexible and configurable
+// The TAS2563/TAS2781 driver implements a flexible and configurable
// algo coefficient setting for one, two, or even multiple
-// TAS2781 chips.
+// TAS2563/TAS2781 chips.
//
// Author: Shenghao Ding <shenghao-ding@ti.com>
// Author: Kevin Lu <kevin-lu@ti.com>
@@ -60,7 +60,8 @@
#define TASDEVICE_CMD_FIELD_W 0x4
enum audio_device {
- TAS2781 = 0,
+ TAS2563,
+ TAS2781,
};
enum device_catlog_id {
@@ -141,6 +142,7 @@ struct tasdevice_priv {
void tas2781_reset(struct tasdevice_priv *tas_dev);
int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
+ struct module *module,
void (*cont)(const struct firmware *fw, void *context));
struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
int tasdevice_init(struct tasdevice_priv *tas_priv);
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 5194b7e6dc8d..08f2c93d6b16 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -902,37 +902,6 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
-TRACE_EVENT(afs_folio_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
-
- TP_ARGS(vnode, where, folio),
-
- TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode)
- __field(const char *, where)
- __field(pgoff_t, index)
- __field(unsigned long, from)
- __field(unsigned long, to)
- ),
-
- TP_fast_assign(
- unsigned long priv = (unsigned long)folio_get_private(folio);
- __entry->vnode = vnode;
- __entry->where = where;
- __entry->index = folio_index(folio);
- __entry->from = afs_folio_dirty_from(folio, priv);
- __entry->to = afs_folio_dirty_to(folio, priv);
- __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
- (1UL << (BITS_PER_LONG - 1)) : 0);
- ),
-
- TP_printk("vn=%p %lx %s %lx-%lx%s",
- __entry->vnode, __entry->index, __entry->where,
- __entry->from,
- __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
- __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
- );
-
TRACE_EVENT(afs_call_state,
TP_PROTO(struct afs_call *call,
enum afs_call_state from,
@@ -1102,6 +1071,31 @@ TRACE_EVENT(afs_file_error,
__print_symbolic(__entry->where, afs_file_errors))
);
+TRACE_EVENT(afs_bulkstat_error,
+ TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
+
+ TP_ARGS(op, fid, index, abort),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, op)
+ __field(unsigned int, index)
+ __field(s32, abort)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->fid = *fid;
+ __entry->index = index;
+ __entry->abort = abort;
+ ),
+
+ TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
+ __entry->op, __entry->index,
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->abort)
+ );
+
TRACE_EVENT(afs_cm_no_server,
TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 65029dfb92fb..a697f4b77162 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -772,15 +772,14 @@ TRACE_EVENT(ext4_mb_release_group_pa,
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
+ TP_PROTO(struct inode *inode, unsigned int len),
- TP_ARGS(inode, len, needed),
+ TP_ARGS(inode, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( unsigned int, len )
- __field( unsigned int, needed )
),
@@ -788,13 +787,11 @@ TRACE_EVENT(ext4_discard_preallocations,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->len = len;
- __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu len: %u needed %u",
+ TP_printk("dev %d,%d ino %lu len: %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->len,
- __entry->needed)
+ (unsigned long) __entry->ino, __entry->len)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index 6678cf8b235b..dc03cf8e0369 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -36,6 +36,39 @@ TRACE_EVENT(napi_poll,
__entry->work, __entry->budget)
);
+TRACE_EVENT(dql_stall_detected,
+
+ TP_PROTO(unsigned short thrs, unsigned int len,
+ unsigned long last_reap, unsigned long hist_head,
+ unsigned long now, unsigned long *hist),
+
+ TP_ARGS(thrs, len, last_reap, hist_head, now, hist),
+
+ TP_STRUCT__entry(
+ __field( unsigned short, thrs)
+ __field( unsigned int, len)
+ __field( unsigned long, last_reap)
+ __field( unsigned long, hist_head)
+ __field( unsigned long, now)
+ __array( unsigned long, hist, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->thrs = thrs;
+ __entry->len = len;
+ __entry->last_reap = last_reap;
+ __entry->hist_head = hist_head * BITS_PER_LONG;
+ __entry->now = now;
+		memcpy(__entry->hist, hist, sizeof(__entry->hist));
+ ),
+
+ TP_printk("thrs %u len %u last_reap %lu hist_head %lu now %lu hist %016lx %016lx %016lx %016lx",
+ __entry->thrs, __entry->len,
+ __entry->last_reap, __entry->hist_head, __entry->now,
+ __entry->hist[0], __entry->hist[1],
+ __entry->hist[2], __entry->hist[3])
+);
+
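The event snapshots a four-word bitmap of recent stall history, with hist_head pre-scaled to a bit index. A minimal sketch of probing one bit of that history, assuming the head indexes bits within hist[] (an assumption based on the fields recorded above, not on the DQL internals):

	/* Sketch: test bit "bit" of the 4 * BITS_PER_LONG bit stall history. */
	static bool dql_hist_bit_set(const unsigned long *hist, unsigned int bit)
	{
		return hist[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
	}
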
#undef NO_DEV
#endif /* _TRACE_NAPI_H */
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index beec534cbaab..447a8c21cf57 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -16,34 +16,57 @@
* Define enums for tracing information.
*/
#define netfs_read_traces \
+ EM(netfs_read_trace_dio_read, "DIO-READ ") \
EM(netfs_read_trace_expanded, "EXPANDED ") \
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
+#define netfs_write_traces \
+ EM(netfs_write_trace_dio_write, "DIO-WRITE") \
+ EM(netfs_write_trace_launder, "LAUNDER ") \
+ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
+ EM(netfs_write_trace_writeback, "WRITEBACK") \
+ E_(netfs_write_trace_writethrough, "WRITETHRU")
+
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
- E_(NETFS_READ_FOR_WRITE, "RW")
+ EM(NETFS_READ_FOR_WRITE, "RW") \
+ EM(NETFS_WRITEBACK, "WB") \
+ EM(NETFS_WRITETHROUGH, "WT") \
+ EM(NETFS_LAUNDER_WRITE, "LW") \
+ EM(NETFS_UNBUFFERED_WRITE, "UW") \
+ EM(NETFS_DIO_READ, "DR") \
+ E_(NETFS_DIO_WRITE, "DW")
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_done, "DONE ") \
EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
- E_(netfs_rreq_trace_unmark, "UNMARK ")
+ EM(netfs_rreq_trace_unmark, "UNMARK ") \
+ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
+ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
+ E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
EM(NETFS_READ_FROM_CACHE, "READ") \
- E_(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \
+ EM(NETFS_WRITE_TO_CACHE, "WRIT") \
+ E_(NETFS_INVALID_WRITE, "INVL")
#define netfs_sreq_traces \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_limited, "LIMIT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
EM(netfs_sreq_trace_resubmit_short, "SHORT") \
EM(netfs_sreq_trace_submit, "SUBMT") \
@@ -55,19 +78,24 @@
#define netfs_failures \
EM(netfs_fail_check_write_begin, "check-write-begin") \
EM(netfs_fail_copy_to_cache, "copy-to-cache") \
+ EM(netfs_fail_dio_read_short, "dio-read-short") \
+ EM(netfs_fail_dio_read_zero, "dio-read-zero") \
EM(netfs_fail_read, "read") \
EM(netfs_fail_short_read, "short-read") \
- E_(netfs_fail_prepare_write, "prep-write")
+ EM(netfs_fail_prepare_write, "prep-write") \
+ E_(netfs_fail_write, "write")
#define netfs_rreq_ref_traces \
- EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
+ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
- EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
+ EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \
+ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
- EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
+ EM(netfs_rreq_trace_put_work, "PUT WORK ") \
+ EM(netfs_rreq_trace_see_work, "SEE WORK ") \
E_(netfs_rreq_trace_new, "NEW ")
#define netfs_sreq_ref_traces \
@@ -76,11 +104,44 @@
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
EM(netfs_sreq_trace_new, "NEW ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
+ EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \
+ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
+ EM(netfs_sreq_trace_put_work, "PUT WORK ") \
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
+#define netfs_folio_traces \
+ /* The first few correspond to enum netfs_how_to_modify */ \
+ EM(netfs_folio_is_uptodate, "mod-uptodate") \
+ EM(netfs_just_prefetch, "mod-prefetch") \
+ EM(netfs_whole_folio_modify, "mod-whole-f") \
+ EM(netfs_modify_and_clear, "mod-n-clear") \
+ EM(netfs_streaming_write, "mod-streamw") \
+ EM(netfs_streaming_write_cont, "mod-streamw+") \
+ EM(netfs_flush_content, "flush") \
+ EM(netfs_streaming_filled_page, "mod-streamw-f") \
+ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
+ /* The rest are for writeback */ \
+ EM(netfs_folio_trace_clear, "clear") \
+ EM(netfs_folio_trace_clear_s, "clear-s") \
+ EM(netfs_folio_trace_clear_g, "clear-g") \
+ EM(netfs_folio_trace_copy_to_cache, "copy") \
+ EM(netfs_folio_trace_end_copy, "end-copy") \
+ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
+ EM(netfs_folio_trace_kill, "kill") \
+ EM(netfs_folio_trace_launder, "launder") \
+ EM(netfs_folio_trace_mkwrite, "mkwrite") \
+ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
+ EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_redirty, "redirty") \
+ EM(netfs_folio_trace_redirtied, "redirtied") \
+ EM(netfs_folio_trace_store, "store") \
+ EM(netfs_folio_trace_store_plus, "store+") \
+ EM(netfs_folio_trace_wthru, "wthru") \
+ E_(netfs_folio_trace_wthru_plus, "wthru+")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -90,11 +151,13 @@
#define E_(a, b) a
enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_write_trace { netfs_write_traces } __mode(byte);
enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
enum netfs_failure { netfs_failures } __mode(byte);
enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
#endif
@@ -107,6 +170,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
netfs_read_traces;
+netfs_write_traces;
netfs_rreq_origins;
netfs_rreq_traces;
netfs_sreq_sources;
@@ -114,6 +178,7 @@ netfs_sreq_traces;
netfs_failures;
netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
+netfs_folio_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -314,6 +379,82 @@ TRACE_EVENT(netfs_sreq_ref,
__entry->ref)
);
+TRACE_EVENT(netfs_folio,
+ TP_PROTO(struct folio *folio, enum netfs_folio_trace why),
+
+ TP_ARGS(folio, why),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(unsigned int, nr)
+ __field(enum netfs_folio_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->why = why;
+ __entry->index = folio_index(folio);
+ __entry->nr = folio_nr_pages(folio);
+ ),
+
+ TP_printk("i=%05lx ix=%05lx-%05lx %s",
+ __entry->ino, __entry->index, __entry->index + __entry->nr - 1,
+ __print_symbolic(__entry->why, netfs_folio_traces))
+ );
+
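A hypothetical call-site sketch for the new folio event; the reason argument is one of the netfs_folio_traces values defined above:

	trace_netfs_folio(folio, netfs_folio_trace_store);
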
+TRACE_EVENT(netfs_write_iter,
+ TP_PROTO(const struct kiocb *iocb, const struct iov_iter *from),
+
+ TP_ARGS(iocb, from),
+
+ TP_STRUCT__entry(
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->start = iocb->ki_pos;
+ __entry->len = iov_iter_count(from);
+ __entry->flags = iocb->ki_flags;
+ ),
+
+ TP_printk("WRITE-ITER s=%llx l=%zx f=%x",
+ __entry->start, __entry->len, __entry->flags)
+ );
+
+TRACE_EVENT(netfs_write,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ enum netfs_write_trace what),
+
+ TP_ARGS(wreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned int, cookie )
+ __field(enum netfs_write_trace, what )
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(wreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->wreq = wreq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->what = what;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ ),
+
+ TP_printk("R=%08x %s c=%08x by=%llx-%llx",
+ __entry->wreq,
+ __print_symbolic(__entry->what, netfs_write_traces),
+ __entry->cookie,
+ __entry->start, __entry->start + __entry->len - 1)
+ );
+
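As a sketch, the two write-side events would pair up in a write path roughly as follows (illustrative only; iocb, from and wreq are assumed to be in scope):

	/* Once per ->write_iter() invocation: */
	trace_netfs_write_iter(iocb, from);

	/* Once per constructed write request: */
	trace_netfs_write(wreq, netfs_write_trace_writeback);
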
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index a3995925cb05..1f4258308b96 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
@@ -106,14 +106,14 @@ TRACE_EVENT(qdisc_destroy,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
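The fix in both events is the same: __string()/__assign_str() expect a NUL-terminated character buffer, while qdisc_dev() returns a struct net_device pointer, so the device's name[] member is what must be recorded:

	/* In TP_STRUCT__entry: reserve space for the NUL-terminated name. */
	__string(dev, qdisc_dev(q)->name)
	/* In TP_fast_assign: copy the name into the ring buffer. */
	__assign_str(dev, qdisc_dev(q)->name);
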
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4c1ef7b3705c..a1b126a6b0d7 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -83,7 +83,7 @@
EM(rxrpc_badmsg_bad_abort, "bad-abort") \
EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \
EM(rxrpc_badmsg_short_ack, "short-ack") \
- EM(rxrpc_badmsg_short_ack_info, "short-ack-info") \
+ EM(rxrpc_badmsg_short_ack_trailer, "short-ack-trailer") \
EM(rxrpc_badmsg_short_hdr, "short-hdr") \
EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \
EM(rxrpc_badmsg_zero_call, "zero-call") \
@@ -119,6 +119,7 @@
EM(rxrpc_call_poke_complete, "Compl") \
EM(rxrpc_call_poke_error, "Error") \
EM(rxrpc_call_poke_idle, "Idle") \
+ EM(rxrpc_call_poke_set_timeout, "Set-timo") \
EM(rxrpc_call_poke_start, "Start") \
EM(rxrpc_call_poke_timer, "Timer") \
E_(rxrpc_call_poke_timer_now, "Timer-now")
@@ -128,6 +129,7 @@
EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
EM(rxrpc_skb_get_conn_work, "GET conn-work") \
+ EM(rxrpc_skb_get_last_nack, "GET last-nack") \
EM(rxrpc_skb_get_local_work, "GET locl-work") \
EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
@@ -141,6 +143,7 @@
EM(rxrpc_skb_put_error_report, "PUT error-rep") \
EM(rxrpc_skb_put_input, "PUT input ") \
EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
+ EM(rxrpc_skb_put_last_nack, "PUT last-nack") \
EM(rxrpc_skb_put_purge, "PUT purge ") \
EM(rxrpc_skb_put_rotate, "PUT rotate ") \
EM(rxrpc_skb_put_unknown, "PUT unknown ") \
@@ -338,35 +341,26 @@
E_(rxrpc_rtt_rx_requested_ack, "RACK")
#define rxrpc_timer_traces \
- EM(rxrpc_timer_begin, "Begin ") \
- EM(rxrpc_timer_exp_ack, "ExpAck") \
- EM(rxrpc_timer_exp_hard, "ExpHrd") \
- EM(rxrpc_timer_exp_idle, "ExpIdl") \
- EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
- EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
- EM(rxrpc_timer_exp_normal, "ExpNml") \
- EM(rxrpc_timer_exp_ping, "ExpPng") \
- EM(rxrpc_timer_exp_resend, "ExpRsn") \
- EM(rxrpc_timer_init_for_reply, "IniRpl") \
- EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
- EM(rxrpc_timer_restart, "Restrt") \
- EM(rxrpc_timer_set_for_ack, "SetAck") \
- EM(rxrpc_timer_set_for_hard, "SetHrd") \
- EM(rxrpc_timer_set_for_idle, "SetIdl") \
- EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
- EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
- EM(rxrpc_timer_set_for_normal, "SetNml") \
- EM(rxrpc_timer_set_for_ping, "SetPng") \
- EM(rxrpc_timer_set_for_resend, "SetRTx") \
- E_(rxrpc_timer_set_for_send, "SetSnd")
+ EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \
+ EM(rxrpc_timer_trace_expect_rx, "ExpectRx ") \
+ EM(rxrpc_timer_trace_hard, "HardLimit") \
+ EM(rxrpc_timer_trace_idle, "IdleLimit") \
+ EM(rxrpc_timer_trace_keepalive, "KeepAlive") \
+ EM(rxrpc_timer_trace_lost_ack, "LostAck ") \
+ EM(rxrpc_timer_trace_ping, "DelayPing") \
+ EM(rxrpc_timer_trace_resend, "Resend ") \
+ EM(rxrpc_timer_trace_resend_reset, "ResendRst") \
+ E_(rxrpc_timer_trace_resend_tx, "ResendTx ")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
+ EM(rxrpc_propose_ack_delayed_ack, "DlydAck") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
EM(rxrpc_propose_ack_input_data_hole, "DataInH") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+ EM(rxrpc_propose_ack_ping_for_0_retrans, "0-Retrn") \
EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
@@ -1082,9 +1076,9 @@ TRACE_EVENT(rxrpc_tx_packet,
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
- rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+ rxrpc_serial_t serial, unsigned int flags, bool lose),
- TP_ARGS(call, seq, serial, flags, retrans, lose),
+ TP_ARGS(call, seq, serial, flags, lose),
TP_STRUCT__entry(
__field(unsigned int, call)
@@ -1092,8 +1086,7 @@ TRACE_EVENT(rxrpc_tx_data,
__field(rxrpc_serial_t, serial)
__field(u32, cid)
__field(u32, call_id)
- __field(u8, flags)
- __field(bool, retrans)
+ __field(u16, flags)
__field(bool, lose)
),
@@ -1104,7 +1097,6 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
- __entry->retrans = retrans;
__entry->lose = lose;
),
@@ -1114,8 +1106,8 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->call_id,
__entry->serial,
__entry->seq,
- __entry->flags,
- __entry->retrans ? " *RETRANS*" : "",
+ __entry->flags & RXRPC_TXBUF_WIRE_FLAGS,
+ __entry->flags & RXRPC_TXBUF_RESENT ? " *RETRANS*" : "",
__entry->lose ? " *LOSE*" : "")
);
@@ -1312,90 +1304,112 @@ TRACE_EVENT(rxrpc_rtt_rx,
__entry->rto)
);
-TRACE_EVENT(rxrpc_timer,
- TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- unsigned long now),
+TRACE_EVENT(rxrpc_timer_set,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
- TP_ARGS(call, why, now),
+ TP_ARGS(call, delay, why),
TP_STRUCT__entry(
__field(unsigned int, call)
__field(enum rxrpc_timer_trace, why)
- __field(long, now)
- __field(long, ack_at)
- __field(long, ack_lost_at)
- __field(long, resend_at)
- __field(long, ping_at)
- __field(long, expect_rx_by)
- __field(long, expect_req_by)
- __field(long, expect_term_by)
- __field(long, timer)
+ __field(ktime_t, delay)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->now = now;
- __entry->ack_at = call->delay_ack_at;
- __entry->ack_lost_at = call->ack_lost_at;
- __entry->resend_at = call->resend_at;
- __entry->expect_rx_by = call->expect_rx_by;
- __entry->expect_req_by = call->expect_req_by;
- __entry->expect_term_by = call->expect_term_by;
- __entry->timer = call->timer.expires;
+ __entry->delay = delay;
),
- TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
+ TP_printk("c=%08x %s to=%lld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
- __entry->ack_at - __entry->now,
- __entry->ack_lost_at - __entry->now,
- __entry->resend_at - __entry->now,
- __entry->expect_rx_by - __entry->now,
- __entry->expect_req_by - __entry->now,
- __entry->expect_term_by - __entry->now,
- __entry->timer - __entry->now)
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_timer_exp,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay,
+ enum rxrpc_timer_trace why),
+
+ TP_ARGS(call, delay, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x %s to=%lld",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_timer_traces),
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_timer_can,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(enum rxrpc_timer_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%08x %s",
+ __entry->call,
+ __print_symbolic(__entry->why, rxrpc_timer_traces))
+ );
+
+TRACE_EVENT(rxrpc_timer_restart,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay, unsigned long delayj),
+
+ TP_ARGS(call, delay, delayj),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(unsigned long, delayj)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->delayj = delayj;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x to=%lld j=%ld",
+ __entry->call,
+ ktime_to_us(__entry->delay),
+ __entry->delayj)
);
TRACE_EVENT(rxrpc_timer_expired,
- TP_PROTO(struct rxrpc_call *call, unsigned long now),
+ TP_PROTO(struct rxrpc_call *call),
- TP_ARGS(call, now),
+ TP_ARGS(call),
TP_STRUCT__entry(
__field(unsigned int, call)
- __field(long, now)
- __field(long, ack_at)
- __field(long, ack_lost_at)
- __field(long, resend_at)
- __field(long, ping_at)
- __field(long, expect_rx_by)
- __field(long, expect_req_by)
- __field(long, expect_term_by)
- __field(long, timer)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->now = now;
- __entry->ack_at = call->delay_ack_at;
- __entry->ack_lost_at = call->ack_lost_at;
- __entry->resend_at = call->resend_at;
- __entry->expect_rx_by = call->expect_rx_by;
- __entry->expect_req_by = call->expect_req_by;
- __entry->expect_term_by = call->expect_term_by;
- __entry->timer = call->timer.expires;
),
- TP_printk("c=%08x EXPIRED a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
- __entry->call,
- __entry->ack_at - __entry->now,
- __entry->ack_lost_at - __entry->now,
- __entry->resend_at - __entry->now,
- __entry->expect_rx_by - __entry->now,
- __entry->expect_req_by - __entry->now,
- __entry->expect_term_by - __entry->now,
- __entry->timer - __entry->now)
+ TP_printk("c=%08x EXPIRED",
+ __entry->call)
);
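With this rework the timer events take a relative ktime_t delay rather than jiffies snapshots. A hypothetical set/expire pairing (call and delay assumed in scope):

	trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_resend);
	/* ... later, when the timer fires: */
	trace_rxrpc_timer_expired(call);
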
TRACE_EVENT(rxrpc_rx_lose,
@@ -1504,26 +1518,30 @@ TRACE_EVENT(rxrpc_drop_ack,
);
TRACE_EVENT(rxrpc_retransmit,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, s64 expiry),
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, ktime_t expiry),
- TP_ARGS(call, seq, expiry),
+ TP_ARGS(call, seq, serial, expiry),
TP_STRUCT__entry(
__field(unsigned int, call)
__field(rxrpc_seq_t, seq)
- __field(s64, expiry)
+ __field(rxrpc_serial_t, serial)
+ __field(ktime_t, expiry)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->seq = seq;
+ __entry->serial = serial;
__entry->expiry = expiry;
),
- TP_printk("c=%08x q=%x xp=%lld",
+ TP_printk("c=%08x q=%x r=%x xp=%lld",
__entry->call,
__entry->seq,
- __entry->expiry)
+ __entry->serial,
+ ktime_to_us(__entry->expiry))
);
TRACE_EVENT(rxrpc_congest,
@@ -1552,7 +1570,7 @@ TRACE_EVENT(rxrpc_congest,
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
@@ -1560,9 +1578,9 @@ TRACE_EVENT(rxrpc_congest,
__print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
__entry->sum.cwnd,
__entry->sum.ssthresh,
- __entry->sum.nr_acks, __entry->sum.saw_nacks,
+ __entry->sum.nr_acks, __entry->sum.nr_retained_nacks,
__entry->sum.nr_new_acks,
- __entry->sum.nr_rot_new_acks,
+ __entry->sum.nr_new_nacks,
__entry->top - __entry->hard_ack,
__entry->sum.cumulative_acks,
__entry->sum.dup_acks,
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 7b1ddffa3dfc..699dafd204ea 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -88,7 +88,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ __entry->skbaddr, __entry->skaddr,
show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
@@ -258,6 +259,8 @@ TRACE_EVENT(tcp_probe,
__field(__u32, srtt)
__field(__u32, rcv_wnd)
__field(__u64, sock_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
),
TP_fast_assign(
@@ -285,14 +288,18 @@ TRACE_EVENT(tcp_probe,
__entry->ssthresh = tcp_current_ssthresh(sk);
__entry->srtt = tp->srtt_us >> 3;
__entry->sock_cookie = sock_gen_cookie(sk);
+
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
),
- TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx",
+ TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx skbaddr=%p skaddr=%p",
show_family_name(__entry->family),
__entry->saddr, __entry->daddr, __entry->mark,
__entry->data_len, __entry->snd_nxt, __entry->snd_una,
__entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
- __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie)
+ __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie,
+ __entry->skbaddr, __entry->skaddr)
);
#define TP_STORE_ADDR_PORTS_SKB_V4(__entry, skb) \
@@ -361,7 +368,8 @@ DECLARE_EVENT_CLASS(tcp_event_skb,
TP_STORE_ADDR_PORTS_SKB(__entry, skb);
),
- TP_printk("src=%pISpc dest=%pISpc", __entry->saddr, __entry->daddr)
+ TP_printk("skbaddr=%p src=%pISpc dest=%pISpc",
+ __entry->skbaddr, __entry->saddr, __entry->daddr)
);
DEFINE_EVENT(tcp_event_skb, tcp_bad_csum,
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index de1944e42c65..19a13468eca5 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -53,7 +53,7 @@ extern "C" {
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
-#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
+#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID 7
#define DRM_IVPU_PARAM_FW_API_VERSION 8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
@@ -64,11 +64,18 @@ extern "C" {
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
+/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
+#define DRM_IVPU_JOB_PRIORITY_IDLE 1
+#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
+#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
+#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
+
/**
* DRM_IVPU_CAP_METRIC_STREAMER
*
@@ -112,10 +119,6 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
* Lowest VPU virtual address available in the current context (read-only)
*
- * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- * Value of current context scheduling priority (read-write).
- * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
- *
* %DRM_IVPU_PARAM_CONTEXT_ID:
* Current context ID, always greater than 0 (read-only)
*
@@ -286,10 +289,23 @@ struct drm_ivpu_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
+
+ /**
+ * @priority:
+ *
+	 * Priority to be set for the related job command queue; can be one of the following:
+ * %DRM_IVPU_JOB_PRIORITY_DEFAULT
+ * %DRM_IVPU_JOB_PRIORITY_IDLE
+ * %DRM_IVPU_JOB_PRIORITY_NORMAL
+ * %DRM_IVPU_JOB_PRIORITY_FOCUS
+ * %DRM_IVPU_JOB_PRIORITY_REALTIME
+ */
+ __u32 priority;
};
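For illustration, a minimal userspace sketch of a submit using the new per-job priority; the fd and bo_handle setup are assumed and error handling is omitted:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/ivpu_accel.h>

	/* Sketch: submit one buffer at normal priority. */
	static int submit_normal(int fd, uint32_t bo_handle)
	{
		struct drm_ivpu_submit args = {
			.buffers_ptr  = (uintptr_t)&bo_handle,
			.buffer_count = 1,
			.priority     = DRM_IVPU_JOB_PRIORITY_NORMAL,
		};

		return ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args);
	}
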
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
+#define DRM_IVPU_JOB_STATUS_ABORTED 256
/**
* struct drm_ivpu_bo_wait - Wait for BO to become inactive
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 0bade1592f34..77d7ff0d5b11 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -54,6 +54,20 @@ extern "C" {
*/
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
+/*
+ * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
+ *
+ * Query the VRAM BAR size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
+
+/*
+ * NOUVEAU_GETPARAM_VRAM_USED
+ *
+ * Get the amount of VRAM currently used.
+ */
+#define NOUVEAU_GETPARAM_VRAM_USED 19
+
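A userspace sketch of querying one of the new parameters through the existing getparam ioctl (fd is assumed to be an open nouveau device node):

	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>

	/* Sketch: read the VRAM BAR size; gp.value holds the result on success. */
	static long query_vram_bar(int fd, unsigned long long *out)
	{
		struct drm_nouveau_getparam gp = {
			.param = NOUVEAU_GETPARAM_VRAM_BAR_SIZE,
		};
		long ret = ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &gp);

		if (ret == 0)
			*out = gp.value;
		return ret;
	}
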
struct drm_nouveau_getparam {
__u64 param;
__u64 value;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 9fa3ae324731..bb0c8a994116 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -831,11 +831,6 @@ struct drm_xe_vm_destroy {
* - %DRM_XE_VM_BIND_OP_PREFETCH
*
* and the @flags can be:
- * - %DRM_XE_VM_BIND_FLAG_READONLY
- * - %DRM_XE_VM_BIND_FLAG_ASYNC
- * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the
- * MAP operation immediately rather than deferring the MAP to the page
- * fault handler.
* - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
* tables are setup with a special bit which indicates writes are
* dropped and all reads return zero. In the future, the NULL flags
@@ -928,9 +923,8 @@ struct drm_xe_vm_bind_op {
/** @op: Bind operation to perform */
__u32 op;
-#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
-#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
+#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
/** @flags: Bind flags */
__u32 flags;
@@ -1045,20 +1039,6 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
-/* Monitor 128KB contiguous region with 4K sub-granularity */
-#define DRM_XE_ACC_GRANULARITY_128K 0
-/* Monitor 2MB contiguous region with 64KB sub-granularity */
-#define DRM_XE_ACC_GRANULARITY_2M 1
-/* Monitor 16MB contiguous region with 512KB sub-granularity */
-#define DRM_XE_ACC_GRANULARITY_16M 2
-/* Monitor 64MB contiguous region with 2M sub-granularity */
-#define DRM_XE_ACC_GRANULARITY_64M 3
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 754e68ca8744..a241f407c234 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -77,12 +77,29 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
+/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
+ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
+ * the trailing flexible array member) instead.
+ */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
__u8 data[0]; /* Arbitrary size */
};
+/* Header for bpf_lpm_trie_key structs */
+struct bpf_lpm_trie_key_hdr {
+ __u32 prefixlen;
+};
+
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
+struct bpf_lpm_trie_key_u8 {
+ union {
+ struct bpf_lpm_trie_key_hdr hdr;
+ __u32 prefixlen;
+ };
+ __u8 data[]; /* Arbitrary size */
+};
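The intended replacement pattern is to embed the header in a properly typed key rather than rely on the old zero-length array. A sketch for an IPv4 key (struct name and layout are illustrative):

	/* Sketch: a typed LPM key; prefixlen comes first via the header. */
	struct ipv4_lpm_key {
		struct bpf_lpm_trie_key_hdr hdr;	/* .prefixlen, up to 32 */
		__u32 addr;	/* IPv4 address, network byte order */
	};
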
+
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
@@ -617,7 +634,11 @@ union bpf_iter_link_info {
* to NULL to begin the batched operation. After each subsequent
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
* *out_batch* as the *in_batch* for the next operation to
- * continue iteration from the current point.
+ * continue iteration from the current point. Both *in_batch* and
+ * *out_batch* must point to memory large enough to hold a key,
+ * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
+ * must be at least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point
* to memory large enough to hold *count* items based on the key
@@ -847,6 +868,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
+ * BPF_TOKEN_CREATE
+ * Description
+ * Create BPF token with embedded information about what
+ * BPF-related functionality it allows:
+ * - a set of allowed bpf() syscall commands;
+ * - a set of allowed BPF map types to be created with
+ * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
+ * - a set of allowed BPF program types and BPF program attach
+ * types to be loaded with BPF_PROG_LOAD command, if
+ * BPF_PROG_LOAD itself is allowed.
+ *
+ * BPF token is created (derived) from an instance of BPF FS,
+ * assuming it has necessary delegation mount options specified.
+ * This BPF token can be passed as an extra parameter to various
+ * bpf() syscall commands to grant BPF subsystem functionality to
+ * unprivileged processes.
+ *
+ * When created, BPF token is "associated" with the owning
+ * user namespace of BPF FS instance (super block) that it was
+ * derived from, and subsequent BPF operations performed with
+ * BPF token would be performing capabilities checks (i.e.,
+ * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
+ * that user namespace. Without BPF token, such capabilities
+ * have to be granted in init user namespace, making bpf()
+ * syscall incompatible with user namespace, for the most part.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -901,6 +952,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
+ BPF_TOKEN_CREATE,
+ __MAX_BPF_CMD,
};
enum bpf_map_type {
@@ -951,6 +1004,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
+ __MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -995,6 +1049,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
BPF_PROG_TYPE_NETFILTER,
+ __MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@@ -1330,6 +1385,12 @@ enum {
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
BPF_F_PATH_FD = (1U << 14),
+
+/* Flag for value_type_btf_obj_fd, the fd is available */
+ BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+/* BPF token FD is passed in a corresponding command's token_fd field */
+ BPF_F_TOKEN_FD = (1U << 16),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1403,6 +1464,15 @@ union bpf_attr {
* to using 5 hash functions).
*/
__u64 map_extra;
+
+ __s32 value_type_btf_obj_fd; /* fd pointing to a BTF
+ * type data for
+ * btf_vmlinux_value_type_id.
+ */
+ /* BPF token FD to use with BPF_MAP_CREATE operation.
+ * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -1472,6 +1542,10 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 log_true_size;
+ /* BPF token FD to use with BPF_PROG_LOAD operation.
+ * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -1584,6 +1658,11 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 btf_log_true_size;
+ __u32 btf_flags;
+ /* BPF token FD to use with BPF_BTF_LOAD operation.
+ * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 btf_token_fd;
};
struct {
@@ -1714,6 +1793,11 @@ union bpf_attr {
__u32 flags; /* extra flags */
} prog_bind_map;
+ struct { /* struct used by BPF_TOKEN_CREATE command */
+ __u32 flags;
+ __u32 bpffs_fd;
+ } token_create;
+
} __attribute__((aligned(8)));
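A minimal userspace sketch of the new command, assuming bpffs_fd refers to a BPF FS instance mounted with the appropriate delegation options; error handling is omitted:

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/bpf.h>

	/* Sketch: derive a BPF token from a delegated bpffs mount fd. */
	static int bpf_token_create(int bpffs_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.token_create.bpffs_fd = bpffs_fd;

		return syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
	}
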
/* The description below is an attempt at providing documentation to eBPF
@@ -4839,9 +4923,9 @@ union bpf_attr {
* going through the CPU's backlog queue.
*
* The *flags* argument is reserved and must be 0. The helper is
- * currently only supported for tc BPF program types at the ingress
- * hook and for veth device types. The peer device must reside in a
- * different network namespace.
+ * currently only supported for tc BPF program types at the
+ * ingress hook and for veth and netkit target device types. The
+ * peer device must reside in a different network namespace.
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
@@ -6487,7 +6571,7 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
- __u32 :32; /* alignment pad */
+ __u32 btf_vmlinux_id;
__u64 map_extra;
} __attribute__((aligned(8)));
@@ -6563,6 +6647,7 @@ struct bpf_link_info {
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
+ __aligned_u64 cookies;
} kprobe_multi;
struct {
__aligned_u64 path;
@@ -6582,6 +6667,7 @@ struct bpf_link_info {
__aligned_u64 file_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from file_name */
+ __u64 cookie;
} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
struct {
__aligned_u64 func_name; /* in/out */
@@ -6589,14 +6675,19 @@ struct bpf_link_info {
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
+ __u64 cookie;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
__u32 name_len;
+ __u32 :32;
+ __u64 cookie;
} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
struct {
__u64 config;
__u32 type;
+ __u32 :32;
+ __u64 cookie;
} event; /* BPF_PERF_EVENT_EVENT */
};
} perf_event;
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 7c29d82db9ee..f8bc34a6bcfa 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -614,6 +614,9 @@ struct btrfs_ioctl_clone_range_args {
*/
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
+#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
+ BTRFS_DEFRAG_RANGE_START_IO)
+
struct btrfs_ioctl_defrag_range_args {
/* start of the defrag operation */
__u64 start;
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 939db2388208..e78cbd85ce7c 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -193,9 +193,14 @@ struct canfd_frame {
#define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */
#define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */
+/* the 8-bit VCID is optionally placed in the canxl_frame.prio element */
+#define CANXL_VCID_OFFSET 16 /* bit offset of VCID in prio element */
+#define CANXL_VCID_VAL_MASK 0xFFUL /* VCID is an 8-bit value */
+#define CANXL_VCID_MASK (CANXL_VCID_VAL_MASK << CANXL_VCID_OFFSET)
+
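A sketch of how the VCID shares the prio element, per the masks above (frame and vcid assumed in scope):

	/* Place an 8-bit VCID into canxl_frame.prio on transmit... */
	frame.prio |= ((canid_t)vcid << CANXL_VCID_OFFSET) & CANXL_VCID_MASK;
	/* ...and extract it again on reception. */
	vcid = (frame.prio & CANXL_VCID_MASK) >> CANXL_VCID_OFFSET;
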
/**
* struct canxl_frame - CAN with e'X'tended frame 'L'ength frame structure
- * @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags
+ * @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags / VCID
* @flags: additional flags for CAN XL
* @sdt: SDU (service data unit) type
* @len: frame payload length in byte (CANXL_MIN_DLEN .. CANXL_MAX_DLEN)
@@ -205,7 +210,7 @@ struct canfd_frame {
* @prio shares the same position as @can_id from struct can[fd]_frame.
*/
struct canxl_frame {
- canid_t prio; /* 11 bit priority for arbitration (canid_t) */
+ canid_t prio; /* 11 bit priority for arbitration / 8 bit VCID */
__u8 flags; /* additional flags for CAN XL */
__u8 sdt; /* SDU (service data unit) type */
__u16 len; /* frame payload length in byte */
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
index 439c982f7e81..6cde62371b6f 100644
--- a/include/uapi/linux/can/isotp.h
+++ b/include/uapi/linux/can/isotp.h
@@ -137,6 +137,7 @@ struct can_isotp_ll_options {
#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
+#define CAN_ISOTP_DYN_FC_PARMS 0x2000 /* dynamic FC parameters BS/STmin */
/* protocol machine default values */
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 31622c9b7988..e024d896e278 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -65,6 +65,22 @@ enum {
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
CAN_RAW_XL_FRAMES, /* allow CAN XL frames (default:off) */
+ CAN_RAW_XL_VCID_OPTS, /* CAN XL VCID configuration options */
};
+/* configuration for CAN XL virtual CAN identifier (VCID) handling */
+struct can_raw_vcid_options {
+
+ __u8 flags; /* flags for vcid (filter) behaviour */
+ __u8 tx_vcid; /* VCID value set into canxl_frame.prio */
+ __u8 rx_vcid; /* VCID value for VCID filter */
+ __u8 rx_vcid_mask; /* VCID mask for VCID filter */
+
+};
+
+/* can_raw_vcid_options.flags for CAN XL virtual CAN identifier handling */
+#define CAN_RAW_XL_VCID_TX_SET 0x01
+#define CAN_RAW_XL_VCID_TX_PASS 0x02
+#define CAN_RAW_XL_VCID_RX_FILTER 0x04
+
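A userspace sketch enabling VCID tagging on transmit plus an exact-match receive filter on a CAN_RAW socket (the socket setup and the 0x42 value are assumptions):

	#include <sys/socket.h>
	#include <linux/can.h>
	#include <linux/can/raw.h>

	/* Sketch: tag outgoing CAN XL frames with VCID 0x42 and accept only
	 * incoming frames carrying the same VCID. */
	static int enable_vcid(int sock)
	{
		struct can_raw_vcid_options opts = {
			.flags        = CAN_RAW_XL_VCID_TX_SET | CAN_RAW_XL_VCID_RX_FILTER,
			.tx_vcid      = 0x42,
			.rx_vcid      = 0x42,
			.rx_vcid_mask = 0xff,
		};

		return setsockopt(sock, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
				  &opts, sizeof(opts));
	}
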
#endif /* !_UAPI_CAN_RAW_H */
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index b4e947f9bfbc..0c13d7f1a1bc 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -50,6 +50,35 @@ enum dpll_lock_status {
DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1)
};
+/**
+ * enum dpll_lock_status_error - if previous status change was done due to a
+ * failure, this provides information of dpll device lock status error. Valid
+ * values for DPLL_A_LOCK_STATUS_ERROR attribute
+ * @DPLL_LOCK_STATUS_ERROR_NONE: dpll device lock status was changed without
+ * any error
+ * @DPLL_LOCK_STATUS_ERROR_UNDEFINED: dpll device lock status was changed due
+ * to an undefined error. The driver reports this value when it cannot
+ * determine a more specific error type.
+ * @DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN: dpll device lock status was changed
+ * because the associated media went down. This may happen, for example, if
+ * the dpll device was previously locked on an input pin of type
+ * PIN_TYPE_SYNCE_ETH_PORT.
+ * @DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH: the FFO
+ * (Fractional Frequency Offset) between the RX and TX symbol rate on the
+ * media got too high. This may happen for example if dpll device was
+ * previously locked on an input pin of type PIN_TYPE_SYNCE_ETH_PORT.
+ */
+enum dpll_lock_status_error {
+ DPLL_LOCK_STATUS_ERROR_NONE = 1,
+ DPLL_LOCK_STATUS_ERROR_UNDEFINED,
+ DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN,
+ DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH,
+
+ /* private: */
+ __DPLL_LOCK_STATUS_ERROR_MAX,
+ DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1)
+};
+
#define DPLL_TEMP_DIVIDER 1000
/**
@@ -150,6 +179,7 @@ enum dpll_a {
DPLL_A_LOCK_STATUS,
DPLL_A_TEMP,
DPLL_A_TYPE,
+ DPLL_A_LOCK_STATUS_ERROR,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index cfbcc4cc49ac..4f4b948ef381 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -85,4 +85,17 @@ struct epoll_event {
__u64 data;
} EPOLL_PACKED;
+struct epoll_params {
+ __u32 busy_poll_usecs;
+ __u16 busy_poll_budget;
+ __u8 prefer_busy_poll;
+
+	/* pad the struct to a multiple of 64 bits */
+ __u8 __pad;
+};
+
+#define EPOLL_IOC_TYPE 0x8A
+#define EPIOCSPARAMS _IOW(EPOLL_IOC_TYPE, 0x01, struct epoll_params)
+#define EPIOCGPARAMS _IOR(EPOLL_IOC_TYPE, 0x02, struct epoll_params)
+
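A userspace sketch of the new per-epoll busy-poll knobs (epfd and the parameter values are assumptions; the pad field must remain zero):

	#include <sys/ioctl.h>
	#include <linux/eventpoll.h>

	/* Sketch: let this epoll instance busy poll for up to 64us per wait. */
	static int set_busy_poll(int epfd)
	{
		struct epoll_params params = {
			.busy_poll_usecs  = 64,
			.busy_poll_budget = 8,
			.prefer_busy_poll = 1,
		};

		return ioctl(epfd, EPIOCSPARAMS, &params);
	}
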
#endif /* _UAPI_LINUX_EVENTPOLL_H */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index ab9bcff96e4d..ffa637b38c93 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1505,6 +1505,7 @@ enum {
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
IFLA_BOND_NS_IP6_TARGET,
+ IFLA_BOND_COUPLED_CONTROL,
__IFLA_BOND_MAX,
};
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 5060963707b1..f2e0b2d50e6b 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -91,8 +91,6 @@ enum iio_modifier {
IIO_MOD_CO2,
IIO_MOD_VOC,
IIO_MOD_LIGHT_UV,
- IIO_MOD_LIGHT_UVA,
- IIO_MOD_LIGHT_UVB,
IIO_MOD_LIGHT_DUV,
IIO_MOD_PM1,
IIO_MOD_PM2P5,
@@ -107,6 +105,8 @@ enum iio_modifier {
IIO_MOD_PITCH,
IIO_MOD_YAW,
IIO_MOD_ROLL,
+ IIO_MOD_LIGHT_UVA,
+ IIO_MOD_LIGHT_UVB,
};
enum iio_event_type {
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index c4c53a9ab959..ff8d21f9e95b 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -145,7 +145,7 @@ struct in6_flowlabel_req {
#define IPV6_TLV_PADN 1
#define IPV6_TLV_ROUTERALERT 5
#define IPV6_TLV_CALIPSO 7 /* RFC 5570 */
-#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */
+#define IPV6_TLV_IOAM 49 /* RFC 9486 */
#define IPV6_TLV_JUMBO 194
#define IPV6_TLV_HAO 201 /* home address option */
diff --git a/include/uapi/linux/ioam6_genl.h b/include/uapi/linux/ioam6_genl.h
index ca4b22833754..1733fbc51fb5 100644
--- a/include/uapi/linux/ioam6_genl.h
+++ b/include/uapi/linux/ioam6_genl.h
@@ -49,4 +49,24 @@ enum {
#define IOAM6_CMD_MAX (__IOAM6_CMD_MAX - 1)
+#define IOAM6_GENL_EV_GRP_NAME "ioam6_events"
+
+enum ioam6_event_type {
+ IOAM6_EVENT_UNSPEC,
+ IOAM6_EVENT_TRACE,
+};
+
+enum ioam6_event_attr {
+ IOAM6_EVENT_ATTR_UNSPEC,
+
+ IOAM6_EVENT_ATTR_TRACE_NAMESPACE, /* u16 */
+ IOAM6_EVENT_ATTR_TRACE_NODELEN, /* u8 */
+ IOAM6_EVENT_ATTR_TRACE_TYPE, /* u32 */
+ IOAM6_EVENT_ATTR_TRACE_DATA, /* Binary */
+
+ __IOAM6_EVENT_ATTR_MAX
+};
+
+#define IOAM6_EVENT_ATTR_MAX (__IOAM6_EVENT_ATTR_MAX - 1)
+
#endif /* _UAPI_LINUX_IOAM6_GENL_H */
diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h
index 154ab56651f1..e1db65df9359 100644
--- a/include/uapi/linux/mctp.h
+++ b/include/uapi/linux/mctp.h
@@ -50,7 +50,14 @@ struct sockaddr_mctp_ext {
#define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0)
#define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1)
+#define SIOCMCTPALLOCTAG2 (SIOCPROTOPRIVATE + 2)
+#define SIOCMCTPDROPTAG2 (SIOCPROTOPRIVATE + 3)
+/* Deprecated: use mctp_ioc_tag_ctl2 / the TAG2 ioctls instead, which define
+ * the MCTP network ID as part of the allocated tag. Using this assumes the
+ * default net ID for allocated tags, which may not give correct behaviour on
+ * systems with multiple networks configured.
+ */
struct mctp_ioc_tag_ctl {
mctp_eid_t peer_addr;
@@ -65,4 +72,29 @@ struct mctp_ioc_tag_ctl {
__u16 flags;
};
+struct mctp_ioc_tag_ctl2 {
+ /* Peer details: network ID, peer EID, local EID. All set by the
+ * caller.
+ *
+ * Local EID must be MCTP_ADDR_NULL or MCTP_ADDR_ANY in current
+ * kernels.
+ */
+ unsigned int net;
+ mctp_eid_t peer_addr;
+ mctp_eid_t local_addr;
+
+ /* Set by caller, but no flags defined currently. Must be 0 */
+ __u16 flags;
+
+ /* For SIOCMCTPALLOCTAG2: must be passed as zero, kernel will
+ * populate with the allocated tag value. Returned tag value will
+ * always have TO and PREALLOC set.
+ *
+ * For SIOCMCTPDROPTAG2: userspace provides tag value to drop, from
+ * a prior SIOCMCTPALLOCTAG2 call (and so must have TO and PREALLOC set).
+ */
+ __u8 tag;
+
+};
+
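A userspace sketch of the network-aware tag allocation (sd is an AF_MCTP socket; the net and peer values are assumptions):

	#include <sys/ioctl.h>
	#include <linux/sockios.h>
	#include <linux/mctp.h>

	/* Sketch: allocate a tag for peer EID 9 on MCTP network 1; on success
	 * ctl.tag holds the allocated value with TO and PREALLOC set. */
	static int alloc_tag(int sd, unsigned char *tag_out)
	{
		struct mctp_ioc_tag_ctl2 ctl = {
			.net        = 1,
			.peer_addr  = 9,
			.local_addr = MCTP_ADDR_ANY,
		};
		int ret = ioctl(sd, SIOCMCTPALLOCTAG2, &ctl);

		if (ret == 0)
			*tag_out = ctl.tag;
		return ret;
	}
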
#endif /* __UAPI_MCTP_H */
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index d03863da180e..c0c8ec995b06 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -138,6 +138,8 @@
#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */
#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
+#define MDIO_PMA_SPEED_2_5G 0x2000 /* 2.5G capable */
+#define MDIO_PMA_SPEED_5G 0x4000 /* 5G capable */
#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
#define MDIO_PCS_SPEED_2_5G 0x0040 /* 2.5G capable */
#define MDIO_PCS_SPEED_5G 0x0080 /* 5G capable */
@@ -348,6 +350,8 @@
/* BASE-T1 auto-negotiation advertisement register [31:16] */
#define MDIO_AN_T1_ADV_M_B10L 0x4000 /* device is compatible with 10BASE-T1L */
+#define MDIO_AN_T1_ADV_M_1000BT1 0x0080 /* advertise 1000BASE-T1 */
+#define MDIO_AN_T1_ADV_M_100BT1 0x0020 /* advertise 100BASE-T1 */
#define MDIO_AN_T1_ADV_M_MST 0x0010 /* advertise master preference */
/* BASE-T1 auto-negotiation advertisement register [47:32] */
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 93cb411adf72..bb65ee840cda 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -70,6 +70,10 @@ enum netdev_queue_type {
NETDEV_QUEUE_TYPE_TX,
};
+enum netdev_qstats_scope {
+ NETDEV_QSTATS_SCOPE_QUEUE = 1,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -133,6 +137,21 @@ enum {
};
enum {
+ NETDEV_A_QSTATS_IFINDEX = 1,
+ NETDEV_A_QSTATS_QUEUE_TYPE,
+ NETDEV_A_QSTATS_QUEUE_ID,
+ NETDEV_A_QSTATS_SCOPE,
+ NETDEV_A_QSTATS_RX_PACKETS = 8,
+ NETDEV_A_QSTATS_RX_BYTES,
+ NETDEV_A_QSTATS_TX_PACKETS,
+ NETDEV_A_QSTATS_TX_BYTES,
+ NETDEV_A_QSTATS_RX_ALLOC_FAIL,
+
+ __NETDEV_A_QSTATS_MAX,
+ NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -144,6 +163,7 @@ enum {
NETDEV_CMD_PAGE_POOL_STATS_GET,
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
+ NETDEV_CMD_QSTATS_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index ca30232b7bc8..aa4094ca2444 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -179,13 +179,17 @@ enum nft_hook_attributes {
* enum nft_table_flags - nf_tables table flags
*
* @NFT_TABLE_F_DORMANT: this table is not active
+ * @NFT_TABLE_F_OWNER: this table is owned by a process
+ * @NFT_TABLE_F_PERSIST: this table shall outlive its owner
*/
enum nft_table_flags {
NFT_TABLE_F_DORMANT = 0x1,
NFT_TABLE_F_OWNER = 0x2,
+ NFT_TABLE_F_PERSIST = 0x4,
};
#define NFT_TABLE_F_MASK (NFT_TABLE_F_DORMANT | \
- NFT_TABLE_F_OWNER)
+ NFT_TABLE_F_OWNER | \
+ NFT_TABLE_F_PERSIST)
/**
* enum nft_table_attributes - nf_tables table netlink attributes
@@ -285,9 +289,11 @@ enum nft_rule_attributes {
/**
* enum nft_rule_compat_flags - nf_tables rule compat flags
*
+ * @NFT_RULE_COMPAT_F_UNUSED: unused
* @NFT_RULE_COMPAT_F_INV: invert the check result
*/
enum nft_rule_compat_flags {
+ NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
NFT_RULE_COMPAT_F_INV = (1 << 1),
NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
};
diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h
index d8ffa8c9ca78..dd8787f9cf39 100644
--- a/include/uapi/linux/nexthop.h
+++ b/include/uapi/linux/nexthop.h
@@ -30,6 +30,9 @@ enum {
#define NEXTHOP_GRP_TYPE_MAX (__NEXTHOP_GRP_TYPE_MAX - 1)
+#define NHA_OP_FLAG_DUMP_STATS		(1 << 0)
+#define NHA_OP_FLAG_DUMP_HW_STATS	(1 << 1)
+
enum {
NHA_UNSPEC,
NHA_ID, /* u32; id for nexthop. id == 0 means auto-assign */
@@ -60,6 +63,18 @@ enum {
/* nested; nexthop bucket attributes */
NHA_RES_BUCKET,
+ /* u32; operation-specific flags */
+ NHA_OP_FLAGS,
+
+ /* nested; nexthop group stats */
+ NHA_GROUP_STATS,
+
+ /* u32; nexthop hardware stats enable */
+ NHA_HW_STATS_ENABLE,
+
+ /* u32; read-only; whether any driver collects HW stats */
+ NHA_HW_STATS_USED,
+
__NHA_MAX,
};
@@ -101,4 +116,34 @@ enum {
#define NHA_RES_BUCKET_MAX (__NHA_RES_BUCKET_MAX - 1)
+enum {
+ NHA_GROUP_STATS_UNSPEC,
+
+ /* nested; nexthop group entry stats */
+ NHA_GROUP_STATS_ENTRY,
+
+ __NHA_GROUP_STATS_MAX,
+};
+
+#define NHA_GROUP_STATS_MAX (__NHA_GROUP_STATS_MAX - 1)
+
+enum {
+ NHA_GROUP_STATS_ENTRY_UNSPEC,
+
+ /* u32; nexthop id of the nexthop group entry */
+ NHA_GROUP_STATS_ENTRY_ID,
+
+ /* uint; number of packets forwarded via the nexthop group entry */
+ NHA_GROUP_STATS_ENTRY_PACKETS,
+
+ /* uint; number of packets forwarded via the nexthop group entry in
+ * hardware
+ */
+ NHA_GROUP_STATS_ENTRY_PACKETS_HW,
+
+ __NHA_GROUP_STATS_ENTRY_MAX,
+};
+
+#define NHA_GROUP_STATS_ENTRY_MAX (__NHA_GROUP_STATS_ENTRY_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 1ccdcae24372..f23ecbdd84a2 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -438,7 +438,8 @@
* %NL80211_ATTR_REASON_CODE can optionally be used to specify which type
* of disconnection indication should be sent to the station
* (Deauthentication or Disassociation frame and reason code for that
- * frame).
+ * frame). %NL80211_ATTR_MLO_LINK_ID can optionally be used to remove
+ * stations that are connected using at least that link as one of their links.
*
* @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to
* destination %NL80211_ATTR_MAC on the interface identified by
@@ -2851,6 +2852,10 @@ enum nl80211_commands {
* mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
* in Draft P802.11be_D4.0.
*
+ * @NL80211_ATTR_ASSOC_SPP_AMSDU: flag attribute used with
+ * %NL80211_CMD_ASSOCIATE indicating that SPP A-MSDUs
+ * are used on this connection
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3394,6 +3399,8 @@ enum nl80211_attrs {
NL80211_ATTR_MLO_TTLM_DLINK,
NL80211_ATTR_MLO_TTLM_ULINK,
+ NL80211_ATTR_ASSOC_SPP_AMSDU,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3534,6 +3541,7 @@ enum nl80211_iftype {
* @NL80211_STA_FLAG_ASSOCIATED: station is associated; used with drivers
* that support %NL80211_FEATURE_FULL_AP_CLIENT_STATE to transition a
* previously added station into associated state
+ * @NL80211_STA_FLAG_SPP_AMSDU: station supports SPP A-MSDUs
* @NL80211_STA_FLAG_MAX: highest station flag number currently defined
* @__NL80211_STA_FLAG_AFTER_LAST: internal use
*/
@@ -3546,6 +3554,7 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_AUTHENTICATED,
NL80211_STA_FLAG_TDLS_PEER,
NL80211_STA_FLAG_ASSOCIATED,
+ NL80211_STA_FLAG_SPP_AMSDU,
/* keep last */
__NL80211_STA_FLAG_AFTER_LAST,
@@ -4260,10 +4269,13 @@ enum nl80211_wmm_rule {
* allowed for peer-to-peer or adhoc communication under the control
* of a DFS master which operates on the same channel (FCC-594280 D01
* Section B.3). Should be used together with %NL80211_RRF_DFS only.
- * @NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT: Client connection to VLP AP
+ * @NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP
* not allowed using this channel
- * @NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT: Client connection to AFC AP
+ * @NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP
* not allowed using this channel
+ * @NL80211_FREQUENCY_ATTR_CAN_MONITOR: This channel can be used in monitor
+ * mode despite other (regulatory) restrictions, even if the channel is
+ * otherwise completely disabled.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4304,8 +4316,9 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_EHT,
NL80211_FREQUENCY_ATTR_PSD,
NL80211_FREQUENCY_ATTR_DFS_CONCURRENT,
- NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT,
- NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT,
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT,
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT,
+ NL80211_FREQUENCY_ATTR_CAN_MONITOR,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4318,6 +4331,10 @@ enum nl80211_frequency_attr {
#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
NL80211_FREQUENCY_ATTR_IR_CONCURRENT
+#define NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT \
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT
+#define NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT \
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT
/**
* enum nl80211_bitrate_attr - bitrate attributes
@@ -4455,14 +4472,7 @@ enum nl80211_reg_rule_attr {
* value as specified by &struct nl80211_bss_select_rssi_adjust.
* @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching
* (this cannot be used together with SSID).
- * @NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI: Nested attribute that carries the
- * band specific minimum rssi thresholds for the bands defined in
- * enum nl80211_band. The minimum rssi threshold value(s32) specific to a
- * band shall be encapsulated in attribute with type value equals to one
- * of the NL80211_BAND_* defined in enum nl80211_band. For example, the
- * minimum rssi threshold value for 2.4GHZ band shall be encapsulated
- * within an attribute of type NL80211_BAND_2GHZ. And one or more of such
- * attributes will be nested within this attribute.
+ * @NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI: Obsolete
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -4475,7 +4485,7 @@ enum nl80211_sched_scan_match_attr {
NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
NL80211_SCHED_SCAN_MATCH_ATTR_BSSID,
- NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI,
+ NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI, /* obsolete */
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -4515,8 +4525,8 @@ enum nl80211_sched_scan_match_attr {
peer-to-peer or adhoc communication under the control of a DFS master
which operates on the same channel (FCC-594280 D01 Section B.3).
Should be used together with %NL80211_RRF_DFS only.
- * @NL80211_RRF_NO_UHB_VLP_CLIENT: Client connection to VLP AP not allowed
- * @NL80211_RRF_NO_UHB_AFC_CLIENT: Client connection to AFC AP not allowed
+ * @NL80211_RRF_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP not allowed
+ * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -4539,8 +4549,8 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_EHT = 1<<19,
NL80211_RRF_PSD = 1<<20,
NL80211_RRF_DFS_CONCURRENT = 1<<21,
- NL80211_RRF_NO_UHB_VLP_CLIENT = 1<<22,
- NL80211_RRF_NO_UHB_AFC_CLIENT = 1<<23,
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22,
+ NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -4549,6 +4559,8 @@ enum nl80211_reg_rule_flags {
#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
NL80211_RRF_NO_HT40PLUS)
#define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT
+#define NL80211_RRF_NO_UHB_VLP_CLIENT NL80211_RRF_NO_6GHZ_VLP_CLIENT
+#define NL80211_RRF_NO_UHB_AFC_CLIENT NL80211_RRF_NO_6GHZ_AFC_CLIENT
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
@@ -5096,14 +5108,17 @@ enum nl80211_bss_use_for {
* BSS isn't possible
* @NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY: NSTR nonprimary links aren't
* supported by the device, and this BSS entry represents one.
- * @NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH: STA is not supporting
+ * @NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH: STA is not supporting
* the AP power type (SP, VLP, AP) that the AP uses.
*/
enum nl80211_bss_cannot_use_reasons {
NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1 << 0,
- NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 1 << 1,
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 1 << 1,
};
+#define NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH \
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH
+
/**
* enum nl80211_bss - netlink attributes for a BSS
*
@@ -5742,6 +5757,8 @@ struct nl80211_pattern_support {
* %NL80211_ATTR_SCAN_FREQUENCIES contains more than one
* frequency, it means that the match occurred in more than one
* channel.
+ * @NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC: For wakeup reporting only.
+ * Wake up happened due to an unprotected deauth or disassoc frame received while MFP was in use.
* @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers
* @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number
*
@@ -5769,6 +5786,7 @@ enum nl80211_wowlan_triggers {
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS,
NL80211_WOWLAN_TRIG_NET_DETECT,
NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS,
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC,
/* keep last */
NUM_NL80211_WOWLAN_TRIG,
@@ -6410,8 +6428,7 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching
* (set/del PMKSA operations) in AP mode.
*
- * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Driver supports
- * filtering of sched scan results using band specific RSSI thresholds.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Obsolete
*
* @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power
* to a station.
@@ -6520,6 +6537,11 @@ enum nl80211_feature_flags {
* DFS master on the same channel as described in FCC-594280 D01
* (Section B.3). This, for example, allows P2P GO and P2P clients to
* operate on DFS channels as long as there's a concurrent BSS connection.
+ *
+ * @NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT: The driver has support for SPP
+ * (signaling and payload protected) A-MSDUs and this shall be advertised
+ * in the RSNXE.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6561,7 +6583,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
NL80211_EXT_FEATURE_AP_PMKSA_CACHING,
- NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD,
+ NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD, /* obsolete */
NL80211_EXT_FEATURE_EXT_KEY_ID,
NL80211_EXT_FEATURE_STA_TX_PWR,
NL80211_EXT_FEATURE_SAE_OFFLOAD,
@@ -6594,6 +6616,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_OWE_OFFLOAD,
NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
NL80211_EXT_FEATURE_DFS_CONCURRENT,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index da700999cad4..053b40d642de 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -32,6 +32,7 @@
#define PTP_RISING_EDGE (1<<1)
#define PTP_FALLING_EDGE (1<<2)
#define PTP_STRICT_FLAGS (1<<3)
+#define PTP_EXT_OFFSET (1<<4)
#define PTP_EXTTS_EDGES (PTP_RISING_EDGE | PTP_FALLING_EDGE)
/*
@@ -40,7 +41,8 @@
#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \
PTP_RISING_EDGE | \
PTP_FALLING_EDGE | \
- PTP_STRICT_FLAGS)
+ PTP_STRICT_FLAGS | \
+ PTP_EXT_OFFSET)
/*
* flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
@@ -51,6 +53,11 @@
PTP_FALLING_EDGE)
/*
+ * flag fields valid for the ptp_extts_event report.
+ */
+#define PTP_EXTTS_EVENT_VALID (PTP_ENABLE_FEATURE)
+
+/*
* Bits of the ptp_perout_request.flags field:
*/
#define PTP_PEROUT_ONE_SHOT (1<<0)
@@ -228,9 +235,9 @@ struct ptp_pin_desc {
#define PTP_MASK_EN_SINGLE _IOW(PTP_CLK_MAGIC, 20, unsigned int)
struct ptp_extts_event {
- struct ptp_clock_time t; /* Time event occured. */
+ struct ptp_clock_time t; /* Time event occurred. */
unsigned int index; /* Which channel produced the event. */
- unsigned int flags; /* Reserved for future use. */
+ unsigned int flags; /* Event type. */
unsigned int rsv[2]; /* Reserved for future use. */
};
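/*
 * Editor's note: a minimal userspace sketch of how the new PTP_EXT_OFFSET
 * flag is intended to be used with the PTP_EXTTS_REQUEST2 ioctl, now that
 * ptp_extts_event.flags carries the event type. The device path and channel
 * index are assumptions for illustration only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int request_extts_with_offset(void)
{
	struct ptp_extts_request req = {
		.index = 0,	/* assumed channel */
		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_EXT_OFFSET,
	};
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, PTP_EXTTS_REQUEST2, &req))
		return -1;

	/* each read() returns one event; ev.flags now reports the type */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("chan %u: %lld.%09u flags 0x%x\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec, ev.flags);
	close(fd);
	return 0;
}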
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 9086367db043..de9b4733607e 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -145,12 +145,13 @@ struct serial_rs485 {
#define SER_RS485_ENABLED _BITUL(0)
#define SER_RS485_RTS_ON_SEND _BITUL(1)
#define SER_RS485_RTS_AFTER_SEND _BITUL(2)
-#define SER_RS485_RX_DURING_TX _BITUL(3)
-#define SER_RS485_TERMINATE_BUS _BITUL(4)
-#define SER_RS485_ADDRB _BITUL(5)
-#define SER_RS485_ADDR_RECV _BITUL(6)
-#define SER_RS485_ADDR_DEST _BITUL(7)
-#define SER_RS485_MODE_RS422 _BITUL(8)
+/* Placeholder for bit 3: SER_RS485_RTS_BEFORE_SEND, which isn't used anymore */
+#define SER_RS485_RX_DURING_TX _BITUL(4)
+#define SER_RS485_TERMINATE_BUS _BITUL(5)
+#define SER_RS485_ADDRB _BITUL(6)
+#define SER_RS485_ADDR_RECV _BITUL(7)
+#define SER_RS485_ADDR_DEST _BITUL(8)
+#define SER_RS485_MODE_RS422 _BITUL(9)
__u32 delay_rts_before_send;
__u32 delay_rts_after_send;
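/*
 * Editor's note: a hedged sketch of configuring RS485 through TIOCSRS485
 * with the renumbered flag bits above. Because the bit positions moved
 * (bit 3 is now a placeholder), userspace must be built against a matching
 * copy of this header; the tty path is an assumption for illustration.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int enable_rs485(const char *tty_path)
{
	struct serial_rs485 rs485;
	int fd = open(tty_path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
		      SER_RS485_RX_DURING_TX;	/* now _BITUL(4), not 3 */
	if (ioctl(fd, TIOCSRS485, &rs485))
		return -1;
	return fd;
}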
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index f3e61b04fa01..f5cab7fc96ab 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -62,7 +62,7 @@ struct tc_pedit_sel {
tc_gen;
unsigned char nkeys;
unsigned char flags;
- struct tc_pedit_key keys[0];
+ struct tc_pedit_key keys[] __counted_by(nkeys);
};
#define tc_pedit tc_pedit_sel
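/*
 * Editor's note: __counted_by(nkeys) tells the compiler (and FORTIFY /
 * CONFIG_UBSAN_BOUNDS) that keys[] holds exactly nkeys elements, so
 * out-of-bounds indexing can be flagged. A minimal kernel-side sketch of
 * the intended allocation pattern, with a hypothetical helper name:
 */
struct tc_pedit_sel *pedit_sel_alloc(unsigned char nkeys)
{
	struct tc_pedit_sel *sel;

	/* struct_size() sizes the header plus nkeys trailing elements */
	sel = kzalloc(struct_size(sel, keys, nkeys), GFP_KERNEL);
	if (!sel)
		return NULL;
	/* the counter must be valid before keys[] is indexed */
	sel->nkeys = nkeys;
	return sel;
}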
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index d5b9cfbd9cea..628d46a0da92 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -142,7 +142,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 16)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -416,7 +416,7 @@ struct snd_pcm_hw_params {
unsigned int rmask; /* W: requested masks */
unsigned int cmask; /* R: changed masks */
unsigned int info; /* R: Info flags for returned setup */
- unsigned int msbits; /* R: used most significant bits */
+ unsigned int msbits; /* R: used most significant bits (in sample bit-width) */
unsigned int rate_num; /* R: rate numerator */
unsigned int rate_den; /* R: rate denominator */
snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
diff --git a/include/uapi/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 48d2790ef928..3109282672f3 100644
--- a/include/uapi/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
@@ -31,7 +31,10 @@ struct ioctl_gntalloc_alloc_gref {
__u64 index;
/* The grant references of the newly created grant, one per page */
/* Variable size, depending on count */
- __u32 gref_ids[1];
+ union {
+ __u32 gref_ids[1];
+ __DECLARE_FLEX_ARRAY(__u32, gref_ids_flex);
+ };
};
#define GNTALLOC_FLAG_WRITABLE 1
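/*
 * Editor's note: the union above is the usual uapi trick for retiring a
 * one-element "fake" flexible array without breaking the ABI: old userspace
 * keeps gref_ids[1] and its sizeof(), while kernel code indexes
 * gref_ids_flex, which bounds checkers treat as a real flexible array.
 * A hedged kernel-side sketch (the helper name is hypothetical):
 */
static void fill_gref_ids(struct ioctl_gntalloc_alloc_gref *op,
			  const __u32 *ids, __u32 count)
{
	__u32 i;

	/* indexing the flex member avoids -Warray-bounds for i >= 1 */
	for (i = 0; i < count; i++)
		op->gref_ids_flex[i] = ids[i];
}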
diff --git a/init/Kconfig b/init/Kconfig
index 8df18f3a9748..6acb3febeb8e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -89,6 +89,15 @@ config CC_HAS_ASM_GOTO_TIED_OUTPUT
# Detect buggy gcc and clang, fixed in gcc-11 clang-14.
def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null)
+config GCC_ASM_GOTO_OUTPUT_WORKAROUND
+ bool
+ depends on CC_IS_GCC && CC_HAS_ASM_GOTO_OUTPUT
+ # Fixed in GCC 14, 13.3, 12.4 and 11.5
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
+ default y if GCC_VERSION < 110500
+ default y if GCC_VERSION >= 120000 && GCC_VERSION < 120400
+ default y if GCC_VERSION >= 130000 && GCC_VERSION < 130300
+
config TOOLS_SUPPORT_RELR
def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
@@ -876,6 +885,18 @@ config CC_NO_ARRAY_BOUNDS
bool
default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+# Currently, disable -Wstringop-overflow for GCC globally.
+config GCC_NO_STRINGOP_OVERFLOW
+ def_bool y
+
+config CC_NO_STRINGOP_OVERFLOW
+ bool
+ default y if CC_IS_GCC && GCC_NO_STRINGOP_OVERFLOW
+
+config CC_STRINGOP_OVERFLOW
+ bool
+ default y if CC_IS_GCC && !CC_NO_STRINGOP_OVERFLOW
+
#
# For architectures that know their GCC __int128 support is sound
#
@@ -1445,11 +1466,6 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
config HAVE_PCSPKR_PLATFORM
bool
-# interpreter that classic socket filters depend on
-config BPF
- bool
- select CRYPTO_LIB_SHA1
-
menuconfig EXPERT
bool "Configure standard kernel features (expert users)"
# Unhide debug options, to make the on-by-default options visible
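/*
 * Editor's note: a minimal x86 sketch of the "asm goto with outputs"
 * construct that GCC PR113921 can miscompile, which is what
 * GCC_ASM_GOTO_OUTPUT_WORKAROUND above gates. Illustrative only; with
 * pre-fix compilers the output is only reliable on the fallthrough path.
 */
static inline int get_value(int *src, int *dst)
{
	asm goto("movl %1, %0\n\t"
		 "testl %0, %0\n\t"
		 "js %l[err]"
		 : "=r"(*dst)	/* output constraint... */
		 : "m"(*src)
		 : "cc"
		 : err);	/* ...combined with a goto label */
	return 0;	/* fallthrough: *dst is valid */
err:
	return -1;	/* jump path: do not rely on the output */
}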
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 86761ec623f9..cd9a137ad6ce 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -137,6 +137,14 @@ struct io_defer_entry {
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
+/*
+ * No waiters. It's larger than any valid value of the tw counter
+ * so that tests against ->cq_wait_nr would fail and skip wake_up().
+ */
+#define IO_CQ_WAKE_INIT (-1U)
+/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
+#define IO_CQ_WAKE_FORCE (IO_CQ_WAKE_INIT >> 1)
+
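/*
 * Editor's note: a worked example of the sentinel arithmetic above, under
 * the definitions IO_CQ_WAKE_INIT = -1U and IO_CQ_WAKE_FORCE = -1U >> 1:
 *
 *   no waiters:   nr_wait == IO_CQ_WAKE_INIT (0xffffffff); any nr_tw is at
 *                 most IO_CQ_WAKE_FORCE (0x7fffffff), so "nr_tw < nr_wait"
 *                 holds and wake_up() is skipped.
 *
 *   forced wake:  nr_tw == IO_CQ_WAKE_FORCE, which exceeds any real waiter
 *                 count (bounded by IORING_MAX_CQ_ENTRIES, see the
 *                 BUILD_BUG_ON below), so "nr_tw >= nr_wait" holds for
 *                 every genuine ->cq_wait_nr value.
 *
 *   lazy wake:    nr_tw counts queued task_work; the task is woken only
 *                 once the count reaches the batch size the waiter stored
 *                 in ->cq_wait_nr.
 */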
static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct task_struct *task,
bool cancel_all);
@@ -303,6 +311,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
goto err;
ctx->flags = p->flags;
+ atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
init_waitqueue_head(&ctx->sqo_sq_wait);
INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
@@ -1304,16 +1313,23 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned nr_wait, nr_tw, nr_tw_prev;
- struct llist_node *first;
+ struct llist_node *head;
+
+ /* See comment above IO_CQ_WAKE_INIT */
+ BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
+ /*
+ * We don't know how many requests are in the link and whether they can
+ * even be queued lazily; fall back to non-lazy.
+ */
if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
flags &= ~IOU_F_TWQ_LAZY_WAKE;
- first = READ_ONCE(ctx->work_llist.first);
+ head = READ_ONCE(ctx->work_llist.first);
do {
nr_tw_prev = 0;
- if (first) {
- struct io_kiocb *first_req = container_of(first,
+ if (head) {
+ struct io_kiocb *first_req = container_of(head,
struct io_kiocb,
io_task_work.node);
/*
@@ -1322,17 +1338,29 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
*/
nr_tw_prev = READ_ONCE(first_req->nr_tw);
}
+
+ /*
+ * Theoretically, it can overflow, but that's fine as one of
+ * previous adds should've tried to wake the task.
+ */
nr_tw = nr_tw_prev + 1;
- /* Large enough to fail the nr_wait comparison below */
if (!(flags & IOU_F_TWQ_LAZY_WAKE))
- nr_tw = -1U;
+ nr_tw = IO_CQ_WAKE_FORCE;
req->nr_tw = nr_tw;
- req->io_task_work.node.next = first;
- } while (!try_cmpxchg(&ctx->work_llist.first, &first,
+ req->io_task_work.node.next = head;
+ } while (!try_cmpxchg(&ctx->work_llist.first, &head,
&req->io_task_work.node));
- if (!first) {
+ /*
+ * cmpxchg implies a full barrier, which pairs with the barrier
+ * in set_current_state() on the io_cqring_wait() side. It's used
+ * to ensure that either we see updated ->cq_wait_nr, or waiters
+ * going to sleep will observe the work added to the list, which
+ * is similar to the wait/wake task state sync.
+ */
+
+ if (!head) {
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
if (ctx->has_evfd)
@@ -1340,14 +1368,12 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
}
nr_wait = atomic_read(&ctx->cq_wait_nr);
- /* no one is waiting */
- if (!nr_wait)
+ /* not enough or no one is waiting */
+ if (nr_tw < nr_wait)
return;
- /* either not enough or the previous add has already woken it up */
- if (nr_wait > nr_tw || nr_tw_prev >= nr_wait)
+ /* the previous add has already woken it up */
+ if (nr_tw_prev >= nr_wait)
return;
- /* pairs with set_current_state() in io_cqring_wait() */
- smp_mb__after_atomic();
wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}
@@ -2000,9 +2026,10 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
goto out;
fd = array_index_nospec(fd, ctx->nr_user_files);
slot = io_fixed_file_slot(&ctx->file_table, fd);
- file = io_slot_file(slot);
+ if (!req->rsrc_node)
+ __io_req_set_rsrc_node(req, ctx);
req->flags |= io_slot_flags(slot);
- io_req_set_rsrc_node(req, ctx, 0);
+ file = io_slot_file(slot);
out:
io_ring_submit_unlock(ctx, issue_flags);
return file;
@@ -2613,7 +2640,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
ret = io_cqring_wait_schedule(ctx, &iowq);
__set_current_state(TASK_RUNNING);
- atomic_set(&ctx->cq_wait_nr, 0);
+ atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
/*
* Run task_work after scheduling and before io_should_wake().
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 04e33f25919c..d5495710c178 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -15,12 +15,18 @@
#include <trace/events/io_uring.h>
#endif
-
enum {
IOU_OK = 0,
IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
/*
+ * Requeue the task_work to restart operations on this request. The
+ * actual value isn't important; it just needs to not be an otherwise
+ * valid error code, yet less than -MAX_ERRNO and valid internally.
+ */
+ IOU_REQUEUE = -3072,
+
+ /*
* Intended only when both IO_URING_F_MULTISHOT is passed
* to indicate to the poll runner that multishot should be
* removed and the result is set on req->cqe.res.
diff --git a/io_uring/net.c b/io_uring/net.c
index 75d494dad7e2..161622029147 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -60,6 +60,7 @@ struct io_sr_msg {
unsigned len;
unsigned done_io;
unsigned msg_flags;
+ unsigned nr_multishot_loops;
u16 flags;
/* initialised and used only by !msg send variants */
u16 addr_len;
@@ -70,6 +71,13 @@ struct io_sr_msg {
struct io_kiocb *notif;
};
+/*
+ * Number of times we'll try to do receives if there's more data. If we
+ * exceed this limit, then add us to the back of the queue and retry from
+ * there. This helps fairness between flooding clients.
+ */
+#define MULTISHOT_MAX_RETRY 32
+
static inline bool io_check_multishot(struct io_kiocb *req,
unsigned int issue_flags)
{
@@ -611,6 +619,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
sr->done_io = 0;
+ sr->nr_multishot_loops = 0;
return 0;
}
@@ -645,23 +654,35 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
return true;
}
- if (!mshot_finished) {
- if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
- *ret, cflags | IORING_CQE_F_MORE)) {
- io_recv_prep_retry(req);
- /* Known not-empty or unknown state, retry */
- if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
- msg->msg_inq == -1)
+ if (mshot_finished)
+ goto finish;
+
+ /*
+ * Fill CQE for this receive and see if we should keep trying to
+ * receive from this socket.
+ */
+ if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ *ret, cflags | IORING_CQE_F_MORE)) {
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
+
+ io_recv_prep_retry(req);
+ /* Known not-empty or unknown state, retry */
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
+ if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
return false;
- if (issue_flags & IO_URING_F_MULTISHOT)
- *ret = IOU_ISSUE_SKIP_COMPLETE;
- else
- *ret = -EAGAIN;
- return true;
+ /* mshot retries exceeded, force a requeue */
+ sr->nr_multishot_loops = 0;
+ mshot_retry_ret = IOU_REQUEUE;
}
- /* Otherwise stop multishot but use the current result. */
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ *ret = mshot_retry_ret;
+ else
+ *ret = -EAGAIN;
+ return true;
}
-
+ /* Otherwise stop multishot but use the current result. */
+finish:
io_req_set_res(req, *ret, cflags);
if (issue_flags & IO_URING_F_MULTISHOT)
@@ -902,6 +923,7 @@ retry_multishot:
if (!buf)
return -ENOBUFS;
sr->buf = buf;
+ sr->len = len;
}
ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
@@ -1350,7 +1372,7 @@ retry:
* has already been done
*/
if (issue_flags & IO_URING_F_MULTISHOT)
- ret = IOU_ISSUE_SKIP_COMPLETE;
+ return IOU_ISSUE_SKIP_COMPLETE;
return ret;
}
if (ret == -ERESTARTSYS)
@@ -1375,7 +1397,8 @@ retry:
ret, IORING_CQE_F_MORE))
goto retry;
- return -ECANCELED;
+ io_req_set_res(req, ret, 0);
+ return IOU_STOP_MULTISHOT;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
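/*
 * Editor's note: the multishot retry cap above distills to this control
 * flow (simplified and illustrative; the helper name is hypothetical, the
 * identifiers follow the diff):
 */
static int mshot_retry_decision(struct io_sr_msg *sr, bool more_data)
{
	if (!more_data)
		return IOU_ISSUE_SKIP_COMPLETE;	/* wait for the next poll */
	if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
		return 0;			/* retry the receive inline */
	/* cap hit: reset and requeue so other requests get a turn */
	sr->nr_multishot_loops = 0;
	return IOU_REQUEUE;
}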
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 6705634e5f52..b1ee3a9c3807 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -471,7 +471,6 @@ const struct io_issue_def io_issue_defs[] = {
},
[IORING_OP_FIXED_FD_INSTALL] = {
.needs_file = 1,
- .audit_skip = 1,
.prep = io_install_fixed_fd_prep,
.issue = io_install_fixed_fd,
},
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index 0fe0dd305546..e3357dfa14ca 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -277,6 +277,10 @@ int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sq
if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
return -EINVAL;
+ /* ensure the task's creds are used when installing/receiving fds */
+ if (req->flags & REQ_F_CREDS)
+ return -EPERM;
+
/* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
ifi->o_flags = O_CLOEXEC;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d59b74a99d4e..7513afc7b702 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -226,8 +226,29 @@ enum {
IOU_POLL_NO_ACTION = 1,
IOU_POLL_REMOVE_POLL_USE_RES = 2,
IOU_POLL_REISSUE = 3,
+ IOU_POLL_REQUEUE = 4,
};
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+ unsigned flags = 0;
+
+ io_req_set_res(req, mask, 0);
+ req->io_task_work.func = io_poll_task_func;
+
+ trace_io_uring_task_add(req, mask);
+
+ if (!(req->flags & REQ_F_POLL_NO_LAZY))
+ flags = IOU_F_TWQ_LAZY_WAKE;
+ __io_req_task_work_add(req, flags);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+ if (io_poll_get_ownership(req))
+ __io_poll_execute(req, res);
+}
+
/*
* All poll tw should go through this. Checks for poll events, manages
* references, does rewait, etc.
@@ -309,6 +330,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
int ret = io_poll_issue(req, ts);
if (ret == IOU_STOP_MULTISHOT)
return IOU_POLL_REMOVE_POLL_USE_RES;
+ else if (ret == IOU_REQUEUE)
+ return IOU_POLL_REQUEUE;
if (ret < 0)
return ret;
}
@@ -331,8 +354,12 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
int ret;
ret = io_poll_check_events(req, ts);
- if (ret == IOU_POLL_NO_ACTION)
+ if (ret == IOU_POLL_NO_ACTION) {
+ return;
+ } else if (ret == IOU_POLL_REQUEUE) {
+ __io_poll_execute(req, 0);
return;
+ }
io_poll_remove_entries(req);
io_poll_tw_hash_eject(req, ts);
@@ -364,26 +391,6 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
}
}
-static void __io_poll_execute(struct io_kiocb *req, int mask)
-{
- unsigned flags = 0;
-
- io_req_set_res(req, mask, 0);
- req->io_task_work.func = io_poll_task_func;
-
- trace_io_uring_task_add(req, mask);
-
- if (!(req->flags & REQ_F_POLL_NO_LAZY))
- flags = IOU_F_TWQ_LAZY_WAKE;
- __io_req_task_work_add(req, flags);
-}
-
-static inline void io_poll_execute(struct io_kiocb *req, int res)
-{
- if (io_poll_get_ownership(req))
- __io_poll_execute(req, res);
-}
-
static void io_poll_cancel_req(struct io_kiocb *req)
{
io_poll_mark_cancelled(req);
diff --git a/io_uring/poll.h b/io_uring/poll.h
index ff4d5d753387..1dacae9e816c 100644
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -24,6 +24,15 @@ struct async_poll {
struct io_poll *double_poll;
};
+/*
+ * Must only be called inside issue_flags & IO_URING_F_MULTISHOT, or
+ * potentially other cases where we already "own" this poll request.
+ */
+static inline void io_poll_multishot_retry(struct io_kiocb *req)
+{
+ atomic_inc(&req->poll_refs);
+}
+
int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
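/*
 * Editor's note: poll ownership here is reference-count based; a hedged,
 * simplified sketch of the idea (not the exact kernel logic): whoever
 * moves poll_refs from zero to non-zero owns the request, and the extra
 * reference taken by io_poll_multishot_retry() keeps a retry pending
 * instead of letting the request complete.
 */
static bool poll_try_own(atomic_t *poll_refs)
{
	/* the owner is whoever made the count non-zero */
	return atomic_fetch_inc(poll_refs) == 0;
}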
diff --git a/io_uring/register.c b/io_uring/register.c
index 708dd1d89add..5e62c1208996 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
+#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
@@ -278,13 +279,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
if (len > cpumask_size())
len = cpumask_size();
- if (in_compat_syscall()) {
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
ret = compat_get_bitmap(cpumask_bits(new_mask),
(const compat_ulong_t __user *)arg,
len * 8 /* CHAR_BIT */);
- } else {
+ else
+#endif
ret = copy_from_user(new_mask, arg, len);
- }
if (ret) {
free_cpumask_var(new_mask);
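/*
 * Editor's note: why the CONFIG_COMPAT branch above matters, as a worked
 * example. A 32-bit process encodes a CPU mask as 32-bit words, e.g. CPUs
 * 0 and 36 -> words {0x00000001, 0x00000010}. The correct 64-bit mask is
 * 0x0000001000000001 (bits 0 and 36), which compat_get_bitmap()
 * reconstructs. On a big-endian 64-bit kernel, a raw copy_from_user() of
 * those 8 bytes would instead read back as 0x0000000100000010, i.e. bits
 * 4 and 32 -- the wrong CPUs. On little-endian the two layouts happen to
 * coincide, which is why the bug is easy to miss.
 */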
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 7238b9cfe33b..c6f199bbee28 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -102,17 +102,21 @@ static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
node->refs++;
}
+static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
+ struct io_ring_ctx *ctx)
+{
+ lockdep_assert_held(&ctx->uring_lock);
+ req->rsrc_node = ctx->rsrc_node;
+ io_charge_rsrc_node(ctx, ctx->rsrc_node);
+}
+
static inline void io_req_set_rsrc_node(struct io_kiocb *req,
struct io_ring_ctx *ctx,
unsigned int issue_flags)
{
if (!req->rsrc_node) {
io_ring_submit_lock(ctx, issue_flags);
-
- lockdep_assert_held(&ctx->uring_lock);
-
- req->rsrc_node = ctx->rsrc_node;
- io_charge_rsrc_node(ctx, ctx->rsrc_node);
+ __io_req_set_rsrc_node(req, ctx);
io_ring_submit_unlock(ctx, issue_flags);
}
}
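/*
 * Editor's note: the split above follows the common kernel pattern of a
 * double-underscore "caller already holds the lock" helper plus a locking
 * wrapper. A generic sketch of the shape, with hypothetical names:
 */
struct thing {
	spinlock_t lock;
	int value;
};

static void __thing_update(struct thing *t)	/* requires t->lock held */
{
	lockdep_assert_held(&t->lock);
	t->value++;
}

static void thing_update(struct thing *t)
{
	spin_lock(&t->lock);
	__thing_update(t);
	spin_unlock(&t->lock);
}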
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 0c856726b15d..d5e79d9bdc71 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -18,6 +18,7 @@
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
+#include "poll.h"
#include "rw.h"
struct io_rw {
@@ -168,27 +169,6 @@ void io_readv_writev_cleanup(struct io_kiocb *req)
kfree(io->free_iovec);
}
-static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
-{
- switch (ret) {
- case -EIOCBQUEUED:
- break;
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- case -ERESTARTNOHAND:
- case -ERESTART_RESTARTBLOCK:
- /*
- * We can't just restart the syscall, since previously
- * submitted sqes may already be in progress. Just fail this
- * IO with EINTR.
- */
- ret = -EINTR;
- fallthrough;
- default:
- kiocb->ki_complete(kiocb, ret);
- }
-}
-
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
@@ -371,6 +351,33 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
smp_store_release(&req->iopoll_completed, 1);
}
+static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
+{
+ /* IO was queued async, completion will happen later */
+ if (ret == -EIOCBQUEUED)
+ return;
+
+ /* transform internal restart error codes */
+ if (unlikely(ret < 0)) {
+ switch (ret) {
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
+ /*
+ * We can't just restart the syscall, since previously
+ * submitted sqes may already be in progress. Just fail
+ * this IO with EINTR.
+ */
+ ret = -EINTR;
+ break;
+ }
+ }
+
+ INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
+ io_complete_rw, kiocb, ret);
+}
+
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
unsigned int issue_flags)
{
@@ -956,8 +963,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
if (io_fill_cqe_req_aux(req,
issue_flags & IO_URING_F_COMPLETE_DEFER,
ret, cflags | IORING_CQE_F_MORE)) {
- if (issue_flags & IO_URING_F_MULTISHOT)
+ if (issue_flags & IO_URING_F_MULTISHOT) {
+ /*
+ * Force retry, as we might have more data to
+ * be read and otherwise it won't get retried
+ * until (if ever) another poll is triggered.
+ */
+ io_poll_multishot_retry(req);
return IOU_ISSUE_SKIP_COMPLETE;
+ }
return -EAGAIN;
}
}
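/*
 * Editor's note: INDIRECT_CALL_2() used in io_rw_done() above is the
 * retpoline-avoidance helper from <linux/indirect_call_wrapper.h>. To my
 * understanding it expands to direct-call tests roughly like this sketch
 * (the function name here is hypothetical):
 */
static inline void io_rw_done_sketch(struct kiocb *kiocb, long ret)
{
	if (kiocb->ki_complete == io_complete_rw_iopoll)
		io_complete_rw_iopoll(kiocb, ret);	/* direct call */
	else if (kiocb->ki_complete == io_complete_rw)
		io_complete_rw(kiocb, ret);		/* direct call */
	else
		kiocb->ki_complete(kiocb, ret);		/* indirect fallback */
}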
diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
index 6a906ff93006..bc25f5098a25 100644
--- a/kernel/bpf/Kconfig
+++ b/kernel/bpf/Kconfig
@@ -3,6 +3,7 @@
# BPF interpreter that, for example, classic socket filters depend on.
config BPF
bool
+ select CRYPTO_LIB_SHA1
# Used by archs to tell that they support BPF JIT compiler plus which
# flavour. Only one of the two can be selected for a specific arch since
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index f526b7573e97..4ce95acfcaa7 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -6,7 +6,7 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
endif
CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 0bdbbbeab155..13358675ff2e 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -82,7 +82,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
int numa_node = bpf_map_attr_numa_node(attr);
u32 elem_size, index_mask, max_entries;
- bool bypass_spec_v1 = bpf_bypass_spec_v1();
+ bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
u64 array_size, mask64;
struct bpf_array *array;
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 146824cc9689..bdea1a459153 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -414,47 +414,21 @@ void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
bpf_selem_unlink_storage(selem, reuse_now);
}
-/* If cacheit_lockit is false, this lookup function is lockless */
-struct bpf_local_storage_data *
-bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
- struct bpf_local_storage_map *smap,
- bool cacheit_lockit)
+void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem)
{
- struct bpf_local_storage_data *sdata;
- struct bpf_local_storage_elem *selem;
-
- /* Fast path (cache hit) */
- sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
- bpf_rcu_lock_held());
- if (sdata && rcu_access_pointer(sdata->smap) == smap)
- return sdata;
-
- /* Slow path (cache miss) */
- hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
- rcu_read_lock_trace_held())
- if (rcu_access_pointer(SDATA(selem)->smap) == smap)
- break;
-
- if (!selem)
- return NULL;
-
- sdata = SDATA(selem);
- if (cacheit_lockit) {
- unsigned long flags;
-
- /* spinlock is needed to avoid racing with the
- * parallel delete. Otherwise, publishing an already
- * deleted sdata to the cache will become a use-after-free
- * problem in the next bpf_local_storage_lookup().
- */
- raw_spin_lock_irqsave(&local_storage->lock, flags);
- if (selem_linked_to_storage(selem))
- rcu_assign_pointer(local_storage->cache[smap->cache_idx],
- sdata);
- raw_spin_unlock_irqrestore(&local_storage->lock, flags);
- }
+ unsigned long flags;
- return sdata;
+ /* spinlock is needed to avoid racing with the
+ * parallel delete. Otherwise, publishing an already
+ * deleted sdata to the cache will become a use-after-free
+ * problem in the next bpf_local_storage_lookup().
+ */
+ raw_spin_lock_irqsave(&local_storage->lock, flags);
+ if (selem_linked_to_storage(selem))
+ rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem));
+ raw_spin_unlock_irqrestore(&local_storage->lock, flags);
}
static int check_flags(const struct bpf_local_storage_data *old_sdata,
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index e8e910395bf6..68240c3c6e7d 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -260,9 +260,15 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
BTF_SET_START(sleepable_lsm_hooks)
BTF_ID(func, bpf_lsm_bpf)
BTF_ID(func, bpf_lsm_bpf_map)
-BTF_ID(func, bpf_lsm_bpf_map_alloc_security)
-BTF_ID(func, bpf_lsm_bpf_map_free_security)
+BTF_ID(func, bpf_lsm_bpf_map_create)
+BTF_ID(func, bpf_lsm_bpf_map_free)
BTF_ID(func, bpf_lsm_bpf_prog)
+BTF_ID(func, bpf_lsm_bpf_prog_load)
+BTF_ID(func, bpf_lsm_bpf_prog_free)
+BTF_ID(func, bpf_lsm_bpf_token_create)
+BTF_ID(func, bpf_lsm_bpf_token_free)
+BTF_ID(func, bpf_lsm_bpf_token_cmd)
+BTF_ID(func, bpf_lsm_bpf_token_capable)
BTF_ID(func, bpf_lsm_bprm_check_security)
BTF_ID(func, bpf_lsm_bprm_committed_creds)
BTF_ID(func, bpf_lsm_bprm_committing_creds)
@@ -276,10 +282,6 @@ BTF_ID(func, bpf_lsm_file_lock)
BTF_ID(func, bpf_lsm_file_open)
BTF_ID(func, bpf_lsm_file_receive)
-#ifdef CONFIG_SECURITY_NETWORK
-BTF_ID(func, bpf_lsm_inet_conn_established)
-#endif /* CONFIG_SECURITY_NETWORK */
-
BTF_ID(func, bpf_lsm_inode_create)
BTF_ID(func, bpf_lsm_inode_free_security)
BTF_ID(func, bpf_lsm_inode_getattr)
@@ -330,6 +332,8 @@ BTF_ID(func, bpf_lsm_sb_umount)
BTF_ID(func, bpf_lsm_settime)
#ifdef CONFIG_SECURITY_NETWORK
+BTF_ID(func, bpf_lsm_inet_conn_established)
+
BTF_ID(func, bpf_lsm_socket_accept)
BTF_ID(func, bpf_lsm_socket_bind)
BTF_ID(func, bpf_lsm_socket_connect)
@@ -357,9 +361,8 @@ BTF_ID(func, bpf_lsm_userns_create)
BTF_SET_END(sleepable_lsm_hooks)
BTF_SET_START(untrusted_lsm_hooks)
-BTF_ID(func, bpf_lsm_bpf_map_free_security)
-BTF_ID(func, bpf_lsm_bpf_prog_alloc_security)
-BTF_ID(func, bpf_lsm_bpf_prog_free_security)
+BTF_ID(func, bpf_lsm_bpf_map_free)
+BTF_ID(func, bpf_lsm_bpf_prog_free)
BTF_ID(func, bpf_lsm_file_alloc_security)
BTF_ID(func, bpf_lsm_file_free_security)
#ifdef CONFIG_SECURITY_NETWORK
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 02068bd0e4d9..a6019087b467 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -13,26 +13,15 @@
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
-enum bpf_struct_ops_state {
- BPF_STRUCT_OPS_STATE_INIT,
- BPF_STRUCT_OPS_STATE_INUSE,
- BPF_STRUCT_OPS_STATE_TOBEFREE,
- BPF_STRUCT_OPS_STATE_READY,
-};
-
-#define BPF_STRUCT_OPS_COMMON_VALUE \
- refcount_t refcnt; \
- enum bpf_struct_ops_state state
-
struct bpf_struct_ops_value {
- BPF_STRUCT_OPS_COMMON_VALUE;
+ struct bpf_struct_ops_common_value common;
char data[] ____cacheline_aligned_in_smp;
};
struct bpf_struct_ops_map {
struct bpf_map map;
struct rcu_head rcu;
- const struct bpf_struct_ops *st_ops;
+ const struct bpf_struct_ops_desc *st_ops_desc;
/* protect map_update */
struct mutex lock;
/* link has all the bpf_links that is populated
@@ -40,12 +29,15 @@ struct bpf_struct_ops_map {
* (in kvalue.data).
*/
struct bpf_link **links;
+ u32 links_cnt;
/* image is a page that has all the trampolines
* that stores the func args before calling the bpf_prog.
* A PAGE_SIZE "image" is enough to store all trampoline for
* "links[]".
*/
void *image;
+ /* The owner module's btf. */
+ struct btf *btf;
/* uvalue->data stores the kernel struct
* (e.g. tcp_congestion_ops) that is more useful
* to userspace than the kvalue. For example,
@@ -70,35 +62,6 @@ static DEFINE_MUTEX(update_mutex);
#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
-/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
- * the map's value exposed to the userspace and its btf-type-id is
- * stored at the map->btf_vmlinux_value_type_id.
- *
- */
-#define BPF_STRUCT_OPS_TYPE(_name) \
-extern struct bpf_struct_ops bpf_##_name; \
- \
-struct bpf_struct_ops_##_name { \
- BPF_STRUCT_OPS_COMMON_VALUE; \
- struct _name data ____cacheline_aligned_in_smp; \
-};
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
-
-enum {
-#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
- __NR_BPF_STRUCT_OPS_TYPE,
-};
-
-static struct bpf_struct_ops * const bpf_struct_ops[] = {
-#define BPF_STRUCT_OPS_TYPE(_name) \
- [BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
-};
-
const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};
@@ -108,138 +71,330 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#endif
};
-static const struct btf_type *module_type;
+BTF_ID_LIST(st_ops_ids)
+BTF_ID(struct, module)
+BTF_ID(struct, bpf_struct_ops_common_value)
-void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
+enum {
+ IDX_MODULE_ID,
+ IDX_ST_OPS_COMMON_VALUE_ID,
+};
+
+extern struct btf *btf_vmlinux;
+
+static bool is_valid_value_type(struct btf *btf, s32 value_id,
+ const struct btf_type *type,
+ const char *value_name)
{
- s32 type_id, value_id, module_id;
+ const struct btf_type *common_value_type;
const struct btf_member *member;
- struct bpf_struct_ops *st_ops;
- const struct btf_type *t;
- char value_name[128];
- const char *mname;
- u32 i, j;
+ const struct btf_type *vt, *mt;
- /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
-#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
-#include "bpf_struct_ops_types.h"
-#undef BPF_STRUCT_OPS_TYPE
+ vt = btf_type_by_id(btf, value_id);
+ if (btf_vlen(vt) != 2) {
+ pr_warn("The number of %s's members should be 2, but we get %d\n",
+ value_name, btf_vlen(vt));
+ return false;
+ }
+ member = btf_type_member(vt);
+ mt = btf_type_by_id(btf, member->type);
+ common_value_type = btf_type_by_id(btf_vmlinux,
+ st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
+ if (mt != common_value_type) {
+ pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
+ value_name);
+ return false;
+ }
+ member++;
+ mt = btf_type_by_id(btf, member->type);
+ if (mt != type) {
+ pr_warn("The second member of %s should be %s\n",
+ value_name, btf_name_by_offset(btf, type->name_off));
+ return false;
+ }
- module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
- if (module_id < 0) {
- pr_warn("Cannot find struct module in btf_vmlinux\n");
- return;
+ return true;
+}
+
+#define MAYBE_NULL_SUFFIX "__nullable"
+#define MAX_STUB_NAME 128
+
+/* Return the type info of a stub function, if it exists.
+ *
+ * The name of a stub function is made up of the name of the struct_ops and
+ * the name of the function pointer member, separated by "__". For example,
+ * if the struct_ops type is named "foo_ops" and the function pointer
+ * member is named "bar", the stub function name would be "foo_ops__bar".
+ */
+static const struct btf_type *
+find_stub_func_proto(const struct btf *btf, const char *st_op_name,
+ const char *member_name)
+{
+ char stub_func_name[MAX_STUB_NAME];
+ const struct btf_type *func_type;
+ s32 btf_id;
+ int cp;
+
+ cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
+ st_op_name, member_name);
+ if (cp >= MAX_STUB_NAME) {
+ pr_warn("Stub function name too long\n");
+ return NULL;
}
- module_type = btf_type_by_id(btf, module_id);
+ btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
+ if (btf_id < 0)
+ return NULL;
+ func_type = btf_type_by_id(btf, btf_id);
+ if (!func_type)
+ return NULL;
- for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
- st_ops = bpf_struct_ops[i];
+ return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
+}
- if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
- sizeof(value_name)) {
- pr_warn("struct_ops name %s is too long\n",
- st_ops->name);
- continue;
- }
- sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
+/* Prepare argument info for every nullable argument of a member of a
+ * struct_ops type.
+ *
+ * Initialize a struct bpf_struct_ops_arg_info according to type info of
+ * the arguments of a stub function. (Check kCFI for more information about
+ * stub functions.)
+ *
+ * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
+ * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
+ * the information that is used by the verifier to check the arguments of the
+ * BPF struct_ops program assigned to the member. Here, we only care about
+ * the arguments that are marked as __nullable.
+ *
+ * The array of struct bpf_ctx_arg_aux is eventually assigned to
+ * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
+ * verifier. (See check_struct_ops_btf_id())
+ *
+ * On success, arg_info->info will be the list of struct bpf_ctx_arg_aux.
+ * On failure, it will be kept untouched.
+ */
+static int prepare_arg_info(struct btf *btf,
+ const char *st_ops_name,
+ const char *member_name,
+ const struct btf_type *func_proto,
+ struct bpf_struct_ops_arg_info *arg_info)
+{
+ const struct btf_type *stub_func_proto, *pointed_type;
+ const struct btf_param *stub_args, *args;
+ struct bpf_ctx_arg_aux *info, *info_buf;
+ u32 nargs, arg_no, info_cnt = 0;
+ u32 arg_btf_id;
+ int offset;
+
+ stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
+ if (!stub_func_proto)
+ return 0;
+
+ /* Check if the number of arguments of the stub function is the same
+ * as the number of arguments of the function pointer.
+ */
+ nargs = btf_type_vlen(func_proto);
+ if (nargs != btf_type_vlen(stub_func_proto)) {
+ pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
+ st_ops_name, member_name, member_name, st_ops_name);
+ return -EINVAL;
+ }
+
+ if (!nargs)
+ return 0;
+
+ args = btf_params(func_proto);
+ stub_args = btf_params(stub_func_proto);
+
+ info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
+ if (!info_buf)
+ return -ENOMEM;
- value_id = btf_find_by_name_kind(btf, value_name,
- BTF_KIND_STRUCT);
- if (value_id < 0) {
- pr_warn("Cannot find struct %s in btf_vmlinux\n",
- value_name);
+ /* Prepare info for every nullable argument */
+ info = info_buf;
+ for (arg_no = 0; arg_no < nargs; arg_no++) {
+ /* Skip arguments that are not suffixed with
+ * "__nullable".
+ */
+ if (!btf_param_match_suffix(btf, &stub_args[arg_no],
+ MAYBE_NULL_SUFFIX))
continue;
+
+ /* Should be a pointer to struct */
+ pointed_type = btf_type_resolve_ptr(btf,
+ args[arg_no].type,
+ &arg_btf_id);
+ if (!pointed_type ||
+ !btf_type_is_struct(pointed_type)) {
+ pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
+ st_ops_name, member_name, MAYBE_NULL_SUFFIX);
+ goto err_out;
}
- type_id = btf_find_by_name_kind(btf, st_ops->name,
- BTF_KIND_STRUCT);
- if (type_id < 0) {
- pr_warn("Cannot find struct %s in btf_vmlinux\n",
- st_ops->name);
- continue;
+ offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
+ if (offset < 0) {
+ pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
+ st_ops_name, member_name, arg_no);
+ goto err_out;
}
- t = btf_type_by_id(btf, type_id);
- if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
- pr_warn("Cannot support #%u members in struct %s\n",
- btf_type_vlen(t), st_ops->name);
- continue;
+
+ if (args[arg_no].type != stub_args[arg_no].type) {
+ pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
+ arg_no, st_ops_name, member_name);
+ goto err_out;
}
- for_each_member(j, t, member) {
- const struct btf_type *func_proto;
+ /* Fill the information of the new argument */
+ info->reg_type =
+ PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
+ info->btf_id = arg_btf_id;
+ info->btf = btf;
+ info->offset = offset;
- mname = btf_name_by_offset(btf, member->name_off);
- if (!*mname) {
- pr_warn("anon member in struct %s is not supported\n",
- st_ops->name);
- break;
- }
+ info++;
+ info_cnt++;
+ }
- if (__btf_member_bitfield_size(t, member)) {
- pr_warn("bit field member %s in struct %s is not supported\n",
- mname, st_ops->name);
- break;
- }
+ if (info_cnt) {
+ arg_info->info = info_buf;
+ arg_info->cnt = info_cnt;
+ } else {
+ kfree(info_buf);
+ }
- func_proto = btf_type_resolve_func_ptr(btf,
- member->type,
- NULL);
- if (func_proto &&
- btf_distill_func_proto(log, btf,
- func_proto, mname,
- &st_ops->func_models[j])) {
- pr_warn("Error in parsing func ptr %s in struct %s\n",
- mname, st_ops->name);
- break;
- }
- }
+ return 0;
- if (j == btf_type_vlen(t)) {
- if (st_ops->init(btf)) {
- pr_warn("Error in init bpf_struct_ops %s\n",
- st_ops->name);
- } else {
- st_ops->type_id = type_id;
- st_ops->type = t;
- st_ops->value_id = value_id;
- st_ops->value_type = btf_type_by_id(btf,
- value_id);
- }
- }
- }
+err_out:
+ kfree(info_buf);
+
+ return -EINVAL;
}
-extern struct btf *btf_vmlinux;
+/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
+void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+{
+ struct bpf_struct_ops_arg_info *arg_info;
+ int i;
+
+ arg_info = st_ops_desc->arg_info;
+ for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
+ kfree(arg_info[i].info);
-static const struct bpf_struct_ops *
-bpf_struct_ops_find_value(u32 value_id)
+ kfree(arg_info);
+}
+
+int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ struct btf *btf,
+ struct bpf_verifier_log *log)
{
- unsigned int i;
+ struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
+ struct bpf_struct_ops_arg_info *arg_info;
+ const struct btf_member *member;
+ const struct btf_type *t;
+ s32 type_id, value_id;
+ char value_name[128];
+ const char *mname;
+ int i, err;
- if (!value_id || !btf_vmlinux)
- return NULL;
+ if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
+ sizeof(value_name)) {
+ pr_warn("struct_ops name %s is too long\n",
+ st_ops->name);
+ return -EINVAL;
+ }
+ sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);
- for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
- if (bpf_struct_ops[i]->value_id == value_id)
- return bpf_struct_ops[i];
+ if (!st_ops->cfi_stubs) {
+ pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
+ return -EINVAL;
}
- return NULL;
-}
+ type_id = btf_find_by_name_kind(btf, st_ops->name,
+ BTF_KIND_STRUCT);
+ if (type_id < 0) {
+ pr_warn("Cannot find struct %s in %s\n",
+ st_ops->name, btf_get_name(btf));
+ return -EINVAL;
+ }
+ t = btf_type_by_id(btf, type_id);
+ if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
+ pr_warn("Cannot support #%u members in struct %s\n",
+ btf_type_vlen(t), st_ops->name);
+ return -EINVAL;
+ }
-const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
-{
- unsigned int i;
+ value_id = btf_find_by_name_kind(btf, value_name,
+ BTF_KIND_STRUCT);
+ if (value_id < 0) {
+ pr_warn("Cannot find struct %s in %s\n",
+ value_name, btf_get_name(btf));
+ return -EINVAL;
+ }
+ if (!is_valid_value_type(btf, value_id, t, value_name))
+ return -EINVAL;
- if (!type_id || !btf_vmlinux)
- return NULL;
+ arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
+ GFP_KERNEL);
+ if (!arg_info)
+ return -ENOMEM;
+
+ st_ops_desc->arg_info = arg_info;
+ st_ops_desc->type = t;
+ st_ops_desc->type_id = type_id;
+ st_ops_desc->value_id = value_id;
+ st_ops_desc->value_type = btf_type_by_id(btf, value_id);
- for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
- if (bpf_struct_ops[i]->type_id == type_id)
- return bpf_struct_ops[i];
+ for_each_member(i, t, member) {
+ const struct btf_type *func_proto;
+
+ mname = btf_name_by_offset(btf, member->name_off);
+ if (!*mname) {
+ pr_warn("anon member in struct %s is not supported\n",
+ st_ops->name);
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if (__btf_member_bitfield_size(t, member)) {
+ pr_warn("bit field member %s in struct %s is not supported\n",
+ mname, st_ops->name);
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ func_proto = btf_type_resolve_func_ptr(btf,
+ member->type,
+ NULL);
+ if (!func_proto)
+ continue;
+
+ if (btf_distill_func_proto(log, btf,
+ func_proto, mname,
+ &st_ops->func_models[i])) {
+ pr_warn("Error in parsing func ptr %s in struct %s\n",
+ mname, st_ops->name);
+ err = -EINVAL;
+ goto errout;
+ }
+
+ err = prepare_arg_info(btf, st_ops->name, mname,
+ func_proto,
+ arg_info + i);
+ if (err)
+ goto errout;
}
- return NULL;
+ if (st_ops->init(btf)) {
+ pr_warn("Error in init bpf_struct_ops %s\n",
+ st_ops->name);
+ err = -EINVAL;
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ bpf_struct_ops_desc_release(st_ops_desc);
+
+ return err;
}
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
@@ -265,7 +420,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
kvalue = &st_map->kvalue;
/* Pair with smp_store_release() during map_update */
- state = smp_load_acquire(&kvalue->state);
+ state = smp_load_acquire(&kvalue->common.state);
if (state == BPF_STRUCT_OPS_STATE_INIT) {
memset(value, 0, map->value_size);
return 0;
@@ -276,7 +431,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
*/
uvalue = value;
memcpy(uvalue, st_map->uvalue, map->value_size);
- uvalue->state = state;
+ uvalue->common.state = state;
/* This value offers the user space a general estimate of how
* many sockets are still utilizing this struct_ops for TCP
@@ -284,7 +439,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
* should sufficiently meet our present goals.
*/
refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
- refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0));
+ refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));
return 0;
}
@@ -296,10 +451,9 @@ static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
- const struct btf_type *t = st_map->st_ops->type;
u32 i;
- for (i = 0; i < btf_type_vlen(t); i++) {
+ for (i = 0; i < st_map->links_cnt; i++) {
if (st_map->links[i]) {
bpf_link_put(st_map->links[i]);
st_map->links[i] = NULL;
@@ -307,7 +461,7 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
}
}
-static int check_zero_holes(const struct btf_type *t, void *data)
+static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
const struct btf_member *member;
u32 i, moff, msize, prev_mend = 0;
@@ -319,8 +473,8 @@ static int check_zero_holes(const struct btf_type *t, void *data)
memchr_inv(data + prev_mend, 0, moff - prev_mend))
return -EINVAL;
- mtype = btf_type_by_id(btf_vmlinux, member->type);
- mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
+ mtype = btf_type_by_id(btf, member->type);
+ mtype = btf_resolve_size(btf, mtype, &msize);
if (IS_ERR(mtype))
return PTR_ERR(mtype);
prev_mend = moff + msize;
@@ -376,10 +530,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
void *value, u64 flags)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
- const struct bpf_struct_ops *st_ops = st_map->st_ops;
+ const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
+ const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
struct bpf_struct_ops_value *uvalue, *kvalue;
+ const struct btf_type *module_type;
const struct btf_member *member;
- const struct btf_type *t = st_ops->type;
+ const struct btf_type *t = st_ops_desc->type;
struct bpf_tramp_links *tlinks;
void *udata, *kdata;
int prog_fd, err;
@@ -392,16 +548,16 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (*(u32 *)key != 0)
return -E2BIG;
- err = check_zero_holes(st_ops->value_type, value);
+ err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
if (err)
return err;
uvalue = value;
- err = check_zero_holes(t, uvalue->data);
+ err = check_zero_holes(st_map->btf, t, uvalue->data);
if (err)
return err;
- if (uvalue->state || refcount_read(&uvalue->refcnt))
+ if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
return -EINVAL;
tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
@@ -413,7 +569,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
mutex_lock(&st_map->lock);
- if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
+ if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
err = -EBUSY;
goto unlock;
}
@@ -425,6 +581,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
image = st_map->image;
image_end = st_map->image + PAGE_SIZE;
+ module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
for_each_member(i, t, member) {
const struct btf_type *mtype, *ptype;
struct bpf_prog *prog;
@@ -432,7 +589,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
u32 moff;
moff = __btf_member_bit_offset(t, member) / 8;
- ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
+ ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
if (ptype == module_type) {
if (*(void **)(udata + moff))
goto reset_unlock;
@@ -457,8 +614,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (!ptype || !btf_type_is_func_proto(ptype)) {
u32 msize;
- mtype = btf_type_by_id(btf_vmlinux, member->type);
- mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
+ mtype = btf_type_by_id(st_map->btf, member->type);
+ mtype = btf_resolve_size(st_map->btf, mtype, &msize);
if (IS_ERR(mtype)) {
err = PTR_ERR(mtype);
goto reset_unlock;
@@ -484,7 +641,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
}
if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
- prog->aux->attach_btf_id != st_ops->type_id ||
+ prog->aux->attach_btf_id != st_ops_desc->type_id ||
prog->expected_attach_type != i) {
bpf_prog_put(prog);
err = -EINVAL;
@@ -527,7 +684,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
*
* Pair with smp_load_acquire() during lookup_elem().
*/
- smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY);
+ smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
goto unlock;
}
@@ -545,7 +702,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
* It ensures the above udata updates (e.g. prog->aux->id)
* can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
*/
- smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
+ smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
goto unlock;
}
@@ -575,12 +732,12 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
if (st_map->map.map_flags & BPF_F_LINK)
return -EOPNOTSUPP;
- prev_state = cmpxchg(&st_map->kvalue.state,
+ prev_state = cmpxchg(&st_map->kvalue.common.state,
BPF_STRUCT_OPS_STATE_INUSE,
BPF_STRUCT_OPS_STATE_TOBEFREE);
switch (prev_state) {
case BPF_STRUCT_OPS_STATE_INUSE:
- st_map->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
bpf_map_put(map);
return 0;
case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -597,6 +754,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
struct seq_file *m)
{
+ struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
void *value;
int err;
@@ -606,7 +764,8 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
if (!err) {
- btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
+ btf_type_seq_show(st_map->btf,
+ map->btf_vmlinux_value_type_id,
value, m);
seq_puts(m, "\n");
}
@@ -631,6 +790,15 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
static void bpf_struct_ops_map_free(struct bpf_map *map)
{
+ struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+
+ /* st_ops->owner was acquired during map_alloc to implicitly hold
+ * the btf's refcnt. The acquire was only done when btf_is_module()
+ * was true; st_map->btf cannot be NULL here.
+ */
+ if (btf_is_module(st_map->btf))
+ module_put(st_map->st_ops_desc->st_ops->owner);
+
/* The struct_ops's function may switch to another struct_ops.
*
* For example, bpf_tcp_cc_x->init() may switch to
@@ -654,29 +822,61 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
- (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id)
+ (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
+ !attr->btf_vmlinux_value_type_id)
return -EINVAL;
return 0;
}
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
- const struct bpf_struct_ops *st_ops;
+ const struct bpf_struct_ops_desc *st_ops_desc;
size_t st_map_size;
struct bpf_struct_ops_map *st_map;
const struct btf_type *t, *vt;
+ struct module *mod = NULL;
struct bpf_map *map;
+ struct btf *btf;
int ret;
- st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
- if (!st_ops)
- return ERR_PTR(-ENOTSUPP);
+ if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
+ /* The map holds btf for its whole life time. */
+ btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
+ if (IS_ERR(btf))
+ return ERR_CAST(btf);
+ if (!btf_is_module(btf)) {
+ btf_put(btf);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mod = btf_try_get_module(btf);
+ /* mod holds a refcnt to btf. We don't need an extra refcnt
+ * here.
+ */
+ btf_put(btf);
+ if (!mod)
+ return ERR_PTR(-EINVAL);
+ } else {
+ btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(btf))
+ return ERR_CAST(btf);
+ if (!btf)
+ return ERR_PTR(-ENOTSUPP);
+ }
- vt = st_ops->value_type;
- if (attr->value_size != vt->size)
- return ERR_PTR(-EINVAL);
+ st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
+ if (!st_ops_desc) {
+ ret = -ENOTSUPP;
+ goto errout;
+ }
- t = st_ops->type;
+ vt = st_ops_desc->value_type;
+ if (attr->value_size != vt->size) {
+ ret = -EINVAL;
+ goto errout;
+ }
+
+ t = st_ops_desc->type;
st_map_size = sizeof(*st_map) +
/* kvalue stores the
@@ -685,17 +885,17 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
(vt->size - sizeof(struct bpf_struct_ops_value));
st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
- if (!st_map)
- return ERR_PTR(-ENOMEM);
+ if (!st_map) {
+ ret = -ENOMEM;
+ goto errout;
+ }
- st_map->st_ops = st_ops;
+ st_map->st_ops_desc = st_ops_desc;
map = &st_map->map;
ret = bpf_jit_charge_modmem(PAGE_SIZE);
- if (ret) {
- __bpf_struct_ops_map_free(map);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto errout_free;
st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
if (!st_map->image) {
@@ -704,29 +904,38 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
* here.
*/
bpf_jit_uncharge_modmem(PAGE_SIZE);
- __bpf_struct_ops_map_free(map);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto errout_free;
}
st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
+ st_map->links_cnt = btf_type_vlen(t);
st_map->links =
- bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *),
+ bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_links *),
NUMA_NO_NODE);
if (!st_map->uvalue || !st_map->links) {
- __bpf_struct_ops_map_free(map);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto errout_free;
}
+ st_map->btf = btf;
mutex_init(&st_map->lock);
bpf_map_init_from_attr(map, attr);
return map;
+
+errout_free:
+ __bpf_struct_ops_map_free(map);
+errout:
+ module_put(mod);
+
+ return ERR_PTR(ret);
}
static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
- const struct bpf_struct_ops *st_ops = st_map->st_ops;
- const struct btf_type *vt = st_ops->value_type;
+ const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
+ const struct btf_type *vt = st_ops_desc->value_type;
u64 usage;
usage = sizeof(*st_map) +
@@ -785,7 +994,7 @@ static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
map->map_flags & BPF_F_LINK &&
/* Pair with smp_store_release() during map_update */
- smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY;
+ smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}
static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
@@ -800,7 +1009,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
/* st_link->map can be NULL if
* bpf_struct_ops_link_create() fails to register.
*/
- st_map->st_ops->unreg(&st_map->kvalue.data);
+ st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data);
bpf_map_put(&st_map->map);
}
kfree(st_link);
@@ -847,7 +1056,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
if (!bpf_struct_ops_valid_to_reg(new_map))
return -EINVAL;
- if (!st_map->st_ops->update)
+ if (!st_map->st_ops_desc->st_ops->update)
return -EOPNOTSUPP;
mutex_lock(&update_mutex);
@@ -860,12 +1069,12 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
/* The new and old struct_ops must be the same type. */
- if (st_map->st_ops != old_st_map->st_ops) {
+ if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
err = -EINVAL;
goto err_out;
}
- err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
+ err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
if (err)
goto err_out;
@@ -916,7 +1125,7 @@ int bpf_struct_ops_link_create(union bpf_attr *attr)
if (err)
goto err_out;
- err = st_map->st_ops->reg(st_map->kvalue.data);
+ err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data);
if (err) {
bpf_link_cleanup(&link_primer);
link = NULL;
@@ -931,3 +1140,10 @@ err_out:
kfree(link);
return err;
}
+
+void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+ struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+
+ info->btf_vmlinux_id = btf_obj_id(st_map->btf);
+}
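
For context, a minimal userspace sketch of how the btf_vmlinux_id field filled in above could be read back; this assumes libbpf's bpf_map_get_info_by_fd() and a hypothetical struct_ops map FD:

	#include <stdio.h>
	#include <bpf/bpf.h>

	static void show_struct_ops_btf(int map_fd)
	{
		struct bpf_map_info info = {};
		__u32 len = sizeof(info);

		if (!bpf_map_get_info_by_fd(map_fd, &info, &len))
			printf("struct_ops type resolved in BTF object id %u\n",
			       info.btf_vmlinux_id);
	}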
diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
deleted file mode 100644
index 5678a9ddf817..000000000000
--- a/kernel/bpf/bpf_struct_ops_types.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* internal file - do not include directly */
-
-#ifdef CONFIG_BPF_JIT
-#ifdef CONFIG_NET
-BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
-#endif
-#ifdef CONFIG_INET
-#include <net/tcp.h>
-BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
-#endif
-#endif
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 596471189176..6ff0bd1a91d5 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -19,6 +19,7 @@
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
+#include <linux/bpf.h>
#include <linux/bpf_lsm.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
@@ -241,6 +242,12 @@ struct btf_id_dtor_kfunc_tab {
struct btf_id_dtor_kfunc dtors[];
};
+struct btf_struct_ops_tab {
+ u32 cnt;
+ u32 capacity;
+ struct bpf_struct_ops_desc ops[];
+};
+
struct btf {
void *data;
struct btf_type **types;
@@ -258,6 +265,7 @@ struct btf {
struct btf_kfunc_set_tab *kfunc_set_tab;
struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
struct btf_struct_metas *struct_meta_tab;
+ struct btf_struct_ops_tab *struct_ops_tab;
/* split BTF support */
struct btf *base_btf;
@@ -1688,11 +1696,27 @@ static void btf_free_struct_meta_tab(struct btf *btf)
btf->struct_meta_tab = NULL;
}
+static void btf_free_struct_ops_tab(struct btf *btf)
+{
+ struct btf_struct_ops_tab *tab = btf->struct_ops_tab;
+ u32 i;
+
+ if (!tab)
+ return;
+
+ for (i = 0; i < tab->cnt; i++)
+ bpf_struct_ops_desc_release(&tab->ops[i]);
+
+ kfree(tab);
+ btf->struct_ops_tab = NULL;
+}
+
static void btf_free(struct btf *btf)
{
btf_free_struct_meta_tab(btf);
btf_free_dtor_kfunc_tab(btf);
btf_free_kfunc_set_tab(btf);
+ btf_free_struct_ops_tab(btf);
kvfree(btf->types);
kvfree(btf->resolved_sizes);
kvfree(btf->resolved_ids);
@@ -1707,6 +1731,11 @@ static void btf_free_rcu(struct rcu_head *rcu)
btf_free(btf);
}
+const char *btf_get_name(const struct btf *btf)
+{
+ return btf->name;
+}
+
void btf_get(struct btf *btf)
{
refcount_inc(&btf->refcnt);
@@ -3310,30 +3339,48 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
return BTF_FIELD_FOUND;
}
-const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
- int comp_idx, const char *tag_key)
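+/* Look up the next BTF decl_tag whose name starts with tag_key and which
+ * targets component comp_idx of type pt, scanning type IDs greater than
+ * last_id; returns the decl_tag's type ID, or -ENOENT when none is left.
+ */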
+int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key, int last_id)
{
- const char *value = NULL;
- int i;
+ int len = strlen(tag_key);
+ int i, n;
- for (i = 1; i < btf_nr_types(btf); i++) {
+ for (i = last_id + 1, n = btf_nr_types(btf); i < n; i++) {
const struct btf_type *t = btf_type_by_id(btf, i);
- int len = strlen(tag_key);
if (!btf_type_is_decl_tag(t))
continue;
- if (pt != btf_type_by_id(btf, t->type) ||
- btf_type_decl_tag(t)->component_idx != comp_idx)
+ if (pt != btf_type_by_id(btf, t->type))
+ continue;
+ if (btf_type_decl_tag(t)->component_idx != comp_idx)
continue;
if (strncmp(__btf_name_by_offset(btf, t->name_off), tag_key, len))
continue;
- /* Prevent duplicate entries for same type */
- if (value)
- return ERR_PTR(-EEXIST);
- value = __btf_name_by_offset(btf, t->name_off) + len;
+ return i;
}
- if (!value)
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
+}
+
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key)
+{
+ const char *value = NULL;
+ const struct btf_type *t;
+ int len, id;
+
+ id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, 0);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ t = btf_type_by_id(btf, id);
+ len = strlen(tag_key);
+ value = __btf_name_by_offset(btf, t->name_off) + len;
+
+ /* Prevent duplicate entries for same type */
+ id = btf_find_next_decl_tag(btf, pt, comp_idx, tag_key, id);
+ if (id >= 0)
+ return ERR_PTR(-EEXIST);
+
return value;
}
@@ -5647,15 +5694,29 @@ static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
return ctx_type->type;
}
-const struct btf_type *
-btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
- const struct btf_type *t, enum bpf_prog_type prog_type,
- int arg)
+bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
{
const struct btf_type *ctx_type;
const char *tname, *ctx_tname;
t = btf_type_by_id(btf, t->type);
+
+	/* KPROBE programs allow the bpf_user_pt_regs_t typedef, which we need
+	 * to check for before we skip all the typedefs below.
+ */
+ if (prog_type == BPF_PROG_TYPE_KPROBE) {
+ while (btf_type_is_modifier(t) && !btf_type_is_typedef(t))
+ t = btf_type_by_id(btf, t->type);
+
+ if (btf_type_is_typedef(t)) {
+ tname = btf_name_by_offset(btf, t->name_off);
+ if (tname && strcmp(tname, "bpf_user_pt_regs_t") == 0)
+ return true;
+ }
+ }
+
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_struct(t)) {
@@ -5664,27 +5725,30 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
* is not supported yet.
* BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
*/
- return NULL;
+ return false;
}
tname = btf_name_by_offset(btf, t->name_off);
if (!tname) {
bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
- return NULL;
+ return false;
}
ctx_type = find_canonical_prog_ctx_type(prog_type);
if (!ctx_type) {
bpf_log(log, "btf_vmlinux is malformed\n");
/* should not happen */
- return NULL;
+ return false;
}
again:
ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_type->name_off);
if (!ctx_tname) {
/* should not happen */
bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
- return NULL;
+ return false;
}
+ /* program types without named context types work only with arg:ctx tag */
+ if (ctx_tname[0] == '\0')
+ return false;
/* only compare that prog's ctx type name is the same as
* kernel expects. No need to compare field by field.
* It's ok for bpf prog to do:
@@ -5693,20 +5757,20 @@ again:
* { // no fields of skb are ever used }
*/
if (strcmp(ctx_tname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
- return ctx_type;
+ return true;
if (strcmp(ctx_tname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
- return ctx_type;
+ return true;
if (strcmp(ctx_tname, tname)) {
/* bpf_user_pt_regs_t is a typedef, so resolve it to
* underlying struct and check name again
*/
if (!btf_type_is_modifier(ctx_type))
- return NULL;
+ return false;
while (btf_type_is_modifier(ctx_type))
ctx_type = btf_type_by_id(btf_vmlinux, ctx_type->type);
goto again;
}
- return ctx_type;
+ return true;
}
/* forward declarations for arch-specific underlying types of
@@ -5858,7 +5922,7 @@ static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
enum bpf_prog_type prog_type,
int arg)
{
- if (!btf_get_prog_ctx_type(log, btf, t, prog_type, arg))
+ if (!btf_is_prog_ctx_type(log, btf, t, prog_type, arg))
return -ENOENT;
return find_kern_ctx_type_id(prog_type);
}
@@ -5933,8 +5997,6 @@ struct btf *btf_parse_vmlinux(void)
/* btf_parse_vmlinux() runs under bpf_verifier_lock */
bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
- bpf_struct_ops_init(btf, log);
-
refcount_set(&btf->refcnt, 1);
err = btf_alloc_id(btf);
@@ -6092,6 +6154,26 @@ static bool prog_args_trusted(const struct bpf_prog *prog)
}
}
+int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ u32 arg_no)
+{
+ const struct btf_param *args;
+ const struct btf_type *t;
+ int off = 0, i;
+ u32 sz;
+
+ args = btf_params(func_proto);
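+	/* ctx arguments are laid out consecutively, each padded to 8 bytes */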
+ for (i = 0; i < arg_no; i++) {
+ t = btf_type_by_id(btf, args[i].type);
+ t = btf_resolve_size(btf, t, &sz);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ off += roundup(sz, 8);
+ }
+
+ return off;
+}
+
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
@@ -6228,7 +6310,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
}
info->reg_type = ctx_arg_info->reg_type;
- info->btf = btf_vmlinux;
+ info->btf = ctx_arg_info->btf ? : btf_vmlinux;
info->btf_id = ctx_arg_info->btf_id;
return true;
}
@@ -6284,6 +6366,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
__btf_name_by_offset(btf, t->name_off));
return true;
}
+EXPORT_SYMBOL_GPL(btf_ctx_access);
enum bpf_struct_walk_result {
/* < 0 error */
@@ -6946,6 +7029,80 @@ static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
return false;
}
+struct bpf_cand_cache {
+ const char *name;
+ u32 name_len;
+ u16 kind;
+ u16 cnt;
+ struct {
+ const struct btf *btf;
+ u32 id;
+ } cands[];
+};
+
+static DEFINE_MUTEX(cand_cache_mutex);
+
+static struct bpf_cand_cache *
+bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id);
+
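+/* Map a BPF program's local pointer type to a unique kernel (vmlinux) BTF
+ * type via the CO-RE candidate cache; returns the kernel BTF type ID, or a
+ * negative error if there is no match, the match is ambiguous, or the match
+ * lives in module BTF (unsupported).
+ */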
+static int btf_get_ptr_to_btf_id(struct bpf_verifier_log *log, int arg_idx,
+ const struct btf *btf, const struct btf_type *t)
+{
+ struct bpf_cand_cache *cc;
+ struct bpf_core_ctx ctx = {
+ .btf = btf,
+ .log = log,
+ };
+ u32 kern_type_id, type_id;
+ int err = 0;
+
+ /* skip PTR and modifiers */
+ type_id = t->type;
+ t = btf_type_by_id(btf, t->type);
+ while (btf_type_is_modifier(t)) {
+ type_id = t->type;
+ t = btf_type_by_id(btf, t->type);
+ }
+
+ mutex_lock(&cand_cache_mutex);
+ cc = bpf_core_find_cands(&ctx, type_id);
+ if (IS_ERR(cc)) {
+ err = PTR_ERR(cc);
+ bpf_log(log, "arg#%d reference type('%s %s') candidate matching error: %d\n",
+ arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
+ err);
+ goto cand_cache_unlock;
+ }
+ if (cc->cnt != 1) {
+ bpf_log(log, "arg#%d reference type('%s %s') %s\n",
+ arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off),
+ cc->cnt == 0 ? "has no matches" : "is ambiguous");
+ err = cc->cnt == 0 ? -ENOENT : -ESRCH;
+ goto cand_cache_unlock;
+ }
+ if (btf_is_module(cc->cands[0].btf)) {
+ bpf_log(log, "arg#%d reference type('%s %s') points to kernel module type (unsupported)\n",
+ arg_idx, btf_type_str(t), __btf_name_by_offset(btf, t->name_off));
+ err = -EOPNOTSUPP;
+ goto cand_cache_unlock;
+ }
+ kern_type_id = cc->cands[0].id;
+
+cand_cache_unlock:
+ mutex_unlock(&cand_cache_mutex);
+ if (err)
+ return err;
+
+ return kern_type_id;
+}
+
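+/* Bitmask of "arg:<tag>" BTF decl_tags found on one global subprog argument
+ * (e.g. libbpf emits these via its __arg_ctx/__arg_trusted/__arg_nonnull/
+ * __arg_nullable macros).
+ */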
+enum btf_arg_tag {
+ ARG_TAG_CTX = 0x1,
+ ARG_TAG_NONNULL = 0x2,
+ ARG_TAG_TRUSTED = 0x4,
+ ARG_TAG_NULLABLE = 0x8,
+};
+
/* Process BTF of a function to produce high-level expectation of function
* arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
* is cached in subprog info for reuse.
@@ -7009,6 +7166,8 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
args = (const struct btf_param *)(t + 1);
nargs = btf_type_vlen(t);
if (nargs > MAX_BPF_FUNC_REG_ARGS) {
+ if (!is_global)
+ return -EINVAL;
bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
tname, nargs, MAX_BPF_FUNC_REG_ARGS);
return -EINVAL;
@@ -7018,6 +7177,8 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
+ if (!is_global)
+ return -EINVAL;
bpf_log(log,
"Global function %s() doesn't return scalar. Only those are supported.\n",
tname);
@@ -7027,92 +7188,124 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
* Only PTR_TO_CTX and SCALAR are supported atm.
*/
for (i = 0; i < nargs; i++) {
- bool is_nonnull = false;
- const char *tag;
-
- t = btf_type_by_id(btf, args[i].type);
+ u32 tags = 0;
+ int id = 0;
- tag = btf_find_decl_tag_value(btf, fn_t, i, "arg:");
- if (IS_ERR(tag) && PTR_ERR(tag) == -ENOENT) {
- tag = NULL;
- } else if (IS_ERR(tag)) {
- bpf_log(log, "arg#%d type's tag fetching failure: %ld\n", i, PTR_ERR(tag));
- return PTR_ERR(tag);
- }
/* 'arg:<tag>' decl_tag takes precedence over derivation of
* register type from BTF type itself
*/
- if (tag) {
+ while ((id = btf_find_next_decl_tag(btf, fn_t, i, "arg:", id)) > 0) {
+ const struct btf_type *tag_t = btf_type_by_id(btf, id);
+ const char *tag = __btf_name_by_offset(btf, tag_t->name_off) + 4;
+
/* disallow arg tags in static subprogs */
if (!is_global) {
bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
return -EOPNOTSUPP;
}
+
if (strcmp(tag, "ctx") == 0) {
- sub->args[i].arg_type = ARG_PTR_TO_CTX;
- continue;
+ tags |= ARG_TAG_CTX;
+ } else if (strcmp(tag, "trusted") == 0) {
+ tags |= ARG_TAG_TRUSTED;
+ } else if (strcmp(tag, "nonnull") == 0) {
+ tags |= ARG_TAG_NONNULL;
+ } else if (strcmp(tag, "nullable") == 0) {
+ tags |= ARG_TAG_NULLABLE;
+ } else {
+ bpf_log(log, "arg#%d has unsupported set of tags\n", i);
+ return -EOPNOTSUPP;
}
- if (strcmp(tag, "nonnull") == 0)
- is_nonnull = true;
+ }
+ if (id != -ENOENT) {
+ bpf_log(log, "arg#%d type tag fetching failure: %d\n", i, id);
+ return id;
}
+ t = btf_type_by_id(btf, args[i].type);
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
- if (btf_type_is_int(t) || btf_is_any_enum(t)) {
- sub->args[i].arg_type = ARG_ANYTHING;
- continue;
- }
- if (btf_type_is_ptr(t) && btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
+ if (!btf_type_is_ptr(t))
+ goto skip_pointer;
+
+ if ((tags & ARG_TAG_CTX) || btf_is_prog_ctx_type(log, btf, t, prog_type, i)) {
+ if (tags & ~ARG_TAG_CTX) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
+ if ((tags & ARG_TAG_CTX) &&
+ btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
+ prog->expected_attach_type))
+ return -EINVAL;
sub->args[i].arg_type = ARG_PTR_TO_CTX;
continue;
}
- if (btf_type_is_ptr(t) && btf_is_dynptr_ptr(btf, t)) {
+ if (btf_is_dynptr_ptr(btf, t)) {
+ if (tags) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
continue;
}
- if (is_global && btf_type_is_ptr(t)) {
+ if (tags & ARG_TAG_TRUSTED) {
+ int kern_type_id;
+
+ if (tags & ARG_TAG_NONNULL) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
+
+ kern_type_id = btf_get_ptr_to_btf_id(log, i, btf, t);
+ if (kern_type_id < 0)
+ return kern_type_id;
+
+ sub->args[i].arg_type = ARG_PTR_TO_BTF_ID | PTR_TRUSTED;
+ if (tags & ARG_TAG_NULLABLE)
+ sub->args[i].arg_type |= PTR_MAYBE_NULL;
+ sub->args[i].btf_id = kern_type_id;
+ continue;
+ }
+ if (is_global) { /* generic user data pointer */
u32 mem_size;
+ if (tags & ARG_TAG_NULLABLE) {
+ bpf_log(log, "arg#%d has invalid combination of tags\n", i);
+ return -EINVAL;
+ }
+
t = btf_type_skip_modifiers(btf, t->type, NULL);
ref_t = btf_resolve_size(btf, t, &mem_size);
if (IS_ERR(ref_t)) {
- bpf_log(log,
- "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
- i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
+ bpf_log(log, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
+ i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
PTR_ERR(ref_t));
return -EINVAL;
}
- sub->args[i].arg_type = is_nonnull ? ARG_PTR_TO_MEM : ARG_PTR_TO_MEM_OR_NULL;
+ sub->args[i].arg_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL;
+ if (tags & ARG_TAG_NONNULL)
+ sub->args[i].arg_type &= ~PTR_MAYBE_NULL;
sub->args[i].mem_size = mem_size;
continue;
}
- if (is_nonnull) {
- bpf_log(log, "arg#%d marked as non-null, but is not a pointer type\n", i);
+
+skip_pointer:
+ if (tags) {
+ bpf_log(log, "arg#%d has pointer tag, but is not a pointer type\n", i);
return -EINVAL;
}
+ if (btf_type_is_int(t) || btf_is_any_enum(t)) {
+ sub->args[i].arg_type = ARG_ANYTHING;
+ continue;
+ }
+ if (!is_global)
+ return -EINVAL;
bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
i, btf_type_str(t), tname);
return -EINVAL;
}
- for (i = 0; i < nargs; i++) {
- const char *tag;
-
- if (sub->args[i].arg_type != ARG_PTR_TO_CTX)
- continue;
-
- /* check if arg has "arg:ctx" tag */
- t = btf_type_by_id(btf, args[i].type);
- tag = btf_find_decl_tag_value(btf, fn_t, i, "arg:");
- if (IS_ERR_OR_NULL(tag) || strcmp(tag, "ctx") != 0)
- continue;
-
- if (btf_validate_prog_ctx_type(log, btf, t, i, prog_type,
- prog->expected_attach_type))
- return -EINVAL;
- }
-
sub->arg_cnt = nargs;
sub->args_cached = true;
@@ -7589,6 +7782,17 @@ static struct btf *btf_get_module_btf(const struct module *module)
return btf;
}
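+/* Common handling for missing BTF at feature-registration time: fail hard
+ * when vmlinux BTF was expected but is absent; warn (when module BTF support
+ * is enabled) yet succeed for modules built without BTF.
+ */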
+static int check_btf_kconfigs(const struct module *module, const char *feature)
+{
+ if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
+ pr_err("missing vmlinux BTF, cannot register %s\n", feature);
+ return -ENOENT;
+ }
+ if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+ pr_warn("missing module BTF, cannot register %s\n", feature);
+ return 0;
+}
+
BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
{
struct btf *btf = NULL;
@@ -7949,15 +8153,8 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
int ret, i;
btf = btf_get_module_btf(kset->owner);
- if (!btf) {
- if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
- pr_err("missing vmlinux BTF, cannot register kfuncs\n");
- return -ENOENT;
- }
- if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
- pr_warn("missing module BTF, cannot register kfuncs\n");
- return 0;
- }
+ if (!btf)
+ return check_btf_kconfigs(kset->owner, "kfunc");
if (IS_ERR(btf))
return PTR_ERR(btf);
@@ -7981,6 +8178,14 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
{
enum btf_kfunc_hook hook;
+ /* All kfuncs need to be tagged as such in BTF.
+ * WARN() for initcall registrations that do not check errors.
+ */
+ if (!(kset->set->flags & BTF_SET8_KFUNCS)) {
+ WARN_ON(!kset->owner);
+ return -EINVAL;
+ }
+
hook = bpf_prog_type_to_kfunc_hook(prog_type);
return __register_btf_kfunc_id_set(hook, kset);
}
@@ -8057,17 +8262,8 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
int ret;
btf = btf_get_module_btf(owner);
- if (!btf) {
- if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
- pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
- return -ENOENT;
- }
- if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
- pr_err("missing module BTF, cannot register dtor kfuncs\n");
- return -ENOENT;
- }
- return 0;
- }
+ if (!btf)
+ return check_btf_kconfigs(owner, "dtor kfuncs");
if (IS_ERR(btf))
return PTR_ERR(btf);
@@ -8182,17 +8378,6 @@ size_t bpf_core_essential_name_len(const char *name)
return n;
}
-struct bpf_cand_cache {
- const char *name;
- u32 name_len;
- u16 kind;
- u16 cnt;
- struct {
- const struct btf *btf;
- u32 id;
- } cands[];
-};
-
static void bpf_free_cands(struct bpf_cand_cache *cands)
{
if (!cands->cnt)
@@ -8213,8 +8398,6 @@ static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
#define MODULE_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
-static DEFINE_MUTEX(cand_cache_mutex);
-
static void __print_cand_cache(struct bpf_verifier_log *log,
struct bpf_cand_cache **cache,
int cache_size)
@@ -8645,3 +8828,141 @@ bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
return !strncmp(reg_name, arg_name, cmp_len);
}
+
+#ifdef CONFIG_BPF_JIT
+static int
+btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,
+ struct bpf_verifier_log *log)
+{
+ struct btf_struct_ops_tab *tab, *new_tab;
+ int i, err;
+
+ tab = btf->struct_ops_tab;
+ if (!tab) {
+ tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]),
+ GFP_KERNEL);
+ if (!tab)
+ return -ENOMEM;
+ tab->capacity = 4;
+ btf->struct_ops_tab = tab;
+ }
+
+ for (i = 0; i < tab->cnt; i++)
+ if (tab->ops[i].st_ops == st_ops)
+ return -EEXIST;
+
+ if (tab->cnt == tab->capacity) {
+ new_tab = krealloc(tab,
+ offsetof(struct btf_struct_ops_tab,
+ ops[tab->capacity * 2]),
+ GFP_KERNEL);
+ if (!new_tab)
+ return -ENOMEM;
+ tab = new_tab;
+ tab->capacity *= 2;
+ btf->struct_ops_tab = tab;
+ }
+
+	tab->ops[tab->cnt].st_ops = st_ops;
+
+	err = bpf_struct_ops_desc_init(&tab->ops[tab->cnt], btf, log);
+	if (err)
+		return err;
+
+	tab->cnt++;
+
+ return 0;
+}
+
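+/* Look up a struct_ops descriptor in @btf by the BTF ID of its map value
+ * type (bpf_struct_ops_find_value) or of the struct_ops type itself
+ * (bpf_struct_ops_find below).
+ */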
+const struct bpf_struct_ops_desc *
+bpf_struct_ops_find_value(struct btf *btf, u32 value_id)
+{
+ const struct bpf_struct_ops_desc *st_ops_list;
+ unsigned int i;
+ u32 cnt;
+
+ if (!value_id)
+ return NULL;
+ if (!btf->struct_ops_tab)
+ return NULL;
+
+ cnt = btf->struct_ops_tab->cnt;
+ st_ops_list = btf->struct_ops_tab->ops;
+ for (i = 0; i < cnt; i++) {
+ if (st_ops_list[i].value_id == value_id)
+ return &st_ops_list[i];
+ }
+
+ return NULL;
+}
+
+const struct bpf_struct_ops_desc *
+bpf_struct_ops_find(struct btf *btf, u32 type_id)
+{
+ const struct bpf_struct_ops_desc *st_ops_list;
+ unsigned int i;
+ u32 cnt;
+
+ if (!type_id)
+ return NULL;
+ if (!btf->struct_ops_tab)
+ return NULL;
+
+ cnt = btf->struct_ops_tab->cnt;
+ st_ops_list = btf->struct_ops_tab->ops;
+ for (i = 0; i < cnt; i++) {
+ if (st_ops_list[i].type_id == type_id)
+ return &st_ops_list[i];
+ }
+
+ return NULL;
+}
+
+int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops)
+{
+ struct bpf_verifier_log *log;
+ struct btf *btf;
+ int err = 0;
+
+ btf = btf_get_module_btf(st_ops->owner);
+ if (!btf)
+ return check_btf_kconfigs(st_ops->owner, "struct_ops");
+ if (IS_ERR(btf))
+ return PTR_ERR(btf);
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN);
+ if (!log) {
+ err = -ENOMEM;
+ goto errout;
+ }
+
+ log->level = BPF_LOG_KERNEL;
+
+ err = btf_add_struct_ops(btf, st_ops, log);
+
+errout:
+ kfree(log);
+ btf_put(btf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__register_bpf_struct_ops);
+#endif
+
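+/* Check whether a function parameter's name ends with the given suffix;
+ * used for name-based kfunc argument annotations such as "__sz" or "__opt".
+ */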
+bool btf_param_match_suffix(const struct btf *btf,
+ const struct btf_param *arg,
+ const char *suffix)
+{
+ int suffix_len = strlen(suffix), len;
+ const char *param_name;
+
+ /* In the future, this can be ported to use BTF tagging */
+ param_name = btf_name_by_offset(btf, arg->name_off);
+ if (str_is_empty(param_name))
+ return false;
+ len = strlen(param_name);
+ if (len <= suffix_len)
+ return false;
+ param_name += len - suffix_len;
+ return !strncmp(param_name, suffix, suffix_len);
+}
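
For orientation, a minimal sketch of subsystem-side registration against the per-BTF struct_ops tables added above; the my_* names are invented and the mandatory callback set is abbreviated:

	static struct bpf_struct_ops bpf_my_ops = {
		.verifier_ops	= &my_verifier_ops,	/* hypothetical */
		.reg		= my_reg,
		.unreg		= my_unreg,
		.name		= "my_ops",
		.owner		= THIS_MODULE,
	};

	static int __init my_mod_init(void)
	{
		/* resolves "my_ops" within this module's BTF (or vmlinux BTF) */
		return __register_bpf_struct_ops(&bpf_my_ops);
	}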
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 491d20038cbe..82243cb6c54d 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1358,15 +1358,12 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
enum cgroup_bpf_attach_type atype)
{
- unsigned int offset = skb->data - skb_network_header(skb);
+ unsigned int offset = -skb_network_offset(skb);
struct sock *save_sk;
void *saved_data_end;
struct cgroup *cgrp;
int ret;
- if (!sk || !sk_fullsock(sk))
- return 0;
-
if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
return 0;
@@ -1630,7 +1627,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -2191,7 +2188,7 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -2348,7 +2345,7 @@ cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ea6843be2616..71c459a51d9e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -682,7 +682,7 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
if (!bpf_prog_kallsyms_candidate(fp) ||
- !bpf_capable())
+ !bpf_token_capable(fp->aux->token, CAP_BPF))
return;
bpf_prog_ksym_set_addr(fp);
@@ -2779,6 +2779,7 @@ void bpf_prog_free(struct bpf_prog *fp)
if (aux->dst_prog)
bpf_prog_put(aux->dst_prog);
+ bpf_token_put(aux->token);
INIT_WORK(&aux->work, bpf_prog_free_deferred);
schedule_work(&aux->work);
}
@@ -2925,6 +2926,16 @@ bool __weak bpf_jit_supports_far_kfunc_call(void)
return false;
}
+/* Return TRUE if the JIT backend satisfies the following two conditions:
+ * 1) the JIT backend supports atomic_xchg() on pointer-sized words.
+ * 2) on the arch in question, xchg() is implemented the same way as
+ *    atomic_xchg() on pointer-sized words.
+ */
+bool __weak bpf_jit_supports_ptr_xchg(void)
+{
+ return false;
+}
+
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
* skb_copy_bits(), so provide a weak definition of it for NET-less config.
*/
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8a0bb80fe48a..9ee8da477465 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -24,6 +24,7 @@
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>
+#include <net/hotdata.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
@@ -178,7 +179,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
void **frames, int n,
struct xdp_cpumap_stats *stats)
{
- struct xdp_rxq_info rxq;
+ struct xdp_rxq_info rxq = {};
struct xdp_buff xdp;
int i, nframes = 0;
@@ -326,7 +327,8 @@ static int cpu_map_kthread_run(void *data)
/* Support running another XDP prog on this CPU */
nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
if (nframes) {
- m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
+ m = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ gfp, nframes, skbs);
if (unlikely(m == 0)) {
for (i = 0; i < nframes; i++)
skbs[i] = NULL; /* effect: xdp_return_frame */
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 2e73533a3811..dad0fb1c8e87 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -424,7 +424,7 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
__bpf_kfunc_end_defs();
-BTF_SET8_START(cpumask_kfunc_btf_ids)
+BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
@@ -450,7 +450,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
-BTF_SET8_END(cpumask_kfunc_btf_ids)
+BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
static const struct btf_kfunc_id_set cpumask_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index be72824f32b2..a89587859571 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -334,7 +334,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
__this_cpu_write(irqsave_flags, flags);
}
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
__bpf_spin_lock_irqsave(lock);
return 0;
@@ -357,7 +357,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
local_irq_restore(flags);
}
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
__bpf_spin_unlock_irqrestore(lock);
return 0;
@@ -1101,6 +1101,7 @@ struct bpf_hrtimer {
struct bpf_prog *prog;
void __rcu *callback_fn;
void *value;
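+	/* freeing is deferred via kfree_rcu() so that bpf_timer_cancel()
+	 * can dereference this under rcu_read_lock()
+	 */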
+ struct rcu_head rcu;
};
/* the actual struct hidden inside uapi struct bpf_timer */
@@ -1332,6 +1333,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
if (in_nmi())
return -EOPNOTSUPP;
+ rcu_read_lock();
__bpf_spin_lock_irqsave(&timer->lock);
t = timer->timer;
if (!t) {
@@ -1353,6 +1355,7 @@ out:
* if it was running.
*/
ret = ret ?: hrtimer_cancel(&t->timer);
+ rcu_read_unlock();
return ret;
}
@@ -1407,13 +1410,14 @@ out:
*/
if (this_cpu_read(hrtimer_running) != t)
hrtimer_cancel(&t->timer);
- kfree(t);
+ kfree_rcu(t, rcu);
}
BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
unsigned long *kptr = map_value;
+	/* This helper may be inlined by the verifier. */
return xchg(kptr, (unsigned long)ptr);
}
@@ -1679,7 +1683,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -1730,7 +1734,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
break;
}
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return NULL;
switch (func_id) {
@@ -1788,7 +1792,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
break;
}
- if (!perfmon_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
return NULL;
switch (func_id) {
@@ -2483,9 +2487,9 @@ __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
return obj;
}
-__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
+__bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
{
- return obj__ign;
+ return (void *)obj__ign;
}
__bpf_kfunc void bpf_rcu_read_lock(void)
@@ -2543,7 +2547,7 @@ __bpf_kfunc void bpf_throw(u64 cookie)
__bpf_kfunc_end_defs();
-BTF_SET8_START(generic_btf_ids)
+BTF_KFUNCS_START(generic_btf_ids)
#ifdef CONFIG_KEXEC_CORE
BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
#endif
@@ -2572,7 +2576,7 @@ BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
-BTF_SET8_END(generic_btf_ids)
+BTF_KFUNCS_END(generic_btf_ids)
static const struct btf_kfunc_id_set generic_kfunc_set = {
.owner = THIS_MODULE,
@@ -2588,7 +2592,7 @@ BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release_dtor)
#endif
-BTF_SET8_START(common_btf_ids)
+BTF_KFUNCS_START(common_btf_ids)
BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
BTF_ID_FLAGS(func, bpf_rdonly_cast)
BTF_ID_FLAGS(func, bpf_rcu_read_lock)
@@ -2617,7 +2621,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_null)
BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
BTF_ID_FLAGS(func, bpf_dynptr_size)
BTF_ID_FLAGS(func, bpf_dynptr_clone)
-BTF_SET8_END(common_btf_ids)
+BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 41e0a55c35f5..af5d2ffadd70 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -20,6 +20,7 @@
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/kstrtox.h>
#include "preload/bpf_preload.h"
enum bpf_type {
@@ -98,9 +99,9 @@ static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops = { };
static const struct inode_operations bpf_link_iops = { };
-static struct inode *bpf_get_inode(struct super_block *sb,
- const struct inode *dir,
- umode_t mode)
+struct inode *bpf_get_inode(struct super_block *sb,
+ const struct inode *dir,
+ umode_t mode)
{
struct inode *inode;
@@ -594,6 +595,136 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
+struct bpffs_btf_enums {
+ const struct btf *btf;
+ const struct btf_type *cmd_t;
+ const struct btf_type *map_t;
+ const struct btf_type *prog_t;
+ const struct btf_type *attach_t;
+};
+
+static int find_bpffs_btf_enums(struct bpffs_btf_enums *info)
+{
+ const struct btf *btf;
+ const struct btf_type *t;
+ const char *name;
+ int i, n;
+
+ memset(info, 0, sizeof(*info));
+
+ btf = bpf_get_btf_vmlinux();
+ if (IS_ERR(btf))
+ return PTR_ERR(btf);
+ if (!btf)
+ return -ENOENT;
+
+ info->btf = btf;
+
+ for (i = 1, n = btf_nr_types(btf); i < n; i++) {
+ t = btf_type_by_id(btf, i);
+ if (!btf_type_is_enum(t))
+ continue;
+
+ name = btf_name_by_offset(btf, t->name_off);
+ if (!name)
+ continue;
+
+ if (strcmp(name, "bpf_cmd") == 0)
+ info->cmd_t = t;
+ else if (strcmp(name, "bpf_map_type") == 0)
+ info->map_t = t;
+ else if (strcmp(name, "bpf_prog_type") == 0)
+ info->prog_t = t;
+ else if (strcmp(name, "bpf_attach_type") == 0)
+ info->attach_t = t;
+ else
+ continue;
+
+ if (info->cmd_t && info->map_t && info->prog_t && info->attach_t)
+ return 0;
+ }
+
+ return -ESRCH;
+}
+
+static bool find_btf_enum_const(const struct btf *btf, const struct btf_type *enum_t,
+ const char *prefix, const char *str, int *value)
+{
+ const struct btf_enum *e;
+ const char *name;
+ int i, n, pfx_len = strlen(prefix);
+
+ *value = 0;
+
+ if (!btf || !enum_t)
+ return false;
+
+ for (i = 0, n = btf_vlen(enum_t); i < n; i++) {
+ e = &btf_enum(enum_t)[i];
+
+ name = btf_name_by_offset(btf, e->name_off);
+ if (!name || strncasecmp(name, prefix, pfx_len) != 0)
+ continue;
+
+		/* match symbolic name case-insensitively, ignoring the prefix */
+ if (strcasecmp(name + pfx_len, str) == 0) {
+ *value = e->val;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void seq_print_delegate_opts(struct seq_file *m,
+ const char *opt_name,
+ const struct btf *btf,
+ const struct btf_type *enum_t,
+ const char *prefix,
+ u64 delegate_msk, u64 any_msk)
+{
+ const struct btf_enum *e;
+ bool first = true;
+ const char *name;
+ u64 msk;
+ int i, n, pfx_len = strlen(prefix);
+
+ delegate_msk &= any_msk; /* clear unknown bits */
+
+ if (delegate_msk == 0)
+ return;
+
+ seq_printf(m, ",%s", opt_name);
+ if (delegate_msk == any_msk) {
+ seq_printf(m, "=any");
+ return;
+ }
+
+ if (btf && enum_t) {
+ for (i = 0, n = btf_vlen(enum_t); i < n; i++) {
+ e = &btf_enum(enum_t)[i];
+ name = btf_name_by_offset(btf, e->name_off);
+ if (!name || strncasecmp(name, prefix, pfx_len) != 0)
+ continue;
+ msk = 1ULL << e->val;
+ if (delegate_msk & msk) {
+ /* emit lower-case name without prefix */
+ seq_printf(m, "%c", first ? '=' : ':');
+ name += pfx_len;
+ while (*name) {
+ seq_printf(m, "%c", tolower(*name));
+ name++;
+ }
+
+ delegate_msk &= ~msk;
+ first = false;
+ }
+ }
+ }
+ if (delegate_msk)
+ seq_printf(m, "%c0x%llx", first ? '=' : ':', delegate_msk);
+}
+
/*
* Display the mount options in /proc/mounts.
*/
@@ -601,6 +732,8 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
struct inode *inode = d_inode(root);
umode_t mode = inode->i_mode & S_IALLUGO & ~S_ISVTX;
+ struct bpf_mount_opts *opts = root->d_sb->s_fs_info;
+ u64 mask;
if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
@@ -610,6 +743,35 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
from_kgid_munged(&init_user_ns, inode->i_gid));
if (mode != S_IRWXUGO)
seq_printf(m, ",mode=%o", mode);
+
+ if (opts->delegate_cmds || opts->delegate_maps ||
+ opts->delegate_progs || opts->delegate_attachs) {
+ struct bpffs_btf_enums info;
+
+ /* ignore errors, fallback to hex */
+		/* ignore errors, fall back to hex */
+
+ mask = (1ULL << __MAX_BPF_CMD) - 1;
+ seq_print_delegate_opts(m, "delegate_cmds",
+ info.btf, info.cmd_t, "BPF_",
+ opts->delegate_cmds, mask);
+
+ mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
+ seq_print_delegate_opts(m, "delegate_maps",
+ info.btf, info.map_t, "BPF_MAP_TYPE_",
+ opts->delegate_maps, mask);
+
+ mask = (1ULL << __MAX_BPF_PROG_TYPE) - 1;
+ seq_print_delegate_opts(m, "delegate_progs",
+ info.btf, info.prog_t, "BPF_PROG_TYPE_",
+ opts->delegate_progs, mask);
+
+ mask = (1ULL << __MAX_BPF_ATTACH_TYPE) - 1;
+ seq_print_delegate_opts(m, "delegate_attachs",
+ info.btf, info.attach_t, "BPF_",
+ opts->delegate_attachs, mask);
+ }
+
return 0;
}
@@ -624,7 +786,7 @@ static void bpf_free_inode(struct inode *inode)
free_inode_nonrcu(inode);
}
-static const struct super_operations bpf_super_ops = {
+const struct super_operations bpf_super_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.show_options = bpf_show_options,
@@ -635,28 +797,30 @@ enum {
OPT_UID,
OPT_GID,
OPT_MODE,
+ OPT_DELEGATE_CMDS,
+ OPT_DELEGATE_MAPS,
+ OPT_DELEGATE_PROGS,
+ OPT_DELEGATE_ATTACHS,
};
static const struct fs_parameter_spec bpf_fs_parameters[] = {
fsparam_u32 ("uid", OPT_UID),
fsparam_u32 ("gid", OPT_GID),
fsparam_u32oct ("mode", OPT_MODE),
+ fsparam_string ("delegate_cmds", OPT_DELEGATE_CMDS),
+ fsparam_string ("delegate_maps", OPT_DELEGATE_MAPS),
+ fsparam_string ("delegate_progs", OPT_DELEGATE_PROGS),
+ fsparam_string ("delegate_attachs", OPT_DELEGATE_ATTACHS),
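+	/* e.g. -o delegate_cmds=prog_load:map_create,delegate_maps=any */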
{}
};
-struct bpf_mount_opts {
- kuid_t uid;
- kgid_t gid;
- umode_t mode;
-};
-
static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- struct bpf_mount_opts *opts = fc->fs_private;
+ struct bpf_mount_opts *opts = fc->s_fs_info;
struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
- int opt;
+ int opt, err;
opt = fs_parse(fc, bpf_fs_parameters, param, &result);
if (opt < 0) {
@@ -708,6 +872,67 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
case OPT_MODE:
opts->mode = result.uint_32 & S_IALLUGO;
break;
+ case OPT_DELEGATE_CMDS:
+ case OPT_DELEGATE_MAPS:
+ case OPT_DELEGATE_PROGS:
+ case OPT_DELEGATE_ATTACHS: {
+ struct bpffs_btf_enums info;
+ const struct btf_type *enum_t;
+ const char *enum_pfx;
+ u64 *delegate_msk, msk = 0;
+ char *p;
+ int val;
+
+		/* ignore errors, fall back to hex */
+ (void)find_bpffs_btf_enums(&info);
+
+ switch (opt) {
+ case OPT_DELEGATE_CMDS:
+ delegate_msk = &opts->delegate_cmds;
+ enum_t = info.cmd_t;
+ enum_pfx = "BPF_";
+ break;
+ case OPT_DELEGATE_MAPS:
+ delegate_msk = &opts->delegate_maps;
+ enum_t = info.map_t;
+ enum_pfx = "BPF_MAP_TYPE_";
+ break;
+ case OPT_DELEGATE_PROGS:
+ delegate_msk = &opts->delegate_progs;
+ enum_t = info.prog_t;
+ enum_pfx = "BPF_PROG_TYPE_";
+ break;
+ case OPT_DELEGATE_ATTACHS:
+ delegate_msk = &opts->delegate_attachs;
+ enum_t = info.attach_t;
+ enum_pfx = "BPF_";
+ break;
+ default:
+ return -EINVAL;
+ }
+
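+		/* each ':'-separated element is "any", a case-insensitive
+		 * symbolic name with its "BPF_*" prefix dropped, or a raw
+		 * numeric mask
+		 */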
+ while ((p = strsep(&param->string, ":"))) {
+ if (strcmp(p, "any") == 0) {
+ msk |= ~0ULL;
+ } else if (find_btf_enum_const(info.btf, enum_t, enum_pfx, p, &val)) {
+ msk |= 1ULL << val;
+ } else {
+				u64 m;
+
+				/* accumulate, don't clobber bits parsed so far */
+				err = kstrtou64(p, 0, &m);
+				if (err)
+					return err;
+				msk |= m;
+ }
+ }
+
+ /* Setting delegation mount options requires privileges */
+ if (msk && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ *delegate_msk |= msk;
+ break;
+ }
+ default:
+ /* ignore unknown mount options */
+ break;
}
return 0;
@@ -784,10 +1009,14 @@ out:
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr bpf_rfiles[] = { { "" } };
- struct bpf_mount_opts *opts = fc->fs_private;
+ struct bpf_mount_opts *opts = sb->s_fs_info;
struct inode *inode;
int ret;
+ /* Mounting an instance of BPF FS requires privileges */
+ if (fc->user_ns != &init_user_ns && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
if (ret)
return ret;
@@ -811,7 +1040,7 @@ static int bpf_get_tree(struct fs_context *fc)
static void bpf_free_fc(struct fs_context *fc)
{
- kfree(fc->fs_private);
+ kfree(fc->s_fs_info);
}
static const struct fs_context_operations bpf_context_ops = {
@@ -835,17 +1064,32 @@ static int bpf_init_fs_context(struct fs_context *fc)
opts->uid = current_fsuid();
opts->gid = current_fsgid();
- fc->fs_private = opts;
+ /* start out with no BPF token delegation enabled */
+ opts->delegate_cmds = 0;
+ opts->delegate_maps = 0;
+ opts->delegate_progs = 0;
+ opts->delegate_attachs = 0;
+
+ fc->s_fs_info = opts;
fc->ops = &bpf_context_ops;
return 0;
}
+static void bpf_kill_super(struct super_block *sb)
+{
+ struct bpf_mount_opts *opts = sb->s_fs_info;
+
+ kill_litter_super(sb);
+ kfree(opts);
+}
+
static struct file_system_type bpf_fs_type = {
.owner = THIS_MODULE,
.name = "bpf",
.init_fs_context = bpf_init_fs_context,
.parameters = bpf_fs_parameters,
- .kill_sb = kill_litter_super,
+ .kill_sb = bpf_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
};
static int __init bpf_init(void)
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 594a234f122b..63c34e7b0715 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -9,6 +9,7 @@
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/math64.h>
+#include <linux/string.h>
#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
@@ -333,7 +334,8 @@ find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
const struct bpf_line_info *linfo;
const struct bpf_prog *prog;
- u32 i, nr_linfo;
+ u32 nr_linfo;
+ int l, r, m;
prog = env->prog;
nr_linfo = prog->aux->nr_linfo;
@@ -342,11 +344,30 @@ find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
return NULL;
linfo = prog->aux->linfo;
- for (i = 1; i < nr_linfo; i++)
- if (insn_off < linfo[i].insn_off)
- break;
+	/* Loop invariant: linfo[l].insn_off <= insn_off.
+ * linfo[0].insn_off == 0 which always satisfies above condition.
+ * Binary search is searching for rightmost linfo entry that satisfies
+ * the above invariant, giving us the desired record that covers given
+ * instruction offset.
+ */
+ l = 0;
+ r = nr_linfo - 1;
+ while (l < r) {
+ /* (r - l + 1) / 2 means we break a tie to the right, so if:
+ * l=1, r=2, linfo[l].insn_off <= insn_off, linfo[r].insn_off > insn_off,
+ * then m=2, we see that linfo[m].insn_off > insn_off, and so
+ * r becomes 1 and we exit the loop with correct l==1.
+ * If the tie was broken to the left, m=1 would end us up in
+ * an endless loop where l and m stay at 1 and r stays at 2.
+ */
+ m = l + (r - l + 1) / 2;
+ if (linfo[m].insn_off <= insn_off)
+ l = m;
+ else
+ r = m - 1;
+ }
- return &linfo[i - 1];
+ return &linfo[l];
}
static const char *ltrim(const char *s)
@@ -361,13 +382,28 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...)
{
- const struct bpf_line_info *linfo;
+ const struct bpf_line_info *linfo, *prev_linfo;
+ const struct btf *btf;
+ const char *s, *fname;
if (!bpf_verifier_log_needed(&env->log))
return;
+ prev_linfo = env->prev_linfo;
linfo = find_linfo(env, insn_off);
- if (!linfo || linfo == env->prev_linfo)
+ if (!linfo || linfo == prev_linfo)
+ return;
+
+ /* It often happens that two separate linfo records point to the same
+	 * source code line, but have differing column numbers. Given the
+	 * verifier log doesn't emit column information, from the user's
+	 * perspective we just end up emitting the same source code line twice
+	 * unnecessarily. So instead check that the previous and current linfo
+	 * records point to the same file (file_name_offs match) and the same
+	 * line number, and avoid emitting a duplicated source code line in
+	 * that case.
+ */
+ if (prev_linfo && linfo->file_name_off == prev_linfo->file_name_off &&
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col) == BPF_LINE_INFO_LINE_NUM(prev_linfo->line_col))
return;
if (prefix_fmt) {
@@ -378,9 +414,15 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
va_end(args);
}
- verbose(env, "%s\n",
- ltrim(btf_name_by_offset(env->prog->aux->btf,
- linfo->line_off)));
+ btf = env->prog->aux->btf;
+ s = ltrim(btf_name_by_offset(btf, linfo->line_off));
+ verbose(env, "%s", s); /* source code line */
+
+ s = btf_name_by_offset(btf, linfo->file_name_off);
+ /* leave only file name */
+ fname = strrchr(s, '/');
+ fname = fname ? fname + 1 : s;
+ verbose(env, " @ %s:%u\n", fname, BPF_LINE_INFO_LINE_NUM(linfo->line_col));
env->prev_linfo = linfo;
}
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index b32be680da6c..050fe1ebf0f7 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -164,13 +164,13 @@ static inline int extract_bit(const u8 *data, size_t index)
*/
static size_t longest_prefix_match(const struct lpm_trie *trie,
const struct lpm_trie_node *node,
- const struct bpf_lpm_trie_key *key)
+ const struct bpf_lpm_trie_key_u8 *key)
{
u32 limit = min(node->prefixlen, key->prefixlen);
u32 prefixlen = 0, i = 0;
BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32));
- BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32));
+ BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key_u8, data) % sizeof(u32));
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT)
@@ -229,7 +229,7 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
{
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
struct lpm_trie_node *node, *found = NULL;
- struct bpf_lpm_trie_key *key = _key;
+ struct bpf_lpm_trie_key_u8 *key = _key;
if (key->prefixlen > trie->max_prefixlen)
return NULL;
@@ -309,7 +309,7 @@ static long trie_update_elem(struct bpf_map *map,
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
struct lpm_trie_node __rcu **slot;
- struct bpf_lpm_trie_key *key = _key;
+ struct bpf_lpm_trie_key_u8 *key = _key;
unsigned long irq_flags;
unsigned int next_bit;
size_t matchlen = 0;
@@ -437,7 +437,7 @@ out:
static long trie_delete_elem(struct bpf_map *map, void *_key)
{
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
- struct bpf_lpm_trie_key *key = _key;
+ struct bpf_lpm_trie_key_u8 *key = _key;
struct lpm_trie_node __rcu **trim, **trim2;
struct lpm_trie_node *node, *parent;
unsigned long irq_flags;
@@ -536,7 +536,7 @@ out:
sizeof(struct lpm_trie_node))
#define LPM_VAL_SIZE_MIN 1
-#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key) + (X))
+#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key_u8) + (X))
#define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
#define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
@@ -565,7 +565,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
/* copy mandatory map attributes */
bpf_map_init_from_attr(&trie->map, attr);
trie->data_size = attr->key_size -
- offsetof(struct bpf_lpm_trie_key, data);
+ offsetof(struct bpf_lpm_trie_key_u8, data);
trie->max_prefixlen = trie->data_size * 8;
spin_lock_init(&trie->lock);
@@ -616,7 +616,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
{
struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root;
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
- struct bpf_lpm_trie_key *key = _key, *next_key = _next_key;
+ struct bpf_lpm_trie_key_u8 *key = _key, *next_key = _next_key;
struct lpm_trie_node **node_stack = NULL;
int err = 0, stack_ptr = -1;
unsigned int next_bit;
@@ -703,7 +703,7 @@ find_leftmost:
}
do_copy:
next_key->prefixlen = next_node->prefixlen;
- memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data),
+ memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key_u8, data),
next_node->data, trie->data_size);
free_stack:
kfree(node_stack);
@@ -715,7 +715,7 @@ static int trie_check_btf(const struct bpf_map *map,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
- /* Keys must have struct bpf_lpm_trie_key embedded. */
+ /* Keys must have struct bpf_lpm_trie_key_u8 embedded. */
return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ?
-EINVAL : 0;
}
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index 6abd7c5df4b3..9575314f40a6 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -213,9 +213,9 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_map_iter_kfunc_ids)
+BTF_KFUNCS_START(bpf_map_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS)
-BTF_SET8_END(bpf_map_iter_kfunc_ids)
+BTF_KFUNCS_END(bpf_map_iter_kfunc_ids)
static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a1f18681721c..b2750b79ac80 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1011,8 +1011,8 @@ int map_check_no_btf(const struct bpf_map *map,
return -ENOTSUPP;
}
-static int map_check_btf(struct bpf_map *map, const struct btf *btf,
- u32 btf_key_id, u32 btf_value_id)
+static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
+ const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
const struct btf_type *key_type, *value_type;
u32 key_size, value_size;
@@ -1040,7 +1040,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
if (!IS_ERR_OR_NULL(map->record)) {
int i;
- if (!bpf_capable()) {
+ if (!bpf_token_capable(token, CAP_BPF)) {
ret = -EPERM;
goto free_map_tab;
}
@@ -1123,14 +1123,21 @@ free_map_tab:
return ret;
}
-#define BPF_MAP_CREATE_LAST_FIELD map_extra
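+/* CAP_SYS_ADMIN is accepted as a superset of CAP_NET_ADMIN here */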
+static bool bpf_net_capable(void)
+{
+ return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
+}
+
+#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
const struct bpf_map_ops *ops;
+ struct bpf_token *token = NULL;
int numa_node = bpf_map_attr_numa_node(attr);
u32 map_type = attr->map_type;
struct bpf_map *map;
+ bool token_flag;
int f_flags;
int err;
@@ -1138,6 +1145,12 @@ static int map_create(union bpf_attr *attr)
if (err)
return -EINVAL;
+ /* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
+ * to avoid per-map type checks tripping on unknown flag
+ */
+ token_flag = attr->map_flags & BPF_F_TOKEN_FD;
+ attr->map_flags &= ~BPF_F_TOKEN_FD;
+
if (attr->btf_vmlinux_value_type_id) {
if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
attr->btf_key_type_id || attr->btf_value_type_id)
@@ -1178,14 +1191,32 @@ static int map_create(union bpf_attr *attr)
if (!ops->map_mem_usage)
return -EINVAL;
+ if (token_flag) {
+ token = bpf_token_get_from_fd(attr->map_token_fd);
+ if (IS_ERR(token))
+ return PTR_ERR(token);
+
+ /* if current token doesn't grant map creation permissions,
+ * then we can't use this token, so ignore it and rely on
+ * system-wide capabilities checks
+ */
+ if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
+ !bpf_token_allow_map_type(token, attr->map_type)) {
+ bpf_token_put(token);
+ token = NULL;
+ }
+ }
+
+ err = -EPERM;
+
/* Intent here is for unprivileged_bpf_disabled to block BPF map
* creation for unprivileged users; other actions depend
* on fd availability and access to bpffs, so are dependent on
* object creation success. Even with unprivileged BPF disabled,
* capability checks are still carried out.
*/
- if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
- return -EPERM;
+ if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
+ goto put_token;
/* check privileged map type permissions */
switch (map_type) {
@@ -1218,25 +1249,27 @@ static int map_create(union bpf_attr *attr)
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
case BPF_MAP_TYPE_STRUCT_OPS:
case BPF_MAP_TYPE_CPUMAP:
- if (!bpf_capable())
- return -EPERM;
+ if (!bpf_token_capable(token, CAP_BPF))
+ goto put_token;
break;
case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
case BPF_MAP_TYPE_XSKMAP:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
+ if (!bpf_token_capable(token, CAP_NET_ADMIN))
+ goto put_token;
break;
default:
WARN(1, "unsupported map type %d", map_type);
- return -EPERM;
+ goto put_token;
}
map = ops->map_alloc(attr);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto put_token;
+ }
map->ops = ops;
map->map_type = map_type;
@@ -1273,7 +1306,7 @@ static int map_create(union bpf_attr *attr)
map->btf = btf;
if (attr->btf_value_type_id) {
- err = map_check_btf(map, btf, attr->btf_key_type_id,
+ err = map_check_btf(map, token, btf, attr->btf_key_type_id,
attr->btf_value_type_id);
if (err)
goto free_map;
@@ -1285,15 +1318,16 @@ static int map_create(union bpf_attr *attr)
attr->btf_vmlinux_value_type_id;
}
- err = security_bpf_map_alloc(map);
+ err = security_bpf_map_create(map, attr, token);
if (err)
- goto free_map;
+ goto free_map_sec;
err = bpf_map_alloc_id(map);
if (err)
goto free_map_sec;
bpf_map_save_memcg(map);
+ bpf_token_put(token);
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
@@ -1314,6 +1348,8 @@ free_map_sec:
free_map:
btf_put(map->btf);
map->ops->map_free(map);
+put_token:
+ bpf_token_put(token);
return err;
}
@@ -2144,7 +2180,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
kvfree(aux->func_info);
kfree(aux->func_info_aux);
free_uid(aux->user);
- security_bpf_prog_free(aux);
+ security_bpf_prog_free(aux->prog);
bpf_prog_free(aux->prog);
}
@@ -2590,13 +2626,15 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD log_true_size
+#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{
enum bpf_prog_type type = attr->prog_type;
struct bpf_prog *prog, *dst_prog = NULL;
struct btf *attach_btf = NULL;
+ struct bpf_token *token = NULL;
+ bool bpf_cap;
int err;
char license[128];
@@ -2610,13 +2648,35 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
BPF_F_TEST_RND_HI32 |
BPF_F_XDP_HAS_FRAGS |
BPF_F_XDP_DEV_BOUND_ONLY |
- BPF_F_TEST_REG_INVARIANTS))
+ BPF_F_TEST_REG_INVARIANTS |
+ BPF_F_TOKEN_FD))
return -EINVAL;
+ bpf_prog_load_fixup_attach_type(attr);
+
+ if (attr->prog_flags & BPF_F_TOKEN_FD) {
+ token = bpf_token_get_from_fd(attr->prog_token_fd);
+ if (IS_ERR(token))
+ return PTR_ERR(token);
+ /* if current token doesn't grant prog loading permissions,
+ * then we can't use this token, so ignore it and rely on
+ * system-wide capabilities checks
+ */
+ if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) ||
+ !bpf_token_allow_prog_type(token, attr->prog_type,
+ attr->expected_attach_type)) {
+ bpf_token_put(token);
+ token = NULL;
+ }
+ }
+
+ bpf_cap = bpf_token_capable(token, CAP_BPF);
+ err = -EPERM;
+
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
(attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
- !bpf_capable())
- return -EPERM;
+ !bpf_cap)
+ goto put_token;
/* Intent here is for unprivileged_bpf_disabled to block BPF program
* creation for unprivileged users; other actions depend
@@ -2625,21 +2685,23 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
* capability checks are still carried out for these
* and other operations.
*/
- if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
- return -EPERM;
+ if (sysctl_unprivileged_bpf_disabled && !bpf_cap)
+ goto put_token;
if (attr->insn_cnt == 0 ||
- attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
- return -E2BIG;
+ attr->insn_cnt > (bpf_cap ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) {
+ err = -E2BIG;
+ goto put_token;
+ }
if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
type != BPF_PROG_TYPE_CGROUP_SKB &&
- !bpf_capable())
- return -EPERM;
+ !bpf_cap)
+ goto put_token;
- if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (is_perfmon_prog_type(type) && !perfmon_capable())
- return -EPERM;
+ if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN))
+ goto put_token;
+ if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON))
+ goto put_token;
/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
* or btf, we need to check which one it is
@@ -2649,27 +2711,33 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
if (IS_ERR(dst_prog)) {
dst_prog = NULL;
attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
- if (IS_ERR(attach_btf))
- return -EINVAL;
+ if (IS_ERR(attach_btf)) {
+ err = -EINVAL;
+ goto put_token;
+ }
if (!btf_is_kernel(attach_btf)) {
/* attaching through specifying bpf_prog's BTF
* objects directly might be supported eventually
*/
btf_put(attach_btf);
- return -ENOTSUPP;
+ err = -ENOTSUPP;
+ goto put_token;
}
}
} else if (attr->attach_btf_id) {
/* fall back to vmlinux BTF, if BTF type ID is specified */
attach_btf = bpf_get_btf_vmlinux();
- if (IS_ERR(attach_btf))
- return PTR_ERR(attach_btf);
- if (!attach_btf)
- return -EINVAL;
+ if (IS_ERR(attach_btf)) {
+ err = PTR_ERR(attach_btf);
+ goto put_token;
+ }
+ if (!attach_btf) {
+ err = -EINVAL;
+ goto put_token;
+ }
btf_get(attach_btf);
}
- bpf_prog_load_fixup_attach_type(attr);
if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
attach_btf, attr->attach_btf_id,
dst_prog)) {
@@ -2677,7 +2745,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
bpf_prog_put(dst_prog);
if (attach_btf)
btf_put(attach_btf);
- return -EINVAL;
+ err = -EINVAL;
+ goto put_token;
}
/* plain bpf_prog allocation */
@@ -2687,7 +2756,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
bpf_prog_put(dst_prog);
if (attach_btf)
btf_put(attach_btf);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto put_token;
}
prog->expected_attach_type = attr->expected_attach_type;
@@ -2698,9 +2768,9 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
- err = security_bpf_prog_alloc(prog->aux);
- if (err)
- goto free_prog;
+ /* move token into prog->aux, reuse taken refcnt */
+ prog->aux->token = token;
+ token = NULL;
prog->aux->user = get_current_user();
prog->len = attr->insn_cnt;
@@ -2709,12 +2779,12 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
if (copy_from_bpfptr(prog->insns,
make_bpfptr(attr->insns, uattr.is_kernel),
bpf_prog_insn_size(prog)) != 0)
- goto free_prog_sec;
+ goto free_prog;
/* copy eBPF program license from user space */
if (strncpy_from_bpfptr(license,
make_bpfptr(attr->license, uattr.is_kernel),
sizeof(license) - 1) < 0)
- goto free_prog_sec;
+ goto free_prog;
license[sizeof(license) - 1] = 0;
/* eBPF programs must be GPL compatible to use GPL-ed functions */
@@ -2728,14 +2798,14 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
if (bpf_prog_is_dev_bound(prog->aux)) {
err = bpf_prog_dev_bound_init(prog, attr);
if (err)
- goto free_prog_sec;
+ goto free_prog;
}
if (type == BPF_PROG_TYPE_EXT && dst_prog &&
bpf_prog_is_dev_bound(dst_prog->aux)) {
err = bpf_prog_dev_bound_inherit(prog, dst_prog);
if (err)
- goto free_prog_sec;
+ goto free_prog;
}
/*
@@ -2757,12 +2827,16 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
if (err < 0)
- goto free_prog_sec;
+ goto free_prog;
prog->aux->load_time = ktime_get_boottime_ns();
err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
sizeof(attr->prog_name));
if (err < 0)
+ goto free_prog;
+
+ err = security_bpf_prog_load(prog, attr, token);
+ if (err)
goto free_prog_sec;
/* run eBPF verifier */
@@ -2808,13 +2882,16 @@ free_used_maps:
*/
__bpf_prog_put_noref(prog, prog->aux->real_func_cnt);
return err;
+
free_prog_sec:
- free_uid(prog->aux->user);
- security_bpf_prog_free(prog->aux);
+ security_bpf_prog_free(prog);
free_prog:
+ free_uid(prog->aux->user);
if (prog->aux->attach_btf)
btf_put(prog->aux->attach_btf);
bpf_prog_free(prog);
+put_token:
+ bpf_token_put(token);
return err;
}
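/* Illustrative sketch, not part of the patch: how user space could pass a
 * token to BPF_PROG_LOAD via the BPF_F_TOKEN_FD flag and prog_token_fd
 * field added above. insns, insn_cnt and token_fd are assumed to be set up
 * elsewhere; error handling is omitted.
 */
#if 0
	union bpf_attr attr = {};

	attr.prog_type = BPF_PROG_TYPE_XDP;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.prog_flags = BPF_F_TOKEN_FD;
	attr.prog_token_fd = token_fd; /* fd obtained via BPF_TOKEN_CREATE */
	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
#endif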
@@ -3501,6 +3578,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
if (!kallsyms_show_value(current_cred()))
addr = 0;
info->perf_event.kprobe.addr = addr;
+ info->perf_event.kprobe.cookie = event->bpf_cookie;
return 0;
}
#endif
@@ -3526,6 +3604,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
else
info->perf_event.type = BPF_PERF_EVENT_UPROBE;
info->perf_event.uprobe.offset = offset;
+ info->perf_event.uprobe.cookie = event->bpf_cookie;
return 0;
}
#endif
@@ -3553,6 +3632,7 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
ulen = info->perf_event.tracepoint.name_len;
info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
+ info->perf_event.tracepoint.cookie = event->bpf_cookie;
return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
}
@@ -3561,6 +3641,7 @@ static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
{
info->perf_event.event.type = event->attr.type;
info->perf_event.event.config = event->attr.config;
+ info->perf_event.event.cookie = event->bpf_cookie;
info->perf_event.type = BPF_PERF_EVENT_EVENT;
return 0;
}
@@ -3818,7 +3899,7 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
case BPF_PROG_TYPE_SK_LOOKUP:
return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
case BPF_PROG_TYPE_CGROUP_SKB:
- if (!capable(CAP_NET_ADMIN))
+ if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN))
/* cg-skb progs can be loaded by unpriv user.
* check permissions at attach time.
*/
@@ -4021,7 +4102,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
static int bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- if (!capable(CAP_NET_ADMIN))
+ if (!bpf_net_capable())
return -EPERM;
if (CHECK_ATTR(BPF_PROG_QUERY))
return -EINVAL;
@@ -4687,6 +4768,8 @@ static int bpf_map_get_info_by_fd(struct file *file,
info.btf_value_type_id = map->btf_value_type_id;
}
info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
+ if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS)
+ bpf_map_struct_ops_info_fill(&info, map);
if (bpf_map_is_offloaded(map)) {
err = bpf_map_offload_info_fill(&info, map);
@@ -4789,15 +4872,34 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
return err;
}
-#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size
+#define BPF_BTF_LOAD_LAST_FIELD btf_token_fd
static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
+ struct bpf_token *token = NULL;
+
if (CHECK_ATTR(BPF_BTF_LOAD))
return -EINVAL;
- if (!bpf_capable())
+ if (attr->btf_flags & ~BPF_F_TOKEN_FD)
+ return -EINVAL;
+
+ if (attr->btf_flags & BPF_F_TOKEN_FD) {
+ token = bpf_token_get_from_fd(attr->btf_token_fd);
+ if (IS_ERR(token))
+ return PTR_ERR(token);
+ if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) {
+ bpf_token_put(token);
+ token = NULL;
+ }
+ }
+
+ if (!bpf_token_capable(token, CAP_BPF)) {
+ bpf_token_put(token);
return -EPERM;
+ }
+
+ bpf_token_put(token);
return btf_new_fd(attr, uattr, uattr_size);
}
@@ -5415,6 +5517,20 @@ out_prog_put:
return ret;
}
+#define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd
+
+static int token_create(union bpf_attr *attr)
+{
+ if (CHECK_ATTR(BPF_TOKEN_CREATE))
+ return -EINVAL;
+
+ /* no flags are supported yet */
+ if (attr->token_create.flags)
+ return -EINVAL;
+
+ return bpf_token_create(attr);
+}
+
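/* Illustrative sketch, not part of the patch: minting a token from an
 * already-mounted bpffs instance whose mount options delegate some
 * functionality (e.g. delegate_cmds=any); everything other than
 * token_create.bpffs_fd and BPF_TOKEN_CREATE is an assumption here.
 */
#if 0
	union bpf_attr attr = {};
	int bpffs_fd, token_fd;

	bpffs_fd = open("/sys/fs/bpf", O_PATH);
	attr.token_create.bpffs_fd = bpffs_fd;
	token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
#endif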
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
union bpf_attr attr;
@@ -5548,6 +5664,9 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
case BPF_PROG_BIND_MAP:
err = bpf_prog_bind_map(&attr);
break;
+ case BPF_TOKEN_CREATE:
+ err = token_create(&attr);
+ break;
default:
err = -EINVAL;
break;
@@ -5654,7 +5773,7 @@ static const struct bpf_func_proto bpf_sys_bpf_proto = {
const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
BPF_CALL_1(bpf_sys_close, u32, fd)
@@ -5704,7 +5823,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_sys_bpf:
- return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
+ return !bpf_token_capable(prog->aux->token, CAP_PERFMON)
+ ? NULL : &bpf_sys_bpf_proto;
case BPF_FUNC_btf_find_by_name_kind:
return &bpf_btf_find_by_name_kind_proto;
case BPF_FUNC_sys_close:
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index e5c3500443c6..ec4e97c61eef 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -978,6 +978,8 @@ __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
BUILD_BUG_ON(__alignof__(struct bpf_iter_task_kern) !=
__alignof__(struct bpf_iter_task));
+ kit->pos = NULL;
+
switch (flags) {
case BPF_TASK_ITER_ALL_THREADS:
case BPF_TASK_ITER_ALL_PROCS:
diff --git a/kernel/bpf/token.c b/kernel/bpf/token.c
new file mode 100644
index 000000000000..d6ccf8d00eab
--- /dev/null
+++ b/kernel/bpf/token.c
@@ -0,0 +1,278 @@
+#include <linux/bpf.h>
+#include <linux/vmalloc.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/namei.h>
+#include <linux/user_namespace.h>
+#include <linux/security.h>
+
+static bool bpf_ns_capable(struct user_namespace *ns, int cap)
+{
+ return ns_capable(ns, cap) || (cap != CAP_SYS_ADMIN && ns_capable(ns, CAP_SYS_ADMIN));
+}
+
+bool bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ struct user_namespace *userns;
+
+ /* BPF token allows ns_capable() level of capabilities */
+ userns = token ? token->userns : &init_user_ns;
+ if (!bpf_ns_capable(userns, cap))
+ return false;
+ if (token && security_bpf_token_capable(token, cap) < 0)
+ return false;
+ return true;
+}
+
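/* Illustrative call pattern (mirrors the syscall.c changes above): with a
 * NULL token this degenerates to a plain capability check against
 * init_user_ns, so callers can unconditionally write
 *
 *	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
 *		return -EPERM;
 *
 * whether or not a token was supplied at load time.
 */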
+void bpf_token_inc(struct bpf_token *token)
+{
+ atomic64_inc(&token->refcnt);
+}
+
+static void bpf_token_free(struct bpf_token *token)
+{
+ security_bpf_token_free(token);
+ put_user_ns(token->userns);
+ kfree(token);
+}
+
+static void bpf_token_put_deferred(struct work_struct *work)
+{
+ struct bpf_token *token = container_of(work, struct bpf_token, work);
+
+ bpf_token_free(token);
+}
+
+void bpf_token_put(struct bpf_token *token)
+{
+ if (!token)
+ return;
+
+ if (!atomic64_dec_and_test(&token->refcnt))
+ return;
+
+ INIT_WORK(&token->work, bpf_token_put_deferred);
+ schedule_work(&token->work);
+}
+
+static int bpf_token_release(struct inode *inode, struct file *filp)
+{
+ struct bpf_token *token = filp->private_data;
+
+ bpf_token_put(token);
+ return 0;
+}
+
+static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
+{
+ struct bpf_token *token = filp->private_data;
+ u64 mask;
+
+ BUILD_BUG_ON(__MAX_BPF_CMD >= 64);
+ mask = BIT_ULL(__MAX_BPF_CMD) - 1;
+ if ((token->allowed_cmds & mask) == mask)
+ seq_printf(m, "allowed_cmds:\tany\n");
+ else
+ seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);
+
+ BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
+ mask = BIT_ULL(__MAX_BPF_MAP_TYPE) - 1;
+ if ((token->allowed_maps & mask) == mask)
+ seq_printf(m, "allowed_maps:\tany\n");
+ else
+ seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);
+
+ BUILD_BUG_ON(__MAX_BPF_PROG_TYPE >= 64);
+ mask = BIT_ULL(__MAX_BPF_PROG_TYPE) - 1;
+ if ((token->allowed_progs & mask) == mask)
+ seq_printf(m, "allowed_progs:\tany\n");
+ else
+ seq_printf(m, "allowed_progs:\t0x%llx\n", token->allowed_progs);
+
+ BUILD_BUG_ON(__MAX_BPF_ATTACH_TYPE >= 64);
+ mask = BIT_ULL(__MAX_BPF_ATTACH_TYPE) - 1;
+ if ((token->allowed_attachs & mask) == mask)
+ seq_printf(m, "allowed_attachs:\tany\n");
+ else
+ seq_printf(m, "allowed_attachs:\t0x%llx\n", token->allowed_attachs);
+}
+
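/* Illustrative fdinfo output for a token fd (values hypothetical); "any"
 * is printed when every bit below the corresponding __MAX_BPF_* is set:
 *
 *	allowed_cmds:	any
 *	allowed_maps:	0x10
 *	allowed_progs:	any
 *	allowed_attachs:	any
 */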
+#define BPF_TOKEN_INODE_NAME "bpf-token"
+
+static const struct inode_operations bpf_token_iops = { };
+
+static const struct file_operations bpf_token_fops = {
+ .release = bpf_token_release,
+ .show_fdinfo = bpf_token_show_fdinfo,
+};
+
+int bpf_token_create(union bpf_attr *attr)
+{
+ struct bpf_mount_opts *mnt_opts;
+ struct bpf_token *token = NULL;
+ struct user_namespace *userns;
+ struct inode *inode;
+ struct file *file;
+ struct path path;
+ struct fd f;
+ umode_t mode;
+ int err, fd;
+
+ f = fdget(attr->token_create.bpffs_fd);
+ if (!f.file)
+ return -EBADF;
+
+ path = f.file->f_path;
+ path_get(&path);
+ fdput(f);
+
+ if (path.dentry != path.mnt->mnt_sb->s_root) {
+ err = -EINVAL;
+ goto out_path;
+ }
+ if (path.mnt->mnt_sb->s_op != &bpf_super_ops) {
+ err = -EINVAL;
+ goto out_path;
+ }
+ err = path_permission(&path, MAY_ACCESS);
+ if (err)
+ goto out_path;
+
+ userns = path.dentry->d_sb->s_user_ns;
+ /*
+ * Enforce that creators of BPF tokens are in the same user
+ * namespace as the BPF FS instance. This makes reasoning about
+ * permissions a lot easier and we can always relax this later.
+ */
+ if (current_user_ns() != userns) {
+ err = -EPERM;
+ goto out_path;
+ }
+ if (!ns_capable(userns, CAP_BPF)) {
+ err = -EPERM;
+ goto out_path;
+ }
+
+ /* Creating BPF token in init_user_ns doesn't make much sense. */
+ if (current_user_ns() == &init_user_ns) {
+ err = -EOPNOTSUPP;
+ goto out_path;
+ }
+
+ mnt_opts = path.dentry->d_sb->s_fs_info;
+ if (mnt_opts->delegate_cmds == 0 &&
+ mnt_opts->delegate_maps == 0 &&
+ mnt_opts->delegate_progs == 0 &&
+ mnt_opts->delegate_attachs == 0) {
+ err = -ENOENT; /* no BPF token delegation is set up */
+ goto out_path;
+ }
+
+ mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
+ inode = bpf_get_inode(path.mnt->mnt_sb, NULL, mode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out_path;
+ }
+
+ inode->i_op = &bpf_token_iops;
+ inode->i_fop = &bpf_token_fops;
+ clear_nlink(inode); /* make sure it is unlinked */
+
+ file = alloc_file_pseudo(inode, path.mnt, BPF_TOKEN_INODE_NAME, O_RDWR, &bpf_token_fops);
+ if (IS_ERR(file)) {
+ iput(inode);
+ err = PTR_ERR(file);
+ goto out_path;
+ }
+
+ token = kzalloc(sizeof(*token), GFP_USER);
+ if (!token) {
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ atomic64_set(&token->refcnt, 1);
+
+ /* remember bpffs owning userns for future ns_capable() checks */
+ token->userns = get_user_ns(userns);
+
+ token->allowed_cmds = mnt_opts->delegate_cmds;
+ token->allowed_maps = mnt_opts->delegate_maps;
+ token->allowed_progs = mnt_opts->delegate_progs;
+ token->allowed_attachs = mnt_opts->delegate_attachs;
+
+ err = security_bpf_token_create(token, attr, &path);
+ if (err)
+ goto out_token;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ err = fd;
+ goto out_token;
+ }
+
+ file->private_data = token;
+ fd_install(fd, file);
+
+ path_put(&path);
+ return fd;
+
+out_token:
+ bpf_token_free(token);
+out_file:
+ fput(file);
+out_path:
+ path_put(&path);
+ return err;
+}
+
+struct bpf_token *bpf_token_get_from_fd(u32 ufd)
+{
+ struct fd f = fdget(ufd);
+ struct bpf_token *token;
+
+ if (!f.file)
+ return ERR_PTR(-EBADF);
+ if (f.file->f_op != &bpf_token_fops) {
+ fdput(f);
+ return ERR_PTR(-EINVAL);
+ }
+
+ token = f.file->private_data;
+ bpf_token_inc(token);
+ fdput(f);
+
+ return token;
+}
+
+bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
+{
+ if (!token)
+ return false;
+ if (!(token->allowed_cmds & BIT_ULL(cmd)))
+ return false;
+ return security_bpf_token_cmd(token, cmd) == 0;
+}
+
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
+{
+ if (!token || type >= __MAX_BPF_MAP_TYPE)
+ return false;
+
+ return token->allowed_maps & BIT_ULL(type);
+}
+
+bool bpf_token_allow_prog_type(const struct bpf_token *token,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type attach_type)
+{
+ if (!token || prog_type >= __MAX_BPF_PROG_TYPE || attach_type >= __MAX_BPF_ATTACH_TYPE)
+ return false;
+
+ return (token->allowed_progs & BIT_ULL(prog_type)) &&
+ (token->allowed_attachs & BIT_ULL(attach_type));
+}
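/* Illustrative semantics of the allow helpers above (token value assumed):
 * a prog load is delegated only when both the prog type and the (fixed-up)
 * expected attach type bits are set, e.g.
 *
 *	bpf_token_allow_prog_type(token, BPF_PROG_TYPE_XDP, BPF_XDP)
 *
 * requires BIT_ULL(BPF_PROG_TYPE_XDP) in allowed_progs and BIT_ULL(BPF_XDP)
 * in allowed_attachs.
 */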
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 65f598694d55..21f80383f8b2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -528,6 +528,11 @@ static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
(bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
}
+static bool is_async_callback_calling_insn(struct bpf_insn *insn)
+{
+ return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm);
+}
+
static bool is_storage_get_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_storage_get ||
@@ -1155,6 +1160,12 @@ static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
stack->spilled_ptr.type == SCALAR_VALUE;
}
+static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
+{
+ return stack->slot_type[0] == STACK_SPILL &&
+ stack->spilled_ptr.type == SCALAR_VALUE;
+}
+
/* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
* case they are equivalent, or it's STACK_ZERO, in which case we preserve
* more precise STACK_ZERO.
@@ -2264,8 +2275,7 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
}
/* Mark a register as having a completely unknown (scalar) value. */
-static void __mark_reg_unknown(const struct bpf_verifier_env *env,
- struct bpf_reg_state *reg)
+static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg)
{
/*
* Clear type, off, and union(map_ptr, range) and
@@ -2277,10 +2287,20 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
reg->ref_obj_id = 0;
reg->var_off = tnum_unknown;
reg->frameno = 0;
- reg->precise = !env->bpf_capable;
+ reg->precise = false;
__mark_reg_unbounded(reg);
}
+/* Mark a register as having a completely unknown (scalar) value,
+ * initialize .precise as true when not bpf capable.
+ */
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg)
+{
+ __mark_reg_unknown_imprecise(reg);
+ reg->precise = !env->bpf_capable;
+}
+
static void mark_reg_unknown(struct bpf_verifier_env *env,
struct bpf_reg_state *regs, u32 regno)
{
@@ -4380,20 +4400,6 @@ static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value;
}
-static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
-{
- return tnum_is_unknown(reg->var_off) &&
- reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
- reg->umin_value == 0 && reg->umax_value == U64_MAX &&
- reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
- reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
-}
-
-static bool register_is_bounded(struct bpf_reg_state *reg)
-{
- return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
-}
-
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
@@ -4403,6 +4409,18 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
return reg->type != SCALAR_VALUE;
}
+static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
+ struct bpf_reg_state *src_reg)
+{
+ if (src_reg->type == SCALAR_VALUE && !src_reg->id &&
+ !tnum_is_const(src_reg->var_off))
+ /* Ensure that src_reg has a valid ID that will be copied to
+ * dst_reg and then will be used by find_equal_scalars() to
+ * propagate min/max range.
+ */
+ src_reg->id = ++env->id_gen;
+}
+
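/* Illustrative effect of assign_scalar_id_before_mov() (sketch): for
 * "r2 = r1" where r1 is a non-constant scalar without an id, both copies
 * end up sharing one id, so a later bounds check such as "if r2 < 100"
 * lets find_equal_scalars() narrow r1's range as well.
 */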
/* Copy src state preserving dst->parent and dst->live fields */
static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
{
@@ -4438,6 +4456,11 @@ static bool is_bpf_st_mem(struct bpf_insn *insn)
return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
}
+static int get_reg_width(struct bpf_reg_state *reg)
+{
+ return fls64(reg->umax_value);
+}
+
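/* Worked example for get_reg_width(): umax_value == 0xff needs
 * fls64(0xff) == 8 bits and fits a 1-byte spill, while umax_value == 0x100
 * needs 9 bits, making a 1-byte spill a narrowing, relation-breaking one.
 */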
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
@@ -4487,13 +4510,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
return err;
mark_stack_slot_scratched(env, spi);
- if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && env->bpf_capable) {
+ if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
+ bool reg_value_fits;
+
+ reg_value_fits = get_reg_width(reg) <= BITS_PER_BYTE * size;
+ /* Make sure that reg had an ID to build a relation on spill. */
+ if (reg_value_fits)
+ assign_scalar_id_before_mov(env, reg);
save_register_state(env, state, spi, reg, size);
/* Break the relation on a narrowing spill. */
- if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
+ if (!reg_value_fits)
state->stack[spi].spilled_ptr.id = 0;
} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
- insn->imm != 0 && env->bpf_capable) {
+ env->bpf_capable) {
struct bpf_reg_state fake_reg = {};
__mark_reg_known(&fake_reg, insn->imm);
@@ -4640,7 +4669,20 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
return -EINVAL;
}
- /* Erase all spilled pointers. */
+ /* If writing_zero and the spi slot contains a spill of value 0,
+ * maintain the spill type.
+ */
+ if (writing_zero && *stype == STACK_SPILL &&
+ is_spilled_scalar_reg(&state->stack[spi])) {
+ struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr;
+
+ if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) {
+ zero_used = true;
+ continue;
+ }
+ }
+
+ /* Erase all other spilled pointers. */
state->stack[spi].spilled_ptr.type = NOT_INIT;
/* Update the slot type. */
@@ -4756,7 +4798,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
if (dst_regno < 0)
return 0;
- if (!(off % BPF_REG_SIZE) && size == spill_size) {
+ if (size <= spill_size &&
+ bpf_stack_narrow_access_ok(off, size, spill_size)) {
/* The earlier check_reg_arg() has decided the
* subreg_def for this insn. Save it first.
*/
@@ -4764,6 +4807,12 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
copy_register_state(&state->regs[dst_regno], reg);
state->regs[dst_regno].subreg_def = subreg_def;
+
+ /* Break the relation on a narrowing fill.
+ * coerce_reg_to_size will adjust the boundaries.
+ */
+ if (get_reg_width(reg) > size * BITS_PER_BYTE)
+ state->regs[dst_regno].id = 0;
} else {
int spill_cnt = 0, zero_cnt = 0;
@@ -5211,6 +5260,11 @@ bad_type:
return -EINVAL;
}
+static bool in_sleepable(struct bpf_verifier_env *env)
+{
+ return env->prog->aux->sleepable;
+}
+
/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
* can dereference RCU protected pointers and result is PTR_TRUSTED.
*/
@@ -5218,7 +5272,7 @@ static bool in_rcu_cs(struct bpf_verifier_env *env)
{
return env->cur_state->active_rcu_lock ||
env->cur_state->active_lock.ptr ||
- !env->prog->aux->sleepable;
+ !in_sleepable(env);
}
/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
@@ -5227,7 +5281,9 @@ BTF_ID(struct, prog_test_ref_kfunc)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
#endif
+#ifdef CONFIG_BPF_JIT
BTF_ID(struct, bpf_cpumask)
+#endif
BTF_ID(struct, task_struct)
BTF_SET_END(rcu_protected_types)
@@ -5768,6 +5824,17 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
+static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
+{
+ if (env->prog->jit_requested)
+ return round_up(stack_depth, 16);
+
+ /* round up to 32-bytes, since this is granularity
+ * of interpreter stack size
+ */
+ return round_up(max_t(u32, stack_depth, 1), 32);
+}
+
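/* Worked example for round_up_stack_depth(): with stack_depth == 40 a
 * JITed program accounts round_up(40, 16) == 48 bytes, whereas the
 * interpreter still accounts round_up(max(40, 1), 32) == 64 bytes.
 */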
/* starting from main bpf function walk all instructions of the function
* and recursively walk all callees that given function can call.
* Ignore jump and exit insns.
@@ -5811,10 +5878,7 @@ process_func:
depth);
return -EACCES;
}
- /* round up to 32-bytes, since this is granularity
- * of interpreter stack size
- */
- depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
+ depth += round_up_stack_depth(env, subprog[idx].stack_depth);
if (depth > MAX_BPF_STACK) {
verbose(env, "combined stack size of %d calls is %d. Too large\n",
frame + 1, depth);
@@ -5908,7 +5972,7 @@ continue_func:
*/
if (frame == 0)
return 0;
- depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
+ depth -= round_up_stack_depth(env, subprog[idx].stack_depth);
frame--;
i = ret_insn[frame];
idx = ret_prog[frame];
@@ -6039,10 +6103,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
* values are also truncated so we push 64-bit bounds into
* 32-bit bounds. Above were truncated < 32-bits already.
*/
- if (size < 4) {
+ if (size < 4)
__mark_reg32_unbounded(reg);
- reg_bounds_sync(reg);
- }
+
+ reg_bounds_sync(reg);
}
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
@@ -8198,6 +8262,7 @@ found:
switch ((int)reg->type) {
case PTR_TO_BTF_ID:
case PTR_TO_BTF_ID | PTR_TRUSTED:
+ case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
case PTR_TO_BTF_ID | MEM_RCU:
case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
@@ -9300,6 +9365,18 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0);
if (ret)
return ret;
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
+ struct bpf_call_arg_meta meta;
+ int err;
+
+ if (register_is_null(reg) && type_may_be_null(arg->arg_type))
+ continue;
+
+ memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
+ err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
+ err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
+ if (err)
+ return err;
} else {
bpf_log(log, "verifier bug: unrecognized arg#%d type %d\n",
i, arg->arg_type);
@@ -9375,9 +9452,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins
return -EFAULT;
}
- if (insn->code == (BPF_JMP | BPF_CALL) &&
- insn->src_reg == 0 &&
- insn->imm == BPF_FUNC_timer_set_callback) {
+ if (is_async_callback_calling_insn(insn)) {
struct bpf_verifier_state *async_cb;
/* there is no real recursion here. timer callbacks are async */
@@ -9436,6 +9511,13 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (subprog_is_global(env, subprog)) {
const char *sub_name = subprog_name(env, subprog);
+ /* Only global subprogs cannot be called with a lock held. */
+ if (env->cur_state->active_lock.ptr) {
+ verbose(env, "global function calls are not allowed while holding a lock,\n"
+ "use static function instead\n");
+ return -EINVAL;
+ }
+
if (err) {
verbose(env, "Caller passes invalid args into func#%d ('%s')\n",
subprog, sub_name);
@@ -10092,7 +10174,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}
- if (!env->prog->aux->sleepable && fn->might_sleep) {
+ if (!in_sleepable(env) && fn->might_sleep) {
verbose(env, "helper call might sleep in a non-sleepable prog\n");
return -EINVAL;
}
@@ -10122,7 +10204,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}
- if (env->prog->aux->sleepable && is_storage_get_function(func_id))
+ if (in_sleepable(env) && is_storage_get_function(func_id))
env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
}
@@ -10618,24 +10700,6 @@ static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta)
return meta->kfunc_flags & KF_RCU_PROTECTED;
}
-static bool __kfunc_param_match_suffix(const struct btf *btf,
- const struct btf_param *arg,
- const char *suffix)
-{
- int suffix_len = strlen(suffix), len;
- const char *param_name;
-
- /* In the future, this can be ported to use BTF tagging */
- param_name = btf_name_by_offset(btf, arg->name_off);
- if (str_is_empty(param_name))
- return false;
- len = strlen(param_name);
- if (len < suffix_len)
- return false;
- param_name += len - suffix_len;
- return !strncmp(param_name, suffix, suffix_len);
-}
-
static bool is_kfunc_arg_mem_size(const struct btf *btf,
const struct btf_param *arg,
const struct bpf_reg_state *reg)
@@ -10646,7 +10710,7 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf,
if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
return false;
- return __kfunc_param_match_suffix(btf, arg, "__sz");
+ return btf_param_match_suffix(btf, arg, "__sz");
}
static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
@@ -10659,47 +10723,47 @@ static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
return false;
- return __kfunc_param_match_suffix(btf, arg, "__szk");
+ return btf_param_match_suffix(btf, arg, "__szk");
}
static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__opt");
+ return btf_param_match_suffix(btf, arg, "__opt");
}
static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__k");
+ return btf_param_match_suffix(btf, arg, "__k");
}
static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__ign");
+ return btf_param_match_suffix(btf, arg, "__ign");
}
static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__alloc");
+ return btf_param_match_suffix(btf, arg, "__alloc");
}
static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__uninit");
+ return btf_param_match_suffix(btf, arg, "__uninit");
}
static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr");
+ return btf_param_match_suffix(btf, arg, "__refcounted_kptr");
}
static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__nullable");
+ return btf_param_match_suffix(btf, arg, "__nullable");
}
static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
{
- return __kfunc_param_match_suffix(btf, arg, "__str");
+ return btf_param_match_suffix(btf, arg, "__str");
}
static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
@@ -10969,7 +11033,7 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
* type to our caller. When a set of conditions hold in the BTF type of
* arguments, we resolve it to a known kfunc_ptr_arg_type.
*/
- if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
+ if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
return KF_ARG_PTR_TO_CTX;
if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
@@ -11481,7 +11545,7 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
return true;
fallthrough;
default:
- return env->prog->aux->sleepable;
+ return in_sleepable(env);
}
}
@@ -12002,7 +12066,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
}
sleepable = is_kfunc_sleepable(&meta);
- if (sleepable && !env->prog->aux->sleepable) {
+ if (sleepable && !in_sleepable(env)) {
verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
return -EACCES;
}
@@ -12826,6 +12890,19 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
}
switch (base_type(ptr_reg->type)) {
+ case PTR_TO_CTX:
+ case PTR_TO_MAP_VALUE:
+ case PTR_TO_MAP_KEY:
+ case PTR_TO_STACK:
+ case PTR_TO_PACKET_META:
+ case PTR_TO_PACKET:
+ case PTR_TO_TP_BUFFER:
+ case PTR_TO_BTF_ID:
+ case PTR_TO_MEM:
+ case PTR_TO_BUF:
+ case PTR_TO_FUNC:
+ case CONST_PTR_TO_DYNPTR:
+ break;
case PTR_TO_FLOW_KEYS:
if (known)
break;
@@ -12835,16 +12912,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
if (known && smin_val == 0 && opcode == BPF_ADD)
break;
fallthrough;
- case PTR_TO_PACKET_END:
- case PTR_TO_SOCKET:
- case PTR_TO_SOCK_COMMON:
- case PTR_TO_TCP_SOCK:
- case PTR_TO_XDP_SOCK:
+ default:
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
dst, reg_type_str(env, ptr_reg->type));
return -EACCES;
- default:
- break;
}
/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
@@ -13905,20 +13976,13 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (BPF_SRC(insn->code) == BPF_X) {
struct bpf_reg_state *src_reg = regs + insn->src_reg;
struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
- bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id &&
- !tnum_is_const(src_reg->var_off);
if (BPF_CLASS(insn->code) == BPF_ALU64) {
if (insn->off == 0) {
/* case: R1 = R2
* copy register state to dest reg
*/
- if (need_id)
- /* Assign src and dst registers the same ID
- * that will be used by find_equal_scalars()
- * to propagate min/max range.
- */
- src_reg->id = ++env->id_gen;
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = DEF_NOT_SUBREG;
@@ -13933,8 +13997,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
bool no_sext;
no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
- if (no_sext && need_id)
- src_reg->id = ++env->id_gen;
+ if (no_sext)
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
dst_reg->id = 0;
@@ -13954,10 +14018,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
if (insn->off == 0) {
- bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
+ bool is_src_reg_u32 = get_reg_width(src_reg) <= 32;
- if (is_src_reg_u32 && need_id)
- src_reg->id = ++env->id_gen;
+ if (is_src_reg_u32)
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
/* Make sure ID is cleared if src_reg is not in u32
* range otherwise dst_reg min/max could be incorrectly
@@ -13971,8 +14035,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
/* case: W1 = (s8, s16)W2 */
bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
- if (no_sext && need_id)
- src_reg->id = ++env->id_gen;
+ if (no_sext)
+ assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
dst_reg->id = 0;
@@ -15529,7 +15593,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
return DONE_EXPLORING;
case BPF_CALL:
- if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback)
+ if (is_async_callback_calling_insn(insn))
/* Mark this call insn as a prune point to trigger
* is_state_visited() check before call itself is
* processed by __check_func_call(). Otherwise new
@@ -16445,6 +16509,43 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
}
}
+static struct bpf_reg_state unbound_reg;
+
+static __init int unbound_reg_init(void)
+{
+ __mark_reg_unknown_imprecise(&unbound_reg);
+ unbound_reg.live |= REG_LIVE_READ;
+ return 0;
+}
+late_initcall(unbound_reg_init);
+
+static bool is_stack_all_misc(struct bpf_verifier_env *env,
+ struct bpf_stack_state *stack)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) {
+ if ((stack->slot_type[i] == STACK_MISC) ||
+ (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack))
+ continue;
+ return false;
+ }
+
+ return true;
+}
+
+static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env,
+ struct bpf_stack_state *stack)
+{
+ if (is_spilled_scalar_reg64(stack))
+ return &stack->spilled_ptr;
+
+ if (is_stack_all_misc(env, stack))
+ return &unbound_reg;
+
+ return NULL;
+}
+
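/* Illustrative pairing (sketch): if the old state spilled a 64-bit scalar
 * at a slot and the current state has all eight slot bytes marked
 * STACK_MISC, the two are compared via regsafe() using the fake
 * unbound_reg for the MISC side, so scalar ids still participate.
 */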
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
{
@@ -16483,6 +16584,20 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
if (i >= cur->allocated_stack)
return false;
+ /* 64-bit scalar spill vs all slots MISC and vice versa.
+ * Load from all slots MISC produces unbound scalar.
+ * Construct a fake register for such stack and call
+ * regsafe() to ensure scalar ids are compared.
+ */
+ old_reg = scalar_reg_for_stack(env, &old->stack[spi]);
+ cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]);
+ if (old_reg && cur_reg) {
+ if (!regsafe(env, old_reg, cur_reg, idmap, exact))
+ return false;
+ i += BPF_REG_SIZE - 1;
+ continue;
+ }
+
/* if old state was safe with misc data in the stack
* it will be safe with zero-initialized stack.
* The opposite is not true
@@ -16600,6 +16715,9 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
{
int i;
+ if (old->callback_depth > cur->callback_depth)
+ return false;
+
for (i = 0; i < MAX_BPF_REG; i++)
if (!regsafe(env, &old->regs[i], &cur->regs[i],
&env->idmap_scratch, exact))
@@ -17027,7 +17145,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
}
/* attempt to detect infinite loop to avoid unnecessary doomed work */
if (states_maybe_looping(&sl->state, cur) &&
- states_equal(env, &sl->state, cur, false) &&
+ states_equal(env, &sl->state, cur, true) &&
!iter_active_depths_differ(&sl->state, cur) &&
sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
verbose_linfo(env, insn_idx, "; ");
@@ -17536,7 +17654,6 @@ static int do_check(struct bpf_verifier_env *env)
if (env->cur_state->active_lock.ptr) {
if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
- (insn->src_reg == BPF_PSEUDO_CALL) ||
(insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
(insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
verbose(env, "function calls are not allowed while holding a lock\n");
@@ -17584,14 +17701,12 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
process_bpf_exit_full:
- if (env->cur_state->active_lock.ptr &&
- !in_rbtree_lock_required_cb(env)) {
+ if (env->cur_state->active_lock.ptr && !env->cur_state->curframe) {
verbose(env, "bpf_spin_unlock is missing\n");
return -EINVAL;
}
- if (env->cur_state->active_rcu_lock &&
- !in_rbtree_lock_required_cb(env)) {
+ if (env->cur_state->active_rcu_lock && !env->cur_state->curframe) {
verbose(env, "bpf_rcu_read_unlock is missing\n");
return -EINVAL;
}
@@ -17920,6 +18035,8 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SK_STORAGE:
case BPF_MAP_TYPE_TASK_STORAGE:
case BPF_MAP_TYPE_CGRP_STORAGE:
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
break;
default:
verbose(env,
@@ -19565,7 +19682,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
}
if (is_storage_get_function(insn->imm)) {
- if (!env->prog->aux->sleepable ||
+ if (!in_sleepable(env) ||
env->insn_aux_data[i + delta].storage_get_func_atomic)
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
else
@@ -19809,6 +19926,23 @@ patch_map_ops_generic:
continue;
}
+ /* Implement bpf_kptr_xchg inline */
+ if (prog->jit_requested && BITS_PER_LONG == 64 &&
+ insn->imm == BPF_FUNC_kptr_xchg &&
+ bpf_jit_supports_ptr_xchg()) {
+ insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
+ insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
+ cnt = 2;
+
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ continue;
+ }
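/* Illustrative equivalence (sketch) of the two-insn bpf_kptr_xchg inline:
 *
 *	r0 = r2;                    // new kptr value
 *	r0 = xchg((u64 *)r1, r0);   // atomic swap, old value returned in r0
 */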
patch_call_imm:
fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have prototype and verifier allowed
@@ -20041,7 +20175,6 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
state->first_insn_idx = env->subprog_info[subprog].start;
state->last_insn_idx = -1;
-
regs = state->frame[state->curframe]->regs;
if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
const char *sub_name = subprog_name(env, subprog);
@@ -20085,6 +20218,18 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
mark_reg_known_zero(env, regs, i);
reg->mem_size = arg->mem_size;
reg->id = ++env->id_gen;
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
+ reg->type = PTR_TO_BTF_ID;
+ if (arg->arg_type & PTR_MAYBE_NULL)
+ reg->type |= PTR_MAYBE_NULL;
+ if (arg->arg_type & PTR_UNTRUSTED)
+ reg->type |= PTR_UNTRUSTED;
+ if (arg->arg_type & PTR_TRUSTED)
+ reg->type |= PTR_TRUSTED;
+ mark_reg_known_zero(env, regs, i);
+ reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */
+ reg->btf_id = arg->btf_id;
+ reg->id = ++env->id_gen;
} else {
WARN_ONCE(1, "BUG: unhandled arg#%d type %d\n",
i - BPF_REG_1, arg->arg_type);
@@ -20233,10 +20378,12 @@ static void print_verification_stats(struct bpf_verifier_env *env)
static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
{
const struct btf_type *t, *func_proto;
+ const struct bpf_struct_ops_desc *st_ops_desc;
const struct bpf_struct_ops *st_ops;
const struct btf_member *member;
struct bpf_prog *prog = env->prog;
u32 btf_id, member_idx;
+ struct btf *btf;
const char *mname;
if (!prog->gpl_compatible) {
@@ -20244,15 +20391,30 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
return -EINVAL;
}
+ if (!prog->aux->attach_btf_id)
+ return -ENOTSUPP;
+
+ btf = prog->aux->attach_btf;
+ if (btf_is_module(btf)) {
+ /* Make sure st_ops is valid through the lifetime of env */
+ env->attach_btf_mod = btf_try_get_module(btf);
+ if (!env->attach_btf_mod) {
+ verbose(env, "struct_ops module %s is not found\n",
+ btf_get_name(btf));
+ return -ENOTSUPP;
+ }
+ }
+
btf_id = prog->aux->attach_btf_id;
- st_ops = bpf_struct_ops_find(btf_id);
- if (!st_ops) {
+ st_ops_desc = bpf_struct_ops_find(btf, btf_id);
+ if (!st_ops_desc) {
verbose(env, "attach_btf_id %u is not a supported struct\n",
btf_id);
return -ENOTSUPP;
}
+ st_ops = st_ops_desc->st_ops;
- t = st_ops->type;
+ t = st_ops_desc->type;
member_idx = prog->expected_attach_type;
if (member_idx >= btf_type_vlen(t)) {
verbose(env, "attach to invalid member idx %u of struct %s\n",
@@ -20261,8 +20423,8 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
member = &btf_type_member(t)[member_idx];
- mname = btf_name_by_offset(btf_vmlinux, member->name_off);
- func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
+ mname = btf_name_by_offset(btf, member->name_off);
+ func_proto = btf_type_resolve_func_ptr(btf, member->type,
NULL);
if (!func_proto) {
verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
@@ -20280,6 +20442,12 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
}
+ /* btf_ctx_access() used this to provide argument type info */
+ prog->aux->ctx_arg_info =
+ st_ops_desc->arg_info[member_idx].info;
+ prog->aux->ctx_arg_info_size =
+ st_ops_desc->arg_info[member_idx].cnt;
+
prog->aux->attach_func_proto = func_proto;
prog->aux->attach_func_name = mname;
env->ops = st_ops->verifier_ops;
@@ -20764,7 +20932,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
- is_priv = bpf_capable();
+
+ env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
+ env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
+ env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
+ env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
+ env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);
bpf_get_btf_vmlinux();
@@ -20796,12 +20969,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
- env->allow_ptr_leaks = bpf_allow_ptr_leaks();
- env->allow_uninit_stack = bpf_allow_uninit_stack();
- env->bypass_spec_v1 = bpf_bypass_spec_v1();
- env->bypass_spec_v4 = bpf_bypass_spec_v4();
- env->bpf_capable = bpf_capable();
-
if (is_priv)
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
@@ -20967,6 +21134,8 @@ err_release_maps:
env->prog->expected_attach_type = 0;
*prog = env->prog;
+
+ module_put(env->attach_btf_mod);
err_unlock:
if (!is_priv)
mutex_unlock(&bpf_verifier_lock);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ba36c073304a..927bef3a598a 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2562,7 +2562,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
update_partition_sd_lb(cs, old_prs);
out_free:
free_cpumasks(NULL, &tmp);
- return 0;
+ return retval;
}
/**
@@ -2598,9 +2598,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
return 0;
- if (alloc_cpumasks(NULL, &tmp))
- return -ENOMEM;
-
if (*buf)
compute_effective_exclusive_cpumask(trialcs, NULL);
@@ -2615,6 +2612,9 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval)
return retval;
+ if (alloc_cpumasks(NULL, &tmp))
+ return -ENOMEM;
+
if (old_prs) {
if (cpumask_empty(trialcs->effective_xcpus)) {
invalidate = true;
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index a8350d2d63e6..07e2284bb499 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -562,10 +562,10 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
}
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
-BTF_SET8_START(bpf_rstat_kfunc_ids)
+BTF_KFUNCS_START(bpf_rstat_kfunc_ids)
BTF_ID_FLAGS(func, cgroup_rstat_updated)
BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
-BTF_SET8_END(bpf_rstat_kfunc_ids)
+BTF_KFUNCS_END(bpf_rstat_kfunc_ids)
static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config
index 4722b998a324..509ee703de15 100644
--- a/kernel/configs/debug.config
+++ b/kernel/configs/debug.config
@@ -40,6 +40,12 @@ CONFIG_UBSAN_ENUM=y
CONFIG_UBSAN_SHIFT=y
CONFIG_UBSAN_UNREACHABLE=y
#
+# Networking Debugging
+#
+CONFIG_NET_DEV_REFCNT_TRACKER=y
+CONFIG_NET_NS_REFCNT_TRACKER=y
+CONFIG_DEBUG_NET=y
+#
# Memory Debugging
#
# CONFIG_DEBUG_PAGEALLOC is not set
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 6b213c8252d6..d05066cb40b2 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1348,8 +1348,6 @@ do_full_getstr:
/* PROMPT can only be set if we have MEM_READ permission. */
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
raw_smp_processor_id());
- if (defcmd_in_progress)
- strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
/*
* Fetch command from keyboard
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f0f0f71213a1..5ecfa57e3b97 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9302,10 +9302,6 @@ void perf_event_bpf_event(struct bpf_prog *prog,
{
struct perf_bpf_event bpf_event;
- if (type <= PERF_BPF_EVENT_UNKNOWN ||
- type >= PERF_BPF_EVENT_MAX)
- return;
-
switch (type) {
case PERF_BPF_EVENT_PROG_LOAD:
case PERF_BPF_EVENT_PROG_UNLOAD:
@@ -9313,7 +9309,7 @@ void perf_event_bpf_event(struct bpf_prog *prog,
perf_event_bpf_emit_ksymbols(prog, type);
break;
default:
- break;
+ return;
}
if (!atomic_read(&nr_bpf_events))
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 485bb0389b48..929e98c62965 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -537,7 +537,7 @@ retry:
}
}
- ret = __replace_page(vma, vaddr, old_page, new_page);
+ ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
if (new_page)
put_page(new_page);
put_old:
diff --git a/kernel/exit.c b/kernel/exit.c
index 3988a02efaef..dfb963d2f862 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1127,17 +1127,14 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
* and nobody can change them.
*
* psig->stats_lock also protects us from our sub-threads
- * which can reap other children at the same time. Until
- * we change k_getrusage()-like users to rely on this lock
- * we have to take ->siglock as well.
+ * which can reap other children at the same time.
*
* We use thread_group_cputime_adjusted() to get times for
* the thread group, which consolidates times for all threads
* in the group including the group leader.
*/
thread_group_cputime_adjusted(p, &tgutime, &tgstime);
- spin_lock_irq(&current->sighand->siglock);
- write_seqlock(&psig->stats_lock);
+ write_seqlock_irq(&psig->stats_lock);
psig->cutime += tgutime + sig->cutime;
psig->cstime += tgstime + sig->cstime;
psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
@@ -1160,8 +1157,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
psig->cmaxrss = maxrss;
task_io_accounting_add(&psig->ioac, &p->ioac);
task_io_accounting_add(&psig->ioac, &sig->ioac);
- write_sequnlock(&psig->stats_lock);
- spin_unlock_irq(&current->sighand->siglock);
+ write_sequnlock_irq(&psig->stats_lock);
}
if (wo->wo_rusage)
diff --git a/kernel/fork.c b/kernel/fork.c
index 47ff3b35352e..0d944e92a43f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1748,6 +1748,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
spin_lock(&fs->lock);
+ /* "users" and "in_exec" locked for check_unsafe_exec() */
if (fs->in_exec) {
spin_unlock(&fs->lock);
return -EAGAIN;
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index e0e853412c15..1e78ef24321e 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -627,12 +627,21 @@ retry:
}
/*
- * PI futexes can not be requeued and must remove themselves from the
- * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
+ * PI futexes can not be requeued and must remove themselves from the hash
+ * bucket. The hash bucket lock (i.e. lock_ptr) is held.
*/
void futex_unqueue_pi(struct futex_q *q)
{
- __futex_unqueue(q);
+ /*
+ * If the lock was not acquired (due to timeout or signal) then the
+ * rt_waiter is removed before futex_q is. If this is observed by
+ * an unlocker after dropping the rtmutex wait lock and before
+ * acquiring the hash bucket lock, then the unlocker dequeues the
+ * futex_q from the hash bucket list to guarantee consistent state
+ * vs. userspace. Therefore the dequeue here must be conditional.
+ */
+ if (!plist_node_empty(&q->list))
+ __futex_unqueue(q);
BUG_ON(!q->pi_state);
put_pi_state(q->pi_state);
diff --git a/kernel/futex/pi.c b/kernel/futex/pi.c
index 90e5197f4e56..5722467f2737 100644
--- a/kernel/futex/pi.c
+++ b/kernel/futex/pi.c
@@ -1135,6 +1135,7 @@ retry:
hb = futex_hash(&key);
spin_lock(&hb->lock);
+retry_hb:
/*
* Check waiters first. We do not trust user space values at
@@ -1177,12 +1178,17 @@ retry:
/*
* Futex vs rt_mutex waiter state -- if there are no rt_mutex
* waiters even though futex thinks there are, then the waiter
- * is leaving and the uncontended path is safe to take.
+ * is leaving. The entry needs to be removed from the list so a
+ * new futex_lock_pi() is not using this stale PI-state while
+ * the futex is available in user space again.
+ * There can be more than one task on its way out so it needs
+ * to retry.
*/
rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
if (!rt_waiter) {
+ __futex_unqueue(top_waiter);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- goto do_uncontended;
+ goto retry_hb;
}
get_pi_state(pi_state);
@@ -1217,7 +1223,6 @@ retry:
return ret;
}
-do_uncontended:
/*
* We have no kernel internal state, i.e. no waiters in the
* kernel. Waiters which are about to queue themselves are stuck
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 27ca1c866f29..371eb1711d34 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -600,7 +600,7 @@ int __init early_irq_init(void)
mutex_init(&desc[i].request_mutex);
init_waitqueue_head(&desc[i].wait_for_threads);
desc_set_defaults(i, &desc[i], node, NULL, NULL);
- irq_resend_init(desc);
+ irq_resend_init(&desc[i]);
}
return arch_early_irq_init();
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d5a0ee40bf66..9d9095e81792 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1993,7 +1993,7 @@ NOKPROBE_SYMBOL(__kretprobe_find_ret_addr);
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
struct llist_node **cur)
{
- struct kretprobe_instance *ri = NULL;
+ struct kretprobe_instance *ri;
kprobe_opcode_t *ret;
if (WARN_ON_ONCE(!cur))
@@ -2802,7 +2802,7 @@ static int show_kprobe_addr(struct seq_file *pi, void *v)
{
struct hlist_head *head;
struct kprobe *p, *kp;
- const char *sym = NULL;
+ const char *sym;
unsigned int i = *(loff_t *) v;
unsigned long offset = 0;
char *modname, namebuf[KSYM_NAME_LEN];
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1ae851777806..b2bccfd37c38 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1013,6 +1013,38 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
return needmore;
}
+static void swake_up_one_online_ipi(void *arg)
+{
+ struct swait_queue_head *wqh = arg;
+
+ swake_up_one(wqh);
+}
+
+static void swake_up_one_online(struct swait_queue_head *wqh)
+{
+ int cpu = get_cpu();
+
+ /*
+ * If called from rcutree_report_cpu_dead(), wake up
+ * is dangerous that late in the CPU-down hotplug process. The
+ * scheduler might queue an ignored hrtimer. Defer the wake up
+ * to an online CPU instead.
+ */
+ if (unlikely(cpu_is_offline(cpu))) {
+ int target;
+
+ target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
+ cpu_online_mask);
+
+ smp_call_function_single(target, swake_up_one_online_ipi,
+ wqh, 0);
+ put_cpu();
+ } else {
+ put_cpu();
+ swake_up_one(wqh);
+ }
+}
+
/*
* Awaken the grace-period kthread. Don't do a self-awaken (unless in an
* interrupt or softirq handler, in which case we just might immediately
@@ -1037,7 +1069,7 @@ static void rcu_gp_kthread_wake(void)
return;
WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
- swake_up_one(&rcu_state.gp_wq);
+ swake_up_one_online(&rcu_state.gp_wq);
}
/*
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 6d7cea5d591f..2ac440bc7e10 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -173,7 +173,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
return ret;
}
-
/*
* Report the exit from RCU read-side critical section for the last task
* that queued itself during or before the current expedited preemptible-RCU
@@ -201,7 +200,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
if (wake) {
smp_mb(); /* EGP done before wake_up(). */
- swake_up_one(&rcu_state.expedited_wq);
+ swake_up_one_online(&rcu_state.expedited_wq);
}
break;
}
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 2ad881d07752..4e715b9b278e 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -162,6 +162,9 @@
| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
| MEMBARRIER_CMD_GET_REGISTRATIONS)
+static DEFINE_MUTEX(membarrier_ipi_mutex);
+#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)
+
static void ipi_mb(void *info)
{
smp_mb(); /* IPIs should be serializing but paranoid. */
@@ -259,6 +262,7 @@ static int membarrier_global_expedited(void)
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
+ SERIALIZE_IPI();
cpus_read_lock();
rcu_read_lock();
for_each_online_cpu(cpu) {
@@ -347,6 +351,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
return -ENOMEM;
+ SERIALIZE_IPI();
cpus_read_lock();
if (cpu_id >= 0) {
@@ -460,6 +465,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
* between threads which are users of @mm has its membarrier state
* updated.
*/
+ SERIALIZE_IPI();
cpus_read_lock();
rcu_read_lock();
for_each_online_cpu(cpu) {
diff --git a/kernel/sys.c b/kernel/sys.c
index e219fcfa112d..f8e543f1e38a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1785,21 +1785,24 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
struct task_struct *t;
unsigned long flags;
u64 tgutime, tgstime, utime, stime;
- unsigned long maxrss = 0;
+ unsigned long maxrss;
+ struct mm_struct *mm;
struct signal_struct *sig = p->signal;
+ unsigned int seq = 0;
- memset((char *)r, 0, sizeof (*r));
+retry:
+ memset(r, 0, sizeof(*r));
utime = stime = 0;
+ maxrss = 0;
if (who == RUSAGE_THREAD) {
task_cputime_adjusted(current, &utime, &stime);
accumulate_thread_rusage(p, r);
maxrss = sig->maxrss;
- goto out;
+ goto out_thread;
}
- if (!lock_task_sighand(p, &flags))
- return;
+ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
switch (who) {
case RUSAGE_BOTH:
@@ -1819,9 +1822,6 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
fallthrough;
case RUSAGE_SELF:
- thread_group_cputime_adjusted(p, &tgutime, &tgstime);
- utime += tgutime;
- stime += tgstime;
r->ru_nvcsw += sig->nvcsw;
r->ru_nivcsw += sig->nivcsw;
r->ru_minflt += sig->min_flt;
@@ -1830,28 +1830,42 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_oublock += sig->oublock;
if (maxrss < sig->maxrss)
maxrss = sig->maxrss;
+
+ rcu_read_lock();
__for_each_thread(sig, t)
accumulate_thread_rusage(t, r);
+ rcu_read_unlock();
+
break;
default:
BUG();
}
- unlock_task_sighand(p, &flags);
-out:
- r->ru_utime = ns_to_kernel_old_timeval(utime);
- r->ru_stime = ns_to_kernel_old_timeval(stime);
+ if (need_seqretry(&sig->stats_lock, seq)) {
+ seq = 1;
+ goto retry;
+ }
+ done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
- if (who != RUSAGE_CHILDREN) {
- struct mm_struct *mm = get_task_mm(p);
+ if (who == RUSAGE_CHILDREN)
+ goto out_children;
- if (mm) {
- setmax_mm_hiwater_rss(&maxrss, mm);
- mmput(mm);
- }
+ thread_group_cputime_adjusted(p, &tgutime, &tgstime);
+ utime += tgutime;
+ stime += tgstime;
+
+out_thread:
+ mm = get_task_mm(p);
+ if (mm) {
+ setmax_mm_hiwater_rss(&maxrss, mm);
+ mmput(mm);
}
+
+out_children:
r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
+ r->ru_utime = ns_to_kernel_old_timeval(utime);
+ r->ru_stime = ns_to_kernel_old_timeval(stime);
}
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
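Editor's note: the getrusage() rework above switches from lock_task_sighand() to the sig->stats_lock seqlock and the "lockless first pass, locked retry" idiom; note that the memset() and the utime/stime/maxrss resets moved under the retry label so every pass starts from clean totals. A minimal sketch of the idiom, assuming a seqlock_t-protected statistics block (struct and helper names hypothetical):

    static void demo_read_stats(seqlock_t *lock, struct demo_stats *out)
    {
        unsigned long flags;
        int seq = 0;

    retry:
        memset(out, 0, sizeof(*out));   /* restart from clean totals */
        flags = read_seqbegin_or_lock_irqsave(lock, &seq);

        demo_snapshot(out);             /* may race on the first pass */

        if (need_seqretry(lock, seq)) {
            seq = 1;                    /* second pass takes the lock */
            goto retry;
        }
        done_seqretry_irqrestore(lock, seq, flags);
    }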
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c108ed8a9804..3052b1f1168e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -99,6 +99,7 @@ static u64 suspend_start;
* Interval: 0.5sec.
*/
#define WATCHDOG_INTERVAL (HZ >> 1)
+#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
/*
* Threshold: 0.0312s, when doubled: 0.0625s.
@@ -134,6 +135,7 @@ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;
+static int64_t watchdog_max_interval;
static inline void clocksource_watchdog_lock(unsigned long *flags)
{
@@ -399,8 +401,8 @@ static inline void clocksource_reset_watchdog(void)
static void clocksource_watchdog(struct timer_list *unused)
{
u64 csnow, wdnow, cslast, wdlast, delta;
+ int64_t wd_nsec, cs_nsec, interval;
int next_cpu, reset_pending;
- int64_t wd_nsec, cs_nsec;
struct clocksource *cs;
enum wd_read_status read_ret;
unsigned long extra_wait = 0;
@@ -470,6 +472,27 @@ static void clocksource_watchdog(struct timer_list *unused)
if (atomic_read(&watchdog_reset_pending))
continue;
+ /*
+ * The processing of timer softirqs can get delayed (usually
+ * on account of ksoftirqd not getting to run in a timely
+ * manner), which causes the watchdog interval to stretch.
+ * Skew detection may fail for longer watchdog intervals
+ * on account of fixed margins being used.
+ * Some clocksources, e.g. acpi_pm, cannot tolerate
+ * watchdog intervals longer than a few seconds.
+ */
+ interval = max(cs_nsec, wd_nsec);
+ if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
+ if (system_state > SYSTEM_SCHEDULING &&
+ interval > 2 * watchdog_max_interval) {
+ watchdog_max_interval = interval;
+ pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
+ cs_nsec, wd_nsec);
+ }
+ watchdog_timer.expires = jiffies;
+ continue;
+ }
+
/* Check the deviation from the watchdog clocksource. */
md = cs->uncertainty_margin + watchdog->uncertainty_margin;
if (abs(cs_nsec - wd_nsec) > md) {
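Editor's note: with WATCHDOG_INTERVAL defined as HZ >> 1 just above, the new bound simplifies to one second regardless of HZ (ignoring integer truncation for odd HZ values):

    WATCHDOG_INTERVAL_MAX_NS = (2 * (HZ / 2)) * (NSEC_PER_SEC / HZ)
                             = NSEC_PER_SEC
                             = 1,000,000,000 ns

so a watchdog pass whose measured interval stretched past roughly twice the nominal 0.5 s period is skipped and rearmed instead of risking a false skew verdict against clocksources such as acpi_pm.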
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 760793998cdd..edb0f821dcea 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1085,6 +1085,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
enum hrtimer_mode mode)
{
debug_activate(timer, mode);
+ WARN_ON_ONCE(!base->cpu_base->online);
base->cpu_base->active_bases |= 1 << base->index;
@@ -2183,6 +2184,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
cpu_base->softirq_next_timer = NULL;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->online = 1;
hrtimer_cpu_base_init_expiry_lock(cpu_base);
return 0;
}
@@ -2250,6 +2252,7 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
smp_call_function_single(ncpu, retrigger_next_event, NULL, 0);
raw_spin_unlock(&new_base->lock);
+ old_base->online = 0;
raw_spin_unlock(&old_base->lock);
return 0;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a17d26002831..01fb50c1b17e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1576,13 +1576,23 @@ void tick_setup_sched_timer(void)
void tick_cancel_sched_timer(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t idle_sleeptime, iowait_sleeptime;
+ unsigned long idle_calls, idle_sleeps;
# ifdef CONFIG_HIGH_RES_TIMERS
if (ts->sched_timer.base)
hrtimer_cancel(&ts->sched_timer);
# endif
+ idle_sleeptime = ts->idle_sleeptime;
+ iowait_sleeptime = ts->iowait_sleeptime;
+ idle_calls = ts->idle_calls;
+ idle_sleeps = ts->idle_sleeps;
memset(ts, 0, sizeof(*ts));
+ ts->idle_sleeptime = idle_sleeptime;
+ ts->iowait_sleeptime = iowait_sleeptime;
+ ts->idle_calls = idle_calls;
+ ts->idle_sleeps = idle_sleeps;
}
#endif
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 7ac6c52b25eb..241ddf5e3895 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1412,14 +1412,14 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
__bpf_kfunc_end_defs();
-BTF_SET8_START(key_sig_kfunc_set)
+BTF_KFUNCS_START(key_sig_kfunc_set)
BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
#endif
-BTF_SET8_END(key_sig_kfunc_set)
+BTF_KFUNCS_END(key_sig_kfunc_set)
static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
.owner = THIS_MODULE,
@@ -1475,9 +1475,9 @@ __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
__bpf_kfunc_end_defs();
-BTF_SET8_START(fs_kfunc_set_ids)
+BTF_KFUNCS_START(fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
-BTF_SET8_END(fs_kfunc_set_ids)
+BTF_KFUNCS_END(fs_kfunc_set_ids)
static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
@@ -1629,7 +1629,7 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -2679,6 +2679,7 @@ static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
+ u64 __user *ucookies = u64_to_user_ptr(info->kprobe_multi.cookies);
u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
struct bpf_kprobe_multi_link *kmulti_link;
u32 ucount = info->kprobe_multi.count;
@@ -2686,6 +2687,8 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
if (!uaddrs ^ !ucount)
return -EINVAL;
+ if (ucookies && !ucount)
+ return -EINVAL;
kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
info->kprobe_multi.count = kmulti_link->cnt;
@@ -2699,6 +2702,18 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
else
ucount = kmulti_link->cnt;
+ if (ucookies) {
+ if (kmulti_link->cookies) {
+ if (copy_to_user(ucookies, kmulti_link->cookies, ucount * sizeof(u64)))
+ return -EFAULT;
+ } else {
+ for (i = 0; i < ucount; i++) {
+ if (put_user(0, ucookies + i))
+ return -EFAULT;
+ }
+ }
+ }
+
if (kallsyms_show_value(current_cred())) {
if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
return -EFAULT;
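Editor's note: userspace retrieves the new per-probe cookies through link info. A hedged sketch of a consumer, assuming a libbpf recent enough to provide the bpf_link_get_info_by_fd() wrapper and the kprobe_multi.cookies uapi field added by this series; note the checks above require count to accompany the user buffers:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <bpf/bpf.h>

    static int demo_dump_cookies(int link_fd)
    {
        struct bpf_link_info info = {};
        __u32 len = sizeof(info);
        __u64 *addrs = NULL, *cookies = NULL;
        __u32 i, cnt;
        int err;

        /* First query: learn how many probes the link covers. */
        err = bpf_link_get_info_by_fd(link_fd, &info, &len);
        if (err || !info.kprobe_multi.count)
            return err;
        cnt = info.kprobe_multi.count;

        addrs = calloc(cnt, sizeof(*addrs));
        cookies = calloc(cnt, sizeof(*cookies));
        if (!addrs || !cookies) {
            err = -ENOMEM;
            goto out;
        }

        /* Second query: count accompanies both buffers, satisfying
         * the "!uaddrs ^ !ucount" and "ucookies && !ucount" checks
         * added above. */
        memset(&info, 0, sizeof(info));
        info.kprobe_multi.addrs = (__u64)(uintptr_t)addrs;
        info.kprobe_multi.cookies = (__u64)(uintptr_t)cookies;
        info.kprobe_multi.count = cnt;
        err = bpf_link_get_info_by_fd(link_fd, &info, &len);
        if (!err)
            for (i = 0; i < cnt; i++)
                printf("cookie[%u] = %llu\n", i,
                       (unsigned long long)cookies[i]);
    out:
        free(addrs);
        free(cookies);
        return err;
    }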
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 6cd2a4e3afb8..9ff018245840 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -189,9 +189,6 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
{
int size;
- if (num <= 0)
- return -EINVAL;
-
if (!fp->exit_handler) {
fp->rethook = NULL;
return 0;
@@ -199,15 +196,16 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
/* Initialize rethook if needed */
if (fp->nr_maxactive)
- size = fp->nr_maxactive;
+ num = fp->nr_maxactive;
else
- size = num * num_possible_cpus() * 2;
- if (size <= 0)
+ num *= num_possible_cpus() * 2;
+ if (num <= 0)
return -EINVAL;
+ size = sizeof(struct fprobe_rethook_node) + fp->entry_data_size;
+
/* Initialize rethook */
- fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler,
- sizeof(struct fprobe_rethook_node), size);
+ fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler, size, num);
if (IS_ERR(fp->rethook))
return PTR_ERR(fp->rethook);
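Editor's note: with the checks rearranged above, a probe with nr_maxactive unset gets num = num * num_possible_cpus() * 2 rethook nodes (for example, one registered function on an 8-CPU machine yields 1 * 8 * 2 = 16 nodes), while each node is now sized sizeof(struct fprobe_rethook_node) + fp->entry_data_size, so the per-entry data storage is carried in the node size rather than folded into the node count as before.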
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b01ae7d36021..83ba342aef31 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5325,7 +5325,17 @@ static LIST_HEAD(ftrace_direct_funcs);
static int register_ftrace_function_nolock(struct ftrace_ops *ops);
+/*
+ * If there are multiple ftrace_ops, use SAVE_REGS by default, so that direct
+ * calls will be jumped to from ftrace_regs_caller. Only if the architecture
+ * does not support ftrace_regs_caller but does support direct calls, use
+ * SAVE_ARGS so that the jump comes from ftrace_caller for multiple ftrace_ops.
+ */
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
+#else
+#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
+#endif
static int check_direct_multi(struct ftrace_ops *ops)
{
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 13aaf5e85b81..0699027b4f4c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -944,7 +944,7 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
full = 0;
} else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return -EINVAL;
+ return EPOLLERR;
cpu_buffer = buffer->buffers[cpu];
work = &cpu_buffer->irq_work;
@@ -5877,6 +5877,10 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
if (psize <= BUF_PAGE_HDR_SIZE)
return -EINVAL;
+ /* Size of a subbuf cannot be greater than the write counter */
+ if (psize > RB_WRITE_MASK + 1)
+ return -EINVAL;
+
old_order = buffer->subbuf_order;
old_size = buffer->subbuf_size;
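Editor's note: the EPOLLERR change above follows the ->poll() calling convention: the handler returns a __poll_t event mask, so a negative errno such as -EINVAL would be misread as a mask full of event bits. A minimal sketch of the convention, assuming <linux/poll.h> (device names hypothetical):

    struct demo_dev {
        wait_queue_head_t waitq;
        bool online;
    };

    static __poll_t demo_poll(struct file *filp, poll_table *wait)
    {
        struct demo_dev *dev = filp->private_data;

        if (!dev->online)
            return EPOLLERR;    /* an event-mask bit, not -EINVAL */

        poll_wait(filp, &dev->waitq, wait);
        return EPOLLIN | EPOLLRDNORM;
    }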
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2a7c6fd934e9..8198bfc54b58 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -39,6 +39,7 @@
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
+#include <linux/kmemleak.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
@@ -1532,7 +1533,7 @@ void disable_trace_on_warning(void)
bool tracer_tracing_is_on(struct trace_array *tr)
{
if (tr->array_buffer.buffer)
- return ring_buffer_record_is_on(tr->array_buffer.buffer);
+ return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
return !tr->buffer_disabled;
}
@@ -2320,7 +2321,7 @@ struct saved_cmdlines_buffer {
unsigned *map_cmdline_to_pid;
unsigned cmdline_num;
int cmdline_idx;
- char *saved_cmdlines;
+ char saved_cmdlines[];
};
static struct saved_cmdlines_buffer *savedcmd;
@@ -2334,47 +2335,60 @@ static inline void set_cmdline(int idx, const char *cmdline)
strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
-static int allocate_cmdlines_buffer(unsigned int val,
- struct saved_cmdlines_buffer *s)
+static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
+{
+ int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
+
+ kfree(s->map_cmdline_to_pid);
+ kmemleak_free(s);
+ free_pages((unsigned long)s, order);
+}
+
+static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
{
+ struct saved_cmdlines_buffer *s;
+ struct page *page;
+ int orig_size, size;
+ int order;
+
+ /* Figure out how much is needed to hold the given number of cmdlines */
+ orig_size = sizeof(*s) + val * TASK_COMM_LEN;
+ order = get_order(orig_size);
+ size = 1 << (order + PAGE_SHIFT);
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ return NULL;
+
+ s = page_address(page);
+ kmemleak_alloc(s, size, 1, GFP_KERNEL);
+ memset(s, 0, sizeof(*s));
+
+ /* Round up to actual allocation */
+ val = (size - sizeof(*s)) / TASK_COMM_LEN;
+ s->cmdline_num = val;
+
s->map_cmdline_to_pid = kmalloc_array(val,
sizeof(*s->map_cmdline_to_pid),
GFP_KERNEL);
- if (!s->map_cmdline_to_pid)
- return -ENOMEM;
-
- s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
- if (!s->saved_cmdlines) {
- kfree(s->map_cmdline_to_pid);
- return -ENOMEM;
+ if (!s->map_cmdline_to_pid) {
+ free_saved_cmdlines_buffer(s);
+ return NULL;
}
s->cmdline_idx = 0;
- s->cmdline_num = val;
memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
sizeof(s->map_pid_to_cmdline));
memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
val * sizeof(*s->map_cmdline_to_pid));
- return 0;
+ return s;
}
static int trace_create_savedcmd(void)
{
- int ret;
-
- savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
- if (!savedcmd)
- return -ENOMEM;
-
- ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
- if (ret < 0) {
- kfree(savedcmd);
- savedcmd = NULL;
- return -ENOMEM;
- }
+ savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
- return 0;
+ return savedcmd ? 0 : -ENOMEM;
}
int is_tracing_stopped(void)
@@ -6056,26 +6070,14 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
-{
- kfree(s->saved_cmdlines);
- kfree(s->map_cmdline_to_pid);
- kfree(s);
-}
-
static int tracing_resize_saved_cmdlines(unsigned int val)
{
struct saved_cmdlines_buffer *s, *savedcmd_temp;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = allocate_cmdlines_buffer(val);
if (!s)
return -ENOMEM;
- if (allocate_cmdlines_buffer(val, s) < 0) {
- kfree(s);
- return -ENOMEM;
- }
-
preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
savedcmd_temp = savedcmd;
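Editor's note: the rework above turns saved_cmdlines into a flexible-array structure carved out of whole pages, with kmemleak informed by hand since alloc_pages() sits outside its tracking. A minimal sketch of that allocation pattern (structure name hypothetical):

    struct demo_blob {
        unsigned int nr;        /* usable bytes in data[] */
        char data[];            /* flexible array member */
    };

    static struct demo_blob *demo_blob_alloc(unsigned int bytes)
    {
        int order = get_order(sizeof(struct demo_blob) + bytes);
        struct page *page = alloc_pages(GFP_KERNEL, order);
        struct demo_blob *b;

        if (!page)
            return NULL;

        b = page_address(page);
        /* alloc_pages() is invisible to kmemleak; register by hand */
        kmemleak_alloc(b, PAGE_SIZE << order, 1, GFP_KERNEL);
        memset(b, 0, sizeof(*b));

        /* round the request up to what the pages actually hold */
        b->nr = (PAGE_SIZE << order) - sizeof(struct demo_blob);
        return b;
    }

    static void demo_blob_free(struct demo_blob *b)
    {
        int order = get_order(sizeof(struct demo_blob) + b->nr);

        kmemleak_free(b);
        free_pages((unsigned long)b, order);
    }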
diff --git a/kernel/trace/trace_btf.c b/kernel/trace/trace_btf.c
index ca224d53bfdc..5bbdbcbbde3c 100644
--- a/kernel/trace/trace_btf.c
+++ b/kernel/trace/trace_btf.c
@@ -91,8 +91,8 @@ retry:
for_each_member(i, type, member) {
if (!member->name_off) {
/* Anonymous union/struct: push it for later use */
- type = btf_type_skip_modifiers(btf, member->type, &tid);
- if (type && top < BTF_ANON_STACK_MAX) {
+ if (btf_type_skip_modifiers(btf, member->type, &tid) &&
+ top < BTF_ANON_STACK_MAX) {
anon_stack[top].tid = tid;
anon_stack[top++].offset =
cur_offset + member->offset;
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index e7af286af4f1..c82b401a294d 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -441,8 +441,9 @@ static unsigned int trace_string(struct synth_trace_event *entry,
if (is_dynamic) {
union trace_synth_field *data = &entry->fields[*n_u64];
+ len = fetch_store_strlen((unsigned long)str_val);
data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
- data->as_dynamic.len = fetch_store_strlen((unsigned long)str_val);
+ data->as_dynamic.len = len;
ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 46439e3bcec4..b33c3861fbbb 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1470,8 +1470,10 @@ register_snapshot_trigger(char *glob,
struct event_trigger_data *data,
struct trace_event_file *file)
{
- if (tracing_alloc_snapshot_instance(file->tr) != 0)
- return 0;
+ int ret = tracing_alloc_snapshot_instance(file->tr);
+
+ if (ret < 0)
+ return ret;
return register_trigger(glob, data, file);
}
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index bd0d01d00fb9..a8e28f9b9271 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -2444,6 +2444,9 @@ static int timerlat_fd_open(struct inode *inode, struct file *file)
tlat = this_cpu_tmr_var();
tlat->count = 0;
+ hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+ tlat->timer.function = timerlat_irq;
+
migrate_enable();
return 0;
};
@@ -2526,9 +2529,6 @@ timerlat_fd_read(struct file *file, char __user *ubuf, size_t count,
tlat->tracing_thread = false;
tlat->kthread = current;
- hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
- tlat->timer.function = timerlat_irq;
-
/* Annotate now to drift new period */
tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 4dc74d73fc1d..34289f9c6707 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1159,9 +1159,12 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
if (!(ctx->flags & TPARG_FL_TEVENT) &&
(strcmp(arg, "$comm") == 0 || strcmp(arg, "$COMM") == 0 ||
strncmp(arg, "\\\"", 2) == 0)) {
- /* The type of $comm must be "string", and not an array. */
- if (parg->count || (t && strcmp(t, "string")))
+ /* The type of $comm must be "string", and not an array type. */
+ if (parg->count || (t && strcmp(t, "string"))) {
+ trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0),
+ NEED_STRING_TYPE);
goto out;
+ }
parg->type = find_fetch_type("string", ctx->flags);
} else
parg->type = find_fetch_type(t, ctx->flags);
@@ -1169,18 +1172,6 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
trace_probe_log_err(ctx->offset + (t ? (t - arg) : 0), BAD_TYPE);
goto out;
}
- parg->offset = *size;
- *size += parg->type->size * (parg->count ?: 1);
-
- ret = -ENOMEM;
- if (parg->count) {
- len = strlen(parg->type->fmttype) + 6;
- parg->fmt = kmalloc(len, GFP_KERNEL);
- if (!parg->fmt)
- goto out;
- snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
- parg->count);
- }
code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL);
if (!code)
@@ -1204,6 +1195,19 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
goto fail;
}
}
+ parg->offset = *size;
+ *size += parg->type->size * (parg->count ?: 1);
+
+ if (parg->count) {
+ len = strlen(parg->type->fmttype) + 6;
+ parg->fmt = kmalloc(len, GFP_KERNEL);
+ if (!parg->fmt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ snprintf(parg->fmt, len, "%s[%d]", parg->type->fmttype,
+ parg->count);
+ }
ret = -EINVAL;
/* Store operation */
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 850d9ecb6765..c1877d018269 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -515,7 +515,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(BAD_HYPHEN, "Failed to parse single hyphen. Forgot '>'?"), \
C(NO_BTF_FIELD, "This field is not found."), \
C(BAD_BTF_TID, "Failed to get BTF type info."),\
- C(BAD_TYPE4STR, "This type does not fit for string."),
+ C(BAD_TYPE4STR, "This type does not fit for string."),\
+ C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),
#undef C
#define C(a, b) TP_ERR_##a
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index c774e560f2f9..a4dcf0f24352 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
}
memcpy(elt->key, key, map->key_size);
- entry->val = elt;
+ /*
+ * Ensure the initialization is visible
+ * before publishing the elt.
+ */
+ smp_wmb();
+ WRITE_ONCE(entry->val, elt);
atomic64_inc(&map->hits);
return entry->val;
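Editor's note: the smp_wmb()/WRITE_ONCE() pair above is the writer half of the classic publish pattern; a reader needs a matching barrier on its side. A minimal flag-based sketch (names hypothetical):

    static int demo_payload;
    static int demo_ready;

    static void demo_publish(int value)
    {
        demo_payload = value;   /* initialize the payload ... */
        smp_wmb();              /* ... and order it before the flag */
        WRITE_ONCE(demo_ready, 1);
    }

    static int demo_consume(void)
    {
        if (!READ_ONCE(demo_ready))
            return -EAGAIN;
        smp_rmb();              /* pairs with demo_publish()'s smp_wmb() */
        return demo_payload;    /* guaranteed to see the stored value */
    }

For pointer publication as in __tracing_map_insert() itself, a READ_ONCE() of entry->val followed by a dependent dereference suffices on the reader side, since all architectures the kernel supports order dependent loads.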
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 76e60faed892..7b482a26d741 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5786,13 +5786,9 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
list_for_each_entry(wq, &workqueues, list) {
if (!(wq->flags & WQ_UNBOUND))
continue;
-
/* creating multiple pwqs breaks ordering guarantee */
- if (!list_empty(&wq->pwqs)) {
- if (wq->flags & __WQ_ORDERED_EXPLICIT)
- continue;
- wq->flags &= ~__WQ_ORDERED;
- }
+ if (wq->flags & __WQ_ORDERED)
+ continue;
ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
if (IS_ERR(ctx)) {
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ba25129563ad..ef36b829ae1f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -231,9 +231,10 @@ config DEBUG_INFO
in the "Debug information" choice below, indicating that debug
information will be generated for build targets.
-# Clang is known to generate .{s,u}leb128 with symbol deltas with DWARF5, which
-# some targets may not support: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
-config AS_HAS_NON_CONST_LEB128
+# Clang generates .uleb128 with label differences for DWARF v5, a feature that
+# older binutils ports do not support when utilizing RISC-V style linker
+# relaxation: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
+config AS_HAS_NON_CONST_ULEB128
def_bool $(as-instr,.uleb128 .Lexpr_end4 - .Lexpr_start3\n.Lexpr_start3:\n.Lexpr_end4:)
choice
@@ -258,7 +259,7 @@ config DEBUG_INFO_NONE
config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
bool "Rely on the toolchain's implicit default DWARF version"
select DEBUG_INFO
- depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+ depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
help
The implicit default version of DWARF debug info produced by a
toolchain changes over time.
@@ -282,7 +283,8 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_DWARF5
bool "Generate DWARF Version 5 debuginfo"
select DEBUG_INFO
- depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+ depends on !ARCH_HAS_BROKEN_DWARF5
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_ULEB128)
help
Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
5.0+ accepts the -gdwarf-5 flag but only had partial support for some
@@ -2233,6 +2235,7 @@ config TEST_DIV64
config TEST_IOV_ITER
tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
depends on KUNIT
+ depends on MMU
default KUNIT_ALL_TESTS
help
Enable this to turn on testing of the operation of the I/O iterator
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 09522af227f1..b97692854966 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -348,6 +348,13 @@ unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_weight_and);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_andnot);
+
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
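Editor's note: a usage sketch for the new weight helper, assuming the bitmap_weight_andnot() inline wrapper added alongside __bitmap_weight_andnot() in <linux/bitmap.h> by this series:

    static unsigned int demo_count_unmasked(void)
    {
        DECLARE_BITMAP(wanted, 64);
        DECLARE_BITMAP(busy, 64);

        bitmap_fill(wanted, 64);
        bitmap_zero(busy, 64);
        __set_bit(3, busy);

        /* weight of (wanted & ~busy): 63 here */
        return bitmap_weight_andnot(wanted, busy, 64);
    }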
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
index 0eed92b77ba3..bf70850035c7 100644
--- a/lib/checksum_kunit.c
+++ b/lib/checksum_kunit.c
@@ -1,15 +1,21 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Test cases csum_partial and csum_fold
+ * Test cases csum_partial, csum_fold, ip_fast_csum, csum_ipv6_magic
*/
#include <kunit/test.h>
#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
#define MAX_LEN 512
#define MAX_ALIGN 64
#define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
+#define IPv4_MIN_WORDS 5
+#define IPv4_MAX_WORDS 15
+#define NUM_IPv6_TESTS 200
+#define NUM_IP_FAST_CSUM_TESTS 181
+
/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
static const u32 random_init_sum = 0x2847aab;
static const u8 random_buf[] = {
@@ -209,6 +215,237 @@ static const u32 init_sums_no_overflow[] = {
0xffff0000, 0xfffffffb,
};
+static const u16 expected_csum_ipv6_magic[] = {
+ 0x18d4, 0x3085, 0x2e4b, 0xd9f4, 0xbdc8, 0x78f, 0x1034, 0x8422, 0x6fc0,
+ 0xd2f6, 0xbeb5, 0x9d3, 0x7e2a, 0x312e, 0x778e, 0xc1bb, 0x7cf2, 0x9d1e,
+ 0xca21, 0xf3ff, 0x7569, 0xb02e, 0xca86, 0x7e76, 0x4539, 0x45e3, 0xf28d,
+ 0xdf81, 0x8fd5, 0x3b5d, 0x8324, 0xf471, 0x83be, 0x1daf, 0x8c46, 0xe682,
+ 0xd1fb, 0x6b2e, 0xe687, 0x2a33, 0x4833, 0x2d67, 0x660f, 0x2e79, 0xd65e,
+ 0x6b62, 0x6672, 0x5dbd, 0x8680, 0xbaa5, 0x2229, 0x2125, 0x2d01, 0x1cc0,
+ 0x6d36, 0x33c0, 0xee36, 0xd832, 0x9820, 0x8a31, 0x53c5, 0x2e2, 0xdb0e,
+ 0x49ed, 0x17a7, 0x77a0, 0xd72e, 0x3d72, 0x7dc8, 0x5b17, 0xf55d, 0xa4d9,
+ 0x1446, 0x5d56, 0x6b2e, 0x69a5, 0xadb6, 0xff2a, 0x92e, 0xe044, 0x3402,
+ 0xbb60, 0xec7f, 0xe7e6, 0x1986, 0x32f4, 0x8f8, 0x5e00, 0x47c6, 0x3059,
+ 0x3969, 0xe957, 0x4388, 0x2854, 0x3334, 0xea71, 0xa6de, 0x33f9, 0x83fc,
+ 0x37b4, 0x5531, 0x3404, 0x1010, 0xed30, 0x610a, 0xc95, 0x9aed, 0x6ff,
+ 0x5136, 0x2741, 0x660e, 0x8b80, 0xf71, 0xa263, 0x88af, 0x7a73, 0x3c37,
+ 0x1908, 0x6db5, 0x2e92, 0x1cd2, 0x70c8, 0xee16, 0xe80, 0xcd55, 0x6e6,
+ 0x6434, 0x127, 0x655d, 0x2ea0, 0xb4f4, 0xdc20, 0x5671, 0xe462, 0xe52b,
+ 0xdb44, 0x3589, 0xc48f, 0xe60b, 0xd2d2, 0x66ad, 0x498, 0x436, 0xb917,
+ 0xf0ca, 0x1a6e, 0x1cb7, 0xbf61, 0x2870, 0xc7e8, 0x5b30, 0xe4a5, 0x168,
+ 0xadfc, 0xd035, 0xe690, 0xe283, 0xfb27, 0xe4ad, 0xb1a5, 0xf2d5, 0xc4b6,
+ 0x8a30, 0xd7d5, 0x7df9, 0x91d5, 0x63ed, 0x2d21, 0x312b, 0xab19, 0xa632,
+ 0x8d2e, 0xef06, 0x57b9, 0xc373, 0xbd1f, 0xa41f, 0x8444, 0x9975, 0x90cb,
+ 0xc49c, 0xe965, 0x4eff, 0x5a, 0xef6d, 0xe81a, 0xe260, 0x853a, 0xff7a,
+ 0x99aa, 0xb06b, 0xee19, 0xcc2c, 0xf34c, 0x7c49, 0xdac3, 0xa71e, 0xc988,
+ 0x3845, 0x1014
+};
+
+static const u16 expected_fast_csum[] = {
+ 0xda83, 0x45da, 0x4f46, 0x4e4f, 0x34e, 0xe902, 0xa5e9, 0x87a5, 0x7187,
+ 0x5671, 0xf556, 0x6df5, 0x816d, 0x8f81, 0xbb8f, 0xfbba, 0x5afb, 0xbe5a,
+ 0xedbe, 0xabee, 0x6aac, 0xe6b, 0xea0d, 0x67ea, 0x7e68, 0x8a7e, 0x6f8a,
+ 0x3a70, 0x9f3a, 0xe89e, 0x75e8, 0x7976, 0xfa79, 0x2cfa, 0x3c2c, 0x463c,
+ 0x7146, 0x7a71, 0x547a, 0xfd53, 0x99fc, 0xb699, 0x92b6, 0xdb91, 0xe8da,
+ 0x5fe9, 0x1e60, 0xae1d, 0x39ae, 0xf439, 0xa1f4, 0xdda1, 0xede, 0x790f,
+ 0x579, 0x1206, 0x9012, 0x2490, 0xd224, 0x5cd2, 0xa65d, 0xca7, 0x220d,
+ 0xf922, 0xbf9, 0x920b, 0x1b92, 0x361c, 0x2e36, 0x4d2e, 0x24d, 0x2,
+ 0xcfff, 0x90cf, 0xa591, 0x93a5, 0x7993, 0x9579, 0xc894, 0x50c8, 0x5f50,
+ 0xd55e, 0xcad5, 0xf3c9, 0x8f4, 0x4409, 0x5043, 0x5b50, 0x55b, 0x2205,
+ 0x1e22, 0x801e, 0x3780, 0xe137, 0x7ee0, 0xf67d, 0x3cf6, 0xa53c, 0x2ea5,
+ 0x472e, 0x5147, 0xcf51, 0x1bcf, 0x951c, 0x1e95, 0xc71e, 0xe4c7, 0xc3e4,
+ 0x3dc3, 0xee3d, 0xa4ed, 0xf9a4, 0xcbf8, 0x75cb, 0xb375, 0x50b4, 0x3551,
+ 0xf835, 0x19f8, 0x8c1a, 0x538c, 0xad52, 0xa3ac, 0xb0a3, 0x5cb0, 0x6c5c,
+ 0x5b6c, 0xc05a, 0x92c0, 0x4792, 0xbe47, 0x53be, 0x1554, 0x5715, 0x4b57,
+ 0xe54a, 0x20e5, 0x21, 0xd500, 0xa1d4, 0xa8a1, 0x57a9, 0xca57, 0x5ca,
+ 0x1c06, 0x4f1c, 0xe24e, 0xd9e2, 0xf0d9, 0x4af1, 0x474b, 0x8146, 0xe81,
+ 0xfd0e, 0x84fd, 0x7c85, 0xba7c, 0x17ba, 0x4a17, 0x964a, 0xf595, 0xff5,
+ 0x5310, 0x3253, 0x6432, 0x4263, 0x2242, 0xe121, 0x32e1, 0xf632, 0xc5f5,
+ 0x21c6, 0x7d22, 0x8e7c, 0x418e, 0x5641, 0x3156, 0x7c31, 0x737c, 0x373,
+ 0x2503, 0xc22a, 0x3c2, 0x4a04, 0x8549, 0x5285, 0xa352, 0xe8a3, 0x6fe8,
+ 0x1a6f, 0x211a, 0xe021, 0x38e0, 0x7638, 0xf575, 0x9df5, 0x169e, 0xf116,
+ 0x23f1, 0xcd23, 0xece, 0x660f, 0x4866, 0x6a48, 0x716a, 0xee71, 0xa2ee,
+ 0xb8a2, 0x61b9, 0xa361, 0xf7a2, 0x26f7, 0x1127, 0x6611, 0xe065, 0x36e0,
+ 0x1837, 0x3018, 0x1c30, 0x721b, 0x3e71, 0xe43d, 0x99e4, 0x9e9a, 0xb79d,
+ 0xa9b7, 0xcaa, 0xeb0c, 0x4eb, 0x1305, 0x8813, 0xb687, 0xa9b6, 0xfba9,
+ 0xd7fb, 0xccd8, 0x2ecd, 0x652f, 0xae65, 0x3fae, 0x3a40, 0x563a, 0x7556,
+ 0x2776, 0x1228, 0xef12, 0xf9ee, 0xcef9, 0x56cf, 0xa956, 0x24a9, 0xba24,
+ 0x5fba, 0x665f, 0xf465, 0x8ff4, 0x6d8f, 0x346d, 0x5f34, 0x385f, 0xd137,
+ 0xb8d0, 0xacb8, 0x55ac, 0x7455, 0xe874, 0x89e8, 0xd189, 0xa0d1, 0xb2a0,
+ 0xb8b2, 0x36b8, 0x5636, 0xd355, 0x8d3, 0x1908, 0x2118, 0xc21, 0x990c,
+ 0x8b99, 0x158c, 0x7815, 0x9e78, 0x6f9e, 0x4470, 0x1d44, 0x341d, 0x2634,
+ 0x3f26, 0x793e, 0xc79, 0xcc0b, 0x26cc, 0xd126, 0x1fd1, 0xb41f, 0xb6b4,
+ 0x22b7, 0xa122, 0xa1, 0x7f01, 0x837e, 0x3b83, 0xaf3b, 0x6fae, 0x916f,
+ 0xb490, 0xffb3, 0xceff, 0x50cf, 0x7550, 0x7275, 0x1272, 0x2613, 0xaa26,
+ 0xd5aa, 0x7d5, 0x9607, 0x96, 0xb100, 0xf8b0, 0x4bf8, 0xdd4c, 0xeddd,
+ 0x98ed, 0x2599, 0x9325, 0xeb92, 0x8feb, 0xcc8f, 0x2acd, 0x392b, 0x3b39,
+ 0xcb3b, 0x6acb, 0xd46a, 0xb8d4, 0x6ab8, 0x106a, 0x2f10, 0x892f, 0x789,
+ 0xc806, 0x45c8, 0x7445, 0x3c74, 0x3a3c, 0xcf39, 0xd7ce, 0x58d8, 0x6e58,
+ 0x336e, 0x1034, 0xee10, 0xe9ed, 0xc2e9, 0x3fc2, 0xd53e, 0xd2d4, 0xead2,
+ 0x8fea, 0x2190, 0x1162, 0xbe11, 0x8cbe, 0x6d8c, 0xfb6c, 0x6dfb, 0xd36e,
+ 0x3ad3, 0xf3a, 0x870e, 0xc287, 0x53c3, 0xc54, 0x5b0c, 0x7d5a, 0x797d,
+ 0xec79, 0x5dec, 0x4d5e, 0x184e, 0xd618, 0x60d6, 0xb360, 0x98b3, 0xf298,
+ 0xb1f2, 0x69b1, 0xf969, 0xef9, 0xab0e, 0x21ab, 0xe321, 0x24e3, 0x8224,
+ 0x5481, 0x5954, 0x7a59, 0xff7a, 0x7dff, 0x1a7d, 0xa51a, 0x46a5, 0x6b47,
+ 0xe6b, 0x830e, 0xa083, 0xff9f, 0xd0ff, 0xffd0, 0xe6ff, 0x7de7, 0xc67d,
+ 0xd0c6, 0x61d1, 0x3a62, 0xc3b, 0x150c, 0x1715, 0x4517, 0x5345, 0x3954,
+ 0xdd39, 0xdadd, 0x32db, 0x6a33, 0xd169, 0x86d1, 0xb687, 0x3fb6, 0x883f,
+ 0xa487, 0x39a4, 0x2139, 0xbe20, 0xffbe, 0xedfe, 0x8ded, 0x368e, 0xc335,
+ 0x51c3, 0x9851, 0xf297, 0xd6f2, 0xb9d6, 0x95ba, 0x2096, 0xea1f, 0x76e9,
+ 0x4e76, 0xe04d, 0xd0df, 0x80d0, 0xa280, 0xfca2, 0x75fc, 0xef75, 0x32ef,
+ 0x6833, 0xdf68, 0xc4df, 0x76c4, 0xb77, 0xb10a, 0xbfb1, 0x58bf, 0x5258,
+ 0x4d52, 0x6c4d, 0x7e6c, 0xb67e, 0xccb5, 0x8ccc, 0xbe8c, 0xc8bd, 0x9ac8,
+ 0xa99b, 0x52a9, 0x2f53, 0xc30, 0x3e0c, 0xb83d, 0x83b7, 0x5383, 0x7e53,
+ 0x4f7e, 0xe24e, 0xb3e1, 0x8db3, 0x618e, 0xc861, 0xfcc8, 0x34fc, 0x9b35,
+ 0xaa9b, 0xb1aa, 0x5eb1, 0x395e, 0x8639, 0xd486, 0x8bd4, 0x558b, 0x2156,
+ 0xf721, 0x4ef6, 0x14f, 0x7301, 0xdd72, 0x49de, 0x894a, 0x9889, 0x8898,
+ 0x7788, 0x7b77, 0x637b, 0xb963, 0xabb9, 0x7cab, 0xc87b, 0x21c8, 0xcb21,
+ 0xdfca, 0xbfdf, 0xf2bf, 0x6af2, 0x626b, 0xb261, 0x3cb2, 0xc63c, 0xc9c6,
+ 0xc9c9, 0xb4c9, 0xf9b4, 0x91f9, 0x4091, 0x3a40, 0xcc39, 0xd1cb, 0x7ed1,
+ 0x537f, 0x6753, 0xa167, 0xba49, 0x88ba, 0x7789, 0x3877, 0xf037, 0xd3ef,
+ 0xb5d4, 0x55b6, 0xa555, 0xeca4, 0xa1ec, 0xb6a2, 0x7b7, 0x9507, 0xfd94,
+ 0x82fd, 0x5c83, 0x765c, 0x9676, 0x3f97, 0xda3f, 0x6fda, 0x646f, 0x3064,
+ 0x5e30, 0x655e, 0x6465, 0xcb64, 0xcdca, 0x4ccd, 0x3f4c, 0x243f, 0x6f24,
+ 0x656f, 0x6065, 0x3560, 0x3b36, 0xac3b, 0x4aac, 0x714a, 0x7e71, 0xda7e,
+ 0x7fda, 0xda7f, 0x6fda, 0xff6f, 0xc6ff, 0xedc6, 0xd4ed, 0x70d5, 0xeb70,
+ 0xa3eb, 0x80a3, 0xca80, 0x3fcb, 0x2540, 0xf825, 0x7ef8, 0xf87e, 0x73f8,
+ 0xb474, 0xb4b4, 0x92b5, 0x9293, 0x93, 0x3500, 0x7134, 0x9071, 0xfa8f,
+ 0x51fa, 0x1452, 0xba13, 0x7ab9, 0x957a, 0x8a95, 0x6e8a, 0x6d6e, 0x7c6d,
+ 0x447c, 0x9744, 0x4597, 0x8945, 0xef88, 0x8fee, 0x3190, 0x4831, 0x8447,
+ 0xa183, 0x1da1, 0xd41d, 0x2dd4, 0x4f2e, 0xc94e, 0xcbc9, 0xc9cb, 0x9ec9,
+ 0x319e, 0xd531, 0x20d5, 0x4021, 0xb23f, 0x29b2, 0xd828, 0xecd8, 0x5ded,
+ 0xfc5d, 0x4dfc, 0xd24d, 0x6bd2, 0x5f6b, 0xb35e, 0x7fb3, 0xee7e, 0x56ee,
+ 0xa657, 0x68a6, 0x8768, 0x7787, 0xb077, 0x4cb1, 0x764c, 0xb175, 0x7b1,
+ 0x3d07, 0x603d, 0x3560, 0x3e35, 0xb03d, 0xd6b0, 0xc8d6, 0xd8c8, 0x8bd8,
+ 0x3e8c, 0x303f, 0xd530, 0xf1d4, 0x42f1, 0xca42, 0xddca, 0x41dd, 0x3141,
+ 0x132, 0xe901, 0x8e9, 0xbe09, 0xe0bd, 0x2ce0, 0x862d, 0x3986, 0x9139,
+ 0x6d91, 0x6a6d, 0x8d6a, 0x1b8d, 0xac1b, 0xedab, 0x54ed, 0xc054, 0xcebf,
+ 0xc1ce, 0x5c2, 0x3805, 0x6038, 0x5960, 0xd359, 0xdd3, 0xbe0d, 0xafbd,
+ 0x6daf, 0x206d, 0x2c20, 0x862c, 0x8e86, 0xec8d, 0xa2ec, 0xa3a2, 0x51a3,
+ 0x8051, 0xfd7f, 0x91fd, 0xa292, 0xaf14, 0xeeae, 0x59ef, 0x535a, 0x8653,
+ 0x3986, 0x9539, 0xb895, 0xa0b8, 0x26a0, 0x2227, 0xc022, 0x77c0, 0xad77,
+ 0x46ad, 0xaa46, 0x60aa, 0x8560, 0x4785, 0xd747, 0x45d7, 0x2346, 0x5f23,
+ 0x25f, 0x1d02, 0x71d, 0x8206, 0xc82, 0x180c, 0x3018, 0x4b30, 0x4b,
+ 0x3001, 0x1230, 0x2d12, 0x8c2d, 0x148d, 0x4015, 0x5f3f, 0x3d5f, 0x6b3d,
+ 0x396b, 0x473a, 0xf746, 0x44f7, 0x8945, 0x3489, 0xcb34, 0x84ca, 0xd984,
+ 0xf0d9, 0xbcf0, 0x63bd, 0x3264, 0xf332, 0x45f3, 0x7346, 0x5673, 0xb056,
+ 0xd3b0, 0x4ad4, 0x184b, 0x7d18, 0x6c7d, 0xbb6c, 0xfeba, 0xe0fe, 0x10e1,
+ 0x5410, 0x2954, 0x9f28, 0x3a9f, 0x5a3a, 0xdb59, 0xbdc, 0xb40b, 0x1ab4,
+ 0x131b, 0x5d12, 0x6d5c, 0xe16c, 0xb0e0, 0x89b0, 0xba88, 0xbb, 0x3c01,
+ 0xe13b, 0x6fe1, 0x446f, 0xa344, 0x81a3, 0xfe81, 0xc7fd, 0x38c8, 0xb38,
+ 0x1a0b, 0x6d19, 0xf36c, 0x47f3, 0x6d48, 0xb76d, 0xd3b7, 0xd8d2, 0x52d9,
+ 0x4b53, 0xa54a, 0x34a5, 0xc534, 0x9bc4, 0xed9b, 0xbeed, 0x3ebe, 0x233e,
+ 0x9f22, 0x4a9f, 0x774b, 0x4577, 0xa545, 0x64a5, 0xb65, 0x870b, 0x487,
+ 0x9204, 0x5f91, 0xd55f, 0x35d5, 0x1a35, 0x71a, 0x7a07, 0x4e7a, 0xfc4e,
+ 0x1efc, 0x481f, 0x7448, 0xde74, 0xa7dd, 0x1ea7, 0xaa1e, 0xcfaa, 0xfbcf,
+ 0xedfb, 0x6eee, 0x386f, 0x4538, 0x6e45, 0xd96d, 0x11d9, 0x7912, 0x4b79,
+ 0x494b, 0x6049, 0xac5f, 0x65ac, 0x1366, 0x5913, 0xe458, 0x7ae4, 0x387a,
+ 0x3c38, 0xb03c, 0x76b0, 0x9376, 0xe193, 0x42e1, 0x7742, 0x6476, 0x3564,
+ 0x3c35, 0x6a3c, 0xcc69, 0x94cc, 0x5d95, 0xe5e, 0xee0d, 0x4ced, 0xce4c,
+ 0x52ce, 0xaa52, 0xdaaa, 0xe4da, 0x1de5, 0x4530, 0x5445, 0x3954, 0xb639,
+ 0x81b6, 0x7381, 0x1574, 0xc215, 0x10c2, 0x3f10, 0x6b3f, 0xe76b, 0x7be7,
+ 0xbc7b, 0xf7bb, 0x41f7, 0xcc41, 0x38cc, 0x4239, 0xa942, 0x4a9, 0xc504,
+ 0x7cc4, 0x437c, 0x6743, 0xea67, 0x8dea, 0xe88d, 0xd8e8, 0xdcd8, 0x17dd,
+ 0x5718, 0x958, 0xa609, 0x41a5, 0x5842, 0x159, 0x9f01, 0x269f, 0x5a26,
+ 0x405a, 0xc340, 0xb4c3, 0xd4b4, 0xf4d3, 0xf1f4, 0x39f2, 0xe439, 0x67e4,
+ 0x4168, 0xa441, 0xdda3, 0xdedd, 0x9df, 0xab0a, 0xa5ab, 0x9a6, 0xba09,
+ 0x9ab9, 0xad9a, 0x5ae, 0xe205, 0xece2, 0xecec, 0x14ed, 0xd614, 0x6bd5,
+ 0x916c, 0x3391, 0x6f33, 0x206f, 0x8020, 0x780, 0x7207, 0x2472, 0x8a23,
+ 0xb689, 0x3ab6, 0xf739, 0x97f6, 0xb097, 0xa4b0, 0xe6a4, 0x88e6, 0x2789,
+ 0xb28, 0x350b, 0x1f35, 0x431e, 0x1043, 0xc30f, 0x79c3, 0x379, 0x5703,
+ 0x3256, 0x4732, 0x7247, 0x9d72, 0x489d, 0xd348, 0xa4d3, 0x7ca4, 0xbf7b,
+ 0x45c0, 0x7b45, 0x337b, 0x4034, 0x843f, 0xd083, 0x35d0, 0x6335, 0x4d63,
+ 0xe14c, 0xcce0, 0xfecc, 0x35ff, 0x5636, 0xf856, 0xeef8, 0x2def, 0xfc2d,
+ 0x4fc, 0x6e04, 0xb66d, 0x78b6, 0xbb78, 0x3dbb, 0x9a3d, 0x839a, 0x9283,
+ 0x593, 0xd504, 0x23d5, 0x5424, 0xd054, 0x61d0, 0xdb61, 0x17db, 0x1f18,
+ 0x381f, 0x9e37, 0x679e, 0x1d68, 0x381d, 0x8038, 0x917f, 0x491, 0xbb04,
+ 0x23bb, 0x4124, 0xd41, 0xa30c, 0x8ba3, 0x8b8b, 0xc68b, 0xd2c6, 0xebd2,
+ 0x93eb, 0xbd93, 0x99bd, 0x1a99, 0xea19, 0x58ea, 0xcf58, 0x73cf, 0x1073,
+ 0x9e10, 0x139e, 0xea13, 0xcde9, 0x3ecd, 0x883f, 0xf89, 0x180f, 0x2a18,
+ 0x212a, 0xce20, 0x73ce, 0xf373, 0x60f3, 0xad60, 0x4093, 0x8e40, 0xb98e,
+ 0xbfb9, 0xf1bf, 0x8bf1, 0x5e8c, 0xe95e, 0x14e9, 0x4e14, 0x1c4e, 0x7f1c,
+ 0xe77e, 0x6fe7, 0xf26f, 0x13f2, 0x8b13, 0xda8a, 0x5fda, 0xea5f, 0x4eea,
+ 0xa84f, 0x88a8, 0x1f88, 0x2820, 0x9728, 0x5a97, 0x3f5b, 0xb23f, 0x70b2,
+ 0x2c70, 0x232d, 0xf623, 0x4f6, 0x905, 0x7509, 0xd675, 0x28d7, 0x9428,
+ 0x3794, 0xf036, 0x2bf0, 0xba2c, 0xedb9, 0xd7ed, 0x59d8, 0xed59, 0x4ed,
+ 0xe304, 0x18e3, 0x5c19, 0x3d5c, 0x753d, 0x6d75, 0x956d, 0x7f95, 0xc47f,
+ 0x83c4, 0xa84, 0x2e0a, 0x5f2e, 0xb95f, 0x77b9, 0x6d78, 0xf46d, 0x1bf4,
+ 0xed1b, 0xd6ed, 0xe0d6, 0x5e1, 0x3905, 0x5638, 0xa355, 0x99a2, 0xbe99,
+ 0xb4bd, 0x85b4, 0x2e86, 0x542e, 0x6654, 0xd765, 0x73d7, 0x3a74, 0x383a,
+ 0x2638, 0x7826, 0x7677, 0x9a76, 0x7e99, 0x2e7e, 0xea2d, 0xa6ea, 0x8a7,
+ 0x109, 0x3300, 0xad32, 0x5fad, 0x465f, 0x2f46, 0xc62f, 0xd4c5, 0xad5,
+ 0xcb0a, 0x4cb, 0xb004, 0x7baf, 0xe47b, 0x92e4, 0x8e92, 0x638e, 0x1763,
+ 0xc17, 0xf20b, 0x1ff2, 0x8920, 0x5889, 0xcb58, 0xf8cb, 0xcaf8, 0x84cb,
+ 0x9f84, 0x8a9f, 0x918a, 0x4991, 0x8249, 0xff81, 0x46ff, 0x5046, 0x5f50,
+ 0x725f, 0xf772, 0x8ef7, 0xe08f, 0xc1e0, 0x1fc2, 0x9e1f, 0x8b9d, 0x108b,
+ 0x411, 0x2b04, 0xb02a, 0x1fb0, 0x1020, 0x7a0f, 0x587a, 0x8958, 0xb188,
+ 0xb1b1, 0x49b2, 0xb949, 0x7ab9, 0x917a, 0xfc91, 0xe6fc, 0x47e7, 0xbc47,
+ 0x8fbb, 0xea8e, 0x34ea, 0x2635, 0x1726, 0x9616, 0xc196, 0xa6c1, 0xf3a6,
+ 0x11f3, 0x4811, 0x3e48, 0xeb3e, 0xf7ea, 0x1bf8, 0xdb1c, 0x8adb, 0xe18a,
+ 0x42e1, 0x9d42, 0x5d9c, 0x6e5d, 0x286e, 0x4928, 0x9a49, 0xb09c, 0xa6b0,
+ 0x2a7, 0xe702, 0xf5e6, 0x9af5, 0xf9b, 0x810f, 0x8080, 0x180, 0x1702,
+ 0x5117, 0xa650, 0x11a6, 0x1011, 0x550f, 0xd554, 0xbdd5, 0x6bbe, 0xc66b,
+ 0xfc7, 0x5510, 0x5555, 0x7655, 0x177, 0x2b02, 0x6f2a, 0xb70, 0x9f0b,
+ 0xcf9e, 0xf3cf, 0x3ff4, 0xcb40, 0x8ecb, 0x768e, 0x5277, 0x8652, 0x9186,
+ 0x9991, 0x5099, 0xd350, 0x93d3, 0x6d94, 0xe6d, 0x530e, 0x3153, 0xa531,
+ 0x64a5, 0x7964, 0x7c79, 0x467c, 0x1746, 0x3017, 0x3730, 0x538, 0x5,
+ 0x1e00, 0x5b1e, 0x955a, 0xae95, 0x3eaf, 0xff3e, 0xf8ff, 0xb2f9, 0xa1b3,
+ 0xb2a1, 0x5b2, 0xad05, 0x7cac, 0x2d7c, 0xd32c, 0x80d2, 0x7280, 0x8d72,
+ 0x1b8e, 0x831b, 0xac82, 0xfdac, 0xa7fd, 0x15a8, 0xd614, 0xe0d5, 0x7be0,
+ 0xb37b, 0x61b3, 0x9661, 0x9d95, 0xc79d, 0x83c7, 0xd883, 0xead7, 0xceb,
+ 0xf60c, 0xa9f5, 0x19a9, 0xa019, 0x8f9f, 0xd48f, 0x3ad5, 0x853a, 0x985,
+ 0x5309, 0x6f52, 0x1370, 0x6e13, 0xa96d, 0x98a9, 0x5198, 0x9f51, 0xb69f,
+ 0xa1b6, 0x2ea1, 0x672e, 0x2067, 0x6520, 0xaf65, 0x6eaf, 0x7e6f, 0xee7e,
+ 0x17ef, 0xa917, 0xcea8, 0x9ace, 0xff99, 0x5dff, 0xdf5d, 0x38df, 0xa39,
+ 0x1c0b, 0xe01b, 0x46e0, 0xcb46, 0x90cb, 0xba90, 0x4bb, 0x9104, 0x9d90,
+ 0xc89c, 0xf6c8, 0x6cf6, 0x886c, 0x1789, 0xbd17, 0x70bc, 0x7e71, 0x17e,
+ 0x1f01, 0xa01f, 0xbaa0, 0x14bb, 0xfc14, 0x7afb, 0xa07a, 0x3da0, 0xbf3d,
+ 0x48bf, 0x8c48, 0x968b, 0x9d96, 0xfd9d, 0x96fd, 0x9796, 0x6b97, 0xd16b,
+ 0xf4d1, 0x3bf4, 0x253c, 0x9125, 0x6691, 0xc166, 0x34c1, 0x5735, 0x1a57,
+ 0xdc19, 0x77db, 0x8577, 0x4a85, 0x824a, 0x9182, 0x7f91, 0xfd7f, 0xb4c3,
+ 0xb5b4, 0xb3b5, 0x7eb3, 0x617e, 0x4e61, 0xa4f, 0x530a, 0x3f52, 0xa33e,
+ 0x34a3, 0x9234, 0xf091, 0xf4f0, 0x1bf5, 0x311b, 0x9631, 0x6a96, 0x386b,
+ 0x1d39, 0xe91d, 0xe8e9, 0x69e8, 0x426a, 0xee42, 0x89ee, 0x368a, 0x2837,
+ 0x7428, 0x5974, 0x6159, 0x1d62, 0x7b1d, 0xf77a, 0x7bf7, 0x6b7c, 0x696c,
+ 0xf969, 0x4cf9, 0x714c, 0x4e71, 0x6b4e, 0x256c, 0x6e25, 0xe96d, 0x94e9,
+ 0x8f94, 0x3e8f, 0x343e, 0x4634, 0xb646, 0x97b5, 0x8997, 0xe8a, 0x900e,
+ 0x8090, 0xfd80, 0xa0fd, 0x16a1, 0xf416, 0xebf4, 0x95ec, 0x1196, 0x8911,
+ 0x3d89, 0xda3c, 0x9fd9, 0xd79f, 0x4bd7, 0x214c, 0x3021, 0x4f30, 0x994e,
+ 0x5c99, 0x6f5d, 0x326f, 0xab31, 0x6aab, 0xe969, 0x90e9, 0x1190, 0xff10,
+ 0xa2fe, 0xe0a2, 0x66e1, 0x4067, 0x9e3f, 0x2d9e, 0x712d, 0x8170, 0xd180,
+ 0xffd1, 0x25ff, 0x3826, 0x2538, 0x5f24, 0xc45e, 0x1cc4, 0xdf1c, 0x93df,
+ 0xc793, 0x80c7, 0x2380, 0xd223, 0x7ed2, 0xfc7e, 0x22fd, 0x7422, 0x1474,
+ 0xb714, 0x7db6, 0x857d, 0xa85, 0xa60a, 0x88a6, 0x4289, 0x7842, 0xc278,
+ 0xf7c2, 0xcdf7, 0x84cd, 0xae84, 0x8cae, 0xb98c, 0x1aba, 0x4d1a, 0x884c,
+ 0x4688, 0xcc46, 0xd8cb, 0x2bd9, 0xbe2b, 0xa2be, 0x72a2, 0xf772, 0xd2f6,
+ 0x75d2, 0xc075, 0xa3c0, 0x63a3, 0xae63, 0x8fae, 0x2a90, 0x5f2a, 0xef5f,
+ 0x5cef, 0xa05c, 0x89a0, 0x5e89, 0x6b5e, 0x736b, 0x773, 0x9d07, 0xe99c,
+ 0x27ea, 0x2028, 0xc20, 0x980b, 0x4797, 0x2848, 0x9828, 0xc197, 0x48c2,
+ 0x2449, 0x7024, 0x570, 0x3e05, 0xd3e, 0xf60c, 0xbbf5, 0x69bb, 0x3f6a,
+ 0x740, 0xf006, 0xe0ef, 0xbbe0, 0xadbb, 0x56ad, 0xcf56, 0xbfce, 0xa9bf,
+ 0x205b, 0x6920, 0xae69, 0x50ae, 0x2050, 0xf01f, 0x27f0, 0x9427, 0x8993,
+ 0x8689, 0x4087, 0x6e40, 0xb16e, 0xa1b1, 0xe8a1, 0x87e8, 0x6f88, 0xfe6f,
+ 0x4cfe, 0xe94d, 0xd5e9, 0x47d6, 0x3148, 0x5f31, 0xc35f, 0x13c4, 0xa413,
+ 0x5a5, 0x2405, 0xc223, 0x66c2, 0x3667, 0x5e37, 0x5f5e, 0x2f5f, 0x8c2f,
+ 0xe48c, 0xd0e4, 0x4d1, 0xd104, 0xe4d0, 0xcee4, 0xfcf, 0x480f, 0xa447,
+ 0x5ea4, 0xff5e, 0xbefe, 0x8dbe, 0x1d8e, 0x411d, 0x1841, 0x6918, 0x5469,
+ 0x1155, 0xc611, 0xaac6, 0x37ab, 0x2f37, 0xca2e, 0x87ca, 0xbd87, 0xabbd,
+ 0xb3ab, 0xcb4, 0xce0c, 0xfccd, 0xa5fd, 0x72a5, 0xf072, 0x83f0, 0xfe83,
+ 0x97fd, 0xc997, 0xb0c9, 0xadb0, 0xe6ac, 0x88e6, 0x1088, 0xbe10, 0x16be,
+ 0xa916, 0xa3a8, 0x46a3, 0x5447, 0xe953, 0x84e8, 0x2085, 0xa11f, 0xfa1,
+ 0xdd0f, 0xbedc, 0x5abe, 0x805a, 0xc97f, 0x6dc9, 0x826d, 0x4a82, 0x934a,
+ 0x5293, 0xd852, 0xd3d8, 0xadd3, 0xf4ad, 0xf3f4, 0xfcf3, 0xfefc, 0xcafe,
+ 0xb7ca, 0x3cb8, 0xa13c, 0x18a1, 0x1418, 0xea13, 0x91ea, 0xf891, 0x53f8,
+ 0xa254, 0xe9a2, 0x87ea, 0x4188, 0x1c41, 0xdc1b, 0xf5db, 0xcaf5, 0x45ca,
+ 0x6d45, 0x396d, 0xde39, 0x90dd, 0x1e91, 0x1e, 0x7b00, 0x6a7b, 0xa46a,
+ 0xc9a3, 0x9bc9, 0x389b, 0x1139, 0x5211, 0x1f52, 0xeb1f, 0xabeb, 0x48ab,
+ 0x9348, 0xb392, 0x17b3, 0x1618, 0x5b16, 0x175b, 0xdc17, 0xdedb, 0x1cdf,
+ 0xeb1c, 0xd1ea, 0x4ad2, 0xd4b, 0xc20c, 0x24c2, 0x7b25, 0x137b, 0x8b13,
+ 0x618b, 0xa061, 0xff9f, 0xfffe, 0x72ff, 0xf572, 0xe2f5, 0xcfe2, 0xd2cf,
+ 0x75d3, 0x6a76, 0xc469, 0x1ec4, 0xfc1d, 0x59fb, 0x455a, 0x7a45, 0xa479,
+ 0xb7a4
+};
+
static u8 tmp_buf[TEST_BUFLEN];
#define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
@@ -338,10 +575,58 @@ static void test_csum_no_carry_inputs(struct kunit *test)
}
}
+static void test_ip_fast_csum(struct kunit *test)
+{
+ __sum16 csum_result;
+ u16 expected;
+
+ for (int len = IPv4_MIN_WORDS; len < IPv4_MAX_WORDS; len++) {
+ for (int index = 0; index < NUM_IP_FAST_CSUM_TESTS; index++) {
+ csum_result = ip_fast_csum(random_buf + index, len);
+ expected =
+ expected_fast_csum[(len - IPv4_MIN_WORDS) *
+ NUM_IP_FAST_CSUM_TESTS +
+ index];
+ CHECK_EQ(to_sum16(expected), csum_result);
+ }
+ }
+}
+
+static void test_csum_ipv6_magic(struct kunit *test)
+{
+#if defined(CONFIG_NET)
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ unsigned int len;
+ unsigned char proto;
+ __wsum csum;
+
+ const int daddr_offset = sizeof(struct in6_addr);
+ const int len_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr);
+ const int proto_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int);
+ const int csum_offset = sizeof(struct in6_addr) + sizeof(struct in6_addr) +
+ sizeof(int) + sizeof(char);
+
+ for (int i = 0; i < NUM_IPv6_TESTS; i++) {
+ saddr = (const struct in6_addr *)(random_buf + i);
+ daddr = (const struct in6_addr *)(random_buf + i +
+ daddr_offset);
+ len = le32_to_cpu(*(__le32 *)(random_buf + i + len_offset));
+ proto = *(random_buf + i + proto_offset);
+ csum = *(__wsum *)(random_buf + i + csum_offset);
+ CHECK_EQ(to_sum16(expected_csum_ipv6_magic[i]),
+ csum_ipv6_magic(saddr, daddr, len, proto, csum));
+ }
+#endif /* CONFIG_NET */
+}
+
static struct kunit_case __refdata checksum_test_cases[] = {
KUNIT_CASE(test_csum_fixed_random_inputs),
KUNIT_CASE(test_csum_all_carry_inputs),
KUNIT_CASE(test_csum_no_carry_inputs),
+ KUNIT_CASE(test_ip_fast_csum),
+ KUNIT_CASE(test_csum_ipv6_magic),
{}
};
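Editor's note: the expected_fast_csum[] table above is laid out row-major, one row of NUM_IP_FAST_CSUM_TESTS (181) buffer offsets per header length. The loop uses len < IPv4_MAX_WORDS, so it covers lengths 5..14, i.e. 10 * 181 = 1810 expected values, which is why test_ip_fast_csum() indexes the table as (len - IPv4_MIN_WORDS) * NUM_IP_FAST_CSUM_TESTS + index.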
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index fde0aa244148..a1389db1c30a 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,10 +10,77 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/compiler.h>
#include <linux/export.h>
+#include <trace/events/napi.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
+static void dql_check_stall(struct dql *dql)
+{
+ unsigned short stall_thrs;
+ unsigned long now;
+
+ stall_thrs = READ_ONCE(dql->stall_thrs);
+ if (!stall_thrs)
+ return;
+
+ now = jiffies;
+ /* Check for a potential stall */
+ if (time_after_eq(now, dql->last_reap + stall_thrs)) {
+ unsigned long hist_head, t, start, end;
+
+ /* We are trying to detect a period of at least @stall_thrs
+ * jiffies without any Tx completions, but during the first half
+ * of which some Tx was posted.
+ */
+dqs_again:
+ hist_head = READ_ONCE(dql->history_head);
+ /* pairs with smp_wmb() in dql_queued() */
+ smp_rmb();
+
+ /* Get the previous entry in the ring buffer, which is the
+ * oldest sample.
+ */
+ start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG;
+
+ /* Advance start to continue from the last reap time */
+ if (time_before(start, dql->last_reap + 1))
+ start = dql->last_reap + 1;
+
+ /* Newest sample we should have already seen a completion for */
+ end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1);
+
+ /* Shrink the search space to [start, (now - stall_thrs/2)] if
+ * `end` is beyond the stall zone
+ */
+ if (time_before(now, end + stall_thrs / 2))
+ end = now - stall_thrs / 2;
+
+ /* Search for the queued time in [start, end] */
+ for (t = start; time_before_eq(t, end); t++)
+ if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG),
+ dql->history))
+ break;
+
+ /* t now holds the time at which the pending Tx was queued */
+ if (!time_before_eq(t, end))
+ goto no_stall;
+
+ /* The ring buffer was modified in the meantime, retry */
+ if (hist_head != READ_ONCE(dql->history_head))
+ goto dqs_again;
+
+ dql->stall_cnt++;
+ dql->stall_max = max_t(unsigned short, dql->stall_max, now - t);
+
+ trace_dql_stall_detected(dql->stall_thrs, now - t,
+ dql->last_reap, dql->history_head,
+ now, dql->history);
+ }
+no_stall:
+ dql->last_reap = now;
+}
+
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
@@ -110,6 +177,8 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->prev_last_obj_cnt = dql->last_obj_cnt;
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
+
+ dql_check_stall(dql);
}
EXPORT_SYMBOL(dql_completed);
@@ -125,6 +194,10 @@ void dql_reset(struct dql *dql)
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
+
+ dql->last_reap = jiffies;
+ dql->history_head = jiffies / BITS_PER_LONG;
+ memset(dql->history, 0, sizeof(dql->history));
}
EXPORT_SYMBOL(dql_reset);
@@ -133,6 +206,7 @@ void dql_init(struct dql *dql, unsigned int hold_time)
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
+ dql->stall_thrs = 0;
dql_reset(dql);
}
EXPORT_SYMBOL(dql_init);
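Editor's note: a hedged usage sketch of the new knobs (the struct dql fields are the ones referenced in dql_check_stall() above; the helper names are hypothetical). stall_thrs is in jiffies, and zero keeps the detector disabled since dql_check_stall() bails out immediately:

    static void demo_arm_stall_detector(struct dql *dql)
    {
        /* threshold in jiffies; zero keeps the detector disabled */
        WRITE_ONCE(dql->stall_thrs, msecs_to_jiffies(8));
    }

    static void demo_report_stalls(struct dql *dql)
    {
        pr_info("dql stalls: %lu, worst: %u ms\n",
                (unsigned long)dql->stall_cnt,
                jiffies_to_msecs(dql->stall_max));
    }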
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e0aa6b440ca5..cf2eb2b2f983 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -166,7 +166,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_IOVEC,
- .copy_mc = false,
.nofault = false,
.data_source = direction,
.__iov = iov,
@@ -245,26 +244,8 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
static __always_inline
-size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
- size_t len, void *to, void *priv2)
-{
- return copy_mc_to_kernel(to + progress, iter_from, len);
-}
-
-static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(i->count < bytes))
- bytes = i->count;
- if (unlikely(!bytes))
- return 0;
- return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
-}
-
-static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(iov_iter_is_copy_mc(i)))
- return __copy_from_iter_mc(addr, bytes, i);
return iterate_and_advance(i, bytes, addr,
copy_from_user_iter, memcpy_from_iter);
}
@@ -633,7 +614,6 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_KVEC,
- .copy_mc = false,
.data_source = direction,
.kvec = kvec,
.nr_segs = nr_segs,
@@ -650,7 +630,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_BVEC,
- .copy_mc = false,
.data_source = direction,
.bvec = bvec,
.nr_segs = nr_segs,
@@ -679,7 +658,6 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
BUG_ON(direction & ~1);
*i = (struct iov_iter) {
.iter_type = ITER_XARRAY,
- .copy_mc = false,
.data_source = direction,
.xarray = xarray,
.xarray_start = start,
@@ -703,7 +681,6 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
BUG_ON(direction != READ);
*i = (struct iov_iter){
.iter_type = ITER_DISCARD,
- .copy_mc = false,
.data_source = false,
.count = count,
.iov_offset = 0
diff --git a/lib/kobject.c b/lib/kobject.c
index 59dbcbdb1c91..72fa20f405f1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -74,10 +74,12 @@ static int create_dir(struct kobject *kobj)
if (error)
return error;
- error = sysfs_create_groups(kobj, ktype->default_groups);
- if (error) {
- sysfs_remove_dir(kobj);
- return error;
+ if (ktype) {
+ error = sysfs_create_groups(kobj, ktype->default_groups);
+ if (error) {
+ sysfs_remove_dir(kobj);
+ return error;
+ }
}
/*
@@ -589,7 +591,8 @@ static void __kobject_del(struct kobject *kobj)
sd = kobj->sd;
ktype = get_ktype(kobj);
- sysfs_remove_groups(kobj, ktype->default_groups);
+ if (ktype)
+ sysfs_remove_groups(kobj, ktype->default_groups);
/* send "remove" if the caller did not do it but sent "add" */
if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
@@ -666,6 +669,10 @@ static void kobject_cleanup(struct kobject *kobj)
pr_debug("'%s' (%p): %s, parent %p\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
+ if (t && !t->release)
+ pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
+ kobject_name(kobj), kobj);
+
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("'%s' (%p): auto cleanup kobject_del\n",
@@ -676,13 +683,10 @@ static void kobject_cleanup(struct kobject *kobj)
parent = NULL;
}
- if (t->release) {
+ if (t && t->release) {
pr_debug("'%s' (%p): calling ktype release\n",
kobject_name(kobj), kobj);
t->release(kobj);
- } else {
- pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
- kobject_name(kobj), kobj);
}
/* free name if we allocated it */
@@ -1056,7 +1060,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa
{
const struct kobj_ns_type_operations *ops = NULL;
- if (parent && parent->ktype->child_ns_type)
+ if (parent && parent->ktype && parent->ktype->child_ns_type)
ops = parent->ktype->child_ns_type(parent);
return ops;
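Editor's note: the NULL-ktype checks above keep kobject from crashing on callers that never set a ktype, while the relocated pr_debug() still flags a ktype lacking release() as broken. A minimal sketch of the expected contract (names hypothetical, assumes <linux/slab.h>):

    struct demo_obj {
        struct kobject kobj;
        int payload;
    };

    static void demo_release(struct kobject *kobj)
    {
        kfree(container_of(kobj, struct demo_obj, kobj));
    }

    static const struct kobj_type demo_ktype = {
        .release   = demo_release,
        .sysfs_ops = &kobj_sysfs_ops,
    };

A demo_obj is then set up with kobject_init_and_add(&obj->kobj, &demo_ktype, parent, "demo") and is freed only from demo_release(), once the last reference is put.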
diff --git a/lib/kunit/device-impl.h b/lib/kunit/device-impl.h
index 54bd55836405..5fcd48ff0f36 100644
--- a/lib/kunit/device-impl.h
+++ b/lib/kunit/device-impl.h
@@ -13,5 +13,7 @@
// For internal use only -- registers the kunit_bus.
int kunit_bus_init(void);
+// For internal use only -- unregisters the kunit_bus.
+void kunit_bus_shutdown(void);
#endif //_KUNIT_DEVICE_IMPL_H
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index f5371287b375..644a38a1f5b1 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -45,8 +45,8 @@ int kunit_bus_init(void)
int error;
kunit_bus_device = root_device_register("kunit");
- if (!kunit_bus_device)
- return -ENOMEM;
+ if (IS_ERR(kunit_bus_device))
+ return PTR_ERR(kunit_bus_device);
error = bus_register(&kunit_bus_type);
if (error)
@@ -54,6 +54,20 @@ int kunit_bus_init(void)
return error;
}
+/* Unregister the 'kunit_bus' in case the KUnit module is unloaded. */
+void kunit_bus_shutdown(void)
+{
+ /* Make sure the bus exists before we unregister it. */
+ if (IS_ERR_OR_NULL(kunit_bus_device))
+ return;
+
+ bus_unregister(&kunit_bus_type);
+
+ root_device_unregister(kunit_bus_device);
+
+ kunit_bus_device = NULL;
+}
+
/* Release a 'fake' KUnit device. */
static void kunit_device_release(struct device *d)
{
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 717b9599036b..689fff2b2b10 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -146,6 +146,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
kfree(suite_set.start);
}
+/*
+ * Filter and reallocate test suites. Must return the filtered test suites set
+ * allocated at a valid virtual address or NULL in case of error.
+ */
struct kunit_suite_set
kunit_filter_suites(const struct kunit_suite_set *suite_set,
const char *filter_glob,
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index c4259d910356..f7980ef236a3 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -720,7 +720,7 @@ static void kunit_device_cleanup_test(struct kunit *test)
long action_was_run = 0;
test_device = kunit_device_register(test, "my_device");
- KUNIT_ASSERT_NOT_NULL(test, test_device);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
/* Add an action to verify cleanup. */
devm_add_action(test_device, test_dev_action, &action_was_run);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index f95d2093a0aa..1d1475578515 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -17,6 +17,7 @@
#include <linux/panic.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
+#include <linux/mm.h>
#include "debugfs.h"
#include "device-impl.h"
@@ -801,12 +802,19 @@ static void kunit_module_exit(struct module *mod)
};
const char *action = kunit_action();
+ /*
+ * Check if the start address is a valid virtual address to detect
+ * if the module load sequence has failed and the suite set has not
+ * been initialized and filtered.
+ */
+ if (!suite_set.start || !virt_addr_valid(suite_set.start))
+ return;
+
if (!action)
__kunit_test_suites_exit(mod->kunit_suites,
mod->num_kunit_suites);
- if (suite_set.start)
- kunit_free_suite_set(suite_set);
+ kunit_free_suite_set(suite_set);
}
static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
@@ -816,12 +824,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
switch (val) {
case MODULE_STATE_LIVE:
+ kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
- kunit_module_init(mod);
break;
case MODULE_STATE_UNFORMED:
break;
@@ -920,6 +928,9 @@ static void __exit kunit_exit(void)
#ifdef CONFIG_MODULES
unregister_module_notifier(&kunit_mod_nb);
#endif
+
+ kunit_bus_shutdown();
+
kunit_debugfs_cleanup();
}
module_exit(kunit_exit);
diff --git a/lib/nlattr.c b/lib/nlattr.c
index dc15e7888fc1..be9c576b6e2d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -30,6 +30,8 @@ static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
@@ -43,6 +45,8 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
+ [NLA_BE16] = sizeof(__be16),
+ [NLA_BE32] = sizeof(__be32),
};
/*
@@ -758,7 +762,7 @@ EXPORT_SYMBOL(nla_find);
* @dstsize: Size of destination buffer.
*
* Copies at most dstsize - 1 bytes into the destination buffer.
- * Unlike strlcpy the destination buffer is always padded out.
+ * Unlike strscpy() the destination buffer is always padded out.
*
* Return:
* * srclen - Returns @nla length (not including the trailing %NUL).
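Editor's note: with the NLA_BE16/NLA_BE32 length-table entries added above, a policy can declare big-endian attributes and have their payload sizes validated. A minimal sketch (attribute names hypothetical):

    enum {
        DEMO_ATTR_UNSPEC,
        DEMO_ATTR_PORT,     /* __be16 on the wire */
        DEMO_ATTR_ADDR,     /* __be32 on the wire */
        __DEMO_ATTR_MAX,
    };
    #define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

    static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
        [DEMO_ATTR_PORT] = { .type = NLA_BE16 },
        [DEMO_ATTR_ADDR] = { .type = NLA_BE32 },
    };

Validated attributes are then read with nla_get_be16()/nla_get_be32(), keeping the byte order explicit in the type system.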
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index d0a5081dfd12..92c6b1fd8989 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -388,11 +388,6 @@ static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
unsigned int shallow_depth;
/*
- * For each batch, we wake up one queue. We need to make sure that our
- * batch size is small enough that the full depth of the bitmap,
- * potentially limited by a shallow depth, is enough to wake up all of
- * the queues.
- *
* Each full word of the bitmap has bits_per_word bits, and there might
* be a partial word. There are depth / bits_per_word full words and
* depth % bits_per_word bits left over. In bitwise arithmetic:
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 010c730ca7fc..f3f3436d60a9 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -13,16 +13,26 @@
* seq_buf_init() more than once to reset the seq_buf to start
* from scratch.
*/
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/hex.h>
+#include <linux/minmax.h>
+#include <linux/printk.h>
#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
/**
* seq_buf_can_fit - can the new data fit in the current buffer?
* @s: the seq_buf descriptor
* @len: The length to see if it can fit in the current buffer
*
- * Returns true if there's enough unused space in the seq_buf buffer
+ * Returns: true if there's enough unused space in the seq_buf buffer
* to fit the amount of new data according to @len.
*/
static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
@@ -35,7 +45,7 @@ static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
* @m: the seq_file descriptor that is the destination
* @s: the seq_buf descriptor that is the source.
*
- * Returns zero on success, non zero otherwise
+ * Returns: zero on success, non-zero otherwise.
*/
int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
{
@@ -50,9 +60,9 @@ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
* @fmt: printf format string
* @args: va_list of arguments from a printf() type function
*
- * Writes a vnprintf() format into the sequencce buffer.
+ * Writes a vsnprintf() format into the sequence buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
{
@@ -78,7 +88,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
*
* Writes a printf() format into the sequence buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
{
@@ -94,12 +104,12 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
EXPORT_SYMBOL_GPL(seq_buf_printf);
/**
- * seq_buf_do_printk - printk seq_buf line by line
+ * seq_buf_do_printk - printk() seq_buf line by line
* @s: seq_buf descriptor
* @lvl: printk level
*
* printk()-s a multi-line sequential buffer line by line. The function
- * makes sure that the buffer in @s is nul terminated and safe to read
+ * makes sure that the buffer in @s is NUL-terminated and safe to read
* as a string.
*/
void seq_buf_do_printk(struct seq_buf *s, const char *lvl)
@@ -139,7 +149,7 @@ EXPORT_SYMBOL_GPL(seq_buf_do_printk);
* This function will take the format and the binary array and finish
* the conversion into the ASCII string within the buffer.
*
- * Returns zero on success, -1 on overflow.
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
{
@@ -167,7 +177,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
*
* Copy a simple string into the sequence buffer.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_puts(struct seq_buf *s, const char *str)
{
@@ -196,7 +206,7 @@ EXPORT_SYMBOL_GPL(seq_buf_puts);
*
* Copy a single character into the sequence buffer.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putc(struct seq_buf *s, unsigned char c)
{
@@ -212,7 +222,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c)
EXPORT_SYMBOL_GPL(seq_buf_putc);
/**
- * seq_buf_putmem - write raw data into the sequenc buffer
+ * seq_buf_putmem - write raw data into the sequence buffer
* @s: seq_buf descriptor
* @mem: The raw memory to copy into the buffer
* @len: The length of the raw memory to copy (in bytes)
@@ -221,7 +231,7 @@ EXPORT_SYMBOL_GPL(seq_buf_putc);
* buffer and a strcpy() would not work. Using this function allows
* for such cases.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
{
@@ -249,7 +259,7 @@ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
* raw memory into the buffer it writes its ASCII representation of it
* in hex characters.
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len)
@@ -297,7 +307,7 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
*
* Write a path name into the sequence buffer.
*
- * Returns the number of written bytes on success, -1 on overflow
+ * Returns: the number of written bytes on success, -1 on overflow.
*/
int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
{
@@ -332,6 +342,7 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
* or until it reaches the end of the content in the buffer (@s->len),
* whichever comes first.
*
+ * Returns:
* On success, it returns a positive number of the number of bytes
* it copied.
*
@@ -382,11 +393,11 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt)
* linebuf size is maximal length for one line.
* 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for
* separating space
- * 2 - spaces separating hex dump and ascii representation
- * 32 - ascii representation
+ * 2 - spaces separating hex dump and ASCII representation
+ * 32 - ASCII representation
* 1 - terminating '\0'
*
- * Returns zero on success, -1 on overflow
+ * Returns: zero on success, -1 on overflow.
*/
int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
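A minimal usage sketch tying together the Returns: conventions documented above (hypothetical caller; the cpu and state variables are stand-ins, everything else is the seq_buf API from this file):

	char buf[128];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	if (seq_buf_printf(&s, "cpu %d: %s\n", cpu, state) < 0)
		pr_warn("seq_buf overflowed\n");	/* -1 on overflow */
	/* NUL-terminates the buffer and prints it line by line */
	seq_buf_do_printk(&s, KERN_INFO);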
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index a0be5d05c7f0..4a7055a63d9f 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) "stackdepot: " fmt
+#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
@@ -21,8 +22,10 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
-#include <linux/percpu.h>
+#include <linux/poison.h>
#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -41,17 +44,7 @@
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
STACK_DEPOT_EXTRA_BITS)
-#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
-/*
- * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
- * traces. As KMSAN does not support evicting stack traces from the stack
- * depot, the stack depot capacity might be reached quickly with large stack
- * records. Adjust the maximum number of stack depot pools for this case.
- */
-#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
-#else
#define DEPOT_POOLS_CAP 8192
-#endif
#define DEPOT_MAX_POOLS \
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
(1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
@@ -67,17 +60,30 @@ union handle_parts {
};
struct stack_record {
- struct list_head list; /* Links in hash table or freelist */
+ struct list_head hash_list; /* Links in the hash table */
u32 hash; /* Hash in hash table */
u32 size; /* Number of stored frames */
- union handle_parts handle;
+ union handle_parts handle; /* Constant after initialization */
refcount_t count;
- unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
+ union {
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
+ struct {
+ /*
+ * An important invariant of the implementation is to
+ * only place a stack record onto the freelist iff its
+ * refcount is zero. Because stack records with a zero
+ * refcount are never considered as valid, it is safe to
+ * union @entries and freelist management state below.
+ * Conversely, as soon as an entry is off the freelist
+ * and its refcount becomes non-zero, the below must not
+ * be accessed until being placed back on the freelist.
+ */
+ struct list_head free_list; /* Links in the freelist */
+ unsigned long rcu_state; /* RCU cookie */
+ };
+ };
};
-#define DEPOT_STACK_RECORD_SIZE \
- ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
-
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
@@ -103,17 +109,33 @@ static void *stack_pools[DEPOT_MAX_POOLS];
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset = DEPOT_POOL_SIZE;
/* Freelist of stack records within stack_pools. */
static LIST_HEAD(free_stacks);
-/*
- * Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool. This flag marks whether this extra pool
- * needs to be allocated. It has the value 0 when either an extra pool is not
- * yet allocated or if the limit on the number of pools is reached.
- */
-static bool new_pool_required = true;
-/* Lock that protects the variables above. */
-static DEFINE_RWLOCK(pool_rwlock);
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
+
+/* Statistics counters for debugfs. */
+enum depot_counter_id {
+ DEPOT_COUNTER_REFD_ALLOCS,
+ DEPOT_COUNTER_REFD_FREES,
+ DEPOT_COUNTER_REFD_INUSE,
+ DEPOT_COUNTER_FREELIST_SIZE,
+ DEPOT_COUNTER_PERSIST_COUNT,
+ DEPOT_COUNTER_PERSIST_BYTES,
+ DEPOT_COUNTER_COUNT,
+};
+static long counters[DEPOT_COUNTER_COUNT];
+static const char *const counter_names[] = {
+ [DEPOT_COUNTER_REFD_ALLOCS] = "refcounted_allocations",
+ [DEPOT_COUNTER_REFD_FREES] = "refcounted_frees",
+ [DEPOT_COUNTER_REFD_INUSE] = "refcounted_in_use",
+ [DEPOT_COUNTER_FREELIST_SIZE] = "freelist_size",
+ [DEPOT_COUNTER_PERSIST_COUNT] = "persistent_count",
+ [DEPOT_COUNTER_PERSIST_BYTES] = "persistent_bytes",
+};
+static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
static int __init disable_stack_depot(char *str)
{
@@ -258,174 +280,273 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(stack_depot_init);
-/* Initializes a stack depol pool. */
-static void depot_init_pool(void *pool)
+/*
+ * Initializes new stack pool, and updates the list of pools.
+ */
+static bool depot_init_pool(void **prealloc)
{
- int offset;
+ lockdep_assert_held(&pool_lock);
- lockdep_assert_held_write(&pool_rwlock);
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ /* Bail out if we reached the pool limit. */
+ WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+ WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
+ }
- WARN_ON(!list_empty(&free_stacks));
+ if (!new_pool && *prealloc) {
+ /* We have preallocated memory, use it. */
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+ }
- /* Initialize handles and link stack records into the freelist. */
- for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
- offset += DEPOT_STACK_RECORD_SIZE) {
- struct stack_record *stack = pool + offset;
+ if (!new_pool)
+ return false; /* new_pool and *prealloc are NULL */
- stack->handle.pool_index = pools_num;
- stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
- stack->handle.extra = 0;
+ /* Save reference to the pool to be used by depot_fetch_stack(). */
+ stack_pools[pools_num] = new_pool;
- list_add(&stack->list, &free_stacks);
- }
+ /*
+ * Stack depot tries to keep an extra pool allocated even before it runs
+ * out of space in the currently used pool.
+ *
+ * To indicate that a new preallocation is needed, new_pool is reset to
+ * To indicate that a new preallocation is needed, new_pool is reset to
+ * NULL; do not reset to NULL if we have reached the maximum number of
+ * pools.
+ */
+ if (pools_num < DEPOT_MAX_POOLS)
+ WRITE_ONCE(new_pool, NULL);
+ else
+ WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
- /* Save reference to the pool to be used by depot_fetch_stack(). */
- stack_pools[pools_num] = pool;
- pools_num++;
+ /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
+ WRITE_ONCE(pools_num, pools_num + 1);
+ ASSERT_EXCLUSIVE_WRITER(pools_num);
+
+ pool_offset = 0;
+
+ return true;
}
/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
{
- lockdep_assert_held_write(&pool_rwlock);
+ lockdep_assert_held(&pool_lock);
/*
* If a new pool is already saved or the maximum number of
* pools is reached, do not use the preallocated memory.
*/
- if (!new_pool_required)
+ if (new_pool)
return;
- /*
- * Use the preallocated memory for the new pool
- * as long as we do not exceed the maximum number of pools.
- */
- if (pools_num < DEPOT_MAX_POOLS) {
- new_pool = *prealloc;
- *prealloc = NULL;
+ WRITE_ONCE(new_pool, *prealloc);
+ *prealloc = NULL;
+}
+
+/*
+ * Try to initialize a new stack record from the current pool, a cached pool, or
+ * the current pre-allocation.
+ */
+static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
+{
+ struct stack_record *stack;
+ void *current_pool;
+ u32 pool_index;
+
+ lockdep_assert_held(&pool_lock);
+
+ if (pool_offset + size > DEPOT_POOL_SIZE) {
+ if (!depot_init_pool(prealloc))
+ return NULL;
}
- /*
- * At this point, either a new pool is kept or the maximum
- * number of pools is reached. In either case, take note that
- * keeping another pool is not required.
- */
- new_pool_required = false;
+ if (WARN_ON_ONCE(pools_num < 1))
+ return NULL;
+ pool_index = pools_num - 1;
+ current_pool = stack_pools[pool_index];
+ if (WARN_ON_ONCE(!current_pool))
+ return NULL;
+
+ stack = current_pool + pool_offset;
+
+ /* Pre-initialize handle once. */
+ stack->handle.pool_index = pool_index;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+ INIT_LIST_HEAD(&stack->hash_list);
+
+ pool_offset += size;
+
+ return stack;
}
-/* Updates references to the current and the next stack depot pools. */
-static bool depot_update_pools(void **prealloc)
+/* Try to find next free usable entry from the freelist. */
+static struct stack_record *depot_pop_free(void)
{
- lockdep_assert_held_write(&pool_rwlock);
+ struct stack_record *stack;
- /* Check if we still have objects in the freelist. */
- if (!list_empty(&free_stacks))
- goto out_keep_prealloc;
+ lockdep_assert_held(&pool_lock);
- /* Check if we have a new pool saved and use it. */
- if (new_pool) {
- depot_init_pool(new_pool);
- new_pool = NULL;
+ if (list_empty(&free_stacks))
+ return NULL;
- /* Take note that we might need a new new_pool. */
- if (pools_num < DEPOT_MAX_POOLS)
- new_pool_required = true;
+ /*
+ * We maintain the invariant that the elements in front are least
+ * recently used, and are therefore more likely to be associated with an
+ * RCU grace period in the past. Consequently it is sufficient to only
+ * check the first entry.
+ */
+ stack = list_first_entry(&free_stacks, struct stack_record, free_list);
+ if (!poll_state_synchronize_rcu(stack->rcu_state))
+ return NULL;
- /* Try keeping the preallocated memory for new_pool. */
- goto out_keep_prealloc;
- }
+ list_del(&stack->free_list);
+ counters[DEPOT_COUNTER_FREELIST_SIZE]--;
- /* Bail out if we reached the pool limit. */
- if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return false;
- }
+ return stack;
+}
- /* Check if we have preallocated memory and use it. */
- if (*prealloc) {
- depot_init_pool(*prealloc);
- *prealloc = NULL;
- return true;
- }
+static inline size_t depot_stack_record_size(struct stack_record *s, unsigned int nr_entries)
+{
+ const size_t used = flex_array_size(s, entries, nr_entries);
+ const size_t unused = sizeof(s->entries) - used;
- return false;
+ WARN_ON_ONCE(sizeof(s->entries) < used);
-out_keep_prealloc:
- /* Keep the preallocated memory for a new pool if required. */
- if (*prealloc)
- depot_keep_new_pool(prealloc);
- return true;
+ return ALIGN(sizeof(struct stack_record) - unused, 1 << DEPOT_STACK_ALIGN);
}
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
{
- struct stack_record *stack;
+ struct stack_record *stack = NULL;
+ size_t record_size;
- lockdep_assert_held_write(&pool_rwlock);
+ lockdep_assert_held(&pool_lock);
- /* Update current and new pools if required and possible. */
- if (!depot_update_pools(prealloc))
+ /* This should already be checked by public API entry points. */
+ if (WARN_ON_ONCE(!nr_entries))
return NULL;
- /* Check if we have a stack record to save the stack trace. */
- if (list_empty(&free_stacks))
- return NULL;
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (nr_entries > CONFIG_STACKDEPOT_MAX_FRAMES)
+ nr_entries = CONFIG_STACKDEPOT_MAX_FRAMES;
- /* Get and unlink the first entry from the freelist. */
- stack = list_first_entry(&free_stacks, struct stack_record, list);
- list_del(&stack->list);
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ /*
+ * Evictable entries have to allocate the max. size so they may
+ * safely be re-used by differently sized allocations.
+ */
+ record_size = depot_stack_record_size(stack, CONFIG_STACKDEPOT_MAX_FRAMES);
+ stack = depot_pop_free();
+ } else {
+ record_size = depot_stack_record_size(stack, nr_entries);
+ }
- /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
- if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
- size = CONFIG_STACKDEPOT_MAX_FRAMES;
+ if (!stack) {
+ stack = depot_pop_free_pool(prealloc, record_size);
+ if (!stack)
+ return NULL;
+ }
/* Save the stack trace. */
stack->hash = hash;
- stack->size = size;
- /* stack->handle is already filled in by depot_init_pool(). */
- refcount_set(&stack->count, 1);
- memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
+ stack->size = nr_entries;
+ /* stack->handle is already filled in by depot_pop_free_pool(). */
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, nr_entries));
+
+ if (flags & STACK_DEPOT_FLAG_GET) {
+ refcount_set(&stack->count, 1);
+ counters[DEPOT_COUNTER_REFD_ALLOCS]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]++;
+ } else {
+ /* Warn on attempts to switch to refcounting this entry. */
+ refcount_set(&stack->count, REFCOUNT_SATURATED);
+ counters[DEPOT_COUNTER_PERSIST_COUNT]++;
+ counters[DEPOT_COUNTER_PERSIST_BYTES] += record_size;
+ }
/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
- kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
+ kmsan_unpoison_memory(stack, record_size);
return stack;
}
static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
{
+ const int pools_num_cached = READ_ONCE(pools_num);
union handle_parts parts = { .handle = handle };
void *pool;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
- lockdep_assert_held(&pool_rwlock);
+ lockdep_assert_not_held(&pool_lock);
- if (parts.pool_index > pools_num) {
+ if (parts.pool_index > pools_num_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pools_num, handle);
+ parts.pool_index, pools_num_cached, handle);
return NULL;
}
pool = stack_pools[parts.pool_index];
- if (!pool)
+ if (WARN_ON(!pool))
return NULL;
stack = pool + offset;
+ if (WARN_ON(!refcount_read(&stack->count)))
+ return NULL;
+
return stack;
}
/* Links stack into the freelist. */
static void depot_free_stack(struct stack_record *stack)
{
- lockdep_assert_held_write(&pool_rwlock);
+ unsigned long flags;
+
+ lockdep_assert_not_held(&pool_lock);
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ printk_deferred_enter();
- list_add(&stack->list, &free_stacks);
+ /*
+ * Remove the entry from the hash list. Concurrent list traversal may
+ * still observe the entry, but since the refcount is zero, this entry
+ * will no longer be considered as valid.
+ */
+ list_del_rcu(&stack->hash_list);
+
+ /*
+ * Due to being used from constrained contexts such as the allocators,
+ * NMI, or even RCU itself, stack depot cannot rely on primitives that
+ * would sleep (such as synchronize_rcu()) or recursively call into
+ * stack depot again (such as call_rcu()).
+ *
+ * Instead, get an RCU cookie, so that we can ensure this entry isn't
+ * moved onto another list until the next grace period, and concurrent
+ * RCU list traversal remains safe.
+ */
+ stack->rcu_state = get_state_synchronize_rcu();
+
+ /*
+ * Add the entry to the freelist tail, so that older entries are
+ * considered first - their RCU cookie is more likely to no longer be
+ * associated with the current grace period.
+ */
+ list_add_tail(&stack->free_list, &free_stacks);
+
+ counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+ counters[DEPOT_COUNTER_REFD_FREES]++;
+ counters[DEPOT_COUNTER_REFD_INUSE]--;
+
+ printk_deferred_exit();
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/* Calculates the hash for a stack. */
@@ -453,22 +574,52 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct list_head *bucket,
- unsigned long *entries, int size,
- u32 hash)
+ unsigned long *entries, int size,
+ u32 hash, depot_flags_t flags)
{
- struct list_head *pos;
- struct stack_record *found;
+ struct stack_record *stack, *ret = NULL;
- lockdep_assert_held(&pool_rwlock);
+ /*
+ * Stack depot may be used from instrumentation that instruments RCU or
+ * tracing itself; use variant that does not call into RCU and cannot be
+ * traced.
+ *
+ * Note: Such use cases must take care when using refcounting to evict
+ * unused entries, because the stack record free-then-reuse code paths
+ * do call into RCU.
+ */
+ rcu_read_lock_sched_notrace();
+
+ list_for_each_entry_rcu(stack, bucket, hash_list) {
+ if (stack->hash != hash || stack->size != size)
+ continue;
+
+ /*
+ * This may race with depot_free_stack() accessing the freelist
+ * management state unioned with @entries. The refcount is zero
+ * in that case and the below refcount_inc_not_zero() will fail.
+ */
+ if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
+ continue;
+
+ /*
+ * Try to increment refcount. If this succeeds, the stack record
+ * is valid and has not yet been freed.
+ *
+ * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
+ * to then call stack_depot_put() later, and we can assume that
+ * a stack record is never placed back on the freelist.
+ */
+ if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
+ continue;
- list_for_each(pos, bucket) {
- found = list_entry(pos, struct stack_record, list);
- if (found->hash == hash &&
- found->size == size &&
- !stackdepot_memcmp(entries, found->entries, size))
- return found;
+ ret = stack;
+ break;
}
- return NULL;
+
+ rcu_read_unlock_sched_notrace();
+
+ return ret;
}
depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
@@ -482,7 +633,6 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
struct page *page = NULL;
void *prealloc = NULL;
bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
- bool need_alloc = false;
unsigned long flags;
u32 hash;
@@ -505,31 +655,16 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask];
- read_lock_irqsave(&pool_rwlock, flags);
- printk_deferred_enter();
-
- /* Fast path: look the stack trace up without full locking. */
- found = find_stack(bucket, entries, nr_entries, hash);
- if (found) {
- if (depot_flags & STACK_DEPOT_FLAG_GET)
- refcount_inc(&found->count);
- printk_deferred_exit();
- read_unlock_irqrestore(&pool_rwlock, flags);
+ /* Fast path: look the stack trace up without locking. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
+ if (found)
goto exit;
- }
-
- /* Take note if another stack pool needs to be allocated. */
- if (new_pool_required)
- need_alloc = true;
-
- printk_deferred_exit();
- read_unlock_irqrestore(&pool_rwlock, flags);
/*
* Allocate memory for a new pool if required now:
* we won't be able to do that under the lock.
*/
- if (unlikely(can_alloc && need_alloc)) {
+ if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -543,31 +678,36 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
prealloc = page_address(page);
}
- write_lock_irqsave(&pool_rwlock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
printk_deferred_enter();
- found = find_stack(bucket, entries, nr_entries, hash);
+ /* Try to find again, to avoid concurrently inserting duplicates. */
+ found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
if (!found) {
struct stack_record *new =
- depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+ depot_alloc_stack(entries, nr_entries, hash, depot_flags, &prealloc);
if (new) {
- list_add(&new->list, bucket);
+ /*
+ * This releases the stack record into the bucket and
+ * makes it visible to readers in find_stack().
+ */
+ list_add_rcu(&new->hash_list, bucket);
found = new;
}
- } else {
- if (depot_flags & STACK_DEPOT_FLAG_GET)
- refcount_inc(&found->count);
+ }
+
+ if (prealloc) {
/*
- * Stack depot already contains this stack trace, but let's
- * keep the preallocated memory for future.
+ * Either stack depot already contains this stack trace, or
+ * depot_alloc_stack() did not consume the preallocated memory.
+ * Try to keep the preallocated memory for future use.
*/
- if (prealloc)
- depot_keep_new_pool(&prealloc);
+ depot_keep_new_pool(&prealloc);
}
printk_deferred_exit();
- write_unlock_irqrestore(&pool_rwlock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
/* Stack depot didn't use this memory, free it. */
@@ -592,7 +732,6 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
struct stack_record *stack;
- unsigned long flags;
*entries = NULL;
/*
@@ -604,13 +743,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
if (!handle || stack_depot_disabled)
return 0;
- read_lock_irqsave(&pool_rwlock, flags);
- printk_deferred_enter();
-
stack = depot_fetch_stack(handle);
-
- printk_deferred_exit();
- read_unlock_irqrestore(&pool_rwlock, flags);
+ /*
+ * Should never be NULL; otherwise this is a use-after-put (or just a
+ * corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
+ return 0;
*entries = stack->entries;
return stack->size;
@@ -620,29 +759,20 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
void stack_depot_put(depot_stack_handle_t handle)
{
struct stack_record *stack;
- unsigned long flags;
if (!handle || stack_depot_disabled)
return;
- write_lock_irqsave(&pool_rwlock, flags);
- printk_deferred_enter();
-
stack = depot_fetch_stack(handle);
- if (WARN_ON(!stack))
- goto out;
-
- if (refcount_dec_and_test(&stack->count)) {
- /* Unlink stack from the hash table. */
- list_del(&stack->list);
+ /*
+ * Should always be able to find the stack record; otherwise this is an
+ * unbalanced put attempt (or corrupt handle).
+ */
+ if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
+ return;
- /* Free stack. */
+ if (refcount_dec_and_test(&stack->count))
depot_free_stack(stack);
- }
-
-out:
- printk_deferred_exit();
- write_unlock_irqrestore(&pool_rwlock, flags);
}
EXPORT_SYMBOL_GPL(stack_depot_put);
@@ -690,3 +820,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+ /*
+ * data race ok: These are just statistics counters, and approximate
+ * statistics are ok for debugging.
+ */
+ seq_printf(seq, "pools: %d\n", data_race(pools_num));
+ for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
+ seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stats);
+
+static int depot_debugfs_init(void)
+{
+ struct dentry *dir;
+
+ if (stack_depot_disabled)
+ return 0;
+
+ dir = debugfs_create_dir("stackdepot", NULL);
+ debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
+ return 0;
+}
+late_initcall(depot_debugfs_init);
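Taken together, the stackdepot changes above replace pool_rwlock with a lock-free RCU hash lookup plus a raw spinlock for pool and freelist updates. A sketch of the refcounted usage this supports (hypothetical caller; stack_trace_save(), stack_depot_save_flags() and stack_depot_put() are the exported entry points exercised above):

	unsigned long entries[16];
	unsigned int nr;
	depot_stack_handle_t handle;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* STACK_DEPOT_FLAG_GET makes the record refcounted and evictable */
	handle = stack_depot_save_flags(entries, nr, GFP_NOWAIT,
					STACK_DEPOT_FLAG_CAN_ALLOC |
					STACK_DEPOT_FLAG_GET);
	/* ... use the handle ... */
	/* drops the reference; at zero the record moves to the RCU freelist */
	stack_depot_put(handle);

The new counters are then readable from /sys/kernel/debug/stackdepot/stats via the stats_show() file added above.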
diff --git a/lib/string.c b/lib/string.c
index be26623953d2..6891d15ce991 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -103,21 +103,6 @@ char *strncpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strncpy);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = strlen(src);
-
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- __builtin_memcpy(dest, src, len);
- dest[len] = '\0';
- }
- return ret;
-}
-EXPORT_SYMBOL(strlcpy);
-#endif
-
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *dest, const char *src, size_t count)
{
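With strlcpy() removed, remaining callers convert to strscpy(), whose definition the context lines above show. A minimal conversion sketch (hypothetical buffer names):

	char name[16];
	ssize_t len;

	len = strscpy(name, src, sizeof(name));	/* -E2BIG on truncation */
	if (len == -E2BIG)
		pr_warn("name truncated\n");

Unlike strlcpy(), strscpy() never returns the full source length, so it does not need to read past the destination size in an unterminated source.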
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
index 4c40580a99a3..f247089d63c0 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/test_blackhole_dev.c
@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
- struct ethhdr *ethh;
struct udphdr *uh;
int data_len;
int ret;
@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
/* Ether */
- ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_push(skb, sizeof(struct ethhdr));
skb_set_mac_header(skb, 0);
skb->protocol = htons(ETH_P_IPV6);
diff --git a/lib/test_fortify/write_overflow-strlcpy-src.c b/lib/test_fortify/write_overflow-strlcpy-src.c
deleted file mode 100644
index 91bf83ebd34a..000000000000
--- a/lib/test_fortify/write_overflow-strlcpy-src.c
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define TEST \
- strlcpy(small, large_src, sizeof(small) + 1)
-
-#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strlcpy.c b/lib/test_fortify/write_overflow-strlcpy.c
deleted file mode 100644
index 1883db7c0cd6..000000000000
--- a/lib/test_fortify/write_overflow-strlcpy.c
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#define TEST \
- strlcpy(instance.buf, large_src, sizeof(instance.buf) + 1)
-
-#include "test_fortify.h"
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1e3447bccdb1..e039d05304dd 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -436,7 +436,6 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
INIT_LIST_HEAD(&wb->work_list);
INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
- wb->dirty_sleep = jiffies;
err = fprop_local_init_percpu(&wb->completions, gfp);
if (err)
@@ -921,6 +920,7 @@ int bdi_init(struct backing_dev_info *bdi)
INIT_LIST_HEAD(&bdi->bdi_list);
INIT_LIST_HEAD(&bdi->wb_list);
init_waitqueue_head(&bdi->wb_waitq);
+ bdi->last_bdp_sleep = jiffies;
return cgwb_bdi_init(bdi);
}
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 36f6f1d21ff0..5b325749fc12 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1026,6 +1026,9 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
damon_for_each_scheme(s, c) {
struct damos_quota *quota = &s->quota;
+ if (c->passed_sample_intervals != s->next_apply_sis)
+ continue;
+
if (!s->wmarks.activated)
continue;
@@ -1176,10 +1179,6 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
if (c->passed_sample_intervals != s->next_apply_sis)
continue;
- s->next_apply_sis +=
- (s->apply_interval_us ? s->apply_interval_us :
- c->attrs.aggr_interval) / sample_interval;
-
if (!s->wmarks.activated)
continue;
@@ -1195,6 +1194,14 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
damon_for_each_region_safe(r, next_r, t)
damon_do_apply_schemes(c, t, r);
}
+
+ damon_for_each_scheme(s, c) {
+ if (c->passed_sample_intervals != s->next_apply_sis)
+ continue;
+ s->next_apply_sis +=
+ (s->apply_interval_us ? s->apply_interval_us :
+ c->attrs.aggr_interval) / sample_interval;
+ }
}
/*
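The two damon/core.c hunks above work as a pair: damon_do_apply_schemes() now filters on s->next_apply_sis itself, so kdamond_apply_schemes() must stop bumping next_apply_sis before the region walk and instead advance it in a separate scheme loop after all regions have been handled; otherwise the new per-scheme check would skip every region.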
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index f2e5f9431892..3de2916a65c3 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -185,9 +185,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
}
+static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
+ struct damos_quota *src)
+{
+ dst->total_charged_sz = src->total_charged_sz;
+ dst->total_charged_ns = src->total_charged_ns;
+ dst->charged_sz = src->charged_sz;
+ dst->charged_from = src->charged_from;
+ dst->charge_target_from = src->charge_target_from;
+ dst->charge_addr_from = src->charge_addr_from;
+}
+
static int damon_lru_sort_apply_parameters(void)
{
- struct damos *scheme;
+ struct damos *scheme, *hot_scheme, *cold_scheme;
+ struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
unsigned int hot_thres, cold_thres;
int err = 0;
@@ -195,18 +207,35 @@ static int damon_lru_sort_apply_parameters(void)
if (err)
return err;
+ damon_for_each_scheme(scheme, ctx) {
+ if (!old_hot_scheme) {
+ old_hot_scheme = scheme;
+ continue;
+ }
+ old_cold_scheme = scheme;
+ }
+
hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
hot_thres_access_freq / 1000;
- scheme = damon_lru_sort_new_hot_scheme(hot_thres);
- if (!scheme)
+ hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+ if (!hot_scheme)
return -ENOMEM;
- damon_set_schemes(ctx, &scheme, 1);
+ if (old_hot_scheme)
+ damon_lru_sort_copy_quota_status(&hot_scheme->quota,
+ &old_hot_scheme->quota);
cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
- scheme = damon_lru_sort_new_cold_scheme(cold_thres);
- if (!scheme)
+ cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+ if (!cold_scheme) {
+ damon_destroy_scheme(hot_scheme);
return -ENOMEM;
- damon_add_scheme(ctx, scheme);
+ }
+ if (old_cold_scheme)
+ damon_lru_sort_copy_quota_status(&cold_scheme->quota,
+ &old_cold_scheme->quota);
+
+ damon_set_schemes(ctx, &hot_scheme, 1);
+ damon_add_scheme(ctx, cold_scheme);
return damon_set_region_biggest_system_ram_default(target,
&monitor_region_start,
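Without the damon_lru_sort_copy_quota_status() carry-over above, every re-application of the module parameters would rebuild the hot and cold schemes with zeroed quota charge state, forgetting how much had already been charged in the current interval and letting the fresh schemes exceed their quotas. The same pattern is applied to damon/reclaim.c below.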
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index ab974e477d2f..66e190f0374a 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -150,9 +150,20 @@ static struct damos *damon_reclaim_new_scheme(void)
&damon_reclaim_wmarks);
}
+static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
+ struct damos_quota *src)
+{
+ dst->total_charged_sz = src->total_charged_sz;
+ dst->total_charged_ns = src->total_charged_ns;
+ dst->charged_sz = src->charged_sz;
+ dst->charged_from = src->charged_from;
+ dst->charge_target_from = src->charge_target_from;
+ dst->charge_addr_from = src->charge_addr_from;
+}
+
static int damon_reclaim_apply_parameters(void)
{
- struct damos *scheme;
+ struct damos *scheme, *old_scheme;
struct damos_filter *filter;
int err = 0;
@@ -164,6 +175,11 @@ static int damon_reclaim_apply_parameters(void)
scheme = damon_reclaim_new_scheme();
if (!scheme)
return -ENOMEM;
+ if (!list_empty(&ctx->schemes)) {
+ damon_for_each_scheme(old_scheme, ctx)
+ damon_reclaim_copy_quota_status(&scheme->quota,
+ &old_scheme->quota);
+ }
if (skip_anon) {
filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
if (!filter) {
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 8dbaac6e5c2d..ae0f0b314f3a 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -1905,6 +1905,10 @@ void damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes,
damon_for_each_scheme(scheme, ctx) {
struct damon_sysfs_scheme *sysfs_scheme;
+ /* user could have removed the scheme sysfs dir */
+ if (i >= sysfs_schemes->nr)
+ break;
+
sysfs_scheme = sysfs_schemes->schemes_arr[i];
damos_sysfs_set_quota_score(sysfs_scheme->quotas->goals,
&scheme->quota);
@@ -2194,7 +2198,7 @@ static void damos_tried_regions_init_upd_status(
sysfs_regions->upd_timeout_jiffies = jiffies +
2 * usecs_to_jiffies(scheme->apply_interval_us ?
scheme->apply_interval_us :
- ctx->attrs.sample_interval);
+ ctx->attrs.aggr_interval);
}
}
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index 5662e29fe253..65c19025da3d 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -362,6 +362,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
vaddr &= HPAGE_PUD_MASK;
pud = pfn_pud(args->pud_pfn, args->page_prot);
+ /*
+ * Some architectures have debug checks to make sure
+ * huge pud mappings are only found with devmap entries.
+ * For now, test with only devmap entries.
+ */
+ pud = pud_mkdevmap(pud);
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);
pudp_set_wrprotect(args->mm, vaddr, args->pudp);
@@ -374,6 +380,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
pud = pfn_pud(args->pud_pfn, args->page_prot);
+ pud = pud_mkdevmap(pud);
pud = pud_wrprotect(pud);
pud = pud_mkclean(pud);
set_pud_at(args->mm, vaddr, args->pudp, pud);
@@ -391,6 +398,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
#endif /* __PAGETABLE_PMD_FOLDED */
pud = pfn_pud(args->pud_pfn, args->page_prot);
+ pud = pud_mkdevmap(pud);
pud = pud_mkyoung(pud);
set_pud_at(args->mm, vaddr, args->pudp, pud);
flush_dcache_page(page);
diff --git a/mm/filemap.c b/mm/filemap.c
index ea49677c6338..4a30de98a8c7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2688,6 +2688,7 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
return filemap_write_and_wait_range(mapping, pos, end);
}
+EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
{
@@ -2715,6 +2716,7 @@ int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
end >> PAGE_SHIFT);
}
+EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
/**
* generic_file_read_iter - generic filesystem read routine
@@ -4109,28 +4111,40 @@ static void filemap_cachestat(struct address_space *mapping,
rcu_read_lock();
xas_for_each(&xas, folio, last_index) {
+ int order;
unsigned long nr_pages;
pgoff_t folio_first_index, folio_last_index;
+ /*
+ * Don't deref the folio. It is not pinned, and might
+ * get freed (and reused) underneath us.
+ *
+ * We *could* pin it, but that would be expensive for
+ * what should be a fast and lightweight syscall.
+ *
+ * Instead, derive all information of interest from
+ * the rcu-protected xarray.
+ */
+
if (xas_retry(&xas, folio))
continue;
+ order = xa_get_order(xas.xa, xas.xa_index);
+ nr_pages = 1 << order;
+ folio_first_index = round_down(xas.xa_index, 1 << order);
+ folio_last_index = folio_first_index + nr_pages - 1;
+
+ /* Folios might straddle the range boundaries, only count covered pages */
+ if (folio_first_index < first_index)
+ nr_pages -= first_index - folio_first_index;
+
+ if (folio_last_index > last_index)
+ nr_pages -= folio_last_index - last_index;
+
if (xa_is_value(folio)) {
/* page is evicted */
void *shadow = (void *)folio;
bool workingset; /* not used */
- int order = xa_get_order(xas.xa, xas.xa_index);
-
- nr_pages = 1 << order;
- folio_first_index = round_down(xas.xa_index, 1 << order);
- folio_last_index = folio_first_index + nr_pages - 1;
-
- /* Folios might straddle the range boundaries, only count covered pages */
- if (folio_first_index < first_index)
- nr_pages -= first_index - folio_first_index;
-
- if (folio_last_index > last_index)
- nr_pages -= folio_last_index - last_index;
cs->nr_evicted += nr_pages;
@@ -4148,24 +4162,13 @@ static void filemap_cachestat(struct address_space *mapping,
goto resched;
}
- nr_pages = folio_nr_pages(folio);
- folio_first_index = folio_pgoff(folio);
- folio_last_index = folio_first_index + nr_pages - 1;
-
- /* Folios might straddle the range boundaries, only count covered pages */
- if (folio_first_index < first_index)
- nr_pages -= first_index - folio_first_index;
-
- if (folio_last_index > last_index)
- nr_pages -= folio_last_index - last_index;
-
/* page is in cache */
cs->nr_cache += nr_pages;
- if (folio_test_dirty(folio))
+ if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
cs->nr_dirty += nr_pages;
- if (folio_test_writeback(folio))
+ if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
cs->nr_writeback += nr_pages;
resched:
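A worked example of the straddle clamping now shared by the evicted and in-cache paths (hypothetical indices): a folio of order 2 occupies indices 4-7, so folio_first_index = 4, folio_last_index = 7 and nr_pages = 4. For a query with first_index = 6 and last_index = 9, the head clamp subtracts 6 - 4 = 2 pages, the tail clamp subtracts nothing, and exactly the two covered pages (indices 6 and 7) are counted.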
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 94ef5c02b459..94c958f7ebb5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -37,6 +37,7 @@
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
+#include <linux/compat.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -809,7 +810,10 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
{
loff_t off_end = off + len;
loff_t off_align = round_up(off, size);
- unsigned long len_pad, ret;
+ unsigned long len_pad, ret, off_sub;
+
+ if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+ return 0;
if (off_end <= off_align || (off_end - off_align) < size)
return 0;
@@ -835,7 +839,13 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
if (ret == addr)
return addr;
- ret += (off - ret) & (size - 1);
+ off_sub = (off - ret) & (size - 1);
+
+ if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
+ !off_sub)
+ return ret + size;
+
+ ret += off_sub;
return ret;
}
@@ -2437,7 +2447,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
page = pmd_page(old_pmd);
folio = page_folio(page);
if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
- folio_set_dirty(folio);
+ folio_mark_dirty(folio);
if (!folio_test_referenced(folio) && pmd_young(old_pmd))
folio_set_referenced(folio);
folio_remove_rmap_pmd(folio, page, vma);
@@ -3563,7 +3573,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
}
if (pmd_dirty(pmdval))
- folio_set_dirty(folio);
+ folio_mark_dirty(folio);
if (pmd_write(pmdval))
entry = make_writable_migration_entry(page_to_pfn(page));
else if (anon_exclusive)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 610efae91220..6ca63e8dda74 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -65,8 +65,7 @@ void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
depot_stack_handle_t stack;
- stack = kasan_save_stack(flags,
- STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+ stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
kasan_set_track(track, stack);
}
@@ -266,10 +265,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
return true;
/*
- * If the object is not put into quarantine, it will likely be quickly
- * reallocated. Thus, release its metadata now.
+ * Note: Keep per-object metadata to allow KASAN to print stack traces for
+ * use-after-free-before-realloc bugs.
*/
- kasan_release_object_meta(cache, object);
/* Let slab put the object onto the freelist. */
return false;
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index df6627f62402..1900f8576034 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -485,16 +485,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
if (alloc_meta) {
/* Zero out alloc meta to mark it as invalid. */
__memset(alloc_meta, 0, sizeof(*alloc_meta));
-
- /*
- * Prepare the lock for saving auxiliary stack traces.
- * Temporarily disable KASAN bug reporting to allow instrumented
- * raw_spin_lock_init to access aux_lock, which resides inside
- * of a redzone.
- */
- kasan_disable_current();
- raw_spin_lock_init(&alloc_meta->aux_lock);
- kasan_enable_current();
}
/*
@@ -506,47 +496,23 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
- /* Evict the stack traces from stack depot. */
- stack_depot_put(meta->alloc_track.stack);
- stack_depot_put(meta->aux_stack[0]);
- stack_depot_put(meta->aux_stack[1]);
-
- /*
- * Zero out alloc meta to mark it as invalid but keep aux_lock
- * initialized to avoid having to reinitialize it when another object
- * is allocated in the same slot.
- */
- __memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
- __memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
+ /* Zero out alloc meta to mark it as invalid. */
+ __memset(meta, 0, sizeof(*meta));
}
static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
+ if (!kasan_arch_is_ready())
+ return;
+
/* Check if free meta is valid. */
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
return;
- /* Evict the stack trace from the stack depot. */
- stack_depot_put(meta->free_track.stack);
-
/* Mark free meta as invalid. */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
-{
- struct kasan_alloc_meta *alloc_meta;
- struct kasan_free_meta *free_meta;
-
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- release_alloc_meta(alloc_meta);
-
- free_meta = kasan_get_free_meta(cache, object);
- if (free_meta)
- release_free_meta(object, free_meta);
-}
-
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
struct kasan_cache *info = &cache->kasan_info;
@@ -571,8 +537,6 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
struct kmem_cache *cache;
struct kasan_alloc_meta *alloc_meta;
void *object;
- depot_stack_handle_t new_handle, old_handle;
- unsigned long flags;
if (is_kfence_address(addr) || !slab)
return;
@@ -583,33 +547,18 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
if (!alloc_meta)
return;
- new_handle = kasan_save_stack(0, depot_flags);
-
- /*
- * Temporarily disable KASAN bug reporting to allow instrumented
- * spinlock functions to access aux_lock, which resides inside of a
- * redzone.
- */
- kasan_disable_current();
- raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
- old_handle = alloc_meta->aux_stack[1];
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = new_handle;
- raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
- kasan_enable_current();
-
- stack_depot_put(old_handle);
+ alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}
void kasan_record_aux_stack(void *addr)
{
- return __kasan_record_aux_stack(addr,
- STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+ return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}
void kasan_record_aux_stack_noalloc(void *addr)
{
- return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
+ return __kasan_record_aux_stack(addr, 0);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -620,7 +569,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
if (!alloc_meta)
return;
- /* Evict previous stack traces (might exist for krealloc or mempool). */
+ /* Invalidate previous stack traces (might exist for krealloc or mempool). */
release_alloc_meta(alloc_meta);
kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -634,7 +583,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
if (!free_meta)
return;
- /* Evict previous stack trace (might exist for mempool). */
+ /* Invalidate previous stack trace (might exist for mempool). */
release_free_meta(object, free_meta);
kasan_save_track(&free_meta->free_track, 0);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d0f172f2b978..fb2b9ac0659a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -6,7 +6,6 @@
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
-#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
struct kasan_alloc_meta {
struct kasan_track alloc_track;
/* Free track is stored in kasan_free_meta. */
- /*
- * aux_lock protects aux_stack from accesses from concurrent
- * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
- * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
- * non-sleepable contexts.
- */
- raw_spinlock_t aux_lock;
depot_stack_handle_t aux_stack[2];
};
@@ -398,10 +390,8 @@ struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
const void *object);
void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
#else
static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
-static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
#endif
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 3ba02efb952a..6958aa713c67 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -145,7 +145,10 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
void *object = qlink_to_object(qlink, cache);
struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
- kasan_release_object_meta(cache, object);
+ /*
+ * Note: Keep per-object metadata to allow KASAN to print stack traces for
+ * use-after-free-before-realloc bugs.
+ */
/*
* If init_on_free is enabled and KASAN's free metadata is stored in
diff --git a/mm/madvise.c b/mm/madvise.c
index 912155a94ed5..cfa5e7288261 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -429,6 +429,7 @@ restart:
if (++batch_count == SWAP_CLUSTER_MAX) {
batch_count = 0;
if (need_resched()) {
+ arch_leave_lazy_mmu_mode();
pte_unmap_unlock(start_pte, ptl);
cond_resched();
goto restart;
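The single added line matters because this PTE loop runs inside a lazy MMU section; batched updates must be flushed before the page table lock is dropped and the task sleeps. The general bracketing, as a sketch (meaningful on architectures where lazy MMU mode is non-trivial, e.g. under paravirt):

	arch_enter_lazy_mmu_mode();
	/* ... batched PTE updates under the PTL ... */
	arch_leave_lazy_mmu_mode();	/* flush before unlocking or sleeping */
	pte_unmap_unlock(start_pte, ptl);
	cond_resched();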
diff --git a/mm/memblock.c b/mm/memblock.c
index abd92869874d..d09136e040d3 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -180,8 +180,9 @@ static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
/*
* Address comparison utilities
*/
-static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
- phys_addr_t base2, phys_addr_t size2)
+unsigned long __init_memblock
+memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
+ phys_addr_t size2)
{
return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
@@ -2176,6 +2177,9 @@ static void __init memmap_init_reserved_pages(void)
start = region->base;
end = start + region->size;
+ if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
+ nid = early_pfn_to_nid(PFN_DOWN(start));
+
reserve_bootmem_region(start, end, nid);
}
}
@@ -2246,6 +2250,7 @@ static const char * const flagname[] = {
[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
+ [ilog2(MEMBLOCK_RSRV_NOINIT)] = "RSV_NIT",
};
static int memblock_debug_show(struct seq_file *m, void *private)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e4c8735e7c85..61932c9215e7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -621,6 +621,15 @@ static inline int memcg_events_index(enum vm_event_item idx)
}
struct memcg_vmstats_percpu {
+ /* Stats updates since the last flush */
+ unsigned int stats_updates;
+
+ /* Cached pointers for fast iteration in memcg_rstat_updated() */
+ struct memcg_vmstats_percpu *parent;
+ struct memcg_vmstats *vmstats;
+
+ /* The above should fit in a single cacheline for memcg_rstat_updated() */
+
/* Local (CPU and cgroup) page state & events */
long state[MEMCG_NR_STAT];
unsigned long events[NR_MEMCG_EVENTS];
@@ -632,10 +641,7 @@ struct memcg_vmstats_percpu {
/* Cgroup1: threshold notifications & softlimit tree updates */
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
-
- /* Stats updates since the last flush */
- unsigned int stats_updates;
-};
+} ____cacheline_aligned;
struct memcg_vmstats {
/* Aggregated (CPU and subtree) page state & events */
@@ -698,36 +704,35 @@ static void memcg_stats_unlock(void)
}
-static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
+static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
- return atomic64_read(&memcg->vmstats->stats_updates) >
+ return atomic64_read(&vmstats->stats_updates) >
MEMCG_CHARGE_BATCH * num_online_cpus();
}
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
+ struct memcg_vmstats_percpu *statc;
int cpu = smp_processor_id();
- unsigned int x;
if (!val)
return;
cgroup_rstat_updated(memcg->css.cgroup, cpu);
-
- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
- abs(val));
-
- if (x < MEMCG_CHARGE_BATCH)
+ statc = this_cpu_ptr(memcg->vmstats_percpu);
+ for (; statc; statc = statc->parent) {
+ statc->stats_updates += abs(val);
+ if (statc->stats_updates < MEMCG_CHARGE_BATCH)
continue;
/*
* If @memcg is already flush-able, increasing stats_updates is
* redundant. Avoid the overhead of the atomic update.
*/
- if (!memcg_should_flush_stats(memcg))
- atomic64_add(x, &memcg->vmstats->stats_updates);
- __this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
+ if (!memcg_vmstats_needs_flush(statc->vmstats))
+ atomic64_add(statc->stats_updates,
+ &statc->vmstats->stats_updates);
+ statc->stats_updates = 0;
}
}
@@ -756,7 +761,7 @@ void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
if (!memcg)
memcg = root_mem_cgroup;
- if (memcg_should_flush_stats(memcg))
+ if (memcg_vmstats_needs_flush(memcg->vmstats))
do_flush_stats(memcg);
}
@@ -770,7 +775,7 @@ void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
static void flush_memcg_stats_dwork(struct work_struct *w)
{
/*
- * Deliberately ignore memcg_should_flush_stats() here so that flushing
+ * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
* in latency-sensitive paths is as cheap as possible.
*/
do_flush_stats(root_mem_cgroup);
@@ -2623,8 +2628,9 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
}
/*
- * Scheduled by try_charge() to be executed from the userland return path
- * and reclaims memory over the high limit.
+ * Reclaims memory over the high limit. Called directly from
+ * try_charge() (context permitting), as well as from the userland
+ * return path where reclaim is always able to block.
*/
void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
@@ -2644,6 +2650,17 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask)
retry_reclaim:
/*
+ * Bail if the task is already exiting. Unlike memory.max,
+ * memory.high enforcement isn't as strict, and there is no
+ * OOM killer involved, which means the excess could already
+ * be much bigger (and still growing) than it could for
+ * memory.max; the dying task could get stuck in fruitless
+ * reclaim for a long time, which isn't desirable.
+ */
+ if (task_is_dying())
+ goto out;
+
+ /*
* The allocating task should reclaim at least the batch size, but for
* subsequent retries we only want to do what's necessary to prevent oom
* or breaching resource isolation.
@@ -2693,6 +2710,9 @@ retry_reclaim:
}
/*
+ * Reclaim didn't manage to push usage below the limit, so slow
+ * this allocating task down.
+ *
* If we exit early, we're guaranteed to die (since
* schedule_timeout_killable sets TASK_KILLABLE). This means we don't
* need to account for any ill-begotten jiffies to pay them off later.
@@ -2887,11 +2907,17 @@ done_restock:
}
} while ((memcg = parent_mem_cgroup(memcg)));
+ /*
+ * Reclaim is set up above to be called from the userland
+ * return path. But also attempt synchronous reclaim to avoid
+ * excessive overrun while the task is still inside the
+ * kernel. If this is successful, the return path will see it
+ * when it rechecks the overage and simply bail out.
+ */
if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
!(current->flags & PF_MEMALLOC) &&
- gfpflags_allow_blocking(gfp_mask)) {
+ gfpflags_allow_blocking(gfp_mask))
mem_cgroup_handle_over_high(gfp_mask);
- }
return 0;
}
@@ -5456,10 +5482,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
__mem_cgroup_free(memcg);
}
-static struct mem_cgroup *mem_cgroup_alloc(void)
+static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
{
+ struct memcg_vmstats_percpu *statc, *pstatc;
struct mem_cgroup *memcg;
- int node;
+ int node, cpu;
int __maybe_unused i;
long error = -ENOMEM;
@@ -5483,6 +5510,14 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (!memcg->vmstats_percpu)
goto fail;
+ for_each_possible_cpu(cpu) {
+ if (parent)
+ pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
+ statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
+ statc->parent = parent ? pstatc : NULL;
+ statc->vmstats = memcg->vmstats;
+ }
+
for_each_node(node)
if (alloc_mem_cgroup_per_node_info(memcg, node))
goto fail;
@@ -5528,7 +5563,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
struct mem_cgroup *memcg, *old_memcg;
old_memcg = set_active_memcg(parent);
- memcg = mem_cgroup_alloc();
+ memcg = mem_cgroup_alloc(parent);
set_active_memcg(old_memcg);
if (IS_ERR(memcg))
return ERR_CAST(memcg);
@@ -7936,9 +7971,13 @@ bool mem_cgroup_swap_full(struct folio *folio)
static int __init setup_swap_account(char *s)
{
- pr_warn_once("The swapaccount= commandline option is deprecated. "
- "Please report your usecase to linux-mm@kvack.org if you "
- "depend on this functionality.\n");
+ bool res;
+
+ if (!kstrtobool(s, &res) && !res)
+ pr_warn_once("The swapaccount=0 commandline option is deprecated "
+ "in favor of configuring swap control via cgroupfs. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
return 1;
}
__setup("swapaccount=", setup_swap_account);
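The memcontrol rework above trades a parent_mem_cgroup() walk per update for pointers cached at allocation time, which is why the hot fields are grouped to fit in a single cacheline. A sketch of the resulting per-CPU layout for a two-level hierarchy (hypothetical cgroup names):

	/*
	 *   root  (statc->parent == NULL)
	 *    `- A  (statc->parent == root's statc on this CPU)
	 *        `- B  (statc->parent == A's statc on this CPU)
	 *
	 * memcg_rstat_updated(B, val) then walks B -> A -> root through the
	 * pointers set up in mem_cgroup_alloc(), with no tree lookups.
	 */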
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4f9b61f4a668..9349948f1abf 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -982,7 +982,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p,
int count = page_count(p) - 1;
if (extra_pins)
- count -= 1;
+ count -= folio_nr_pages(page_folio(p));
if (count > 0) {
pr_err("%#lx: %s still referenced by %d users\n",
@@ -1377,6 +1377,9 @@ void ClearPageHWPoisonTakenOff(struct page *page)
*/
static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
{
+ if (PageSlab(page))
+ return false;
+
/* Soft offline could migrate non-LRU movable pages */
if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
return true;
diff --git a/mm/memory.c b/mm/memory.c
index 7e1f4849463a..0bfc8b007c01 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1464,7 +1464,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
delay_rmap = 0;
if (!folio_test_anon(folio)) {
if (pte_dirty(ptent)) {
- folio_set_dirty(folio);
+ folio_mark_dirty(folio);
if (tlb_delay_rmap(tlb)) {
delay_rmap = 1;
force_flush = 1;
@@ -3799,6 +3799,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
struct page *page;
struct swap_info_struct *si = NULL;
rmap_t rmap_flags = RMAP_NONE;
+ bool need_clear_cache = false;
bool exclusive = false;
swp_entry_t entry;
pte_t pte;
@@ -3867,6 +3868,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (!folio) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
+ /*
+ * Prevent parallel swapin from proceeding with
+ * the cache flag. Otherwise, another thread may
+ * finish swapin first, free the entry, and swapout
+ * reusing the same entry. It's undetectable as
+ * pte_same() returns true due to entry reuse.
+ */
+ if (swapcache_prepare(entry)) {
+ /* Relax a bit to prevent rapid repeated page faults */
+ schedule_timeout_uninterruptible(1);
+ goto out;
+ }
+ need_clear_cache = true;
+
/* skip swapcache */
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
vma, vmf->address, false);
@@ -4117,6 +4132,9 @@ unlock:
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
+ /* Clear the swap cache pin for direct swapin after PTL unlock */
+ if (need_clear_cache)
+ swapcache_clear(si, entry);
if (si)
put_swap_device(si);
return ret;
@@ -4131,6 +4149,8 @@ out_release:
folio_unlock(swapcache);
folio_put(swapcache);
}
+ if (need_clear_cache)
+ swapcache_clear(si, entry);
if (si)
put_swap_device(si);
return ret;
@@ -5478,7 +5498,7 @@ static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs
return true;
if (regs && !user_mode(regs)) {
- unsigned long ip = instruction_pointer(regs);
+ unsigned long ip = exception_ip(regs);
if (!search_exception_tables(ip))
return false;
}
@@ -5503,7 +5523,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r
{
mmap_read_unlock(mm);
if (regs && !user_mode(regs)) {
- unsigned long ip = instruction_pointer(regs);
+ unsigned long ip = exception_ip(regs);
if (!search_exception_tables(ip))
return false;
}
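The do_swap_page() changes above use swapcache_prepare()/swapcache_clear()
as a per-entry pin: the first faulting thread to set SWAP_HAS_CACHE takes
the synchronous swapin path, and any concurrent thread backs off, sleeps a
tick and retries the fault. A rough userspace analog of that pin-or-backoff
shape, using a single C11 atomic flag in place of the real swap-count
encoding (swapin_pin/swapin_unpin are hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One flag per swap entry, set while a direct (skip-swapcache) swapin
 * is in flight. */
static atomic_bool entry_pinned;

/* Analog of swapcache_prepare(): false means another thread already
 * holds the pin and the caller must back off. */
static bool swapin_pin(void)
{
	bool expected = false;

	return atomic_compare_exchange_strong(&entry_pinned, &expected, true);
}

/* Analog of swapcache_clear(): drop the pin once the PTE is in place. */
static void swapin_unpin(void)
{
	atomic_store(&entry_pinned, false);
}

int main(void)
{
	if (!swapin_pin()) {
		/* do_swap_page() sleeps one tick here and refaults */
		puts("entry pinned elsewhere: back off and refault");
		return 0;
	}
	puts("pin held: safe to allocate the folio and read from swap");
	swapin_unpin();
	return 0;
}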
diff --git a/mm/migrate.c b/mm/migrate.c
index cc9f2bcd73b4..c27b1f8097d4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2519,6 +2519,14 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
if (managed_zone(pgdat->node_zones + z))
break;
}
+
+ /*
+ * If there are no managed zones, it should not proceed
+ * further.
+ */
+ if (z < 0)
+ return 0;
+
wakeup_kswapd(pgdat->node_zones + z, 0,
folio_order(folio), ZONE_MOVABLE);
return 0;
diff --git a/mm/mmap.c b/mm/mmap.c
index b78e83d351d2..d89770eaab6b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1825,15 +1825,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
/*
* mmap_region() will call shmem_zero_setup() to create a file,
* so use shmem's get_unmapped_area in case it can be huge.
- * do_mmap() will clear pgoff, so match alignment.
*/
- pgoff = 0;
get_area = shmem_get_unmapped_area;
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
/* Ensures that larger anonymous mappings are THP aligned. */
get_area = thp_get_unmapped_area;
}
+ /* Always treat pgoff as zero for anonymous memory. */
+ if (!file)
+ pgoff = 0;
+
addr = get_area(file, addr, len, pgoff, flags);
if (IS_ERR_VALUE(addr))
return addr;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index cd4e4ae77c40..3f255534986a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1638,7 +1638,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
*/
dtc->wb_thresh = __wb_calc_thresh(dtc);
dtc->wb_bg_thresh = dtc->thresh ?
- div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+ div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
/*
* In order to avoid the stacked BDI deadlock we need
@@ -1921,7 +1921,7 @@ pause:
break;
}
__set_current_state(TASK_KILLABLE);
- wb->dirty_sleep = now;
+ bdi->last_bdp_sleep = jiffies;
io_schedule_timeout(pause);
current->dirty_paused_when = now + pause;
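The wb_bg_thresh hunk above trades div_u64() for div64_u64() because
div_u64() takes a 32-bit divisor: once dtc->thresh can exceed 2^32, the
divisor would be silently truncated and the quotient inflated. A
standalone demonstration with local stand-ins for the two helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same parameter shapes as the kernel helpers. */
static uint64_t my_div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

static uint64_t my_div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t dividend = 1ULL << 40;
	uint64_t divisor = (1ULL << 32) + 2;	/* does not fit in u32 */

	/* The u32 parameter truncates the divisor to 2. */
	printf("div_u64:   %" PRIu64 "\n",
	       my_div_u64(dividend, (uint32_t)divisor));
	printf("div64_u64: %" PRIu64 "\n",
	       my_div64_u64(dividend, divisor));
	return 0;
}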
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 150d4f23b010..06aa1ebbd21c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4685,8 +4685,8 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
gfp_t gfp = gfp_mask;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
- __GFP_NOMEMALLOC;
+ gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
+ __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
PAGE_FRAG_CACHE_MAX_ORDER);
nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
@@ -4699,6 +4699,16 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
return page;
}
+void page_frag_cache_drain(struct page_frag_cache *nc)
+{
+ if (!nc->va)
+ return;
+
+ __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
+ nc->va = NULL;
+}
+EXPORT_SYMBOL(page_frag_cache_drain);
+
void __page_frag_cache_drain(struct page *page, unsigned int count)
{
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
@@ -4708,9 +4718,9 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
}
EXPORT_SYMBOL(__page_frag_cache_drain);
-void *page_frag_alloc_align(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask)
+void *__page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask)
{
unsigned int size = PAGE_SIZE;
struct page *page;
@@ -4779,7 +4789,7 @@ refill:
return nc->va + offset;
}
-EXPORT_SYMBOL(page_frag_alloc_align);
+EXPORT_SYMBOL(__page_frag_alloc_align);
/*
* Frees a page fragment allocated out of either a compound or order 0 page.
diff --git a/mm/readahead.c b/mm/readahead.c
index 23620c57c122..2648ec4f0494 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -469,7 +469,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
if (!folio)
return -ENOMEM;
- mark = round_up(mark, 1UL << order);
+ mark = round_down(mark, 1UL << order);
if (index == mark)
folio_set_readahead(folio);
err = filemap_add_folio(ractl->mapping, folio, index, gfp);
@@ -575,7 +575,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
* It's the expected callback index, assume sequential access.
* Ramp up sizes, and push forward the readahead window.
*/
- expected = round_up(ra->start + ra->size - ra->async_size,
+ expected = round_down(ra->start + ra->size - ra->async_size,
1UL << order);
if (index == expected || index == (ra->start + ra->size)) {
ra->start += ra->size;
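Both readahead hunks replace round_up() with round_down(): a folio of a
given order starts at an index aligned down to that order, so only a
rounded-down mark can ever match a folio's first index. Quick arithmetic
check, with the rounding macros written out the way the kernel's expand
for a power-of-two step:

#include <stdio.h>

#define round_down(x, y)	((x) & ~((y) - 1))
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long mark = 13;	/* desired marker index */
	unsigned long order = 2;	/* folios span 4 pages: 8..11, 12..15 */

	printf("round_up:   %lu\n", round_up(mark, 1UL << order));	/* 16 */
	printf("round_down: %lu\n", round_down(mark, 1UL << order));	/* 12 */
	/* Index 13 lies inside the folio starting at 12; comparing against
	 * the rounded-down value is what lets "index == mark" ever hit. */
	return 0;
}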
diff --git a/mm/swap.h b/mm/swap.h
index 758c46ca671e..fc2f6ade7f80 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -41,6 +41,7 @@ void __delete_from_swap_cache(struct folio *folio,
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
@@ -97,6 +98,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
return 0;
}
+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+}
+
static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr)
{
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e671266ad772..7255c01a1e4e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -680,9 +680,10 @@ skip:
/* The page was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
- if (unlikely(page_allocated))
+ if (unlikely(page_allocated)) {
+ zswap_folio_swapin(folio);
swap_read_folio(folio, false, NULL);
- zswap_folio_swapin(folio);
+ }
return folio;
}
@@ -855,9 +856,10 @@ skip:
/* The folio was likely read above, so no need for plugging here */
folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
&page_allocated, false);
- if (unlikely(page_allocated))
+ if (unlikely(page_allocated)) {
+ zswap_folio_swapin(folio);
swap_read_folio(folio, false, NULL);
- zswap_folio_swapin(folio);
+ }
return folio;
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 556ff7347d5f..746aa9da5302 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3365,6 +3365,19 @@ int swapcache_prepare(swp_entry_t entry)
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+ struct swap_cluster_info *ci;
+ unsigned long offset = swp_offset(entry);
+ unsigned char usage;
+
+ ci = lock_cluster_or_swap_info(si, offset);
+ usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
+ unlock_cluster_or_swap_info(si, ci);
+ if (!usage)
+ free_swap_slot(entry);
+}
+
struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
return swap_type_to_swap_info(swp_type(entry));
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 20e3b0d9cf7e..7cf7d4384259 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -357,6 +357,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
+ atomic_t *mmap_changing,
uffd_flags_t flags)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
@@ -472,6 +473,15 @@ retry:
goto out;
}
mmap_read_lock(dst_mm);
+ /*
+ * If memory mappings are changing because of a non-cooperative
+ * operation (e.g. mremap) running in parallel, bail out and
+ * request the user to retry later
+ */
+ if (mmap_changing && atomic_read(mmap_changing)) {
+ err = -EAGAIN;
+ break;
+ }
dst_vma = NULL;
goto retry;
@@ -506,6 +516,7 @@ extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
+ atomic_t *mmap_changing,
uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */
@@ -622,8 +633,8 @@ retry:
* If this is a HUGETLB vma, pass off to appropriate routine
*/
if (is_vm_hugetlb_page(dst_vma))
- return mfill_atomic_hugetlb(dst_vma, dst_start,
- src_start, len, flags);
+ return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
+ len, mmap_changing, flags);
if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
goto out_unlock;
@@ -891,8 +902,8 @@ static int move_present_pte(struct mm_struct *mm,
double_pt_lock(dst_ptl, src_ptl);
- if (!pte_same(*src_pte, orig_src_pte) ||
- !pte_same(*dst_pte, orig_dst_pte)) {
+ if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+ !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
err = -EAGAIN;
goto out;
}
@@ -935,8 +946,8 @@ static int move_swap_pte(struct mm_struct *mm,
double_pt_lock(dst_ptl, src_ptl);
- if (!pte_same(*src_pte, orig_src_pte) ||
- !pte_same(*dst_pte, orig_dst_pte)) {
+ if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
+ !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
double_pt_unlock(dst_ptl, src_ptl);
return -EAGAIN;
}
@@ -1005,7 +1016,7 @@ retry:
}
spin_lock(dst_ptl);
- orig_dst_pte = *dst_pte;
+ orig_dst_pte = ptep_get(dst_pte);
spin_unlock(dst_ptl);
if (!pte_none(orig_dst_pte)) {
err = -EEXIST;
@@ -1013,7 +1024,7 @@ retry:
}
spin_lock(src_ptl);
- orig_src_pte = *src_pte;
+ orig_src_pte = ptep_get(src_pte);
spin_unlock(src_ptl);
if (pte_none(orig_src_pte)) {
if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
@@ -1043,7 +1054,7 @@ retry:
* page isn't freed under us
*/
spin_lock(src_ptl);
- if (!pte_same(orig_src_pte, *src_pte)) {
+ if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
spin_unlock(src_ptl);
err = -EAGAIN;
goto out;
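The *src_pte -> ptep_get(src_pte) conversions above matter because
ptep_get() wraps the access in READ_ONCE(): a plain dereference of a PTE
that another thread can modify may be re-read or torn by the compiler,
breaking the snapshot-then-recheck pattern around pte_same(). A minimal
sketch of that pattern, with a C11 relaxed atomic load standing in for
READ_ONCE() (pte_read_once is a hypothetical name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a PTE slot a concurrent thread may update. */
static _Atomic uint64_t pte_slot;

/* Analog of ptep_get(): exactly one load of the slot. */
static uint64_t pte_read_once(_Atomic uint64_t *ptep)
{
	return atomic_load_explicit(ptep, memory_order_relaxed);
}

int main(void)
{
	uint64_t orig = pte_read_once(&pte_slot);

	/* ... the lock is dropped and retaken; the slot may change ... */

	/* Re-check against the snapshot with a single fresh load, the way
	 * move_present_pte() uses pte_same(ptep_get(...), orig_src_pte). */
	bool same = pte_read_once(&pte_slot) == orig;

	printf("unchanged: %s\n", same ? "yes" : "no");
	return 0;
}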
diff --git a/mm/zswap.c b/mm/zswap.c
index ca25b676048e..db4625af65fb 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -377,10 +377,9 @@ void zswap_folio_swapin(struct folio *folio)
{
struct lruvec *lruvec;
- if (folio) {
- lruvec = folio_lruvec(folio);
- atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
- }
+ VM_WARN_ON_ONCE(!folio_test_locked(folio));
+ lruvec = folio_lruvec(folio);
+ atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
}
/*********************************
@@ -536,10 +535,6 @@ static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
*/
static void zswap_free_entry(struct zswap_entry *entry)
{
- if (entry->objcg) {
- obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
- obj_cgroup_put(entry->objcg);
- }
if (!entry->length)
atomic_dec(&zswap_same_filled_pages);
else {
@@ -548,6 +543,10 @@ static void zswap_free_entry(struct zswap_entry *entry)
atomic_dec(&entry->pool->nr_stored);
zswap_pool_put(entry->pool);
}
+ if (entry->objcg) {
+ obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
+ obj_cgroup_put(entry->objcg);
+ }
zswap_entry_cache_free(entry);
atomic_dec(&zswap_stored_pages);
zswap_update_total_size();
@@ -895,10 +894,8 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
* into the warmer region. We should terminate shrinking (if we're in the dynamic
* shrinker context).
*/
- if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
- ret = LRU_SKIP;
+ if (writeback_result == -EEXIST && encountered_page_in_swapcache)
*encountered_page_in_swapcache = true;
- }
goto put_unlock;
}
@@ -1442,6 +1439,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
spin_unlock(&tree->lock);
delete_from_swap_cache(folio);
+ folio_unlock(folio);
+ folio_put(folio);
return -ENOMEM;
}
spin_unlock(&tree->lock);
@@ -1519,7 +1518,7 @@ bool zswap_store(struct folio *folio)
if (folio_test_large(folio))
return false;
- if (!zswap_enabled || !tree)
+ if (!tree)
return false;
/*
@@ -1534,6 +1533,10 @@ bool zswap_store(struct folio *folio)
zswap_invalidate_entry(tree, dupentry);
}
spin_unlock(&tree->lock);
+
+ if (!zswap_enabled)
+ return false;
+
objcg = get_obj_cgroup_from_folio(folio);
if (objcg && !obj_cgroup_may_zswap(objcg)) {
memcg = get_mem_cgroup_from_objcg(objcg);
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
index 7b3341cef926..850d4a185f55 100644
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -179,4 +179,5 @@ static void __exit lowpan_module_exit(void)
module_init(lowpan_module_init);
module_exit(lowpan_module_exit);
+MODULE_DESCRIPTION("IPv6 over Low-Power Wireless Personal Area Network core module");
MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 407b2335f091..39876eff51d2 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -504,28 +504,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &vlan_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
-}
-
static __be16 vlan_parse_protocol(const struct sk_buff *skb)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
@@ -559,7 +537,7 @@ static const struct header_ops vlan_passthru_header_ops = {
.parse_protocol = vlan_parse_protocol,
};
-static struct device_type vlan_type = {
+static const struct device_type vlan_type = {
.name = "vlan",
};
@@ -627,7 +605,7 @@ static int vlan_dev_init(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- vlan_dev_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
@@ -784,9 +762,9 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
static int vlan_dev_get_iflink(const struct net_device *dev)
{
- struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+ const struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- return real_dev->ifindex;
+ return READ_ONCE(real_dev->ifindex);
}
static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 214532173536..a3b68243fd4b 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
+ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+ continue;
m = nla_data(attr);
vlan_dev_set_ingress_priority(dev, m->to, m->from);
}
}
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
+ if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+ continue;
m = nla_data(attr);
err = vlan_dev_set_egress_priority(dev, m->from, m->to);
if (err)
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 7825c129742a..87b959da00cd 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -163,48 +163,34 @@ void vlan_proc_rem_dev(struct net_device *vlandev)
* The following few functions build the content of /proc/net/vlan/config
*/
-/* start read of /proc/net/vlan/config */
-static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(rcu)
+static void *vlan_seq_from_index(struct seq_file *seq, loff_t *pos)
{
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct net *net = seq_file_net(seq);
- loff_t i = 1;
-
- rcu_read_lock();
- if (*pos == 0)
- return SEQ_START_TOKEN;
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
if (!is_vlan_dev(dev))
continue;
-
- if (i++ == *pos)
- return dev;
+ *pos = dev->ifindex;
+ return dev;
}
+ return NULL;
+}
+
+static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ rcu_read_lock();
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev;
- struct net *net = seq_file_net(seq);
-
++*pos;
-
- dev = v;
- if (v == SEQ_START_TOKEN)
- dev = net_device_entry(&net->dev_base_head);
-
- for_each_netdev_continue_rcu(net, dev) {
- if (!is_vlan_dev(dev))
- continue;
-
- return dev;
- }
-
- return NULL;
+ return vlan_seq_from_index(seq, pos);
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
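The vlanproc rewrite above stops counting list positions and instead
stores the last visited ifindex in *pos, resuming from the next higher
index on every ->next call. Resuming by key stays stable when devices
are added or removed between reads of the seq file; resuming by position
does not. A userspace sketch of the same idea over a sorted id array
(next_from is a hypothetical helper):

#include <stddef.h>
#include <stdio.h>

/* "Registered devices" with ever-increasing ids, walked in id order
 * the way for_each_netdev_dump() walks ifindexes. */
static const unsigned long ids[] = { 3, 7, 9, 12 };

/* Like vlan_seq_from_index(): first id >= *pos, 0 when done. */
static unsigned long next_from(unsigned long *pos)
{
	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		if (ids[i] >= *pos) {
			*pos = ids[i];
			return ids[i];
		}
	}
	return 0;
}

int main(void)
{
	unsigned long pos = 0, id;

	while ((id = next_from(&pos)) != 0) {
		printf("visit %lu\n", id);
		pos = id + 1;	/* the ->next step: advance past the key */
	}
	return 0;
}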
diff --git a/net/Kconfig b/net/Kconfig
index 4adc47d0c9c2..3e57ccf0da27 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -331,6 +331,7 @@ config NET_RX_BUSY_POLL
config BQL
bool
+ prompt "Enable Byte Queue Limits"
depends on SYSFS
select DQL
default y
diff --git a/net/Makefile b/net/Makefile
index b06b5539e7a6..65bb8c72a35e 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_INET) += ipv4/
obj-$(CONFIG_TLS) += tls/
obj-$(CONFIG_XFRM) += xfrm/
-obj-$(CONFIG_UNIX_SCM) += unix/
+obj-$(CONFIG_UNIX) += unix/
obj-y += ipv6/
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 033871e718a3..324e3ab96bb3 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1532,4 +1532,5 @@ static void __exit atm_mpoa_cleanup(void)
module_init(atm_mpoa_init);
module_exit(atm_mpoa_cleanup);
+MODULE_DESCRIPTION("Multi-Protocol Over ATM (MPOA) driver");
MODULE_LICENSE("GPL");
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 28a939d56090..4c7e85534324 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -684,7 +684,7 @@ static bool batadv_dat_forward_data(struct batadv_priv *bat_priv,
cand = batadv_dat_select_candidates(bat_priv, ip, vid);
if (!cand)
- goto out;
+ return ret;
batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip);
@@ -728,7 +728,6 @@ free_orig:
batadv_orig_node_put(cand[i].orig_node);
}
-out:
kfree(cand);
return ret;
}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 5fc754b0b3f7..75119f1ffccc 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -691,29 +691,31 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
"%s%s", BATADV_UEV_TYPE_VAR,
batadv_uev_type_str[type]);
if (!uevent_env[0])
- goto out;
+ goto report_error;
uevent_env[1] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_ACTION_VAR,
batadv_uev_action_str[action]);
if (!uevent_env[1])
- goto out;
+ goto free_first_env;
/* If the event is DEL, ignore the data field */
if (action != BATADV_UEV_DEL) {
uevent_env[2] = kasprintf(GFP_ATOMIC,
"%s%s", BATADV_UEV_DATA_VAR, data);
if (!uevent_env[2])
- goto out;
+ goto free_second_env;
}
ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
-out:
- kfree(uevent_env[0]);
- kfree(uevent_env[1]);
kfree(uevent_env[2]);
+free_second_env:
+ kfree(uevent_env[1]);
+free_first_env:
+ kfree(uevent_env[0]);
if (ret)
+report_error:
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
batadv_uev_type_str[type],
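The uevent rework above is the standard kernel unwind ladder: every
allocation gets its own label, a failure jumps to the label that frees
only what already exists, and the frees run in reverse allocation order.
The same shape in plain C, with asprintf() standing in for kasprintf()
(the original patch also frees env[2] unconditionally, which kfree(NULL)
makes safe; this sketch keeps the ladder strict):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Build three strings; on failure, free exactly what was allocated. */
static int build_env(char *env[3])
{
	if (asprintf(&env[0], "TYPE=%s", "gw") < 0)
		goto report_error;
	if (asprintf(&env[1], "ACTION=%s", "add") < 0)
		goto free_first_env;
	if (asprintf(&env[2], "DATA=%s", "01:02:03") < 0)
		goto free_second_env;
	return 0;	/* caller owns env[0..2] */

free_second_env:
	free(env[1]);
free_first_env:
	free(env[0]);
report_error:
	fprintf(stderr, "building uevent env failed\n");
	return -1;
}

int main(void)
{
	char *env[3];

	if (build_env(env))
		return 1;
	for (int i = 0; i < 3; i++) {
		puts(env[i]);
		free(env[i]);
	}
	return 0;
}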
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 870dcd7f1786..8ca854a75a32 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2024.0"
+#define BATADV_SOURCE_VERSION "2024.1"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index d982daea8329..14088c4ff2f6 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -2175,6 +2175,7 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
cancel_delayed_work_sync(&bat_priv->mcast.work);
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST_TRACKER, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
/* safely calling outside of worker, as worker was canceled above */
@@ -2198,6 +2199,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
BATADV_MCAST_WANT_NO_RTR4);
batadv_mcast_want_rtr6_update(bat_priv, orig,
BATADV_MCAST_WANT_NO_RTR6);
+ batadv_mcast_have_mc_ptype_update(bat_priv, orig,
+ BATADV_MCAST_HAVE_MC_PTYPE_CAPA);
spin_unlock_bh(&orig->mcast_handler_lock);
}
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 1f7ed9d4f6fd..0954757f0b8b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -15,7 +15,6 @@
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/export.h>
#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 65601aa52e0d..2821a42cefdc 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1049,6 +1049,7 @@ static void hci_error_reset(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
+ hci_dev_hold(hdev);
BT_DBG("%s", hdev->name);
if (hdev->hw_error)
@@ -1056,10 +1057,10 @@ static void hci_error_reset(struct work_struct *work)
else
bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
- if (hci_dev_do_close(hdev))
- return;
+ if (!hci_dev_do_close(hdev))
+ hci_dev_do_open(hdev);
- hci_dev_do_open(hdev);
+ hci_dev_put(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)
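hci_error_reset() now pins hdev for the whole handler, so the close/open
cycle cannot drop the last reference and free the device underneath the
work item. The general hold-across-work shape, with a toy atomic
refcount standing in for hci_dev_hold()/hci_dev_put():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev {
	atomic_int ref;
	char name[8];
};

static struct dev *dev_hold(struct dev *d)
{
	atomic_fetch_add(&d->ref, 1);
	return d;
}

static void dev_put(struct dev *d)
{
	/* Free on the 1 -> 0 transition. */
	if (atomic_fetch_sub(&d->ref, 1) == 1) {
		printf("freeing %s\n", d->name);
		free(d);
	}
}

/* Work handler: take a reference first, drop it on every exit path. */
static void error_reset_work(struct dev *d)
{
	dev_hold(d);
	printf("resetting %s\n", d->name);
	/* ... close/reopen may drop other references meanwhile ... */
	dev_put(d);
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	atomic_init(&d->ref, 1);
	strcpy(d->name, "hci0");
	error_reset_work(d);
	dev_put(d);	/* last reference: freed here */
	return 0;
}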
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index ef8c3bed7361..2a5f5a7d2412 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5329,9 +5329,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn || !hci_conn_ssp_enabled(conn))
+ if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
goto unlock;
+ /* Assume remote supports SSP since it has triggered this event */
+ set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+
hci_conn_hold(conn);
if (!hci_dev_test_flag(hdev, HCI_MGMT))
@@ -6794,6 +6797,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_UNKNOWN_CONN_ID);
+ if (max > hcon->le_conn_max_interval)
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_INVALID_LL_PARAMS);
+
if (hci_check_conn_params(min, max, latency, timeout))
return send_conn_param_neg_reply(hdev, handle,
HCI_ERROR_INVALID_LL_PARAMS);
@@ -7420,10 +7427,10 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
* keep track of the bdaddr of the connection event that woke us up.
*/
if (event == HCI_EV_CONN_REQUEST) {
- bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+ bacpy(&hdev->wake_addr, &conn_request->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_CONN_COMPLETE) {
- bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+ bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
hdev->wake_addr_type = BDADDR_BREDR;
} else if (event == HCI_EV_LE_META) {
struct hci_ev_le_meta *le_ev = (void *)skb->data;
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index a6fc8a2a5c67..5716345a26df 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -2206,8 +2206,11 @@ static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
/* During suspend, only wakeable devices can be in acceptlist */
if (hdev->suspended &&
- !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
+ !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
+ hci_le_del_accept_list_sync(hdev, &params->addr,
+ params->addr_type);
return 0;
+ }
/* Select filter policy to accept all advertising */
if (*num_entries >= hdev->le_accept_list_size)
@@ -5559,7 +5562,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
bt_dev_dbg(hdev, "");
- if (hci_dev_test_flag(hdev, HCI_INQUIRY))
+ if (test_bit(HCI_INQUIRY, &hdev->flags))
return 0;
hci_dev_lock(hdev);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 60298975d5c4..656f49b299d2 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -5613,7 +5613,13 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
- err = hci_check_conn_params(min, max, latency, to_multiplier);
+ if (max > hcon->le_conn_max_interval) {
+ BT_DBG("requested connection interval exceeds current bounds.");
+ err = -EINVAL;
+ } else {
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
+ }
+
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index bb72ff6eb22f..ee3b4aad8bd8 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1045,6 +1045,8 @@ static void rpa_expired(struct work_struct *work)
hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
+static int set_discoverable_sync(struct hci_dev *hdev, void *data);
+
static void discov_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1063,7 +1065,7 @@ static void discov_off(struct work_struct *work)
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hdev->discov_timeout = 0;
- hci_update_discoverable(hdev);
+ hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
mgmt_new_settings(hdev);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 053ef8f25fae..1d34d8497033 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1941,7 +1941,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- if (!skb_linearize(skb)) {
+ if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) {
s = rfcomm_recv_frame(s, skb);
if (!s)
break;
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 8906f7bdf4a9..02de71719aed 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -7,7 +7,7 @@
#include <linux/bpf.h>
#include <linux/btf.h>
-extern struct bpf_struct_ops bpf_bpf_dummy_ops;
+static struct bpf_struct_ops bpf_bpf_dummy_ops;
/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
@@ -22,6 +22,8 @@ struct bpf_dummy_ops_test_args {
struct bpf_dummy_ops_state state;
};
+static struct btf *bpf_dummy_ops_btf;
+
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
@@ -90,9 +92,15 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
void *image = NULL;
unsigned int op_idx;
int prog_ret;
+ s32 type_id;
int err;
- if (prog->aux->attach_btf_id != st_ops->type_id)
+ type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
+ bpf_bpf_dummy_ops.name,
+ BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ if (prog->aux->attach_btf_id != type_id)
return -EOPNOTSUPP;
func_proto = prog->aux->attach_func_proto;
@@ -148,6 +156,7 @@ out:
static int bpf_dummy_init(struct btf *btf)
{
+ bpf_dummy_ops_btf = btf;
return 0;
}
@@ -247,7 +256,7 @@ static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
.test_sleepable = bpf_dummy_test_sleepable,
};
-struct bpf_struct_ops bpf_bpf_dummy_ops = {
+static struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
.check_member = bpf_dummy_ops_check_member,
@@ -256,4 +265,11 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = {
.unreg = bpf_dummy_unreg,
.name = "bpf_dummy_ops",
.cfi_stubs = &__bpf_bpf_dummy_ops,
+ .owner = THIS_MODULE,
};
+
+static int __init bpf_dummy_struct_ops_init(void)
+{
+ return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
+}
+late_initcall(bpf_dummy_struct_ops_init);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index dfd919374017..61efeadaff8d 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -12,6 +12,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
+#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
@@ -254,7 +255,8 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
int i, n;
LIST_HEAD(list);
- n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
+ n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
+ (void **)skbs);
if (unlikely(n == 0)) {
for (i = 0; i < nframes; i++)
xdp_return_frame(frames[i]);
@@ -617,21 +619,21 @@ CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_test_modify_return_ids)
+BTF_KFUNCS_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
-BTF_SET8_END(bpf_test_modify_return_ids)
+BTF_KFUNCS_END(bpf_test_modify_return_ids)
static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
.owner = THIS_MODULE,
.set = &bpf_test_modify_return_ids,
};
-BTF_SET8_START(test_sk_check_kfunc_ids)
+BTF_KFUNCS_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
-BTF_SET8_END(test_sk_check_kfunc_ids)
+BTF_KFUNCS_END(test_sk_check_kfunc_ids)
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
u32 size, u32 headroom, u32 tailroom)
diff --git a/net/bridge/br.c b/net/bridge/br.c
index ac19b797dbec..2cab878e0a39 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -356,26 +356,21 @@ void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
clear_bit(opt, &br->options);
}
-static void __net_exit br_net_exit_batch(struct list_head *net_list)
+static void __net_exit br_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net_device *dev;
struct net *net;
- LIST_HEAD(list);
-
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
for_each_netdev(net, dev)
if (netif_is_bridge_master(dev))
- br_dev_delete(dev, &list);
-
- unregister_netdevice_many(&list);
-
- rtnl_unlock();
+ br_dev_delete(dev, dev_to_kill);
}
static struct pernet_operations br_net_ops = {
- .exit_batch = br_net_exit_batch,
+ .exit_batch_rtnl = br_net_exit_batch_rtnl,
};
static const struct stp_proto br_stp_proto = {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 65cee0ad3c1b..c366ccc8b3db 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -108,38 +108,23 @@ out:
return NETDEV_TX_OK;
}
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
-static void br_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = br_fdb_hash_init(br);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = br_mdb_hash_init(br);
if (err) {
- free_percpu(dev->tstats);
br_fdb_hash_fini(br);
return err;
}
err = br_vlan_init(br);
if (err) {
- free_percpu(dev->tstats);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
return err;
@@ -147,14 +132,14 @@ static int br_dev_init(struct net_device *dev)
err = br_multicast_init_stats(br);
if (err) {
- free_percpu(dev->tstats);
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
+ return err;
}
- br_set_lockdep_class(dev);
- return err;
+ netdev_lockdep_set_classes(dev);
+ return 0;
}
static void br_dev_uninit(struct net_device *dev)
@@ -166,7 +151,6 @@ static void br_dev_uninit(struct net_device *dev)
br_vlan_flush(br);
br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
- free_percpu(dev->tstats);
}
static int br_dev_open(struct net_device *dev)
@@ -481,7 +465,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fill_forward_path = br_fill_forward_path,
};
-static struct device_type br_type = {
+static const struct device_type br_type = {
.name = "bridge",
};
@@ -503,6 +487,7 @@ void br_dev_setup(struct net_device *dev)
dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->vlan_features = COMMON_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
br->dev = dev;
spin_lock_init(&br->lock);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c622de5eccd0..c77591e63841 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -35,10 +35,7 @@ static struct kmem_cache *br_fdb_cache __read_mostly;
int __init br_fdb_init(void)
{
- br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
- sizeof(struct net_bridge_fdb_entry),
- 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
if (!br_fdb_cache)
return -ENOMEM;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index d7d021af1029..2d7b73242958 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1762,6 +1762,10 @@ static void br_ip6_multicast_querier_expired(struct timer_list *t)
}
#endif
+static void br_multicast_query_delay_expired(struct timer_list *t)
+{
+}
+
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
struct br_ip *ip,
struct sk_buff *skb)
@@ -3198,7 +3202,7 @@ br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
unsigned long max_delay)
{
if (!timer_pending(&query->timer))
- query->delay_time = jiffies + max_delay;
+ mod_timer(&query->delay_timer, jiffies + max_delay);
mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}
@@ -4041,13 +4045,11 @@ void br_multicast_ctx_init(struct net_bridge *br,
brmctx->multicast_querier_interval = 255 * HZ;
brmctx->multicast_membership_interval = 260 * HZ;
- brmctx->ip4_other_query.delay_time = 0;
brmctx->ip4_querier.port_ifidx = 0;
seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
brmctx->multicast_mld_version = 1;
- brmctx->ip6_other_query.delay_time = 0;
brmctx->ip6_querier.port_ifidx = 0;
seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif
@@ -4056,6 +4058,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
br_ip4_multicast_local_router_expired, 0);
timer_setup(&brmctx->ip4_other_query.timer,
br_ip4_multicast_querier_expired, 0);
+ timer_setup(&brmctx->ip4_other_query.delay_timer,
+ br_multicast_query_delay_expired, 0);
timer_setup(&brmctx->ip4_own_query.timer,
br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
@@ -4063,6 +4067,8 @@ void br_multicast_ctx_init(struct net_bridge *br,
br_ip6_multicast_local_router_expired, 0);
timer_setup(&brmctx->ip6_other_query.timer,
br_ip6_multicast_querier_expired, 0);
+ timer_setup(&brmctx->ip6_other_query.delay_timer,
+ br_multicast_query_delay_expired, 0);
timer_setup(&brmctx->ip6_own_query.timer,
br_ip6_multicast_query_expired, 0);
#endif
@@ -4197,10 +4203,12 @@ static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
del_timer_sync(&brmctx->ip4_mc_router_timer);
del_timer_sync(&brmctx->ip4_other_query.timer);
+ del_timer_sync(&brmctx->ip4_other_query.delay_timer);
del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
del_timer_sync(&brmctx->ip6_mc_router_timer);
del_timer_sync(&brmctx->ip6_other_query.timer);
+ del_timer_sync(&brmctx->ip6_other_query.delay_timer);
del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}
@@ -4643,13 +4651,15 @@ int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
max_delay = brmctx->multicast_query_response_interval;
if (!timer_pending(&brmctx->ip4_other_query.timer))
- brmctx->ip4_other_query.delay_time = jiffies + max_delay;
+ mod_timer(&brmctx->ip4_other_query.delay_timer,
+ jiffies + max_delay);
br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
if (!timer_pending(&brmctx->ip6_other_query.timer))
- brmctx->ip6_other_query.delay_time = jiffies + max_delay;
+ mod_timer(&brmctx->ip6_other_query.delay_timer,
+ jiffies + max_delay);
br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif
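The delay_time -> delay_timer conversion above replaces an open-coded
jiffies comparison with a real timer: a raw timestamp checked with the
time_is_before_jiffies() family silently flips meaning once jiffies
advances past half its range beyond the stamp (about 25 days at 32-bit
HZ=1000), whereas a timer is unambiguously pending or not. A
demonstration of the flip, using the kernel's wrap-safe comparison idiom
on a stamp that is never refreshed:

#include <stdio.h>

/* The kernel's time_after(a, b): "a is after b", valid only while the
 * two stamps are within half the counter range of each other. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = 1000;
	unsigned long delay_time = jiffies + 250;	/* window end */

	/* Inside the window: the stamp is genuinely in the future. */
	printf("pending now:        %d\n", time_after(delay_time, jiffies));

	/* Half the counter range later, the stale stamp looks "in the
	 * future" again even though it expired ages ago - the ambiguity
	 * an actual timer does not have. */
	jiffies = delay_time + (~0UL / 2 + 1);
	printf("pending much later: %d\n", time_after(delay_time, jiffies));
	return 0;
}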
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index ed1720890757..35e10c5a766d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -43,6 +43,10 @@
#include <linux/sysctl.h>
#endif
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack_core.h>
+#endif
+
static unsigned int brnf_net_id __read_mostly;
struct brnf_net {
@@ -553,6 +557,90 @@ static unsigned int br_nf_pre_routing(void *priv,
return NF_STOLEN;
}
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+/* conntrack's nf_confirm logic cannot handle cloned skbs referencing
+ * the same nf_conn entry, which will happen for multicast (broadcast)
+ * frames on bridges.
+ *
+ * Example:
+ * macvlan0
+ * br0
+ * ethX ethY
+ *
+ * ethX (or Y) receives multicast or broadcast packet containing
+ * an IP packet, not yet in conntrack table.
+ *
+ * 1. skb passes through bridge and fake-ip (br_netfilter) Prerouting.
+ * -> skb->_nfct now references an unconfirmed entry
+ * 2. skb is broad/mcast packet. bridge now passes clones out on each bridge
+ * interface.
+ * 3. skb gets passed up the stack.
+ * 4. In the macvlan case, the macvlan driver retains clone(s) of the mcast skb
+ * and schedules a work queue to send them out on the lower devices.
+ *
+ * The clone skb->_nfct is not a copy, it is the same entry as the
+ * original skb. The macvlan rx handler then returns RX_HANDLER_PASS.
+ * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
+ *
+ * The macvlan broadcast worker and the normal confirm path will race.
+ *
+ * This race will not happen if step 2 already confirmed a clone. In that
+ * case later steps perform skb_clone() with skb->_nfct already confirmed (in
+ * hash table). This works fine.
+ *
+ * But such confirmation won't happen when eb/ip/nftables rules drop the
+ * packets before they reach the nf_confirm step in postrouting.
+ *
+ * Work around this problem by explicitly confirming the entry at
+ * LOCAL_IN time, before the upper layer has a chance to clone the
+ * unconfirmed entry.
+ *
+ */
+static unsigned int br_nf_local_in(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_conntrack *nfct = skb_nfct(skb);
+ const struct nf_ct_hook *ct_hook;
+ struct nf_conn *ct;
+ int ret;
+
+ if (!nfct || skb->pkt_type == PACKET_HOST)
+ return NF_ACCEPT;
+
+ ct = container_of(nfct, struct nf_conn, ct_general);
+ if (likely(nf_ct_is_confirmed(ct)))
+ return NF_ACCEPT;
+
+ WARN_ON_ONCE(skb_shared(skb));
+ WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
+
+ /* We can't call nf_confirm here, it would create a dependency
+ * on the nf_conntrack module.
+ */
+ ct_hook = rcu_dereference(nf_ct_hook);
+ if (!ct_hook) {
+ skb->_nfct = 0ul;
+ nf_conntrack_put(nfct);
+ return NF_ACCEPT;
+ }
+
+ nf_bridge_pull_encap_header(skb);
+ ret = ct_hook->confirm(skb);
+ switch (ret & NF_VERDICT_MASK) {
+ case NF_STOLEN:
+ return NF_STOLEN;
+ default:
+ nf_bridge_push_encap_header(skb);
+ break;
+ }
+
+ ct = container_of(nfct, struct nf_conn, ct_general);
+ WARN_ON_ONCE(!nf_ct_is_confirmed(ct));
+
+ return ret;
+}
+#endif
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -964,6 +1052,14 @@ static const struct nf_hook_ops br_nf_ops[] = {
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ {
+ .hook = br_nf_local_in,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_LOCAL_IN,
+ .priority = NF_BR_PRI_LAST,
+ },
+#endif
{
.hook = br_nf_forward,
.pf = NFPROTO_BRIDGE,
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5ad4abfcb7ba..2cf4fc756263 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -455,7 +455,8 @@ static int br_fill_ifinfo(struct sk_buff *skb,
u32 filter_mask, const struct net_device *dev,
bool getlink)
{
- u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+ u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) :
+ IF_OPER_DOWN;
struct nlattr *af = NULL;
struct net_bridge *br;
struct ifinfomsg *hdr;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index b0a92c344722..86ea5e6689b5 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -78,7 +78,7 @@ struct bridge_mcast_own_query {
/* other querier */
struct bridge_mcast_other_query {
struct timer_list timer;
- unsigned long delay_time;
+ struct timer_list delay_timer;
};
/* selected querier */
@@ -1159,7 +1159,7 @@ __br_multicast_querier_exists(struct net_bridge_mcast *brmctx,
own_querier_enabled = false;
}
- return time_is_before_jiffies(querier->delay_time) &&
+ return !timer_pending(&querier->delay_timer) &&
(own_querier_enabled || timer_pending(&querier->timer));
}
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index ee84e783e1df..7b41ee8740cb 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -595,21 +595,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
}
static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
+ struct net_device *dev,
+ unsigned long action,
enum switchdev_obj_id id,
const struct net_bridge_mdb_entry *mp,
struct net_device *orig_dev)
{
- struct switchdev_obj_port_mdb *mdb;
+ struct switchdev_obj_port_mdb mdb = {
+ .obj = {
+ .id = id,
+ .orig_dev = orig_dev,
+ },
+ };
+ struct switchdev_obj_port_mdb *pmdb;
- mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
- if (!mdb)
- return -ENOMEM;
+ br_switchdev_mdb_populate(&mdb, mp);
+
+ if (action == SWITCHDEV_PORT_OBJ_ADD &&
+ switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
+ /* This event is already in the deferred queue of
+ * events, so this replay must be elided, lest the
+ * driver receive duplicate events for it. This can
+ * only happen when replaying additions, since
+ * modifications are always immediately visible in
+ * br->mdb_list, whereas actual event delivery may be
+ * delayed.
+ */
+ return 0;
+ }
- mdb->obj.id = id;
- mdb->obj.orig_dev = orig_dev;
- br_switchdev_mdb_populate(mdb, mp);
- list_add_tail(&mdb->obj.list, mdb_list);
+ pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
+ if (!pmdb)
+ return -ENOMEM;
+ list_add_tail(&pmdb->obj.list, mdb_list);
return 0;
}
@@ -677,51 +696,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;
- /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
- * because the write-side protection is br->multicast_lock. But we
- * need to emulate the [ blocking ] calling context of a regular
- * switchdev event, so since both br->multicast_lock and RCU read side
- * critical sections are atomic, we have no choice but to pick the RCU
- * read side lock, queue up all our events, leave the critical section
- * and notify switchdev from blocking context.
+ if (adding)
+ action = SWITCHDEV_PORT_OBJ_ADD;
+ else
+ action = SWITCHDEV_PORT_OBJ_DEL;
+
+ /* br_switchdev_mdb_queue_one() will take care to not queue a
+ * replay of an event that is already pending in the switchdev
+ * deferred queue. In order to safely determine that, there
+ * must be no new deferred MDB notifications enqueued for the
+ * duration of the MDB scan. Therefore, grab the write-side
+ * lock to avoid racing with any concurrent IGMP/MLD snooping.
*/
- rcu_read_lock();
+ spin_lock_bh(&br->multicast_lock);
- hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
+ hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
struct net_bridge_port_group __rcu * const *pp;
const struct net_bridge_port_group *p;
if (mp->host_joined) {
- err = br_switchdev_mdb_queue_one(&mdb_list,
+ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
SWITCHDEV_OBJ_ID_HOST_MDB,
mp, br_dev);
if (err) {
- rcu_read_unlock();
+ spin_unlock_bh(&br->multicast_lock);
goto out_free_mdb;
}
}
- for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
+ for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->key.port->dev != dev)
continue;
- err = br_switchdev_mdb_queue_one(&mdb_list,
+ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
SWITCHDEV_OBJ_ID_PORT_MDB,
mp, dev);
if (err) {
- rcu_read_unlock();
+ spin_unlock_bh(&br->multicast_lock);
goto out_free_mdb;
}
}
}
- rcu_read_unlock();
-
- if (adding)
- action = SWITCHDEV_PORT_OBJ_ADD;
- else
- action = SWITCHDEV_PORT_OBJ_DEL;
+ spin_unlock_bh(&br->multicast_lock);
list_for_each_entry(obj, &mdb_list, list) {
err = br_switchdev_mdb_replay_one(nb, dev,
@@ -786,6 +804,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
+
+ /* Make sure that the device leaving this bridge has seen all
+ * relevant events before it is disassociated. In the normal
+ * case, when the device is directly attached to the bridge,
+ * this is covered by del_nbp(). If the association was indirect
+ * however, e.g. via a team or bond, and the device is leaving
+ * that intermediate device, then the bridge port remains in
+ * place.
+ */
+ switchdev_deferred_process();
}
/* Let the bridge know that this port is offloaded, so that it can assign a
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 15f44d026e75..9c2fffb827ab 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -841,7 +841,7 @@ void br_vlan_flush(struct net_bridge *br)
vg = br_vlan_group(br);
__vlan_flush(br, NULL, vg);
RCU_INIT_POINTER(br->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
@@ -1372,7 +1372,7 @@ void nbp_vlan_flush(struct net_bridge_port *port)
vg = nbp_vlan_group(port);
__vlan_flush(port->br, port, vg);
RCU_INIT_POINTER(port->vlgrp, NULL);
- synchronize_rcu();
+ synchronize_net();
__vlan_group_free(vg);
}
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 7f304a19ac1b..104c0125e32e 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -39,6 +39,10 @@ config NF_CONNTRACK_BRIDGE
To compile it as a module, choose M here. If unsure, say N.
+# old sockopt interface and eval loop
+config BRIDGE_NF_EBTABLES_LEGACY
+ tristate
+
menuconfig BRIDGE_NF_EBTABLES
tristate "Ethernet Bridge tables (ebtables) support"
depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
@@ -55,6 +59,7 @@ if BRIDGE_NF_EBTABLES
#
config BRIDGE_EBT_BROUTE
tristate "ebt: broute table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables broute table is used to define rules that decide between
bridging and routing frames, giving Linux the functionality of a
@@ -65,6 +70,7 @@ config BRIDGE_EBT_BROUTE
config BRIDGE_EBT_T_FILTER
tristate "ebt: filter table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables filter table is used to define frame filtering rules at
local input, forwarding and local output. See the man page for
@@ -74,6 +80,7 @@ config BRIDGE_EBT_T_FILTER
config BRIDGE_EBT_T_NAT
tristate "ebt: nat table support"
+ select BRIDGE_NF_EBTABLES_LEGACY
help
The ebtables nat table is used to define rules that alter the MAC
source address (MAC SNAT) or the MAC destination address (MAC DNAT).
diff --git a/net/bridge/netfilter/Makefile b/net/bridge/netfilter/Makefile
index 1c9ce49ab651..b9a1303da977 100644
--- a/net/bridge/netfilter/Makefile
+++ b/net/bridge/netfilter/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o
# connection tracking
obj-$(CONFIG_NF_CONNTRACK_BRIDGE) += nf_conntrack_bridge.o
-obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
+obj-$(CONFIG_BRIDGE_NF_EBTABLES_LEGACY) += ebtables.o
# tables
obj-$(CONFIG_BRIDGE_EBT_BROUTE) += ebtable_broute.o
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index abb090f94ed2..6f877e31709b 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -291,6 +291,30 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
return nf_conntrack_in(skb, &bridge_state);
}
+static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ if (skb->pkt_type == PACKET_HOST)
+ return NF_ACCEPT;
+
+ /* nf_conntrack_confirm() cannot handle concurrent clones,
+ * this happens for broad/multicast frames with e.g. macvlan on top
+ * of the bridge device.
+ */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+ return NF_ACCEPT;
+
+ /* let inet prerouting call conntrack again */
+ skb->_nfct = 0;
+ nf_ct_put(ct);
+
+ return NF_ACCEPT;
+}
+
static void nf_ct_bridge_frag_save(struct sk_buff *skb,
struct nf_bridge_frag_data *data)
{
@@ -386,6 +410,12 @@ static struct nf_hook_ops nf_ct_bridge_hook_ops[] __read_mostly = {
.priority = NF_IP_PRI_CONNTRACK,
},
{
+ .hook = nf_ct_bridge_in,
+ .pf = NFPROTO_BRIDGE,
+ .hooknum = NF_BR_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
+ },
+ {
.hook = nf_ct_bridge_post,
.pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7343fd487dbe..707576eeeb58 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -865,6 +865,8 @@ static __init int can_init(void)
/* check for correct padding to be able to use the structs similarly */
BUILD_BUG_ON(offsetof(struct can_frame, len) !=
offsetof(struct canfd_frame, len) ||
+ offsetof(struct can_frame, len) !=
+ offsetof(struct canxl_frame, flags) ||
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9168114fc87f..27d5fcf0eac9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -72,9 +72,11 @@
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
/* use of last_frames[index].flags */
+#define RX_LOCAL 0x10 /* frame was created on the local host */
+#define RX_OWN 0x20 /* frame was sent via the socket it was received on */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
-#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
+#define BCM_CAN_FLAGS_MASK 0x0F /* to clean private flags after usage */
/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
@@ -138,6 +140,16 @@ static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;
+/* Return pointer to store the extra msg flags for bcm_recvmsg().
+ * We use the space of one unsigned int beyond the 'struct sockaddr_can'
+ * in skb->cb.
+ */
+static inline unsigned int *bcm_flags(struct sk_buff *skb)
+{
+ /* return pointer after struct sockaddr_can */
+ return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
+}
+
static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
return (struct bcm_sock *)sk;
@@ -325,6 +337,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct sock *sk = op->sk;
unsigned int datalen = head->nframes * op->cfsiz;
int err;
+ unsigned int *pflags;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
@@ -332,6 +345,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
skb_put_data(skb, head, sizeof(*head));
+ /* ensure space for sockaddr_can and msg flags */
+ sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
+ sizeof(unsigned int));
+
+ /* initialize msg flags */
+ pflags = bcm_flags(skb);
+ *pflags = 0;
+
if (head->nframes) {
/* CAN frames starting here */
firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
@@ -344,8 +365,14 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* relevant for updates that are generated by the
* BCM, where nframes is 1
*/
- if (head->nframes == 1)
+ if (head->nframes == 1) {
+ if (firstframe->flags & RX_LOCAL)
+ *pflags |= MSG_DONTROUTE;
+ if (firstframe->flags & RX_OWN)
+ *pflags |= MSG_CONFIRM;
+
firstframe->flags &= BCM_CAN_FLAGS_MASK;
+ }
}
if (has_timestamp) {
@@ -360,7 +387,6 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
* containing the interface index.
*/
- sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
@@ -444,7 +470,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
op->frames_filtered = op->frames_abs = 0;
/* this element is not throttled anymore */
- data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
+ data->flags &= ~RX_THR;
memset(&head, 0, sizeof(head));
head.opcode = RX_CHANGED;
@@ -465,13 +491,17 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct canfd_frame *lastdata,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
memcpy(lastdata, rxdata, op->cfsiz);
/* mark as used and throttled by default */
lastdata->flags |= (RX_RECV|RX_THR);
+ /* add own/local/remote traffic flags */
+ lastdata->flags |= traffic_flags;
+
/* throttling mode inactive ? */
if (!op->kt_ival2) {
/* send RX_CHANGED to the user immediately */
@@ -508,7 +538,8 @@ rx_changed_settime:
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
- const struct canfd_frame *rxdata)
+ const struct canfd_frame *rxdata,
+ unsigned char traffic_flags)
{
struct canfd_frame *cf = op->frames + op->cfsiz * index;
struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
@@ -521,7 +552,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (!(lcf->flags & RX_RECV)) {
/* received data for the first time => send update to user */
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
@@ -529,7 +560,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
for (i = 0; i < rxdata->len; i += 8) {
if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
(get_u64(cf, i) & get_u64(lcf, i))) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -537,7 +568,7 @@ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
if (op->flags & RX_CHECK_DLC) {
/* do a real check in CAN frame length */
if (rxdata->len != lcf->len) {
- bcm_rx_update_and_send(op, lcf, rxdata);
+ bcm_rx_update_and_send(op, lcf, rxdata, traffic_flags);
return;
}
}
@@ -644,6 +675,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
struct bcm_op *op = (struct bcm_op *)data;
const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
unsigned int i;
+ unsigned char traffic_flags;
if (op->can_id != rxframe->can_id)
return;
@@ -673,15 +705,24 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
return;
}
+ /* compute flags to distinguish between own/local/remote CAN traffic */
+ traffic_flags = 0;
+ if (skb->sk) {
+ traffic_flags |= RX_LOCAL;
+ if (skb->sk == op->sk)
+ traffic_flags |= RX_OWN;
+ }
+
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, op->last_frames, rxframe);
+ bcm_rx_update_and_send(op, op->last_frames, rxframe,
+ traffic_flags);
goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
- bcm_rx_cmp_to_index(op, 0, rxframe);
+ bcm_rx_cmp_to_index(op, 0, rxframe, traffic_flags);
goto rx_starttimer;
}
@@ -698,7 +739,8 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
(get_u64(op->frames, 0) &
get_u64(op->frames + op->cfsiz * i, 0))) {
- bcm_rx_cmp_to_index(op, i, rxframe);
+ bcm_rx_cmp_to_index(op, i, rxframe,
+ traffic_flags);
break;
}
}
@@ -1675,6 +1717,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
}
+ /* assign the flags that have been recorded in bcm_send_to_user() */
+ msg->msg_flags |= *(bcm_flags(skb));
+
skb_free_datagram(sk, skb);
return size;
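
The bcm.c hunks above surface the new RX_LOCAL/RX_OWN information to user space via msg_flags: MSG_DONTROUTE marks a frame that originated on the local host, MSG_CONFIRM one sent by this very socket. A minimal user-space sketch of reading those flags on a CAN_BCM socket follows; the interface name "can0" and the CAN ID 0x123 are illustrative assumptions, and error handling is omitted.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct { struct bcm_msg_head head; struct can_frame frame; } rx;
	struct iovec iov = { .iov_base = &rx, .iov_len = sizeof(rx) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	struct bcm_msg_head setup = {
		.opcode = RX_SETUP,
		.flags  = RX_FILTER_ID,		/* content filtering off */
		.can_id = 0x123,		/* assumed CAN ID */
	};
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

	strcpy(ifr.ifr_name, "can0");		/* assumed interface */
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	connect(s, (struct sockaddr *)&addr, sizeof(addr));

	/* subscribe to CAN ID 0x123, then read one RX_CHANGED message */
	write(s, &setup, sizeof(setup));
	if (recvmsg(s, &msg, 0) > 0) {
		if (msg.msg_flags & MSG_DONTROUTE)
			printf("frame originated on this host\n");
		if (msg.msg_flags & MSG_CONFIRM)
			printf("frame was sent by this very socket\n");
	}
	close(s);
	return 0;
}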
diff --git a/net/can/isotp.c b/net/can/isotp.c
index d1c6f206f429..25bac0fafc83 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -381,8 +381,9 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
return 1;
}
- /* get communication parameters only from the first FC frame */
- if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
+ /* get static/dynamic communication params from first/every FC frame */
+ if (so->tx.state == ISOTP_WAIT_FIRST_FC ||
+ so->opt.flags & CAN_ISOTP_DYN_FC_PARMS) {
so->txfc.bs = cf->data[ae + 1];
so->txfc.stmin = cf->data[ae + 2];
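
With CAN_ISOTP_DYN_FC_PARMS set, block size and STmin are re-read from every flow control frame instead of only the first one. A hedged sketch of enabling it from user space, assuming an unbound CAN_ISOTP socket (isotp options must be set before bind()); the flag name is taken from the hunk above.

#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

/* enable dynamic FC parameter updates on a freshly created,
 * not yet bound CAN_ISOTP socket 's' (sketch, no error handling) */
static int enable_dyn_fc(int s)
{
	struct can_isotp_options opts;

	memset(&opts, 0, sizeof(opts));
	opts.flags |= CAN_ISOTP_DYN_FC_PARMS;
	return setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
			  &opts, sizeof(opts));
}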
diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
index 16af1a7f80f6..31a93cae5111 100644
--- a/net/can/j1939/j1939-priv.h
+++ b/net/can/j1939/j1939-priv.h
@@ -86,7 +86,7 @@ struct j1939_priv {
unsigned int tp_max_packet_size;
/* lock for j1939_socks list */
- spinlock_t j1939_socks_lock;
+ rwlock_t j1939_socks_lock;
struct list_head j1939_socks;
struct kref rx_kref;
@@ -301,6 +301,7 @@ struct j1939_sock {
int ifindex;
struct j1939_addr addr;
+ spinlock_t filters_lock;
struct j1939_filter *filters;
int nfilters;
pgn_t pgn_rx_filter;
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index ecff1c947d68..a6fb89fa6278 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -274,7 +274,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
return ERR_PTR(-ENOMEM);
j1939_tp_init(priv);
- spin_lock_init(&priv->j1939_socks_lock);
+ rwlock_init(&priv->j1939_socks_lock);
INIT_LIST_HEAD(&priv->j1939_socks);
mutex_lock(&j1939_netdev_lock);
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 14c431663233..305dd72c844c 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -80,16 +80,16 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
jsk->state |= J1939_SOCK_BOUND;
j1939_priv_get(priv);
- spin_lock_bh(&priv->j1939_socks_lock);
+ write_lock_bh(&priv->j1939_socks_lock);
list_add_tail(&jsk->list, &priv->j1939_socks);
- spin_unlock_bh(&priv->j1939_socks_lock);
+ write_unlock_bh(&priv->j1939_socks_lock);
}
static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
- spin_lock_bh(&priv->j1939_socks_lock);
+ write_lock_bh(&priv->j1939_socks_lock);
list_del_init(&jsk->list);
- spin_unlock_bh(&priv->j1939_socks_lock);
+ write_unlock_bh(&priv->j1939_socks_lock);
j1939_priv_put(priv);
jsk->state &= ~J1939_SOCK_BOUND;
@@ -262,12 +262,17 @@ static bool j1939_sk_match_dst(struct j1939_sock *jsk,
static bool j1939_sk_match_filter(struct j1939_sock *jsk,
const struct j1939_sk_buff_cb *skcb)
{
- const struct j1939_filter *f = jsk->filters;
- int nfilter = jsk->nfilters;
+ const struct j1939_filter *f;
+ int nfilter;
+
+ spin_lock_bh(&jsk->filters_lock);
+
+ f = jsk->filters;
+ nfilter = jsk->nfilters;
if (!nfilter)
/* receive all when no filters are assigned */
- return true;
+ goto filter_match_found;
for (; nfilter; ++f, --nfilter) {
if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
@@ -276,9 +281,15 @@ static bool j1939_sk_match_filter(struct j1939_sock *jsk,
continue;
if ((skcb->addr.src_name & f->name_mask) != f->name)
continue;
- return true;
+ goto filter_match_found;
}
+
+ spin_unlock_bh(&jsk->filters_lock);
return false;
+
+filter_match_found:
+ spin_unlock_bh(&jsk->filters_lock);
+ return true;
}
static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
@@ -329,13 +340,13 @@ bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
struct j1939_sock *jsk;
bool match = false;
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
match = j1939_sk_recv_match_one(jsk, skcb);
if (match)
break;
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
return match;
}
@@ -344,11 +355,11 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
struct j1939_sock *jsk;
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
j1939_sk_recv_one(jsk, skb);
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
}
static void j1939_sk_sock_destruct(struct sock *sk)
@@ -401,6 +412,7 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
+ spin_lock_init(&jsk->filters_lock);
/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
sock_set_flag(sk, SOCK_RCU_FREE);
@@ -703,9 +715,11 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
}
lock_sock(&jsk->sk);
+ spin_lock_bh(&jsk->filters_lock);
ofilters = jsk->filters;
jsk->filters = filters;
jsk->nfilters = count;
+ spin_unlock_bh(&jsk->filters_lock);
release_sock(&jsk->sk);
kfree(ofilters);
return 0;
@@ -1080,12 +1094,12 @@ void j1939_sk_errqueue(struct j1939_session *session,
}
/* spread RX notifications to all sockets subscribed to this session */
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
if (j1939_sk_recv_match_one(jsk, &session->skcb))
__j1939_sk_errqueue(session, &jsk->sk, type);
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
};
void j1939_sk_send_loop_abort(struct sock *sk, int err)
@@ -1273,7 +1287,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
struct j1939_sock *jsk;
int error_code = ENETDOWN;
- spin_lock_bh(&priv->j1939_socks_lock);
+ read_lock_bh(&priv->j1939_socks_lock);
list_for_each_entry(jsk, &priv->j1939_socks, list) {
jsk->sk.sk_err = error_code;
if (!sock_flag(&jsk->sk, SOCK_DEAD))
@@ -1281,7 +1295,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
j1939_sk_queue_drop_all(priv, jsk, error_code);
}
- spin_unlock_bh(&priv->j1939_socks_lock);
+ read_unlock_bh(&priv->j1939_socks_lock);
}
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
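
The j1939_socks list is walked for every received frame but only modified on bind/unbind, so converting j1939_socks_lock from a plain spinlock to an rwlock lets concurrent receive paths traverse the list in parallel while writers stay exclusive. A minimal user-space analogue of that pattern using pthreads; this is purely illustrative and not the kernel locking API.

#include <pthread.h>
#include <stddef.h>

struct jsk_node { struct jsk_node *next; /* filters, addr, ... */ };

static pthread_rwlock_t socks_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct jsk_node *socks;

/* hot path: many receivers may match filters concurrently */
void recv_match(void)
{
	pthread_rwlock_rdlock(&socks_lock);
	for (struct jsk_node *n = socks; n; n = n->next)
		; /* match the received frame against n's filters */
	pthread_rwlock_unlock(&socks_lock);
}

/* rare path: bind takes the lock exclusively to mutate the list */
void add_sock(struct jsk_node *n)
{
	pthread_rwlock_wrlock(&socks_lock);
	n->next = socks;
	socks = n;
	pthread_rwlock_unlock(&socks_lock);
}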
diff --git a/net/can/raw.c b/net/can/raw.c
index e6b822624ba2..00533f64d69d 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -91,6 +91,10 @@ struct raw_sock {
int recv_own_msgs;
int fd_frames;
int xl_frames;
+ struct can_raw_vcid_options raw_vcid_opts;
+ canid_t tx_vcid_shifted;
+ canid_t rx_vcid_shifted;
+ canid_t rx_vcid_mask_shifted;
int join_filters;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
@@ -134,10 +138,29 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
return;
/* make sure to not pass oversized frames to the socket */
- if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
- (!ro->xl_frames && can_is_canxl_skb(oskb)))
+ if (!ro->fd_frames && can_is_canfd_skb(oskb))
return;
+ if (can_is_canxl_skb(oskb)) {
+ struct canxl_frame *cxl = (struct canxl_frame *)oskb->data;
+
+ /* make sure to not pass oversized frames to the socket */
+ if (!ro->xl_frames)
+ return;
+
+ /* filter CAN XL VCID content */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) {
+ /* apply VCID filter if user enabled the filter */
+ if ((cxl->prio & ro->rx_vcid_mask_shifted) !=
+ (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted))
+ return;
+ } else {
+ /* no filter => do not forward VCID tagged frames */
+ if (cxl->prio & CANXL_VCID_MASK)
+ return;
+ }
+ }
+
/* eliminate multiple filter matches for the same skb */
if (this_cpu_ptr(ro->uniq)->skb == oskb &&
this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
@@ -698,6 +721,19 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
ro->fd_frames = ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS:
+ if (optlen != sizeof(ro->raw_vcid_opts))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen))
+ return -EFAULT;
+
+ /* prepare 32 bit values for handling in hot path */
+ ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET;
+ ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET;
+ break;
+
case CAN_RAW_JOIN_FILTERS:
if (optlen != sizeof(ro->join_filters))
return -EINVAL;
@@ -720,7 +756,6 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
struct raw_sock *ro = raw_sk(sk);
int len;
void *val;
- int err = 0;
if (level != SOL_CAN_RAW)
return -EINVAL;
@@ -730,7 +765,9 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
switch (optname) {
- case CAN_RAW_FILTER:
+ case CAN_RAW_FILTER: {
+ int err = 0;
+
lock_sock(sk);
if (ro->count > 0) {
int fsize = ro->count * sizeof(struct can_filter);
@@ -755,7 +792,7 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
if (!err)
err = put_user(len, optlen);
return err;
-
+ }
case CAN_RAW_ERR_FILTER:
if (len > sizeof(can_err_mask_t))
len = sizeof(can_err_mask_t);
@@ -786,6 +823,25 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
val = &ro->xl_frames;
break;
+ case CAN_RAW_XL_VCID_OPTS: {
+ int err = 0;
+
+ /* user space buffer too small for VCID opts? */
+ if (len < sizeof(ro->raw_vcid_opts)) {
+ /* return -ERANGE and needed space in optlen */
+ err = -ERANGE;
+ if (put_user(sizeof(ro->raw_vcid_opts), optlen))
+ err = -EFAULT;
+ } else {
+ if (len > sizeof(ro->raw_vcid_opts))
+ len = sizeof(ro->raw_vcid_opts);
+ if (copy_to_user(optval, &ro->raw_vcid_opts, len))
+ err = -EFAULT;
+ }
+ if (!err)
+ err = put_user(len, optlen);
+ return err;
+ }
case CAN_RAW_JOIN_FILTERS:
if (len > sizeof(int))
len = sizeof(int);
@@ -803,23 +859,41 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
return 0;
}
-static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
+static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
+{
+ struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ /* sanitize non-CAN-XL bits */
+ cxl->prio &= (CANXL_PRIO_MASK | CANXL_VCID_MASK);
+
+ /* clear VCID in CAN XL frame if pass through is disabled */
+ if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
+ cxl->prio &= CANXL_PRIO_MASK;
+
+ /* set VCID in CAN XL frame if enabled */
+ if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
+ cxl->prio &= CANXL_PRIO_MASK;
+ cxl->prio |= ro->tx_vcid_shifted;
+ }
+}
+
+static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
/* Classical CAN -> no checks for flags and device capabilities */
if (can_is_can_skb(skb))
- return false;
+ return CAN_MTU;
/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
if (ro->fd_frames && can_is_canfd_skb(skb) &&
(mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
- return false;
+ return CANFD_MTU;
/* CAN XL -> needs to be enabled and a CAN XL device */
if (ro->xl_frames && can_is_canxl_skb(skb) &&
can_is_canxl_dev_mtu(mtu))
- return false;
+ return CANXL_MTU;
- return true;
+ return 0;
}
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -829,6 +903,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct sockcm_cookie sockc;
struct sk_buff *skb;
struct net_device *dev;
+ unsigned int txmtu;
int ifindex;
int err = -EINVAL;
@@ -869,9 +944,16 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
goto free_skb;
err = -EINVAL;
- if (raw_bad_txframe(ro, skb, dev->mtu))
+
+ /* check for valid CAN (CC/FD/XL) frame content */
+ txmtu = raw_check_txframe(ro, skb, dev->mtu);
+ if (!txmtu)
goto free_skb;
+ /* only CANXL: clear/forward/set VCID value */
+ if (txmtu == CANXL_MTU)
+ raw_put_canxl_vcid(ro, skb);
+
sockcm_init(&sockc, sk);
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
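
The new CAN_RAW_XL_VCID_OPTS socket option above configures per-socket VCID handling: CAN_RAW_XL_VCID_TX_SET stamps a fixed VCID into outgoing CAN XL frames, CAN_RAW_XL_VCID_TX_PASS forwards a VCID already present in the frame, and CAN_RAW_XL_VCID_RX_FILTER enables the receive-side mask/value filter. A hedged user-space sketch, assuming a bound CAN_RAW socket with CAN_RAW_XL_FRAMES already enabled; the struct and flag names come from the hunks above, the VCID values are made up.

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

/* tag TX frames with VCID 0x40 and only accept RX frames carrying
 * the same VCID ('s' is the prepared socket; no error handling) */
static int setup_vcid(int s)
{
	struct can_raw_vcid_options vcid = {
		.flags        = CAN_RAW_XL_VCID_TX_SET |
				CAN_RAW_XL_VCID_RX_FILTER,
		.tx_vcid      = 0x40,
		.rx_vcid      = 0x40,
		.rx_vcid_mask = 0xff,
	};

	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_VCID_OPTS,
			  &vcid, sizeof(vcid));
}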
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index f9a50d7f0d20..0cb61c76b9b8 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -160,8 +160,9 @@ static size_t sizeof_footer(struct ceph_connection *con)
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
/* Initialize data cursor if it's not a sparse read */
- if (!msg->sparse_read)
- ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
+ u64 len = msg->sparse_read_total ? : data_len;
+
+ ceph_msg_data_cursor_init(&msg->cursor, msg, len);
}
/*
@@ -991,7 +992,7 @@ static inline int read_partial_message_section(struct ceph_connection *con,
return read_partial_message_chunk(con, section, sec_len, crc);
}
-static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
+static int read_partial_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
{
struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
bool do_bounce = ceph_test_opt(from_msgr(con->msgr), RXBOUNCE);
@@ -1026,7 +1027,7 @@ static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
return 1;
}
-static int read_sparse_msg_data(struct ceph_connection *con)
+static int read_partial_sparse_msg_data(struct ceph_connection *con)
{
struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
@@ -1036,31 +1037,31 @@ static int read_sparse_msg_data(struct ceph_connection *con)
if (do_datacrc)
crc = con->in_data_crc;
- do {
+ while (cursor->total_resid) {
if (con->v1.in_sr_kvec.iov_base)
ret = read_partial_message_chunk(con,
&con->v1.in_sr_kvec,
con->v1.in_sr_len,
&crc);
else if (cursor->sr_resid > 0)
- ret = read_sparse_msg_extent(con, &crc);
-
- if (ret <= 0) {
- if (do_datacrc)
- con->in_data_crc = crc;
- return ret;
- }
+ ret = read_partial_sparse_msg_extent(con, &crc);
+ if (ret <= 0)
+ break;
memset(&con->v1.in_sr_kvec, 0, sizeof(con->v1.in_sr_kvec));
ret = con->ops->sparse_read(con, cursor,
(char **)&con->v1.in_sr_kvec.iov_base);
+ if (ret <= 0) {
+ ret = ret ? ret : 1; /* must return > 0 to indicate success */
+ break;
+ }
con->v1.in_sr_len = ret;
- } while (ret > 0);
+ }
if (do_datacrc)
con->in_data_crc = crc;
- return ret < 0 ? ret : 1; /* must return > 0 to indicate success */
+ return ret;
}
static int read_partial_msg_data(struct ceph_connection *con)
@@ -1253,8 +1254,8 @@ static int read_partial_message(struct ceph_connection *con)
if (!m->num_data_items)
return -EIO;
- if (m->sparse_read)
- ret = read_sparse_msg_data(con);
+ if (m->sparse_read_total)
+ ret = read_partial_sparse_msg_data(con);
else if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
ret = read_partial_msg_data_bounce(con);
else
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index f8ec60e1aba3..a0ca5414b333 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -1128,7 +1128,7 @@ static int decrypt_tail(struct ceph_connection *con)
struct sg_table enc_sgt = {};
struct sg_table sgt = {};
struct page **pages = NULL;
- bool sparse = con->in_msg->sparse_read;
+ bool sparse = !!con->in_msg->sparse_read_total;
int dpos = 0;
int tail_len;
int ret;
@@ -2060,7 +2060,7 @@ static int prepare_read_tail_plain(struct ceph_connection *con)
}
if (data_len(msg)) {
- if (msg->sparse_read)
+ if (msg->sparse_read_total)
con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
else
con->v2.in_state = IN_S_PREPARE_READ_DATA;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d3a759e052c8..9d078b37fe0b 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -5510,7 +5510,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
}
m = ceph_msg_get(req->r_reply);
- m->sparse_read = (bool)srlen;
+ m->sparse_read_total = srlen;
dout("get_reply tid %lld %p\n", tid, m);
@@ -5777,11 +5777,8 @@ static int prep_next_sparse_read(struct ceph_connection *con,
}
if (o->o_sparse_op_idx < 0) {
- u64 srlen = sparse_data_requested(req);
-
- dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
- __func__, o->o_osd, srlen);
- ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
+ dout("%s: [%d] starting new sparse read req\n",
+ __func__, o->o_osd);
} else {
u64 end;
@@ -5850,8 +5847,6 @@ static inline void convert_extent_map(struct ceph_sparse_read *sr)
}
#endif
-#define MAX_EXTENTS 4096
-
static int osd_sparse_read(struct ceph_connection *con,
struct ceph_msg_data_cursor *cursor,
char **pbuf)
@@ -5859,8 +5854,8 @@ static int osd_sparse_read(struct ceph_connection *con,
struct ceph_osd *o = con->private;
struct ceph_sparse_read *sr = &o->o_sparse_read;
u32 count = sr->sr_count;
- u64 eoff, elen;
- int ret;
+ u64 eoff, elen, len = 0;
+ int i, ret;
switch (sr->sr_state) {
case CEPH_SPARSE_READ_HDR:
@@ -5882,23 +5877,16 @@ next_op:
if (count > 0) {
if (!sr->sr_extent || count > sr->sr_ext_len) {
- /*
- * Apply a hard cap to the number of extents.
- * If we have more, assume something is wrong.
- */
- if (count > MAX_EXTENTS) {
- dout("%s: OSD returned 0x%x extents in a single reply!\n",
- __func__, count);
- return -EREMOTEIO;
- }
-
/* no extent array provided, or too short */
kfree(sr->sr_extent);
sr->sr_extent = kmalloc_array(count,
sizeof(*sr->sr_extent),
GFP_NOIO);
- if (!sr->sr_extent)
+ if (!sr->sr_extent) {
+ pr_err("%s: failed to allocate %u extents\n",
+ __func__, count);
return -ENOMEM;
+ }
sr->sr_ext_len = count;
}
ret = count * sizeof(*sr->sr_extent);
@@ -5912,8 +5900,20 @@ next_op:
convert_extent_map(sr);
ret = sizeof(sr->sr_datalen);
*pbuf = (char *)&sr->sr_datalen;
- sr->sr_state = CEPH_SPARSE_READ_DATA;
+ sr->sr_state = CEPH_SPARSE_READ_DATA_PRE;
break;
+ case CEPH_SPARSE_READ_DATA_PRE:
+ /* Convert sr_datalen to host-endian */
+ sr->sr_datalen = le32_to_cpu((__force __le32)sr->sr_datalen);
+ for (i = 0; i < count; i++)
+ len += sr->sr_extent[i].len;
+ if (sr->sr_datalen != len) {
+ pr_warn_ratelimited("data len %u != extent len %llu\n",
+ sr->sr_datalen, len);
+ return -EREMOTEIO;
+ }
+ sr->sr_state = CEPH_SPARSE_READ_DATA;
+ fallthrough;
case CEPH_SPARSE_READ_DATA:
if (sr->sr_index >= count) {
sr->sr_state = CEPH_SPARSE_READ_HDR;
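
The new CEPH_SPARSE_READ_DATA_PRE state replaces the old MAX_EXTENTS hard cap with an exact consistency check: the little-endian data length announced by the OSD must equal the sum of the advertised extent lengths, otherwise the reply is rejected with -EREMOTEIO. A standalone sketch of the same validation logic, with simplified types rather than the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stddef.h>

struct extent { uint64_t off; uint64_t len; };

/* 'wire_datalen' arrives little-endian in the OSD reply */
static int sparse_datalen_ok(uint32_t wire_datalen,
			     const struct extent *ext, size_t count)
{
	uint32_t datalen = le32toh(wire_datalen);
	uint64_t len = 0;

	for (size_t i = 0; i < count; i++)
		len += ext[i].len;

	/* a mismatch maps to -EREMOTEIO in the kernel code above */
	return datalen == len;
}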
diff --git a/net/core/Makefile b/net/core/Makefile
index 821aec06abf1..6e6548011fae 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -18,6 +18,7 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
+obj-y += hotdata.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 103d46fa0eeb..a8b625abe242 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -751,7 +751,7 @@ size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
size_t len, void *from, void *priv2)
{
__wsum *csum = priv2;
- __wsum next = csum_partial_copy_nocheck(from, iter_to, len);
+ __wsum next = csum_partial_copy_nocheck(from + progress, iter_to, len);
*csum = csum_block_add(*csum, next, progress);
return 0;
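
The one-line datagram.c fix matters because memcpy_to_iter_csum() is invoked once per iterator segment with a running 'progress' offset: the source pointer has to advance by that offset too, or every segment after the first copies and checksums the wrong bytes. A small sketch of the intended chunked copy-and-checksum loop, using a plain folded-sum style instead of the kernel's __wsum helpers and assuming even segment lengths (csum_block_add() also handles odd offsets):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	return sum;
}

/* copy 'from' into per-segment buffers while checksumming: the
 * source offset must advance together with 'progress' */
static uint32_t copy_segments(uint8_t *segs[], const size_t seglen[],
			      size_t nsegs, const uint8_t *from)
{
	uint32_t sum = 0;
	size_t progress = 0;

	for (size_t i = 0; i < nsegs; i++) {
		/* the fixed bug: read from 'from + progress', not 'from' */
		memcpy(segs[i], from + progress, seglen[i]);
		sum = csum_add(sum, from + progress, seglen[i]);
		progress += seglen[i];
	}
	return sum;
}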
diff --git a/net/core/dev.c b/net/core/dev.c
index f01a9b858347..0766a245816b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -153,41 +153,21 @@
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>
+#include <net/page_pool/types.h>
+#include <net/page_pool/helpers.h>
+#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
-struct list_head ptype_all __read_mostly; /* Taps */
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev,
struct netlink_ext_ack *extack);
-/*
- * The @dev_base_head list is protected by @dev_base_lock and the rtnl
- * semaphore.
- *
- * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
- *
- * Writers must hold the rtnl semaphore while they loop through the
- * dev_base_head list, and hold dev_base_lock for writing when they do the
- * actual updates. This allows pure readers to access the list even
- * while a writer is preparing to update it.
- *
- * To put it another way, dev_base_lock is held for writing only to
- * protect against pure readers; the rtnl semaphore provides the
- * protection against other writers.
- *
- * See, for example usages, register_netdevice() and
- * unregister_netdevice(), which must be called with the rtnl
- * semaphore held.
- */
-DEFINE_RWLOCK(dev_base_lock);
-EXPORT_SYMBOL(dev_base_lock);
-
static DEFINE_MUTEX(ifalias_mutex);
/* protects napi_hash addition/deletion and napi_gen_id */
@@ -200,8 +180,9 @@ static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
- while (++net->dev_base_seq == 0)
- ;
+ unsigned int val = net->dev_base_seq + 1;
+
+ WRITE_ONCE(net->dev_base_seq, val ?: 1);
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
@@ -336,18 +317,27 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
return -ENOMEM;
netdev_name_node_add(net, name_node);
/* The node that holds dev->name acts as a head of per-device list. */
- list_add_tail(&name_node->list, &dev->name_node->list);
+ list_add_tail_rcu(&name_node->list, &dev->name_node->list);
return 0;
}
-static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+static void netdev_name_node_alt_free(struct rcu_head *head)
{
- list_del(&name_node->list);
+ struct netdev_name_node *name_node =
+ container_of(head, struct netdev_name_node, rcu);
+
kfree(name_node->name);
netdev_name_node_free(name_node);
}
+static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+{
+ netdev_name_node_del(name_node);
+ list_del(&name_node->list);
+ call_rcu(&name_node->rcu, netdev_name_node_alt_free);
+}
+
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@ -362,10 +352,7 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
- netdev_name_node_del(name_node);
- synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
-
return 0;
}
@@ -373,8 +360,10 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
{
struct netdev_name_node *name_node, *tmp;
- list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
- __netdev_name_node_alt_destroy(name_node);
+ list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
+ list_del(&name_node->list);
+ netdev_name_node_alt_free(&name_node->rcu);
+ }
}
/* Device list insertion */
@@ -385,12 +374,10 @@ static void list_netdevice(struct net_device *dev)
ASSERT_RTNL();
- write_lock(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
netdev_name_node_add(net, dev->name_node);
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
- write_unlock(&dev_base_lock);
netdev_for_each_altname(dev, name_node)
netdev_name_node_add(net, name_node);
@@ -404,7 +391,7 @@ static void list_netdevice(struct net_device *dev)
/* Device list removal
* caller must respect a RCU grace period before freeing/reusing dev
*/
-static void unlist_netdevice(struct net_device *dev, bool lock)
+static void unlist_netdevice(struct net_device *dev)
{
struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
@@ -417,13 +404,9 @@ static void unlist_netdevice(struct net_device *dev, bool lock)
netdev_name_node_del(name_node);
/* Unlink dev from the device chain */
- if (lock)
- write_lock(&dev_base_lock);
list_del_rcu(&dev->dev_list);
netdev_name_node_del(dev->name_node);
hlist_del_rcu(&dev->index_hlist);
- if (lock)
- write_unlock(&dev_base_lock);
dev_base_seq_inc(dev_net(dev));
}
@@ -442,6 +425,12 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
+/* Page_pool has a lockless array/stack to alloc/recycle pages.
+ * PP consumers must pay attention to run APIs in the appropriate context
+ * (e.g. NAPI context).
+ */
+static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
+
#ifdef CONFIG_LOCKDEP
/*
* register_netdevice() inits txq->_xmit_lock and sets lockdep class
@@ -551,7 +540,7 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
if (pt->type == htons(ETH_P_ALL))
- return pt->dev ? &pt->dev->ptype_all : &ptype_all;
+ return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
else
return pt->dev ? &pt->dev->ptype_specific :
&ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
@@ -653,7 +642,7 @@ int dev_get_iflink(const struct net_device *dev)
if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
return dev->netdev_ops->ndo_get_iflink(dev);
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
}
EXPORT_SYMBOL(dev_get_iflink);
@@ -738,9 +727,9 @@ EXPORT_SYMBOL_GPL(dev_fill_forward_path);
* @net: the applicable net namespace
* @name: name to find
*
- * Find an interface by name. Must be called under RTNL semaphore
- * or @dev_base_lock. If the name is found a pointer to the device
- * is returned. If the name is not found then %NULL is returned. The
+ * Find an interface by name. Must be called under RTNL semaphore.
+ * If the name is found a pointer to the device is returned.
+ * If the name is not found then %NULL is returned. The
* reference counters are not incremented so the caller must be
* careful with locks.
*/
@@ -821,8 +810,7 @@ EXPORT_SYMBOL(netdev_get_by_name);
* Search for an interface by index. Returns %NULL if the device
* is not found or a pointer to the device. The device has not
* had its reference counter increased so the caller must be careful
- * about locking. The caller must hold either the RTNL semaphore
- * or @dev_base_lock.
+ * about locking. The caller must hold the RTNL semaphore.
*/
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
@@ -1212,13 +1200,13 @@ int dev_change_name(struct net_device *dev, const char *newname)
dev->flags & IFF_UP ? " (while UP)" : "");
old_assign_type = dev->name_assign_type;
- dev->name_assign_type = NET_NAME_RENAMED;
+ WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
rollback:
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
up_write(&devnet_rename_sem);
return ret;
}
@@ -1227,15 +1215,11 @@ rollback:
netdev_adjacent_rename_links(dev, oldname);
- write_lock(&dev_base_lock);
netdev_name_node_del(dev->name_node);
- write_unlock(&dev_base_lock);
- synchronize_rcu();
+ synchronize_net();
- write_lock(&dev_base_lock);
netdev_name_node_add(net, dev->name_node);
- write_unlock(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
@@ -1247,7 +1231,7 @@ rollback:
down_write(&devnet_rename_sem);
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
- dev->name_assign_type = old_assign_type;
+ WRITE_ONCE(dev->name_assign_type, old_assign_type);
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
@@ -2242,7 +2226,8 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
*/
bool dev_nit_active(struct net_device *dev)
{
- return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
+ return !list_empty(&net_hotdata.ptype_all) ||
+ !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
@@ -2253,10 +2238,9 @@ EXPORT_SYMBOL_GPL(dev_nit_active);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
- struct packet_type *ptype;
+ struct list_head *ptype_list = &net_hotdata.ptype_all;
+ struct packet_type *ptype, *pt_prev = NULL;
struct sk_buff *skb2 = NULL;
- struct packet_type *pt_prev = NULL;
- struct list_head *ptype_list = &ptype_all;
rcu_read_lock();
again:
@@ -2302,7 +2286,7 @@ again:
pt_prev = ptype;
}
- if (ptype_list == &ptype_all) {
+ if (ptype_list == &net_hotdata.ptype_all) {
ptype_list = &dev->ptype_all;
goto again;
}
@@ -4421,19 +4405,10 @@ EXPORT_SYMBOL(__dev_direct_xmit);
* Receiver routines
*************************************************************************/
-int netdev_max_backlog __read_mostly = 1000;
-EXPORT_SYMBOL(netdev_max_backlog);
-
-int netdev_tstamp_prequeue __read_mostly = 1;
unsigned int sysctl_skb_defer_max __read_mostly = 64;
-int netdev_budget __read_mostly = 300;
-/* Must be at least 2 jiffes to guarantee 1 jiffy timeout */
-unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64; /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
-int dev_rx_weight __read_mostly = 64;
-int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
@@ -4475,12 +4450,6 @@ static inline void ____napi_schedule(struct softnet_data *sd,
#ifdef CONFIG_RPS
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-u32 rps_cpu_mask __read_mostly;
-EXPORT_SYMBOL(rps_cpu_mask);
-
struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
@@ -4572,7 +4541,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (!hash)
goto done;
- sock_flow_table = rcu_dereference(rps_sock_flow_table);
+ sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
if (flow_table && sock_flow_table) {
struct rps_dev_flow *rflow;
u32 next_cpu;
@@ -4582,10 +4551,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
*/
ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
- if ((ident ^ hash) & ~rps_cpu_mask)
+ if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
goto try_rps;
- next_cpu = ident & rps_cpu_mask;
+ next_cpu = ident & net_hotdata.rps_cpu_mask;
/* OK, now we know there is a match,
* we can look at the local (per receive queue) flow table
@@ -4734,7 +4703,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
struct softnet_data *sd;
unsigned int old_flow, new_flow;
- if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
+ if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
return false;
sd = this_cpu_ptr(&softnet_data);
@@ -4782,7 +4751,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
- if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
+ if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
+ !skb_flow_limit(skb, qlen)) {
if (qlen) {
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4858,6 +4828,12 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
skb_headlen(skb) + mac_len, true);
+ if (skb_is_nonlinear(skb)) {
+ skb_shinfo(skb)->xdp_frags_size = skb->data_len;
+ xdp_buff_set_frags_flag(xdp);
+ } else {
+ xdp_buff_clear_frags_flag(xdp);
+ }
orig_data_end = xdp->data_end;
orig_data = xdp->data;
@@ -4887,6 +4863,14 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
skb->len += off; /* positive on grow, negative on shrink */
}
+ /* XDP frag metadata (e.g. nr_frags) is updated in eBPF helpers
+ * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here.
+ */
+ if (xdp_buff_has_frags(xdp))
+ skb->data_len = skb_shinfo(skb)->xdp_frags_size;
+ else
+ skb->data_len = 0;
+
/* check if XDP changed the eth hdr such that the SKB needs an update */
eth = (struct ethhdr *)xdp->data;
if ((orig_eth_type != eth->h_proto) ||
@@ -4920,11 +4904,35 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
return act;
}
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+static int
+netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
+{
+ struct sk_buff *skb = *pskb;
+ int err, hroom, troom;
+
+ if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
+ return 0;
+
+ /* In case we have to go down the path and also linearize,
+ * then lets do the pskb_expand_head() work just once here.
+ */
+ hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+ troom = skb->tail + skb->data_len - skb->end;
+ err = pskb_expand_head(skb,
+ hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+ troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
+ if (err)
+ return err;
+
+ return skb_linearize(skb);
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- u32 act = XDP_DROP;
+ struct sk_buff *skb = *pskb;
+ u32 mac_len, act = XDP_DROP;
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
@@ -4932,41 +4940,36 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
if (skb_is_redirected(skb))
return XDP_PASS;
- /* XDP packets must be linear and must have sufficient headroom
- * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
- * native XDP provides, thus we need to do it here as well.
+ /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
+ * bytes. This is the guarantee that also native XDP provides,
+ * thus we need to do it here as well.
*/
+ mac_len = skb->data - skb_mac_header(skb);
+ __skb_push(skb, mac_len);
+
if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
- int troom = skb->tail + skb->data_len - skb->end;
-
- /* In case we have to go down the path and also linearize,
- * then lets do the pskb_expand_head() work just once here.
- */
- if (pskb_expand_head(skb,
- hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
- troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
- goto do_drop;
- if (skb_linearize(skb))
+ if (netif_skb_check_for_xdp(pskb, xdp_prog))
goto do_drop;
}
- act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+ __skb_pull(*pskb, mac_len);
+
+ act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
switch (act) {
case XDP_REDIRECT:
case XDP_TX:
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
+ bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
- trace_xdp_exception(skb->dev, xdp_prog, act);
+ trace_xdp_exception((*pskb)->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
do_drop:
- kfree_skb(skb);
+ kfree_skb(*pskb);
break;
}
@@ -5004,24 +5007,24 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
if (xdp_prog) {
struct xdp_buff xdp;
u32 act;
int err;
- act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
+ act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
if (act != XDP_PASS) {
switch (act) {
case XDP_REDIRECT:
- err = xdp_do_generic_redirect(skb->dev, skb,
+ err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
&xdp, xdp_prog);
if (err)
goto out_redir;
break;
case XDP_TX:
- generic_xdp_tx(skb, xdp_prog);
+ generic_xdp_tx(*pskb, xdp_prog);
break;
}
return XDP_DROP;
@@ -5029,7 +5032,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
}
return XDP_PASS;
out_redir:
- kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
+ kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
@@ -5038,7 +5041,7 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_rx(skb);
@@ -5330,7 +5333,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
int ret = NET_RX_DROP;
__be16 type;
- net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
@@ -5352,7 +5355,8 @@ another_round:
int ret2;
migrate_disable();
- ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+ ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
+ &skb);
migrate_enable();
if (ret2 != XDP_PASS) {
@@ -5373,7 +5377,7 @@ another_round:
if (pfmemalloc)
goto skip_taps;
- list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -5713,7 +5717,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
{
int ret;
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
if (skb_defer_rx_timestamp(skb))
return NET_RX_SUCCESS;
@@ -5743,7 +5747,8 @@ void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
- net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
+ net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
+ skb);
skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
@@ -5967,7 +5972,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
net_rps_action_and_irq_enable(sd);
}
- napi->weight = READ_ONCE(dev_rx_weight);
+ napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
while (again) {
struct sk_buff *skb;
@@ -6156,6 +6161,27 @@ struct napi_struct *napi_by_id(unsigned int napi_id)
return NULL;
}
+static void skb_defer_free_flush(struct softnet_data *sd)
+{
+ struct sk_buff *skb, *next;
+
+ /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
+ if (!READ_ONCE(sd->defer_list))
+ return;
+
+ spin_lock(&sd->defer_lock);
+ skb = sd->defer_list;
+ sd->defer_list = NULL;
+ sd->defer_count = 0;
+ spin_unlock(&sd->defer_lock);
+
+ while (skb != NULL) {
+ next = skb->next;
+ napi_consume_skb(skb, 1);
+ skb = next;
+ }
+}
+
#if defined(CONFIG_NET_RX_BUSY_POLL)
static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
@@ -6177,8 +6203,13 @@ static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
clear_bit(NAPI_STATE_SCHED, &napi->state);
}
-static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
- u16 budget)
+enum {
+ NAPI_F_PREFER_BUSY_POLL = 1,
+ NAPI_F_END_ON_RESCHED = 2,
+};
+
+static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
+ unsigned flags, u16 budget)
{
bool skip_schedule = false;
unsigned long timeout;
@@ -6198,7 +6229,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool
local_bh_disable();
- if (prefer_busy_poll) {
+ if (flags & NAPI_F_PREFER_BUSY_POLL) {
napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
timeout = READ_ONCE(napi->dev->gro_flush_timeout);
if (napi->defer_hard_irqs_count && timeout) {
@@ -6222,23 +6253,23 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool
local_bh_enable();
}
-void napi_busy_loop(unsigned int napi_id,
- bool (*loop_end)(void *, unsigned long),
- void *loop_end_arg, bool prefer_busy_poll, u16 budget)
+static void __napi_busy_loop(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, unsigned flags, u16 budget)
{
unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
int (*napi_poll)(struct napi_struct *napi, int budget);
void *have_poll_lock = NULL;
struct napi_struct *napi;
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
restart:
napi_poll = NULL;
- rcu_read_lock();
-
napi = napi_by_id(napi_id);
if (!napi)
- goto out;
+ return;
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_disable();
@@ -6254,14 +6285,14 @@ restart:
*/
if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
NAPIF_STATE_IN_BUSY_POLL)) {
- if (prefer_busy_poll)
+ if (flags & NAPI_F_PREFER_BUSY_POLL)
set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
}
if (cmpxchg(&napi->state, val,
val | NAPIF_STATE_IN_BUSY_POLL |
NAPIF_STATE_SCHED) != val) {
- if (prefer_busy_poll)
+ if (flags & NAPI_F_PREFER_BUSY_POLL)
set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
goto count;
}
@@ -6275,18 +6306,22 @@ count:
if (work > 0)
__NET_ADD_STATS(dev_net(napi->dev),
LINUX_MIB_BUSYPOLLRXPACKETS, work);
+ skb_defer_free_flush(this_cpu_ptr(&softnet_data));
local_bh_enable();
if (!loop_end || loop_end(loop_end_arg, start_time))
break;
if (unlikely(need_resched())) {
+ if (flags & NAPI_F_END_ON_RESCHED)
+ break;
if (napi_poll)
- busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
+ busy_poll_stop(napi, have_poll_lock, flags, budget);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
rcu_read_unlock();
cond_resched();
+ rcu_read_lock();
if (loop_end(loop_end_arg, start_time))
return;
goto restart;
@@ -6294,10 +6329,31 @@ count:
cpu_relax();
}
if (napi_poll)
- busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
+ busy_poll_stop(napi, have_poll_lock, flags, budget);
if (!IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
-out:
+}
+
+void napi_busy_loop_rcu(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget)
+{
+ unsigned flags = NAPI_F_END_ON_RESCHED;
+
+ if (prefer_busy_poll)
+ flags |= NAPI_F_PREFER_BUSY_POLL;
+
+ __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
+}
+
+void napi_busy_loop(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg, bool prefer_busy_poll, u16 budget)
+{
+ unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;
+
+ rcu_read_lock();
+ __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);
@@ -6680,27 +6736,6 @@ static int napi_thread_wait(struct napi_struct *napi)
return -1;
}
-static void skb_defer_free_flush(struct softnet_data *sd)
-{
- struct sk_buff *skb, *next;
-
- /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
- if (!READ_ONCE(sd->defer_list))
- return;
-
- spin_lock(&sd->defer_lock);
- skb = sd->defer_list;
- sd->defer_list = NULL;
- sd->defer_count = 0;
- spin_unlock(&sd->defer_lock);
-
- while (skb != NULL) {
- next = skb->next;
- napi_consume_skb(skb, 1);
- skb = next;
- }
-}
-
static int napi_threaded_poll(void *data)
{
struct napi_struct *napi = data;
@@ -6742,8 +6777,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
- usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
- int budget = READ_ONCE(netdev_budget);
+ usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
+ int budget = READ_ONCE(net_hotdata.netdev_budget);
LIST_HEAD(list);
LIST_HEAD(repoll);
@@ -8586,12 +8621,12 @@ unsigned int dev_get_flags(const struct net_device *dev)
{
unsigned int flags;
- flags = (dev->flags & ~(IFF_PROMISC |
+ flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
IFF_ALLMULTI |
IFF_RUNNING |
IFF_LOWER_UP |
IFF_DORMANT)) |
- (dev->gflags & (IFF_PROMISC |
+ (READ_ONCE(dev->gflags) & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev)) {
@@ -8914,7 +8949,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
}
EXPORT_SYMBOL(dev_set_mac_address);
-static DECLARE_RWSEM(dev_addr_sem);
+DECLARE_RWSEM(dev_addr_sem);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack)
@@ -9074,28 +9109,6 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
-static void netdev_dpll_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
-{
-#if IS_ENABLED(CONFIG_DPLL)
- rtnl_lock();
- dev->dpll_pin = dpll_pin;
- rtnl_unlock();
-#endif
-}
-
-void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
-{
- WARN_ON(!dpll_pin);
- netdev_dpll_pin_assign(dev, dpll_pin);
-}
-EXPORT_SYMBOL(netdev_dpll_pin_set);
-
-void netdev_dpll_pin_clear(struct net_device *dev)
-{
- netdev_dpll_pin_assign(dev, NULL);
-}
-EXPORT_SYMBOL(netdev_dpll_pin_clear);
-
/**
* dev_change_proto_down - set carrier according to proto_down.
*
@@ -9690,11 +9703,11 @@ static void dev_index_release(struct net *net, int ifindex)
/* Delayed registration/unregisteration */
LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+atomic_t dev_unreg_count = ATOMIC_INIT(0);
static void net_set_todo(struct net_device *dev)
{
list_add_tail(&dev->todo_list, &net_todo_list);
- atomic_inc(&dev_net(dev)->dev_unreg_count);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
@@ -10259,9 +10272,9 @@ int register_netdevice(struct net_device *dev)
goto err_ifindex_release;
ret = netdev_register_kobject(dev);
- write_lock(&dev_base_lock);
- dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
- write_unlock(&dev_base_lock);
+
+ WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
+
if (ret)
goto err_uninit_notify;
@@ -10337,7 +10350,7 @@ EXPORT_SYMBOL(register_netdevice);
* that need to tie several hardware interfaces to a single NAPI
* poll scheduler due to HW limitations.
*/
-int init_dummy_netdev(struct net_device *dev)
+void init_dummy_netdev(struct net_device *dev)
{
/* Clear everything. Note we don't initialize spinlocks
* as they aren't supposed to be taken by any of the
@@ -10365,8 +10378,6 @@ int init_dummy_netdev(struct net_device *dev)
* because users of this 'device' don't need to change
* its refcount.
*/
-
- return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
@@ -10521,6 +10532,7 @@ void netdev_run_todo(void)
{
struct net_device *dev, *tmp;
struct list_head list;
+ int cnt;
#ifdef CONFIG_LOCKDEP
struct list_head unlink_list;
@@ -10551,12 +10563,11 @@ void netdev_run_todo(void)
continue;
}
- write_lock(&dev_base_lock);
- dev->reg_state = NETREG_UNREGISTERED;
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
linkwatch_sync_dev(dev);
}
+ cnt = 0;
while (!list_empty(&list)) {
dev = netdev_wait_allrefs_any(&list);
list_del(&dev->todo_list);
@@ -10574,12 +10585,13 @@ void netdev_run_todo(void)
if (dev->needs_free_netdev)
free_netdev(dev);
- if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
- wake_up(&netdev_unregistering_wq);
+ cnt++;
/* Free network device */
kobject_put(&dev->dev.kobj);
}
+ if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
+ wake_up(&netdev_unregistering_wq);
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
@@ -10656,6 +10668,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
+ } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
+ dev_get_tstats64(dev, storage);
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
@@ -10970,7 +10984,7 @@ void free_netdev(struct net_device *dev)
}
BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
- dev->reg_state = NETREG_RELEASED;
+ WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
/* will free via device release */
put_device(&dev->dev);
@@ -11026,6 +11040,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
+ int cnt = 0;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
@@ -11057,10 +11072,8 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
/* And unlink it from device chain. */
- write_lock(&dev_base_lock);
- unlist_netdevice(dev, false);
- dev->reg_state = NETREG_UNREGISTERING;
- write_unlock(&dev_base_lock);
+ unlist_netdevice(dev);
+ WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
}
flush_all_backlogs();
@@ -11122,7 +11135,9 @@ void unregister_netdevice_many_notify(struct list_head *head,
list_for_each_entry(dev, head, unreg_list) {
netdev_put(dev, &dev->dev_registered_tracker);
net_set_todo(dev);
+ cnt++;
}
+ atomic_add(cnt, &dev_unreg_count);
list_del(head);
}
@@ -11240,7 +11255,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
dev_close(dev);
/* And unlink it from device chain */
- unlist_netdevice(dev, true);
+ unlist_netdevice(dev);
synchronize_net();
@@ -11551,6 +11566,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
static void __net_exit default_device_exit_net(struct net *net)
{
+ struct netdev_name_node *name_node, *tmp;
struct net_device *dev, *aux;
/*
* Push all migratable network devices back to the
@@ -11573,6 +11589,11 @@ static void __net_exit default_device_exit_net(struct net *net)
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
+
+ netdev_for_each_altname_safe(dev, name_node, tmp)
+ if (netdev_name_in_use(&init_net, name_node->name))
+ __netdev_name_node_alt_destroy(name_node);
+
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
pr_emerg("%s: failed to move %s to init_net: %d\n",
@@ -11643,11 +11664,12 @@ static void __init net_dev_struct_check(void)
CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);
/* TXRX read-mostly hotpath */
+ CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
- CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 30);
+ CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 38);
/* RX read-mostly hotpath */
CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
@@ -11677,6 +11699,28 @@ static void __init net_dev_struct_check(void)
*
*/
+/* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
+#define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
+
+static int net_page_pool_create(int cpuid)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ struct page_pool_params page_pool_params = {
+ .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
+ .flags = PP_FLAG_SYSTEM_POOL,
+ .nid = NUMA_NO_NODE,
+ };
+ struct page_pool *pp_ptr;
+
+ pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
+ if (IS_ERR(pp_ptr))
+ return -ENOMEM;
+
+ per_cpu(system_page_pool, cpuid) = pp_ptr;
+#endif
+ return 0;
+}
+
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -11695,7 +11739,6 @@ static int __init net_dev_init(void)
if (netdev_kobject_init())
goto out;
- INIT_LIST_HEAD(&ptype_all);
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
@@ -11729,6 +11772,9 @@ static int __init net_dev_init(void)
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
+
+ if (net_page_pool_create(i))
+ goto out;
}
dev_boot_phase = 0;
@@ -11756,6 +11802,19 @@ static int __init net_dev_init(void)
WARN_ON(rc < 0);
rc = 0;
out:
+ if (rc < 0) {
+ for_each_possible_cpu(i) {
+ struct page_pool *pp_ptr;
+
+ pp_ptr = per_cpu(system_page_pool, i);
+ if (!pp_ptr)
+ continue;
+
+ page_pool_destroy(pp_ptr);
+ per_cpu(system_page_pool, i) = NULL;
+ }
+ }
+
return rc;
}
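
Among the dev.c changes above, dev_base_seq_inc() now publishes the new sequence number with WRITE_ONCE() and skips the value 0 on wraparound, so lockless readers can keep treating 0 as "never set" while the RTNL-protected writer stays race-free against them. The pattern in isolation, as a sketch rather than the kernel code:

#include <stdatomic.h>

static _Atomic unsigned int base_seq;

/* single writer (RTNL in the kernel): bump a generation counter,
 * never letting it wrap back to 0, which readers treat as "unset" */
static void seq_inc(void)
{
	unsigned int val = atomic_load_explicit(&base_seq,
						memory_order_relaxed) + 1;

	atomic_store_explicit(&base_seq, val ? val : 1,
			      memory_order_relaxed);
}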
diff --git a/net/core/dev.h b/net/core/dev.h
index cf93e188785b..2bcaf8eee50c 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -3,6 +3,7 @@
#define _NET_CORE_DEV_H
#include <linux/types.h>
+#include <linux/rwsem.h>
struct net;
struct net_device;
@@ -37,15 +38,14 @@ int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);
/* sysctls not referred to from outside net/core/ */
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
extern unsigned int sysctl_skb_defer_max;
-extern int netdev_tstamp_prequeue;
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;
+extern struct rw_semaphore dev_addr_sem;
+
/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);
@@ -56,6 +56,7 @@ struct netdev_name_node {
struct list_head list;
struct net_device *dev;
const char *name;
+ struct rcu_head rcu;
};
int netdev_get_name(struct net *net, char *name, int ifindex);
@@ -63,6 +64,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
#define netdev_for_each_altname(dev, namenode) \
list_for_each_entry((namenode), &(dev)->name_node->list, list)
+#define netdev_for_each_altname_safe(dev, namenode, next) \
+ list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
+ list)
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
diff --git a/net/core/dst.c b/net/core/dst.c
index 6838d3212c37..95f533844f17 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -96,7 +96,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
}
EXPORT_SYMBOL(dst_alloc);
-struct dst_entry *dst_destroy(struct dst_entry * dst)
+static void dst_destroy(struct dst_entry *dst)
{
struct dst_entry *child = NULL;
@@ -126,15 +126,13 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
dst = child;
if (dst)
dst_release_immediate(dst);
- return NULL;
}
-EXPORT_SYMBOL(dst_destroy);
static void dst_destroy_rcu(struct rcu_head *head)
{
struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
- dst = dst_destroy(dst);
+ dst_destroy(dst);
}
/* Operations to mark dst as DEAD and clean up the net device referenced
diff --git a/net/core/filter.c b/net/core/filter.c
index 24061f29c9dd..8adf95765cdd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -83,11 +83,12 @@
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netkit.h>
#include <linux/un.h>
+#include <net/xdp_sock_drv.h>
#include "dev.h"
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id);
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
@@ -777,7 +778,7 @@ jmp_rest:
BPF_EMIT_JMP;
break;
- /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
+ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B: {
struct sock_filter tmp = {
.code = BPF_LD | BPF_ABS | BPF_B,
@@ -803,7 +804,7 @@ jmp_rest:
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break;
}
- /* RET_K is remaped into 2 insns. RET_A case doesn't need an
+ /* RET_K is remapped into 2 insns. RET_A case doesn't need an
* extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
*/
case BPF_RET | BPF_A:
@@ -2967,7 +2968,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
*
* Then if B is non-zero AND there is no space allocate space and
* compact A, B regions into page. If there is space shift ring to
- * the rigth free'ing the next element in ring to place B, leaving
+ * the right free'ing the next element in ring to place B, leaving
* A untouched except to reduce length.
*/
if (start != offset) {
@@ -4092,10 +4093,46 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
skb_frag_size_add(frag, offset);
sinfo->xdp_frags_size += offset;
+ if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ xsk_buff_get_tail(xdp)->data_end += offset;
return 0;
}
+static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
+ struct xdp_mem_info *mem_info, bool release)
+{
+ struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
+
+ if (release) {
+ xsk_buff_del_tail(zc_frag);
+ __xdp_return(NULL, mem_info, false, zc_frag);
+ } else {
+ zc_frag->data_end -= shrink;
+ }
+}
+
+static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
+ int shrink)
+{
+ struct xdp_mem_info *mem_info = &xdp->rxq->mem;
+ bool release = skb_frag_size(frag) == shrink;
+
+ if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
+ bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
+ goto out;
+ }
+
+ if (release) {
+ struct page *page = skb_frag_page(frag);
+
+ __xdp_return(page_address(page), mem_info, false, NULL);
+ }
+
+out:
+ return release;
+}
+
static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -4110,12 +4147,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
len_free += shrink;
offset -= shrink;
-
- if (skb_frag_size(frag) == shrink) {
- struct page *page = skb_frag_page(frag);
-
- __xdp_return(page_address(page), &xdp->rxq->mem,
- false, NULL);
+ if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
n_frags_free++;
} else {
skb_frag_size_sub(frag, shrink);
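A hedged consumer-side sketch (not part of this patch): the shrink path above is reached from bpf_xdp_adjust_tail() when a multi-buffer program trims a packet, and with this change the freed tail fragment may live in an XSK buffer pool. The 256-byte cap is illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_trim_tail(struct xdp_md *ctx)
{
	/* Total length, linear part plus all fragments. */
	__u64 len = bpf_xdp_get_buff_len(ctx);

	/* A large negative delta walks the frag array backwards,
	 * releasing (or, for zero-copy, deleting) emptied tail frags. */
	if (len > 256 && bpf_xdp_adjust_tail(ctx, 256 - (int)len) < 0)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";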
@@ -5956,7 +5988,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
return -ENODEV;
idev = __in6_dev_get_safely(dev);
- if (unlikely(!idev || !idev->cnf.forwarding))
+ if (unlikely(!idev || !READ_ONCE(idev->cnf.forwarding)))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
@@ -7862,7 +7894,7 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -7955,7 +7987,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -7974,7 +8006,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8161,7 +8193,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8220,13 +8252,13 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
#endif
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
#if IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)
/* The nf_conn___init type is used in the NF_CONNTRACK kfuncs. The
* kfuncs are defined in two different modules, and we want to be able
- * to use them interchangably with the same BTF type ID. Because modules
+ * to use them interchangeably with the same BTF type ID. Because modules
* can't de-duplicate BTF IDs between each other, we need the type to be
* referenced in the vmlinux BTF or the verifier will get confused about
* the different types. So we add this dummy type reference which will
@@ -8281,7 +8313,7 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8323,7 +8355,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_cgroup_classid_curr_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8367,7 +8399,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_lookup_tcp_proto;
#endif
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8378,7 +8410,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_load_bytes:
return &bpf_flow_dissector_load_bytes_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8405,7 +8437,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -8580,7 +8612,7 @@ static bool cg_skb_is_valid_access(int off, int size,
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
}
@@ -8592,7 +8624,7 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
break;
case bpf_ctx_range(struct __sk_buff, tstamp):
- if (!bpf_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
break;
default:
@@ -11236,7 +11268,7 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
@@ -11418,7 +11450,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
default:
- return bpf_sk_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id, prog);
}
}
@@ -11752,7 +11784,7 @@ const struct bpf_func_proto bpf_sock_from_file_proto = {
};
static const struct bpf_func_proto *
-bpf_sk_base_func_proto(enum bpf_func_id func_id)
+bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
const struct bpf_func_proto *func;
@@ -11781,10 +11813,10 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
- if (!perfmon_capable())
+ if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
return NULL;
return func;
@@ -11837,6 +11869,103 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
return 0;
}
+
+__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk,
+ struct bpf_tcp_req_attrs *attrs, int attrs__sz)
+{
+#if IS_ENABLED(CONFIG_SYN_COOKIES)
+ const struct request_sock_ops *ops;
+ struct inet_request_sock *ireq;
+ struct tcp_request_sock *treq;
+ struct request_sock *req;
+ struct net *net;
+ __u16 min_mss;
+ u32 tsoff = 0;
+
+ if (attrs__sz != sizeof(*attrs) ||
+ attrs->reserved[0] || attrs->reserved[1] || attrs->reserved[2])
+ return -EINVAL;
+
+ if (!skb_at_tc_ingress(skb))
+ return -EINVAL;
+
+ net = dev_net(skb->dev);
+ if (net != sock_net(sk))
+ return -ENETUNREACH;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ops = &tcp_request_sock_ops;
+ min_mss = 536;
+ break;
+#if IS_BUILTIN(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ ops = &tcp6_request_sock_ops;
+ min_mss = IPV6_MIN_MTU - 60;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_LISTEN ||
+ sk_is_mptcp(sk))
+ return -EINVAL;
+
+ if (attrs->mss < min_mss)
+ return -EINVAL;
+
+ if (attrs->wscale_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_window_scaling))
+ return -EINVAL;
+
+ if (attrs->snd_wscale > TCP_MAX_WSCALE ||
+ attrs->rcv_wscale > TCP_MAX_WSCALE)
+ return -EINVAL;
+ }
+
+ if (attrs->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
+ return -EINVAL;
+
+ if (attrs->tstamp_ok) {
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
+ return -EINVAL;
+
+ tsoff = attrs->rcv_tsecr - tcp_ns_to_ts(attrs->usec_ts_ok, tcp_clock_ns());
+ }
+
+ req = inet_reqsk_alloc(ops, sk, false);
+ if (!req)
+ return -ENOMEM;
+
+ ireq = inet_rsk(req);
+ treq = tcp_rsk(req);
+
+ req->rsk_listener = sk;
+ req->syncookie = 1;
+ req->mss = attrs->mss;
+ req->ts_recent = attrs->rcv_tsval;
+
+ ireq->snd_wscale = attrs->snd_wscale;
+ ireq->rcv_wscale = attrs->rcv_wscale;
+ ireq->tstamp_ok = !!attrs->tstamp_ok;
+ ireq->sack_ok = !!attrs->sack_ok;
+ ireq->wscale_ok = !!attrs->wscale_ok;
+ ireq->ecn_ok = !!attrs->ecn_ok;
+
+ treq->req_usec_ts = !!attrs->usec_ts_ok;
+ treq->ts_off = tsoff;
+
+ skb_orphan(skb);
+ skb->sk = req_to_sk(req);
+ skb->destructor = sock_pfree;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
__bpf_kfunc_end_defs();
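A hedged sketch of the intended caller: a tc ingress program that terminates its own arbitrary SYN-cookie handshake. The extern declaration mirrors the kfunc above; header parsing and cookie validation are elided, and the attribute values are illustrative.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define TC_ACT_OK	0
#define TC_ACT_SHOT	2

extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk,
				   struct bpf_tcp_req_attrs *attrs,
				   int attrs__sz) __ksym;

SEC("tc")
int accept_cookie_ack(struct __sk_buff *skb)
{
	struct bpf_tcp_req_attrs attrs = {
		.mss		= 1460,	/* decoded from the cookie */
		.snd_wscale	= 7,
		.rcv_wscale	= 7,
		.wscale_ok	= 1,
		.sack_ok	= 1,
	};
	struct bpf_sock_tuple tuple = {};	/* filled from the ACK */
	struct bpf_sock *sk;

	/* ... parse the ACK, validate the custom cookie, fill
	 * @tuple and @attrs ... */

	sk = bpf_skc_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_SHOT;

	if (sk->state == BPF_TCP_LISTEN) {
		struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk);

		if (tp)
			bpf_sk_assign_tcp_reqsk(skb, (struct sock *)tp,
						&attrs, sizeof(attrs));
	}

	bpf_sk_release(sk);
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";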
int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
@@ -11853,17 +11982,21 @@ int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
return 0;
}
-BTF_SET8_START(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
-BTF_SET8_END(bpf_kfunc_check_set_skb)
+BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
-BTF_SET8_START(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
BTF_ID_FLAGS(func, bpf_dynptr_from_xdp)
-BTF_SET8_END(bpf_kfunc_check_set_xdp)
+BTF_KFUNCS_END(bpf_kfunc_check_set_xdp)
-BTF_SET8_START(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr)
BTF_ID_FLAGS(func, bpf_sock_addr_set_sun_path)
-BTF_SET8_END(bpf_kfunc_check_set_sock_addr)
+BTF_KFUNCS_END(bpf_kfunc_check_set_sock_addr)
+
+BTF_KFUNCS_START(bpf_kfunc_check_set_tcp_reqsk)
+BTF_ID_FLAGS(func, bpf_sk_assign_tcp_reqsk, KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(bpf_kfunc_check_set_tcp_reqsk)
static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
.owner = THIS_MODULE,
@@ -11880,6 +12013,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_sock_addr = {
.set = &bpf_kfunc_check_set_sock_addr,
};
+static const struct btf_kfunc_id_set bpf_kfunc_set_tcp_reqsk = {
+ .owner = THIS_MODULE,
+ .set = &bpf_kfunc_check_set_tcp_reqsk,
+};
+
static int __init bpf_kfunc_init(void)
{
int ret;
@@ -11895,8 +12033,9 @@ static int __init bpf_kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
- return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- &bpf_kfunc_set_sock_addr);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ &bpf_kfunc_set_sock_addr);
+ return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_tcp_reqsk);
}
late_initcall(bpf_kfunc_init);
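For reference, a minimal sketch of the BTF_KFUNCS_START/END pattern now used above (it supersedes BTF_SET8_START/END and marks the set as holding kfuncs); all example names are hypothetical.

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_example_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(example_kfunc_ids)
BTF_ID_FLAGS(func, bpf_example_skb_len, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(example_kfunc_ids)

static const struct btf_kfunc_id_set example_kfunc_set = {
	.owner	= THIS_MODULE,
	.set	= &example_kfunc_ids,
};

static int __init example_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
					 &example_kfunc_set);
}
late_initcall(example_kfunc_init);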
@@ -11936,9 +12075,9 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
__bpf_kfunc_end_defs();
-BTF_SET8_START(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_START(bpf_sk_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
-BTF_SET8_END(bpf_sk_iter_kfunc_ids)
+BTF_KFUNCS_END(bpf_sk_iter_kfunc_ids)
static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
diff --git a/net/core/gro.c b/net/core/gro.c
index 0759277dc14e..ee30d4f0c038 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -10,9 +10,6 @@
#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(offload_lock);
-struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
-/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
-int gro_normal_batch __read_mostly = 8;
/**
* dev_add_offload - register offload handlers
@@ -31,7 +28,7 @@ void dev_add_offload(struct packet_offload *po)
struct packet_offload *elem;
spin_lock(&offload_lock);
- list_for_each_entry(elem, &offload_base, list) {
+ list_for_each_entry(elem, &net_hotdata.offload_base, list) {
if (po->priority < elem->priority)
break;
}
@@ -55,7 +52,7 @@ EXPORT_SYMBOL(dev_add_offload);
*/
static void __dev_remove_offload(struct packet_offload *po)
{
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *po1;
spin_lock(&offload_lock);
@@ -235,9 +232,9 @@ done:
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
- struct list_head *head = &offload_base;
int err = -ENOENT;
BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
@@ -369,15 +366,21 @@ static void gro_list_prepare(const struct list_head *head,
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
+ const struct skb_shared_info *pinfo;
+ const skb_frag_t *frag0;
+ unsigned int headlen;
NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ headlen = skb_headlen(skb);
+ NAPI_GRO_CB(skb)->frag0 = skb->data;
+ NAPI_GRO_CB(skb)->frag0_len = headlen;
+ if (headlen)
+ return;
+
+ pinfo = skb_shinfo(skb);
+ frag0 = &pinfo->frags[0];
- if (!skb_headlen(skb) && pinfo->nr_frags &&
- !PageHighMem(skb_frag_page(frag0)) &&
+ if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
(!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
@@ -438,7 +441,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
{
u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
struct gro_list *gro_list = &napi->gro_hash[bucket];
- struct list_head *head = &offload_base;
+ struct list_head *head = &net_hotdata.offload_base;
struct packet_offload *ptype;
__be16 type = skb->protocol;
struct sk_buff *pp = NULL;
@@ -544,7 +547,7 @@ normal:
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -558,7 +561,7 @@ EXPORT_SYMBOL(gro_find_receive_by_type);
struct packet_offload *gro_find_complete_by_type(__be16 type)
{
- struct list_head *offload_head = &offload_base;
+ struct list_head *offload_head = &net_hotdata.offload_base;
struct packet_offload *ptype;
list_for_each_entry_rcu(ptype, offload_head, list) {
@@ -700,7 +703,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb, hlen);
- if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ if (unlikely(!skb_gro_may_pull(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
net_warn_ratelimited("%s: dropping impossible skb from %s\n",
@@ -710,7 +713,10 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
}
} else {
eth = (const struct ethhdr *)skb->data;
- gro_pull_from_frag0(skb, hlen);
+
+ if (NAPI_GRO_CB(skb)->frag0 != skb->data)
+ gro_pull_from_frag0(skb, hlen);
+
NAPI_GRO_CB(skb)->frag0 += hlen;
NAPI_GRO_CB(skb)->frag0_len -= hlen;
}
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index ed5ec5de47f6..ff8e5b64bf6b 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
+#include <net/hotdata.h>
struct gro_cell {
struct sk_buff_head napi_skbs;
@@ -26,7 +27,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
cell = this_cpu_ptr(gcells->cells);
- if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
+ if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
drop:
dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
diff --git a/net/core/gso.c b/net/core/gso.c
index 9e1803bfc9c6..bcd156372f4d 100644
--- a/net/core/gso.c
+++ b/net/core/gso.c
@@ -17,7 +17,7 @@ struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
struct packet_offload *ptype;
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
@@ -48,7 +48,7 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
__skb_pull(skb, vlan_depth);
rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
+ list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
segs = ptype->callbacks.gso_segment(skb, features);
break;
diff --git a/net/core/hotdata.c b/net/core/hotdata.c
new file mode 100644
index 000000000000..c8a7a451c18a
--- /dev/null
+++ b/net/core/hotdata.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/hotdata.h>
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+
+
+struct net_hotdata net_hotdata __cacheline_aligned = {
+ .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
+ .ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
+ .gro_normal_batch = 8,
+
+ .netdev_budget = 300,
+ /* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
+ .netdev_budget_usecs = 2 * USEC_PER_SEC / HZ,
+
+ .tstamp_prequeue = 1,
+ .max_backlog = 1000,
+ .dev_tx_weight = 64,
+ .dev_rx_weight = 64,
+};
+EXPORT_SYMBOL(net_hotdata);
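net_hotdata gathers what used to be scattered globals (offload_base, gro_normal_batch, netdev_max_backlog, ...) onto dedicated hot cachelines; readers elsewhere in this series switch to the pattern below (helper name hypothetical).

#include <net/hotdata.h>

/* Former netdev_max_backlog global, still read locklessly. */
static bool example_backlog_full(const struct sk_buff_head *queue)
{
	return skb_queue_len(queue) > READ_ONCE(net_hotdata.max_backlog);
}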
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 429571c258da..8ec35194bfcb 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -33,7 +33,7 @@ static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
-static unsigned char default_operstate(const struct net_device *dev)
+static unsigned int default_operstate(const struct net_device *dev)
{
if (netif_testing(dev))
return IF_OPER_TESTING;
@@ -62,16 +62,13 @@ static unsigned char default_operstate(const struct net_device *dev)
return IF_OPER_UP;
}
-
static void rfc2863_policy(struct net_device *dev)
{
- unsigned char operstate = default_operstate(dev);
+ unsigned int operstate = default_operstate(dev);
- if (operstate == dev->operstate)
+ if (operstate == READ_ONCE(dev->operstate))
return;
- write_lock(&dev_base_lock);
-
switch(dev->link_mode) {
case IF_LINK_MODE_TESTING:
if (operstate == IF_OPER_UP)
@@ -87,9 +84,7 @@ static void rfc2863_policy(struct net_device *dev)
break;
}
- dev->operstate = operstate;
-
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->operstate, operstate);
}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 09f7ed1a04e8..a97eceb84e61 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -3,52 +3,22 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
+#include <net/hotdata.h>
#include "dev.h"
-#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
-
-#define get_bucket(x) ((x) >> BUCKET_SPACE)
-#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
-#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
-
-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
+static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
- struct net *net = seq_file_net(seq);
+ unsigned long ifindex = *pos;
struct net_device *dev;
- struct hlist_head *h;
- unsigned int count = 0, offset = get_offset(*pos);
- h = &net->dev_index_head[get_bucket(*pos)];
- hlist_for_each_entry_rcu(dev, h, index_hlist) {
- if (++count == offset)
- return dev;
+ for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
+ *pos = dev->ifindex;
+ return dev;
}
-
- return NULL;
-}
-
-static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
-{
- struct net_device *dev;
- unsigned int bucket;
-
- do {
- dev = dev_from_same_bucket(seq, pos);
- if (dev)
- return dev;
-
- bucket = get_bucket(*pos) + 1;
- *pos = set_bucket_offset(bucket, 1);
- } while (bucket < NETDEV_HASHENTRIES);
-
return NULL;
}
-/*
- * This is invoked by the /proc filesystem handler to display a device
- * in detail.
- */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
@@ -56,16 +26,13 @@ static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
if (!*pos)
return SEQ_START_TOKEN;
- if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
- return NULL;
-
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
- return dev_from_bucket(seq, pos);
+ return dev_seq_from_index(seq, pos);
}
static void dev_seq_stop(struct seq_file *seq, void *v)
@@ -217,7 +184,7 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
}
}
- list_for_each_entry_rcu(pt, &ptype_all, list) {
+ list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
if (i == pos)
return pt;
++i;
@@ -265,13 +232,13 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
}
- nxt = ptype_all.next;
+ nxt = net_hotdata.ptype_all.next;
goto ptype_all;
}
if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
- if (nxt != &ptype_all)
+ if (nxt != &net_hotdata.ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a09d507c5b03..e3d7a8cfa20b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -24,6 +24,7 @@
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
+#include <net/rps.h>
#include "dev.h"
#include "net-sysfs.h"
@@ -34,10 +35,10 @@ static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
-/* Caller holds RTNL or dev_base_lock */
+/* Caller holds RTNL or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
- return dev->reg_state <= NETREG_REGISTERED;
+ return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
@@ -48,10 +49,10 @@ static ssize_t netdev_show(const struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = (*format)(ndev, buf);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -60,7 +61,7 @@ static ssize_t netdev_show(const struct device *dev,
#define NETDEVICE_SHOW(field, format_string) \
static ssize_t format_##field(const struct net_device *dev, char *buf) \
{ \
- return sysfs_emit(buf, format_string, dev->field); \
+ return sysfs_emit(buf, format_string, READ_ONCE(dev->field)); \
} \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -125,7 +126,7 @@ static DEVICE_ATTR_RO(iflink);
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
- return sysfs_emit(buf, fmt_dec, dev->name_assign_type);
+ return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}
static ssize_t name_assign_type_show(struct device *dev,
@@ -135,24 +136,28 @@ static ssize_t name_assign_type_show(struct device *dev,
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- if (ndev->name_assign_type != NET_NAME_UNKNOWN)
+ if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
ret = netdev_show(dev, attr, buf, format_name_assign_type);
return ret;
}
static DEVICE_ATTR_RO(name_assign_type);
-/* use same locking rules as GIFHWADDR ioctl's */
+/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct net_device *ndev = to_net_dev(dev);
ssize_t ret = -EINVAL;
- read_lock(&dev_base_lock);
+ down_read(&dev_addr_sem);
+
+ rcu_read_lock();
if (dev_isalive(ndev))
ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
+
+ up_read(&dev_addr_sem);
return ret;
}
static DEVICE_ATTR_RO(address);
@@ -161,10 +166,13 @@ static ssize_t broadcast_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *ndev = to_net_dev(dev);
+ int ret = -EINVAL;
+ rcu_read_lock();
if (dev_isalive(ndev))
- return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
- return -EINVAL;
+ ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
+ rcu_read_unlock();
+ return ret;
}
static DEVICE_ATTR_RO(broadcast);
@@ -318,11 +326,9 @@ static ssize_t operstate_show(struct device *dev,
const struct net_device *netdev = to_net_dev(dev);
unsigned char operstate;
- read_lock(&dev_base_lock);
- operstate = netdev->operstate;
+ operstate = READ_ONCE(netdev->operstate);
if (!netif_running(netdev))
operstate = IF_OPER_DOWN;
- read_unlock(&dev_base_lock);
if (operstate >= ARRAY_SIZE(operstates))
return -EINVAL; /* should not happen */
@@ -680,14 +686,14 @@ static ssize_t netstat_show(const struct device *d,
WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
offset % sizeof(u64) != 0);
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (dev_isalive(dev)) {
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1409,6 +1415,65 @@ static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
= __ATTR(hold_time, 0644,
bql_show_hold_time, bql_set_hold_time);
+static ssize_t bql_show_stall_thrs(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs));
+}
+
+static ssize_t bql_set_stall_thrs(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ struct dql *dql = &queue->dql;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err < 0)
+ return err;
+
+ value = msecs_to_jiffies(value);
+ if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG))
+ return -ERANGE;
+
+ if (!dql->stall_thrs && value)
+ dql->last_reap = jiffies;
+ /* Force last_reap to be live */
+ smp_wmb();
+ dql->stall_thrs = value;
+
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init =
+ __ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs);
+
+static ssize_t bql_show_stall_max(struct netdev_queue *queue, char *buf)
+{
+ return sprintf(buf, "%u\n", READ_ONCE(queue->dql.stall_max));
+}
+
+static ssize_t bql_set_stall_max(struct netdev_queue *queue,
+ const char *buf, size_t len)
+{
+ WRITE_ONCE(queue->dql.stall_max, 0);
+ return len;
+}
+
+static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init =
+ __ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max);
+
+static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf)
+{
+ struct dql *dql = &queue->dql;
+
+ return sprintf(buf, "%lu\n", dql->stall_cnt);
+}
+
+static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init =
+ __ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL);
+
static ssize_t bql_show_inflight(struct netdev_queue *queue,
char *buf)
{
@@ -1447,6 +1512,9 @@ static struct attribute *dql_attrs[] __ro_after_init = {
&bql_limit_min_attribute.attr,
&bql_hold_time_attribute.attr,
&bql_inflight_attribute.attr,
+ &bql_stall_thrs_attribute.attr,
+ &bql_stall_cnt_attribute.attr,
+ &bql_stall_max_attribute.attr,
NULL
};
@@ -1454,6 +1522,9 @@ static const struct attribute_group dql_group = {
.name = "byte_queue_limits",
.attrs = dql_attrs,
};
+#else
+/* Fake declaration, all the code using it should be dead */
+extern const struct attribute_group dql_group;
#endif /* CONFIG_BQL */
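A hedged userspace sketch of the three new per-queue attributes; the path assumes eth0/tx-0, and stall_thrs is taken in milliseconds (rounded to jiffies by the kernel side above).

#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/class/net/eth0/queues/tx-0/byte_queue_limits";
	char path[128];
	unsigned long val;
	FILE *f;

	/* Arm stall detection with an 8ms threshold. */
	snprintf(path, sizeof(path), "%s/stall_thrs", dir);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "8\n");
		fclose(f);
	}

	/* Read the running stall count; a write to stall_max would
	 * instead reset the worst-stall measurement to zero. */
	snprintf(path, sizeof(path), "%s/stall_cnt", dir);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%lu", &val) == 1)
			printf("stalls so far: %lu\n", val);
		fclose(f);
	}
	return 0;
}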
#ifdef CONFIG_XPS
@@ -1691,6 +1762,15 @@ static const struct kobj_type netdev_queue_ktype = {
.get_ownership = netdev_queue_get_ownership,
};
+static bool netdev_uses_bql(const struct net_device *dev)
+{
+ if (dev->features & NETIF_F_LLTX ||
+ dev->priv_flags & IFF_NO_QUEUE)
+ return false;
+
+ return IS_ENABLED(CONFIG_BQL);
+}
+
static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
struct netdev_queue *queue = dev->_tx + index;
@@ -1708,11 +1788,11 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
if (error)
goto err;
-#ifdef CONFIG_BQL
- error = sysfs_create_group(kobj, &dql_group);
- if (error)
- goto err;
-#endif
+ if (netdev_uses_bql(dev)) {
+ error = sysfs_create_group(kobj, &dql_group);
+ if (error)
+ goto err;
+ }
kobject_uevent(kobj, KOBJ_ADD);
return 0;
@@ -1733,9 +1813,9 @@ static int tx_queue_change_owner(struct net_device *ndev, int index,
if (error)
return error;
-#ifdef CONFIG_BQL
- error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
-#endif
+ if (netdev_uses_bql(ndev))
+ error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
+
return error;
}
#endif /* CONFIG_SYSFS */
@@ -1767,9 +1847,10 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
if (!refcount_read(&dev_net(dev)->ns.count))
queue->kobj.uevent_suppress = 1;
-#ifdef CONFIG_BQL
- sysfs_remove_group(&queue->kobj, &dql_group);
-#endif
+
+ if (netdev_uses_bql(dev))
+ sysfs_remove_group(&queue->kobj, &dql_group);
+
kobject_put(&queue->kobj);
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 72799533426b..f0540c557515 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -318,8 +318,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
/* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
- int error = 0;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
+ int error = 0;
refcount_set(&net->ns.count, 1);
ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
@@ -358,6 +359,15 @@ out_undo:
synchronize_rcu();
ops = saved_ops;
+ rtnl_lock();
+ list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+
+ ops = saved_ops;
list_for_each_entry_continue_reverse(ops, &pernet_list, list)
ops_exit_list(ops, &net_exit_list);
@@ -573,6 +583,7 @@ static void cleanup_net(struct work_struct *work)
struct net *net, *tmp, *last;
struct llist_node *net_kill_list;
LIST_HEAD(net_exit_list);
+ LIST_HEAD(dev_kill_list);
/* Atomically snapshot the list of namespaces to cleanup */
net_kill_list = llist_del_all(&cleanup_list);
@@ -611,7 +622,15 @@ static void cleanup_net(struct work_struct *work)
* the rcu_barrier() below isn't sufficient alone.
* Also the pre_exit() and exit() methods need this barrier.
*/
- synchronize_rcu();
+ synchronize_rcu_expedited();
+
+ rtnl_lock();
+ list_for_each_entry_reverse(ops, &pernet_list, list) {
+ if (ops->exit_batch_rtnl)
+ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list)
@@ -1193,7 +1212,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_
{
ops_pre_exit_list(ops, net_exit_list);
synchronize_rcu();
+
+ if (ops->exit_batch_rtnl) {
+ LIST_HEAD(dev_kill_list);
+
+ rtnl_lock();
+ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+ }
ops_exit_list(ops, net_exit_list);
+
ops_free_list(ops, net_exit_list);
}
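The new ->exit_batch_rtnl() hook, invoked above from the setup_net() error unwind, cleanup_net() and free_exit_list(), lets device-creating subsystems queue every doomed device under a single rtnl_lock() and leave the batched unregister to the core. A hedged adopter sketch with hypothetical names:

static struct rtnl_link_ops example_link_ops;	/* hypothetical */

static void example_exit_batch_rtnl(struct list_head *net_exit_list,
				    struct list_head *dev_kill_list)
{
	struct net_device *dev, *next;
	struct net *net;

	/* RTNL is already held by the caller; just queue devices. */
	list_for_each_entry(net, net_exit_list, exit_list)
		for_each_netdev_safe(net, dev, next)
			if (dev->rtnl_link_ops == &example_link_ops)
				unregister_netdevice_queue(dev, dev_kill_list);
}

static struct pernet_operations example_net_ops = {
	.exit_batch_rtnl = example_exit_batch_rtnl,
};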
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index be7f2ebd61b2..8d8ace9ef87f 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -68,6 +68,11 @@ static const struct nla_policy netdev_napi_get_dump_nl_policy[NETDEV_A_NAPI_IFIN
[NETDEV_A_NAPI_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
};
+/* NETDEV_CMD_QSTATS_GET - dump */
+static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE + 1] = {
+ [NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -138,6 +143,13 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.maxattr = NETDEV_A_NAPI_IFINDEX,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = NETDEV_CMD_QSTATS_GET,
+ .dumpit = netdev_nl_qstats_get_dumpit,
+ .policy = netdev_qstats_get_nl_policy,
+ .maxattr = NETDEV_A_QSTATS_SCOPE,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index a47f2bcbe4fa..4db40fd5b4a9 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -28,6 +28,8 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
enum {
NETDEV_NLGRP_MGMT,
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index fd98936da3ae..7004b3399c2b 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -8,6 +8,7 @@
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/netdev_rx_queue.h>
+#include <net/netdev_queues.h>
#include <net/busy_poll.h>
#include "netdev-genl-gen.h"
@@ -152,10 +153,7 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
-
- return skb->len;
+ return err;
}
static int
@@ -287,10 +285,7 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
-
- return skb->len;
+ return err;
}
static int
@@ -463,10 +458,220 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
rtnl_unlock();
- if (err != -EMSGSIZE)
- return err;
+ return err;
+}
+
+#define NETDEV_STAT_NOT_SET (~0ULL)
+
+static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
+{
+ const u64 *add = _add;
+ u64 *sum = _sum;
+
+ while (size) {
+ if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
+ *sum += *add;
+ sum++;
+ add++;
+ size -= 8;
+ }
+}
+
+static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
+{
+ if (value == NETDEV_STAT_NOT_SET)
+ return 0;
+ return nla_put_uint(rsp, attr_id, value);
+}
+
+static int
+netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
+{
+ if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
+ netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int
+netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
+ u32 q_type, int i, const struct genl_info *info)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ struct netdev_queue_stats_rx rx;
+ struct netdev_queue_stats_tx tx;
+ void *hdr;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
+ nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
+ goto nla_put_failure;
+
+ switch (q_type) {
+ case NETDEV_QUEUE_TYPE_RX:
+ memset(&rx, 0xff, sizeof(rx));
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ if (!memchr_inv(&rx, 0xff, sizeof(rx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_rx(rsp, &rx))
+ goto nla_put_failure;
+ break;
+ case NETDEV_QUEUE_TYPE_TX:
+ memset(&tx, 0xff, sizeof(tx));
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ if (!memchr_inv(&tx, 0xff, sizeof(tx)))
+ goto nla_cancel;
+ if (netdev_nl_stats_write_tx(rsp, &tx))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_cancel:
+ genlmsg_cancel(rsp, hdr);
+ return 0;
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+static int
+netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info,
+ struct netdev_nl_dump_ctx *ctx)
+{
+ const struct netdev_stat_ops *ops = netdev->stat_ops;
+ int i, err;
+
+ if (!(netdev->flags & IFF_UP))
+ return 0;
+
+ i = ctx->rxq_idx;
+ while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
+ i, info);
+ if (err)
+ return err;
+ ctx->rxq_idx = i++;
+ }
+ i = ctx->txq_idx;
+ while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
+ err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
+ i, info);
+ if (err)
+ return err;
+ ctx->txq_idx = i++;
+ }
+
+ ctx->rxq_idx = 0;
+ ctx->txq_idx = 0;
+ return 0;
+}
+
+static int
+netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info)
+{
+ struct netdev_queue_stats_rx rx_sum, rx;
+ struct netdev_queue_stats_tx tx_sum, tx;
+ const struct netdev_stat_ops *ops;
+ void *hdr;
+ int i;
+
+ ops = netdev->stat_ops;
+ /* Netdev can't guarantee any complete counters */
+ if (!ops->get_base_stats)
+ return 0;
+
+ memset(&rx_sum, 0xff, sizeof(rx_sum));
+ memset(&tx_sum, 0xff, sizeof(tx_sum));
+
+ ops->get_base_stats(netdev, &rx_sum, &tx_sum);
+
+ /* The op was there, but nothing reported, don't bother */
+ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
+ !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
+ return 0;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr)
+ return -EMSGSIZE;
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ memset(&rx, 0xff, sizeof(rx));
+ if (ops->get_queue_stats_rx)
+ ops->get_queue_stats_rx(netdev, i, &rx);
+ netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
+ }
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ memset(&tx, 0xff, sizeof(tx));
+ if (ops->get_queue_stats_tx)
+ ops->get_queue_stats_tx(netdev, i, &tx);
+ netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
+ }
+
+ if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
+ netdev_nl_stats_write_tx(rsp, &tx_sum))
+ goto nla_put_failure;
+
+ genlmsg_end(rsp, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
+}
+
+int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *netdev;
+ unsigned int scope;
+ int err = 0;
+
+ scope = 0;
+ if (info->attrs[NETDEV_A_QSTATS_SCOPE])
+ scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);
+
+ rtnl_lock();
+ for_each_netdev_dump(net, netdev, ctx->ifindex) {
+ if (!netdev->stat_ops)
+ continue;
+
+ switch (scope) {
+ case 0:
+ err = netdev_nl_stats_by_netdev(netdev, skb, info);
+ break;
+ case NETDEV_QSTATS_SCOPE_QUEUE:
+ err = netdev_nl_stats_by_queue(netdev, skb, info, ctx);
+ break;
+ }
+ if (err < 0)
+ break;
+ }
+ rtnl_unlock();
- return skb->len;
+ return err;
}
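The driver-side contract, as a hedged sketch: a driver opts in by publishing netdev_stat_ops, and any counter left at the all-0xff "not set" pattern is skipped by netdev_stat_put() above. The foo_* names are hypothetical.

static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_rx *stats)
{
	const struct foo_rx_ring *ring = &foo_priv(dev)->rx[idx];

	stats->packets = ring->packets;
	stats->bytes = ring->bytes;
	/* stats->alloc_fail deliberately left "not set" */
}

static void foo_get_base_stats(struct net_device *dev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	/* Totals from queues that no longer exist; zero if the
	 * driver carries none across reconfiguration. */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;
}

static const struct netdev_stat_ops foo_stat_ops = {
	.get_queue_stats_rx	= foo_get_queue_stats_rx,
	.get_base_stats		= foo_get_base_stats,
};

/* at probe time: netdev->stat_ops = &foo_stat_ops; */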
static int netdev_genl_netdevice_event(struct notifier_block *nb,
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 4933762e5a6b..d706fe5548df 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -31,6 +31,8 @@
#define BIAS_MAX (LONG_MAX >> 1)
#ifdef CONFIG_PAGE_POOL_STATS
+static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
+
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
@@ -171,13 +173,16 @@ static void page_pool_producer_unlock(struct page_pool *pool,
}
static int page_pool_init(struct page_pool *pool,
- const struct page_pool_params *params)
+ const struct page_pool_params *params,
+ int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
memcpy(&pool->p, &params->fast, sizeof(pool->p));
memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
+ pool->cpuid = cpuid;
+
/* Validate only known flags were used */
if (pool->p.flags & ~(PP_FLAG_ALL))
return -EINVAL;
@@ -217,14 +222,23 @@ static int page_pool_init(struct page_pool *pool,
pool->has_init_callback = !!pool->slow.init_callback;
#ifdef CONFIG_PAGE_POOL_STATS
- pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
- if (!pool->recycle_stats)
- return -ENOMEM;
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
+ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+ if (!pool->recycle_stats)
+ return -ENOMEM;
+ } else {
+ /* For system page pool instance we use a singular stats object
+ * instead of allocating a separate percpu variable for each
+ * (also percpu) page pool instance.
+ */
+ pool->recycle_stats = &pp_system_recycle_stats;
+ }
#endif
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+ free_percpu(pool->recycle_stats);
#endif
return -ENOMEM;
}
@@ -248,15 +262,18 @@ static void page_pool_uninit(struct page_pool *pool)
put_device(pool->p.dev);
#ifdef CONFIG_PAGE_POOL_STATS
- free_percpu(pool->recycle_stats);
+ if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
+ free_percpu(pool->recycle_stats);
#endif
}
/**
- * page_pool_create() - create a page pool.
+ * page_pool_create_percpu() - create a page pool for a given cpu.
* @params: parameters, see struct page_pool_params
+ * @cpuid: cpu identifier
*/
-struct page_pool *page_pool_create(const struct page_pool_params *params)
+struct page_pool *
+page_pool_create_percpu(const struct page_pool_params *params, int cpuid)
{
struct page_pool *pool;
int err;
@@ -265,7 +282,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
if (!pool)
return ERR_PTR(-ENOMEM);
- err = page_pool_init(pool, params);
+ err = page_pool_init(pool, params, cpuid);
if (err < 0)
goto err_free;
@@ -282,6 +299,16 @@ err_free:
kfree(pool);
return ERR_PTR(err);
}
+EXPORT_SYMBOL(page_pool_create_percpu);
+
+/**
+ * page_pool_create() - create a page pool
+ * @params: parameters, see struct page_pool_params
+ */
+struct page_pool *page_pool_create(const struct page_pool_params *params)
+{
+ return page_pool_create_percpu(params, -1);
+}
EXPORT_SYMBOL(page_pool_create);
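A hedged sketch of the new entry point: PP_FLAG_SYSTEM_POOL makes page_pool_init() above reuse the shared pp_system_recycle_stats rather than allocating percpu stats for every (itself percpu) pool instance. The pool size is illustrative.

static int __init example_percpu_pools(void)
{
	struct page_pool_params params = {
		.pool_size	= 64,
		.flags		= PP_FLAG_SYSTEM_POOL,
		.nid		= NUMA_NO_NODE,
	};
	struct page_pool *pool;
	int cpu;

	for_each_possible_cpu(cpu) {
		pool = page_pool_create_percpu(&params, cpu);
		if (IS_ERR(pool))
			return PTR_ERR(pool);
		/* stash @pool in a per-CPU pointer for later use */
	}
	return 0;
}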
static void page_pool_return_page(struct page_pool *pool, struct page *page);
@@ -934,8 +961,13 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
-void page_pool_unlink_napi(struct page_pool *pool)
+static void page_pool_disable_direct_recycling(struct page_pool *pool)
{
+ /* Disable direct recycling based on pool->cpuid.
+ * Paired with READ_ONCE() in napi_pp_put_page().
+ */
+ WRITE_ONCE(pool->cpuid, -1);
+
if (!pool->p.napi)
return;
@@ -947,7 +979,6 @@ void page_pool_unlink_napi(struct page_pool *pool)
WRITE_ONCE(pool->p.napi, NULL);
}
-EXPORT_SYMBOL(page_pool_unlink_napi);
void page_pool_destroy(struct page_pool *pool)
{
@@ -957,7 +988,7 @@ void page_pool_destroy(struct page_pool *pool)
if (!page_pool_put(pool))
return;
- page_pool_unlink_napi(pool);
+ page_pool_disable_direct_recycling(pool);
page_pool_free_frag(pool);
if (!page_pool_release(pool))
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index ffe5244e5597..3a3277ba167b 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -94,16 +94,15 @@ netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
state->pp_id = pool->user.id;
err = fill(skb, pool, info);
if (err)
- break;
+ goto out;
}
state->pp_id = 0;
}
+out:
mutex_unlock(&page_pools_lock);
rtnl_unlock();
- if (skb->len && err == -EMSGSIZE)
- return skb->len;
return err;
}
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index f35c2e998406..63de5c635842 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -33,9 +33,6 @@
void reqsk_queue_alloc(struct request_sock_queue *queue)
{
- spin_lock_init(&queue->rskq_lock);
-
- spin_lock_init(&queue->fastopenq.lock);
queue->fastopenq.rskq_rst_head = NULL;
queue->fastopenq.rskq_rst_tail = NULL;
queue->fastopenq.qlen = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f6f29eb03ec2..a3d7847ce69d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -483,24 +483,15 @@ EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
*/
static void rtnl_lock_unregistering_all(void)
{
- struct net *net;
- bool unregistering;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&netdev_unregistering_wq, &wait);
for (;;) {
- unregistering = false;
rtnl_lock();
/* We held write locked pernet_ops_rwsem, and parallel
* setup_net() and cleanup_net() are not possible.
*/
- for_each_net(net) {
- if (atomic_read(&net->dev_unreg_count) > 0) {
- unregistering = true;
- break;
- }
- }
- if (!unregistering)
+ if (!atomic_read(&dev_unreg_count))
break;
__rtnl_unlock();
@@ -851,9 +842,22 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
+void netdev_set_operstate(struct net_device *dev, int newstate)
+{
+ unsigned int old = READ_ONCE(dev->operstate);
+
+ do {
+ if (old == newstate)
+ return;
+ } while (!try_cmpxchg(&dev->operstate, &old, newstate));
+
+ netdev_state_change(dev);
+}
+EXPORT_SYMBOL(netdev_set_operstate);
+
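The helper replaces the old write_lock(&dev_base_lock) / store / notify sequence with a lock-free cmpxchg loop. A hedged sketch of a virtual-device driver that manages operstate itself:

static void example_set_slave_state(struct net_device *slave_dev, bool active)
{
	netdev_set_operstate(slave_dev,
			     active ? IF_OPER_UP : IF_OPER_DORMANT);
}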
static void set_operstate(struct net_device *dev, unsigned char transition)
{
- unsigned char operstate = dev->operstate;
+ unsigned char operstate = READ_ONCE(dev->operstate);
switch (transition) {
case IF_OPER_UP:
@@ -875,12 +879,7 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
break;
}
- if (dev->operstate != operstate) {
- write_lock(&dev_base_lock);
- dev->operstate = operstate;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- }
+ netdev_set_operstate(dev, operstate);
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
@@ -1020,14 +1019,17 @@ static size_t rtnl_xdp_size(void)
static size_t rtnl_prop_list_size(const struct net_device *dev)
{
struct netdev_name_node *name_node;
- size_t size;
+ unsigned int cnt = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
+ cnt++;
+ rcu_read_unlock();
- if (list_empty(&dev->name_node->list))
+ if (!cnt)
return 0;
- size = nla_total_size(0);
- list_for_each_entry(name_node, &dev->name_node->list, list)
- size += nla_total_size(ALTIFNAMSIZ);
- return size;
+
+ return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
}
static size_t rtnl_proto_down_size(const struct net_device *dev)
@@ -1054,7 +1056,7 @@ static size_t rtnl_dpll_pin_size(const struct net_device *dev)
{
size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
- size += dpll_msg_pin_handle_size(netdev_dpll_pin(dev));
+ size += dpll_netdev_pin_handle_size(dev);
return size;
}
@@ -1453,17 +1455,18 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
return 0;
}
-static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_fill_link_ifmap(struct sk_buff *skb,
+ const struct net_device *dev)
{
struct rtnl_link_ifmap map;
memset(&map, 0, sizeof(map));
- map.mem_start = dev->mem_start;
- map.mem_end = dev->mem_end;
- map.base_addr = dev->base_addr;
- map.irq = dev->irq;
- map.dma = dev->dma;
- map.port = dev->if_port;
+ map.mem_start = READ_ONCE(dev->mem_start);
+ map.mem_end = READ_ONCE(dev->mem_end);
+ map.base_addr = READ_ONCE(dev->base_addr);
+ map.irq = READ_ONCE(dev->irq);
+ map.dma = READ_ONCE(dev->dma);
+ map.port = READ_ONCE(dev->if_port);
if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
return -EMSGSIZE;
@@ -1609,10 +1612,10 @@ static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
bool force)
{
- int ifindex = dev_get_iflink(dev);
+ int iflink = dev_get_iflink(dev);
- if (force || dev->ifindex != ifindex)
- return nla_put_u32(skb, IFLA_LINK, ifindex);
+ if (force || READ_ONCE(dev->ifindex) != iflink)
+ return nla_put_u32(skb, IFLA_LINK, iflink);
return 0;
}
@@ -1696,7 +1699,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
struct netdev_name_node *name_node;
int count = 0;
- list_for_each_entry(name_node, &dev->name_node->list, list) {
+ list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
return -EMSGSIZE;
count++;
@@ -1704,6 +1707,7 @@ static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
return count;
}
+/* RCU protected. */
static int rtnl_fill_prop_list(struct sk_buff *skb,
const struct net_device *dev)
{
@@ -1789,7 +1793,7 @@ static int rtnl_fill_dpll_pin(struct sk_buff *skb,
if (!dpll_pin_nest)
return -EMSGSIZE;
- ret = dpll_msg_add_pin_handle(skb, netdev_dpll_pin(dev));
+ ret = dpll_netdev_add_pin_handle(skb, dev);
if (ret < 0)
goto nest_cancel;
@@ -1873,9 +1877,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
}
- if (rtnl_fill_link_ifmap(skb, dev))
- goto nla_put_failure;
-
if (dev->addr_len) {
if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
@@ -1925,10 +1926,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
rcu_read_lock();
if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
goto nla_put_failure_rcu;
- rcu_read_unlock();
-
+ if (rtnl_fill_link_ifmap(skb, dev))
+ goto nla_put_failure_rcu;
if (rtnl_fill_prop_list(skb, dev))
- goto nla_put_failure;
+ goto nla_put_failure_rcu;
+ rcu_read_unlock();
if (dev->dev.parent &&
nla_put_string(skb, IFLA_PARENT_DEV_NAME,
@@ -2197,25 +2199,22 @@ static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
+ const struct rtnl_link_ops *kind_ops = NULL;
struct netlink_ext_ack *extack = cb->extack;
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- struct net *tgt_net = net;
- int h, s_h;
- int idx = 0, s_idx;
- struct net_device *dev;
- struct hlist_head *head;
+ unsigned int flags = NLM_F_MULTI;
struct nlattr *tb[IFLA_MAX+1];
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
+ struct net *tgt_net = net;
u32 ext_filter_mask = 0;
- const struct rtnl_link_ops *kind_ops = NULL;
- unsigned int flags = NLM_F_MULTI;
+ struct net_device *dev;
int master_idx = 0;
int netnsid = -1;
int err, i;
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
if (err < 0) {
if (cb->strict_check)
@@ -2259,36 +2258,18 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
flags |= NLM_F_DUMP_FILTERED;
walk_entries:
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- hlist_for_each_entry(dev, head, index_hlist) {
- if (link_dump_filtered(dev, master_idx, kind_ops))
- goto cont;
- if (idx < s_idx)
- goto cont;
- err = rtnl_fill_ifinfo(skb, dev, net,
- RTM_NEWLINK,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq, 0, flags,
- ext_filter_mask, 0, NULL, 0,
- netnsid, GFP_KERNEL);
-
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
-cont:
- idx++;
- }
+ err = 0;
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
+ if (link_dump_filtered(dev, master_idx, kind_ops))
+ continue;
+ err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq, 0, flags,
+ ext_filter_mask, 0, NULL, 0,
+ netnsid, GFP_KERNEL);
+ if (err < 0)
+ break;
}
-out:
- err = skb->len;
-out_err:
- cb->args[1] = idx;
- cb->args[0] = h;
cb->seq = tgt_net->dev_base_seq;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
if (netnsid >= 0)
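The same resumable-iterator pattern as above, in isolation and hedged: the dump context persists the last ifindex across recvmsg() calls, so interrupted dumps resume by ifindex instead of by hash bucket and offset (example_fill_dev is hypothetical).

static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct {
		unsigned long ifindex;
	} *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, dev, ctx->ifindex) {
		err = example_fill_dev(skb, dev);
		if (err < 0)
			break;	/* next call resumes at this ifindex */
	}
	rtnl_unlock();

	return err;
}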
@@ -2980,11 +2961,9 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_LINKMODE]) {
unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
- write_lock(&dev_base_lock);
if (dev->link_mode ^ value)
status |= DO_SETLINK_NOTIFY;
- dev->link_mode = value;
- write_unlock(&dev_base_lock);
+ WRITE_ONCE(dev->link_mode, value);
}
if (tb[IFLA_VFINFO_LIST]) {
@@ -5166,10 +5145,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
- struct nlattr *br_spec, *attr = NULL;
+ struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
int rem, err = -EOPNOTSUPP;
u16 flags = 0;
- bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
return -EINVAL;
@@ -5187,11 +5165,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
+ if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
if (nla_len(attr) < sizeof(flags))
return -EINVAL;
- have_flags = true;
+ br_flags_attr = attr;
flags = nla_get_u16(attr);
}
@@ -5235,8 +5213,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
}
- if (have_flags)
- memcpy(nla_data(attr), &flags, sizeof(flags));
+ if (br_flags_attr)
+ memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
out:
return err;
}
@@ -6550,6 +6528,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
}
owner = link->owner;
dumpit = link->dumpit;
+ flags = link->flags;
if (type == RTM_GETLINK - RTM_BASE)
min_dump_alloc = rtnl_calcit(skb, nlh);
@@ -6567,6 +6546,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.dump = dumpit,
.min_dump_alloc = min_dump_alloc,
.module = owner,
+ .flags = flags,
};
err = netlink_dump_start(rtnl, skb, nlh, &c);
/* netlink_dump_start() will keep a reference on
diff --git a/net/core/scm.c b/net/core/scm.c
index d0e0852a24d5..9cd4b0a01cd6 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -36,6 +36,7 @@
#include <net/compat.h>
#include <net/scm.h>
#include <net/cls_cgroup.h>
+#include <net/af_unix.h>
/*
@@ -85,6 +86,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
+ fpl->count_unix = 0;
fpl->max = SCM_MAX_FD;
fpl->user = NULL;
}
@@ -109,6 +111,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
fput(file);
return -EINVAL;
}
+ if (unix_get_socket(file))
+ fpl->count_unix++;
+
*fpp++ = file;
fpl->count++;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index edbbef563d4d..766219011aea 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -69,6 +69,7 @@
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
+#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
@@ -88,15 +89,10 @@
#include "dev.h"
#include "sock_destructor.h"
-struct kmem_cache *skbuff_cache __ro_after_init;
-static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
-
-static struct kmem_cache *skb_small_head_cache __ro_after_init;
-
#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
@@ -115,6 +111,24 @@ static struct kmem_cache *skb_small_head_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
+/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
+ * iov_iter_bvec(). These static asserts ensure the cast is valid as long as the
+ * netmem is a page.
+ */
+static_assert(offsetof(struct bio_vec, bv_page) ==
+ offsetof(skb_frag_t, netmem));
+static_assert(sizeof_field(struct bio_vec, bv_page) ==
+ sizeof_field(skb_frag_t, netmem));
+
+static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
+static_assert(sizeof_field(struct bio_vec, bv_len) ==
+ sizeof_field(skb_frag_t, len));
+
+static_assert(offsetof(struct bio_vec, bv_offset) ==
+ offsetof(skb_frag_t, offset));
+static_assert(sizeof_field(struct bio_vec, bv_offset) ==
+ sizeof_field(skb_frag_t, offset));
+
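What the asserts protect, as a hedged sketch: callers such as kcm alias the frag array as bio_vecs to build a zero-copy iterator.

static void example_frags_to_iter(struct sk_buff *skb, struct iov_iter *iter)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* Legal only while skb_frag_t and bio_vec stay layout-
	 * compatible, which the asserts above pin down at build time. */
	iov_iter_bvec(iter, ITER_SOURCE,
		      (const struct bio_vec *)shinfo->frags,
		      shinfo->nr_frags, skb->data_len);
}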
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
@@ -297,7 +311,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
fragsz = SKB_DATA_ALIGN(fragsz);
- return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
@@ -309,13 +324,15 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
if (in_hardirq() || irqs_disabled()) {
struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
- data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
+ data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+ align_mask);
} else {
struct napi_alloc_cache *nc;
local_bh_disable();
nc = this_cpu_ptr(&napi_alloc_cache);
- data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
+ data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+ align_mask);
local_bh_enable();
}
return data;
@@ -328,7 +345,7 @@ static struct sk_buff *napi_skb_cache_get(void)
struct sk_buff *skb;
if (unlikely(!nc->skb_count)) {
- nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
+ nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
GFP_ATOMIC,
NAPI_SKB_CACHE_BULK,
nc->skb_cache);
@@ -337,7 +354,7 @@ static struct sk_buff *napi_skb_cache_get(void)
}
skb = nc->skb_cache[--nc->skb_count];
- kasan_mempool_unpoison_object(skb, kmem_cache_size(skbuff_cache));
+ kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
return skb;
}
@@ -395,7 +412,7 @@ struct sk_buff *slab_build_skb(void *data)
struct sk_buff *skb;
unsigned int size;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -446,7 +463,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -557,7 +574,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
obj_size = SKB_HEAD_ALIGN(*size);
if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
!(flags & KMALLOC_NOT_NORMAL_BITS)) {
- obj = kmem_cache_alloc_node(skb_small_head_cache,
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
node);
*size = SKB_SMALL_HEAD_CACHE_SIZE;
@@ -565,7 +582,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
- obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+ obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
goto out;
}
@@ -628,7 +645,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
u8 *data;
cache = (flags & SKB_ALLOC_FCLONE)
- ? skbuff_fclone_cache : skbuff_cache;
+ ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;
if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
gfp_mask |= __GFP_MEMALLOC;
@@ -845,17 +862,17 @@ skb_fail:
}
EXPORT_SYMBOL(__napi_alloc_skb);
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize)
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize)
{
DEBUG_NET_WARN_ON_ONCE(size > truesize);
- skb_fill_page_desc(skb, i, page, off, size);
+ skb_fill_netmem_desc(skb, i, netmem, off, size);
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
}
-EXPORT_SYMBOL(skb_add_rx_frag);
+EXPORT_SYMBOL(skb_add_rx_frag_netmem);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize)
@@ -895,6 +912,98 @@ static bool is_pp_page(struct page *page)
return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom)
+{
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ u32 size, truesize, len, max_head_size, off;
+ struct sk_buff *skb = *pskb, *nskb;
+ int err, i, head_off;
+ void *data;
+
+ /* XDP does not support fraglist, so we need to linearize
+ * the skb.
+ */
+ if (skb_has_frag_list(skb))
+ return -EOPNOTSUPP;
+
+ max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
+ if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
+ return -ENOMEM;
+
+ size = min_t(u32, skb->len, max_head_size);
+ truesize = SKB_HEAD_ALIGN(size) + headroom;
+ data = page_pool_dev_alloc_va(pool, &truesize);
+ if (!data)
+ return -ENOMEM;
+
+ nskb = napi_build_skb(data, truesize);
+ if (!nskb) {
+ page_pool_free_va(pool, data, true);
+ return -ENOMEM;
+ }
+
+ skb_reserve(nskb, headroom);
+ skb_copy_header(nskb, skb);
+ skb_mark_for_recycle(nskb);
+
+ err = skb_copy_bits(skb, 0, nskb->data, size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+ skb_put(nskb, size);
+
+ head_off = skb_headroom(nskb) - skb_headroom(skb);
+ skb_headers_offset_update(nskb, head_off);
+
+ off = size;
+ len = skb->len - off;
+ for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
+ struct page *page;
+ u32 page_off;
+
+ size = min_t(u32, len, PAGE_SIZE);
+ truesize = size;
+
+ page = page_pool_dev_alloc(pool, &page_off, &truesize);
+ if (!page) {
+ consume_skb(nskb);
+ return -ENOMEM;
+ }
+
+ skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
+ err = skb_copy_bits(skb, off, page_address(page) + page_off,
+ size);
+ if (err) {
+ consume_skb(nskb);
+ return err;
+ }
+
+ len -= size;
+ off += size;
+ }
+
+ consume_skb(skb);
+ *pskb = nskb;
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+EXPORT_SYMBOL(skb_pp_cow_data);
+
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ struct bpf_prog *prog)
+{
+ if (!prog->aux->xdp_has_frags)
+ return -EINVAL;
+
+ return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
+}
+EXPORT_SYMBOL(skb_cow_data_for_xdp);
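/* Hedged usage sketch: how a driver's skb-based XDP path might invoke the
 * new helper before running a multi-buffer program. 'pool' and 'prog' are
 * assumed driver state; the wrapper name is illustrative only.
 */
static int drv_prepare_skb_for_xdp(struct page_pool *pool,
				   struct bpf_prog *prog,
				   struct sk_buff **pskb)
{
	/* Re-allocate head and frags from the page_pool so the skb gets
	 * XDP_PACKET_HEADROOM and recyclable, pool-backed memory; fails
	 * with -EINVAL unless the program declared xdp_has_frags.
	 */
	return skb_cow_data_for_xdp(pool, pskb, prog);
}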
+
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(struct page *page, bool napi_safe)
{
@@ -923,9 +1032,10 @@ bool napi_pp_put_page(struct page *page, bool napi_safe)
*/
if (napi_safe || in_softirq()) {
const struct napi_struct *napi = READ_ONCE(pp->p.napi);
+ unsigned int cpuid = smp_processor_id();
- allow_direct = napi &&
- READ_ONCE(napi->list_owner) == smp_processor_id();
+ allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid;
+ allow_direct |= READ_ONCE(pp->cpuid) == cpuid;
}
/* Driver set this to memory recycling info. Reset it on recycle.
@@ -981,7 +1091,7 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
- kmem_cache_free(skb_small_head_cache, head);
+ kmem_cache_free(net_hotdata.skb_small_head_cache, head);
else
kfree(head);
}
@@ -1048,7 +1158,7 @@ static void kfree_skbmem(struct sk_buff *skb)
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
return;
case SKB_FCLONE_ORIG:
@@ -1069,7 +1179,7 @@ static void kfree_skbmem(struct sk_buff *skb)
if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
- kmem_cache_free(skbuff_fclone_cache, fclones);
+ kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
@@ -1166,7 +1276,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
sa->skb_array[sa->skb_count++] = skb;
if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
- kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
sa->skb_array);
sa->skb_count = 0;
}
@@ -1191,7 +1301,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
}
if (sa.skb_count)
- kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
@@ -1353,9 +1463,9 @@ static void napi_skb_cache_put(struct sk_buff *skb)
if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
kasan_mempool_unpoison_object(nc->skb_cache[i],
- kmem_cache_size(skbuff_cache));
+ kmem_cache_size(net_hotdata.skbuff_cache));
- kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
+ kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
nc->skb_cache + NAPI_SKB_CACHE_HALF);
nc->skb_count = NAPI_SKB_CACHE_HALF;
}
@@ -1906,10 +2016,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
/* skb frags point to kernel buffers */
for (i = 0; i < new_frags - 1; i++) {
- __skb_fill_page_desc(skb, i, head, 0, psize);
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
head = (struct page *)page_private(head);
}
- __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
+ __skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
+ d_off);
skb_shinfo(skb)->nr_frags = new_frags;
release:
@@ -1951,7 +2062,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- n = kmem_cache_alloc(skbuff_cache, gfp_mask);
+ n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
if (!n)
return NULL;
@@ -3647,7 +3758,8 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
if (plen) {
page = virt_to_head_page(from->head);
offset = from->data - (unsigned char *)page_address(page);
- __skb_fill_page_desc(to, 0, page, offset, plen);
+ __skb_fill_netmem_desc(to, 0, page_to_netmem(page),
+ offset, plen);
get_page(page);
j = 1;
len -= plen;
@@ -4889,7 +5001,7 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
- skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+ net_hotdata.skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|
@@ -4897,7 +5009,7 @@ void __init skb_init(void)
offsetof(struct sk_buff, cb),
sizeof_field(struct sk_buff, cb),
NULL);
- skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
+ net_hotdata.skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -4906,7 +5018,7 @@ void __init skb_init(void)
* struct skb_shared_info is located at the end of skb->head,
* and should not be copied to/from user.
*/
- skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
+ net_hotdata.skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
SKB_SMALL_HEAD_CACHE_SIZE,
0,
SLAB_HWCACHE_ALIGN | SLAB_PANIC,
@@ -5779,7 +5891,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
if (head_stolen) {
skb_release_head_state(skb);
- kmem_cache_free(skbuff_cache, skb);
+ kmem_cache_free(net_hotdata.skbuff_cache, skb);
} else {
__kfree_skb(skb);
}
@@ -6737,6 +6849,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
xfrm_state_hold(sp->xvec[i]);
}
#endif
+#ifdef CONFIG_MCTP_FLOWS
+ if (old_active & (1 << SKB_EXT_MCTP)) {
+ struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
+
+ if (flow->key)
+ refcount_inc(&flow->key->refs);
+ }
+#endif
__skb_ext_put(old);
return new;
}
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 93ecfceac1bc..4d75ef9d24bf 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1226,8 +1226,11 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
rcu_read_lock();
psock = sk_psock(sk);
- if (psock)
- psock->saved_data_ready(sk);
+ if (psock) {
+ read_lock_bh(&sk->sk_callback_lock);
+ sk_psock_data_ready(sk, psock);
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
rcu_read_unlock();
}
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 158dbdebce6a..43bf3818c19e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -107,6 +107,7 @@
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
+#include <linux/udp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
@@ -282,6 +283,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
int sysctl_tstamp_allow_data __read_mostly = 1;
@@ -1187,6 +1189,17 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
*/
WRITE_ONCE(sk->sk_txrehash, (u8)val);
return 0;
+ case SO_PEEK_OFF:
+ {
+ int (*set_peek_off)(struct sock *sk, int val);
+
+ set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
+ if (set_peek_off)
+ ret = set_peek_off(sk, val);
+ else
+ ret = -EOPNOTSUPP;
+ return ret;
+ }
}
sockopt_lock_sock(sk);
@@ -1429,18 +1442,6 @@ set_sndbuf:
sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
break;
- case SO_PEEK_OFF:
- {
- int (*set_peek_off)(struct sock *sk, int val);
-
- set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
- if (set_peek_off)
- ret = set_peek_off(sk, val);
- else
- ret = -EOPNOTSUPP;
- break;
- }
-
case SO_NOFCS:
sock_valbool_flag(sk, SOCK_NOFCS, valbool);
break;
@@ -2052,8 +2053,9 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
- prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+ prot->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see sk_prot_alloc() */);
#ifdef CONFIG_SECURITY_NETWORK
nsk->sk_security = sptr;
@@ -2582,8 +2584,18 @@ EXPORT_SYMBOL(sock_efree);
#ifdef CONFIG_INET
void sock_pfree(struct sk_buff *skb)
{
- if (sk_is_refcounted(skb->sk))
- sock_gen_put(skb->sk);
+ struct sock *sk = skb->sk;
+
+ if (!sk_is_refcounted(sk))
+ return;
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) {
+ inet_reqsk(sk)->rsk_listener = NULL;
+ reqsk_free(inet_reqsk(sk));
+ return;
+ }
+
+ sock_gen_put(sk);
}
EXPORT_SYMBOL(sock_pfree);
#endif /* CONFIG_INET */
@@ -4144,8 +4156,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
{
struct sock *sk = p;
- return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
- sk_busy_loop_timeout(sk, start_time);
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ return true;
+
+ if (sk_is_udp(sk) &&
+ !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
+ return true;
+
+ return sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */
@@ -4217,3 +4235,65 @@ int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
+
+static int __init sock_struct_check(void)
+{
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);
+
+	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);
+
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
+ return 0;
+}
+
+core_initcall(sock_struct_check);
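/* The asserts above check field placement against named cacheline groups
 * declared inside struct sock. Simplified sketch of the declaration side
 * (the macros are real, the struct is illustrative):
 *
 *	struct example {
 *		__cacheline_group_begin(example_write_tx);
 *		int a;
 *		int b;
 *		__cacheline_group_end(example_write_tx);
 *	};
 *
 *	CACHELINE_ASSERT_GROUP_MEMBER(struct example, example_write_tx, a);
 *
 * Grouping write-hot fields apart from read-mostly ones keeps the TX and
 * RX fast paths from bouncing the same cachelines between CPUs.
 */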
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0f0cb1465e08..6973dda3abda 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -23,6 +23,8 @@
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
+#include <net/hotdata.h>
+#include <net/rps.h>
#include "dev.h"
@@ -30,6 +32,7 @@ static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -137,7 +140,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ orig_sock_table = rcu_dereference_protected(
+ net_hotdata.rps_sock_flow_table,
lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
@@ -158,7 +162,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
mutex_unlock(&sock_flow_mutex);
return -ENOMEM;
}
- rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
+ net_hotdata.rps_cpu_mask =
+ roundup_pow_of_two(nr_cpu_ids) - 1;
sock_table->mask = size - 1;
} else
sock_table = orig_sock_table;
@@ -169,7 +174,8 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
sock_table = NULL;
if (sock_table != orig_sock_table) {
- rcu_assign_pointer(rps_sock_flow_table, sock_table);
+ rcu_assign_pointer(net_hotdata.rps_sock_flow_table,
+ sock_table);
if (sock_table) {
static_branch_inc(&rps_needed);
static_branch_inc(&rfs_needed);
@@ -299,8 +305,8 @@ static int proc_do_dev_weight(struct ctl_table *table, int write,
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
weight = READ_ONCE(weight_p);
- WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
- WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+ WRITE_ONCE(net_hotdata.dev_rx_weight, weight * dev_weight_rx_bias);
+ WRITE_ONCE(net_hotdata.dev_tx_weight, weight * dev_weight_tx_bias);
}
mutex_unlock(&dev_weight_mutex);
@@ -408,6 +414,14 @@ static struct ctl_table net_core_table[] = {
.extra1 = &min_rcvbuf,
},
{
+ .procname = "mem_pcpu_rsv",
+ .data = &sysctl_mem_pcpu_rsv,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_mem_pcpu_rsv,
+ },
+ {
.procname = "dev_weight",
.data = &weight_p,
.maxlen = sizeof(int),
@@ -430,7 +444,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_max_backlog",
- .data = &netdev_max_backlog,
+ .data = &net_hotdata.max_backlog,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -489,7 +503,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_tstamp_prequeue",
- .data = &netdev_tstamp_prequeue,
+ .data = &net_hotdata.tstamp_prequeue,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -567,7 +581,7 @@ static struct ctl_table net_core_table[] = {
#endif
{
.procname = "netdev_budget",
- .data = &netdev_budget,
+ .data = &net_hotdata.netdev_budget,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
@@ -590,7 +604,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "netdev_budget_usecs",
- .data = &netdev_budget_usecs,
+ .data = &net_hotdata.netdev_budget_usecs,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -623,7 +637,7 @@ static struct ctl_table net_core_table[] = {
},
{
.procname = "gro_normal_batch",
- .data = &gro_normal_batch,
+ .data = &net_hotdata.gro_normal_batch,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
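/* Recurring theme in this file and the surrounding ones: individually
 * scattered __read_mostly globals (netdev_max_backlog, gro_normal_batch,
 * the skb slab caches, ...) move into one shared structure so the hottest
 * fields sit together. Abridged shape, listing only fields these hunks
 * reference (the full definition lives in include/net/hotdata.h):
 *
 *	struct net_hotdata {
 *		struct kmem_cache	*skbuff_cache;
 *		struct kmem_cache	*skbuff_fclone_cache;
 *		struct kmem_cache	*skb_small_head_cache;
 *		int			max_backlog;
 *		int			netdev_budget;
 *		unsigned int		netdev_budget_usecs;
 *		...
 *	};
 *	extern struct net_hotdata net_hotdata;
 */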
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 4869c1c2d8f3..41693154e426 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -16,6 +16,7 @@
#include <linux/bug.h>
#include <net/page_pool/helpers.h>
+#include <net/hotdata.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
@@ -75,7 +76,7 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
xa = container_of(rcu, struct xdp_mem_allocator, rcu);
/* Allow this ID to be reused */
- ida_simple_remove(&mem_id_pool, xa->mem.id);
+ ida_free(&mem_id_pool, xa->mem.id);
kfree(xa);
}
@@ -242,7 +243,7 @@ static int __mem_id_cyclic_get(gfp_t gfp)
int id;
again:
- id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
+ id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
if (id < 0) {
if (id == -ENOSPC) {
/* Cyclic allocator, reset next id */
@@ -317,7 +318,7 @@ static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
/* Insert allocator into ID lookup table */
ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
if (IS_ERR(ptr)) {
- ida_simple_remove(&mem_id_pool, mem->id);
+ ida_free(&mem_id_pool, mem->id);
mem->id = 0;
errno = PTR_ERR(ptr);
goto err;
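/* API note on the migration above: ida_simple_get()'s 'end' bound is
 * exclusive, while ida_alloc_range()'s 'max' is inclusive -- hence the
 * new MEM_ID_MAX - 1. Equivalent pair, schematically:
 *
 *	id = ida_simple_get(&ida, start, end, gfp);	    // [start, end)
 *	id = ida_alloc_range(&ida, start, end - 1, gfp);    // [start, end-1]
 *
 * ida_simple_remove() maps one-to-one onto ida_free().
 */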
@@ -589,7 +590,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
- n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
+ n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
if (unlikely(!n_skb))
return -ENOMEM;
@@ -658,7 +659,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
{
struct sk_buff *skb;
- skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
+ skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
@@ -771,11 +772,11 @@ __bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
-BTF_SET8_END(xdp_metadata_kfunc_ids)
+BTF_KFUNCS_END(xdp_metadata_kfunc_ids)
static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index c4bbac99740d..1cba001bb4c8 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -376,15 +376,11 @@ EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
int __init dccp_ackvec_init(void)
{
- dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
- sizeof(struct dccp_ackvec), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_slab = KMEM_CACHE(dccp_ackvec, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_slab == NULL)
goto out_err;
- dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
- sizeof(struct dccp_ackvec_record),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ dccp_ackvec_record_slab = KMEM_CACHE(dccp_ackvec_record, SLAB_HWCACHE_ALIGN);
if (dccp_ackvec_record_slab == NULL)
goto out_destroy_slab;
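/* KMEM_CACHE() derives the cache name, size and alignment from the struct
 * type itself, so the conversion above is behavior-preserving. The macro
 * expands to roughly:
 *
 *	#define KMEM_CACHE(__struct, __flags)				\
 *		kmem_cache_create(#__struct, sizeof(struct __struct),	\
 *				  __alignof__(struct __struct),		\
 *				  (__flags), NULL)
 */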
diff --git a/net/devlink/core.c b/net/devlink/core.c
index 4275a2bc6d8e..7f0b093208d7 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -46,7 +46,7 @@ struct devlink_rel {
u32 obj_index;
devlink_rel_notify_cb_t *notify_cb;
devlink_rel_cleanup_cb_t *cleanup_cb;
- struct work_struct notify_work;
+ struct delayed_work notify_work;
} nested_in;
};
@@ -70,7 +70,7 @@ static void __devlink_rel_put(struct devlink_rel *rel)
static void devlink_rel_nested_in_notify_work(struct work_struct *work)
{
struct devlink_rel *rel = container_of(work, struct devlink_rel,
- nested_in.notify_work);
+ nested_in.notify_work.work);
struct devlink *devlink;
devlink = devlinks_xa_get(rel->nested_in.devlink_index);
@@ -96,13 +96,13 @@ rel_put:
return;
reschedule_work:
- schedule_work(&rel->nested_in.notify_work);
+ schedule_delayed_work(&rel->nested_in.notify_work, 1);
}
static void devlink_rel_nested_in_notify_work_schedule(struct devlink_rel *rel)
{
__devlink_rel_get(rel);
- schedule_work(&rel->nested_in.notify_work);
+ schedule_delayed_work(&rel->nested_in.notify_work, 0);
}
static struct devlink_rel *devlink_rel_alloc(void)
@@ -123,8 +123,8 @@ static struct devlink_rel *devlink_rel_alloc(void)
}
refcount_set(&rel->refcount, 1);
- INIT_WORK(&rel->nested_in.notify_work,
- &devlink_rel_nested_in_notify_work);
+ INIT_DELAYED_WORK(&rel->nested_in.notify_work,
+ &devlink_rel_nested_in_notify_work);
return rel;
}
@@ -529,14 +529,20 @@ static int __init devlink_init(void)
{
int err;
- err = genl_register_family(&devlink_nl_family);
- if (err)
- goto out;
err = register_pernet_subsys(&devlink_pernet_ops);
if (err)
goto out;
+ err = genl_register_family(&devlink_nl_family);
+ if (err)
+ goto out_unreg_pernet_subsys;
err = register_netdevice_notifier(&devlink_port_netdevice_nb);
+ if (!err)
+ return 0;
+
+ genl_unregister_family(&devlink_nl_family);
+out_unreg_pernet_subsys:
+ unregister_pernet_subsys(&devlink_pernet_ops);
out:
WARN_ON(err);
return err;
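/* The reorder above restores the usual init/unwind symmetry: resources are
 * acquired in one order and released in reverse on failure. Generic
 * skeleton with placeholder names (not devlink API):
 *
 *	err = register_a();
 *	if (err)
 *		return err;
 *	err = register_b();
 *	if (err)
 *		goto unreg_a;
 *	err = register_c();
 *	if (!err)
 *		return 0;
 *	unregister_b();
 * unreg_a:
 *	unregister_a();
 *	return err;
 */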
diff --git a/net/devlink/port.c b/net/devlink/port.c
index 62e54e152ecf..4b2d46ccfe48 100644
--- a/net/devlink/port.c
+++ b/net/devlink/port.c
@@ -583,7 +583,7 @@ devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
xa_for_each_start(&devlink->ports, port_index, devlink_port, state->idx) {
err = devlink_nl_port_fill(msg, devlink_port,
- DEVLINK_CMD_NEW,
+ DEVLINK_CMD_PORT_NEW,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, flags,
cb->extack);
@@ -674,7 +674,7 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port,
return -EOPNOTSUPP;
}
if (tb[DEVLINK_PORT_FN_ATTR_STATE] && !ops->port_fn_state_set) {
- NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR],
+ NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FN_ATTR_STATE],
"Function does not support state setting");
return -EOPNOTSUPP;
}
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index ac7be864e80d..09d2f5d4b3dd 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>
@@ -626,7 +625,6 @@ static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
static int dsa_switch_setup(struct dsa_switch *ds)
{
- struct device_node *dn;
int err;
if (ds->setup)
@@ -666,10 +664,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
dsa_user_mii_bus_init(ds);
- dn = of_get_child_by_name(ds->dev->of_node, "mdio");
-
- err = of_mdiobus_register(ds->user_mii_bus, dn);
- of_node_put(dn);
+ err = mdiobus_register(ds->user_mii_bus);
if (err < 0)
goto free_user_mii_bus;
}
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 2717e9d7b612..1aba1d05c27a 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -75,7 +75,7 @@ sja1105_tagger_private(struct dsa_switch *ds)
}
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
-static inline bool sja1105_is_link_local(const struct sk_buff *skb)
+static bool sja1105_is_link_local(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 dmac = ether_addr_to_u64(hdr->h_dest);
@@ -121,7 +121,7 @@ static void sja1105_meta_unpack(const struct sk_buff *skb,
packing(buf + 7, &meta->switch_id, 7, 0, 1, UNPACK, 0);
}
-static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
+static bool sja1105_is_meta_frame(const struct sk_buff *skb)
{
const struct ethhdr *hdr = eth_hdr(skb);
u64 smac = ether_addr_to_u64(hdr->h_source);
diff --git a/net/dsa/user.c b/net/dsa/user.c
index b15e71cc342c..16d395bb1a1f 100644
--- a/net/dsa/user.c
+++ b/net/dsa/user.c
@@ -210,7 +210,7 @@ static int dsa_user_sync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_uc(struct net_device *dev,
@@ -230,7 +230,7 @@ static int dsa_user_unsync_uc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_sync_mc(struct net_device *dev,
@@ -250,7 +250,7 @@ static int dsa_user_sync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
static int dsa_user_unsync_mc(struct net_device *dev,
@@ -270,7 +270,7 @@ static int dsa_user_unsync_mc(struct net_device *dev,
return 0;
return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
- &ctx);
+ &ctx);
}
void dsa_user_sync_ha(struct net_device *dev)
@@ -352,7 +352,7 @@ void dsa_user_mii_bus_init(struct dsa_switch *ds)
/* user device handling ****************************************************/
static int dsa_user_get_iflink(const struct net_device *dev)
{
- return dsa_user_to_conduit(dev)->ifindex;
+ return READ_ONCE(dsa_user_to_conduit(dev)->ifindex);
}
static int dsa_user_open(struct net_device *dev)
@@ -875,8 +875,8 @@ static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
return err;
}
-static inline netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
- struct sk_buff *skb)
+static netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
struct dsa_user_priv *p = netdev_priv(dev);
@@ -1222,7 +1222,7 @@ static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
return ret;
}
-static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
@@ -1242,7 +1242,7 @@ static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phylink_ethtool_set_eee(dp->pl, e);
}
-static int dsa_user_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
@@ -2429,7 +2429,7 @@ static const struct net_device_ops dsa_user_netdev_ops = {
.ndo_fill_forward_path = dsa_user_fill_forward_path,
};
-static struct device_type dsa_type = {
+static const struct device_type dsa_type = {
.name = "dsa",
};
@@ -2625,11 +2625,7 @@ int dsa_user_create(struct dsa_port *port)
user_dev->vlan_features = conduit->vlan_features;
p = netdev_priv(user_dev);
- user_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!user_dev->tstats) {
- free_netdev(user_dev);
- return -ENOMEM;
- }
+ user_dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
ret = gro_cells_init(&p->gcells, user_dev);
if (ret)
@@ -2695,7 +2691,6 @@ out_phy:
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
port->user = NULL;
return ret;
@@ -2716,7 +2711,6 @@ void dsa_user_destroy(struct net_device *user_dev)
dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
- free_percpu(user_dev->tstats);
free_netdev(user_dev);
}
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 2853394d06a8..bf398973eb8a 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -4,16 +4,13 @@
#include "common.h"
#include "bitset.h"
-#define EEE_MODES_COUNT \
- (sizeof_field(struct ethtool_eee, supported) * BITS_PER_BYTE)
-
struct eee_req_info {
struct ethnl_req_info base;
};
struct eee_reply_data {
struct ethnl_reply_data base;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define EEE_REPDATA(__reply_base) \
@@ -30,6 +27,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
{
struct eee_reply_data *data = EEE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
+ struct ethtool_keee *eee = &data->eee;
int ret;
if (!dev->ethtool_ops->get_eee)
@@ -37,7 +35,7 @@ static int eee_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- ret = dev->ethtool_ops->get_eee(dev, &data->eee);
+ ret = dev->ethtool_ops->get_eee(dev, eee);
ethnl_ops_complete(dev);
return ret;
@@ -48,24 +46,21 @@ static int eee_reply_size(const struct ethnl_req_info *req_base,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int len = 0;
int ret;
- BUILD_BUG_ON(sizeof(eee->advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
- BUILD_BUG_ON(sizeof(eee->lp_advertised) * BITS_PER_BYTE !=
- EEE_MODES_COUNT);
-
/* MODES_OURS */
- ret = ethnl_bitset32_size(&eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
/* MODES_PEERS */
- ret = ethnl_bitset32_size(&eee->lp_advertised, NULL,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_bitset_size(eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
len += ret;
@@ -84,24 +79,26 @@ static int eee_fill_reply(struct sk_buff *skb,
{
bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
const struct eee_reply_data *data = EEE_REPDATA(reply_base);
- const struct ethtool_eee *eee = &data->eee;
+ const struct ethtool_keee *eee = &data->eee;
int ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_OURS,
- &eee->advertised, &eee->supported,
- EEE_MODES_COUNT, link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_OURS,
+ eee->advertised, eee->supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- ret = ethnl_put_bitset32(skb, ETHTOOL_A_EEE_MODES_PEER,
- &eee->lp_advertised, NULL, EEE_MODES_COUNT,
- link_mode_names, compact);
+ ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_PEER,
+ eee->lp_advertised, NULL,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_mode_names, compact);
if (ret < 0)
return ret;
- if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, !!eee->eee_active) ||
- nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, !!eee->eee_enabled) ||
+ if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, eee->eee_active) ||
+ nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, eee->eee_enabled) ||
nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED,
- !!eee->tx_lpi_enabled) ||
+ eee->tx_lpi_enabled) ||
nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer))
return -EMSGSIZE;
@@ -132,7 +129,7 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
{
struct net_device *dev = req_info->dev;
struct nlattr **tb = info->attrs;
- struct ethtool_eee eee = {};
+ struct ethtool_keee eee = {};
bool mod = false;
int ret;
@@ -140,14 +137,15 @@ ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
if (ret < 0)
return ret;
- ret = ethnl_update_bitset32(&eee.advertised, EEE_MODES_COUNT,
- tb[ETHTOOL_A_EEE_MODES_OURS],
- link_mode_names, info->extack, &mod);
+ ret = ethnl_update_bitset(eee.advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ tb[ETHTOOL_A_EEE_MODES_OURS],
+ link_mode_names, info->extack, &mod);
if (ret < 0)
return ret;
- ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
- ethnl_update_bool32(&eee.tx_lpi_enabled,
- tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
+ ethnl_update_bool(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
+ ethnl_update_bool(&eee.tx_lpi_enabled, tb[ETHTOOL_A_EEE_TX_LPI_ENABLED],
+ &mod);
ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
&mod);
if (!mod)
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 7519b0818b91..5a55270aa86e 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -26,12 +26,12 @@
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/pm_runtime.h>
+#include <linux/utsname.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock_drv.h>
#include <net/flow_offload.h>
#include <linux/ethtool_netlink.h>
-#include <generated/utsrelease.h>
#include "common.h"
/* State held across locks and calls for commands which have devlink fallback */
@@ -713,7 +713,8 @@ ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp)
struct device *parent = dev->dev.parent;
rsp->info.cmd = ETHTOOL_GDRVINFO;
- strscpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version));
+ strscpy(rsp->info.version, init_uts_ns.name.release,
+ sizeof(rsp->info.version));
if (ops->get_drvinfo) {
ops->get_drvinfo(dev, &rsp->info);
if (!rsp->info.bus_info[0] && parent)
@@ -1508,22 +1509,57 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
return 0;
}
+static void eee_to_keee(struct ethtool_keee *keee,
+ const struct ethtool_eee *eee)
+{
+ memset(keee, 0, sizeof(*keee));
+
+ keee->eee_enabled = eee->eee_enabled;
+ keee->tx_lpi_enabled = eee->tx_lpi_enabled;
+ keee->tx_lpi_timer = eee->tx_lpi_timer;
+
+ ethtool_convert_legacy_u32_to_link_mode(keee->advertised,
+ eee->advertised);
+}
+
+static void keee_to_eee(struct ethtool_eee *eee,
+ const struct ethtool_keee *keee)
+{
+ bool overflow;
+
+ memset(eee, 0, sizeof(*eee));
+
+ eee->eee_active = keee->eee_active;
+ eee->eee_enabled = keee->eee_enabled;
+ eee->tx_lpi_enabled = keee->tx_lpi_enabled;
+ eee->tx_lpi_timer = keee->tx_lpi_timer;
+
+ overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported,
+ keee->supported);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->advertised,
+ keee->advertised);
+ ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised,
+ keee->lp_advertised);
+ if (overflow)
+ pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n");
+}
+
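/* Why the two conversion directions differ: the legacy ioctl packs link
 * modes into a single u32, so keee_to_eee() can lose modes above bit 31
 * (hence the overflow warning above), while eee_to_keee() can never
 * overflow. Both lean on existing helpers; schematically:
 *
 *	ethtool_convert_legacy_u32_to_link_mode(keee->advertised, u32val);
 *	fits = ethtool_convert_link_mode_to_legacy_u32(&u32val,
 *						       keee->advertised);
 */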
static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int rc;
if (!dev->ethtool_ops->get_eee)
return -EOPNOTSUPP;
- memset(&edata, 0, sizeof(struct ethtool_eee));
- edata.cmd = ETHTOOL_GEEE;
- rc = dev->ethtool_ops->get_eee(dev, &edata);
-
+ memset(&keee, 0, sizeof(keee));
+ rc = dev->ethtool_ops->get_eee(dev, &keee);
if (rc)
return rc;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ keee_to_eee(&eee, &keee);
+ if (copy_to_user(useraddr, &eee, sizeof(eee)))
return -EFAULT;
return 0;
@@ -1531,16 +1567,18 @@ static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_eee edata;
+ struct ethtool_keee keee;
+ struct ethtool_eee eee;
int ret;
if (!dev->ethtool_ops->set_eee)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ if (copy_from_user(&eee, useraddr, sizeof(eee)))
return -EFAULT;
- ret = dev->ethtool_ops->set_eee(dev, &edata);
+ eee_to_keee(&keee, &eee);
+ ret = dev->ethtool_ops->set_eee(dev, &keee);
if (!ret)
ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
return ret;
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index fe3553f60bf3..bd04f28d5cf4 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -477,11 +477,7 @@ out:
return ret;
}
-/* Default ->dumpit() handler for GET requests. Device iteration copied from
- * rtnl_dump_ifinfo(); we have to be more careful about device hashtable
- * persistence as we cannot guarantee to hold RTNL lock through the whole
- * function as rtnetnlink does.
- */
+/* Default ->dumpit() handler for GET requests. */
static int ethnl_default_dumpit(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -490,14 +486,14 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
struct net_device *dev;
int ret = 0;
- rtnl_lock();
+ rcu_read_lock();
for_each_netdev_dump(net, dev, ctx->pos_ifindex) {
dev_hold(dev);
- rtnl_unlock();
+ rcu_read_unlock();
ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb));
- rtnl_lock();
+ rcu_read_lock();
dev_put(dev);
if (ret < 0 && ret != -EOPNOTSUPP) {
@@ -507,7 +503,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
}
ret = 0;
}
- rtnl_unlock();
+ rcu_read_unlock();
return ret;
}
diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c
index 16ed7bfd29e4..34fd1d9b2db8 100644
--- a/net/handshake/handshake-test.c
+++ b/net/handshake/handshake-test.c
@@ -471,7 +471,10 @@ static void handshake_req_destroy_test1(struct kunit *test)
handshake_req_cancel(sock->sk);
/* Act */
- fput(filp);
+ /* Ensure the close/release/put process has run to
+ * completion before checking the result.
+ */
+ __fput_sync(filp);
/* Assert */
KUNIT_EXPECT_PTR_EQ(test, handshake_req_destroy_test, req);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 7ceb9ac6e730..c98b5b71ad7c 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -28,29 +28,19 @@ static bool is_slave_up(struct net_device *dev)
return dev && is_admin_up(dev) && netif_oper_up(dev);
}
-static void __hsr_set_operstate(struct net_device *dev, int transition)
-{
- write_lock(&dev_base_lock);
- if (dev->operstate != transition) {
- dev->operstate = transition;
- write_unlock(&dev_base_lock);
- netdev_state_change(dev);
- } else {
- write_unlock(&dev_base_lock);
- }
-}
-
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
- if (!is_admin_up(master->dev)) {
- __hsr_set_operstate(master->dev, IF_OPER_DOWN);
+ struct net_device *dev = master->dev;
+
+ if (!is_admin_up(dev)) {
+ netdev_set_operstate(dev, IF_OPER_DOWN);
return;
}
if (has_carrier)
- __hsr_set_operstate(master->dev, IF_OPER_UP);
+ netdev_set_operstate(dev, IF_OPER_UP);
else
- __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
+ netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}
static bool hsr_check_carrier(struct hsr_port *master)
@@ -78,14 +68,14 @@ static void hsr_check_announce(struct net_device *hsr_dev,
hsr = netdev_priv(hsr_dev);
- if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
+ if (READ_ONCE(hsr_dev->operstate) == IF_OPER_UP && old_operstate != IF_OPER_UP) {
/* Went up */
hsr->announce_count = 0;
mod_timer(&hsr->announce_timer,
jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
- if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
+ if (READ_ONCE(hsr_dev->operstate) != IF_OPER_UP && old_operstate == IF_OPER_UP)
/* Went down */
del_timer(&hsr->announce_timer);
}
@@ -100,7 +90,7 @@ void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
/* netif_stacked_transfer_operstate() cannot be used here since
* it doesn't set IF_OPER_LOWERLAYERDOWN (?)
*/
- old_operstate = master->dev->operstate;
+ old_operstate = READ_ONCE(master->dev->operstate);
has_carrier = hsr_check_carrier(master);
hsr_set_operstate(master, has_carrier);
hsr_check_announce(master->dev, old_operstate);
@@ -308,7 +298,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
skb = hsr_init_skb(master);
if (!skb) {
- WARN_ONCE(1, "HSR: Could not send supervision frame\n");
+ netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n");
return;
}
@@ -355,7 +345,7 @@ static void send_prp_supervision_frame(struct hsr_port *master,
skb = hsr_init_skb(master);
if (!skb) {
- WARN_ONCE(1, "PRP: Could not send supervision frame\n");
+ netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
return;
}
@@ -477,7 +467,7 @@ static const struct net_device_ops hsr_device_ops = {
.ndo_set_rx_mode = hsr_set_rx_mode,
};
-static struct device_type hsr_type = {
+static const struct device_type hsr_type = {
.name = "hsr",
};
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 80cdc6f6b34c..5d68cb181695 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -83,7 +83,7 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
return false;
/* Get next tlv */
- total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
+ total_length += hsr_sup_tag->tlv.HSR_TLV_length;
if (!pskb_may_pull(skb, total_length))
return false;
skb_pull(skb, total_length);
@@ -435,7 +435,7 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
continue;
/* Don't send frame over port where it has been sent before.
- * Also fro SAN, this shouldn't be done.
+ * Also for SAN, this shouldn't be done.
*/
if (!frame->is_from_san &&
hsr_register_frame_out(port, frame->node_src,
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 2c087b7f17c5..77b4e92027c5 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -93,7 +93,7 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
static int lowpan_get_iflink(const struct net_device *dev)
{
- return lowpan_802154_dev(dev)->wdev->ifindex;
+ return READ_ONCE(lowpan_802154_dev(dev)->wdev->ifindex);
}
static const struct net_device_ops lowpan_netdev_ops = {
@@ -280,5 +280,6 @@ static void __exit lowpan_cleanup_module(void)
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
+MODULE_DESCRIPTION("IPv6 over Low power Wireless Personal Area Network IEEE 802.15.4 core");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 00302e8b9615..990a83455dcf 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1137,4 +1137,5 @@ module_init(af_ieee802154_init);
module_exit(af_ieee802154_remove);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IEEE 802.15.4 socket interface");
MODULE_ALIAS_NETPROTO(PF_IEEE802154);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 835f4f9d98d2..55bd72997b31 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,6 +119,7 @@
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
+#include <net/rps.h>
#include <trace/events/sock.h>
@@ -330,6 +331,9 @@ lookup_protocol:
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
+ if (INET_PROTOSW_ICSK & answer_flags)
+ inet_init_csk_locks(sk);
+
inet = inet_sk(sk);
inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
@@ -1100,7 +1104,7 @@ const struct proto_ops inet_dgram_ops = {
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.splice_eof = inet_splice_eof,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet_compat_ioctl,
#endif
@@ -1323,7 +1327,7 @@ int inet_sk_rebuild_header(struct sock *sk)
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
+ sk->sk_protocol, ip_sock_rt_tos(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
@@ -1625,10 +1629,12 @@ EXPORT_SYMBOL(inet_current_timestamp);
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
- if (sk->sk_family == AF_INET)
+ unsigned int family = READ_ONCE(sk->sk_family);
+
+ if (family == AF_INET)
return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == AF_INET6)
+ if (family == AF_INET6)
return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
return -EINVAL;
@@ -1746,19 +1752,6 @@ static const struct net_protocol igmp_protocol = {
};
#endif
-static const struct net_protocol tcp_protocol = {
- .handler = tcp_v4_rcv,
- .err_handler = tcp_v4_err,
- .no_policy = 1,
- .icmp_strict_tag_validation = 1,
-};
-
-static const struct net_protocol udp_protocol = {
- .handler = udp_rcv,
- .err_handler = udp_err,
- .no_policy = 1,
-};
-
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.err_handler = icmp_err,
@@ -1899,14 +1892,6 @@ static int ipv4_proc_init(void);
* IP protocol layer initialiser
*/
-static struct packet_offload ip_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IP),
- .callbacks = {
- .gso_segment = inet_gso_segment,
- .gro_receive = inet_gro_receive,
- .gro_complete = inet_gro_complete,
- },
-};
static const struct net_offload ipip_offload = {
.callbacks = {
@@ -1933,7 +1918,15 @@ static int __init ipv4_offload_init(void)
if (ipip_offload_init() < 0)
pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
- dev_add_offload(&ip_packet_offload);
+ net_hotdata.ip_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IP),
+ .callbacks = {
+ .gso_segment = inet_gso_segment,
+ .gro_receive = inet_gro_receive,
+ .gro_complete = inet_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ip_packet_offload);
return 0;
}
@@ -1987,9 +1980,22 @@ static int __init inet_init(void)
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
pr_crit("%s: Cannot add ICMP protocol\n", __func__);
- if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
+
+ net_hotdata.udp_protocol = (struct net_protocol) {
+ .handler = udp_rcv,
+ .err_handler = udp_err,
+ .no_policy = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
pr_crit("%s: Cannot add UDP protocol\n", __func__);
- if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
+
+ net_hotdata.tcp_protocol = (struct net_protocol) {
+ .handler = tcp_v4_rcv,
+ .err_handler = tcp_v4_err,
+ .no_policy = 1,
+ .icmp_strict_tag_validation = 1,
+ };
+ if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
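/* Pattern used above: the protocol ops are assigned as compound literals
 * at init time, which lets them live inside writable net_hotdata rather
 * than as separate static const objects. Generic shape (names here are
 * illustrative, not kernel API):
 *
 *	hot.proto = (struct net_protocol) {
 *		.handler	= my_rcv,
 *		.err_handler	= my_err,
 *		.no_policy	= 1,
 *	};
 *	inet_add_protocol(&hot.proto, IPPROTO_FOO);
 */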
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a2e6e1fdf82b..64aec3dff8ec 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -597,5 +597,6 @@ static void __exit ah4_fini(void)
module_init(ah4_init);
module_exit(ah4_fini);
+MODULE_DESCRIPTION("IPv4 AH transformation library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 9456f5bb35e5..0d0d725b46ad 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1125,7 +1125,8 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
if (neigh) {
if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
read_lock_bh(&neigh->lock);
- memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+ memcpy(r->arp_ha.sa_data, neigh->ha,
+ min(dev->addr_len, sizeof(r->arp_ha.sa_data_min)));
r->arp_flags = arp_state_to_flags(neigh);
read_unlock_bh(&neigh->lock);
r->arp_ha.sa_family = dev->type;
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index ae8b15e6896f..7f518ea5f4ac 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -12,7 +12,7 @@
#include <net/bpf_sk_storage.h>
/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
-extern struct bpf_struct_ops bpf_tcp_congestion_ops;
+static struct bpf_struct_ops bpf_tcp_congestion_ops;
static u32 unsupported_ops[] = {
offsetof(struct tcp_congestion_ops, get_info),
@@ -20,6 +20,7 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
+static const struct btf_type *tcp_congestion_ops_type;
static int bpf_tcp_ca_init(struct btf *btf)
{
@@ -36,6 +37,11 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
+ type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
+ if (type_id < 0)
+ return -EINVAL;
+ tcp_congestion_ops_type = btf_type_by_id(btf, type_id);
+
return 0;
}
@@ -149,7 +155,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog)
u32 midx;
midx = prog->expected_attach_type;
- t = bpf_tcp_congestion_ops.type;
+ t = tcp_congestion_ops_type;
m = &btf_type_member(t)[midx];
return __btf_member_bit_offset(t, m) / 8;
@@ -191,17 +197,17 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_ktime_get_coarse_ns:
return &bpf_ktime_get_coarse_ns_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
}
-BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
-BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)
+BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
.owner = THIS_MODULE,
@@ -339,7 +345,7 @@ static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
.release = __bpf_tcp_ca_release,
};
-struct bpf_struct_ops bpf_tcp_congestion_ops = {
+static struct bpf_struct_ops bpf_tcp_congestion_ops = {
.verifier_ops = &bpf_tcp_ca_verifier_ops,
.reg = bpf_tcp_ca_reg,
.unreg = bpf_tcp_ca_unreg,
@@ -350,10 +356,16 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = {
.validate = bpf_tcp_ca_validate,
.name = "tcp_congestion_ops",
.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
+ .owner = THIS_MODULE,
};
static int __init bpf_tcp_ca_kfunc_init(void)
{
- return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
+ ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);
+
+ return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index d048aa833293..8b17d83e5fde 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -864,11 +864,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
net_clen_bits,
net_spot + 1,
1);
- if (net_spot < 0) {
- if (net_spot == -2)
- return -EFAULT;
+ if (net_spot < 0)
return 0;
- }
switch (doi_def->type) {
case CIPSO_V4_MAP_PASS:
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 2cc50cbfc2a3..cc6d0bd7b0a9 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -119,7 +119,7 @@ void ip4_datagram_release_cb(struct sock *sk)
rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
dst = !IS_ERR(rt) ? &rt->dst : NULL;
sk_dst_set(sk, dst);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ca0ff15dc8fa..7a437f0d4190 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -713,34 +713,37 @@ static void check_lifetime(struct work_struct *work)
rcu_read_lock();
hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
- unsigned long age;
+ unsigned long age, tstamp;
+ u32 preferred_lft;
+ u32 valid_lft;
+ u32 flags;
- if (ifa->ifa_flags & IFA_F_PERMANENT)
+ flags = READ_ONCE(ifa->ifa_flags);
+ if (flags & IFA_F_PERMANENT)
continue;
+ preferred_lft = READ_ONCE(ifa->ifa_preferred_lft);
+ valid_lft = READ_ONCE(ifa->ifa_valid_lft);
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
/* We try to batch several events at once. */
- age = (now - ifa->ifa_tstamp +
+ age = (now - tstamp +
ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
- if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
- age >= ifa->ifa_valid_lft) {
+ if (valid_lft != INFINITY_LIFE_TIME &&
+ age >= valid_lft) {
change_needed = true;
- } else if (ifa->ifa_preferred_lft ==
+ } else if (preferred_lft ==
INFINITY_LIFE_TIME) {
continue;
- } else if (age >= ifa->ifa_preferred_lft) {
- if (time_before(ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ, next))
- next = ifa->ifa_tstamp +
- ifa->ifa_valid_lft * HZ;
+ } else if (age >= preferred_lft) {
+ if (time_before(tstamp + valid_lft * HZ, next))
+ next = tstamp + valid_lft * HZ;
- if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
+ if (!(flags & IFA_F_DEPRECATED))
change_needed = true;
- } else if (time_before(ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ,
+ } else if (time_before(tstamp + preferred_lft * HZ,
next)) {
- next = ifa->ifa_tstamp +
- ifa->ifa_preferred_lft * HZ;
+ next = tstamp + preferred_lft * HZ;
}
}
rcu_read_unlock();
@@ -804,24 +807,26 @@ static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
__u32 prefered_lft)
{
unsigned long timeout;
+ u32 flags;
- ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
+ flags = ifa->ifa_flags & ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
timeout = addrconf_timeout_fixup(valid_lft, HZ);
if (addrconf_finite_timeout(timeout))
- ifa->ifa_valid_lft = timeout;
+ WRITE_ONCE(ifa->ifa_valid_lft, timeout);
else
- ifa->ifa_flags |= IFA_F_PERMANENT;
+ flags |= IFA_F_PERMANENT;
timeout = addrconf_timeout_fixup(prefered_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
if (timeout == 0)
- ifa->ifa_flags |= IFA_F_DEPRECATED;
- ifa->ifa_preferred_lft = timeout;
+ flags |= IFA_F_DEPRECATED;
+ WRITE_ONCE(ifa->ifa_preferred_lft, timeout);
}
- ifa->ifa_tstamp = jiffies;
+ WRITE_ONCE(ifa->ifa_flags, flags);
+ WRITE_ONCE(ifa->ifa_tstamp, jiffies);
if (!ifa->ifa_cstamp)
- ifa->ifa_cstamp = ifa->ifa_tstamp;
+ WRITE_ONCE(ifa->ifa_cstamp, ifa->ifa_tstamp);
}
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
@@ -1312,7 +1317,7 @@ static __be32 in_dev_select_addr(const struct in_device *in_dev,
const struct in_ifaddr *ifa;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope)
@@ -1340,7 +1345,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
localnet_scope = RT_SCOPE_LINK;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
- if (ifa->ifa_flags & IFA_F_SECONDARY)
+ if (READ_ONCE(ifa->ifa_flags) & IFA_F_SECONDARY)
continue;
if (min(ifa->ifa_scope, localnet_scope) > scope)
continue;
@@ -1671,11 +1676,12 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
-static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
+static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
struct inet_fill_args *args)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
+ unsigned long tstamp;
u32 preferred, valid;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
@@ -1686,7 +1692,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET;
ifm->ifa_prefixlen = ifa->ifa_prefixlen;
- ifm->ifa_flags = ifa->ifa_flags;
+ ifm->ifa_flags = READ_ONCE(ifa->ifa_flags);
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
@@ -1694,11 +1700,12 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto nla_put_failure;
+ tstamp = READ_ONCE(ifa->ifa_tstamp);
if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
- preferred = ifa->ifa_preferred_lft;
- valid = ifa->ifa_valid_lft;
+ preferred = READ_ONCE(ifa->ifa_preferred_lft);
+ valid = READ_ONCE(ifa->ifa_valid_lft);
if (preferred != INFINITY_LIFE_TIME) {
- long tval = (jiffies - ifa->ifa_tstamp) / HZ;
+ long tval = (jiffies - tstamp) / HZ;
if (preferred > tval)
preferred -= tval;
@@ -1725,10 +1732,10 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
(ifa->ifa_proto &&
nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
- nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
+ nla_put_u32(skb, IFA_FLAGS, ifm->ifa_flags) ||
(ifa->ifa_rt_priority &&
nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
- put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
+ put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
preferred, valid))
goto nla_put_failure;
@@ -1798,15 +1805,15 @@ static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
}
static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
- struct netlink_callback *cb, int s_ip_idx,
+ struct netlink_callback *cb, int *s_ip_idx,
struct inet_fill_args *fillargs)
{
struct in_ifaddr *ifa;
int ip_idx = 0;
int err;
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
- if (ip_idx < s_ip_idx) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ if (ip_idx < *s_ip_idx) {
ip_idx++;
continue;
}
@@ -1818,13 +1825,28 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
ip_idx++;
}
err = 0;
-
+ ip_idx = 0;
done:
- cb->args[2] = ip_idx;
+ *s_ip_idx = ip_idx;
return err;
}
+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet_base_seq(const struct net *net)
+{
+ u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
+ READ_ONCE(net->dev_base_seq);
+
+ /* Must not return 0 (see nl_dump_check_consistent()).
+ * Chose a value far away from 0.
+ */
+ if (!res)
+ res = 0x80000000;
+ return res;
+}
+
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
@@ -1837,76 +1859,51 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
};
struct net *net = sock_net(skb->sk);
struct net *tgt_net = net;
- int h, s_h;
- int idx, s_idx;
- int s_ip_idx;
- struct net_device *dev;
+ struct {
+ unsigned long ifindex;
+ int ip_idx;
+ } *ctx = (void *)cb->ctx;
struct in_device *in_dev;
- struct hlist_head *head;
+ struct net_device *dev;
int err = 0;
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
- s_ip_idx = cb->args[2];
-
+ rcu_read_lock();
if (cb->strict_check) {
err = inet_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
if (err < 0)
- goto put_tgt_net;
+ goto done;
- err = 0;
if (fillargs.ifindex) {
- dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
- if (!dev) {
- err = -ENODEV;
- goto put_tgt_net;
- }
-
- in_dev = __in_dev_get_rtnl(dev);
- if (in_dev) {
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
- }
- goto put_tgt_net;
- }
- }
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &tgt_net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
- tgt_net->dev_base_seq;
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (h > s_h || idx > s_idx)
- s_ip_idx = 0;
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
+ if (!dev)
+ goto done;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
- goto cont;
-
- err = in_dev_dump_addr(in_dev, skb, cb, s_ip_idx,
- &fillargs);
- if (err < 0) {
- rcu_read_unlock();
goto done;
- }
-cont:
- idx++;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ goto done;
}
- rcu_read_unlock();
}
+ cb->seq = inet_base_seq(tgt_net);
+
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = in_dev_dump_addr(in_dev, skb, cb, &ctx->ip_idx,
+ &fillargs);
+ if (err < 0)
+ goto done;
+ }
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-put_tgt_net:
if (fillargs.netnsid >= 0)
put_net(tgt_net);
-
- return skb->len ? : err;
+ rcu_read_unlock();
+ return err;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
@@ -1968,7 +1965,7 @@ static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
return -EMSGSIZE;
for (i = 0; i < IPV4_DEVCONF_MAX; i++)
- ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
+ ((u32 *) nla_data(nla))[i] = READ_ONCE(in_dev->cnf.data[i]);
return 0;
}
@@ -2054,9 +2051,9 @@ static int inet_netconf_msgsize_devconf(int type)
}
static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
- struct ipv4_devconf *devconf, u32 portid,
- u32 seq, int event, unsigned int flags,
- int type)
+ const struct ipv4_devconf *devconf,
+ u32 portid, u32 seq, int event,
+ unsigned int flags, int type)
{
struct nlmsghdr *nlh;
struct netconfmsg *ncm;
@@ -2081,27 +2078,28 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
if ((all || type == NETCONFA_FORWARDING) &&
nla_put_s32(skb, NETCONFA_FORWARDING,
- IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_RP_FILTER) &&
nla_put_s32(skb, NETCONFA_RP_FILTER,
- IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
+ IPV4_DEVCONF_RO(*devconf, RP_FILTER)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_MC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
- IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_BC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_BC_FORWARDING,
- IPV4_DEVCONF(*devconf, BC_FORWARDING)) < 0)
+ IPV4_DEVCONF_RO(*devconf, BC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_PROXY_NEIGH) &&
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
- IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
+ IPV4_DEVCONF_RO(*devconf, PROXY_ARP)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
+ IPV4_DEVCONF_RO(*devconf,
+ IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
goto nla_put_failure;
out:
@@ -2190,21 +2188,20 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
- struct nlattr *tb[NETCONFA_MAX+1];
+ struct nlattr *tb[NETCONFA_MAX + 1];
+ const struct ipv4_devconf *devconf;
+ struct in_device *in_dev = NULL;
+ struct net_device *dev = NULL;
struct sk_buff *skb;
- struct ipv4_devconf *devconf;
- struct in_device *in_dev;
- struct net_device *dev;
int ifindex;
int err;
err = inet_netconf_valid_get_req(in_skb, nlh, tb, extack);
if (err)
- goto errout;
+ return err;
- err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
- goto errout;
+ return -EINVAL;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
switch (ifindex) {
@@ -2215,10 +2212,10 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
devconf = net->ipv4.devconf_dflt;
break;
default:
- dev = __dev_get_by_index(net, ifindex);
- if (!dev)
- goto errout;
- in_dev = __in_dev_get_rtnl(dev);
+ err = -ENODEV;
+ dev = dev_get_by_index(net, ifindex);
+ if (dev)
+ in_dev = in_dev_get(dev);
if (!in_dev)
goto errout;
devconf = &in_dev->cnf;
@@ -2242,6 +2239,9 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
+ if (in_dev)
+ in_dev_put(in_dev);
+ dev_put(dev);
return err;
}
@@ -2250,11 +2250,13 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
+ const struct in_device *in_dev;
struct net_device *dev;
- struct in_device *in_dev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -2271,65 +2273,45 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
- net->dev_base_seq;
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev)
- goto cont;
-
- if (inet_netconf_fill_devconf(skb, dev->ifindex,
- &in_dev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ continue;
+ err = inet_netconf_fill_devconf(skb, dev->ifindex,
+ &in_dev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv4.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv4.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv4.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv4.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
@@ -2810,7 +2792,9 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
- rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0);
+ rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
+ RTNL_FLAG_DUMP_UNLOCKED);
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
- inet_netconf_dump_devconf, 0);
+ inet_netconf_dump_devconf,
+ RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
}
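The devinet.c rewrite above is one instance of a pattern this series applies repeatedly: dump cursors move out of the legacy cb->args[] slots into a small struct overlaid on cb->ctx, and device iteration becomes for_each_netdev_dump(), which resumes from a stored ifindex and runs under RCU instead of RTNL. A minimal sketch, with hypothetical example_* names:

    struct example_dump_ctx {
    	unsigned long ifindex;	/* resume cursor for for_each_netdev_dump() */
    	int ip_idx;		/* per-device sub-cursor */
    };

    static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
    	struct example_dump_ctx *ctx = (void *)cb->ctx;
    	struct net_device *dev;
    	int err = 0;

    	rcu_read_lock();
    	for_each_netdev_dump(sock_net(skb->sk), dev, ctx->ifindex) {
    		err = example_fill_one(skb, cb, dev, &ctx->ip_idx); /* hypothetical */
    		if (err < 0)
    			break;	/* cursors already point at the retry spot */
    	}
    	rcu_read_unlock();
    	return err;
    }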
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4ccfc104f13a..4dd9e5040672 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1247,5 +1247,6 @@ static void __exit esp4_fini(void)
module_init(esp4_init);
module_exit(esp4_fini);
+MODULE_DESCRIPTION("IPv4 ESP transformation library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 390f4be7f7be..48741352a88a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -916,7 +916,8 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct rtmsg *rtm;
int err, i;
- ASSERT_RTNL();
+ if (filter->rtnl_held)
+ ASSERT_RTNL();
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG(extack, "Invalid header for FIB dump request");
@@ -961,7 +962,10 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
break;
case RTA_OIF:
ifindex = nla_get_u32(tb[i]);
- filter->dev = __dev_get_by_index(net, ifindex);
+ if (filter->rtnl_held)
+ filter->dev = __dev_get_by_index(net, ifindex);
+ else
+ filter->dev = dev_get_by_index_rcu(net, ifindex);
if (!filter->dev)
return -ENODEV;
break;
@@ -983,20 +987,24 @@ EXPORT_SYMBOL_GPL(ip_valid_fib_dump_req);
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = { .dump_routes = true,
- .dump_exceptions = true };
+ struct fib_dump_filter filter = {
+ .dump_routes = true,
+ .dump_exceptions = true,
+ .rtnl_held = false,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
- int dumped = 0, err;
+ int dumped = 0, err = 0;
+ rcu_read_lock();
if (cb->strict_check) {
err = ip_valid_fib_dump_req(net, nlh, &filter, cb);
if (err < 0)
- return err;
+ goto unlock;
} else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(nlh);
@@ -1005,29 +1013,26 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
/* ipv4 does not use prefix flag */
if (filter.flags & RTM_F_PREFIX)
- return skb->len;
+ goto unlock;
if (filter.table_id) {
tb = fib_get_table(net, filter.table_id);
if (!tb) {
if (rtnl_msg_family(cb->nlh) != PF_INET)
- return skb->len;
+ goto unlock;
NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
- return -ENOENT;
+ err = -ENOENT;
+ goto unlock;
}
-
- rcu_read_lock();
err = fib_table_dump(tb, skb, cb, &filter);
- rcu_read_unlock();
- return skb->len ? : err;
+ goto unlock;
}
s_h = cb->args[0];
s_e = cb->args[1];
- rcu_read_lock();
-
+ err = 0;
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
@@ -1038,25 +1043,20 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
err = fib_table_dump(tb, skb, cb, &filter);
- if (err < 0) {
- if (likely(skb->len))
- goto out;
-
- goto out_err;
- }
+ if (err < 0)
+ goto out;
dumped = 1;
next:
e++;
}
}
out:
- err = skb->len;
-out_err:
- rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
+unlock:
+ rcu_read_unlock();
return err;
}
@@ -1659,5 +1659,6 @@ void __init ip_fib_init(void)
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, 0);
+ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
+ RTNL_FLAG_DUMP_UNLOCKED);
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 3ff35f811765..f474106464d2 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -501,7 +501,7 @@ static void tnode_free(struct key_vector *tn)
if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {
tnode_free_size = 0;
- synchronize_rcu();
+ synchronize_net();
}
}
@@ -2368,7 +2368,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
* and key == 0 means the dump has wrapped around and we are done.
*/
if (count && !key)
- return skb->len;
+ return 0;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
int err;
@@ -2394,7 +2394,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
cb->args[3] = key;
cb->args[2] = count;
- return skb->len;
+ return 0;
}
void __init fib_trie_init(void)
diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c
index 4da03bf45c9b..06e5572f296f 100644
--- a/net/ipv4/fou_bpf.c
+++ b/net/ipv4/fou_bpf.c
@@ -100,10 +100,10 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
__bpf_kfunc_end_defs();
-BTF_SET8_START(fou_kfunc_set)
+BTF_KFUNCS_START(fou_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
BTF_ID_FLAGS(func, bpf_skb_get_fou_encap)
-BTF_SET8_END(fou_kfunc_set)
+BTF_KFUNCS_END(fou_kfunc_set)
static const struct btf_kfunc_id_set fou_bpf_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 0c41076e31ed..a8494f796dca 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -351,7 +351,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
optlen = guehdr->hlen << 2;
len += optlen;
- if (skb_gro_header_hard(skb, len)) {
+ if (!skb_gro_may_pull(skb, len)) {
guehdr = skb_gro_header_slow(skb, len, off);
if (unlikely(!guehdr))
goto out;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 311e70bfce40..5028c72d494a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -174,7 +174,7 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
grehlen += GRE_HEADER_SECTION;
hlen = off + grehlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
goto out;
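The skb_gro_header_hard() -> !skb_gro_may_pull() change in the FOU and GRE receive paths is a rename with inverted polarity: both ask whether the needed header bytes are already directly accessible, with the slow path taken otherwise. The call shape is unchanged:

    if (!skb_gro_may_pull(skb, hlen)) {		/* was: skb_gro_header_hard() */
    	greh = skb_gro_header_slow(skb, hlen, off);
    	if (unlikely(!greh))
    		goto out;
    }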
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index efeeca2b1328..717e97a389a8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -120,12 +120,12 @@
*/
#define IGMP_V1_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
((in_dev)->mr_v1_seen && \
time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8e2eb1793685..7d8090f109ef 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -727,6 +727,10 @@ out:
}
if (req)
reqsk_put(req);
+
+ if (newsk)
+ inet_init_csk_locks(newsk);
+
return newsk;
out_err:
newsk = NULL;
@@ -902,8 +906,9 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
memcpy(nreq_sk, req_sk,
offsetof(struct sock, sk_dontcopy_begin));
- memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
- req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
+ unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
+ req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
+ /* alloc is larger than struct, see above */);
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
@@ -1463,7 +1468,7 @@ static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *f
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
inet->inet_saddr, inet->inet_dport,
inet->inet_sport, sk->sk_protocol,
- RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ ip_sock_rt_tos(sk), sk->sk_bound_dev_if);
if (IS_ERR(rt))
rt = NULL;
if (rt)
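The memcpy() -> unsafe_memcpy() switch in inet_reqsk_clone() is a FORTIFY_SOURCE accommodation: the copy intentionally runs past the end of struct sock into the larger request_sock allocation, and unsafe_memcpy() suppresses the compile-time bounds check while requiring a justification as its fourth argument. The shape, with hypothetical names:

    /* dst and src are struct sock headers of larger allocations */
    unsafe_memcpy(&dst->sk_dontcopy_end, &src->sk_dontcopy_end,
    	      runtime_obj_size - offsetof(struct sock, sk_dontcopy_end),
    	      /* allocation is known to be larger than struct sock */);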
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 93e9193df544..7498af320164 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -24,6 +24,7 @@
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
@@ -32,8 +33,6 @@ u32 inet_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
- static u32 inet_ehash_secret __read_mostly;
-
net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
@@ -1130,10 +1129,33 @@ ok:
return 0;
error:
+ if (sk_hashed(sk)) {
+ spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);
+
+ sock_prot_inuse_add(net, sk->sk_prot, -1);
+
+ spin_lock(lock);
+ sk_nulls_del_node_init_rcu(sk);
+ spin_unlock(lock);
+
+ sk->sk_hash = 0;
+ inet_sk(sk)->inet_sport = 0;
+ inet_sk(sk)->inet_num = 0;
+
+ if (tw)
+ inet_twsk_bind_unhash(tw, hinfo);
+ }
+
spin_unlock(&head2->lock);
if (tb_created)
inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
- spin_unlock_bh(&head->lock);
+ spin_unlock(&head->lock);
+
+ if (tw)
+ inet_twsk_deschedule_put(tw);
+
+ local_bh_enable();
+
return -ENOMEM;
}
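Two independent changes sit in the inet_hashtables.c hunks: inet_ehash_secret loses its function-local definition (the symbol now arrives via the new net/hotdata.h include), and the port-selection error path fully unhashes the socket before bailing out. The secret still relies on lazy one-time initialization; a self-contained sketch of that idiom, with hypothetical example_* names:

    #include <linux/jhash.h>
    #include <linux/net.h>

    static u32 example_secret __read_mostly;

    static u32 example_hash(u32 input)
    {
    	/* fills example_secret with random bytes exactly once, on first use */
    	net_get_random_once(&example_secret, sizeof(example_secret));
    	return jhash_1word(input, example_secret);
    }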
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e9fed83e9b3c..5bd759963451 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -81,10 +81,7 @@ void __init inet_initpeers(void)
inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
- peer_cachep = kmem_cache_create("inet_peer_cache",
- sizeof(struct inet_peer),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
/* Called with rcu_read_lock() or base->lock held */
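KMEM_CACHE() (used here and in ipmr.c below) derives the cache name, object size and alignment from the struct type itself, collapsing the open-coded kmem_cache_create() call. Equivalent shape for a hypothetical type:

    struct example_obj {
    	int a;
    };

    static struct kmem_cache *example_cachep;

    /* expands to kmem_cache_create("example_obj", sizeof(struct example_obj),
     * __alignof__(struct example_obj), flags, NULL)
     */
    example_cachep = KMEM_CACHE(example_obj, SLAB_HWCACHE_ALIGN | SLAB_PANIC);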
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5169c3c72cff..7b16c211b904 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1025,14 +1025,16 @@ static int __net_init ipgre_init_net(struct net *net)
return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
-static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
+ ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
- .exit_batch = ipgre_exit_batch_net,
+ .exit_batch_rtnl = ipgre_exit_batch_rtnl,
.id = &ipgre_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1697,14 +1699,16 @@ static int __net_init ipgre_tap_init_net(struct net *net)
return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
-static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipgre_tap_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
+ ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops,
+ dev_to_kill);
}
static struct pernet_operations ipgre_tap_net_ops = {
.init = ipgre_tap_init_net,
- .exit_batch = ipgre_tap_exit_batch_net,
+ .exit_batch_rtnl = ipgre_tap_exit_batch_rtnl,
.id = &gre_tap_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1715,14 +1719,16 @@ static int __net_init erspan_init_net(struct net *net)
&erspan_link_ops, "erspan0");
}
-static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
+static void __net_exit erspan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
+ ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops,
+ dev_to_kill);
}
static struct pernet_operations erspan_net_ops = {
.init = erspan_init_net,
- .exit_batch = erspan_exit_batch_net,
+ .exit_batch_rtnl = erspan_exit_batch_rtnl,
.id = &erspan_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -1793,6 +1799,7 @@ static void __exit ipgre_fini(void)
module_init(ipgre_init);
module_exit(ipgre_fini);
+MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b06f678b03a1..33f93dc730a3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -493,7 +493,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
- RT_CONN_FLAGS_TOS(sk, tos),
+ RT_TOS(tos),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
@@ -972,8 +972,8 @@ static int __ip_append_data(struct sock *sk,
unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
+ bool paged, hold_tskey, extra_uref = false;
unsigned int wmem_alloc_delta = 0;
- bool paged, extra_uref = false;
u32 tskey = 0;
skb = skb_peek_tail(queue);
@@ -982,10 +982,6 @@ static int __ip_append_data(struct sock *sk,
mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
paged = !!cork->gso_size;
- if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
- READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
- tskey = atomic_inc_return(&sk->sk_tskey) - 1;
-
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
@@ -1052,6 +1048,11 @@ static int __ip_append_data(struct sock *sk,
cork->length += length;
+ hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
+ READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
+ if (hold_tskey)
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+
/* So, what's going on in the loop below?
*
* We use calculated fragment length to generate chained skb,
@@ -1274,6 +1275,8 @@ error:
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
+ if (hold_tskey)
+ atomic_dec(&sk->sk_tskey);
return err;
}
@@ -1287,6 +1290,12 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
if (unlikely(!rt))
return -EFAULT;
+ cork->fragsize = ip_sk_use_pmtu(sk) ?
+ dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+
+ if (!inetdev_valid_mtu(cork->fragsize))
+ return -ENETUNREACH;
+
/*
* setup for corking.
*/
@@ -1303,12 +1312,6 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
cork->addr = ipc->addr;
}
- cork->fragsize = ip_sk_use_pmtu(sk) ?
- dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
-
- if (!inetdev_valid_mtu(cork->fragsize))
- return -ENETUNREACH;
-
cork->gso_size = ipc->gso_size;
cork->dst = &rt->dst;
@@ -1455,6 +1458,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
skb->mark = cork->mark;
skb->tstamp = cork->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
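The __ip_append_data() reordering delays consuming a SOF_TIMESTAMPING_OPT_ID key until the cheap failure checks have passed, and returns it on the error path so the per-socket ID sequence stays dense. Condensed from the hunks above:

    hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
    	     READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
    if (hold_tskey)
    	tskey = atomic_inc_return(&sk->sk_tskey) - 1;	/* reserve an ID */
    ...
    error:
    	if (hold_tskey)
    		atomic_dec(&sk->sk_tskey);		/* hand it back */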
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 7aa9dc0e6760..cf377377b52d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -894,7 +894,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
- int val = 0, err;
+ int val = 0, err, retv;
bool needs_rtnl = setsockopt_needs_rtnl(optname);
switch (optname) {
@@ -938,8 +938,12 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
/* If optlen==0, it is equivalent to val == 0 */
- if (optname == IP_ROUTER_ALERT)
- return ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (optname == IP_ROUTER_ALERT) {
+ retv = ip_ra_control(sk, val ? 1 : 0, NULL);
+ if (retv == 0)
+ inet_assign_bit(RTALERT, sk, val);
+ return retv;
+ }
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
@@ -1363,12 +1367,13 @@ e_inval:
* ipv4_pktinfo_prepare - transfer some info from rtable to skb
* @sk: socket
* @skb: buffer
+ * @drop_dst: if true, drops skb dst
*
* To support IP_CMSG_PKTINFO option, we store rt_iif and specific
* destination in skb->cb[] before dst drop.
* This way, receiver doesn't make cache line misses to read rtable.
*/
-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst)
{
struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
bool prepare = inet_test_bit(PKTINFO, sk) ||
@@ -1397,7 +1402,8 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
pktinfo->ipi_ifindex = 0;
pktinfo->ipi_spec_dst.s_addr = 0;
}
- skb_dst_drop(skb);
+ if (drop_dst)
+ skb_dst_drop(skb);
}
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
@@ -1573,6 +1579,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_BIND_ADDRESS_NO_PORT:
val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
goto copyval;
+ case IP_ROUTER_ALERT:
+ val = inet_test_bit(RTALERT, sk);
+ goto copyval;
case IP_TTL:
val = READ_ONCE(inet->uc_ttl);
if (val < 0)
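The IP_ROUTER_ALERT change mirrors ip_ra_control() state into a per-socket flag bit so getsockopt() can answer without walking the ip_ra chain. The inet flag helpers carry both directions:

    inet_assign_bit(RTALERT, sk, val);	/* setsockopt: record on success */
    val = inet_test_bit(RTALERT, sk);	/* getsockopt: cheap readback */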
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index beeae624c412..d3ee6565fcfb 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -102,10 +102,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else
- cand = t;
+ cand = t;
}
hlist_for_each_entry_rcu(t, head, hash_node) {
@@ -117,9 +116,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -137,9 +136,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (!ip_tunnel_key_match(&t->parms, flags, key))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -150,9 +149,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
!(t->dev->flags & IFF_UP))
continue;
- if (t->parms.link == link)
+ if (READ_ONCE(t->parms.link) == link)
return t;
- else if (!cand)
+ if (!cand)
cand = t;
}
@@ -221,7 +220,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
hlist_for_each_entry_rcu(t, head, hash_node) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
- link == t->parms.link &&
+ link == READ_ONCE(t->parms.link) &&
type == t->dev->type &&
ip_tunnel_key_match(&t->parms, flags, key))
break;
@@ -554,6 +553,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
+static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
+{
+ /* we must cap headroom to some upper limit, else pskb_expand_head()
+ * will overflow header offsets in skb_headers_offset_update().
+ */
+ static const unsigned int max_allowed = 512;
+
+ if (headroom > max_allowed)
+ headroom = max_allowed;
+
+ if (headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, headroom);
+}
+
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
u8 proto, int tunnel_hlen)
{
@@ -632,13 +645,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
- if (headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, headroom);
-
- if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ if (skb_cow_head(skb, headroom)) {
ip_rt_put(rt);
goto tx_dropped;
}
+
+ ip_tunnel_adj_headroom(dev, headroom);
+
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
@@ -747,7 +760,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos),
- dev_net(dev), tunnel->parms.link,
+ dev_net(dev), READ_ONCE(tunnel->parms.link),
tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
@@ -818,16 +831,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
- if (max_headroom > READ_ONCE(dev->needed_headroom))
- WRITE_ONCE(dev->needed_headroom, max_headroom);
- if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ if (skb_cow_head(skb, max_headroom)) {
ip_rt_put(rt);
DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
return;
}
+ ip_tunnel_adj_headroom(dev, max_headroom);
+
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
df, !net_eq(tunnel->net, dev_net(dev)));
return;
@@ -867,7 +880,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
if (t->parms.link != p->link || t->fwmark != fwmark) {
int mtu;
- t->parms.link = p->link;
+ WRITE_ONCE(t->parms.link, p->link);
t->fwmark = fwmark;
mtu = ip_tunnel_bind_dev(dev);
if (set_mtu)
@@ -1057,9 +1070,9 @@ EXPORT_SYMBOL(ip_tunnel_get_link_net);
int ip_tunnel_get_iflink(const struct net_device *dev)
{
- struct ip_tunnel *tunnel = netdev_priv(dev);
+ const struct ip_tunnel *tunnel = netdev_priv(dev);
- return tunnel->parms.link;
+ return READ_ONCE(tunnel->parms.link);
}
EXPORT_SYMBOL(ip_tunnel_get_iflink);
@@ -1130,19 +1143,17 @@ static void ip_tunnel_destroy(struct net *net, struct ip_tunnel_net *itn,
}
void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id,
- struct rtnl_link_ops *ops)
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill)
{
struct ip_tunnel_net *itn;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
itn = net_generic(net, id);
- ip_tunnel_destroy(net, itn, &list, ops);
+ ip_tunnel_destroy(net, itn, dev_to_kill, ops);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets);
@@ -1271,6 +1282,7 @@ int ip_tunnel_init(struct net_device *dev)
if (tunnel->collect_md)
netif_keep_dst(dev);
+ netdev_lockdep_set_classes(dev);
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1298,4 +1310,5 @@ void ip_tunnel_setup(struct net_device *dev, unsigned int net_id)
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);
+MODULE_DESCRIPTION("IPv4 tunnel implementation library");
MODULE_LICENSE("GPL");
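A pattern worth naming in the ip_tunnel.c hunks: t->parms.link is now read locklessly (RCU-side lookups, ip_tunnel_get_iflink()), so every reader is annotated with READ_ONCE() and the single writer in ip_tunnel_update() uses the matching WRITE_ONCE(). In miniature:

    WRITE_ONCE(t->parms.link, p->link);	/* writer, under RTNL */
    ...
    link = READ_ONCE(t->parms.link);	/* lockless reader, under RCU */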
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 586b1b3e35b8..80ccd6661aa3 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -332,7 +332,7 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
};
skb_reset_network_header(skb);
- csum = csum_partial(icmp6h, len, 0);
+ csum = skb_checksum(skb, skb_transport_offset(skb), len, 0);
icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len,
IPPROTO_ICMPV6, csum);
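The one-line ip_tunnel_core.c fix matters because csum_partial() assumes the summed bytes are linear in memory, while the rebuilt ICMPv6 payload may extend into skb frags; skb_checksum() walks the whole skb, fragments included:

    /* before (linear-only): csum = csum_partial(icmp6h, len, 0); */
    csum = skb_checksum(skb, skb_transport_offset(skb), len, 0);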
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9ab9b3ebe0cd..ee587adb169f 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -510,14 +510,16 @@ static int __net_init vti_init_net(struct net *net)
return 0;
}
-static void __net_exit vti_exit_batch_net(struct list_head *list_net)
+static void __net_exit vti_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
+ ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops,
+ dev_to_kill);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
- .exit_batch = vti_exit_batch_net,
+ .exit_batch_rtnl = vti_exit_batch_rtnl,
.id = &vti_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -721,6 +723,7 @@ static void __exit vti_fini(void)
module_init(vti_init);
module_exit(vti_fini);
+MODULE_DESCRIPTION("Virtual (secure) IP tunneling library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 27b8f83c6ea2..f2696eaadbe6 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -592,14 +592,16 @@ static int __net_init ipip_init_net(struct net *net)
return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
}
-static void __net_exit ipip_exit_batch_net(struct list_head *list_net)
+static void __net_exit ipip_exit_batch_rtnl(struct list_head *list_net,
+ struct list_head *dev_to_kill)
{
- ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops);
+ ip_tunnel_delete_nets(list_net, ipip_net_id, &ipip_link_ops,
+ dev_to_kill);
}
static struct pernet_operations ipip_net_ops = {
.init = ipip_init_net,
- .exit_batch = ipip_exit_batch_net,
+ .exit_batch_rtnl = ipip_exit_batch_rtnl,
.id = &ipip_net_id,
.size = sizeof(struct ip_tunnel_net),
};
@@ -658,6 +660,7 @@ static void __exit ipip_fini(void)
module_init(ipip_init);
module_exit(ipip_fini);
+MODULE_DESCRIPTION("IP/IP protocol decoder library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d6f59531b3a..0708ac6f6c58 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1073,7 +1073,7 @@ static int ipmr_cache_report(const struct mr_table *mrt,
msg = (struct igmpmsg *)skb_network_header(skb);
msg->im_vif = vifi;
msg->im_vif_hi = vifi >> 8;
- ipv4_pktinfo_prepare(mroute_sk, pkt);
+ ipv4_pktinfo_prepare(mroute_sk, pkt, false);
memcpy(skb->cb, pkt->cb, sizeof(skb->cb));
/* Add our header */
igmp = skb_put(skb, sizeof(struct igmphdr));
@@ -2587,7 +2587,9 @@ errout_free:
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
@@ -3139,10 +3141,7 @@ int __init ip_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip_mrt_cache",
- sizeof(struct mfc_cache),
- 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
err = register_pernet_subsys(&ipmr_net_ops);
if (err)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index f71a7e9a7de6..8f6e950163a7 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -10,6 +10,10 @@ config NF_DEFRAG_IPV4
tristate
default n
+# old sockopt interface and eval loop
+config IP_NF_IPTABLES_LEGACY
+ tristate
+
config NF_SOCKET_IPV4
tristate "IPv4 socket lookup support"
help
@@ -152,7 +156,7 @@ config IP_NF_MATCH_ECN
config IP_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP_NF_MANGLE || IP_NF_RAW
+ depends on IP_NF_MANGLE || IP_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -173,6 +177,7 @@ config IP_NF_MATCH_TTL
config IP_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -182,7 +187,7 @@ config IP_NF_FILTER
config IP_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP_NF_FILTER
+ depends on IP_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV4
default m if NETFILTER_ADVANCED=n
help
@@ -212,6 +217,7 @@ config IP_NF_NAT
default m if NETFILTER_ADVANCED=n
select NF_NAT
select NETFILTER_XT_NAT
+ select IP_NF_IPTABLES_LEGACY
help
This enables the `nat' table in iptables. This allows masquerading,
port forwarding and other forms of full Network Address Port
@@ -252,6 +258,7 @@ endif # IP_NF_NAT
config IP_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -261,7 +268,7 @@ config IP_NF_MANGLE
config IP_NF_TARGET_ECN
tristate "ECN target support"
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `ECN' target, which can be used in the iptables mangle
@@ -286,6 +293,7 @@ config IP_NF_TARGET_TTL
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -299,6 +307,7 @@ config IP_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -309,36 +318,35 @@ endif # IP_NF_IPTABLES
# ARP tables
config IP_NF_ARPTABLES
- tristate "ARP tables support"
- select NETFILTER_XTABLES
- select NETFILTER_FAMILY_ARP
- depends on NETFILTER_ADVANCED
- help
- arptables is a general, extensible packet identification framework.
- The ARP packet filtering and mangling (manipulation)subsystems
- use this: say Y or M here if you want to use either of those.
-
- To compile it as a module, choose M here. If unsure, say N.
+ tristate
-if IP_NF_ARPTABLES
+config NFT_COMPAT_ARP
+ tristate
+ depends on NF_TABLES_ARP && NFT_COMPAT
+ default m if NFT_COMPAT=m
+ default y if NFT_COMPAT=y
config IP_NF_ARPFILTER
- tristate "ARP packet filtering"
+ tristate "arptables-legacy packet filtering support"
+ select IP_NF_ARPTABLES
+ depends on NETFILTER_XTABLES
help
ARP packet filtering defines a table `filter', which has a series of
rules for simple ARP packet filtering at local input and
- local output. On a bridge, you can also specify filtering rules
- for forwarded ARP packets. See the man page for arptables(8).
+ local output. This is only needed for arptables-legacy(8).
+ Neither arptables-nft nor nftables need this to work.
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_ARP_MANGLE
tristate "ARP payload mangling"
+ depends on IP_NF_ARPTABLES || NFT_COMPAT_ARP
help
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
-endif # IP_NF_ARPTABLES
+ This option is needed by both arptables-legacy and arptables-nft.
+ It is not used by nftables.
endmenu
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 5a26f9de1ab9..85502d4dfbb4 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
# generic IP tables
-obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
+obj-$(CONFIG_IP_NF_IPTABLES_LEGACY) += ip_tables.o
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index bbff68b5b5d4..e34466751d7b 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -26,6 +26,9 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
#define NH_DEV_HASHBITS 8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
+#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS | \
+ NHA_OP_FLAG_DUMP_HW_STATS)
+
static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ID] = { .type = NLA_U32 },
[NHA_GROUP] = { .type = NLA_BINARY },
@@ -37,10 +40,17 @@ static const struct nla_policy rtm_nh_policy_new[] = {
[NHA_ENCAP] = { .type = NLA_NESTED },
[NHA_FDB] = { .type = NLA_FLAG },
[NHA_RES_GROUP] = { .type = NLA_NESTED },
+ [NHA_HW_STATS_ENABLE] = NLA_POLICY_MAX(NLA_U32, true),
};
static const struct nla_policy rtm_nh_policy_get[] = {
[NHA_ID] = { .type = NLA_U32 },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
+};
+
+static const struct nla_policy rtm_nh_policy_del[] = {
+ [NHA_ID] = { .type = NLA_U32 },
};
static const struct nla_policy rtm_nh_policy_dump[] = {
@@ -48,6 +58,8 @@ static const struct nla_policy rtm_nh_policy_dump[] = {
[NHA_GROUPS] = { .type = NLA_FLAG },
[NHA_MASTER] = { .type = NLA_U32 },
[NHA_FDB] = { .type = NLA_FLAG },
+ [NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ NHA_OP_FLAGS_DUMP_ALL),
};
static const struct nla_policy rtm_nh_res_policy_new[] = {
@@ -131,6 +143,7 @@ static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
info->nh_grp->num_nh = num_nh;
info->nh_grp->is_fdb = nhg->fdb_nh;
+ info->nh_grp->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh; i++) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
@@ -162,6 +175,7 @@ static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
return -ENOMEM;
info->nh_res_table->num_nh_buckets = num_nh_buckets;
+ info->nh_res_table->hw_stats = nhg->hw_stats;
for (i = 0; i < num_nh_buckets; i++) {
struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
@@ -474,6 +488,7 @@ static void nexthop_free_group(struct nexthop *nh)
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
WARN_ON(!list_empty(&nhge->nh_list));
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
}
@@ -654,8 +669,201 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
+static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
+{
+ struct nh_grp_entry_stats *cpu_stats;
+
+ cpu_stats = this_cpu_ptr(nhge->stats);
+ u64_stats_update_begin(&cpu_stats->syncp);
+ u64_stats_inc(&cpu_stats->packets);
+ u64_stats_update_end(&cpu_stats->syncp);
+}
+
+static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
+ u64 *ret_packets)
+{
+ int i;
+
+ *ret_packets = 0;
+
+ for_each_possible_cpu(i) {
+ struct nh_grp_entry_stats *cpu_stats;
+ unsigned int start;
+ u64 packets;
+
+ cpu_stats = per_cpu_ptr(nhge->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ packets = u64_stats_read(&cpu_stats->packets);
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ *ret_packets += packets;
+ }
+}
+
+static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
+ const struct nexthop *nh)
+{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ info->id = nh->id;
+ info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
+ info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
+ stats, nhg->num_nh),
+ GFP_KERNEL);
+ if (!info->nh_grp_hw_stats)
+ return -ENOMEM;
+
+ info->nh_grp_hw_stats->num_nh = nhg->num_nh;
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
+ }
+
+ return 0;
+}
+
+static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
+{
+ kfree(info->nh_grp_hw_stats);
+}
+
+void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
+ unsigned int nh_idx,
+ u64 delta_packets)
+{
+ info->hw_stats_used = true;
+ info->stats[nh_idx].packets += delta_packets;
+}
+EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
+
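On the driver side, the new NEXTHOP_EVENT_HW_STATS_REPORT_DELTA event is meant to be answered by feeding hardware counter deltas back through nh_grp_hw_stats_report_delta(). A hedged sketch of such a handler — the example_* names and the delta source are hypothetical:

    static int example_nh_event(struct notifier_block *nb, unsigned long event,
    			    void *ptr)
    {
    	struct nh_notifier_info *info = ptr;
    	unsigned int i;

    	if (event != NEXTHOP_EVENT_HW_STATS_REPORT_DELTA)
    		return NOTIFY_DONE;

    	for (i = 0; i < info->nh_grp_hw_stats->num_nh; i++)
    		nh_grp_hw_stats_report_delta(info->nh_grp_hw_stats, i,
    					     example_read_hw_delta(i));
    	return NOTIFY_OK;
    }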
+static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
+ struct nh_notifier_info *info)
+{
+ struct nh_group *nhg;
+ int i;
+
+ ASSERT_RTNL();
+ nhg = rtnl_dereference(nh->nh_grp);
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
+ }
+}
+
+static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
+{
+ struct nh_notifier_info info = {
+ .net = nh->net,
+ };
+ struct net *net = nh->net;
+ int err;
+
+ if (nexthop_notifiers_is_empty(net))
+ return 0;
+
+ err = nh_notifier_grp_hw_stats_init(&info, nh);
+ if (err)
+ return err;
+
+ err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
+ NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
+ &info);
+
+ /* Cache whatever we got, even if there was an error, otherwise the
+ * successful stats retrievals would get lost.
+ */
+ nh_grp_hw_stats_apply_update(nh, &info);
+ *hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;
+
+ nh_notifier_grp_hw_stats_fini(&info);
+ return notifier_to_errno(err);
+}
+
+static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
+ struct nh_grp_entry *nhge,
+ u32 op_flags)
+{
+ struct nlattr *nest;
+ u64 packets;
+
+ nh_grp_entry_stats_read(nhge, &packets);
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
+ packets + nhge->packets_hw))
+ goto nla_put_failure;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
+ nhge->packets_hw))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags)
+{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+ struct nlattr *nest;
+ bool hw_stats_used;
+ int err;
+ int i;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
+ goto err_out;
+
+ if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
+ nhg->hw_stats) {
+ err = nh_grp_hw_stats_update(nh, &hw_stats_used);
+ if (err)
+ goto out;
+
+ if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
+ goto err_out;
+ }
+
+ nest = nla_nest_start(skb, NHA_GROUP_STATS);
+ if (!nest)
+ goto err_out;
+
+ for (i = 0; i < nhg->num_nh; i++)
+ if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
+ op_flags))
+ goto cancel_out;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+cancel_out:
+ nla_nest_cancel(skb, nest);
+err_out:
+ err = -EMSGSIZE;
+out:
+ return err;
+}
+
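Both emit helpers above follow the standard nested-attribute discipline: open a nest, emit with goto-on-failure, then either commit or cancel so a half-written message never reaches userspace. Condensed:

    nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
    if (!nest)
    	return -EMSGSIZE;
    if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, id))
    	goto nla_put_failure;
    nla_nest_end(skb, nest);	/* success: commit the nest */
    ...
    nla_put_failure:
    	nla_nest_cancel(skb, nest);	/* failure: roll the nest back */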
+static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
+ u32 op_flags)
{
+ struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
struct nexthop_grp *p;
size_t len = nhg->num_nh * sizeof(*p);
struct nlattr *nla;
@@ -684,6 +892,11 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
goto nla_put_failure;
+ if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
+ (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
+ nla_put_nh_group_stats(skb, nh, op_flags)))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -691,7 +904,8 @@ nla_put_failure:
}
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
- int event, u32 portid, u32 seq, unsigned int nlflags)
+ int event, u32 portid, u32 seq, unsigned int nlflags,
+ u32 op_flags)
{
struct fib6_nh *fib6_nh;
struct fib_nh *fib_nh;
@@ -718,7 +932,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
goto nla_put_failure;
- if (nla_put_nh_group(skb, nhg))
+ if (nla_put_nh_group(skb, nh, op_flags))
goto nla_put_failure;
goto out;
}
@@ -849,7 +1063,7 @@ static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
if (!skb)
goto errout;
- err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
+ err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -1104,6 +1318,7 @@ static int nh_check_attr_group(struct net *net,
if (!tb[i])
continue;
switch (i) {
+ case NHA_HW_STATS_ENABLE:
case NHA_FDB:
continue;
case NHA_RES_GROUP:
@@ -1176,6 +1391,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
@@ -1185,7 +1401,7 @@ static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
- struct nexthop *rc = NULL;
+ struct nh_grp_entry *nhge0 = NULL;
int i;
if (nhg->fdb_nh)
@@ -1200,16 +1416,20 @@ static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
if (!nexthop_is_good_nh(nhge->nh))
continue;
- if (!rc)
- rc = nhge->nh;
+ if (!nhge0)
+ nhge0 = nhge;
if (hash > atomic_read(&nhge->hthr.upper_bound))
continue;
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
- return rc ? : nhg->nh_entries[0].nh;
+ if (!nhge0)
+ nhge0 = &nhg->nh_entries[0];
+ nh_grp_entry_stats_inc(nhge0);
+ return nhge0->nh;
}
static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
@@ -1225,6 +1445,7 @@ static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
bucket = &res_table->nh_buckets[bucket_index];
nh_res_bucket_set_busy(bucket);
nhge = rcu_dereference(bucket->nh_entry);
+ nh_grp_entry_stats_inc(nhge);
return nhge->nh;
}
@@ -1798,6 +2019,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
newg->has_v4 = true;
list_del(&nhges[i].nh_list);
+ new_nhges[j].stats = nhges[i].stats;
new_nhges[j].nh_parent = nhges[i].nh_parent;
new_nhges[j].nh = nhges[i].nh;
new_nhges[j].weight = nhges[i].weight;
@@ -1813,6 +2035,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
rcu_assign_pointer(nhp->nh_grp, newg);
list_del(&nhge->nh_list);
+ free_percpu(nhge->stats);
nexthop_put(nhge->nh);
/* Removal of a NH from a resilient group is notified through
@@ -2477,6 +2700,13 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (nhi->family == AF_INET)
nhg->has_v4 = true;
+ nhg->nh_entries[i].stats =
+ netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
+ if (!nhg->nh_entries[i].stats) {
+ err = -ENOMEM;
+ nexthop_put(nhe);
+ goto out_no_nh;
+ }
nhg->nh_entries[i].nh = nhe;
nhg->nh_entries[i].weight = entry[i].weight + 1;
list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
@@ -2509,6 +2739,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
if (cfg->nh_fdb)
nhg->fdb_nh = 1;
+ if (cfg->nh_hw_stats)
+ nhg->hw_stats = true;
+
rcu_assign_pointer(nh->nh_grp, nhg);
return nh;
@@ -2516,6 +2749,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
out_no_nh:
for (i--; i >= 0; --i) {
list_del(&nhg->nh_entries[i].nh_list);
+ free_percpu(nhg->nh_entries[i].stats);
nexthop_put(nhg->nh_entries[i].nh);
}
@@ -2850,6 +3084,9 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
cfg, extack);
+ if (tb[NHA_HW_STATS_ENABLE])
+ cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
+
/* no other attributes should be set */
goto out;
}
@@ -2941,6 +3178,10 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
goto out;
}
+ if (tb[NHA_HW_STATS_ENABLE]) {
+ NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
+ goto out;
+ }
err = 0;
out:
@@ -2966,9 +3207,9 @@ static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
-static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
- struct nlattr **tb, u32 *id,
- struct netlink_ext_ack *extack)
+static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
+ struct nlattr **tb, u32 *id, u32 *op_flags,
+ struct netlink_ext_ack *extack)
{
struct nhmsg *nhm = nlmsg_data(nlh);
@@ -2988,22 +3229,12 @@ static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
return -EINVAL;
}
- return 0;
-}
-
-static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
- int err;
-
- err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
- ARRAY_SIZE(rtm_nh_policy_get) - 1,
- rtm_nh_policy_get, extack);
- if (err < 0)
- return err;
+ if (tb[NHA_OP_FLAGS])
+ *op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
+ else
+ *op_flags = 0;
- return __nh_valid_get_del_req(nlh, tb, id, extack);
+ return 0;
}
/* rtnl */
@@ -3011,16 +3242,23 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[NHA_MAX + 1];
struct nl_info nlinfo = {
.nlh = nlh,
.nl_net = net,
.portid = NETLINK_CB(skb).portid,
};
struct nexthop *nh;
+ u32 op_flags;
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
+ rtm_nh_policy_del, extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
if (err)
return err;
@@ -3038,12 +3276,19 @@ static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
+ struct nlattr *tb[NHA_MAX + 1];
struct sk_buff *skb = NULL;
struct nexthop *nh;
+ u32 op_flags;
int err;
u32 id;
- err = nh_valid_get_del_req(nlh, &id, extack);
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
+ rtm_nh_policy_get, extack);
+ if (err < 0)
+ return err;
+
+ err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
if (err)
return err;
@@ -3058,7 +3303,7 @@ static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
goto errout_free;
err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
+ nlh->nlmsg_seq, 0, op_flags);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
goto errout_free;
@@ -3079,6 +3324,7 @@ struct nh_dump_filter {
bool group_filter;
bool fdb_filter;
u32 res_bucket_nh_id;
+ u32 op_flags;
};
static bool nh_dump_filtered(struct nexthop *nh,
@@ -3150,6 +3396,11 @@ static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
return -EINVAL;
}
+ if (tb[NHA_OP_FLAGS])
+ filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
+ else
+ filter->op_flags = 0;
+
return 0;
}
@@ -3157,11 +3408,10 @@ static int nh_valid_dump_req(const struct nlmsghdr *nlh,
struct nh_dump_filter *filter,
struct netlink_callback *cb)
{
- struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
+ struct nlattr *tb[NHA_MAX + 1];
int err;
- err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
- ARRAY_SIZE(rtm_nh_policy_dump) - 1,
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
rtm_nh_policy_dump, cb->extack);
if (err < 0)
return err;
@@ -3223,7 +3473,7 @@ static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
}
/* rtnl */
@@ -3300,11 +3550,10 @@ static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
struct netlink_callback *cb)
{
struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
- struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
+ struct nlattr *tb[NHA_MAX + 1];
int err;
- err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
- ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
rtm_nh_policy_dump_bucket, NULL);
if (err < 0)
return err;
@@ -3474,16 +3723,16 @@ static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
u32 *id, u16 *bucket_index,
struct netlink_ext_ack *extack)
{
- struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
+ struct nlattr *tb[NHA_MAX + 1];
+ u32 op_flags;
int err;
- err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
- ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
+ err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, NHA_MAX,
rtm_nh_policy_get_bucket, extack);
if (err < 0)
return err;
- err = __nh_valid_get_del_req(nlh, tb, id, extack);
+ err = nh_valid_get_del_req(nlh, tb, id, &op_flags, extack);
if (err)
return err;
@@ -3631,17 +3880,24 @@ unlock:
}
EXPORT_SYMBOL(register_nexthop_notifier);
-int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
int err;
- rtnl_lock();
err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
nb);
- if (err)
- goto unlock;
- nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
-unlock:
+ if (!err)
+ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+ return err;
+}
+EXPORT_SYMBOL(__unregister_nexthop_notifier);
+
+int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = __unregister_nexthop_notifier(net, nb);
rtnl_unlock();
return err;
}
@@ -3737,16 +3993,20 @@ out:
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
-static void __net_exit nexthop_net_exit_batch(struct list_head *net_list)
+static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list) {
+ ASSERT_RTNL();
+ list_for_each_entry(net, net_list, exit_list)
flush_all_nexthops(net);
- kfree(net->nexthop.devhash);
- }
- rtnl_unlock();
+}
+
+static void __net_exit nexthop_net_exit(struct net *net)
+{
+ kfree(net->nexthop.devhash);
+ net->nexthop.devhash = NULL;
}
static int __net_init nexthop_net_init(struct net *net)
@@ -3764,7 +4024,8 @@ static int __net_init nexthop_net_init(struct net *net)
static struct pernet_operations nexthop_net_ops = {
.init = nexthop_net_init,
- .exit_batch = nexthop_net_exit_batch,
+ .exit = nexthop_net_exit,
+ .exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
};
static int __init nexthop_init(void)
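
For readers new to exit_batch_rtnl: the pernet core takes rtnl_lock() once around the exit_batch_rtnl handlers of every registered subsystem for a dying batch of namespaces (hence the ASSERT_RTNL() above), then invokes plain .exit per namespace with no lock held. A reduced sketch of the shape, with hypothetical example_* names:

static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_to_kill)
{
	struct net *net;

	/* RTNL is already held by the caller for the whole batch */
	list_for_each_entry(net, net_list, exit_list)
		example_flush_rtnl_state(net);	/* hypothetical */
}

static struct pernet_operations example_net_ops = {
	.init		 = example_net_init,	/* hypothetical */
	.exit		 = example_net_exit,	/* lock-free cleanup, e.g. kfree() */
	.exit_batch_rtnl = example_exit_batch_rtnl,
};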
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 5f4654ebff48..914bc9c35cc7 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -395,7 +395,7 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)
seq_printf(seq, " %s", snmp4_ipstats_list[i].name);
seq_printf(seq, "\nIp: %d %d",
- IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
+ IPV4_DEVCONF_ALL_RO(net, FORWARDING) ? 1 : 2,
READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 27da9d7294c0..494a6284bd7e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
/* Charge it to the socket. */
- ipv4_pktinfo_prepare(sk, skb);
+ ipv4_pktinfo_prepare(sk, skb, true);
if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
kfree_skb_reason(skb, reason);
return NET_RX_DROP;
@@ -310,7 +310,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
}
nf_reset_ct(skb);
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
raw_rcv_skb(sk, skb);
return 0;
@@ -353,6 +353,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
@@ -815,7 +816,7 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
out: return ret;
}
-static int do_raw_setsockopt(struct sock *sk, int level, int optname,
+static int do_raw_setsockopt(struct sock *sk, int optname,
sockptr_t optval, unsigned int optlen)
{
if (optname == ICMP_FILTER) {
@@ -832,11 +833,11 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_setsockopt(sk, level, optname, optval, optlen);
- return do_raw_setsockopt(sk, level, optname, optval, optlen);
+ return do_raw_setsockopt(sk, optname, optval, optlen);
}
-static int do_raw_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
+static int do_raw_getsockopt(struct sock *sk, int optname,
+ char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
@@ -852,7 +853,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
{
if (level != SOL_RAW)
return ip_getsockopt(sk, level, optname, optval, optlen);
- return do_raw_getsockopt(sk, level, optname, optval, optlen);
+ return do_raw_getsockopt(sk, optname, optval, optlen);
}
static int raw_ioctl(struct sock *sk, int cmd, int *karg)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 16615d107cf0..c8f76f56dc16 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2313,7 +2313,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
/* do not cache if bc_forwarding is enabled */
- if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+ if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
do_cache = false;
goto brd_input;
}
@@ -2993,7 +2993,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
#ifdef CONFIG_IP_MROUTE
if (ipv4_is_multicast(dst) &&
!ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
int err = ipmr_get_route(net, skb,
fl4->saddr, fl4->daddr,
r, portid);
@@ -3693,9 +3693,8 @@ int __init ip_rt_init(void)
panic("IP: failed to allocate ip_rt_acct\n");
#endif
- ipv4_dst_ops.kmem_cachep =
- kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC);
ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
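
The KMEM_CACHE() conversion is mechanical; the macro derives name, size and alignment from the struct, expanding to approximately:

kmem_cache_create("rtable", sizeof(struct rtable),
		  __alignof__(struct rtable),
		  SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

One subtlety worth noting: the old open-coded call passed an alignment of 0, while the macro passes the struct's natural alignment.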
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 61f1c96cfe63..7972ad3d7c73 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -51,15 +51,6 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
count, &syncookie_secret[c]);
}
-/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
-static u64 tcp_ns_to_ts(bool usec_ts, u64 val)
-{
- if (usec_ts)
- return div_u64(val, NSEC_PER_USEC);
-
- return div_u64(val, NSEC_PER_MSEC);
-}
-
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
* tcp options in the lower bits of the timestamp value that will be
@@ -304,6 +295,24 @@ static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb,
return 0;
}
+#if IS_ENABLED(CONFIG_BPF)
+struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb)
+{
+ struct request_sock *req = inet_reqsk(skb->sk);
+
+ skb->sk = NULL;
+ skb->destructor = NULL;
+
+ if (cookie_tcp_reqsk_init(sk, skb, req)) {
+ reqsk_free(req);
+ req = NULL;
+ }
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(cookie_bpf_check);
+#endif
+
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk, struct sk_buff *skb,
struct tcp_options_received *tcp_opt,
@@ -399,16 +408,23 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
struct rtable *rt;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
@@ -420,8 +436,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
*/
RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
tcp_ao_syncookie(sk, skb, req, AF_INET);
@@ -438,8 +456,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
- if (IS_ERR(rt))
+ if (IS_ERR(rt)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
/* Try to redo what tcp_v4_send_synack did. */
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
@@ -454,19 +474,24 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst);
ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
/* ip_queue_xmit() depends on our flow being set up
* Normal sockets get it right from inet_csk_route_child_sock()
*/
- if (ret)
- inet_sk(ret)->cork.fl.u.ip4 = fl4;
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
+ inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ kfree_skb_reason(skb, reason);
return NULL;
}
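
cookie_bpf_ok() is not visible in this hunk. Given that cookie_bpf_check() pulls a request socket out of skb->sk and clears the destructor, a plausible reading is that a BPF program (via a kfunc along the lines of bpf_sk_assign_tcp_reqsk()) parks the reqsk in skb->sk beforehand, making the gate simply:

/* sketch only; the real helper lives in a header, not this file */
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return !!skb->sk;
}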
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1baa484d2190..c5b83875411a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -279,6 +279,7 @@
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
+#include <net/rps.h>
/* Track pending CMSGs. */
enum {
@@ -722,6 +723,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
+ smp_mb__after_atomic();
}
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED.
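
set_bit() is an atomic RMW with no return value and therefore implies no ordering; the new smp_mb__after_atomic() guarantees the TSQ_THROTTLED store is visible before the re-read of sk_wmem_alloc that follows in the (elided) context, so a TX completion that already freed the skb cannot be missed. Schematically, as a sketch rather than the literal surrounding code:

set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
smp_mb__after_atomic();		/* order the bit store before the read below */
if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
	return;			/* completion still pending; it will see THROTTLED */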
@@ -1785,7 +1787,17 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
static bool can_map_frag(const skb_frag_t *frag)
{
- return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag);
+ struct page *page;
+
+ if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
+ return false;
+
+ page = skb_frag_page(frag);
+
+ if (PageCompound(page) || page->mapping)
+ return false;
+
+ return true;
}
static int find_next_mappable_frag(const skb_frag_t *frag,
@@ -4604,7 +4616,8 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 31);
+ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32);
/* RX read-mostly hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
@@ -4639,7 +4652,7 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 113);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105);
/* TXRX read-write hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 87db432c6bb4..3afeeb68e8a7 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -509,9 +509,9 @@ static int tcp_ao_hash_header(struct tcp_sigpool *hp,
bool exclude_options, u8 *hash,
int hash_offset, int hash_len)
{
- int err, len = th->doff << 2;
struct scatterlist sg;
u8 *hdr = hp->scratch;
+ int err, len;
/* We are not allowed to change tcphdr, make a local copy */
if (exclude_options) {
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 22358032dd48..05dc2d05bc7c 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -1155,7 +1155,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
.set_state = bbr_set_state,
};
-BTF_SET8_START(tcp_bbr_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_bbr_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, bbr_init)
@@ -1168,7 +1168,7 @@ BTF_ID_FLAGS(func, bbr_min_tso_segs)
BTF_ID_FLAGS(func, bbr_set_state)
#endif
#endif
-BTF_SET8_END(tcp_bbr_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_bbr_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0fd78ecb67e7..44869ea089e3 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -485,7 +485,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.name = "cubic",
};
-BTF_SET8_START(tcp_cubic_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, cubictcp_init)
@@ -496,7 +496,7 @@ BTF_ID_FLAGS(func, cubictcp_cwnd_event)
BTF_ID_FLAGS(func, cubictcp_acked)
#endif
#endif
-BTF_SET8_END(tcp_cubic_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index bb23bb5b387a..e33fbe4933e4 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -260,7 +260,7 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = {
.name = "dctcp-reno",
};
-BTF_SET8_START(tcp_dctcp_check_kfunc_ids)
+BTF_KFUNCS_START(tcp_dctcp_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, dctcp_init)
@@ -271,7 +271,7 @@ BTF_ID_FLAGS(func, dctcp_cwnd_undo)
BTF_ID_FLAGS(func, dctcp_state)
#endif
#endif
-BTF_SET8_END(tcp_dctcp_check_kfunc_ids)
+BTF_KFUNCS_END(tcp_dctcp_check_kfunc_ids)
static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index df7b13f0e5e0..5d874817a78d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1164,7 +1164,7 @@ static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
* L|R 1 - orig is lost, retransmit is in flight.
* S|R 1 - orig reached receiver, retrans is still in flight.
* (L|S|R is logically valid, it could occur when L|R is sacked,
- * but it is equivalent to plain S and code short-curcuits it to S.
+ * but it is equivalent to plain S and code short-circuits it to S.
* L|S is logically invalid, it would mean -1 packet in flight 8))
*
* These 6 states form finite state machine, controlled by the following events:
@@ -6361,6 +6361,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
inet_csk_reset_xmit_timer(sk,
ICSK_TIME_RETRANS,
TCP_TIMEOUT_MIN, TCP_RTO_MAX);
+ SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
goto reset_and_undo;
}
@@ -6369,6 +6370,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_time_stamp_ts(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
+ SKB_DR_SET(reason, TCP_RFC7323_PAWS);
goto reset_and_undo;
}
@@ -6572,7 +6574,8 @@ discard_and_undo:
reset_and_undo:
tcp_clear_options(&tp->rx_opt);
tp->rx_opt.mss_clamp = saved_clamp;
- return 1;
+ /* we can reuse/return @reason so its caller can handle the exception */
+ return reason;
}
static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
@@ -6616,14 +6619,14 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
* address independent.
*/
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
+enum skb_drop_reason
+tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcphdr *th = tcp_hdr(skb);
struct request_sock *req;
int queued = 0;
- bool acceptable;
SKB_DR(reason);
switch (sk->sk_state) {
@@ -6633,7 +6636,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_LISTEN:
if (th->ack)
- return 1;
+ return SKB_DROP_REASON_TCP_FLAGS;
if (th->rst) {
SKB_DR_SET(reason, TCP_RESET);
@@ -6649,12 +6652,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
*/
rcu_read_lock();
local_bh_disable();
- acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
+ icsk->icsk_af_ops->conn_request(sk, skb);
local_bh_enable();
rcu_read_unlock();
- if (!acceptable)
- return 1;
consume_skb(skb);
return 0;
}
@@ -6699,17 +6700,25 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 0;
/* step 5: check the ACK field */
- acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
- FLAG_UPDATE_TS_RECENT |
- FLAG_NO_CHALLENGE_ACK) > 0;
-
- if (!acceptable) {
- if (sk->sk_state == TCP_SYN_RECV)
- return 1; /* send one RST */
- tcp_send_challenge_ack(sk);
- SKB_DR_SET(reason, TCP_OLD_ACK);
- goto discard;
+ reason = tcp_ack(sk, skb, FLAG_SLOWPATH |
+ FLAG_UPDATE_TS_RECENT |
+ FLAG_NO_CHALLENGE_ACK);
+
+ if ((int)reason <= 0) {
+ if (sk->sk_state == TCP_SYN_RECV) {
+ /* send one RST */
+ if (!reason)
+ return SKB_DROP_REASON_TCP_OLD_ACK;
+ return -reason;
+ }
+ /* accept old ack during closing */
+ if ((int)reason < 0) {
+ tcp_send_challenge_ack(sk);
+ reason = -reason;
+ goto discard;
+ }
}
+ SKB_DR_SET(reason, NOT_SPECIFIED);
switch (sk->sk_state) {
case TCP_SYN_RECV:
tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
@@ -6777,7 +6786,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (READ_ONCE(tp->linger2) < 0) {
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
@@ -6786,7 +6795,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
tcp_fastopen_active_disable(sk);
tcp_done(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
tmo = tcp_fin_time(sk);
@@ -6851,7 +6860,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk, skb);
- return 1;
+ return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
}
}
fallthrough;
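
The new return contract, as the tcp_ipv4.c and tcp_minisocks.c hunks below apply it: zero (SKB_NOT_DROPPED_YET) means the skb was consumed, and any positive reason asks the caller to reset and drop with that reason. Caller-side sketch mirroring tcp_v4_do_rcv():

reason = tcp_rcv_state_process(sk, skb);
if (reason) {
	rsk = sk;
	goto reset;	/* tcp_v4_send_reset(), then kfree_skb_reason(skb, reason) */
}
return 0;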
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c50c5a32b84..a22ee5838751 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1907,7 +1907,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (tcp_checksum_complete(skb))
goto csum_err;
@@ -1915,9 +1914,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *nsk = tcp_v4_cookie_check(sk, skb);
if (!nsk)
- goto discard;
+ return 0;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb)) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason) {
rsk = nsk;
goto reset;
}
@@ -1926,7 +1926,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb)) {
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason) {
rsk = sk;
goto reset;
}
@@ -2275,10 +2276,12 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v4_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v4_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ tcp_v4_send_reset(nsk, skb);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9e85f2a0bddd..52040b0e2616 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -911,11 +911,11 @@ EXPORT_SYMBOL(tcp_check_req);
* be created.
*/
-int tcp_child_process(struct sock *parent, struct sock *child,
- struct sk_buff *skb)
+enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ struct sk_buff *skb)
__releases(&((child)->sk_lock.slock))
{
- int ret = 0;
+ enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
int state = child->sk_state;
/* record sk_napi_id and sk_rx_queue_mapping of child. */
@@ -923,7 +923,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
- ret = tcp_rcv_state_process(child, skb);
+ reason = tcp_rcv_state_process(child, skb);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent);
@@ -937,6 +937,6 @@ int tcp_child_process(struct sock *parent, struct sock *child,
bh_unlock_sock(child);
sock_put(child);
- return ret;
+ return reason;
}
EXPORT_SYMBOL(tcp_child_process);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 8311c38267b5..ebe4722bb020 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -204,7 +204,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
goto out;
hlen = off + thlen;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
goto out;
@@ -299,18 +299,20 @@ out:
void tcp_gro_complete(struct sk_buff *skb)
{
struct tcphdr *th = tcp_hdr(skb);
+ struct skb_shared_info *shinfo;
+
+ if (skb->encapsulation)
+ skb->inner_transport_header = skb->transport_header;
skb->csum_start = (unsigned char *)th - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ shinfo = skb_shinfo(skb);
+ shinfo->gso_segs = NAPI_GRO_CB(skb)->count;
if (th->cwr)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- if (skb->encapsulation)
- skb->inner_transport_header = skb->transport_header;
+ shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);
@@ -335,24 +337,22 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (NAPI_GRO_CB(skb)->is_atomic)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
+ (NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);
tcp_gro_complete(skb);
return 0;
}
-static const struct net_offload tcpv4_offload = {
- .callbacks = {
- .gso_segment = tcp4_gso_segment,
- .gro_receive = tcp4_gro_receive,
- .gro_complete = tcp4_gro_complete,
- },
-};
-
int __init tcpv4_offload_init(void)
{
- return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
+ net_hotdata.tcpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp4_gso_segment,
+ .gro_receive = tcp4_gro_receive,
+ .gro_complete = tcp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 5048c47c79b2..4c1f836aae38 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -294,4 +294,5 @@ static void __exit tunnel4_fini(void)
module_init(tunnel4_init);
module_exit(tunnel4_fini);
+MODULE_DESCRIPTION("IPv4 XFRM tunnel library");
MODULE_LICENSE("GPL");
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 148ffb007969..2beabf5b2d86 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -411,8 +411,6 @@ INDIRECT_CALLABLE_SCOPE
u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
- static u32 udp_ehash_secret __read_mostly;
-
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
@@ -1589,12 +1587,8 @@ int udp_init_sock(struct sock *sk)
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
- if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
- bool slow = lock_sock_fast(sk);
-
+ if (unlikely(READ_ONCE(udp_sk(sk)->peeking_with_offset)))
sk_peek_offset_bwd(sk, len);
- unlock_sock_fast(sk, slow);
- }
if (!skb_unref(skb))
return;
@@ -2169,7 +2163,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
udp_csum_pull_header(skb);
- ipv4_pktinfo_prepare(sk, skb);
+ ipv4_pktinfo_prepare(sk, skb, true);
return __udp_queue_rcv_skb(sk, skb);
csum_error:
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6c95d28d0c4a..b9880743765c 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -737,15 +737,14 @@ INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
-static const struct net_offload udpv4_offload = {
- .callbacks = {
- .gso_segment = udp4_ufo_fragment,
- .gro_receive = udp4_gro_receive,
- .gro_complete = udp4_gro_complete,
- },
-};
-
int __init udpv4_offload_init(void)
{
- return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
+ net_hotdata.udpv4_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp4_ufo_fragment,
+ .gro_receive = udp4_gro_receive,
+ .gro_complete = udp4_gro_complete,
+ },
+ };
+ return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
}
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index a87defb2b167..860aff5f8599 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -253,4 +253,5 @@ struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup);
+MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index c54676998eb6..dae35101d189 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -58,7 +58,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
return -iph->protocol;
#endif
- __skb_push(skb, skb->data - skb_network_header(skb));
+ __skb_push(skb, -skb_network_offset(skb));
iph->tot_len = htons(skb->len);
ip_send_check(iph);
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 8489fa106583..8cb266af1393 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -114,5 +114,6 @@ static void __exit ipip_fini(void)
module_init(ipip_init);
module_exit(ipip_fini);
+MODULE_DESCRIPTION("IPv4 XFRM tunnel driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 733ace18806c..f786b65d12e4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -257,6 +258,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
+ .regen_min_advance = REGEN_MIN_ADVANCE,
.regen_max_retry = REGEN_MAX_RETRY,
.max_desync_factor = MAX_DESYNC_FACTOR,
.max_addresses = IPV6_MAX_ADDRESSES,
@@ -549,7 +551,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto out;
if ((all || type == NETCONFA_FORWARDING) &&
- nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
+ nla_put_s32(skb, NETCONFA_FORWARDING,
+ READ_ONCE(devconf->forwarding)) < 0)
goto nla_put_failure;
#ifdef CONFIG_IPV6_MROUTE
if ((all || type == NETCONFA_MC_FORWARDING) &&
@@ -558,12 +561,13 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
goto nla_put_failure;
#endif
if ((all || type == NETCONFA_PROXY_NEIGH) &&
- nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
+ nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
+ READ_ONCE(devconf->proxy_ndp)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
- devconf->ignore_routes_with_linkdown) < 0)
+ READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0)
goto nla_put_failure;
out:
@@ -708,16 +712,33 @@ errout:
return err;
}
+/* Combine dev_addr_genid and dev_base_seq to detect changes.
+ */
+static u32 inet6_base_seq(const struct net *net)
+{
+ u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
+ net->dev_base_seq;
+
+ /* Must not return 0 (see nl_dump_check_consistent()).
+ * Choose a value far away from 0.
+ */
+ if (!res)
+ res = 0x80000000;
+ return res;
+}
+
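
For context, cb->seq feeds netlink dump consistency checking; a usage sketch matching the inet6_dump_addr() hunk near the end of this file:

cb->seq = inet6_base_seq(tgt_net);		/* sample once per dump pass */
/* ... fill one or more messages ... */
nl_dump_check_consistent(cb, nlmsg_hdr(skb));	/* sets NLM_F_DUMP_INTR on mismatch */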
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx, s_idx;
+ struct {
+ unsigned long ifindex;
+ unsigned int all_default;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err = 0;
if (cb->strict_check) {
struct netlink_ext_ack *extack = cb->extack;
@@ -734,65 +755,46 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
}
}
- s_h = cb->args[0];
- s_idx = idx = cb->args[1];
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- rcu_read_lock();
- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
- net->dev_base_seq;
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
-
- if (inet6_netconf_fill_devconf(skb, dev->ifindex,
- &idev->cnf,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF,
- NLM_F_MULTI,
- NETCONFA_ALL) < 0) {
- rcu_read_unlock();
- goto done;
- }
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_netconf_fill_devconf(skb, dev->ifindex,
+ &idev->cnf,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF,
+ NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
+ goto done;
}
- if (h == NETDEV_HASHENTRIES) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
- net->ipv6.devconf_all,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ if (ctx->all_default == 0) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
+ net->ipv6.devconf_all,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
- }
- if (h == NETDEV_HASHENTRIES + 1) {
- if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
- net->ipv6.devconf_dflt,
- NETLINK_CB(cb->skb).portid,
- nlh->nlmsg_seq,
- RTM_NEWNETCONF, NLM_F_MULTI,
- NETCONFA_ALL) < 0)
+ ctx->all_default++;
+ }
+ if (ctx->all_default == 1) {
+ err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt,
+ NETLINK_CB(cb->skb).portid,
+ nlh->nlmsg_seq,
+ RTM_NEWNETCONF, NLM_F_MULTI,
+ NETCONFA_ALL);
+ if (err < 0)
goto done;
- else
- h++;
+ ctx->all_default++;
}
done:
- cb->args[0] = h;
- cb->args[1] = idx;
-
- return skb->len;
+ rcu_read_unlock();
+ return err;
}
#ifdef CONFIG_SYSCTL
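
The ctx-based walk replaces the dev_index_head hash iteration and the cb->args[] cursor juggling. for_each_netdev_dump() iterates the per-netns device xarray starting at (and updating) ctx->ifindex, so a dump resumes exactly where the previous skb filled up. Reduced sketch, with a hypothetical fill_one() callback:

struct {
	unsigned long ifindex;
} *ctx = (void *)cb->ctx;

rcu_read_lock();
for_each_netdev_dump(net, dev, ctx->ifindex) {
	err = fill_one(skb, dev);	/* hypothetical per-device fill */
	if (err < 0)
		break;		/* ctx->ifindex already marks the resume point */
}
rcu_read_unlock();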
@@ -852,7 +854,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
- idev->cnf.forwarding = newf;
+
+ WRITE_ONCE(idev->cnf.forwarding, newf);
if (changed)
dev_forward_change(idev);
}
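
A note on the WRITE_ONCE()/READ_ONCE() churn throughout this file: devconf fields become lockless-read data (see the ipv6_store_devconf() rewrite near the end of the patch), so writer and reader sides are annotated pairwise to rule out load/store tearing and to document the intentional data race. The pattern, schematically:

/* writer, typically under RTNL or idev->lock */
WRITE_ONCE(idev->cnf.forwarding, newf);

/* lockless reader, e.g. a netlink dump path */
if (READ_ONCE(idev->cnf.forwarding))
	example_handle_forwarding();	/* hypothetical consumer */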
@@ -869,7 +872,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->forwarding) {
if ((!newf) ^ (!old))
@@ -884,7 +887,7 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
if (p == &net->ipv6.devconf_all->forwarding) {
int old_dflt = net->ipv6.devconf_dflt->forwarding;
- net->ipv6.devconf_dflt->forwarding = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf);
if ((!newf) ^ (!old_dflt))
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_FORWARDING,
@@ -916,7 +919,7 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf)
if (idev) {
int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
- idev->cnf.ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf);
if (changed)
inet6_netconf_notify_devconf(dev_net(dev),
RTM_NEWNETCONF,
@@ -937,7 +940,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
net = (struct net *)table->extra2;
old = *p;
- *p = newf;
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
if ((!newf) ^ (!old))
@@ -951,7 +954,7 @@ static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
}
if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
- net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf);
addrconf_linkdown_change(net, newf);
if ((!newf) ^ (!old))
inet6_netconf_notify_devconf(net,
@@ -1255,6 +1258,7 @@ static void
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
bool del_rt, bool del_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
@@ -1264,8 +1268,15 @@ cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
if (del_rt)
ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
else {
- if (!(f6i->fib6_flags & RTF_EXPIRES))
+ if (!(f6i->fib6_flags & RTF_EXPIRES)) {
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
fib6_info_release(f6i);
}
}
@@ -1331,12 +1342,21 @@ out:
in6_ifa_put(ifp);
}
+static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev)
+{
+ return READ_ONCE(idev->cnf.regen_min_advance) +
+ READ_ONCE(idev->cnf.regen_max_retry) *
+ READ_ONCE(idev->cnf.dad_transmits) *
+ max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+}
+
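
Plugging in common defaults makes the lead time concrete (assuming, not stated in this patch: regen_min_advance = 2 seconds per RFC 8981's REGEN_ADVANCE formula, regen_max_retry = 3, dad_transmits = 1, and a 1-second retransmit timer):

/* regen_advance = 2 + 3 * 1 * max(1 * HZ, HZ / 100) / HZ
 *               = 2 + 3
 *               = 5 seconds of headroom before the preferred lifetime
 *                 ends, in which a replacement temporary address can
 *                 complete DAD.
 */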
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
{
struct inet6_dev *idev = ifp->idev;
unsigned long tmp_tstamp, age;
unsigned long regen_advance;
unsigned long now = jiffies;
+ u32 if_public_preferred_lft;
s32 cnf_temp_preferred_lft;
struct inet6_ifaddr *ift;
struct ifa6_config cfg;
@@ -1348,7 +1368,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
retry:
in6_dev_hold(idev);
- if (idev->cnf.use_tempaddr <= 0) {
+ if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) {
write_unlock_bh(&idev->lock);
pr_info("%s: use_tempaddr is disabled\n", __func__);
in6_dev_put(idev);
@@ -1356,8 +1376,8 @@ retry:
goto out;
}
spin_lock_bh(&ifp->lock);
- if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
- idev->cnf.use_tempaddr = -1; /*XXX*/
+ if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) {
+ WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/
spin_unlock_bh(&ifp->lock);
write_unlock_bh(&idev->lock);
pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
@@ -1372,16 +1392,14 @@ retry:
age = (now - ifp->tstamp) / HZ;
- regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ regen_advance = ipv6_get_regen_advance(idev);
/* recalculate max_desync_factor each time and update
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
max_desync_factor = min_t(long,
- idev->cnf.max_desync_factor,
+ READ_ONCE(idev->cnf.max_desync_factor),
cnf_temp_preferred_lft - regen_advance);
if (unlikely(idev->desync_factor > max_desync_factor)) {
@@ -1394,11 +1412,13 @@ retry:
}
}
+ if_public_preferred_lft = ifp->prefered_lft;
+
memset(&cfg, 0, sizeof(cfg));
cfg.valid_lft = min_t(__u32, ifp->valid_lft,
- idev->cnf.temp_valid_lft + age);
+ READ_ONCE(idev->cnf.temp_valid_lft) + age);
cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
- cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
+ cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft);
cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);
cfg.plen = ifp->prefix_len;
@@ -1407,19 +1427,41 @@ retry:
write_unlock_bh(&idev->lock);
- /* A temporary address is created only if this calculated Preferred
- * Lifetime is greater than REGEN_ADVANCE time units. In particular,
- * an implementation must not create a temporary address with a zero
- * Preferred Lifetime.
+ /* From RFC 4941:
+ *
+ * A temporary address is created only if this calculated Preferred
+ * Lifetime is greater than REGEN_ADVANCE time units. In
+ * particular, an implementation must not create a temporary address
+ * with a zero Preferred Lifetime.
+ *
+ * ...
+ *
+ * When creating a temporary address, the lifetime values MUST be
+ * derived from the corresponding prefix as follows:
+ *
+ * ...
+ *
+ * * Its Preferred Lifetime is the lower of the Preferred Lifetime
+ * of the public address or TEMP_PREFERRED_LIFETIME -
+ * DESYNC_FACTOR.
+ *
+ * To comply with the RFC's requirements, clamp the preferred lifetime
+ * to a minimum of regen_advance, unless that would exceed valid_lft or
+ * ifp->prefered_lft.
+ *
* Use age calculation as in addrconf_verify to avoid unnecessary
* temporary addresses being generated.
*/
age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (cfg.preferred_lft <= regen_advance + age) {
- in6_ifa_put(ifp);
- in6_dev_put(idev);
- ret = -1;
- goto out;
+ cfg.preferred_lft = regen_advance + age + 1;
+ if (cfg.preferred_lft > cfg.valid_lft ||
+ cfg.preferred_lft > if_public_preferred_lft) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+ goto out;
+ }
}
cfg.ifa_flags = IFA_F_TEMPORARY;
@@ -1498,15 +1540,17 @@ static inline int ipv6_saddr_preferred(int type)
return 0;
}
-static bool ipv6_use_optimistic_addr(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_use_optimistic_addr(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
- if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
+ if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) &&
+ !READ_ONCE(idev->cnf.use_optimistic))
return false;
return true;
@@ -1515,13 +1559,14 @@ static bool ipv6_use_optimistic_addr(struct net *net,
#endif
}
-static bool ipv6_allow_optimistic_dad(struct net *net,
- struct inet6_dev *idev)
+static bool ipv6_allow_optimistic_dad(const struct net *net,
+ const struct inet6_dev *idev)
{
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
if (!idev)
return false;
- if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
+ if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
+ !READ_ONCE(idev->cnf.optimistic_dad))
return false;
return true;
@@ -1627,7 +1672,7 @@ static int ipv6_get_saddr_eval(struct net *net,
*/
int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
- score->ifa->idev->cnf.use_tempaddr >= 2;
+ READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
break;
}
@@ -1803,7 +1848,7 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
idev = __in6_dev_get(dst_dev);
if ((dst_type & IPV6_ADDR_MULTICAST) ||
dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
- (idev && idev->cnf.use_oif_addrs_only)) {
+ (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) {
use_oif_addr = true;
}
}
@@ -2110,6 +2155,7 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net *net = dev_net(idev->dev);
+ int max_addresses;
if (addrconf_dad_end(ifp)) {
in6_ifa_put(ifp);
@@ -2147,9 +2193,9 @@ void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
spin_unlock_bh(&ifp->lock);
- if (idev->cnf.max_addresses &&
- ipv6_count_addresses(idev) >=
- idev->cnf.max_addresses)
+ max_addresses = READ_ONCE(idev->cnf.max_addresses);
+ if (max_addresses &&
+ ipv6_count_addresses(idev) >= max_addresses)
goto lock_errdad;
net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
@@ -2546,11 +2592,11 @@ static void manage_tempaddrs(struct inet6_dev *idev,
* (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
*/
age = (now - ift->cstamp) / HZ;
- max_valid = idev->cnf.temp_valid_lft - age;
+ max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age;
if (max_valid < 0)
max_valid = 0;
- max_prefered = idev->cnf.temp_prefered_lft -
+ max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) -
idev->desync_factor - age;
if (max_prefered < 0)
max_prefered = 0;
@@ -2583,7 +2629,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
create = true;
- if (create && idev->cnf.use_tempaddr > 0) {
+ if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) {
/* When a new public address is created as described
* in [ADDRCONF], also create a new temporary address.
*/
@@ -2611,7 +2657,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
int create = 0, update_lft = 0;
if (!ifp && valid_lft) {
- int max_addresses = in6_dev->cnf.max_addresses;
+ int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses);
struct ifa6_config cfg = {
.pfx = addr,
.plen = pinfo->prefix_len,
@@ -2623,8 +2669,8 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
};
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((net->ipv6.devconf_all->optimistic_dad ||
- in6_dev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(in6_dev->cnf.optimistic_dad)) &&
!net->ipv6.devconf_all->forwarding && sllao)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -2673,7 +2719,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
*/
update_lft = !create && stored_lft;
- if (update_lft && !in6_dev->cnf.ra_honor_pio_life) {
+ if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) {
const u32 minimum_lft = min_t(u32,
stored_lft, MIN_VALID_LIFETIME);
valid_lft = max(valid_lft, minimum_lft);
@@ -2706,6 +2752,7 @@ EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
{
struct prefix_info *pinfo;
+ struct fib6_table *table;
__u32 valid_lft;
__u32 prefered_lft;
int addr_type, err;
@@ -2782,11 +2829,20 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
if (valid_lft == 0) {
ip6_del_rt(net, rt, false);
rt = NULL;
- } else if (addrconf_finite_timeout(rt_expires)) {
- /* not infinity */
- fib6_set_expires(rt, jiffies + rt_expires);
} else {
- fib6_clean_expires(rt);
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (addrconf_finite_timeout(rt_expires)) {
+ /* not infinity */
+ fib6_set_expires(rt, jiffies + rt_expires);
+ fib6_add_gc_list(rt);
+ } else {
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
}
} else if (valid_lft) {
clock_t expires = 0;
@@ -3247,8 +3303,8 @@ void addrconf_add_linklocal(struct inet6_dev *idev,
struct inet6_ifaddr *ifp;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
- idev->cnf.optimistic_dad) &&
+ if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) ||
+ READ_ONCE(idev->cnf.optimistic_dad)) &&
!dev_net(idev->dev)->ipv6.devconf_all->forwarding)
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
#endif
@@ -3427,7 +3483,8 @@ static void addrconf_dev_config(struct net_device *dev)
/* this device type has no EUI support */
if (dev->type == ARPHRD_NONE &&
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_RANDOM);
addrconf_addr_gen(idev, false);
}
@@ -3605,7 +3662,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
break;
}
@@ -3697,9 +3754,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (idev->cnf.mtu6 != dev->mtu &&
dev->mtu >= IPV6_MIN_MTU) {
rt6_mtu_change(dev, dev->mtu);
- idev->cnf.mtu6 = dev->mtu;
+ WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
/*
@@ -3819,10 +3876,10 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
*/
if (!unregister && !idev->cnf.disable_ipv6) {
/* aggregate the system setting and interface setting */
- int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
+ int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down);
if (!_keep_addr)
- _keep_addr = idev->cnf.keep_addr_on_down;
+ _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down);
keep_addr = (_keep_addr > 0);
}
@@ -3941,7 +3998,7 @@ restart:
ipv6_mc_down(idev);
}
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
idev->ra_mtu = 0;
/* Last: Shot the device (if unregistered) */
@@ -3959,6 +4016,7 @@ static void addrconf_rs_timer(struct timer_list *t)
struct inet6_dev *idev = from_timer(idev, t, rs_timer);
struct net_device *dev = idev->dev;
struct in6_addr lladdr;
+ int rtr_solicits;
write_lock(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY))
@@ -3971,7 +4029,9 @@ static void addrconf_rs_timer(struct timer_list *t)
if (idev->if_flags & IF_RA_RCVD)
goto out;
- if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
+ rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits);
+
+ if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) {
write_unlock(&idev->lock);
if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
ndisc_send_rs(dev, &lladdr,
@@ -3981,11 +4041,12 @@ static void addrconf_rs_timer(struct timer_list *t)
write_lock(&idev->lock);
idev->rs_interval = rfc3315_s14_backoff_update(
- idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
+ idev->rs_interval,
+ READ_ONCE(idev->cnf.rtr_solicit_max_interval));
/* The wait after the last probe can be shorter */
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
- idev->cnf.rtr_solicits) ?
- idev->cnf.rtr_solicit_delay :
+ READ_ONCE(idev->cnf.rtr_solicits)) ?
+ READ_ONCE(idev->cnf.rtr_solicit_delay) :
idev->rs_interval);
} else {
/*
@@ -4006,24 +4067,25 @@ put:
*/
static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
{
- unsigned long rand_num;
struct inet6_dev *idev = ifp->idev;
+ unsigned long rand_num;
u64 nonce;
if (ifp->flags & IFA_F_OPTIMISTIC)
rand_num = 0;
else
- rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1);
+ rand_num = get_random_u32_below(
+ READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1);
nonce = 0;
- if (idev->cnf.enhanced_dad ||
- dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
+ if (READ_ONCE(idev->cnf.enhanced_dad) ||
+ READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) {
do
get_random_bytes(&nonce, 6);
while (nonce == 0);
}
ifp->dad_nonce = nonce;
- ifp->dad_probes = idev->cnf.dad_transmits;
+ ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits);
addrconf_mod_dad_work(ifp, rand_num);
}
@@ -4043,8 +4105,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
net = dev_net(dev);
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
- (net->ipv6.devconf_all->accept_dad < 1 &&
- idev->cnf.accept_dad < 1) ||
+ (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 &&
+ READ_ONCE(idev->cnf.accept_dad) < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
bool send_na = false;
@@ -4136,8 +4198,8 @@ static void addrconf_dad_work(struct work_struct *w)
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;
- if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
- idev->cnf.accept_dad > 1) &&
+ if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->accept_dad) > 1 ||
+ READ_ONCE(idev->cnf.accept_dad) > 1) &&
!idev->cnf.disable_ipv6 &&
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
struct in6_addr addr;
@@ -4148,7 +4210,7 @@ static void addrconf_dad_work(struct work_struct *w)
if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
ipv6_addr_equal(&ifp->addr, &addr)) {
/* DAD failed for link-local based on MAC */
- idev->cnf.disable_ipv6 = 1;
+ WRITE_ONCE(idev->cnf.disable_ipv6, 1);
pr_info("%s: IPv6 being disabled!\n",
ifp->idev->dev->name);
@@ -4262,7 +4324,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
send_rs = send_mld &&
ipv6_accept_ra(ifp->idev) &&
- ifp->idev->cnf.rtr_solicits != 0 &&
+ READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
(dev->flags & IFF_LOOPBACK) == 0 &&
(dev->type != ARPHRD_TUNNEL) &&
!netif_is_team_port(dev);
@@ -4276,8 +4338,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
/* send unsolicited NA if enabled */
if (send_na &&
- (ifp->idev->cnf.ndisc_notify ||
- dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
+ (READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
+ READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
/*router=*/ !!ifp->idev->cnf.forwarding,
/*solicited=*/ false, /*override=*/ true,
@@ -4297,7 +4359,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
write_lock_bh(&ifp->idev->lock);
spin_lock(&ifp->lock);
ifp->idev->rs_interval = rfc3315_s14_backoff_init(
- ifp->idev->cnf.rtr_solicit_interval);
+ READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
ifp->idev->rs_probes = 1;
ifp->idev->if_flags |= IF_RS_SENT;
addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
@@ -4577,9 +4639,7 @@ restart:
!ifp->regen_count && ifp->ifpub) {
/* This is a non-regenerated temporary addr. */
- unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
- ifp->idev->cnf.dad_transmits *
- max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
+ unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev);
if (age + regen_advance >= ifp->prefered_lft) {
struct inet6_ifaddr *ifpub = ifp->ifpub;
@@ -4741,6 +4801,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
unsigned long expires, u32 flags,
bool modify_peer)
{
+ struct fib6_table *table;
struct fib6_info *f6i;
u32 prio;
@@ -4761,10 +4822,18 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
} else {
- if (!expires)
+ table = f6i->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!(flags & RTF_EXPIRES)) {
fib6_clean_expires(f6i);
- else
+ fib6_remove_gc_list(f6i);
+ } else {
fib6_set_expires(f6i, expires);
+ fib6_add_gc_list(f6i);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
fib6_info_release(f6i);
}
@@ -5227,9 +5296,9 @@ next:
fillargs->event = RTM_GETMULTICAST;
/* multicast address */
- for (ifmca = rtnl_dereference(idev->mc_list);
+ for (ifmca = rcu_dereference(idev->mc_list);
ifmca;
- ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
+ ifmca = rcu_dereference(ifmca->next), ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
@@ -5241,8 +5310,8 @@ next:
case ANYCAST_ADDR:
fillargs->event = RTM_GETANYCAST;
/* anycast address */
- for (ifaca = idev->ac_list; ifaca;
- ifaca = ifaca->aca_next, ip_idx++) {
+ for (ifaca = rcu_dereference(idev->ac_list); ifaca;
+ ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
@@ -5337,6 +5406,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
s_idx = idx = cb->args[1];
s_ip_idx = cb->args[2];
+ rcu_read_lock();
if (cb->strict_check) {
err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
skb->sk, cb);
@@ -5361,8 +5431,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
}
}
- rcu_read_lock();
- cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
+ cb->seq = inet6_base_seq(tgt_net);
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &tgt_net->dev_index_head[h];
@@ -5383,10 +5452,10 @@ cont:
}
}
done:
- rcu_read_unlock();
cb->args[0] = h;
cb->args[1] = idx;
put_tgt_net:
+ rcu_read_unlock();
if (fillargs.netnsid >= 0)
put_net(tgt_net);
@@ -5494,9 +5563,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
}
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
- if (!addr)
- return -EINVAL;
-
+ if (!addr) {
+ err = -EINVAL;
+ goto errout;
+ }
ifm = nlmsg_data(nlh);
if (ifm->ifa_index)
dev = dev_get_by_index(tgt_net, ifm->ifa_index);
@@ -5562,87 +5632,97 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
}
-static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
- __s32 *array, int bytes)
+static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
+ __s32 *array, int bytes)
{
BUG_ON(bytes < (DEVCONF_MAX * 4));
memset(array, 0, bytes);
- array[DEVCONF_FORWARDING] = cnf->forwarding;
- array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
- array[DEVCONF_MTU6] = cnf->mtu6;
- array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
- array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
- array[DEVCONF_AUTOCONF] = cnf->autoconf;
- array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
- array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
+ array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding);
+ array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit);
+ array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6);
+ array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra);
+ array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects);
+ array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf);
+ array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits);
+ array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits);
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval));
array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_solicit_max_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval));
array[DEVCONF_RTR_SOLICIT_DELAY] =
- jiffies_to_msecs(cnf->rtr_solicit_delay);
- array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay));
+ array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version);
array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval));
array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
- jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
- array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
- array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
- array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
- array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
- array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
- array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
- array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
- array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
- array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
- array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
+ jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval));
+ array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr);
+ array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft);
+ array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft);
+ array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry);
+ array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor);
+ array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses);
+ array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr);
+ array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric);
+ array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] =
+ READ_ONCE(cnf->accept_ra_min_hop_limit);
+ array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo);
#ifdef CONFIG_IPV6_ROUTER_PREF
- array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
+ array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref);
array[DEVCONF_RTR_PROBE_INTERVAL] =
- jiffies_to_msecs(cnf->rtr_probe_interval);
+ jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval));
#ifdef CONFIG_IPV6_ROUTE_INFO
- array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
- array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_min_plen);
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] =
+ READ_ONCE(cnf->accept_ra_rt_info_max_plen);
#endif
#endif
- array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
- array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
+ array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp);
+ array[DEVCONF_ACCEPT_SOURCE_ROUTE] =
+ READ_ONCE(cnf->accept_source_route);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
- array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
- array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
+ array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad);
+ array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic);
#endif
#ifdef CONFIG_IPV6_MROUTE
array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
#endif
- array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
- array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
- array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
- array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
- array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
- array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
- array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
- array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
+ array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6);
+ array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad);
+ array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao);
+ array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify);
+ array[DEVCONF_SUPPRESS_FRAG_NDISC] =
+ READ_ONCE(cnf->suppress_frag_ndisc);
+ array[DEVCONF_ACCEPT_RA_FROM_LOCAL] =
+ READ_ONCE(cnf->accept_ra_from_local);
+ array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu);
+ array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
+ READ_ONCE(cnf->ignore_routes_with_linkdown);
/* we omit DEVCONF_STABLE_SECRET for now */
- array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
- array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
- array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
- array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
- array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
+ array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only);
+ array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
+ READ_ONCE(cnf->drop_unicast_in_l2_multicast);
+ array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na);
+ array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down);
+ array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled);
#ifdef CONFIG_IPV6_SEG6_HMAC
- array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
+ array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac);
#endif
- array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
- array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
- array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
- array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
- array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
- array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
- array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
- array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
- array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
- array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
- array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
+ array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad);
+ array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode);
+ array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy);
+ array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass);
+ array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled);
+ array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled);
+ array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id);
+ array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide);
+ array[DEVCONF_NDISC_EVICT_NOCARRIER] =
+ READ_ONCE(cnf->ndisc_evict_nocarrier);
+ array[DEVCONF_ACCEPT_UNTRACKED_NA] =
+ READ_ONCE(cnf->accept_untracked_na);
+ array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
}
static inline size_t inet6_ifla6_size(void)
@@ -5722,13 +5802,14 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
u32 ext_filter_mask)
{
- struct nlattr *nla;
struct ifla_cacheinfo ci;
+ struct nlattr *nla;
+ u32 ra_mtu;
- if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+ if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags)))
goto nla_put_failure;
ci.max_reasm_len = IPV6_MAXPLEN;
- ci.tstamp = cstamp_delta(idev->tstamp);
+ ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp));
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
@@ -5760,11 +5841,12 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
read_unlock_bh(&idev->lock);
- if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
+ if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE,
+ READ_ONCE(idev->cnf.addr_gen_mode)))
goto nla_put_failure;
- if (idev->ra_mtu &&
- nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
+ ra_mtu = READ_ONCE(idev->ra_mtu);
+ if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu))
goto nla_put_failure;
return 0;
@@ -5826,7 +5908,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
return -EINVAL;
}
- if (idev->cnf.rtr_solicits == 0) {
+ if (READ_ONCE(idev->cnf.rtr_solicits) == 0) {
NL_SET_ERR_MSG(extack,
"Router solicitation is disabled on device");
return -EINVAL;
@@ -5859,7 +5941,7 @@ update_lft:
if (update_rs) {
idev->if_flags |= IF_RS_SENT;
idev->rs_interval = rfc3315_s14_backoff_init(
- idev->cnf.rtr_solicit_interval);
+ READ_ONCE(idev->cnf.rtr_solicit_interval));
idev->rs_probes = 1;
addrconf_mod_rs_timer(idev, idev->rs_interval);
}
@@ -5965,7 +6047,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
- idev->cnf.addr_gen_mode = mode;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, mode);
}
return 0;
@@ -5977,6 +6059,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
struct net_device *dev = idev->dev;
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
+ int ifindex, iflink;
void *protoinfo;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
@@ -5987,18 +6070,20 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
hdr->ifi_family = AF_INET6;
hdr->__ifi_pad = 0;
hdr->ifi_type = dev->type;
- hdr->ifi_index = dev->ifindex;
+ ifindex = READ_ONCE(dev->ifindex);
+ hdr->ifi_index = ifindex;
hdr->ifi_flags = dev_get_flags(dev);
hdr->ifi_change = 0;
+ iflink = dev_get_iflink(dev);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
- nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
- (dev->ifindex != dev_get_iflink(dev) &&
- nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+ nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
+ (ifindex != iflink &&
+ nla_put_u32(skb, IFLA_LINK, iflink)) ||
nla_put_u8(skb, IFLA_OPERSTATE,
- netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
+ netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
goto nla_put_failure;
protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
if (!protoinfo)
@@ -6044,50 +6129,39 @@ static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx = 0, s_idx;
+ struct {
+ unsigned long ifindex;
+ } *ctx = (void *)cb->ctx;
struct net_device *dev;
struct inet6_dev *idev;
- struct hlist_head *head;
+ int err;
/* only requests using strict checking can pass data to
* influence the dump
*/
if (cb->strict_check) {
- int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
+ err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
if (err < 0)
return err;
}
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
+ err = 0;
rcu_read_lock();
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- idev = __in6_dev_get(dev);
- if (!idev)
- goto cont;
- if (inet6_fill_ifinfo(skb, idev,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWLINK, NLM_F_MULTI) < 0)
- goto out;
-cont:
- idx++;
- }
+ for_each_netdev_dump(net, dev, ctx->ifindex) {
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ continue;
+ err = inet6_fill_ifinfo(skb, idev,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWLINK, NLM_F_MULTI);
+ if (err < 0)
+ break;
}
-out:
rcu_read_unlock();
- cb->args[1] = idx;
- cb->args[0] = h;
- return skb->len;
+ return err;
}
void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
@@ -6308,7 +6382,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
- idev->cnf.disable_ipv6 = newf;
+
+ WRITE_ONCE(idev->cnf.disable_ipv6, newf);
if (changed)
dev_disable_change(idev);
}
@@ -6317,23 +6392,22 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
{
- struct net *net;
+ struct net *net = (struct net *)table->extra2;
int old;
+ if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+ WRITE_ONCE(*p, newf);
+ return 0;
+ }
+
if (!rtnl_trylock())
return restart_syscall();
- net = (struct net *)table->extra2;
old = *p;
- *p = newf;
-
- if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
- rtnl_unlock();
- return 0;
- }
+ WRITE_ONCE(*p, newf);
if (p == &net->ipv6.devconf_all->disable_ipv6) {
- net->ipv6.devconf_dflt->disable_ipv6 = newf;
+ WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
addrconf_disable_change(net, newf);
} else if ((!newf) ^ (!old))
dev_disable_change((struct inet6_dev *)table->extra1);
@@ -6444,24 +6518,25 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
}
if (idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
addrconf_init_auto_addrs(idev->dev);
}
} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
struct net_device *dev;
- net->ipv6.devconf_dflt->addr_gen_mode = new_val;
+ WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val);
for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev &&
idev->cnf.addr_gen_mode != new_val) {
- idev->cnf.addr_gen_mode = new_val;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ new_val);
addrconf_init_auto_addrs(idev->dev);
}
}
}
- *((u32 *)ctl->data) = new_val;
+ WRITE_ONCE(*((u32 *)ctl->data), new_val);
}
out:
@@ -6520,14 +6595,15 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
struct inet6_dev *idev = __in6_dev_get(dev);
if (idev) {
- idev->cnf.addr_gen_mode =
- IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
}
} else {
struct inet6_dev *idev = ctl->extra1;
- idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ WRITE_ONCE(idev->cnf.addr_gen_mode,
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
}
out:
@@ -6607,20 +6683,19 @@ void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
static
int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
{
+ struct net *net = (struct net *)ctl->extra2;
struct inet6_dev *idev;
- struct net *net;
-
- if (!rtnl_trylock())
- return restart_syscall();
- *valp = val;
-
- net = (struct net *)ctl->extra2;
if (valp == &net->ipv6.devconf_dflt->disable_policy) {
- rtnl_unlock();
+ WRITE_ONCE(*valp, val);
return 0;
}
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ WRITE_ONCE(*valp, val);
+
if (valp == &net->ipv6.devconf_all->disable_policy) {
struct net_device *dev;
@@ -6790,6 +6865,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "regen_min_advance",
+ .data = &ipv6_devconf.regen_min_advance,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "regen_max_retry",
.data = &ipv6_devconf.regen_max_retry,
.maxlen = sizeof(int),
@@ -7349,7 +7431,8 @@ int __init addrconf_init(void)
if (err < 0)
goto out_addrlabel;
- addrconf_wq = create_workqueue("ipv6_addrconf");
+ /* All work items using addrconf_wq need to lock rtnl. */
+ addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
if (!addrconf_wq) {
err = -ENOMEM;
goto out_nowq;
@@ -7372,7 +7455,7 @@ int __init addrconf_init(void)
rtnl_af_register(&inet6_ops);
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
- NULL, inet6_dump_ifinfo, 0);
+ NULL, inet6_dump_ifinfo, RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
@@ -7400,7 +7483,8 @@ int __init addrconf_init(void)
err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
inet6_netconf_get_devconf,
inet6_netconf_dump_devconf,
- RTNL_FLAG_DOIT_UNLOCKED);
+ RTNL_FLAG_DOIT_UNLOCKED |
+ RTNL_FLAG_DUMP_UNLOCKED);
if (err < 0)
goto errout;
err = ipv6_addr_label_rtnl_register();
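
The bulk of the addrconf.c changes above convert plain loads and stores of ipv6_devconf fields into READ_ONCE()/WRITE_ONCE() pairs, so sysctl writers and fast-path readers can race without the compiler tearing, fusing or re-reading the accesses. Below is a minimal userspace sketch of the same pattern, assuming C11 relaxed atomics as a stand-in for the kernel macros; struct my_devconf and the thread names are illustrative, not kernel API. Build with cc -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct my_devconf {
        _Atomic int ndisc_notify;       /* tunable read without a lock */
};

static struct my_devconf cnf;

static void *writer(void *arg)
{
        (void)arg;
        /* WRITE_ONCE() analogue: one well-defined store, never torn. */
        atomic_store_explicit(&cnf.ndisc_notify, 1, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t t;
        int v;

        pthread_create(&t, NULL, writer, NULL);
        /* READ_ONCE() analogue: the compiler must emit exactly one load;
         * we observe either the old or the new value, nothing in between. */
        v = atomic_load_explicit(&cnf.ndisc_notify, memory_order_relaxed);
        printf("ndisc_notify=%d\n", v);
        pthread_join(t, NULL);
        return 0;
}

Relaxed ordering suffices here because each tunable is independent; the patch only needs single-copy atomicity, not ordering between fields.
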
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 507a8353a6bd..c008d21925d7 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -220,19 +220,26 @@ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
EXPORT_SYMBOL_GPL(ipv6_stub);
/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
-const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
+const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_LOOPBACK_INIT;
EXPORT_SYMBOL(in6addr_loopback);
-const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_ANY_INIT;
EXPORT_SYMBOL(in6addr_any);
-const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
EXPORT_SYMBOL(in6addr_linklocal_allnodes);
-const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
EXPORT_SYMBOL(in6addr_linklocal_allrouters);
-const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
EXPORT_SYMBOL(in6addr_interfacelocal_allnodes);
-const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
EXPORT_SYMBOL(in6addr_interfacelocal_allrouters);
-const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
+const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8)
+ = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
EXPORT_SYMBOL(in6addr_sitelocal_allrouters);
static void snmp6_free_dev(struct inet6_dev *idev)
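
The __aligned(BITS_PER_LONG/8) annotations above place the well-known IPv6 address constants on a native word boundary, so address helpers can test them with aligned unsigned long loads instead of byte-wise comparison. A small userspace sketch of why the alignment matters, assuming GCC-style attributes and a 64-bit long; struct in6_addr_s is a stand-in for struct in6_addr.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct in6_addr; BITS_PER_LONG/8 is sizeof(long). */
struct in6_addr_s { unsigned char s6_addr[16]; };

static const struct in6_addr_s any
        __attribute__((aligned(sizeof(long)))) = { { 0 } };

int main(void)
{
        unsigned long w[2];     /* assumes 64-bit long: 2 x 8 bytes */

        printf("misalignment: %lu\n",
               (unsigned long)((uintptr_t)&any % sizeof(long)));
        /* With alignment guaranteed, word-sized loads are a valid way
         * to test the 128-bit address for zero. */
        memcpy(w, any.s6_addr, sizeof(w));
        printf("is any: %d\n", (w[0] | w[1]) == 0);
        return 0;
}
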
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 13a1833a4df5..8041dc181bd4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -64,6 +64,7 @@
#include <net/xfrm.h>
#include <net/ioam6.h>
#include <net/rawv6.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/mroute6.h>
@@ -199,6 +200,9 @@ lookup_protocol:
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = SK_CAN_REUSE;
+ if (INET_PROTOSW_ICSK & answer_flags)
+ inet_init_csk_locks(sk);
+
inet = inet_sk(sk);
inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
@@ -733,7 +737,7 @@ const struct proto_ops inet6_dgram_ops = {
.recvmsg = inet6_recvmsg, /* retpoline's sake */
.read_skb = udp_read_skb,
.mmap = sock_no_mmap,
- .set_peek_off = sk_set_peek_off,
+ .set_peek_off = udp_set_peek_off,
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2016e90e6e1d..eb474f0987ae 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -800,5 +800,6 @@ static void __exit ah6_fini(void)
module_init(ah6_init);
module_exit(ah6_fini);
+MODULE_DESCRIPTION("IPv6 AH transformation helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index bb17f484ee2c..0f2506e35359 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -296,7 +296,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr)) {
aca->aca_users++;
err = 0;
@@ -317,13 +318,13 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
goto out;
}
- aca->aca_next = idev->ac_list;
- idev->ac_list = aca;
-
/* Hold this for addrconf_join_solict() below before we unlock,
* it is already exposed via idev->ac_list.
*/
aca_get(aca);
+ aca->aca_next = idev->ac_list;
+ rcu_assign_pointer(idev->ac_list, aca);
+
write_unlock_bh(&idev->lock);
ipv6_add_acaddr_hash(net, aca);
@@ -350,7 +351,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_lock_bh(&idev->lock);
prev_aca = NULL;
- for (aca = idev->ac_list; aca; aca = aca->aca_next) {
+ for (aca = rtnl_dereference(idev->ac_list); aca;
+ aca = rtnl_dereference(aca->aca_next)) {
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
prev_aca = aca;
@@ -364,9 +366,9 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
return 0;
}
if (prev_aca)
- prev_aca->aca_next = aca->aca_next;
+ rcu_assign_pointer(prev_aca->aca_next, aca->aca_next);
else
- idev->ac_list = aca->aca_next;
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
@@ -392,8 +394,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
struct ifacaddr6 *aca;
write_lock_bh(&idev->lock);
- while ((aca = idev->ac_list) != NULL) {
- idev->ac_list = aca->aca_next;
+ while ((aca = rtnl_dereference(idev->ac_list)) != NULL) {
+ rcu_assign_pointer(idev->ac_list, aca->aca_next);
write_unlock_bh(&idev->lock);
ipv6_del_acaddr_hash(aca);
@@ -420,11 +422,10 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
idev = __in6_dev_get(dev);
if (idev) {
- read_lock_bh(&idev->lock);
- for (aca = idev->ac_list; aca; aca = aca->aca_next)
+ for (aca = rcu_dereference(idev->ac_list); aca;
+ aca = rcu_dereference(aca->aca_next))
if (ipv6_addr_equal(&aca->aca_addr, addr))
break;
- read_unlock_bh(&idev->lock);
return aca != NULL;
}
return false;
@@ -477,30 +478,25 @@ bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
struct ac6_iter_state {
struct seq_net_private p;
struct net_device *dev;
- struct inet6_dev *idev;
};
#define ac6_seq_private(seq) ((struct ac6_iter_state *)(seq)->private)
static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
{
- struct ifacaddr6 *im = NULL;
struct ac6_iter_state *state = ac6_seq_private(seq);
struct net *net = seq_file_net(seq);
+ struct ifacaddr6 *im = NULL;
- state->idev = NULL;
for_each_netdev_rcu(net, state->dev) {
struct inet6_dev *idev;
+
idev = __in6_dev_get(state->dev);
if (!idev)
continue;
- read_lock_bh(&idev->lock);
- im = idev->ac_list;
- if (im) {
- state->idev = idev;
+ im = rcu_dereference(idev->ac_list);
+ if (im)
break;
- }
- read_unlock_bh(&idev->lock);
}
return im;
}
@@ -508,22 +504,17 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im)
{
struct ac6_iter_state *state = ac6_seq_private(seq);
+ struct inet6_dev *idev;
- im = im->aca_next;
+ im = rcu_dereference(im->aca_next);
while (!im) {
- if (likely(state->idev != NULL))
- read_unlock_bh(&state->idev->lock);
-
state->dev = next_net_device_rcu(state->dev);
- if (!state->dev) {
- state->idev = NULL;
+ if (!state->dev)
break;
- }
- state->idev = __in6_dev_get(state->dev);
- if (!state->idev)
+ idev = __in6_dev_get(state->dev);
+ if (!idev)
continue;
- read_lock_bh(&state->idev->lock);
- im = state->idev->ac_list;
+ im = rcu_dereference(idev->ac_list);
}
return im;
}
@@ -555,12 +546,6 @@ static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ac6_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
- struct ac6_iter_state *state = ac6_seq_private(seq);
-
- if (likely(state->idev != NULL)) {
- read_unlock_bh(&state->idev->lock);
- state->idev = NULL;
- }
rcu_read_unlock();
}
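
The anycast changes above replace the idev->lock read side with RCU: writers publish new ifacaddr6 entries with rcu_assign_pointer(), and readers walk the list under rcu_dereference(). Below is a minimal userspace sketch of that publish/subscribe pairing, using C11 release/acquire as a conservative stand-in for the kernel primitives; struct aca and the list layout are simplified stand-ins.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct aca {
        int users;
        struct aca *next;
};

static _Atomic(struct aca *) ac_list;   /* idev->ac_list stand-in */

static void publish(struct aca *aca)
{
        aca->users = 1;         /* initialise every field first ... */
        aca->next = atomic_load_explicit(&ac_list, memory_order_relaxed);
        /* ... then expose it: rcu_assign_pointer() analogue. The release
         * store orders the initialisation before the pointer update. */
        atomic_store_explicit(&ac_list, aca, memory_order_release);
}

static void *reader(void *arg)
{
        struct aca *a;

        (void)arg;
        /* rcu_dereference() analogue (acquire is a conservative stand-in
         * for the kernel's dependency ordering). */
        for (a = atomic_load_explicit(&ac_list, memory_order_acquire);
             a; a = a->next)
                printf("users=%d\n", a->users);
        return NULL;
}

int main(void)
{
        static struct aca node;
        pthread_t t;

        publish(&node);
        pthread_create(&t, NULL, reader, NULL);
        pthread_join(t, NULL);
        return 0;
}
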
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 1578ed9e97d8..eb8ee1e9373a 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -657,11 +657,8 @@ static int calipso_map_cat_ntoh(const struct calipso_doi *doi_def,
net_clen_bits,
spot + 1,
1);
- if (spot < 0) {
- if (spot == -2)
- return -EFAULT;
+ if (spot < 0)
return 0;
- }
ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
spot,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 2cc1a45742d8..6e6efe026cdc 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -1301,5 +1301,6 @@ static void __exit esp6_fini(void)
module_init(esp6_init);
module_exit(esp6_fini);
+MODULE_DESCRIPTION("IPv6 ESP transformation helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 4952ae792450..25a5b394481b 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -50,6 +50,7 @@
#endif
#include <net/rpl.h>
#include <linux/ioam6.h>
+#include <linux/ioam6_genl.h>
#include <net/ioam6.h>
#include <net/dst_metadata.h>
@@ -177,6 +178,8 @@ static bool ip6_parse_tlv(bool hopbyhop,
case IPV6_TLV_IOAM:
if (!ipv6_hop_ioam(skb, off))
return false;
+
+ nh = skb_network_header(skb);
break;
case IPV6_TLV_JUMBO:
if (!ipv6_hop_jumbo(skb, off))
@@ -376,9 +379,8 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
idev = __in6_dev_get(skb->dev);
- accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
- if (accept_seg6 > idev->cnf.seg6_enabled)
- accept_seg6 = idev->cnf.seg6_enabled;
+ accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled),
+ READ_ONCE(idev->cnf.seg6_enabled));
if (!accept_seg6) {
kfree_skb(skb);
@@ -652,10 +654,13 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct ipv6_rt_hdr *hdr;
struct rt0_hdr *rthdr;
struct net *net = dev_net(skb->dev);
- int accept_source_route = net->ipv6.devconf_all->accept_source_route;
+ int accept_source_route;
+
+ accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route);
- if (idev && accept_source_route > idev->cnf.accept_source_route)
- accept_source_route = idev->cnf.accept_source_route;
+ if (idev)
+ accept_source_route = min(accept_source_route,
+ READ_ONCE(idev->cnf.accept_source_route));
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -799,7 +804,7 @@ looped_back:
ip6_route_input(skb);
if (skb_dst(skb)->error) {
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
}
@@ -816,7 +821,7 @@ looped_back:
goto looped_back;
}
- skb_push(skb, skb->data - skb_network_header(skb));
+ skb_push(skb, -skb_network_offset(skb));
dst_input(skb);
return -1;
@@ -916,7 +921,7 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
goto drop;
/* Ignore if IOAM is not enabled on ingress */
- if (!__in6_dev_get(skb->dev)->cnf.ioam6_enabled)
+ if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled))
goto ignore;
/* Truncated Option header */
@@ -943,7 +948,18 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
if (!skb_valid_dst(skb))
ip6_route_input(skb);
+ /* About to mangle packet header */
+ if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len))
+ goto drop;
+
+ /* Trace pointer may have changed */
+ trace = (struct ioam6_trace_hdr *)(skb_network_header(skb)
+ + optoff + sizeof(*hdr));
+
ioam6_fill_trace_data(skb, ns, trace, true);
+
+ ioam6_event(IOAM6_EVENT_TRACE, dev_net(skb->dev),
+ GFP_ATOMIC, (void *)trace, hdr->opt_len - 2);
break;
default:
break;
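
The seg6_enabled and accept_source_route hunks above rewrite an open-coded comparison as min(): the effective policy is the stricter of the conf/all and per-device sysctls. A tiny sketch of that combination rule, with illustrative values:

#include <stdio.h>

/* Effective value is the stricter of conf/all and conf/<dev>. */
static int effective(int all, int dev)
{
        return all < dev ? all : dev;   /* min(), as in the hunks above */
}

int main(void)
{
        printf("%d\n", effective(1, 0));        /* device disables -> 0 */
        printf("%d\n", effective(0, 1));        /* 'all' disables  -> 0 */
        printf("%d\n", effective(1, 1));        /* both allow      -> 1 */
        return 0;
}
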
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b0e8d278e8a9..2e81383b663b 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -14,6 +14,7 @@
#include <linux/random.h>
#include <net/addrconf.h>
+#include <net/hotdata.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@@ -25,16 +26,13 @@ u32 inet6_ehashfn(const struct net *net,
const struct in6_addr *laddr, const u16 lport,
const struct in6_addr *faddr, const __be16 fport)
{
- static u32 inet6_ehash_secret __read_mostly;
- static u32 ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
- net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ net_get_random_once(&tcp_ipv6_hash_secret, sizeof(tcp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
- fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+ fhash = __ipv6_addr_jhash(faddr, tcp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
inet6_ehash_secret + net_hash_mix(net));
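
inet6_ehashfn() now shares its secrets via net_hotdata instead of function-local statics, but the lazy one-time initialisation through net_get_random_once() stays. A userspace sketch of that init-on-first-use idea, assuming pthread_once() as a stand-in for the kernel's once machinery; the toy ehashfn() mix is illustrative, not jhash.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned int hash_secret;
static pthread_once_t once = PTHREAD_ONCE_INIT;

static void init_secret(void)
{
        hash_secret = (unsigned int)rand();     /* get_random_bytes() stand-in */
}

static unsigned int ehashfn(unsigned int laddr)
{
        /* net_get_random_once() analogue: first caller initialises,
         * every later call is a cheap no-op. */
        pthread_once(&once, init_secret);
        return laddr ^ hash_secret;             /* toy mix, not jhash */
}

int main(void)
{
        printf("%u %u\n", ehashfn(42), ehashfn(42));
        return 0;
}
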
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index 571f0e4d9cf3..08c929513065 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -612,6 +612,68 @@ static const struct genl_ops ioam6_genl_ops[] = {
},
};
+#define IOAM6_GENL_EV_GRP_OFFSET 0
+
+static const struct genl_multicast_group ioam6_mcgrps[] = {
+ [IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
+ .flags = GENL_MCAST_CAP_NET_ADMIN },
+};
+
+static int ioam6_event_put_trace(struct sk_buff *skb,
+ struct ioam6_trace_hdr *trace,
+ unsigned int len)
+{
+ if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
+ be16_to_cpu(trace->namespace_id)) ||
+ nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
+ nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
+ be32_to_cpu(trace->type_be32)) ||
+ nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
+ len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
+ trace->data + trace->remlen * 4))
+ return 1;
+
+ return 0;
+}
+
+void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
+ void *opt, unsigned int opt_len)
+{
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+
+ if (!genl_has_listeners(&ioam6_genl_family, net,
+ IOAM6_GENL_EV_GRP_OFFSET))
+ return;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!skb)
+ return;
+
+ nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
+ if (!nlh)
+ goto nla_put_failure;
+
+ switch (type) {
+ case IOAM6_EVENT_UNSPEC:
+ WARN_ON_ONCE(1);
+ break;
+ case IOAM6_EVENT_TRACE:
+ if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
+ opt_len))
+ goto nla_put_failure;
+ break;
+ }
+
+ genlmsg_end(skb, nlh);
+ genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
+ IOAM6_GENL_EV_GRP_OFFSET, gfp);
+ return;
+
+nla_put_failure:
+ nlmsg_free(skb);
+}
+
static struct genl_family ioam6_genl_family __ro_after_init = {
.name = IOAM6_GENL_NAME,
.version = IOAM6_GENL_VERSION,
@@ -620,6 +682,8 @@ static struct genl_family ioam6_genl_family __ro_after_init = {
.ops = ioam6_genl_ops,
.n_ops = ARRAY_SIZE(ioam6_genl_ops),
.resv_start_op = IOAM6_CMD_NS_SET_SCHEMA + 1,
+ .mcgrps = ioam6_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(ioam6_mcgrps),
.module = THIS_MODULE,
};
@@ -663,7 +727,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -671,7 +735,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw16 = IOAM6_U16_UNAVAILABLE;
else
- raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id;
+ raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);
*(__be16 *)data = cpu_to_be16(raw16);
data += sizeof(__be16);
@@ -758,7 +822,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (!skb->dev)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
@@ -766,7 +830,7 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
raw32 = IOAM6_U32_UNAVAILABLE;
else
- raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide;
+ raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);
*(__be32 *)data = cpu_to_be32(raw32);
data += sizeof(__be32);
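
The new ioam6_event() above follows a common netlink-event shape: check for listeners before doing any allocation, build the message, and only then multicast it. A userspace sketch of that cheap-guard ordering; listeners and emit_event() are illustrative stand-ins for genl_has_listeners() and genlmsg_multicast_netns().

#include <stdio.h>
#include <stdlib.h>

static int listeners;   /* genl_has_listeners() stand-in */

static void emit_event(const char *trace)
{
        char *msg;

        if (!listeners)         /* guard first: no allocation, no work */
                return;
        msg = malloc(64);       /* nlmsg_new() stand-in */
        if (!msg)
                return;
        snprintf(msg, 64, "trace=%s", trace);
        puts(msg);              /* multicast stand-in */
        free(msg);
}

int main(void)
{
        emit_event("ignored");  /* nobody subscribed: returns immediately */
        listeners = 1;
        emit_event("node-data");
        return 0;
}
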
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 38a0348b1d17..5c558dc1c683 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -160,6 +160,8 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh)
INIT_LIST_HEAD(&f6i->fib6_siblings);
refcount_set(&f6i->fib6_ref, 1);
+ INIT_HLIST_NODE(&f6i->gc_link);
+
return f6i;
}
@@ -246,6 +248,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
net->ipv6.fib6_null_entry);
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&table->tb6_peers);
+ INIT_HLIST_HEAD(&table->tb6_gc_hlist);
}
return table;
@@ -617,8 +620,11 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct rt6_rtnl_dump_arg arg = { .filter.dump_exceptions = true,
- .filter.dump_routes = true };
+ struct rt6_rtnl_dump_arg arg = {
+ .filter.dump_exceptions = true,
+ .filter.dump_routes = true,
+ .filter.rtnl_held = true,
+ };
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
@@ -1055,6 +1061,9 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
lockdep_is_held(&table->tb6_lock));
}
}
+
+ fib6_clean_expires(rt);
+ fib6_remove_gc_list(rt);
}
/*
@@ -1115,10 +1124,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
rt->fib6_nsiblings = 0;
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
- if (!(rt->fib6_flags & RTF_EXPIRES))
+ if (!(rt->fib6_flags & RTF_EXPIRES)) {
fib6_clean_expires(iter);
- else
+ fib6_remove_gc_list(iter);
+ } else {
fib6_set_expires(iter, rt->expires);
+ fib6_add_gc_list(iter);
+ }
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
@@ -1477,6 +1489,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (rt->nh)
list_add(&rt->nh_list, &rt->nh->f6i_list);
__fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
+
+ if (rt->fib6_flags & RTF_EXPIRES)
+ fib6_add_gc_list(rt);
+
fib6_start_gc(info->nl_net, rt);
}
@@ -2280,9 +2296,8 @@ static void fib6_flush_trees(struct net *net)
* Garbage collection
*/
-static int fib6_age(struct fib6_info *rt, void *arg)
+static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
{
- struct fib6_gc_args *gc_args = arg;
unsigned long now = jiffies;
/*
@@ -2307,6 +2322,42 @@ static int fib6_age(struct fib6_info *rt, void *arg)
return 0;
}
+static void fib6_gc_table(struct net *net,
+ struct fib6_table *tb6,
+ struct fib6_gc_args *gc_args)
+{
+ struct fib6_info *rt;
+ struct hlist_node *n;
+ struct nl_info info = {
+ .nl_net = net,
+ .skip_notify = false,
+ };
+
+ hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link)
+ if (fib6_age(rt, gc_args) == -1)
+ fib6_del(rt, &info);
+}
+
+static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ spin_lock_bh(&table->tb6_lock);
+
+ fib6_gc_table(net, table, gc_args);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ }
+ rcu_read_unlock();
+}
+
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
struct fib6_gc_args gc_args;
@@ -2322,7 +2373,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = 0;
- fib6_clean_all(net, fib6_age, &gc_args);
+ fib6_gc_all(net, &gc_args);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
@@ -2382,6 +2433,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_main_tbl->tb6_gc_hlist);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -2394,6 +2446,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
+ INIT_HLIST_HEAD(&net->ipv6.fib6_local_tbl->tb6_gc_hlist);
#endif
fib6_tables_init(net);
@@ -2443,10 +2496,8 @@ int __init fib6_init(void)
{
int ret = -ENOMEM;
- fib6_node_kmem = kmem_cache_create("fib6_nodes",
- sizeof(struct fib6_node), 0,
- SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
- NULL);
+ fib6_node_kmem = KMEM_CACHE(fib6_node,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
if (!fib6_node_kmem)
goto out;
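
The ip6_fib.c changes above stop aging every route: only entries carrying RTF_EXPIRES are threaded onto a per-table tb6_gc_hlist via fib6_add_gc_list(), and garbage collection now walks just that list. Below is a minimal userspace sketch of the idea, with struct route standing in for fib6_info and a singly linked list for the hlist.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct route {                  /* fib6_info stand-in */
        const char *name;
        bool has_expiry;        /* RTF_EXPIRES */
        time_t expires;
        struct route *gc_next;  /* gc_link */
};

static struct route *gc_list;   /* tb6_gc_hlist stand-in */

static void add_gc_list(struct route *rt)       /* fib6_add_gc_list() */
{
        rt->gc_next = gc_list;
        gc_list = rt;
}

static void gc_table(time_t now)                /* fib6_gc_table() sketch */
{
        struct route *rt;

        /* Only expiring routes are on this list; permanent routes are
         * never visited, which is the point of the patch. */
        for (rt = gc_list; rt; rt = rt->gc_next)
                if (rt->has_expiry && rt->expires <= now)
                        printf("would delete %s\n", rt->name);
}

int main(void)
{
        time_t now = time(NULL);
        struct route ra = { "ra-default", true, now - 1, NULL };

        add_gc_list(&ra);       /* done when RTF_EXPIRES is set */
        gc_table(now);
        return 0;
}
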
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 070d87abf7c0..5e97e0aa8e07 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1511,6 +1511,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
ip6gre_tnl_init_features(dev);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
@@ -1632,21 +1633,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6gre_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6gre_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6gre_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6gre_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6gre_net_ops = {
.init = ip6gre_init_net,
- .exit_batch = ip6gre_exit_batch_net,
+ .exit_batch_rtnl = ip6gre_exit_batch_rtnl,
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
};
@@ -1903,6 +1902,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
ip6erspan_tnl_link_config(tunnel, 1);
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
cleanup_dst_cache_init:
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index b8378814532c..133610a49da6 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -168,9 +168,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
SKB_DR_SET(reason, NOT_SPECIFIED);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
- !idev || unlikely(idev->cnf.disable_ipv6)) {
+ !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
- if (idev && unlikely(idev->cnf.disable_ipv6))
+ if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
SKB_DR_SET(reason, IPV6DISABLED);
goto drop;
}
@@ -236,7 +236,7 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (!ipv6_addr_is_multicast(&hdr->daddr) &&
(skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) &&
- idev->cnf.drop_unicast_in_l2_multicast) {
+ READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
goto err;
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cca64c7809be..b41e35af69ea 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -419,14 +419,6 @@ static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
return inet_gro_complete(skb, nhoff);
}
-static struct packet_offload ipv6_packet_offload __read_mostly = {
- .type = cpu_to_be16(ETH_P_IPV6),
- .callbacks = {
- .gso_segment = ipv6_gso_segment,
- .gro_receive = ipv6_gro_receive,
- .gro_complete = ipv6_gro_complete,
- },
-};
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@@ -486,7 +478,15 @@ static int __init ipv6_offload_init(void)
if (ipv6_exthdrs_offload_init() < 0)
pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
- dev_add_offload(&ipv6_packet_offload);
+ net_hotdata.ipv6_packet_offload = (struct packet_offload) {
+ .type = cpu_to_be16(ETH_P_IPV6),
+ .callbacks = {
+ .gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = ipv6_gro_complete,
+ },
+ };
+ dev_add_offload(&net_hotdata.ipv6_packet_offload);
inet_add_offload(&sit_offload, IPPROTO_IPV6);
inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a722a43dd668..02eeca5492cd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -234,7 +234,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
- if (unlikely(idev->cnf.disable_ipv6)) {
+ if (unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
return 0;
@@ -501,7 +501,7 @@ int ip6_forward(struct sk_buff *skb)
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
goto error;
if (skb->pkt_type != PACKET_HOST)
@@ -513,8 +513,8 @@ int ip6_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
- if (!net->ipv6.devconf_all->disable_policy &&
- (!idev || !idev->cnf.disable_policy) &&
+ if (!READ_ONCE(net->ipv6.devconf_all->disable_policy) &&
+ (!idev || !READ_ONCE(idev->cnf.disable_policy)) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
@@ -552,7 +552,7 @@ int ip6_forward(struct sk_buff *skb)
}
/* XXX: idev->cnf.proxy_ndp? */
- if (net->ipv6.devconf_all->proxy_ndp &&
+ if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0) {
@@ -1424,11 +1424,11 @@ static int __ip6_append_data(struct sock *sk,
bool zc = false;
u32 tskey = 0;
struct rt6_info *rt = (struct rt6_info *)cork->dst;
+ bool paged, hold_tskey, extra_uref = false;
struct ipv6_txoptions *opt = v6_cork->opt;
int csummode = CHECKSUM_NONE;
unsigned int maxnonfragsize, headersize;
unsigned int wmem_alloc_delta = 0;
- bool paged, extra_uref = false;
skb = skb_peek_tail(queue);
if (!skb) {
@@ -1440,10 +1440,6 @@ static int __ip6_append_data(struct sock *sk,
mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
orig_mtu = mtu;
- if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
- READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
- tskey = atomic_inc_return(&sk->sk_tskey) - 1;
-
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
@@ -1538,6 +1534,11 @@ emsgsize:
flags &= ~MSG_SPLICE_PAGES;
}
+ hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
+ READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
+ if (hold_tskey)
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+
/*
* Let's try using as much space as possible.
* Use MTU if total length of the message fits into the MTU.
@@ -1794,6 +1795,8 @@ error:
cork->length -= length;
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
+ if (hold_tskey)
+ atomic_dec(&sk->sk_tskey);
return err;
}
@@ -1922,7 +1925,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = cork->base.mark;
skb->tstamp = cork->base.transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
ip6_cork_steal_dst(skb, cork);
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
if (proto == IPPROTO_ICMPV6) {
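
The __ip6_append_data() hunks above move the sk_tskey allocation past the early-exit checks and return the key on the error path, so a failed transmit no longer consumes a timestamp ID. A userspace sketch of that take-late/roll-back pattern, assuming C11 atomics; append_data() is an illustrative stand-in.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint sk_tskey;

static int append_data(bool want_tstamp, bool fail)
{
        bool hold_tskey = want_tstamp;
        unsigned int tskey = 0;

        if (hold_tskey)         /* atomic_inc_return() - 1 analogue */
                tskey = atomic_fetch_add(&sk_tskey, 1);

        if (fail) {
                if (hold_tskey) /* error path: hand the key back */
                        atomic_fetch_sub(&sk_tskey, 1);
                return -1;
        }
        printf("sent with tskey %u\n", tskey);
        return 0;
}

int main(void)
{
        append_data(true, true);        /* fails: key is returned */
        append_data(true, false);       /* still gets key 0 */
        return 0;
}
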
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 46c19bd48990..e9cc315832cb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -247,7 +247,6 @@ static void ip6_dev_free(struct net_device *dev)
gro_cells_destroy(&t->gro_cells);
dst_cache_destroy(&t->dst_cache);
- free_percpu(dev->tstats);
}
static int ip6_tnl_create2(struct net_device *dev)
@@ -796,8 +795,8 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
struct sk_buff *skb),
bool log_ecn_err)
{
- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- int err;
+ const struct ipv6hdr *ipv6h;
+ int nh, err;
if ((!(tpi->flags & TUNNEL_CSUM) &&
(tunnel->parms.i_flags & TUNNEL_CSUM)) ||
@@ -829,7 +828,6 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
goto drop;
}
- ipv6h = ipv6_hdr(skb);
skb->protocol = eth_type_trans(skb, tunnel->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
} else {
@@ -837,7 +835,23 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
skb_reset_mac_header(skb);
}
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ ipv6h = (struct ipv6hdr *)(skb->head + nh);
+
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
@@ -1741,7 +1755,7 @@ int ip6_tnl_get_iflink(const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- return t->parms.link;
+ return READ_ONCE(t->parms.link);
}
EXPORT_SYMBOL(ip6_tnl_get_iflink);
@@ -1833,6 +1847,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_LLTX;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
dev->features |= IPXIPX_FEATURES;
@@ -1858,13 +1873,10 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
if (ret)
- goto free_stats;
+ return ret;
ret = gro_cells_init(&t->gro_cells, dev);
if (ret)
@@ -1883,13 +1895,11 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
destroy_dst:
dst_cache_destroy(&t->dst_cache);
-free_stats:
- free_percpu(dev->tstats);
- dev->tstats = NULL;
return ret;
}
@@ -2267,21 +2277,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
+static void __net_exit ip6_tnl_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- ip6_tnl_destroy_tunnels(net, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ ip6_tnl_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations ip6_tnl_net_ops = {
.init = ip6_tnl_init_net,
- .exit_batch = ip6_tnl_exit_batch_net,
+ .exit_batch_rtnl = ip6_tnl_exit_batch_rtnl,
.id = &ip6_tnl_net_id,
.size = sizeof(struct ip6_tnl_net),
};
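
One subtle fix in __ip6_tnl_rcv() above: pulling headers may reallocate the skb head, so the code records the outer header as an offset from skb->head and recomputes the pointer afterwards. A userspace sketch of the same save-the-offset-not-the-pointer rule, using realloc() as the stand-in for the head reallocation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *head = malloc(16);        /* skb->head stand-in */
        size_t nh = 4;                  /* outer header offset */

        if (!head)
                return 1;
        strcpy(head + nh, "hdr");
        /* pskb_inet_may_pull() stand-in: the buffer may move, so any
         * raw pointer taken before this point would now dangle. */
        head = realloc(head, 4096);
        if (!head)
                return 1;
        printf("outer header: %s\n", head + nh);        /* offset survives */
        free(head);
        return 0;
}
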
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index a7bf0327b380..c99053189ea8 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -182,4 +182,5 @@ struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(udp_tunnel6_dst_lookup);
+MODULE_DESCRIPTION("IPv6 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index e550240c85e1..7f4f976aa24a 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -935,6 +935,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
netdev_hold(dev, &t->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1174,24 +1175,22 @@ err_alloc_dev:
return err;
}
-static void __net_exit vti6_exit_batch_net(struct list_head *net_list)
+static void __net_exit vti6_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct vti6_net *ip6n;
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list) {
ip6n = net_generic(net, vti6_net_id);
- vti6_destroy_tunnels(ip6n, &list);
+ vti6_destroy_tunnels(ip6n, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations vti6_net_ops = {
.init = vti6_init_net,
- .exit_batch = vti6_exit_batch_net,
+ .exit_batch_rtnl = vti6_exit_batch_rtnl,
.id = &vti6_net_id,
.size = sizeof(struct vti6_net),
};
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9782c180fee6..cb0ee81a068a 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1373,10 +1373,7 @@ int __init ip6_mr_init(void)
{
int err;
- mrt_cachep = kmem_cache_create("ip6_mrt_cache",
- sizeof(struct mfc6_cache),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ mrt_cachep = KMEM_CACHE(mfc6_cache, SLAB_HWCACHE_ALIGN);
if (!mrt_cachep)
return -ENOMEM;
@@ -2595,7 +2592,9 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = cb->nlh;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
int err;
if (cb->strict_check) {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 56c3c467f9de..d4c28ec1bc51 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -948,6 +948,8 @@ done:
if (optlen < sizeof(int))
goto e_inval;
retv = ip6_ra_control(sk, val);
+ if (retv == 0)
+ inet6_assign_bit(RTALERT, sk, valbool);
break;
case IPV6_FLOWLABEL_MGR:
retv = ipv6_flowlabel_opt(sk, optval, optlen);
@@ -1346,7 +1348,7 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
if (val < 0)
- val = sock_net(sk)->ipv6.devconf_all->hop_limit;
+ val = READ_ONCE(sock_net(sk)->ipv6.devconf_all->hop_limit);
break;
}
@@ -1445,6 +1447,10 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
val = np->rxopt.bits.recvfragsize;
break;
+ case IPV6_ROUTER_ALERT:
+ val = inet6_test_bit(RTALERT, sk);
+ break;
+
case IPV6_ROUTER_ALERT_ISOLATE:
val = inet6_test_bit(RTALERT_ISOLATE, sk);
break;
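
With the RTALERT bit recorded on the socket, IPV6_ROUTER_ALERT becomes readable through getsockopt() on kernels carrying this change. A minimal check, assuming a privileged IPPROTO_RAW socket (CAP_NET_RAW) and that the running kernel includes the patch; on older kernels the getsockopt() call fails instead.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);
        int on = 1, val = -1;
        socklen_t len = sizeof(val);

        if (fd < 0) {
                perror("socket");       /* expected without CAP_NET_RAW */
                return 1;
        }
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_ROUTER_ALERT, &on, sizeof(on)))
                perror("setsockopt");
        if (getsockopt(fd, IPPROTO_IPV6, IPV6_ROUTER_ALERT, &val, &len))
                perror("getsockopt");   /* pre-patch kernels reject this */
        printf("IPV6_ROUTER_ALERT=%d\n", val);
        close(fd);
        return 0;
}
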
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bc6e0a0bad3c..7ba01d8cfbae 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -159,9 +159,9 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
int iv;
if (mld_in_v1_mode(idev))
- iv = idev->cnf.mldv1_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
else
- iv = idev->cnf.mldv2_unsolicited_report_interval;
+ iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
return iv > 0 ? iv : 1;
}
@@ -1202,15 +1202,15 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
static int mld_force_mld_version(const struct inet6_dev *idev)
{
+ const struct net *net = dev_net(idev->dev);
+ int all_force;
+
+ all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
 /* Normally, both are 0 here. If enforcement of a particular version
 * is being used, the individual device setting takes lower precedence
 * than the 'all' device one (.../conf/all/force_mld_version).
 */
-
- if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
- return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
- else
- return idev->cnf.force_mld_version;
+ return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
}
static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
@@ -2719,7 +2719,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
 /* Should stop work after the group drop, or we will
 * start work again in mld_ifc_event().
 */
- synchronize_net();
mld_query_stop_work(idev);
mld_report_stop_work(idev);
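
mld_force_mld_version() above is rewritten around the GNU a ?: b extension: yield a when it is nonzero, evaluating it once, else b, so the conf/all setting overrides the per-device one. A tiny sketch of the precedence rule (values are illustrative; requires GCC/Clang):

#include <stdio.h>

/* GNU C: a ?: b yields a when a is nonzero (evaluated once), else b. */
static int force_mld_version(int all_force, int dev_force)
{
        return all_force ?: dev_force;
}

int main(void)
{
        printf("%d\n", force_mld_version(2, 1));        /* 'all' wins: 2 */
        printf("%d\n", force_mld_version(0, 1));        /* falls back: 1 */
        return 0;
}
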
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 83d2a8be263f..6a16a5bd0d91 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -405,6 +405,7 @@ static void __exit mip6_fini(void)
module_init(mip6_init);
module_exit(mip6_fini);
+MODULE_DESCRIPTION("IPv6 Mobility driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_DSTOPTS);
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ROUTING);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index a19999b30bc0..ae134634c323 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -451,7 +451,7 @@ static void ip6_nd_hdr(struct sk_buff *skb,
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
- tclass = idev ? idev->cnf.ndisc_tclass : 0;
+ tclass = idev ? READ_ONCE(idev->cnf.ndisc_tclass) : 0;
rcu_read_unlock();
skb_push(skb, sizeof(*hdr));
@@ -535,7 +535,7 @@ void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr,
src_addr = solicited_addr;
if (ifp->flags & IFA_F_OPTIMISTIC)
override = false;
- inc_opt |= ifp->idev->cnf.force_tllao;
+ inc_opt |= READ_ONCE(ifp->idev->cnf.force_tllao);
in6_ifa_put(ifp);
} else {
if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -903,8 +903,9 @@ have_ifp:
}
if (ipv6_chk_acast_addr(net, dev, &msg->target) ||
- (idev->cnf.forwarding &&
- (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) &&
+ (READ_ONCE(idev->cnf.forwarding) &&
+ (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) ||
+ READ_ONCE(idev->cnf.proxy_ndp)) &&
(is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
skb->pkt_type != PACKET_HOST &&
@@ -929,7 +930,7 @@ have_ifp:
}
if (is_router < 0)
- is_router = idev->cnf.forwarding;
+ is_router = READ_ONCE(idev->cnf.forwarding);
if (dad) {
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &msg->target,
@@ -973,7 +974,7 @@ static int accept_untracked_na(struct net_device *dev, struct in6_addr *saddr)
{
struct inet6_dev *idev = __in6_dev_get(dev);
- switch (idev->cnf.accept_untracked_na) {
+ switch (READ_ONCE(idev->cnf.accept_untracked_na)) {
case 0: /* Don't accept untracked na (absent in neighbor cache) */
return 0;
case 1: /* Create new entries from na if currently untracked */
@@ -1024,7 +1025,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* drop_unsolicited_na takes precedence over accept_untracked_na
*/
if (!msg->icmph.icmp6_solicited && idev &&
- idev->cnf.drop_unsolicited_na)
+ READ_ONCE(idev->cnf.drop_unsolicited_na))
return reason;
if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts))
@@ -1080,7 +1081,7 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* Note that we don't do a (daddr == all-routers-mcast) check.
*/
new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE;
- if (!neigh && lladdr && idev && idev->cnf.forwarding) {
+ if (!neigh && lladdr && idev && READ_ONCE(idev->cnf.forwarding)) {
if (accept_untracked_na(dev, saddr)) {
neigh = neigh_create(&nd_tbl, &msg->target, dev);
new_state = NUD_STALE;
@@ -1100,7 +1101,8 @@ static enum skb_drop_reason ndisc_recv_na(struct sk_buff *skb)
* has already sent a NA to us.
*/
if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) &&
- net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp &&
+ READ_ONCE(net->ipv6.devconf_all->forwarding) &&
+ READ_ONCE(net->ipv6.devconf_all->proxy_ndp) &&
pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) {
/* XXX: idev->cnf.proxy_ndp */
goto out;
@@ -1148,7 +1150,7 @@ static enum skb_drop_reason ndisc_recv_rs(struct sk_buff *skb)
}
/* Don't accept RS if we're not in router mode */
- if (!idev->cnf.forwarding)
+ if (!READ_ONCE(idev->cnf.forwarding))
goto out;
/*
@@ -1237,6 +1239,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
struct ndisc_options ndopts;
struct fib6_info *rt = NULL;
struct inet6_dev *in6_dev;
+ struct fib6_table *table;
u32 defrtr_usr_metric;
unsigned int pref = 0;
__u32 old_if_flags;
@@ -1317,7 +1320,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
if (old_if_flags != in6_dev->if_flags)
send_ifinfo_notify = true;
- if (!in6_dev->cnf.accept_ra_defrtr) {
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) {
ND_PRINTK(2, info,
"RA: %s, defrtr is false for dev: %s\n",
__func__, skb->dev->name);
@@ -1325,7 +1328,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
- if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
+ if (lifetime != 0 &&
+ lifetime < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) {
ND_PRINTK(2, info,
"RA: router lifetime (%ds) is too short: %s\n",
lifetime, skb->dev->name);
@@ -1336,7 +1340,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
* accept_ra_from_local is set to true.
*/
net = dev_net(in6_dev->dev);
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: default router ignored\n",
@@ -1348,7 +1352,7 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
pref = ra_msg->icmph.icmp6_router_pref;
/* 10b is handled as if it were 00b (medium) */
if (pref == ICMPV6_ROUTER_PREF_INVALID ||
- !in6_dev->cnf.accept_ra_rtr_pref)
+ !READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref))
pref = ICMPV6_ROUTER_PREF_MEDIUM;
#endif
/* routes added from RAs do not use nexthop objects */
@@ -1382,7 +1386,8 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
neigh_release(neigh);
rt = rt6_add_dflt_router(net, &ipv6_hdr(skb)->saddr,
- skb->dev, pref, defrtr_usr_metric);
+ skb->dev, pref, defrtr_usr_metric,
+ lifetime);
if (!rt) {
ND_PRINTK(0, err,
"RA: %s failed to add default route\n",
@@ -1409,12 +1414,21 @@ static enum skb_drop_reason ndisc_router_discovery(struct sk_buff *skb)
inet6_rt_notify(RTM_NEWROUTE, rt, &nlinfo, NLM_F_REPLACE);
}
- if (rt)
+ if (rt) {
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
fib6_set_expires(rt, jiffies + (HZ * lifetime));
- if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+ fib6_add_gc_list(rt);
+
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) < 256 &&
ra_msg->icmph.icmp6_hop_limit) {
- if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+ if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) <=
+ ra_msg->icmph.icmp6_hop_limit) {
+ WRITE_ONCE(in6_dev->cnf.hop_limit,
+ ra_msg->icmph.icmp6_hop_limit);
fib6_metric_set(rt, RTAX_HOPLIMIT,
ra_msg->icmph.icmp6_hop_limit);
} else {
@@ -1496,7 +1510,7 @@ skip_linkparms:
}
#ifdef CONFIG_IPV6_ROUTE_INFO
- if (!in6_dev->cnf.accept_ra_from_local &&
+ if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
in6_dev->dev, 0)) {
ND_PRINTK(2, info,
@@ -1505,7 +1519,7 @@ skip_linkparms:
goto skip_routeinfo;
}
- if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref) && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_ri;
p;
@@ -1517,14 +1531,14 @@ skip_linkparms:
continue;
#endif
if (ri->prefix_len == 0 &&
- !in6_dev->cnf.accept_ra_defrtr)
+ !READ_ONCE(in6_dev->cnf.accept_ra_defrtr))
continue;
if (ri->lifetime != 0 &&
- ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
+ ntohl(ri->lifetime) < READ_ONCE(in6_dev->cnf.accept_ra_min_lft))
continue;
- if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+ if (ri->prefix_len < READ_ONCE(in6_dev->cnf.accept_ra_rt_info_min_plen))
continue;
- if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
+ if (ri->prefix_len > READ_ONCE(in6_dev->cnf.accept_ra_rt_info_max_plen))
continue;
rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
&ipv6_hdr(skb)->saddr);
@@ -1544,7 +1558,7 @@ skip_routeinfo:
}
#endif
- if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
+ if (READ_ONCE(in6_dev->cnf.accept_ra_pinfo) && ndopts.nd_opts_pi) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_pi;
p;
@@ -1555,7 +1569,7 @@ skip_routeinfo:
}
}
- if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) {
+ if (ndopts.nd_opts_mtu && READ_ONCE(in6_dev->cnf.accept_ra_mtu)) {
__be32 n;
u32 mtu;
@@ -1569,8 +1583,8 @@ skip_routeinfo:
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
- } else if (in6_dev->cnf.mtu6 != mtu) {
- in6_dev->cnf.mtu6 = mtu;
+ } else if (READ_ONCE(in6_dev->cnf.mtu6) != mtu) {
+ WRITE_ONCE(in6_dev->cnf.mtu6, mtu);
fib6_metric_set(rt, RTAX_MTU, mtu);
rt6_mtu_change(skb->dev, mtu);
}
@@ -1804,7 +1818,7 @@ static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
if (!idev)
return true;
if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
- idev->cnf.suppress_frag_ndisc) {
+ READ_ONCE(idev->cnf.suppress_frag_ndisc)) {
net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n");
return true;
}
@@ -1881,8 +1895,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
idev = in6_dev_get(dev);
if (!idev)
break;
- if (idev->cnf.ndisc_notify ||
- net->ipv6.devconf_all->ndisc_notify)
+ if (READ_ONCE(idev->cnf.ndisc_notify) ||
+ READ_ONCE(net->ipv6.devconf_all->ndisc_notify))
ndisc_send_unsol_na(dev);
in6_dev_put(idev);
break;
@@ -1891,8 +1905,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
if (!idev)
evict_nocarrier = true;
else {
- evict_nocarrier = idev->cnf.ndisc_evict_nocarrier &&
- net->ipv6.devconf_all->ndisc_evict_nocarrier;
+ evict_nocarrier = READ_ONCE(idev->cnf.ndisc_evict_nocarrier) &&
+ READ_ONCE(net->ipv6.devconf_all->ndisc_evict_nocarrier);
in6_dev_put(idev);
}
@@ -1966,7 +1980,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer,
if (ctl->data == &NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME))
idev->nd_parms->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(idev->nd_parms, BASE_REACHABLE_TIME));
- idev->tstamp = jiffies;
+ WRITE_ONCE(idev->tstamp, jiffies);
inet6_ifinfo_notify(RTM_NEWLINK, idev);
in6_dev_put(idev);
}
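Note on the ndisc.c hunks above: they convert lockless readers of the
inet6_dev->cnf sysctls to READ_ONCE() and pair the remaining writes with
WRITE_ONCE(), so a concurrent sysctl update can neither tear a load nor be
observed with two different values within one code path. A minimal sketch of
the pattern (illustrative only, not part of the patch; use_hop_limit() is a
hypothetical consumer):

static void cnf_reader(struct inet6_dev *idev)
{
	/* Load once and reuse the local copy, so the range check and
	 * the later use cannot disagree about the value.
	 */
	int limit = READ_ONCE(idev->cnf.hop_limit);

	if (limit > 0 && limit < 256)
		use_hop_limit(limit);
}

static void cnf_writer(struct inet6_dev *idev, int val)
{
	/* Single plain store, visible to lockless readers. */
	WRITE_ONCE(idev->cnf.hop_limit, val);
}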
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 0ba62f4868f9..f3c8e2d918e1 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -6,6 +6,10 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
+# old sockopt interface and eval loop
+config IP6_NF_IPTABLES_LEGACY
+ tristate
+
config NF_SOCKET_IPV6
tristate "IPv6 socket lookup support"
help
@@ -147,7 +151,7 @@ config IP6_NF_MATCH_MH
config IP6_NF_MATCH_RPFILTER
tristate '"rpfilter" reverse path filter match support'
depends on NETFILTER_ADVANCED
- depends on IP6_NF_MANGLE || IP6_NF_RAW
+ depends on IP6_NF_MANGLE || IP6_NF_RAW || NFT_COMPAT
help
This option allows you to match packets whose replies would
go out via the interface the packet came in.
@@ -186,6 +190,7 @@ config IP6_NF_TARGET_HL
config IP6_NF_FILTER
tristate "Packet filtering"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
help
Packet filtering defines a table `filter', which has a series of
rules for simple packet filtering at local input, forwarding and
@@ -195,7 +201,7 @@ config IP6_NF_FILTER
config IP6_NF_TARGET_REJECT
tristate "REJECT target support"
- depends on IP6_NF_FILTER
+ depends on IP6_NF_FILTER || NFT_COMPAT
select NF_REJECT_IPV6
default m if NETFILTER_ADVANCED=n
help
@@ -221,6 +227,7 @@ config IP6_NF_TARGET_SYNPROXY
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `mangle' table to iptables: see the man page for
iptables(8). This table is used for various packet alterations
@@ -230,6 +237,7 @@ config IP6_NF_MANGLE
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
@@ -243,6 +251,7 @@ config IP6_NF_SECURITY
tristate "Security table"
depends on SECURITY
depends on NETFILTER_ADVANCED
+ select IP6_NF_IPTABLES_LEGACY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
@@ -254,6 +263,7 @@ config IP6_NF_NAT
depends on NF_CONNTRACK
depends on NETFILTER_ADVANCED
select NF_NAT
+ select IP6_NF_IPTABLES_LEGACY
select NETFILTER_XT_NAT
help
This enables the `nat' table in ip6tables. This allows masquerading,
@@ -262,25 +272,23 @@ config IP6_NF_NAT
To compile it as a module, choose M here. If unsure, say N.
-if IP6_NF_NAT
-
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
select NETFILTER_XT_TARGET_MASQUERADE
+ depends on IP6_NF_NAT
help
This is a backwards-compat option for the user's convenience
(e.g. when running oldconfig). It selects NETFILTER_XT_TARGET_MASQUERADE.
config IP6_NF_TARGET_NPT
tristate "NPT (Network Prefix translation) target support"
+ depends on IP6_NF_NAT || NFT_COMPAT
help
This option adds the `SNPT' and `DNPT' target, which perform
stateless IPv6-to-IPv6 Network Prefix Translation per RFC 6296.
To compile it as a module, choose M here. If unsure, say N.
-endif # IP6_NF_NAT
-
endif # IP6_NF_IPTABLES
endmenu
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index b8d6dc9aeeb6..66ce6fa5b2f5 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,7 +4,7 @@
#
# Link order matters here.
-obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
+obj-$(CONFIG_IP6_NF_IPTABLES_LEGACY) += ip6_tables.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index b2dd48911c8d..1a51a44571c3 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -327,9 +327,9 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto err;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN) {
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
payload_len);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 196dd4ecb5e2..dedee264b8f6 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -83,7 +83,7 @@ struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
@@ -124,7 +124,7 @@ struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
skb_reserve(nskb, LL_MAX_HEADER);
nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
- net->ipv6.devconf_all->hop_limit);
+ READ_ONCE(net->ipv6.devconf_all->hop_limit));
skb_reset_transport_header(nskb);
icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index b5205311f372..806d4b5dd1e6 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -111,9 +111,9 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
rcu_read_lock();
idev = __in6_dev_get(dev);
if (idev)
- hoplimit = idev->cnf.hop_limit;
+ hoplimit = READ_ONCE(idev->cnf.hop_limit);
else
- hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+ hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit);
rcu_read_unlock();
}
return hoplimit;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 03dbb874c363..779274055abf 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -288,8 +288,7 @@ out:
}
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
- struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
bool recverr = inet6_test_bit(RECVERR6, sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -344,7 +343,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
if (!raw_v6_match(net, sk, nexthdr, &ip6h->saddr, &ip6h->daddr,
inet6_iif(skb), inet6_iif(skb)))
continue;
- rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
+ rawv6_err(sk, skb, type, code, inner_offset, info);
}
rcu_read_unlock();
}
@@ -616,7 +615,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
skb_put(skb, length);
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
@@ -935,7 +934,7 @@ do_confirm:
goto done;
}
-static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_seticmpfilter(struct sock *sk, int optname,
sockptr_t optval, int optlen)
{
switch (optname) {
@@ -952,7 +951,7 @@ static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
return 0;
}
-static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
+static int rawv6_geticmpfilter(struct sock *sk, int optname,
char __user *optval, int __user *optlen)
{
int len;
@@ -1038,7 +1037,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_seticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
@@ -1099,7 +1098,7 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
- return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
+ return rawv6_geticmpfilter(sk, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM ||
optname == IPV6_HDRINCL)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5ebc47da1000..acb4f119e11f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -272,9 +272,9 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
if (!reasm_data)
goto out_oom;
- payload_len = ((skb->data - skb_network_header(skb)) -
+ payload_len = -skb_network_offset(skb) -
sizeof(struct ipv6hdr) + fq->q.len -
- sizeof(struct frag_hdr));
+ sizeof(struct frag_hdr);
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
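Both reassembly hunks (nf_conntrack_reasm.c above and reassembly.c here) rely
on the identity skb_network_offset(skb) == skb_network_header(skb) - skb->data,
so negating the offset yields the same byte count as the open-coded
subtraction. A sketch with a hypothetical helper, not part of the patch:

static int ip6_reasm_payload_len(const struct sk_buff *skb, unsigned int qlen)
{
	/* -skb_network_offset(skb) == skb->data - skb_network_header(skb),
	 * i.e. the bytes already pulled past the network header.
	 */
	return -skb_network_offset(skb) - sizeof(struct ipv6hdr) +
	       qlen - sizeof(struct frag_hdr);
}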
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 63b4c6056582..1f4b935a0e57 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -645,14 +645,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
write_lock_bh(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
- neigh->updated + idev->cnf.rtr_probe_interval)) {
+ neigh->updated +
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work)
__neigh_set_probe_once(neigh);
}
write_unlock_bh(&neigh->lock);
} else if (time_after(jiffies, last_probe +
- idev->cnf.rtr_probe_interval)) {
+ READ_ONCE(idev->cnf.rtr_probe_interval))) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
@@ -931,6 +932,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
struct net *net = dev_net(dev);
struct route_info *rinfo = (struct route_info *) opt;
struct in6_addr prefix_buf, *prefix;
+ struct fib6_table *table;
unsigned int pref;
unsigned long lifetime;
struct fib6_info *rt;
@@ -989,10 +991,18 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
(rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
if (rt) {
- if (!addrconf_finite_timeout(lifetime))
+ table = rt->fib6_table;
+ spin_lock_bh(&table->tb6_lock);
+
+ if (!addrconf_finite_timeout(lifetime)) {
fib6_clean_expires(rt);
- else
+ fib6_remove_gc_list(rt);
+ } else {
fib6_set_expires(rt, jiffies + HZ * lifetime);
+ fib6_add_gc_list(rt);
+ }
+
+ spin_unlock_bh(&table->tb6_lock);
fib6_info_release(rt);
}
@@ -1587,7 +1597,7 @@ static unsigned int fib6_mtu(const struct fib6_result *res)
rcu_read_lock();
idev = __in6_dev_get(dev);
- mtu = idev->cnf.mtu6;
+ mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
}
@@ -2211,7 +2221,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
strict |= flags & RT6_LOOKUP_F_IFACE;
strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
- if (net->ipv6.devconf_all->forwarding == 0)
+ if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
rcu_read_lock();
@@ -3240,8 +3250,8 @@ u32 ip6_mtu_from_fib6(const struct fib6_result *res,
mtu = IPV6_MIN_MTU;
idev = __in6_dev_get(dev);
- if (idev && idev->cnf.mtu6 > mtu)
- mtu = idev->cnf.mtu6;
+ if (idev)
+ mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
}
mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
@@ -3765,8 +3775,6 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (cfg->fc_flags & RTF_EXPIRES)
fib6_set_expires(rt, jiffies +
clock_t_to_jiffies(cfg->fc_expires));
- else
- fib6_clean_expires(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
@@ -4142,7 +4150,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev)
return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+ if (READ_ONCE(in6_dev->cnf.forwarding) ||
+ !READ_ONCE(in6_dev->cnf.accept_redirects))
return;
/* RFC2461 8.1:
@@ -4355,7 +4364,8 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev,
unsigned int pref,
- u32 defrtr_usr_metric)
+ u32 defrtr_usr_metric,
+ int lifetime)
{
struct fib6_config cfg = {
.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
@@ -4368,6 +4378,7 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
.fc_nlinfo.portid = 0,
.fc_nlinfo.nlh = NULL,
.fc_nlinfo.nl_net = net,
+ .fc_expires = jiffies_to_clock_t(lifetime * HZ),
};
cfg.fc_gateway = *gwaddr;
@@ -4574,8 +4585,8 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
f6i->dst_nocount = true;
if (!anycast &&
- (net->ipv6.devconf_all->disable_policy ||
- idev->cnf.disable_policy))
+ (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
+ READ_ONCE(idev->cnf.disable_policy)))
f6i->dst_nopolicy = true;
}
@@ -5332,19 +5343,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
err = __ip6_ins_rt(nh->fib6_info, info, extack);
- fib6_info_release(nh->fib6_info);
-
- if (!err) {
- /* save reference to last route successfully inserted */
- rt_last = nh->fib6_info;
-
- /* save reference to first route for notification */
- if (!rt_notif)
- rt_notif = nh->fib6_info;
- }
- /* nh->fib6_info is used or freed at this point, reset to NULL*/
- nh->fib6_info = NULL;
if (err) {
if (replace && nhn)
NL_SET_ERR_MSG_MOD(extack,
@@ -5352,6 +5351,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = nh;
goto add_errout;
}
+ /* save reference to last route successfully inserted */
+ rt_last = nh->fib6_info;
+
+ /* save reference to first route for notification */
+ if (!rt_notif)
+ rt_notif = nh->fib6_info;
/* Because each route is added like a single route we remove
* these flags after the first nexthop: if there is a collision,
@@ -5412,8 +5417,7 @@ add_errout:
cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
- if (nh->fib6_info)
- fib6_info_release(nh->fib6_info);
+ fib6_info_release(nh->fib6_info);
list_del(&nh->next);
kfree(nh);
}
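The rt6_route_rcv() hunk here and the ndisc_router_discovery() hunk earlier
share one pattern: a route's expiry state and its membership on the per-table
GC list must change together, and both are protected by tb6_lock. A sketch of
that pattern (helper name hypothetical; the callees are the ones used above):

static void fib6_update_expiry(struct fib6_info *rt, unsigned long expires,
			       bool finite)
{
	struct fib6_table *table = rt->fib6_table;

	spin_lock_bh(&table->tb6_lock);
	if (finite) {
		fib6_set_expires(rt, expires);
		fib6_add_gc_list(rt);		/* let the GC walker find it */
	} else {
		fib6_clean_expires(rt);
		fib6_remove_gc_list(rt);
	}
	spin_unlock_bh(&table->tb6_lock);
}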
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 29346a6eec9f..35508abd76f4 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -512,22 +512,24 @@ int __init seg6_init(void)
{
int err;
- err = genl_register_family(&seg6_genl_family);
+ err = register_pernet_subsys(&ip6_segments_ops);
if (err)
goto out;
- err = register_pernet_subsys(&ip6_segments_ops);
+ err = genl_register_family(&seg6_genl_family);
if (err)
- goto out_unregister_genl;
+ goto out_unregister_pernet;
#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
err = seg6_iptunnel_init();
if (err)
- goto out_unregister_pernet;
+ goto out_unregister_genl;
err = seg6_local_init();
- if (err)
- goto out_unregister_pernet;
+ if (err) {
+ seg6_iptunnel_exit();
+ goto out_unregister_genl;
+ }
#endif
#ifdef CONFIG_IPV6_SEG6_HMAC
@@ -548,11 +550,11 @@ out_unregister_iptun:
#endif
#endif
#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
-out_unregister_pernet:
- unregister_pernet_subsys(&ip6_segments_ops);
-#endif
out_unregister_genl:
genl_unregister_family(&seg6_genl_family);
+#endif
+out_unregister_pernet:
+ unregister_pernet_subsys(&ip6_segments_ops);
goto out;
}
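The seg6_init() hunk is a plain ordering fix: resources must be unwound in the
reverse order of acquisition, and each error label must undo exactly what
succeeded before the failing step. Generic shape of that rule (all names
hypothetical):

static int __init example_init(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;

	err = setup_b();
	if (err)
		goto undo_a;

	err = setup_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return err;
}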
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index d43c50a7310d..861e0366f549 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -241,6 +241,7 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
struct sr6_tlv_hmac *tlv;
struct ipv6_sr_hdr *srh;
struct inet6_dev *idev;
+ int require_hmac;
idev = __in6_dev_get(skb->dev);
@@ -248,16 +249,17 @@ bool seg6_hmac_validate_skb(struct sk_buff *skb)
tlv = seg6_get_tlv_hmac(srh);
+ require_hmac = READ_ONCE(idev->cnf.seg6_require_hmac);
/* mandatory check but no tlv */
- if (idev->cnf.seg6_require_hmac > 0 && !tlv)
+ if (require_hmac > 0 && !tlv)
return false;
/* no check */
- if (idev->cnf.seg6_require_hmac < 0)
+ if (require_hmac < 0)
return true;
/* check only if present */
- if (idev->cnf.seg6_require_hmac == 0 && !tlv)
+ if (require_hmac == 0 && !tlv)
return true;
/* now, seg6_require_hmac >= 0 && tlv */
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index cc24cefdb85c..655c9b1a19b8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1398,7 +1398,6 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_uninit = ipip6_tunnel_uninit,
.ndo_start_xmit = sit_tunnel_xmit,
.ndo_siocdevprivate = ipip6_tunnel_siocdevprivate,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_get_iflink = ip_tunnel_get_iflink,
.ndo_tunnel_ctl = ipip6_tunnel_ctl,
};
@@ -1408,7 +1407,6 @@ static void ipip6_dev_free(struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
dst_cache_destroy(&tunnel->dst_cache);
- free_percpu(dev->tstats);
}
#define SIT_FEATURES (NETIF_F_SG | \
@@ -1437,6 +1435,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= SIT_FEATURES;
dev->hw_features |= SIT_FEATURES;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
static int ipip6_tunnel_init(struct net_device *dev)
@@ -1449,17 +1449,13 @@ static int ipip6_tunnel_init(struct net_device *dev)
strcpy(tunnel->parms.name, dev->name);
ipip6_tunnel_bind_dev(dev);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
- if (err) {
- free_percpu(dev->tstats);
- dev->tstats = NULL;
+ if (err)
return err;
- }
+
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -1875,22 +1871,19 @@ err_alloc_dev:
return err;
}
-static void __net_exit sit_exit_batch_net(struct list_head *net_list)
+static void __net_exit sit_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- LIST_HEAD(list);
struct net *net;
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_list, exit_list)
- sit_destroy_tunnels(net, &list);
-
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ sit_destroy_tunnels(net, dev_to_kill);
}
static struct pernet_operations sit_net_ops = {
.init = sit_init_net,
- .exit_batch = sit_exit_batch_net,
+ .exit_batch_rtnl = sit_exit_batch_rtnl,
.id = &sit_net_id,
.size = sizeof(struct sit_net),
};
@@ -1956,6 +1949,7 @@ xfrm_tunnel_failed:
module_init(sit_init);
module_exit(sit_cleanup);
+MODULE_DESCRIPTION("IPv6-in-IPv4 tunnel SIT driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("sit");
MODULE_ALIAS_NETDEV("sit0");
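The sit.c conversion above replaces driver-managed per-CPU stats (allocation
in the init ndo, free in both the error path and the destructor, plus the
generic .ndo_get_stats64) with a single declaration that hands ownership to
the net core. Illustrative before/after, condensed from the hunks:

/* before: the driver allocates, frees and reports dev->tstats itself */
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
...
free_percpu(dev->tstats);

/* after: declare the stats type in ->setup(); the core then allocates
 * and frees the per-CPU array and can report it without a driver ndo
 */
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;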
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index c8d2ca27220c..8bad0a44a0a6 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -177,24 +177,33 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct sock *ret = sk;
__u8 rcv_wscale;
int full_space;
+ SKB_DR(reason);
if (!READ_ONCE(net->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
goto out;
- req = cookie_tcp_check(net, sk, skb);
- if (IS_ERR(req))
- goto out;
- if (!req)
+ if (cookie_bpf_ok(skb)) {
+ req = cookie_bpf_check(sk, skb);
+ } else {
+ req = cookie_tcp_check(net, sk, skb);
+ if (IS_ERR(req))
+ goto out;
+ }
+ if (!req) {
+ SKB_DR_SET(reason, NO_SOCKET);
goto out_drop;
+ }
ireq = inet_rsk(req);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- if (security_inet_conn_request(sk, skb, req))
+ if (security_inet_conn_request(sk, skb, req)) {
+ SKB_DR_SET(reason, SECURITY_HOOK);
goto out_free;
+ }
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -231,8 +240,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
- if (IS_ERR(dst))
+ if (IS_ERR(dst)) {
+ SKB_DR_SET(reason, IP_OUTNOROUTES);
goto out_free;
+ }
}
req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
@@ -247,14 +258,20 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok, &rcv_wscale,
dst_metric(dst, RTAX_INITRWND));
- ireq->rcv_wscale = rcv_wscale;
+ if (!req->syncookie)
+ ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok &= cookie_ecn_ok(net, dst);
ret = tcp_get_cookie_sock(sk, skb, req, dst);
+ if (!ret) {
+ SKB_DR_SET(reason, NO_SOCKET);
+ goto out_drop;
+ }
out:
return ret;
out_free:
reqsk_free(req);
out_drop:
+ kfree_skb_reason(skb, reason);
return NULL;
}
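cookie_v6_check() now reports why a SYN-cookie ACK was dropped instead of
silently freeing the skb: SKB_DR(reason) declares a local initialized to
SKB_DROP_REASON_NOT_SPECIFIED, SKB_DR_SET() narrows it at each failure point,
and kfree_skb_reason() consumes it on the shared drop path. Condensed sketch
(lookup_ok() is a hypothetical check, not part of the patch):

static struct sock *cookie_check_sketch(struct sock *sk, struct sk_buff *skb)
{
	SKB_DR(reason);		/* = SKB_DROP_REASON_NOT_SPECIFIED */

	if (!lookup_ok(skb)) {
		SKB_DR_SET(reason, NO_SOCKET);
		goto drop;
	}
	return sk;
drop:
	kfree_skb_reason(skb, reason);
	return NULL;
}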
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57b25b1fc9d9..3f4cba49e9ee 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -58,6 +58,7 @@
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
+#include <net/hotdata.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
@@ -1623,7 +1624,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (np->rxopt.all)
opt_skb = skb_clone_and_charge_r(skb, sk);
- reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
struct dst_entry *dst;
@@ -1653,12 +1653,12 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v6_cookie_check(sk, skb);
- if (!nsk)
- goto discard;
-
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
- goto reset;
+ if (nsk) {
+ reason = tcp_child_process(sk, nsk, skb);
+ if (reason)
+ goto reset;
+ }
if (opt_skb)
__kfree_skb(opt_skb);
return 0;
@@ -1666,7 +1666,8 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
} else
sock_rps_save_rxhash(sk, skb);
- if (tcp_rcv_state_process(sk, skb))
+ reason = tcp_rcv_state_process(sk, skb);
+ if (reason)
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
@@ -1856,10 +1857,12 @@ process:
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
- } else if (tcp_child_process(sk, nsk, skb)) {
- tcp_v6_send_reset(nsk, skb);
- goto discard_and_relse;
} else {
+ drop_reason = tcp_child_process(sk, nsk, skb);
+ if (drop_reason) {
+ tcp_v6_send_reset(nsk, skb);
+ goto discard_and_relse;
+ }
sock_put(sk);
return 0;
}
@@ -2365,11 +2368,6 @@ struct proto tcpv6_prot = {
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
-static const struct inet6_protocol tcpv6_protocol = {
- .handler = tcp_v6_rcv,
- .err_handler = tcp_v6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
static struct inet_protosw tcpv6_protosw = {
.type = SOCK_STREAM,
@@ -2406,7 +2404,12 @@ int __init tcpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
+ .handler = tcp_v6_rcv,
+ .err_handler = tcp_v6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
if (ret)
goto out;
@@ -2431,7 +2434,7 @@ out_tcpv6_pernet_subsys:
out_tcpv6_protosw:
inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
goto out;
}
@@ -2439,5 +2442,5 @@ void tcpv6_exit(void)
{
unregister_pernet_subsys(&tcpv6_net_ops);
inet6_unregister_protosw(&tcpv6_protosw);
- inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
+ inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
}
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index bf0c957e4b5e..4b07d1e6c952 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -66,15 +66,15 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
return tcp_gso_segment(skb, features);
}
-static const struct net_offload tcpv6_offload = {
- .callbacks = {
- .gso_segment = tcp6_gso_segment,
- .gro_receive = tcp6_gro_receive,
- .gro_complete = tcp6_gro_complete,
- },
-};
int __init tcpv6_offload_init(void)
{
- return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
+ net_hotdata.tcpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = tcp6_gso_segment,
+ .gro_receive = tcp6_gro_receive,
+ .gro_complete = tcp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}
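tcp_ipv6.c, tcpv6_offload.c and the udp files below all follow one recipe: a
file-local static const struct moves into net_hotdata, the idea being to group
per-packet state so it shares cache lines with other hot fields. Since the
field can no longer be const-initialized, it is filled at init time with a
compound literal, as in the hunk above:

net_hotdata.tcpv6_offload = (struct net_offload) {
	.callbacks = {
		.gso_segment	= tcp6_gso_segment,
		.gro_receive	= tcp6_gro_receive,
		.gro_complete	= tcp6_gro_complete,
	},
};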
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 00e8d8b1c9a7..dc4ea9b11794 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -302,4 +302,5 @@ static void __exit tunnel6_fini(void)
module_init(tunnel6_init);
module_exit(tunnel6_fini);
+MODULE_DESCRIPTION("IP-in-IPv6 tunnel driver");
MODULE_LICENSE("GPL");
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3f2249b4cd5f..80ad8f436b17 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -79,9 +79,6 @@ u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *faddr,
const __be16 fport)
{
- static u32 udp6_ehash_secret __read_mostly;
- static u32 udp_ipv6_hash_secret __read_mostly;
-
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
@@ -1702,11 +1699,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
-static const struct inet6_protocol udpv6_protocol = {
- .handler = udpv6_rcv,
- .err_handler = udpv6_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
-};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
@@ -1803,7 +1795,12 @@ int __init udpv6_init(void)
{
int ret;
- ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
+ net_hotdata.udpv6_protocol = (struct inet6_protocol) {
+ .handler = udpv6_rcv,
+ .err_handler = udpv6_err,
+ .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
+ };
+ ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
@@ -1814,12 +1811,12 @@ out:
return ret;
out_udpv6_protocol:
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
- inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
+ inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 6b95ba241ebe..312bcaeea96f 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -192,20 +192,19 @@ INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
-static const struct net_offload udpv6_offload = {
- .callbacks = {
- .gso_segment = udp6_ufo_fragment,
- .gro_receive = udp6_gro_receive,
- .gro_complete = udp6_gro_complete,
- },
-};
-
-int udpv6_offload_init(void)
+int __init udpv6_offload_init(void)
{
- return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
+ net_hotdata.udpv6_offload = (struct net_offload) {
+ .callbacks = {
+ .gso_segment = udp6_ufo_fragment,
+ .gro_receive = udp6_gro_receive,
+ .gro_complete = udp6_gro_complete,
+ },
+ };
+ return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
int udpv6_offload_exit(void)
{
- return inet6_del_offload(&udpv6_offload, IPPROTO_UDP);
+ return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 6e36e5047fba..a17d783dc7c0 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,7 +43,7 @@ static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
int xfrm6_transport_finish(struct sk_buff *skb, int async)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- int nhlen = skb->data - skb_network_header(skb);
+ int nhlen = -skb_network_offset(skb);
skb_network_header(skb)[IP6CB(skb)->nhoff] =
XFRM_MODE_SKB_CB(skb)->protocol;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 0f3df26878a3..bf140ef781c1 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -398,5 +398,6 @@ static void __exit xfrm6_tunnel_fini(void)
module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
+MODULE_DESCRIPTION("IPv6 XFRM tunnel driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 498a0c35b7bb..4aa1c72e6c49 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1060,13 +1060,12 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
int i;
/* skip iucv_array lying in the headroom */
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = (u32)virt_to_phys(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
@@ -1162,13 +1161,12 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = (u32)virt_to_phys(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
rc = pr_iucv->message_receive(path, msg,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 6334f64f04d5..5b56ae6612dd 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -156,7 +156,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
static LIST_HEAD(iucv_handler_list);
/*
- * iucv_path_table: an array of iucv_path structures.
+ * iucv_path_table: array of pointers to iucv_path structures.
*/
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;
@@ -286,6 +286,7 @@ static union iucv_param *iucv_param_irq[NR_CPUS];
*/
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
+ unsigned long reg1 = virt_to_phys(parm);
int cc;
asm volatile(
@@ -296,7 +297,7 @@ static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
" srl %[cc],28\n"
: [cc] "=&d" (cc), "+m" (*parm)
: [reg0] "d" ((unsigned long)command),
- [reg1] "d" ((unsigned long)parm)
+ [reg1] "d" (reg1)
: "cc", "0", "1");
return cc;
}
@@ -544,7 +545,7 @@ static int iucv_enable(void)
cpus_read_lock();
rc = -ENOMEM;
- alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
+ alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
if (!iucv_path_table)
goto out;
@@ -1123,7 +1124,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ipmsgid = msg->id;
parm->db.ippathid = path->pathid;
@@ -1241,7 +1242,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) reply;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(reply);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags;
@@ -1293,7 +1294,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.ipmsgtag = msg->tag;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags | IUCV_IPNORPY;
@@ -1378,7 +1379,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
parm->dpl.ipsrccls = srccls;
parm->dpl.ipmsgtag = msg->tag;
- parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
+ parm->dpl.ipbfadr2 = (u32)virt_to_phys(answer);
parm->dpl.ipbfln2f = (u32) asize;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
@@ -1387,9 +1388,9 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->db.iptrgcls = msg->class;
parm->db.ipsrccls = srccls;
parm->db.ipmsgtag = msg->tag;
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = (u32)virt_to_phys(buffer);
parm->db.ipbfln1f = (u32) size;
- parm->db.ipbfadr2 = (u32)(addr_t) answer;
+ parm->db.ipbfadr2 = (u32)virt_to_phys(answer);
parm->db.ipbfln2f = (u32) asize;
}
rc = iucv_call_b2f0(IUCV_SEND, parm);
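The iucv.c changes share one root cause: IUCV parameter blocks are handed to
the machine and must carry real (physical) addresses. Casting a kernel pointer
to u32 only works while virtual and real addresses coincide, which the old
(addr_t) casts silently relied on; virt_to_phys() makes the conversion
explicit. The iucv_enable() fix is related but distinct: the table stores
pointers, so sizing it by the struct over-allocated. From the hunk above:

/* before: one struct-sized slot per pathid (over-allocates) */
alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
/* after: one pointer-sized slot per pathid */
alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);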
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 1184d40167b8..dc46f17a3187 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -627,7 +627,8 @@ retry:
skb = txm->frag_skb;
}
- if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
+ if (WARN_ON(!skb_shinfo(skb)->nr_frags) ||
+ WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
ret = -EINVAL;
goto out;
}
@@ -637,8 +638,8 @@ retry:
msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
- skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
- msize);
+ (const struct bio_vec *)skb_shinfo(skb)->frags,
+ skb_shinfo(skb)->nr_frags, msize);
iov_iter_advance(&msg.msg_iter, txm->frag_offset);
do {
@@ -1877,15 +1878,11 @@ static int __init kcm_init(void)
{
int err = -ENOMEM;
- kcm_muxp = kmem_cache_create("kcm_mux_cache",
- sizeof(struct kcm_mux), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_muxp = KMEM_CACHE(kcm_mux, SLAB_HWCACHE_ALIGN);
if (!kcm_muxp)
goto fail;
- kcm_psockp = kmem_cache_create("kcm_psock_cache",
- sizeof(struct kcm_psock), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ kcm_psockp = KMEM_CACHE(kcm_psock, SLAB_HWCACHE_ALIGN);
if (!kcm_psockp)
goto fail;
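The kcm_init() cleanup uses the KMEM_CACHE() convenience macro, which derives
the cache name, size and alignment from the struct tag; note the cache names
change from "kcm_mux_cache"/"kcm_psock_cache" to "kcm_mux"/"kcm_psock" as a
side effect. Roughly, per its definition in <linux/slab.h>:

/* KMEM_CACHE(kcm_mux, SLAB_HWCACHE_ALIGN) expands to about: */
kmem_cache_create("kcm_mux", sizeof(struct kcm_mux),
		  __alignof__(struct kcm_mux),
		  SLAB_HWCACHE_ALIGN, NULL);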
diff --git a/net/key/af_key.c b/net/key/af_key.c
index d68d01804dc7..f79fb99271ed 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3924,5 +3924,6 @@ out_unregister_key_proto:
module_init(ipsec_pfkey_init);
module_exit(ipsec_pfkey_exit);
+MODULE_DESCRIPTION("PF_KEY socket helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KEY);
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 25ca89f80414..39e487ccc468 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -100,7 +100,7 @@ static const struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static struct device_type l2tpeth_type = {
+static const struct device_type l2tpeth_type = {
.name = "l2tpeth",
};
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 9a2a9ed3ba47..970af3983d11 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -478,7 +478,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rt = ip_route_output_ports(sock_net(sk), fl4, sk,
daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
- sk->sk_protocol, RT_CONN_FLAGS(sk),
+ sk->sk_protocol, ip_sock_rt_tos(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index dd3153966173..7bf14cf9ffaa 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -627,7 +627,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
back_from_confirm:
lock_sock(sk);
- ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
+ ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,
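The one-character l2tp_ip6_sendmsg() fix above is a ternary-precedence bug:
'+' binds tighter than '?:', so the old expression computed
(len + empty) ? transhdrlen : 0 and almost always returned transhdrlen alone.
A self-contained illustration (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
	int len = 100, empty = 1, transhdrlen = 8;

	int buggy = len + empty ? transhdrlen : 0;   /* (100+1)?8:0 == 8   */
	int fixed = len + (empty ? transhdrlen : 0); /* 100 + 8     == 108 */

	printf("%d %d\n", buggy, fixed);
	return 0;
}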
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 9b06c380866b..fde1140d899e 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -226,6 +226,8 @@ static int llc_ui_release(struct socket *sock)
}
netdev_put(llc->dev, &llc->dev_tracker);
sock_put(sk);
+ sock_orphan(sk);
+ sock->sk = NULL;
llc_sk_free(sk);
out:
return 0;
@@ -928,14 +930,15 @@ copy_uaddr:
*/
static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
+ DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
int flags = msg->msg_flags;
int noblock = flags & MSG_DONTWAIT;
+ int rc = -EINVAL, copied = 0, hdrlen, hh_len;
struct sk_buff *skb = NULL;
+ struct net_device *dev;
size_t size = 0;
- int rc = -EINVAL, copied = 0, hdrlen;
dprintk("%s: sending from %02X to %02X\n", __func__,
llc->laddr.lsap, llc->daddr.lsap);
@@ -955,22 +958,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (rc)
goto out;
}
- hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
+ dev = llc->dev;
+ hh_len = LL_RESERVED_SPACE(dev);
+ hdrlen = llc_ui_header_len(sk, addr);
size = hdrlen + len;
- if (size > llc->dev->mtu)
- size = llc->dev->mtu;
+ size = min_t(size_t, size, READ_ONCE(dev->mtu));
copied = size - hdrlen;
rc = -EINVAL;
if (copied < 0)
goto out;
release_sock(sk);
- skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+ skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
lock_sock(sk);
if (!skb)
goto out;
- skb->dev = llc->dev;
+ if (sock_flag(sk, SOCK_ZAPPED) ||
+ llc->dev != dev ||
+ hdrlen != llc_ui_header_len(sk, addr) ||
+ hh_len != LL_RESERVED_SPACE(dev) ||
+ size > READ_ONCE(dev->mtu))
+ goto out;
+ skb->dev = dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
- skb_reserve(skb, hdrlen);
+ skb_reserve(skb, hh_len + hdrlen);
rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
if (rc)
goto out;
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 6e387aadffce..4f16d9c88350 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
.func = llc_rcv,
};
-static struct packet_type llc_tr_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_TR_802_2),
- .func = llc_rcv,
-};
-
static int __init llc_init(void)
{
dev_add_pack(&llc_packet_type);
- dev_add_pack(&llc_tr_packet_type);
return 0;
}
static void __exit llc_exit(void)
{
dev_remove_pack(&llc_packet_type);
- dev_remove_pack(&llc_tr_packet_type);
}
module_init(llc_init);
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index cb0291decf2e..13438cc0a6b1 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -62,7 +62,6 @@ config MAC80211_KUNIT_TEST
depends on KUNIT
depends on MAC80211
default KUNIT_ALL_TESTS
- depends on !KERNEL_6_2
help
Enable this option to test mac80211 internals with kunit.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 4406b4f8f3b9..a33884967f21 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -29,7 +29,7 @@ mac80211-y := \
spectmgmt.o \
tx.o \
key.o \
- util.o \
+ util.o parse.o \
wme.o \
chan.o \
trace.o mlme.o \
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b8a278355e18..21d55dc539f6 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -616,7 +616,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if (!pubsta->deflink.ht_cap.ht_supported &&
- sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
+ sta->sdata->vif.bss_conf.chanreq.oper.chan->band != NL80211_BAND_6GHZ)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 489dd97f5172..0744113f3535 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -886,33 +886,32 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
- int ret = 0;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
+ int ret;
lockdep_assert_wiphy(local->hw.wiphy);
- if (cfg80211_chandef_identical(&local->monitor_chandef, chandef))
+ if (cfg80211_chandef_identical(&local->monitor_chanreq.oper,
+ &chanreq.oper))
return 0;
- if (local->use_chanctx) {
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata) {
- ieee80211_link_release_channel(&sdata->deflink);
- ret = ieee80211_link_use_channel(&sdata->deflink,
- chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
- }
- } else {
- if (local->open_count == local->monitors) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
- }
+ sdata = wiphy_dereference(local->hw.wiphy,
+ local->monitor_sdata);
+ if (!sdata)
+ goto done;
- if (ret == 0)
- local->monitor_chandef = *chandef;
+ if (cfg80211_chandef_identical(&sdata->vif.bss_conf.chanreq.oper,
+ &chanreq.oper))
+ return 0;
- return ret;
+ ieee80211_link_release_channel(&sdata->deflink);
+ ret = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
+ IEEE80211_CHANCTX_EXCLUSIVE);
+ if (ret)
+ return ret;
+done:
+ local->monitor_chanreq = chanreq;
+ return 0;
}
static int
@@ -953,7 +952,8 @@ ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
struct cfg80211_fils_discovery *params,
struct ieee80211_link_data *link,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf,
+ u64 *changed)
{
struct fils_discovery_data *new, *old = NULL;
struct ieee80211_fils_discovery *fd;
@@ -980,14 +980,16 @@ static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
}
- return BSS_CHANGED_FILS_DISCOVERY;
+ *changed |= BSS_CHANGED_FILS_DISCOVERY;
+ return 0;
}
static int
ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
struct cfg80211_unsol_bcast_probe_resp *params,
struct ieee80211_link_data *link,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf,
+ u64 *changed)
{
struct unsol_bcast_probe_resp_data *new, *old = NULL;
@@ -1011,7 +1013,8 @@ ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL);
}
- return BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
+ *changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
+ return 0;
}
static int ieee80211_set_ftm_responder_params(
@@ -1256,6 +1259,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id = params->beacon.link_id;
struct ieee80211_link_data *link;
struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1339,8 +1343,6 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
link_conf->eht_support = true;
- link_conf->eht_puncturing = params->punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
link_conf->eht_su_beamformer =
params->eht_cap->fixed.phy_cap_info[0] &
@@ -1368,7 +1370,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return err;
}
- err = ieee80211_link_use_channel(link, &params->chandef,
+ err = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (!err)
ieee80211_link_copy_chanctx_to_vlans(link, false);
@@ -1443,17 +1445,15 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
goto error;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
goto error;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
goto error;
- changed |= err;
err = drv_start_ap(sdata->local, sdata, link_conf);
if (err) {
@@ -1518,17 +1518,15 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
return err;
err = ieee80211_set_fils_discovery(sdata, &params->fils_discovery,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
return err;
- changed |= err;
err = ieee80211_set_unsol_bcast_probe_resp(sdata,
&params->unsol_bcast_probe_resp,
- link, link_conf);
+ link, link_conf, &changed);
if (err < 0)
return err;
- changed |= err;
if (beacon->he_bss_color_valid &&
beacon->he_bss_color.enabled != link_conf->he_bss_color.enabled) {
@@ -1618,7 +1616,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->ema_ap = false;
link_conf->bssid_indicator = 0;
- __sta_info_flush(sdata, true);
+ __sta_info_flush(sdata, true, link_id);
ieee80211_free_keys(sdata, true);
link_conf->enable_beacon = false;
@@ -1629,7 +1627,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_BEACON_ENABLED);
if (sdata->wdev.cac_started) {
- chandef = link_conf->chandef;
+ chandef = link_conf->chanreq.oper;
wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
@@ -1829,7 +1827,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
if (params->supported_rates &&
params->supported_rates_len) {
- ieee80211_parse_bitrates(link->conf->chandef.width,
+ ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
sband, params->supported_rates,
params->supported_rates_len,
&link_sta->pub->supp_rates[sband->band]);
@@ -1869,6 +1867,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
sband->band);
}
+ ieee80211_sta_set_rx_nss(link_sta);
+
return ret;
}
@@ -1942,6 +1942,9 @@ static int sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
}
+ if (mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ sta->sta.spp_amsdu = set & BIT(NL80211_STA_FLAG_SPP_AMSDU);
+
/* mark TDLS channel switch support, if the AP allows it */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
!sdata->deflink.u.mgd.tdls_chan_switch_prohibited &&
@@ -2093,7 +2096,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (params->mac)
return sta_info_destroy_addr_bss(sdata, params->mac);
- sta_info_flush(sdata);
+ sta_info_flush(sdata, params->link_id);
return 0;
}
@@ -2600,6 +2603,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
const struct mesh_setup *setup)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
int err;
@@ -2616,7 +2620,7 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -2659,7 +2663,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
return -EINVAL;
if (params->basic_rates) {
- if (!ieee80211_parse_bitrates(link->conf->chandef.width,
+ if (!ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
wiphy->bands[sband->band],
params->basic_rates,
params->basic_rates_len,
@@ -3081,7 +3085,7 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy,
if (local->ops->get_txpower)
return drv_get_txpower(local, sdata, dbm);
- if (!local->use_chanctx)
+ if (local->emulate_chanctx)
*dbm = local->hw.conf.power_level;
else
*dbm = sdata->vif.bss_conf.txpower;
@@ -3174,7 +3178,7 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
* the new value until we associate.
*/
if (!sdata->u.mgd.associated ||
- link->conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ link->conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return 0;
ap = sdata->vif.cfg.ap_addr;
@@ -3329,9 +3333,11 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
* so at a basic rate so that all clients can receive it.
*/
if (rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) &&
- sdata->vif.bss_conf.chandef.chan) {
+ sdata->vif.bss_conf.chanreq.oper.chan) {
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- enum nl80211_band band = sdata->vif.bss_conf.chandef.chan->band;
+ enum nl80211_band band;
+
+ band = sdata->vif.bss_conf.chanreq.oper.chan->band;
if (!(mask->control[band].legacy & basic_rates))
return -EINVAL;
@@ -3383,6 +3389,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
u32 cac_time_ms)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
struct ieee80211_local *local = sdata->local;
int err;
@@ -3397,7 +3404,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
goto out_unlock;
@@ -3540,13 +3547,24 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
return new_beacon;
}
-void ieee80211_csa_finish(struct ieee80211_vif *vif)
+void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
rcu_read_lock();
+ link_data = rcu_dereference(sdata->link[link_id]);
+ if (WARN_ON(!link_data)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /* TODO: MBSSID with MLO changes */
if (vif->mbssid_tx_vif == vif) {
/* Trigger ieee80211_csa_finish() on the non-transmitting
* interfaces when channel switch is received on
@@ -3565,7 +3583,7 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
&iter->deflink.csa_finalize_work);
}
}
- wiphy_work_queue(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
+ wiphy_work_queue(local->hw.wiphy, &link_data->csa_finalize_work);
rcu_read_unlock();
}
@@ -3583,20 +3601,21 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
}
EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
-static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_after_csa_beacon(struct ieee80211_link_data *link_data,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -EINVAL;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
- sdata->deflink.u.ap.next_beacon,
+ err = ieee80211_assign_beacon(sdata, link_data,
+ link_data->u.ap.next_beacon,
NULL, NULL, changed);
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
if (err < 0)
return err;
@@ -3625,6 +3644,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
{
struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_bss_conf *link_conf = link_data->conf;
u64 changed = 0;
int err;
@@ -3646,25 +3666,19 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
if (link_data->reserved_ready)
return 0;
- return ieee80211_link_use_reserved_context(&sdata->deflink);
+ return ieee80211_link_use_reserved_context(link_data);
}
- if (!cfg80211_chandef_identical(&link_data->conf->chandef,
- &link_data->csa_chandef))
+ if (!cfg80211_chandef_identical(&link_conf->chanreq.oper,
+ &link_data->csa_chanreq.oper))
return -EINVAL;
- sdata->vif.bss_conf.csa_active = false;
+ link_conf->csa_active = false;
- err = ieee80211_set_after_csa_beacon(sdata, &changed);
+ err = ieee80211_set_after_csa_beacon(link_data, &changed);
if (err)
return err;
- if (sdata->vif.bss_conf.eht_puncturing != sdata->vif.bss_conf.csa_punct_bitmap) {
- sdata->vif.bss_conf.eht_puncturing =
- sdata->vif.bss_conf.csa_punct_bitmap;
- changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-
ieee80211_link_info_change_notify(sdata, link_data, changed);
if (link_data->csa_block_tx) {
@@ -3677,9 +3691,8 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
if (err)
return err;
- cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chandef,
- link_data->link_id,
- link_data->conf->eht_puncturing);
+ cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chanreq.oper,
+ link_data->link_id);
return 0;
}
@@ -3689,7 +3702,8 @@ static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
struct ieee80211_sub_if_data *sdata = link_data->sdata;
if (__ieee80211_csa_finalize(link_data)) {
- sdata_info(sdata, "failed to finalize CSA, disconnecting\n");
+ sdata_info(sdata, "failed to finalize CSA on link %d, disconnecting\n",
+ link_data->link_id);
cfg80211_stop_iface(sdata->local->hw.wiphy, &sdata->wdev,
GFP_KERNEL);
}
@@ -3714,18 +3728,19 @@ void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work)
ieee80211_csa_finalize(link);
}
-static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
+static int ieee80211_set_csa_beacon(struct ieee80211_link_data *link_data,
struct cfg80211_csa_settings *params,
u64 *changed)
{
+ struct ieee80211_sub_if_data *sdata = link_data->sdata;
struct ieee80211_csa_settings csa = {};
int err;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->deflink.u.ap.next_beacon =
+ link_data->u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_after);
- if (!sdata->deflink.u.ap.next_beacon)
+ if (!link_data->u.ap.next_beacon)
return -ENOMEM;
/*
@@ -3751,7 +3766,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
(params->n_counter_offsets_presp >
IEEE80211_MAX_CNTDWN_COUNTERS_NUM)) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return -EINVAL;
}
@@ -3761,11 +3776,11 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
csa.n_counter_offsets_presp = params->n_counter_offsets_presp;
csa.count = params->count;
- err = ieee80211_assign_beacon(sdata, &sdata->deflink,
+ err = ieee80211_assign_beacon(sdata, link_data,
&params->beacon_csa, &csa,
NULL, changed);
if (err < 0) {
- ieee80211_free_next_beacon(&sdata->deflink);
+ ieee80211_free_next_beacon(link_data);
return err;
}
@@ -3812,7 +3827,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
/* changes into another band are not supported */
- if (sdata->vif.bss_conf.chandef.chan->band !=
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band !=
params->chandef.chan->band)
return -EINVAL;
@@ -3860,11 +3875,15 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_csa_settings *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_chan_req chanreq = { .oper = params->chandef };
struct ieee80211_local *local = sdata->local;
struct ieee80211_channel_switch ch_switch;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_data *link_data;
u64 changed = 0;
+ u8 link_id = params->link_id;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3875,16 +3894,23 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (sdata->wdev.cac_started)
return -EBUSY;
- if (cfg80211_chandef_identical(&params->chandef,
- &sdata->vif.bss_conf.chandef))
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return -EINVAL;
+
+ link_data = wiphy_dereference(wiphy, sdata->link[link_id]);
+ if (!link_data)
+ return -ENOLINK;
+
+ link_conf = link_data->conf;
+
+ if (chanreq.oper.punctured && !link_conf->eht_support)
return -EINVAL;
/* don't allow another channel switch if one is already active. */
- if (sdata->vif.bss_conf.csa_active)
+ if (link_conf->csa_active)
return -EBUSY;
- conf = rcu_dereference_protected(sdata->vif.bss_conf.chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ conf = wiphy_dereference(wiphy, link_conf->chanctx_conf);
if (!conf) {
err = -EBUSY;
goto out;
@@ -3901,14 +3927,14 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
ch_switch.timestamp = 0;
ch_switch.device_timestamp = 0;
ch_switch.block_tx = params->block_tx;
- ch_switch.chandef = params->chandef;
+ ch_switch.chandef = chanreq.oper;
ch_switch.count = params->count;
err = drv_pre_channel_switch(sdata, &ch_switch);
if (err)
goto out;
- err = ieee80211_link_reserve_chanctx(&sdata->deflink, &params->chandef,
+ err = ieee80211_link_reserve_chanctx(link_data, &chanreq,
chanctx->mode,
params->radar_required);
if (err)
@@ -3917,44 +3943,38 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
/* if reservation is invalid then this will fail */
err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
/* if there is a color change in progress, abort it */
- if (sdata->vif.bss_conf.color_change_active)
+ if (link_conf->color_change_active)
ieee80211_color_change_abort(sdata);
- err = ieee80211_set_csa_beacon(sdata, params, &changed);
+ err = ieee80211_set_csa_beacon(link_data, params, &changed);
if (err) {
- ieee80211_link_unreserve_chanctx(&sdata->deflink);
+ ieee80211_link_unreserve_chanctx(link_data);
goto out;
}
- if (params->punct_bitmap && !sdata->vif.bss_conf.eht_support)
- goto out;
-
- sdata->deflink.csa_chandef = params->chandef;
- sdata->deflink.csa_block_tx = params->block_tx;
- sdata->vif.bss_conf.csa_active = true;
- sdata->vif.bss_conf.csa_punct_bitmap = params->punct_bitmap;
+ link_data->csa_chanreq = chanreq;
+ link_data->csa_block_tx = params->block_tx;
+ link_conf->csa_active = true;
- if (sdata->deflink.csa_block_tx)
+ if (link_data->csa_block_tx)
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
cfg80211_ch_switch_started_notify(sdata->dev,
- &sdata->deflink.csa_chandef, 0,
- params->count, params->block_tx,
- sdata->vif.bss_conf.csa_punct_bitmap);
+ &link_data->csa_chanreq.oper, 0,
+ params->count, params->block_tx);
if (changed) {
- ieee80211_link_info_change_notify(sdata, &sdata->deflink,
- changed);
- drv_channel_switch_beacon(sdata, &params->chandef);
+ ieee80211_link_info_change_notify(sdata, link_data, changed);
+ drv_channel_switch_beacon(sdata, &link_data->csa_chanreq.oper);
} else {
/* if the beacon didn't change, we can finalize immediately */
- ieee80211_csa_finalize(&sdata->deflink);
+ ieee80211_csa_finalize(link_data);
}
out:
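For orientation, the per-link switch above keeps the classic CSA sequence: refuse if a switch is already active, let the driver veto, reserve the target channel context, install the countdown beacon, then notify userspace or finalize immediately. A compilable, heavily reduced sketch of that control flow; every type and helper here is an illustrative stub, not mac80211 API:

/* Condensed, compilable sketch of the per-link switch sequence;
 * all names are illustrative stubs.
 */
#include <stdbool.h>
#include <errno.h>

struct toy_link { bool csa_active; };
struct toy_chan_req { int center_freq; };

static int toy_drv_pre_channel_switch(struct toy_link *l,
				      const struct toy_chan_req *r)
{ (void)l; (void)r; return 0; }	/* driver hook: may veto */

static int toy_reserve_chanctx(struct toy_link *l,
			       const struct toy_chan_req *r)
{ (void)l; (void)r; return 0; }	/* reserve target channel context */

static void toy_unreserve_chanctx(struct toy_link *l) { (void)l; }

static int toy_set_csa_beacon(struct toy_link *l)
{ (void)l; return 0; }		/* install the countdown beacon */

static int toy_channel_switch(struct toy_link *link,
			      const struct toy_chan_req *chanreq)
{
	int err;

	if (link->csa_active)	/* only one switch at a time */
		return -EBUSY;

	err = toy_drv_pre_channel_switch(link, chanreq);
	if (err)
		return err;

	err = toy_reserve_chanctx(link, chanreq);
	if (err)
		return err;

	err = toy_set_csa_beacon(link);
	if (err) {
		toy_unreserve_chanctx(link);
		return err;
	}

	link->csa_active = true;	/* finalized later from beacon work */
	return 0;
}

int main(void)
{
	struct toy_link link = { .csa_active = false };
	struct toy_chan_req req = { .center_freq = 5745 };

	return toy_channel_switch(&link, &req);
}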
@@ -4204,15 +4224,12 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
if (chanctx_conf) {
- *chandef = link->conf->chandef;
+ *chandef = link->conf->chanreq.oper;
ret = 0;
} else if (local->open_count > 0 &&
local->open_count == local->monitors &&
sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- if (local->use_chanctx)
- *chandef = local->monitor_chandef;
- else
- *chandef = local->_oper_chandef;
+ *chandef = local->monitor_chanreq.oper;
ret = 0;
}
out:
@@ -4260,12 +4277,13 @@ static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_link_data *link;
+ struct ieee80211_chan_req chanreq = { .oper = *chandef };
int ret;
u64 changed = 0;
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_change_bandwidth(link, chandef, &changed);
+ ret = ieee80211_link_change_chanreq(link, &chanreq, &changed);
if (ret == 0)
ieee80211_link_info_change_notify(sdata, link, changed);
@@ -4747,7 +4765,7 @@ EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);
void
ieee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
- u64 color_bitmap, gfp_t gfp)
+ u64 color_bitmap)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_link_data *link = &sdata->deflink;
@@ -4966,6 +4984,17 @@ static int ieee80211_set_hw_timestamp(struct wiphy *wiphy,
return local->ops->set_hw_timestamp(&local->hw, &sdata->vif, hwts);
}
+static int
+ieee80211_set_ttlm(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+ return ieee80211_req_neg_ttlm(sdata, params);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -5078,4 +5107,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.mod_link_station = ieee80211_mod_link_station,
.del_link_station = ieee80211_del_link_station,
.set_hw_timestamp = ieee80211_set_hw_timestamp,
+ .set_ttlm = ieee80211_set_ttlm,
};
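Hooking up the new operation is the usual two-step: implement the handler, then add one designated initializer to the const ops table. A reduced user-space sketch of the same pattern, with stand-in names:

/* Sketch of the "const table of ops + designated initializer" pattern
 * used for .set_ttlm above; types and names are illustrative.
 */
#include <stdio.h>

struct toy_dev { const char *name; };

struct toy_ops {
	int (*start)(struct toy_dev *dev);
	int (*set_ttlm)(struct toy_dev *dev, int map);	/* newly added op */
};

static int toy_start(struct toy_dev *dev)
{
	printf("%s: start\n", dev->name);
	return 0;
}

static int toy_set_ttlm(struct toy_dev *dev, int map)
{
	printf("%s: set TID-to-link map 0x%x\n", dev->name, map);
	return 0;
}

static const struct toy_ops ops = {
	.start    = toy_start,
	.set_ttlm = toy_set_ttlm,	/* one new line in the const table */
};

int main(void)
{
	struct toy_dev dev = { .name = "toy0" };

	ops.start(&dev);
	return ops.set_ttlm(&dev, 0x3);
}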
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index ef4c2cebc080..38acdc458c7c 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* mac80211 - channel management
- * Copyright 2020 - 2022 Intel Corporation
+ * Copyright 2020 - 2024 Intel Corporation
*/
#include <linux/nl80211.h>
@@ -81,87 +81,122 @@ ieee80211_link_get_chanctx(struct ieee80211_link_data *link)
return container_of(conf, struct ieee80211_chanctx, conf);
}
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b)
+{
+ if (!cfg80211_chandef_identical(&a->oper, &b->oper))
+ return false;
+ if (!a->ap.chan && !b->ap.chan)
+ return true;
+ return cfg80211_chandef_identical(&a->ap, &b->ap);
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanreq_compatible(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct cfg80211_chan_def *compat;
+
+ if (a->ap.chan && b->ap.chan &&
+ !cfg80211_chandef_identical(&a->ap, &b->ap))
+ return NULL;
+
+ compat = cfg80211_chandef_compatible(&a->oper, &b->oper);
+ if (!compat)
+ return NULL;
+
+ /* Note: later code assumes this always fills & returns tmp if compat */
+ tmp->oper = *compat;
+ tmp->ap = a->ap.chan ? a->ap : b->ap;
+ return tmp;
+}
+
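The contract here: two requests are identical only if their operating chandefs match and their AP parts are both absent or identical; they are compatible if the AP parts don't conflict and the operating chandefs are mutually compatible, with the merged result written to caller-provided tmp storage. A toy model of that contract, deliberately reducing "compatible" to same-center/containing-width (the real cfg80211_chandef_compatible() is far more involved):

/* Toy model of the identical/compatible contract above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_chandef { int center; int width; bool set; };
struct toy_chanreq { struct toy_chandef oper; struct toy_chandef ap; };

static bool toy_identical(const struct toy_chandef *a,
			  const struct toy_chandef *b)
{
	return a->center == b->center && a->width == b->width;
}

/* return the wider of two defs if one contains the other, else NULL */
static const struct toy_chandef *
toy_compatible(const struct toy_chandef *a, const struct toy_chandef *b)
{
	if (a->center != b->center)
		return NULL;
	return a->width >= b->width ? a : b;
}

static const struct toy_chanreq *
toy_chanreq_compatible(const struct toy_chanreq *a,
		       const struct toy_chanreq *b,
		       struct toy_chanreq *tmp)
{
	const struct toy_chandef *compat;

	if (a->ap.set && b->ap.set && !toy_identical(&a->ap, &b->ap))
		return NULL;

	compat = toy_compatible(&a->oper, &b->oper);
	if (!compat)
		return NULL;

	tmp->oper = *compat;			/* merged operating def */
	tmp->ap = a->ap.set ? a->ap : b->ap;	/* keep whichever AP part exists */
	return tmp;
}

int main(void)
{
	struct toy_chanreq a = { .oper = { 5180, 40, true } };
	struct toy_chanreq b = { .oper = { 5180, 80, true } };
	struct toy_chanreq tmp;

	if (toy_chanreq_compatible(&a, &b, &tmp))
		printf("compatible, merged width %d\n", tmp.oper.width);
	return 0;
}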
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_compatible(struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret;
+ struct ieee80211_chan_req tmp2;
+
+ *tmp = (struct ieee80211_chan_req){
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+
+ ret = ieee80211_chanreq_compatible(tmp, req, &tmp2);
+ if (!ret)
+ return NULL;
+ *tmp = *ret;
+ return tmp;
+}
+
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_reserved_chanreq(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->reserved_links,
- reserved_chanctx_list) {
- if (!compat)
- compat = &link->reserved_chandef;
+ if (WARN_ON(!req))
+ return NULL;
- compat = cfg80211_chandef_compatible(&link->reserved_chandef,
- compat);
- if (!compat)
+ list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) {
+ req = ieee80211_chanreq_compatible(&link->reserved, req, tmp);
+ if (!req)
break;
}
- return compat;
+ return req;
}
-static const struct cfg80211_chan_def *
+static const struct ieee80211_chan_req *
ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
+ const struct ieee80211_chan_req *compat,
+ struct ieee80211_chan_req *tmp)
{
struct ieee80211_link_data *link;
+ const struct ieee80211_chan_req *comp_def = compat;
lockdep_assert_wiphy(local->hw.wiphy);
- list_for_each_entry(link, &ctx->assigned_links,
- assigned_chanctx_list) {
+ list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) {
struct ieee80211_bss_conf *link_conf = link->conf;
if (link->reserved_chanctx)
continue;
- if (!compat)
- compat = &link_conf->chandef;
-
- compat = cfg80211_chandef_compatible(
- &link_conf->chandef, compat);
- if (!compat)
+ comp_def = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ comp_def, tmp);
+ if (!comp_def)
break;
}
- return compat;
-}
-
-static const struct cfg80211_chan_def *
-ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *compat)
-{
- lockdep_assert_wiphy(local->hw.wiphy);
-
- compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
- if (!compat)
- return NULL;
-
- return compat;
+ return comp_def;
}
static bool
-ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- const struct cfg80211_chan_def *def)
+ieee80211_chanctx_can_reserve(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req)
{
+ struct ieee80211_chan_req tmp;
+
lockdep_assert_wiphy(local->hw.wiphy);
- if (ieee80211_chanctx_combined_chandef(local, ctx, def))
- return true;
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
+ return false;
+
+ if (!ieee80211_chanctx_non_reserved_chandef(local, ctx, req, &tmp))
+ return false;
if (!list_empty(&ctx->reserved_links) &&
- ieee80211_chanctx_reserved_chandef(local, ctx, def))
+ ieee80211_chanctx_reserved_chanreq(local, ctx, req, &tmp))
return true;
return false;
@@ -169,7 +204,7 @@ ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -186,8 +221,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
- chandef))
+ if (!ieee80211_chanctx_can_reserve(local, ctx, chanreq))
continue;
return ctx;
@@ -202,7 +236,7 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
enum ieee80211_sta_rx_bandwidth width;
struct link_sta_info *link_sta;
- link_sta = rcu_dereference(sta->link[link_id]);
+ link_sta = wiphy_dereference(sta->local->hw.wiphy, sta->link[link_id]);
/* no effect if this STA has no presence on this link */
if (!link_sta)
@@ -240,9 +274,10 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta,
}
static enum nl80211_chan_width
-ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
- unsigned int link_id)
+ieee80211_get_max_required_bw(struct ieee80211_link_data *link)
{
+ struct ieee80211_sub_if_data *sdata = link->sdata;
+ unsigned int link_id = link->link_id;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
struct sta_info *sta;
@@ -258,31 +293,25 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
}
static enum nl80211_chan_width
-ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
+ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx,
+ struct ieee80211_link_data *rsvd_for)
{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_vif *vif = &sdata->vif;
- int link_id;
- rcu_read_lock();
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ for_each_sdata_link(local, link) {
enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
- struct ieee80211_link_data *link =
- rcu_dereference(sdata->link[link_id]);
-
- if (!link)
- continue;
if (link != rsvd_for &&
rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
continue;
- switch (vif->type) {
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
- width = ieee80211_get_max_required_bw(sdata, link_id);
+ width = ieee80211_get_max_required_bw(link);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -290,8 +319,8 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
* point, so take the width from the chandef, but
* account also for TDLS peers
*/
- width = max(link->conf->chandef.width,
- ieee80211_get_max_required_bw(sdata, link_id));
+ width = max(link->conf->chanreq.oper.width,
+ ieee80211_get_max_required_bw(link));
break;
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_NAN:
@@ -299,7 +328,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_OCB:
- width = link->conf->chandef.width;
+ width = link->conf->chanreq.oper.width;
break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_UNSPECIFIED:
@@ -312,40 +341,13 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
max_bw = max(max_bw, width);
}
- rcu_read_unlock();
-
- return max_bw;
-}
-
-static enum nl80211_chan_width
-ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
- struct ieee80211_chanctx *ctx,
- struct ieee80211_link_data *rsvd_for)
-{
- struct ieee80211_sub_if_data *sdata;
- enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- enum nl80211_chan_width width;
-
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx,
- rsvd_for);
-
- max_bw = max(max_bw, width);
- }
/* use the configured bandwidth in case of monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
max_bw = max(max_bw, ctx->conf.def.width);
- rcu_read_unlock();
-
return max_bw;
}
@@ -382,7 +384,7 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
/* downgrade chandef up to max_bw */
min_def = ctx->conf.def;
while (min_def.width > max_bw)
- ieee80211_chandef_downgrade(&min_def);
+ ieee80211_chandef_downgrade(&min_def, NULL);
if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
return 0;
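The recalculation first derives the widest bandwidth any user of the context needs, then narrows a copy of the context's chandef one step at a time until it fits. A sketch of the downgrade loop, assuming a simplified, strictly ordered width enum (the real ieee80211_chandef_downgrade() also fixes up center frequencies and takes an optional second argument, passed as NULL here):

/* Sketch of the "downgrade until it fits" loop above, with a
 * simplified ordered width enum.
 */
#include <stdio.h>

enum toy_width { W20, W40, W80, W160, W320 };

struct toy_chandef { enum toy_width width; };

static void toy_downgrade(struct toy_chandef *def)
{
	if (def->width > W20)
		def->width--;	/* one step narrower per call */
}

int main(void)
{
	struct toy_chandef min_def = { .width = W320 };
	enum toy_width max_bw = W80;	/* widest bandwidth any link needs */

	while (min_def.width > max_bw)
		toy_downgrade(&min_def);

	printf("min_def width index: %d\n", min_def.width);
	return 0;
}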
@@ -395,7 +397,7 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
}
/* calling this function is assuming that station vif is updated to
- * lates changes by calling ieee80211_link_update_chandef
+ * latest changes by calling ieee80211_link_update_chanreq
*/
static void ieee80211_chan_bw_change(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
@@ -475,10 +477,15 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
static void _ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
struct ieee80211_link_data *rsvd_for)
{
- u32 changed;
+ const struct cfg80211_chan_def *chandef = &chanreq->oper;
+ struct ieee80211_chan_req ctx_req = {
+ .oper = ctx->conf.def,
+ .ap = ctx->conf.ap,
+ };
+ u32 changed = 0;
/* expected to handle only 20/40/80/160/320 channel widths */
switch (chandef->width) {
@@ -500,47 +507,52 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local,
*/
ieee80211_chan_bw_change(local, old_ctx, true);
- if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
+ if (ieee80211_chanreq_identical(&ctx_req, chanreq)) {
ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
return;
}
- WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));
+ WARN_ON(ieee80211_chanctx_refcount(local, ctx) > 1 &&
+ !cfg80211_chandef_compatible(&ctx->conf.def, &chanreq->oper));
ieee80211_remove_wbrf(local, &ctx->conf.def);
+ if (!cfg80211_chandef_identical(&ctx->conf.def, &chanreq->oper)) {
+ if (ctx->conf.def.width != chanreq->oper.width)
+ changed |= IEEE80211_CHANCTX_CHANGE_WIDTH;
+ if (ctx->conf.def.punctured != chanreq->oper.punctured)
+ changed |= IEEE80211_CHANCTX_CHANGE_PUNCTURING;
+ }
+ if (!cfg80211_chandef_identical(&ctx->conf.ap, &chanreq->ap))
+ changed |= IEEE80211_CHANCTX_CHANGE_AP;
ctx->conf.def = *chandef;
+ ctx->conf.ap = chanreq->ap;
/* check if min chanctx also changed */
- changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
- _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
+ changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
ieee80211_add_wbrf(local, &ctx->conf.def);
drv_change_chanctx(local, ctx, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
- }
-
- /* check is BW wider */
+ /* check if BW is wider */
ieee80211_chan_bw_change(local, old_ctx, false);
}
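Rather than always reporting a width change, the rewrite computes the changed bitmap field by field from the old and new definitions. A minimal sketch of that diffing pattern, with stand-in flag names:

/* Minimal sketch of building a "what changed" bitmap by comparing
 * old and new channel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_CHANGE_WIDTH      0x1
#define TOY_CHANGE_PUNCTURING 0x2
#define TOY_CHANGE_AP         0x4

struct toy_def { int width; unsigned int punctured; };

static unsigned int toy_changed(const struct toy_def *old_def,
				const struct toy_def *new_def,
				bool ap_changed)
{
	unsigned int changed = 0;

	if (old_def->width != new_def->width)
		changed |= TOY_CHANGE_WIDTH;
	if (old_def->punctured != new_def->punctured)
		changed |= TOY_CHANGE_PUNCTURING;
	if (ap_changed)
		changed |= TOY_CHANGE_AP;

	return changed;
}

int main(void)
{
	struct toy_def old_def = { .width = 80, .punctured = 0 };
	struct toy_def new_def = { .width = 160, .punctured = 0x2 };

	printf("changed bitmap: 0x%x\n", toy_changed(&old_def, &new_def, false));
	return 0;
}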
static void ieee80211_change_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_chanctx *old_ctx,
- const struct cfg80211_chan_def *chandef)
+ const struct ieee80211_chan_req *chanreq)
{
- _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL);
+ _ieee80211_change_chanctx(local, ctx, old_ctx, chanreq, NULL);
}
static struct ieee80211_chanctx *
ieee80211_find_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
+ struct ieee80211_chan_req tmp;
struct ieee80211_chanctx *ctx;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -549,7 +561,7 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
return NULL;
list_for_each_entry(ctx, &local->chanctx_list, list) {
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE)
continue;
@@ -557,12 +569,12 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
continue;
- compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
+ compat = ieee80211_chanctx_compatible(ctx, chanreq, &tmp);
if (!compat)
continue;
- compat = ieee80211_chanctx_reserved_chandef(local, ctx,
- compat);
+ compat = ieee80211_chanctx_reserved_chanreq(local, ctx,
+ compat, &tmp);
if (!compat)
continue;
@@ -576,26 +588,14 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (link && link->radar_required) {
- rcu_read_unlock();
- return true;
- }
- }
+ for_each_sdata_link(local, link) {
+ if (link->radar_required)
+ return true;
}
- rcu_read_unlock();
return false;
}
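Several hunks in this file collapse the nested interfaces-then-link-IDs-then-rcu_dereference walk into one for_each_sdata_link() iteration that yields valid links directly under the wiphy mutex. A rough user-space imitation of such a two-level iterator macro (the real definition in ieee80211_i.h differs in detail):

/* Rough shape of a two-level "for each link of each interface"
 * iterator macro; a simplified user-space imitation, not the real
 * for_each_sdata_link().
 */
#include <stdio.h>

#define MAX_IFACES 2
#define MAX_LINKS  3

struct toy_link { int id; int radar_required; };
struct toy_iface { struct toy_link *link[MAX_LINKS]; };

struct toy_iface *ifaces[MAX_IFACES];

#define for_each_toy_link(_link)					\
	for (int _i = 0; _i < MAX_IFACES; _i++)				\
		for (int _l = 0; _l < MAX_LINKS; _l++)			\
			if (ifaces[_i] &&				\
			    ((_link) = ifaces[_i]->link[_l]))

static int toy_radar_required(void)
{
	struct toy_link *link;

	for_each_toy_link(link) {
		if (link->radar_required)
			return 1;	/* flat loop, single exit */
	}
	return 0;
}

int main(void)
{
	static struct toy_link l = { .id = 0, .radar_required = 1 };
	static struct toy_iface i = { .link = { &l } };

	ifaces[0] = &i;
	printf("radar required: %d\n", toy_radar_required());
	return 0;
}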
@@ -605,43 +605,24 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- bool required = false;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- unsigned int link_id;
-
- if (!ieee80211_sdata_running(sdata))
+ for_each_sdata_link(local, link) {
+ if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
continue;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != conf)
- continue;
- if (!link->radar_required)
- continue;
- required = true;
- break;
- }
-
- if (required)
- break;
+ if (!link->radar_required)
+ continue;
+ return true;
}
- rcu_read_unlock();
- return required;
+ return false;
}
static struct ieee80211_chanctx *
ieee80211_alloc_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -654,7 +635,8 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
INIT_LIST_HEAD(&ctx->assigned_links);
INIT_LIST_HEAD(&ctx->reserved_links);
- ctx->conf.def = *chandef;
+ ctx->conf.def = chanreq->oper;
+ ctx->conf.ap = chanreq->ap;
ctx->conf.rx_chains_static = 1;
ctx->conf.rx_chains_dynamic = 1;
ctx->mode = mode;
@@ -674,23 +656,15 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
ieee80211_add_wbrf(local, &ctx->conf.def);
- if (!local->use_chanctx)
- local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
-
/* turn idle off *before* setting channel -- some drivers need that */
changed = ieee80211_idle_off(local);
if (changed)
ieee80211_hw_config(local, changed);
- if (!local->use_chanctx) {
- local->_oper_chandef = ctx->conf.def;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- err = drv_add_chanctx(local, ctx);
- if (err) {
- ieee80211_recalc_idle(local);
- return err;
- }
+ err = drv_add_chanctx(local, ctx);
+ if (err) {
+ ieee80211_recalc_idle(local);
+ return err;
}
return 0;
@@ -698,7 +672,7 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_chanctx *ctx;
@@ -706,7 +680,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local,
lockdep_assert_wiphy(local->hw.wiphy);
- ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+ ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -725,32 +699,7 @@ static void ieee80211_del_chanctx(struct ieee80211_local *local,
{
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx) {
- struct cfg80211_chan_def *chandef = &local->_oper_chandef;
- /* S1G doesn't have 20MHz, so get the correct width for the
- * current channel.
- */
- if (chandef->chan->band == NL80211_BAND_S1GHZ)
- chandef->width =
- ieee80211_s1g_channel_width(chandef->chan);
- else
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = chandef->chan->center_freq;
- chandef->freq1_offset = chandef->chan->freq_offset;
- chandef->center_freq2 = 0;
-
- /* NOTE: Disabling radar is only valid here for
- * single channel context. To be sure, check it ...
- */
- WARN_ON(local->hw.conf.radar_enabled &&
- !list_empty(&local->chanctx_list));
-
- local->hw.conf.radar_enabled = false;
-
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- } else {
- drv_remove_chanctx(local, ctx);
- }
+ drv_remove_chanctx(local, ctx);
ieee80211_recalc_idle(local);
@@ -773,64 +722,53 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
struct ieee80211_chanctx_conf *conf = &ctx->conf;
- struct ieee80211_sub_if_data *sdata;
- const struct cfg80211_chan_def *compat = NULL;
+ const struct ieee80211_chan_req *compat = NULL;
+ struct ieee80211_link_data *link;
+ struct ieee80211_chan_req tmp;
struct sta_info *sta;
lockdep_assert_wiphy(local->hw.wiphy);
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- int link_id;
+ for_each_sdata_link(local, link) {
+ struct ieee80211_bss_conf *link_conf;
- if (!ieee80211_sdata_running(sdata))
+ if (link->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
+ link_conf = link->conf;
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(sdata->vif.link_conf[link_id]);
-
- if (!link_conf)
- continue;
-
- if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
- continue;
+ if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+ continue;
- if (!compat)
- compat = &link_conf->chandef;
+ if (!compat)
+ compat = &link_conf->chanreq;
- compat = cfg80211_chandef_compatible(&link_conf->chandef,
- compat);
- if (WARN_ON_ONCE(!compat))
- break;
- }
+ compat = ieee80211_chanreq_compatible(&link_conf->chanreq,
+ compat, &tmp);
+ if (WARN_ON_ONCE(!compat))
+ return;
}
- if (WARN_ON_ONCE(!compat)) {
- rcu_read_unlock();
+ if (WARN_ON_ONCE(!compat))
return;
- }
/* TDLS peers can sometimes affect the chandef width */
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
+ struct ieee80211_chan_req tdls_chanreq = {};
if (!sta->uploaded ||
!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
!test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
!sta->tdls_chandef.chan)
continue;
- compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
- compat);
+ tdls_chanreq.oper = sta->tdls_chandef;
+
+ /* note this always fills and returns &tmp if compat */
+ compat = ieee80211_chanreq_compatible(&tdls_chanreq,
+ compat, &tmp);
if (WARN_ON_ONCE(!compat))
- break;
+ return;
}
- rcu_read_unlock();
-
- if (!compat)
- return;
ieee80211_change_chanctx(local, ctx, ctx, compat);
}
@@ -849,11 +787,6 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
chanctx->conf.radar_enabled = radar_enabled;
- if (!local->use_chanctx) {
- local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- }
-
drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}
@@ -924,23 +857,19 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata;
u8 rx_chains_static, rx_chains_dynamic;
+ struct ieee80211_link_data *link;
lockdep_assert_wiphy(local->hw.wiphy);
rx_chains_static = 1;
rx_chains_dynamic = 1;
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ for_each_sdata_link(local, link) {
u8 needed_static, needed_dynamic;
- unsigned int link_id;
- if (!ieee80211_sdata_running(sdata))
- continue;
-
- switch (sdata->vif.type) {
+ switch (link->sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- if (!sdata->u.mgd.associated)
+ if (!link->sdata->u.mgd.associated)
continue;
break;
case NL80211_IFTYPE_AP:
@@ -952,59 +881,38 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
continue;
}
- for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
- struct ieee80211_link_data *link;
-
- link = rcu_dereference(sdata->link[link_id]);
-
- if (!link)
- continue;
-
- if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
- continue;
-
- switch (link->smps_mode) {
- default:
- WARN_ONCE(1, "Invalid SMPS mode %d\n",
- link->smps_mode);
- fallthrough;
- case IEEE80211_SMPS_OFF:
- needed_static = link->needed_rx_chains;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_DYNAMIC:
- needed_static = 1;
- needed_dynamic = link->needed_rx_chains;
- break;
- case IEEE80211_SMPS_STATIC:
- needed_static = 1;
- needed_dynamic = 1;
- break;
- }
+ if (rcu_access_pointer(link->conf->chanctx_conf) != &chanctx->conf)
+ continue;
- rx_chains_static = max(rx_chains_static, needed_static);
- rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
+ switch (link->smps_mode) {
+ default:
+ WARN_ONCE(1, "Invalid SMPS mode %d\n",
+ link->smps_mode);
+ fallthrough;
+ case IEEE80211_SMPS_OFF:
+ needed_static = link->needed_rx_chains;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ needed_static = 1;
+ needed_dynamic = link->needed_rx_chains;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ needed_static = 1;
+ needed_dynamic = 1;
+ break;
}
+
+ rx_chains_static = max(rx_chains_static, needed_static);
+ rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
}
/* Disable SMPS for the monitor interface */
- sdata = rcu_dereference(local->monitor_sdata);
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
if (sdata &&
rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &chanctx->conf)
rx_chains_dynamic = rx_chains_static = local->rx_chains;
- rcu_read_unlock();
-
- if (!local->use_chanctx) {
- if (rx_chains_static > 1)
- local->smps_mode = IEEE80211_SMPS_OFF;
- else if (rx_chains_dynamic > 1)
- local->smps_mode = IEEE80211_SMPS_DYNAMIC;
- else
- local->smps_mode = IEEE80211_SMPS_STATIC;
- ieee80211_hw_config(local, 0);
- }
-
if (rx_chains_static == chanctx->conf.rx_chains_static &&
rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
return;
@@ -1043,17 +951,16 @@ __ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
if (clear)
conf = NULL;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
rcu_assign_pointer(vlan_conf->chanctx_conf, conf);
}
- rcu_read_unlock();
}
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -1103,7 +1010,7 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link)
}
int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode,
bool radar_required)
{
@@ -1114,13 +1021,13 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
lockdep_assert_wiphy(local->hw.wiphy);
curr_ctx = ieee80211_link_get_chanctx(link);
- if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx)
+ if (curr_ctx && !local->ops->switch_vif_chanctx)
return -EOPNOTSUPP;
- new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode);
if (!new_ctx) {
if (ieee80211_can_create_new_chanctx(local)) {
- new_ctx = ieee80211_new_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_new_chanctx(local, chanreq, mode);
if (IS_ERR(new_ctx))
return PTR_ERR(new_ctx);
} else {
@@ -1174,7 +1081,7 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
!list_empty(&curr_ctx->reserved_links))
return -EBUSY;
- new_ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+ new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode);
if (!new_ctx)
return -ENOMEM;
@@ -1192,7 +1099,7 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links);
link->reserved_chanctx = new_ctx;
- link->reserved_chandef = *chandef;
+ link->reserved = *chanreq;
link->reserved_radar_required = radar_required;
link->reserved_ready = false;
@@ -1231,29 +1138,28 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link)
}
static void
-ieee80211_link_update_chandef(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef)
+ieee80211_link_update_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
unsigned int link_id = link->link_id;
struct ieee80211_sub_if_data *vlan;
- link->conf->chandef = *chandef;
+ link->conf->chanreq = *chanreq;
if (sdata->vif.type != NL80211_IFTYPE_AP)
return;
- rcu_read_lock();
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
struct ieee80211_bss_conf *vlan_conf;
- vlan_conf = rcu_dereference(vlan->vif.link_conf[link_id]);
+ vlan_conf = wiphy_dereference(sdata->local->hw.wiphy,
+ vlan->vif.link_conf[link_id]);
if (WARN_ON(!vlan_conf))
continue;
- vlan_conf->chandef = *chandef;
+ vlan_conf->chanreq = *chanreq;
}
- rcu_read_unlock();
}
static int
@@ -1264,7 +1170,8 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
struct ieee80211_local *local = sdata->local;
struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
u64 changed = 0;
int err;
@@ -1286,17 +1193,18 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
- _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link);
+ _ieee80211_change_chanctx(local, new_ctx, old_ctx, chanreq, link);
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1344,7 +1252,8 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx *old_ctx, *new_ctx;
- const struct cfg80211_chan_def *chandef;
+ const struct ieee80211_chan_req *chanreq;
+ struct ieee80211_chan_req tmp;
int err;
old_ctx = ieee80211_link_get_chanctx(link);
@@ -1363,12 +1272,13 @@ ieee80211_link_use_reserved_assign(struct ieee80211_link_data *link)
IEEE80211_CHANCTX_REPLACES_OTHER))
return -EINVAL;
- chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
- &link->reserved_chandef);
- if (WARN_ON(!chandef))
+ chanreq = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+ &link->reserved,
+ &tmp);
+ if (WARN_ON(!chanreq))
return -EINVAL;
- ieee80211_change_chanctx(local, new_ctx, new_ctx, chandef);
+ ieee80211_change_chanctx(local, new_ctx, new_ctx, chanreq);
list_del(&link->reserved_chanctx_list);
link->reserved_chanctx = NULL;
@@ -1412,24 +1322,6 @@ ieee80211_link_has_in_place_reservation(struct ieee80211_link_data *link)
return true;
}
-static int ieee80211_chsw_switch_hwconf(struct ieee80211_local *local,
- struct ieee80211_chanctx *new_ctx)
-{
- const struct cfg80211_chan_def *chandef;
-
- lockdep_assert_wiphy(local->hw.wiphy);
-
- chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL);
- if (WARN_ON(!chandef))
- return -EINVAL;
-
- local->hw.conf.radar_enabled = new_ctx->conf.radar_enabled;
- local->_oper_chandef = *chandef;
- ieee80211_hw_config(local, 0);
-
- return 0;
-}
-
static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local,
int n_vifs)
{
@@ -1518,7 +1410,6 @@ err:
static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
{
struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
- struct ieee80211_chanctx *new_ctx = NULL;
int err, n_assigned, n_reserved, n_ready;
int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
@@ -1551,9 +1442,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
goto err;
}
- if (!local->use_chanctx)
- new_ctx = ctx;
-
n_ctx++;
n_assigned = 0;
@@ -1607,9 +1495,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
if (WARN_ON(n_ctx == 0) ||
WARN_ON(n_vifs_switch == 0 &&
n_vifs_assign == 0 &&
- n_vifs_ctxless == 0) ||
- WARN_ON(n_ctx > 1 && !local->use_chanctx) ||
- WARN_ON(!new_ctx && !local->use_chanctx)) {
+ n_vifs_ctxless == 0)) {
err = -EINVAL;
goto err;
}
@@ -1619,20 +1505,14 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
* reservations and driver capabilities.
*/
- if (local->use_chanctx) {
- if (n_vifs_switch > 0) {
- err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
- if (err)
- goto err;
- }
+ if (n_vifs_switch > 0) {
+ err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
+ if (err)
+ goto err;
+ }
- if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
- err = ieee80211_chsw_switch_ctxs(local);
- if (err)
- goto err;
- }
- } else {
- err = ieee80211_chsw_switch_hwconf(local, new_ctx);
+ if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
+ err = ieee80211_chsw_switch_ctxs(local);
if (err)
goto err;
}
@@ -1672,10 +1552,10 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
link->radar_required = link->reserved_radar_required;
- if (link_conf->chandef.width != link->reserved_chandef.width)
+ if (link_conf->chanreq.oper.width != link->reserved.oper.width)
changed = BSS_CHANGED_BANDWIDTH;
- ieee80211_link_update_chandef(link, &link->reserved_chandef);
+ ieee80211_link_update_chanreq(link, &link->reserved);
if (changed)
ieee80211_link_info_change_notify(sdata,
link,
@@ -1810,7 +1690,7 @@ static void __ieee80211_link_release_channel(struct ieee80211_link_data *link)
}
int ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *chanreq,
enum ieee80211_chanctx_mode mode)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
@@ -1823,36 +1703,36 @@ int ieee80211_link_use_channel(struct ieee80211_link_data *link,
if (sdata->vif.active_links &&
!(sdata->vif.active_links & BIT(link->link_id))) {
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
return 0;
}
ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
- chandef,
+ &chanreq->oper,
sdata->wdev.iftype);
if (ret < 0)
goto out;
if (ret > 0)
- radar_detect_width = BIT(chandef->width);
+ radar_detect_width = BIT(chanreq->oper.width);
link->radar_required = ret;
- ret = ieee80211_check_combinations(sdata, chandef, mode,
+ ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode,
radar_detect_width);
if (ret < 0)
goto out;
__ieee80211_link_release_channel(link);
- ctx = ieee80211_find_chanctx(local, chandef, mode);
+ ctx = ieee80211_find_chanctx(local, chanreq, mode);
if (!ctx)
- ctx = ieee80211_new_chanctx(local, chandef, mode);
+ ctx = ieee80211_new_chanctx(local, chanreq, mode);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
ret = ieee80211_assign_link_chanctx(link, ctx);
if (ret) {
@@ -1932,28 +1812,79 @@ int ieee80211_link_use_reserved_context(struct ieee80211_link_data *link)
return 0;
}
-int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed)
+/*
+ * This is similar to ieee80211_chanctx_compatible(), but rechecks
+ * against all the links actually using it (except the one that's
+ * passed, since that one is changing).
+ * This is done in order to allow changes to the AP's bandwidth for
+ * wider bandwidth OFDMA purposes, which wouldn't be treated as
+ * compatible by ieee80211_chanctx_compatible() but is OK if the link
+ * requesting the update is the only one using it.
+ */
+static const struct ieee80211_chan_req *
+ieee80211_chanctx_recheck(struct ieee80211_local *local,
+ struct ieee80211_link_data *skip_link,
+ struct ieee80211_chanctx *ctx,
+ const struct ieee80211_chan_req *req,
+ struct ieee80211_chan_req *tmp)
+{
+ const struct ieee80211_chan_req *ret = req;
+ struct ieee80211_link_data *link;
+
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ for_each_sdata_link(local, link) {
+ if (link == skip_link)
+ continue;
+
+ if (rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->conf->chanreq,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+
+ if (link->reserved_chanctx == ctx) {
+ ret = ieee80211_chanreq_compatible(ret,
+ &link->reserved,
+ tmp);
+ if (!ret)
+ return NULL;
+ }
+ }
+
+ *tmp = *ret;
+ return tmp;
+}
+
+int ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *chanreq,
+ u64 *changed)
{
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_bss_conf *link_conf = link->conf;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *ctx;
- const struct cfg80211_chan_def *compat;
+ const struct ieee80211_chan_req *compat;
+ struct ieee80211_chan_req tmp;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ &chanreq->oper,
IEEE80211_CHAN_DISABLED))
return -EINVAL;
- if (cfg80211_chandef_identical(chandef, &link_conf->chandef))
+ /* for non-HT 20 MHz the rest doesn't matter */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT &&
+ cfg80211_chandef_identical(&chanreq->oper, &link_conf->chanreq.oper))
return 0;
- if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
- link_conf->chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+ /* but you cannot switch to/from it */
+ if (chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT)
return -EINVAL;
conf = rcu_dereference_protected(link_conf->chanctx_conf,
@@ -1963,13 +1894,14 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
ctx = container_of(conf, struct ieee80211_chanctx, conf);
- compat = cfg80211_chandef_compatible(&conf->def, chandef);
+ compat = ieee80211_chanctx_recheck(local, link, ctx, chanreq, &tmp);
if (!compat)
return -EINVAL;
switch (ctx->replace_state) {
case IEEE80211_CHANCTX_REPLACE_NONE:
- if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat))
+ if (!ieee80211_chanctx_reserved_chanreq(local, ctx, compat,
+ &tmp))
return -EBUSY;
break;
case IEEE80211_CHANCTX_WILL_BE_REPLACED:
@@ -1984,7 +1916,7 @@ int ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
break;
}
- ieee80211_link_update_chandef(link, chandef);
+ ieee80211_link_update_chanreq(link, chanreq);
ieee80211_recalc_chanctx_chantype(local, ctx);
@@ -2019,12 +1951,11 @@ void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link)
ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
- rcu_read_lock();
- ap_conf = rcu_dereference(ap->vif.link_conf[link_id]);
- conf = rcu_dereference_protected(ap_conf->chanctx_conf,
- lockdep_is_held(&local->hw.wiphy->mtx));
+ ap_conf = wiphy_dereference(local->hw.wiphy,
+ ap->vif.link_conf[link_id]);
+ conf = wiphy_dereference(local->hw.wiphy,
+ ap_conf->chanctx_conf);
rcu_assign_pointer(link_conf->chanctx_conf, conf);
- rcu_read_unlock();
}
void ieee80211_iter_chan_contexts_atomic(
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
index d49894df2351..49da401c5340 100644
--- a/net/mac80211/debug.h
+++ b/net/mac80211/debug.h
@@ -152,16 +152,17 @@ do { \
else \
_sdata_err((link)->sdata, fmt, ##__VA_ARGS__); \
} while (0)
-#define link_dbg(link, fmt, ...) \
+#define _link_id_dbg(print, sdata, link_id, fmt, ...) \
do { \
- if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \
- _sdata_dbg(1, (link)->sdata, "[link %d] " fmt, \
- (link)->link_id, \
- ##__VA_ARGS__); \
+ if (ieee80211_vif_is_mld(&(sdata)->vif)) \
+ _sdata_dbg(print, sdata, "[link %d] " fmt, \
+ link_id, ##__VA_ARGS__); \
else \
- _sdata_dbg(1, (link)->sdata, fmt, \
- ##__VA_ARGS__); \
+ _sdata_dbg(print, sdata, fmt, ##__VA_ARGS__); \
} while (0)
+#define link_dbg(link, fmt, ...) \
+ _link_id_dbg(1, (link)->sdata, (link)->link_id, \
+ fmt, ##__VA_ARGS__)
#define ht_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_HT_DEBUG, \
@@ -226,6 +227,9 @@ do { \
#define mlme_dbg(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG, \
sdata, fmt, ##__VA_ARGS__)
+#define mlme_link_id_dbg(sdata, link_id, fmt, ...) \
+ _link_id_dbg(MAC80211_MLME_DEBUG, sdata, link_id, \
+ fmt, ##__VA_ARGS__)
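The refactor layers the debug macros: _link_id_dbg() adds the "[link N]" prefix only for MLD interfaces, and link_dbg() plus the new mlme_link_id_dbg() become thin wrappers passing their own print condition through. A user-space imitation of the layering, with printf standing in for _sdata_dbg() and the MLD check reduced to a plain flag:

/* User-space imitation of the layered debug macros above. */
#include <stdio.h>
#include <stdbool.h>

struct toy_sdata { bool is_mld; };

#define _toy_dbg(print, sdata, fmt, ...)				\
	do {								\
		if (print)						\
			printf(fmt, ##__VA_ARGS__);			\
	} while (0)

#define _toy_link_id_dbg(print, sdata, link_id, fmt, ...)		\
	do {								\
		if ((sdata)->is_mld)					\
			_toy_dbg(print, sdata, "[link %d] " fmt,	\
				 link_id, ##__VA_ARGS__);		\
		else							\
			_toy_dbg(print, sdata, fmt, ##__VA_ARGS__);	\
	} while (0)

#define toy_link_dbg(sdata, link_id, fmt, ...)				\
	_toy_link_id_dbg(1, sdata, link_id, fmt, ##__VA_ARGS__)

int main(void)
{
	struct toy_sdata mld = { .is_mld = true };
	struct toy_sdata legacy = { .is_mld = false };

	toy_link_dbg(&mld, 2, "auth on %s\n", "wlan0");
	toy_link_dbg(&legacy, 0, "auth on %s\n", "wlan0");
	return 0;
}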
#define mlme_dbg_ratelimited(sdata, fmt, ...) \
_sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index dce5606ed66d..68596ef78b15 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -997,8 +997,8 @@ static void add_link_files(struct ieee80211_link_data *link,
}
}
-void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
- bool mld_vif)
+static void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
+ bool mld_vif)
{
char buf[10+IFNAMSIZ];
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index b226b1aae88a..a02ec0a413f6 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -11,8 +11,6 @@
#include "ieee80211_i.h"
#ifdef CONFIG_MAC80211_DEBUGFS
-void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata,
- bool mld_vif);
void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
@@ -24,9 +22,6 @@ void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link);
void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link);
void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link);
#else
-static inline void ieee80211_debugfs_add_netdev(
- struct ieee80211_sub_if_data *sdata, bool mld_vif)
-{}
static inline void ieee80211_debugfs_remove_netdev(
struct ieee80211_sub_if_data *sdata)
{}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index eb482fb8c3af..e20c64edb880 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1695,4 +1695,23 @@ int drv_change_sta_links(struct ieee80211_local *local,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links);
+static inline enum ieee80211_neg_ttlm_res
+drv_can_neg_ttlm(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ enum ieee80211_neg_ttlm_res res = NEG_TTLM_RES_REJECT;
+
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_can_neg_ttlm(local, sdata, neg_ttlm);
+ if (local->ops->can_neg_ttlm)
+ res = local->ops->can_neg_ttlm(&local->hw, &sdata->vif,
+ neg_ttlm);
+ trace_drv_neg_ttlm_res(local, sdata, res, neg_ttlm);
+
+ return res;
+}
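drv_can_neg_ttlm() follows the standard driver-ops wrapper shape: sleepable context, bail out if the interface isn't registered with the driver, trace, then call the optional callback, keeping a conservative reject default when the driver doesn't implement it. A reduced sketch of that shape, with all names standing in for the mac80211 driver-ops plumbing:

/* Reduced sketch of the optional-callback-with-default wrapper shape. */
#include <stdbool.h>

enum toy_res { TOY_REJECT, TOY_ACCEPT };

struct toy_drv {
	/* optional: drivers that don't care leave this NULL */
	enum toy_res (*can_neg)(void *priv);
	void *priv;
	bool registered;
};

static enum toy_res toy_can_neg(struct toy_drv *drv)
{
	enum toy_res res = TOY_REJECT;	/* conservative default */

	if (!drv->registered)
		return TOY_REJECT;

	/* trace entry point would go here */
	if (drv->can_neg)
		res = drv->can_neg(drv->priv);
	/* trace result would go here */

	return res;
}

int main(void)
{
	struct toy_drv drv = { .registered = true };

	return toy_can_neg(&drv) == TOY_REJECT ? 0 : 1;
}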
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 749f4ecab990..c3330aea4da3 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright 2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -257,7 +257,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!link_conf))
width = NL80211_CHAN_WIDTH_20_NOHT;
else
- width = link_conf->chandef.width;
+ width = link_conf->chanreq.oper.width;
switch (width) {
default:
@@ -603,6 +603,8 @@ void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id,
if (WARN_ON(!link))
goto out;
+ trace_api_request_smps(sdata->local, sdata, link, smps_mode);
+
if (link->u.mgd.driver_smps_mode == smps_mode)
goto out;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 8f2b445a5ec3..7ace5cdc6c26 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -223,7 +223,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
struct cfg80211_bss *bss;
u64 bss_change;
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq = {};
struct ieee80211_channel *chan;
struct beacon_data *presp;
struct cfg80211_inform_bss bss_meta = {};
@@ -237,7 +237,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
drv_reset_tsf(local, sdata);
if (!ether_addr_equal(ifibss->bssid, bssid))
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
/* if merging, indicate to driver that we leave the old IBSS */
if (sdata->vif.cfg.ibss_joined) {
@@ -257,22 +257,22 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
kfree_rcu(presp, rcu_head);
/* make a copy of the chandef, it could be modified below. */
- chandef = *req_chandef;
- chan = chandef.chan;
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ chanreq.oper = *req_chandef;
+ chan = chanreq.oper.chan;
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10 ||
- chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- chandef.width == NL80211_CHAN_WIDTH_20) {
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_20) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
return;
}
- chandef.width = NL80211_CHAN_WIDTH_20;
- chandef.center_freq1 = chan->center_freq;
+ chanreq.oper.width = NL80211_CHAN_WIDTH_20;
+ chanreq.oper.center_freq1 = chan->center_freq;
/* check again for downgraded chandef */
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper,
NL80211_IFTYPE_ADHOC)) {
sdata_info(sdata,
"Failed to join IBSS, beacons forbidden\n");
@@ -281,7 +281,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &chandef, NL80211_IFTYPE_ADHOC);
+ &chanreq.oper, NL80211_IFTYPE_ADHOC);
if (err < 0) {
sdata_info(sdata,
"Failed to join IBSS, invalid chandef\n");
@@ -295,7 +295,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
radar_required = err;
- if (ieee80211_link_use_channel(&sdata->deflink, &chandef,
+ if (ieee80211_link_use_channel(&sdata->deflink, &chanreq,
ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
IEEE80211_CHANCTX_EXCLUSIVE)) {
@@ -307,7 +307,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(ifibss->bssid, bssid, ETH_ALEN);
presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
- capability, tsf, &chandef,
+ capability, tsf, &chanreq.oper,
&have_higher_than_11mbit, NULL);
if (!presp)
return;
@@ -533,12 +533,12 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
IEEE80211_PRIVACY(ifibss->privacy));
/* XXX: should not really modify cfg80211 data */
if (cbss) {
- cbss->channel = sdata->deflink.csa_chandef.chan;
+ cbss->channel = sdata->deflink.csa_chanreq.oper.chan;
cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
}
}
- ifibss->chandef = sdata->deflink.csa_chandef;
+ ifibss->chandef = sdata->deflink.csa_chanreq.oper;
/* generate the beacon */
return ieee80211_ibss_csa_beacon(sdata, NULL, changed);
@@ -682,7 +682,7 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifibss->incomplete_lock);
while (!list_empty(&ifibss->incomplete_stations)) {
@@ -757,21 +757,22 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
enum nl80211_channel_type ch_type;
int err;
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn = {
+ .mode = IEEE80211_CONN_MODE_HT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_40,
+ };
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- conn_flags = IEEE80211_CONN_DISABLE_VHT;
-
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
fallthrough;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
default:
break;
@@ -783,8 +784,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems,
ifibss->chandef.chan->band,
- vht_cap_info,
- conn_flags, ifibss->bssid, &csa_ie);
+ vht_cap_info, &conn,
+ ifibss->bssid, &csa_ie);
/* can't switch to destination channel, fail */
if (err < 0)
goto disconnect;
@@ -798,7 +799,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
goto disconnect;
params.count = csa_ie.count;
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -857,7 +858,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
ibss_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 0b2b53550bd9..4bec625a84d1 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef IEEE80211_I_H
@@ -370,19 +370,32 @@ enum ieee80211_sta_flags {
IEEE80211_STA_ENABLE_RRM = BIT(15),
};
-typedef u32 __bitwise ieee80211_conn_flags_t;
-
-enum ieee80211_conn_flags {
- IEEE80211_CONN_DISABLE_HT = (__force ieee80211_conn_flags_t)BIT(0),
- IEEE80211_CONN_DISABLE_40MHZ = (__force ieee80211_conn_flags_t)BIT(1),
- IEEE80211_CONN_DISABLE_VHT = (__force ieee80211_conn_flags_t)BIT(2),
- IEEE80211_CONN_DISABLE_80P80MHZ = (__force ieee80211_conn_flags_t)BIT(3),
- IEEE80211_CONN_DISABLE_160MHZ = (__force ieee80211_conn_flags_t)BIT(4),
- IEEE80211_CONN_DISABLE_HE = (__force ieee80211_conn_flags_t)BIT(5),
- IEEE80211_CONN_DISABLE_EHT = (__force ieee80211_conn_flags_t)BIT(6),
- IEEE80211_CONN_DISABLE_320MHZ = (__force ieee80211_conn_flags_t)BIT(7),
+enum ieee80211_conn_mode {
+ IEEE80211_CONN_MODE_S1G,
+ IEEE80211_CONN_MODE_LEGACY,
+ IEEE80211_CONN_MODE_HT,
+ IEEE80211_CONN_MODE_VHT,
+ IEEE80211_CONN_MODE_HE,
+ IEEE80211_CONN_MODE_EHT,
};
+#define IEEE80211_CONN_MODE_HIGHEST IEEE80211_CONN_MODE_EHT
+
+enum ieee80211_conn_bw_limit {
+ IEEE80211_CONN_BW_LIMIT_20,
+ IEEE80211_CONN_BW_LIMIT_40,
+ IEEE80211_CONN_BW_LIMIT_80,
+ IEEE80211_CONN_BW_LIMIT_160, /* also 80+80 */
+ IEEE80211_CONN_BW_LIMIT_320,
+};
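+/*
+ * Editorial note: the limits are ordered by increasing width, so plain
+ * numeric comparisons work; an illustrative check (not part of this
+ * patch):
+ *
+ *	if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40)
+ *		... restrict the connection to 20 MHz ...
+ */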
+
+struct ieee80211_conn_settings {
+ enum ieee80211_conn_mode mode;
+ enum ieee80211_conn_bw_limit bw_limit;
+};
+
+extern const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited;
+
struct ieee80211_mgd_auth_data {
struct cfg80211_bss *bss;
unsigned long timeout;
@@ -416,7 +429,7 @@ struct ieee80211_mgd_assoc_data {
size_t elems_len;
u8 *elems; /* pointing to inside ie[] below */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
u16 status;
@@ -441,6 +454,7 @@ struct ieee80211_mgd_assoc_data {
bool timeout_started;
bool comeback; /* whether the AP has requested association comeback */
bool s1g;
+ bool spp_amsdu;
unsigned int assoc_link_id;
@@ -509,6 +523,8 @@ struct ieee80211_if_managed {
unsigned int flags;
+ u16 mcast_seq_last;
+
bool status_acked;
bool status_received;
__le16 status_fc;
@@ -579,6 +595,10 @@ struct ieee80211_if_managed {
/* TID-to-link mapping support */
struct wiphy_delayed_work ttlm_work;
struct ieee80211_adv_ttlm_info ttlm_info;
+
+ /* dialog token enumerator for neg TTLM request */
+ u8 dialog_token_alloc;
+ struct wiphy_delayed_work neg_ttlm_timeout_work;
};
struct ieee80211_if_ibss {
@@ -866,6 +886,9 @@ struct ieee80211_chanctx {
enum ieee80211_chanctx_mode mode;
bool driver_present;
+ /* temporary data for search algorithm etc. */
+ struct ieee80211_chan_req req;
+
struct ieee80211_chanctx_conf conf;
};
@@ -938,7 +961,7 @@ struct ieee80211_link_data_managed {
enum ieee80211_smps_mode req_smps, /* requested smps mode */
driver_smps_mode; /* smps mode request */
- ieee80211_conn_flags_t conn_flags;
+ struct ieee80211_conn_settings conn;
s16 p2p_noa_index;
@@ -983,8 +1006,6 @@ struct ieee80211_link_data_managed {
int mu_edca_last_param_set;
u8 bss_param_ch_cnt;
-
- struct cfg80211_bss *bss;
};
struct ieee80211_link_data_ap {
@@ -1017,7 +1038,7 @@ struct ieee80211_link_data {
bool operating_11g_mode;
- struct cfg80211_chan_def csa_chandef;
+ struct ieee80211_chan_req csa_chanreq;
struct wiphy_work color_change_finalize_work;
struct delayed_work color_collision_detect_work;
@@ -1025,7 +1046,7 @@ struct ieee80211_link_data {
/* context reservation -- protected with wiphy mutex */
struct ieee80211_chanctx *reserved_chanctx;
- struct cfg80211_chan_def reserved_chandef;
+ struct ieee80211_chan_req reserved;
bool reserved_radar_required;
bool reserved_ready;
@@ -1160,6 +1181,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
#define sdata_dereference(p, sdata) \
wiphy_dereference(sdata->local->hw.wiphy, p)
+#define for_each_sdata_link(_local, _link) \
+ /* outer loop just to define the variables ... */ \
+ for (struct ieee80211_sub_if_data *___sdata = NULL; \
+ !___sdata; \
+ ___sdata = (void *)~0 /* always stop */) \
+ list_for_each_entry(___sdata, &(_local)->interfaces, list) \
+ if (ieee80211_sdata_running(___sdata)) \
+ for (int ___link_id = 0; \
+ ___link_id < ARRAY_SIZE(___sdata->link); \
+ ___link_id++) \
+			if ((_link = wiphy_dereference((_local)->hw.wiphy, \
+ ___sdata->link[___link_id])))
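+/*
+ * Editorial usage sketch (illustrative, not part of this patch): with
+ * the wiphy mutex held, the macro walks every link of every running
+ * interface; "something" below is a hypothetical per-link operation:
+ *
+ *	struct ieee80211_link_data *link;
+ *
+ *	for_each_sdata_link(local, link)
+ *		something(link);
+ */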
+
static inline int
ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
struct cfg80211_rnr_elems *rnr_elems,
@@ -1330,7 +1364,8 @@ struct ieee80211_local {
bool wiphy_ciphers_allocated;
- bool use_chanctx;
+ struct cfg80211_chan_def dflt_chandef;
+ bool emulate_chanctx;
/* protects the aggregated multicast list and filter calls */
spinlock_t filter_lock;
@@ -1456,8 +1491,6 @@ struct ieee80211_local {
enum mac80211_scan_state next_scan_state;
struct wiphy_delayed_work scan_work;
struct ieee80211_sub_if_data __rcu *scan_sdata;
- /* For backward compatibility only -- do not use */
- struct cfg80211_chan_def _oper_chandef;
/* Temporary remain-on-channel for off-channel operations */
struct ieee80211_channel *tmp_channel;
@@ -1531,8 +1564,6 @@ struct ieee80211_local {
int user_power_level; /* in dBm, for all interfaces */
- enum ieee80211_smps_mode smps_mode;
-
struct work_struct restart_work;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1559,7 +1590,7 @@ struct ieee80211_local {
/* virtual monitor interface */
struct ieee80211_sub_if_data __rcu *monitor_sdata;
- struct cfg80211_chan_def monitor_chandef;
+ struct ieee80211_chan_req monitor_chanreq;
/* extended capabilities provided by mac80211 */
u8 ext_capa[8];
@@ -1624,7 +1655,7 @@ ieee80211_get_link_sband(struct ieee80211_link_data *link)
/* this struct holds the value parsing from channel switch IE */
struct ieee80211_csa_ie {
- struct cfg80211_chan_def chandef;
+ struct ieee80211_chan_req chanreq;
u8 mode;
u8 count;
u8 ttl;
@@ -1633,6 +1664,14 @@ struct ieee80211_csa_ie {
u32 max_switch_time;
};
+enum ieee80211_elems_parse_error {
+ IEEE80211_PARSE_ERR_INVALID_END = BIT(0),
+ IEEE80211_PARSE_ERR_DUP_ELEM = BIT(1),
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE = BIT(2),
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM = BIT(3),
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC = BIT(4),
+};
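+/*
+ * Editorial note: these are bit flags, OR'ed together into the u8
+ * parse_error field of struct ieee802_11_elems (changed below); an
+ * illustrative check (not part of this patch):
+ *
+ *	if (elems->parse_error & IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC)
+ *		... treat the multi-link element as unusable ...
+ */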
+
/* Parsed Information Elements */
struct ieee802_11_elems {
const u8 *ie_start;
@@ -1743,8 +1782,8 @@ struct ieee802_11_elems {
struct ieee80211_mle_per_sta_profile *prof;
size_t sta_prof_len;
- /* whether a parse error occurred while retrieving these elements */
- bool parse_error;
+ /* whether/which parse error occurred while retrieving these elements */
+ u8 parse_error;
/*
* scratch buffer that can be used for various element parsing related
@@ -1801,6 +1840,8 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
unsigned int mpdu_len,
unsigned int mpdu_offset);
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
+int ieee80211_hw_conf_chan(struct ieee80211_local *local);
+void ieee80211_hw_conf_init(struct ieee80211_local *local);
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed);
@@ -2166,9 +2207,8 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
* @elems: parsed 802.11 elements received with the frame
* @current_band: indicates the current band
* @vht_cap_info: VHT capabilities of the transmitter
- * @conn_flags: contains information about own capabilities and restrictions
- * to decide which channel switch announcements can be accepted, using
- * flags from &enum ieee80211_conn_flags.
+ * @conn: contains information about own capabilities and restrictions
+ * to decide which channel switch announcements can be accepted
* @bssid: the currently connected bssid (for reporting)
* @csa_ie: parsed 802.11 CSA elements: count, mode, chandef and mesh TTL.
*	All of them are filled in only on success.
@@ -2178,7 +2218,8 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid,
struct ieee80211_csa_ie *csa_ie);
/* Suspend/resume and hw reconfiguration */
@@ -2202,6 +2243,9 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
/* utility functions/constants */
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode);
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef);
int ieee80211_frame_duration(enum nl80211_band band, size_t len,
int rate, int erp, int short_preamble);
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -2243,6 +2287,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
/**
* struct ieee80211_elems_parse_params - element parsing parameters
+ * @mode: connection mode for parsing
* @start: pointer to the elements
* @len: length of the elements
* @action: %true if the elements came from an action frame
@@ -2260,6 +2305,7 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
* for EHT capabilities parsing)
*/
struct ieee80211_elems_parse_params {
+ enum ieee80211_conn_mode mode;
const u8 *start;
size_t len;
bool action;
@@ -2279,6 +2325,7 @@ ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
struct cfg80211_bss *bss)
{
struct ieee80211_elems_parse_params params = {
+ .mode = IEEE80211_CONN_MODE_HIGHEST,
.start = start,
.len = len,
.action = action,
@@ -2408,7 +2455,6 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *da, const u8 *bssid,
u16 stype, u16 reason,
bool send_frame, u8 *frame_buf);
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end);
enum {
IEEE80211_PROBE_FLAG_DIRECTED = BIT(0),
@@ -2453,32 +2499,36 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
const struct cfg80211_chan_def *chandef);
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- u8 *end);
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb);
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata);
u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef);
u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef,
const struct ieee80211_sta_eht_cap *eht_cap);
int ieee80211_parse_bitrates(enum nl80211_chan_width width,
const struct ieee80211_supported_band *sband,
const u8 *srates, int srates_len, u32 *rates);
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band);
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_s1g_cap *caps,
struct sk_buff *skb);
void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap);
+
+/* element building in SKBs */
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id);
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode);
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn);
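+/*
+ * Editorial note: a typical call, as used by the mesh code later in
+ * this patch, builds the supported-rates element with no rates masked:
+ *
+ *	ieee80211_put_srates_elem(skb, sband, basic_rates, rate_flags, 0,
+ *				  WLAN_EID_SUPP_RATES);
+ */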
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
@@ -2488,23 +2538,36 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
const struct ieee80211_ht_operation *htop,
struct cfg80211_chan_def *chandef);
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef);
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef);
bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
struct cfg80211_chan_def *chandef);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *chandef,
+ struct ieee80211_conn_settings *conn);
+static inline void
+ieee80211_chanreq_downgrade(struct ieee80211_chan_req *chanreq,
+ struct ieee80211_conn_settings *conn)
+{
+ ieee80211_chandef_downgrade(&chanreq->oper, conn);
+ if (WARN_ON(!conn))
+ return;
+ if (conn->mode < IEEE80211_CONN_MODE_EHT)
+ chanreq->ap.chan = NULL;
+}
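+/*
+ * Editorial note (assumption): the "ap" member of struct
+ * ieee80211_chan_req only carries meaning for EHT connections, so the
+ * helper above clears it once the downgraded mode falls below EHT.
+ */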
+
+bool ieee80211_chanreq_identical(const struct ieee80211_chan_req *a,
+ const struct ieee80211_chan_req *b);
int __must_check
ieee80211_link_use_channel(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *req,
enum ieee80211_chanctx_mode mode);
int __must_check
ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
+ const struct ieee80211_chan_req *req,
enum ieee80211_chanctx_mode mode,
bool radar_required);
int __must_check
@@ -2512,9 +2575,9 @@ ieee80211_link_use_reserved_context(struct ieee80211_link_data *link);
int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link);
int __must_check
-ieee80211_link_change_bandwidth(struct ieee80211_link_data *link,
- const struct cfg80211_chan_def *chandef,
- u64 *changed);
+ieee80211_link_change_chanreq(struct ieee80211_link_data *link,
+ const struct ieee80211_chan_req *req,
+ u64 *changed);
void ieee80211_link_release_channel(struct ieee80211_link_data *link);
void ieee80211_link_vlan_copy_chanctx(struct ieee80211_link_data *link);
void ieee80211_link_copy_chanctx_to_vlans(struct ieee80211_link_data *link,
@@ -2589,12 +2652,7 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache);
void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache);
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap);
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata);
void
ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
@@ -2603,6 +2661,12 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_eht_cap_elem *eht_cap_ie_elem,
u8 eht_cap_len,
struct link_sta_info *link_sta);
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len);
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params);
void ieee80211_check_wbrf_support(struct ieee80211_local *local);
void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e4e7c0b38cb6..b75b83a5142b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -511,7 +511,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
* would have removed them, but in other modes there shouldn't
* be any stations.
*/
- flushed = sta_info_flush(sdata);
+ flushed = sta_info_flush(sdata, -1);
WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN && flushed > 0);
/* don't count this interface for allmulti while it is down */
@@ -557,7 +557,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
+ chandef = sdata->vif.bss_conf.chanreq.oper;
WARN_ON(local->suspended);
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
@@ -1164,7 +1164,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
rcu_assign_pointer(local->monitor_sdata, sdata);
mutex_unlock(&local->iflist_mtx);
- ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chandef,
+ ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chanreq,
IEEE80211_CHANCTX_EXCLUSIVE);
if (ret) {
mutex_lock(&local->iflist_mtx);
@@ -1252,7 +1252,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
sdata->vif.cab_queue = master->vif.cab_queue;
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
- sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+ sdata->vif.bss_conf.chanreq = master->vif.bss_conf.chanreq;
sdata->crypto_tx_tailroom_needed_cnt +=
master->crypto_tx_tailroom_needed_cnt;
@@ -1288,8 +1288,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
res = drv_start(local);
if (res)
goto err_del_bss;
- /* we're brought up, everything changes */
- hw_reconf_flags = ~0;
ieee80211_led_radio(local, true);
ieee80211_mod_tpt_led_trig(local,
IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
@@ -1436,7 +1434,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (coming_up)
local->open_count++;
- if (hw_reconf_flags)
+ if (local->open_count == 1)
+ ieee80211_hw_conf_init(local);
+ else if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
ieee80211_recalc_ps(local);
@@ -1546,6 +1546,22 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local,
default:
break;
}
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_EHT) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ ieee80211_process_neg_ttlm_req(sdata, mgmt,
+ skb->len);
+ break;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ ieee80211_process_neg_ttlm_res(sdata, mgmt,
+ skb->len);
+ break;
+ default:
+ break;
+ }
+ }
} else if (ieee80211_is_ext(mgmt->frame_control)) {
if (sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_queued_ext(sdata, skb);
@@ -1783,7 +1799,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
/* need to do this after the switch so vif.type is correct */
ieee80211_link_setup(&sdata->deflink);
- ieee80211_debugfs_add_netdev(sdata, false);
+ ieee80211_debugfs_recreate_netdev(sdata, false);
}
static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index af74d7f9d94d..a2cce62c97b7 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -925,6 +925,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
key->color = atomic_inc_return(&key_color);
+ /* keep this flag for easier access later */
+ if (sta && sta->sta.spp_amsdu)
+ key->conf.flags |= IEEE80211_KEY_FLAG_SPP_AMSDU;
+
increment_tailroom_need_count(sdata);
ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key);
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index d4f86955afa6..87a413374ece 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -2,7 +2,7 @@
/*
* MLO link handling
*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -73,6 +73,8 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
ieee80211_mgd_stop_link(link);
cancel_delayed_work_sync(&link->color_collision_detect_work);
+ wiphy_work_cancel(link->sdata->local->hw.wiphy,
+ &link->csa_finalize_work);
ieee80211_link_release_channel(link);
}
@@ -402,7 +404,8 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
- ret = ieee80211_link_use_channel(link, &link->conf->chandef,
+ ret = ieee80211_link_use_channel(link,
+ &link->conf->chanreq,
IEEE80211_CHANCTX_SHARED);
WARN_ON_ONCE(ret);
@@ -444,6 +447,9 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links)
lockdep_assert_wiphy(local->hw.wiphy);
+ if (WARN_ON(!active_links))
+ return -EINVAL;
+
if (!drv_can_activate_links(local, sdata, active_links))
return -EINVAL;
@@ -472,6 +478,9 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ if (WARN_ON(!active_links))
+ return;
+
if (!ieee80211_sdata_running(sdata))
return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f2ece7793573..4eaea0a9975b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -93,16 +93,32 @@ static void ieee80211_reconfig_filter(struct wiphy *wiphy,
ieee80211_configure_filter(local);
}
-static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
+static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef = {};
+ struct cfg80211_chan_def *oper = NULL;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_STATIC;
u32 changed = 0;
int power;
u32 offchannel_flag;
+ if (!local->emulate_chanctx)
+ return 0;
+
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ if (ctx && !WARN_ON(!ctx->def.chan)) {
+ oper = &ctx->def;
+ if (ctx->rx_chains_static > 1)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else if (ctx->rx_chains_dynamic > 1)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_STATIC;
+ }
+
if (local->scan_chandef.chan) {
chandef = local->scan_chandef;
} else if (local->tmp_channel) {
@@ -110,25 +126,30 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
chandef.center_freq1 = chandef.chan->center_freq;
chandef.freq1_offset = chandef.chan->freq_offset;
- } else
- chandef = local->_oper_chandef;
+ } else if (oper) {
+ chandef = *oper;
+ } else {
+ chandef = local->dflt_chandef;
+ }
- WARN(!cfg80211_chandef_valid(&chandef),
- "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
- chandef.chan->center_freq, chandef.chan->freq_offset,
- chandef.width, chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
+ if (WARN(!cfg80211_chandef_valid(&chandef),
+ "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz",
+ chandef.chan ? chandef.chan->center_freq : -1,
+ chandef.chan ? chandef.chan->freq_offset : 0,
+ chandef.width, chandef.center_freq1, chandef.freq1_offset,
+ chandef.center_freq2))
+ return 0;
- if (!cfg80211_chandef_identical(&chandef, &local->_oper_chandef))
+ if (!oper || !cfg80211_chandef_identical(&chandef, oper))
local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
else
local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (offchannel_flag ||
- !cfg80211_chandef_identical(&local->hw.conf.chandef,
- &local->_oper_chandef)) {
+ /* force it also for scanning, since drivers might config differently */
+ if (offchannel_flag || local->scanning ||
+ !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) {
local->hw.conf.chandef = chandef;
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
}
@@ -140,8 +161,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
* that otherwise STATIC is used.
*/
local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
- } else if (local->hw.conf.smps_mode != local->smps_mode) {
- local->hw.conf.smps_mode = local->smps_mode;
+ } else if (local->hw.conf.smps_mode != smps_mode) {
+ local->hw.conf.smps_mode = smps_mode;
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
@@ -173,12 +194,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
might_sleep();
- if (!local->use_chanctx)
- changed |= ieee80211_hw_conf_chan(local);
- else
- changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
- IEEE80211_CONF_CHANGE_POWER |
- IEEE80211_CONF_CHANGE_SMPS);
+ WARN_ON(changed & (IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS));
if (changed && local->open_count) {
ret = drv_config(local, changed);
@@ -202,13 +220,115 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
return ret;
}
+/* for scanning, offchannel and chanctx emulation only */
+static int _ieee80211_hw_conf_chan(struct ieee80211_local *local,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ u32 changed;
+
+ if (!local->open_count)
+ return 0;
+
+ changed = ieee80211_calc_hw_conf_chan(local, ctx);
+ if (!changed)
+ return 0;
+
+ return drv_config(local, changed);
+}
+
+int ieee80211_hw_conf_chan(struct ieee80211_local *local)
+{
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ return _ieee80211_hw_conf_chan(local, ctx ? &ctx->conf : NULL);
+}
+
+void ieee80211_hw_conf_init(struct ieee80211_local *local)
+{
+ u32 changed = ~(IEEE80211_CONF_CHANGE_CHANNEL |
+ IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_SMPS);
+
+ if (WARN_ON(!local->open_count))
+ return;
+
+ if (local->emulate_chanctx) {
+ struct ieee80211_chanctx *ctx;
+
+ ctx = list_first_entry_or_null(&local->chanctx_list,
+ struct ieee80211_chanctx,
+ list);
+
+ changed |= ieee80211_calc_hw_conf_chan(local,
+ ctx ? &ctx->conf : NULL);
+ }
+
+ WARN_ON(drv_config(local, changed));
+}
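+/*
+ * Editorial note: this runs when the first interface comes up (see the
+ * open_count == 1 check in ieee80211_do_open()) and pushes the full
+ * configuration to the driver, adding the channel/power/SMPS bits back
+ * in only when mac80211 emulates channel contexts.
+ */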
+
+int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ return _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_add_chanctx);
+
+void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = false;
+
+ _ieee80211_hw_conf_chan(local, NULL);
+}
+EXPORT_SYMBOL(ieee80211_emulate_remove_chanctx);
+
+void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ local->hw.conf.radar_enabled = ctx->radar_enabled;
+
+ _ieee80211_hw_conf_chan(local, ctx);
+}
+EXPORT_SYMBOL(ieee80211_emulate_change_chanctx);
+
+int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ local->hw.conf.radar_enabled = vifs[0].new_ctx->radar_enabled;
+ _ieee80211_hw_conf_chan(local, vifs[0].new_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(ieee80211_emulate_switch_vif_chanctx);
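+/*
+ * Editorial sketch (assumption, not part of this patch): a driver
+ * without real channel-context support now wires these helpers into
+ * its ops instead of leaving the callbacks NULL; "foo_ops" is a
+ * hypothetical driver ops struct:
+ *
+ *	static const struct ieee80211_ops foo_ops = {
+ *		...
+ *		.add_chanctx = ieee80211_emulate_add_chanctx,
+ *		.remove_chanctx = ieee80211_emulate_remove_chanctx,
+ *		.change_chanctx = ieee80211_emulate_change_chanctx,
+ *		.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
+ *	};
+ */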
+
#define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\
BSS_CHANGED_IDLE |\
BSS_CHANGED_PS |\
BSS_CHANGED_IBSS |\
BSS_CHANGED_ARP_FILTER |\
BSS_CHANGED_SSID |\
- BSS_CHANGED_MLD_VALID_LINKS)
+ BSS_CHANGED_MLD_VALID_LINKS |\
+ BSS_CHANGED_MLD_TTLM)
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
u64 changed)
@@ -644,7 +764,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
struct ieee80211_local *local;
int priv_size, i;
struct wiphy *wiphy;
- bool use_chanctx;
+ bool emulate_chanctx;
if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
!ops->add_interface || !ops->remove_interface ||
@@ -659,12 +779,26 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
return NULL;
/* check all or no channel context operations exist */
- i = !!ops->add_chanctx + !!ops->remove_chanctx +
- !!ops->change_chanctx + !!ops->assign_vif_chanctx +
- !!ops->unassign_vif_chanctx;
- if (WARN_ON(i != 0 && i != 5))
- return NULL;
- use_chanctx = i == 5;
+ if (ops->add_chanctx == ieee80211_emulate_add_chanctx &&
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx &&
+ ops->change_chanctx == ieee80211_emulate_change_chanctx) {
+ if (WARN_ON(ops->assign_vif_chanctx ||
+ ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = true;
+ } else {
+ if (WARN_ON(ops->add_chanctx == ieee80211_emulate_add_chanctx ||
+ ops->remove_chanctx == ieee80211_emulate_remove_chanctx ||
+ ops->change_chanctx == ieee80211_emulate_change_chanctx))
+ return NULL;
+ if (WARN_ON(!ops->add_chanctx ||
+ !ops->remove_chanctx ||
+ !ops->change_chanctx ||
+ !ops->assign_vif_chanctx ||
+ !ops->unassign_vif_chanctx))
+ return NULL;
+ emulate_chanctx = false;
+ }
/* Ensure 32-byte alignment of our private data and hw private data.
* We use the wiphy priv data for both our ieee80211_local and for
@@ -698,7 +832,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
WIPHY_FLAG_REPORTS_OBSS |
WIPHY_FLAG_OFFCHAN_TX;
- if (!use_chanctx || ops->remain_on_channel)
+ if (emulate_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
@@ -734,8 +868,11 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
}
- if (!ops->set_key)
+ if (!ops->set_key) {
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+ }
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
@@ -752,7 +889,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
local->ops = ops;
- local->use_chanctx = use_chanctx;
+ local->emulate_chanctx = emulate_chanctx;
+
+ if (emulate_chanctx)
+ ieee80211_hw_set(&local->hw, CHANCTX_STA_CSA);
/*
* We need a bit of data queued to build aggregates properly, so
@@ -829,7 +969,6 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_dfs_radar_detected_work);
wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter);
- local->smps_mode = IEEE80211_SMPS_OFF;
wiphy_work_init(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
@@ -980,7 +1119,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* as much, e.g. monitoring beacons would be hard if we
* might not even know which link is active at which time.
*/
- if (WARN_ON(!local->use_chanctx))
+ if (WARN_ON(local->emulate_chanctx))
return -EINVAL;
if (WARN_ON(!local->ops->link_info_changed))
@@ -1024,7 +1163,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
return -EINVAL;
#endif
- if (!local->use_chanctx) {
+ if (local->emulate_chanctx) {
for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) {
const struct ieee80211_iface_combination *comb;
@@ -1090,11 +1229,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
&sband->channels[i],
NL80211_CHAN_NO_HT);
/* init channel we're on */
- if (!local->use_chanctx && !local->_oper_chandef.chan) {
+ local->monitor_chanreq.oper = dflt_chandef;
+ if (local->emulate_chanctx) {
+ local->dflt_chandef = dflt_chandef;
local->hw.conf.chandef = dflt_chandef;
- local->_oper_chandef = dflt_chandef;
}
- local->monitor_chandef = dflt_chandef;
}
channels += sband->n_channels;
@@ -1115,8 +1254,26 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_vht = supp_vht || sband->vht_cap.vht_supported;
for_each_sband_iftype_data(sband, i, iftd) {
+ u8 he_40_mhz_cap;
+
supp_he = supp_he || iftd->he_cap.has_he;
supp_eht = supp_eht || iftd->eht_cap.has_eht;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else
+ he_40_mhz_cap =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+
+			/* currently no support for HE client where HT has 40 MHz but HE doesn't */
+ if (iftd->he_cap.has_he &&
+ iftd->types_mask & (BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT)) &&
+ sband->ht_cap.ht_supported &&
+ sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap))
+ return -EINVAL;
}
/* HT, VHT, HE require QoS, thus >= 4 queues */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index fccbcde3359a..32475da98d73 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -97,7 +97,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.bss_conf.basic_rates != basic_rates)
return false;
- cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
+ cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chanreq.oper.chan,
NL80211_CHAN_NO_HT);
ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
@@ -107,10 +107,11 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
ie->vht_operation, ie->ht_operation,
&sta_chan_def);
- ieee80211_chandef_he_6ghz_oper(sdata, ie->he_operation, ie->eht_operation,
+ ieee80211_chandef_he_6ghz_oper(sdata->local, ie->he_operation,
+ ie->eht_operation,
&sta_chan_def);
- if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
+ if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chanreq.oper,
&sta_chan_def))
return false;
@@ -435,9 +436,9 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->ht_cap.ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -476,16 +477,16 @@ int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!ht_cap->ht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
return -ENOMEM;
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
- ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chandef,
+ ieee80211_ie_build_ht_oper(pos, ht_cap, &sdata->vif.bss_conf.chanreq.oper,
sdata->vif.bss_conf.ht_operation_mode,
false);
@@ -507,9 +508,9 @@ int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!sband->vht_cap.vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
@@ -548,9 +549,9 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
return 0;
if (!vht_cap->vht_supported ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
@@ -558,7 +559,7 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
ieee80211_ie_build_vht_oper(pos, vht_cap,
- &sdata->vif.bss_conf.chandef);
+ &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -566,29 +567,18 @@ int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
int mesh_add_he_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
-
- if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);
-
- return 0;
+ return ieee80211_put_he_cap(skb, sdata, sband, NULL);
}
int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
@@ -605,20 +595,20 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!he_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + sizeof(struct ieee80211_he_operation);
- if (sdata->vif.bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ if (sdata->vif.bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
len += sizeof(struct ieee80211_he_6ghz_oper);
if (skb_tailroom(skb) < len)
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chandef);
+ ieee80211_ie_build_he_oper(pos, &sdata->vif.bss_conf.chanreq.oper);
return 0;
}
@@ -639,37 +629,25 @@ int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
if (!iftd)
return 0;
- ieee80211_ie_build_he_6ghz_cap(sdata, sdata->deflink.smps_mode, skb);
+ ieee80211_put_he_6ghz_cap(skb, sdata, sdata->deflink.smps_mode);
return 0;
}
int mesh_add_eht_cap_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ie_len)
{
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct ieee80211_supported_band *sband;
- u8 *pos;
sband = ieee80211_get_sband(sdata);
if (!sband)
return -EINVAL;
- he_cap = ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
- if (!he_cap || !eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ if (sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
- if (skb_tailroom(skb) < ie_len)
- return -ENOMEM;
-
- pos = skb_put(skb, ie_len);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + ie_len, false);
-
- return 0;
+ return ieee80211_put_eht_cap(skb, sdata, sband, NULL);
}
int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
@@ -685,9 +663,9 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT);
if (!eht_cap ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return 0;
len = 2 + 1 + offsetof(struct ieee80211_eht_operation, optional) +
@@ -697,7 +675,7 @@ int mesh_add_eht_oper_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *sk
return -ENOMEM;
pos = skb_put(skb, len);
- ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chandef, eht_cap);
+ ieee80211_ie_build_eht_oper(pos, &sdata->vif.bss_conf.chanreq.oper, eht_cap);
return 0;
}
@@ -745,9 +723,9 @@ ieee80211_mesh_update_bss_params(struct ieee80211_sub_if_data *sdata,
return;
if (!ieee80211_get_he_iftype_cap(sband, NL80211_IFTYPE_MESH_POINT) ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
- sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ sdata->vif.bss_conf.chanreq.oper.width == NL80211_CHAN_WIDTH_10)
return;
sdata->vif.bss_conf.he_support = true;
@@ -966,24 +944,22 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
int head_len, tail_len;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- struct ieee80211_chanctx_conf *chanctx_conf;
struct mesh_csa_settings *csa;
- enum nl80211_band band;
+ const struct ieee80211_supported_band *sband;
u8 ie_len_he_cap, ie_len_eht_cap;
u8 *pos;
struct ieee80211_sub_if_data *sdata;
int hdr_len = offsetofend(struct ieee80211_mgmt, u.beacon);
+ u32 rate_flags;
sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
- band = chanctx_conf->def.chan->band;
- rcu_read_unlock();
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ sband = ieee80211_get_sband(sdata);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
head_len = hdr_len +
2 + /* NULL SSID */
/* Channel Switch Announcement */
@@ -1107,7 +1083,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
}
rcu_read_unlock();
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_SUPP_RATES) ||
mesh_add_ds_params_ie(sdata, skb))
goto out_free;
@@ -1118,7 +1096,9 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
skb_trim(skb, 0);
bcn->tail = bcn->head + bcn->head_len;
- if (ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+ if (ieee80211_put_srates_elem(skb, sband,
+ sdata->vif.bss_conf.basic_rates,
+ rate_flags, 0, WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_ht_cap_ie(sdata, skb) ||
mesh_add_ht_oper_ie(sdata, skb) ||
@@ -1234,7 +1214,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
netif_carrier_off(sdata->dev);
/* flush STAs and mpaths on this iface */
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
@@ -1276,11 +1256,12 @@ static void ieee80211_mesh_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
* unavailable.
*/
err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef,
+ &sdata->vif.bss_conf.chanreq.oper,
NL80211_IFTYPE_MESH_POINT);
if (err > 0)
cfg80211_radar_event(sdata->local->hw.wiphy,
- &sdata->vif.bss_conf.chandef, GFP_ATOMIC);
+ &sdata->vif.bss_conf.chanreq.oper,
+ GFP_ATOMIC);
}
static bool
@@ -1292,7 +1273,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_supported_band *sband;
int err;
- ieee80211_conn_flags_t conn_flags = 0;
+ struct ieee80211_conn_settings conn = ieee80211_conn_settings_unlimited;
u32 vht_cap_info = 0;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -1301,15 +1282,18 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (!sband)
return false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_LEGACY;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_20:
- conn_flags |= IEEE80211_CONN_DISABLE_40MHZ;
- fallthrough;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_40:
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
+ conn.mode = IEEE80211_CONN_MODE_HT;
+ conn.bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
default:
break;
@@ -1321,8 +1305,8 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
memset(&params, 0, sizeof(params));
err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band,
- vht_cap_info,
- conn_flags, sdata->vif.addr,
+ vht_cap_info, &conn,
+ sdata->vif.addr,
&csa_ie);
if (err < 0)
return false;
@@ -1335,7 +1319,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
if (csa_ie.reason_code == WLAN_REASON_MESH_CHAN_REGULATORY)
ieee80211_mesh_csa_mark_radar(sdata);
- params.chandef = csa_ie.chandef;
+ params.chandef = csa_ie.chanreq.oper;
params.count = csa_ie.count;
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
@@ -1371,7 +1355,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
params.radar_required = err;
if (cfg80211_chandef_identical(&params.chandef,
- &sdata->vif.bss_conf.chandef)) {
+ &sdata->vif.bss_conf.chanreq.oper)) {
mcsa_dbg(sdata,
"received csa with an identical chandef, ignoring\n");
return true;
@@ -1551,7 +1535,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed)
*changed |= BSS_CHANGED_BEACON;
mcsa_dbg(sdata, "complete switching to center freq %d MHz",
- sdata->vif.bss_conf.chandef.chan->center_freq);
+ sdata->vif.bss_conf.chanreq.oper.chan->center_freq);
return 0;
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ad8469293d71..d913ce7ba72e 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*/
@@ -94,6 +94,7 @@ enum mesh_deferred_task_flags {
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
* @path_change_count: the number of path changes to destination
+ * @fast_tx_check: timestamp of last fast-xmit enable attempt
*
*
* The dst address is unique in the mesh path table. Since the mesh_path is
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 735edde1bd81..91b55d6a68b9 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -600,11 +600,10 @@ unlock_sta:
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
- struct mesh_tx_cache *cache;
+ struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
return;
@@ -622,7 +621,6 @@ void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (entry->mpath == mpath)
@@ -637,7 +635,6 @@ void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mesh_fast_tx *entry;
struct hlist_node *n;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
if (rcu_access_pointer(entry->mpath->next_hop) == sta)
@@ -651,7 +648,6 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
struct ieee80211_mesh_fast_tx *entry;
- cache = &sdata->u.mesh.tx_cache;
spin_lock_bh(&cache->walk_lock);
entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
if (entry)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 28bf794f67f8..8f2b492a9fe9 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2008, 2009 open80211s Ltd.
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*/
#include <linux/gfp.h>
@@ -163,7 +163,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
u16 ht_opmode;
bool non_ht_sta = false, ht20_sta = false;
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
@@ -196,7 +196,7 @@ static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
if (non_ht_sta)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
else if (ht20_sta &&
- sdata->vif.bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+ sdata->vif.bss_conf.chanreq.oper.width > NL80211_CHAN_WIDTH_20)
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
else
ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -226,10 +226,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.self_prot);
int err = -ENOMEM;
- ie_len_he_cap = ieee80211_ie_len_he_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
- ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata,
- NL80211_IFTYPE_MESH_POINT);
+ ie_len_he_cap = ieee80211_ie_len_he_cap(sdata);
+ ie_len_eht_cap = ieee80211_ie_len_eht_cap(sdata);
skb = dev_alloc_skb(local->tx_headroom +
hdr_len +
2 + /* capability info */
@@ -266,14 +264,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action != WLAN_SP_MESH_PEERING_CLOSE) {
struct ieee80211_supported_band *sband;
- enum nl80211_band band;
+ u32 rate_flags, basic_rates;
sband = ieee80211_get_sband(sdata);
if (!sband) {
err = -EINVAL;
goto free;
}
- band = sband->band;
/* capability info */
pos = skb_put_zero(skb, 2);
@@ -282,8 +279,17 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
put_unaligned_le16(sta->sta.aid, pos);
}
- if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
- ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
+
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
+ basic_rates = sdata->vif.bss_conf.basic_rates;
+
+ if (ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_SUPP_RATES) ||
+ ieee80211_put_srates_elem(skb, sband, basic_rates,
+ rate_flags, 0,
+ WLAN_EID_EXT_SUPP_RATES) ||
mesh_add_rsn_ie(sdata, skb) ||
mesh_add_meshid_ie(sdata, skb) ||
mesh_add_meshconf_ie(sdata, skb))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 073105deb424..5f2e9f5e1779 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -8,7 +8,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#include <linux/delay.h>
@@ -46,6 +46,8 @@
#define IEEE80211_ADV_TTLM_SAFETY_BUFFER_MS msecs_to_jiffies(100)
#define IEEE80211_ADV_TTLM_ST_UNDERFLOW 0xff00
+#define IEEE80211_NEG_TTLM_REQ_TIMEOUT (HZ / 5)
+
static int max_nullfunc_tries = 2;
module_param(max_nullfunc_tries, int, 0644);
MODULE_PARM_DESC(max_nullfunc_tries,
@@ -92,84 +94,6 @@ MODULE_PARM_DESC(probe_wait_ms,
#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/*
- * Extract from the given disabled subchannel bitmap (raw format
- * from the EHT Operation Element) the bits for the subchannel
- * we're using right now.
- */
-static u16
-ieee80211_extract_dis_subch_bmap(const struct ieee80211_eht_operation *eht_oper,
- struct cfg80211_chan_def *chandef, u16 bitmap)
-{
- struct ieee80211_eht_operation_info *info = (void *)eht_oper->optional;
- struct cfg80211_chan_def ap_chandef = *chandef;
- u32 ap_center_freq, local_center_freq;
- u32 ap_bw, local_bw;
- int ap_start_freq, local_start_freq;
- u16 shift, mask;
-
- if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) ||
- !(eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
- return 0;
-
- /* set 160/320 supported to get the full AP definition */
- ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- true, true, &ap_chandef);
- ap_center_freq = ap_chandef.center_freq1;
- ap_bw = 20 * BIT(u8_get_bits(info->control,
- IEEE80211_EHT_OPER_CHAN_WIDTH));
- ap_start_freq = ap_center_freq - ap_bw / 2;
- local_center_freq = chandef->center_freq1;
- local_bw = 20 * BIT(ieee80211_chan_width_to_rx_bw(chandef->width));
- local_start_freq = local_center_freq - local_bw / 2;
- shift = (local_start_freq - ap_start_freq) / 20;
- mask = BIT(local_bw / 20) - 1;
-
- return (bitmap >> shift) & mask;
-}
-
-/*
- * Handle the puncturing bitmap, possibly downgrading bandwidth to get a
- * valid bitmap.
- */
-static void
-ieee80211_handle_puncturing_bitmap(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u16 bitmap, u64 *changed)
-{
- struct cfg80211_chan_def *chandef = &link->conf->chandef;
- struct ieee80211_local *local = link->sdata->local;
- u16 extracted;
- u64 _changed = 0;
-
- if (!changed)
- changed = &_changed;
-
- while (chandef->width > NL80211_CHAN_WIDTH_40) {
- extracted =
- ieee80211_extract_dis_subch_bmap(eht_oper, chandef,
- bitmap);
-
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw,
- DISALLOW_PUNCTURING)))
- break;
- link->u.mgd.conn_flags |=
- ieee80211_chandef_downgrade(chandef);
- *changed |= BSS_CHANGED_BANDWIDTH;
- }
-
- if (chandef->width <= NL80211_CHAN_WIDTH_40)
- extracted = 0;
-
- if (link->conf->eht_puncturing != extracted) {
- link->conf->eht_puncturing = extracted;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
- }
-}
-
-/*
* We can have multiple work items (and connection probing)
* scheduling this timer, but we need to take care to only
* reschedule it when it should fire _earlier_ than it was
@@ -223,77 +147,84 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
-static ieee80211_conn_flags_t
-ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_link_data *link,
- ieee80211_conn_flags_t conn_flags,
- struct ieee80211_supported_band *sband,
- struct ieee80211_channel *channel,
- u32 vht_cap_info,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- struct cfg80211_chan_def *chandef, bool tracking)
+static enum ieee80211_conn_mode
+ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ u32 vht_cap_info,
+ const struct ieee802_11_elems *elems,
+ bool ignore_ht_channel_mismatch,
+ const struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
{
+ const struct ieee80211_ht_operation *ht_oper = elems->ht_operation;
+ const struct ieee80211_vht_operation *vht_oper = elems->vht_operation;
+ const struct ieee80211_he_operation *he_oper = elems->he_operation;
+ const struct ieee80211_eht_operation *eht_oper = elems->eht_operation;
+ struct ieee80211_supported_band *sband =
+ sdata->local->hw.wiphy->bands[channel->band];
struct cfg80211_chan_def vht_chandef;
- struct ieee80211_sta_ht_cap sta_ht_cap;
- ieee80211_conn_flags_t ret;
+ bool no_vht = false;
u32 ht_cfreq;
- memset(chandef, 0, sizeof(struct cfg80211_chan_def));
- chandef->chan = channel;
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef->center_freq1 = channel->center_freq;
- chandef->freq1_offset = channel->freq_offset;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = channel,
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .center_freq1 = channel->center_freq,
+ .freq1_offset = channel->freq_offset,
+ };
- if (channel->band == NL80211_BAND_6GHZ) {
- if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, eht_oper,
- chandef)) {
- mlme_dbg(sdata,
- "bad 6 GHz operation, disabling HT/VHT/HE/EHT\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- } else {
- ret = 0;
- }
- vht_chandef = *chandef;
- goto out;
- } else if (sband->band == NL80211_BAND_S1GHZ) {
- if (!ieee80211_chandef_s1g_oper(s1g_oper, chandef)) {
+ /* get special S1G case out of the way */
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ if (!ieee80211_chandef_s1g_oper(elems->s1g_oper, chandef)) {
sdata_info(sdata,
"Missing S1G Operation Element? Trying operating == primary\n");
chandef->width = ieee80211_s1g_channel_width(channel);
}
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
- goto out;
+ return IEEE80211_CONN_MODE_S1G;
}
- memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
- ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+ /* get special 6 GHz case out of the way */
+ if (sband->band == NL80211_BAND_6GHZ) {
+ enum ieee80211_conn_mode mode = IEEE80211_CONN_MODE_EHT;
- if (!ht_oper || !sta_ht_cap.ht_supported) {
- mlme_dbg(sdata, "HT operation missing / HT not supported\n");
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ /* this is an error */
+ if (conn->mode < IEEE80211_CONN_MODE_HE)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!elems->he_6ghz_capa || !elems->he_cap) {
+ sdata_info(sdata,
+ "HE 6 GHz AP is missing HE/HE 6 GHz band capability\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ if (!eht_oper || !elems->eht_cap) {
+ eht_oper = NULL;
+ mode = IEEE80211_CONN_MODE_HE;
+ }
+
+ if (!ieee80211_chandef_he_6ghz_oper(sdata->local, he_oper,
+ eht_oper, chandef)) {
+ sdata_info(sdata, "bad HE/EHT 6 GHz operation\n");
+ return IEEE80211_CONN_MODE_LEGACY;
+ }
+
+ return mode;
}
+ /* now we have the progression HT, VHT, ... */
+ if (conn->mode < IEEE80211_CONN_MODE_HT)
+ return IEEE80211_CONN_MODE_LEGACY;
+
+ if (!ht_oper || !elems->ht_cap_elem)
+ return IEEE80211_CONN_MODE_LEGACY;
+
chandef->width = NL80211_CHAN_WIDTH_20;
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
- if (!tracking && channel->center_freq != ht_cfreq) {
+ if (!ignore_ht_channel_mismatch && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
@@ -305,36 +236,22 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
"Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
channel->center_freq, ht_cfreq,
ht_oper->primary_chan, channel->band);
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ return IEEE80211_CONN_MODE_LEGACY;
}
- /* check 40 MHz support, if we have it */
- if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
- ieee80211_chandef_ht_oper(ht_oper, chandef);
- } else {
- mlme_dbg(sdata, "40 MHz not supported\n");
- /* 40 MHz (and 80 MHz) must be supported for VHT */
- ret = IEEE80211_CONN_DISABLE_VHT;
- /* also mark 40 MHz disabled */
- ret |= IEEE80211_CONN_DISABLE_40MHZ;
- goto out;
- }
+ ieee80211_chandef_ht_oper(ht_oper, chandef);
- if (!vht_oper || !sband->vht_cap.vht_supported) {
- mlme_dbg(sdata, "VHT operation missing / VHT not supported\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
- }
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return IEEE80211_CONN_MODE_HT;
vht_chandef = *chandef;
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- he_oper &&
- (le32_to_cpu(he_oper->he_oper_params) &
- IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
+
+ /*
+ * having he_cap/he_oper parsed out implies we're at
+ * least operating as HE STA
+ */
+ if (elems->he_cap && he_oper &&
+ he_oper->he_oper_params & cpu_to_le32(IEEE80211_HE_OPERATION_VHT_OPER_INFO)) {
struct ieee80211_vht_operation he_oper_vht_cap;
/*
@@ -347,253 +264,614 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
&he_oper_vht_cap, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HE))
- sdata_info(sdata,
- "HE AP VHT information is invalid, disabling HE\n");
- ret = IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "HE AP VHT information is invalid, disabling HE\n");
+ /* this will cause us to re-parse as VHT STA */
+ return IEEE80211_CONN_MODE_VHT;
}
+ } else if (!vht_oper || !elems->vht_cap_elem) {
+ if (sband->band == NL80211_BAND_5GHZ) {
+ sdata_info(sdata,
+ "VHT information is missing, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
+ }
+ no_vht = true;
+ } else if (sband->band == NL80211_BAND_2GHZ) {
+ no_vht = true;
} else if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
vht_cap_info,
vht_oper, ht_oper,
&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ sdata_info(sdata,
+ "AP VHT information is invalid, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (!cfg80211_chandef_valid(&vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information is invalid, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
+ sdata_info(sdata,
+ "AP VHT information doesn't match HT, disabling VHT\n");
+ return IEEE80211_CONN_MODE_HT;
}
- if (cfg80211_chandef_identical(chandef, &vht_chandef)) {
- ret = 0;
- goto out;
- }
+ *chandef = vht_chandef;
- if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_VHT))
- sdata_info(sdata,
- "AP VHT information doesn't match HT, disabling VHT\n");
- ret = IEEE80211_CONN_DISABLE_VHT;
- goto out;
+ /* stick to current max mode if we or the AP don't have HE */
+ if (conn->mode < IEEE80211_CONN_MODE_HE ||
+ !elems->he_operation || !elems->he_cap) {
+ if (no_vht)
+ return IEEE80211_CONN_MODE_HT;
+ return IEEE80211_CONN_MODE_VHT;
}
- *chandef = vht_chandef;
+ /* stick to HE if we or the AP don't have EHT */
+ if (conn->mode < IEEE80211_CONN_MODE_EHT ||
+ !eht_oper || !elems->eht_cap)
+ return IEEE80211_CONN_MODE_HE;
/*
* handle the case that the EHT operation indicates that it holds EHT
* operation information (in case that the channel width differs from
* the channel width reported in HT/VHT/HE).
*/
- if (eht_oper && (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
+ if (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
struct cfg80211_chan_def eht_chandef = *chandef;
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- eht_chandef.width ==
- NL80211_CHAN_WIDTH_160,
- false, &eht_chandef);
+ &eht_chandef);
+
+ eht_chandef.punctured =
+ ieee80211_eht_oper_dis_subchan_bitmap(eht_oper);
if (!cfg80211_chandef_valid(&eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is invalid, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information is invalid, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
if (!cfg80211_chandef_compatible(chandef, &eht_chandef)) {
- if (!(conn_flags & IEEE80211_CONN_DISABLE_EHT))
- sdata_info(sdata,
- "AP EHT information is incompatible, disabling EHT\n");
- ret = IEEE80211_CONN_DISABLE_EHT;
- goto out;
+ sdata_info(sdata,
+ "AP EHT information doesn't match HT/VHT/HE, disabling EHT\n");
+ return IEEE80211_CONN_MODE_HE;
}
*chandef = eht_chandef;
}
- ret = 0;
+ return IEEE80211_CONN_MODE_EHT;
+}
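/*
 * A minimal sketch of the channel -> frequency mapping behind the HT
 * primary channel mismatch check above, assuming the standard 2.4 GHz
 * and 5 GHz numbering. Illustrative helper only; mac80211 uses
 * ieee80211_channel_to_frequency(), which also handles the other bands.
 */
static int chan_to_freq_mhz(int chan, int is_5ghz)
{
	if (is_5ghz)
		return 5000 + chan * 5;
	if (chan == 14)
		return 2484;		/* 2.4 GHz channel 14 is special */
	return 2407 + chan * 5;		/* 2.4 GHz channels 1..13 */
}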
+
+static bool
+ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_he_operation *he_op)
+{
+ struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
+ u16 mcs_80_map_tx, mcs_80_map_rx;
+ u16 ap_min_req_set;
+ int nss;
+
+ if (!he_cap)
+ return false;
+
+ /* mcs_nss is right after he_cap info */
+ he_mcs_nss_supp = (void *)(he_cap + 1);
+
+ mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
+ mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+
+ /* P802.11-REVme/D0.3
+ * 27.1.1 Introduction to the HE PHY
+ * ...
+ * An HE STA shall support the following features:
+ * ...
+ * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
+ * supported channel widths for HE SU PPDUs
+ */
+ if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ sdata_info(sdata,
+ "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
+ mcs_80_map_tx, mcs_80_map_rx);
+ return false;
+ }
+
+ if (!he_op)
+ return true;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
-out:
/*
- * When tracking the current AP, don't do any further checks if the
- * new chandef is identical to the one we're currently using for the
- * connection. This keeps us from playing ping-pong with regulatory,
- * without it the following can happen (for example):
- * - connect to an AP with 80 MHz, world regdom allows 80 MHz
- * - AP advertises regdom US
- * - CRDA loads regdom US with 80 MHz prohibited (old database)
- * - the code below detects an unsupported channel, downgrades, and
- * we disconnect from the AP in the caller
- * - disconnect causes CRDA to reload world regdomain and the game
- * starts anew.
- * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
+ */
+ if (!ap_min_req_set)
+ return true;
+
+ /* make sure the AP is consistent with itself
*
- * It seems possible that there are still scenarios with CSA or real
- * bandwidth changes where a this could happen, but those cases are
- * less common and wouldn't completely prevent using the AP.
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * A STA that is operating in an HE BSS shall be able to receive and
+ * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
+ * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
+ * MLME-START.request primitive and shall be able to receive at each of
+ * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
+ * NSS Set field in the HE Capabilities parameter of the MLME-START.request
+ * primitive
*/
- if (tracking &&
- cfg80211_chandef_identical(chandef, &link->conf->chandef))
- return ret;
+ for (nss = 8; nss > 0; nss--) {
+ u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ u8 ap_rx_val;
+ u8 ap_tx_val;
+
+ if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
+ ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
- /* don't print the message below for VHT mismatch if VHT is disabled */
- if (ret & IEEE80211_CONN_DISABLE_VHT)
- vht_chandef = *chandef;
+ if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
+ sdata_info(sdata,
+ "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
+ nss, ap_rx_val, ap_tx_val, ap_op_val);
+ return false;
+ }
+ }
+
+ return true;
+}
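/*
 * The loop above walks a 16-bit HE MCS map two bits at a time, highest
 * NSS first. A self-contained sketch of that decoding; per 802.11ax the
 * two-bit values are 0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11 and
 * 3 = not supported (IEEE80211_HE_MCS_NOT_SUPPORTED).
 */
#include <stdint.h>
#include <stdio.h>

static void dump_he_mcs_map(uint16_t map)
{
	static const char * const desc[] = {
		"MCS 0-7", "MCS 0-9", "MCS 0-11", "not supported"
	};
	int nss;

	for (nss = 1; nss <= 8; nss++)
		printf("NSS %d: %s\n", nss, desc[(map >> (2 * (nss - 1))) & 3]);
}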
+
+static bool
+ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_he_operation *he_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ u16 ap_min_req_set;
+ int i;
+
+ if (!sta_he_cap || !he_op)
+ return false;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
/*
- * Ignore the DISABLED flag when we're already connected and only
- * tracking the APs beacon for bandwidth changes - otherwise we
- * might get disconnected here if we connect to an AP, update our
- * regulatory information based on the AP's country IE and the
- * information we have is wrong/outdated and disables the channel
- * that we're actually using for the connection to the AP.
+ * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
+ * zeroes, which is nonsense, and completely inconsistent with itself
+ * (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
- while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
- tracking ? 0 :
- IEEE80211_CHAN_DISABLED)) {
- if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
- ret = IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- break;
+ if (!ap_min_req_set)
+ return true;
+
+ /* Need to check each of 80 MHz, 160 MHz and 80+80 MHz */
+ for (i = 0; i < 3; i++) {
+ const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
+ &sta_he_cap->he_mcs_nss_supp;
+ u16 sta_mcs_map_rx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
+ u16 sta_mcs_map_tx =
+ le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
+ u8 nss;
+ bool verified = true;
+
+ /*
+ * For each bandwidth there is a maximum of 8 spatial streams
+ * possible. Each of the sta_mcs_map_* values is a 16-bit map
+ * built of 2 bits per NSS (1-8), with the values defined in
+ * enum ieee80211_he_mcs_support. Make sure the STA TX and RX
+ * capabilities aren't less than the AP's minimum requirements
+ * for this HE BSS per spatial stream.
+ * It is enough for one such bandwidth to meet the requirements.
+ */
+ for (nss = 8; nss > 0; nss--) {
+ u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
+ u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
+ u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+
+ if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ /*
+ * Make sure the HE AP doesn't require MCSs that aren't
+ * supported by the client as required by spec
+ *
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * An HE STA shall not attempt to join (MLME-JOIN.request primitive)
+ * a BSS, unless it supports (i.e., is able to both transmit and
+ * receive using) all of the <HE-MCS, NSS> tuples in the basic
+ * HE-MCS and NSS set.
+ */
+ if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
+ verified = false;
+ break;
+ }
+ }
+
+ if (verified)
+ return true;
+ }
+
+ /* If here, STA doesn't meet AP's HE min requirements */
+ return false;
+}
+
+static u8
+ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
+ const struct ieee80211_sta_eht_cap *sta_eht_cap,
+ unsigned int idx, int bw)
+{
+ u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
+ u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+
+ /* handle us being a 20 MHz-only EHT STA - with four values
+ * for MCS 0-7, 8-9, 10-11, 12-13.
+ */
+ if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
+ return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+
+ /* the others have MCS 0-9 together, rather than separately from 0-7 */
+ if (idx > 0)
+ idx--;
+
+ switch (bw) {
+ case 0:
+ return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
+ case 1:
+ if (!(he_phy_cap0 &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
+ case 2:
+ if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
+ return 0xff; /* pass check */
+ return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
+ }
+
+ WARN_ON(1);
+ return 0;
+}
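/*
 * Index translation sketch for the helper above: a 20 MHz-only EHT STA
 * advertises four MCS ranges (0-7, 8-9, 10-11, 12-13), while the other
 * bandwidths merge the first two into MCS 0-9, leaving three entries.
 * Illustrative only, mirroring the "if (idx > 0) idx--" above:
 */
static unsigned int eht_mcs_range_idx(unsigned int idx, int only_20mhz)
{
	return (only_20mhz || idx == 0) ? idx : idx - 1;
}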
+
+static bool
+ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_eht_operation *eht_op)
+{
+ const struct ieee80211_sta_he_cap *sta_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *sta_eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
+ unsigned int i;
+
+ if (!sta_he_cap || !sta_eht_cap || !eht_op)
+ return false;
+
+ req = &eht_op->basic_mcs_nss;
+
+ for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
+ u8 req_rx_nss, req_tx_nss;
+ unsigned int bw;
+
+ req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_RX);
+ req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ for (bw = 0; bw < 3; bw++) {
+ u8 have, have_rx_nss, have_tx_nss;
+
+ have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
+ sta_eht_cap,
+ i, bw);
+ have_rx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_RX);
+ have_tx_nss = u8_get_bits(have,
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ if (req_rx_nss > have_rx_nss ||
+ req_tx_nss > have_tx_nss)
+ return false;
+ }
+ }
+
+ return true;
+}
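/*
 * Each rx_tx_max_nss octet checked above packs two 4-bit maximum-NSS
 * values, extracted with u8_get_bits() and the IEEE80211_EHT_MCS_NSS_RX/TX
 * masks. A plain-C sketch of that unpacking, assuming RX in the low nibble
 * and TX in the high nibble as in the kernel definitions:
 */
#include <stdint.h>

static inline unsigned int eht_max_nss_rx(uint8_t v)
{
	return v & 0x0f;		/* IEEE80211_EHT_MCS_NSS_RX */
}

static inline unsigned int eht_max_nss_tx(uint8_t v)
{
	return (v & 0xf0) >> 4;		/* IEEE80211_EHT_MCS_NSS_TX */
}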
+
+static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy,
+ chandef, prohibited_flags))
+ return false;
+
+ if (chandef->punctured &&
+ ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING))
+ return false;
+
+ return true;
+}
+
+static struct ieee802_11_elems *
+ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_bss *cbss, int link_id,
+ struct ieee80211_chan_req *chanreq)
+{
+ const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies);
+ struct ieee80211_bss *bss = (void *)cbss->priv;
+ struct ieee80211_channel *channel = cbss->channel;
+ struct ieee80211_elems_parse_params parse_params = {
+ .link_id = -1,
+ .from_ap = true,
+ .start = ies->data,
+ .len = ies->len,
+ .mode = conn->mode,
+ };
+ struct ieee802_11_elems *elems;
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_chan_def ap_chandef;
+ enum ieee80211_conn_mode ap_mode;
+ int ret;
+
+again:
+ elems = ieee802_11_parse_elems_full(&parse_params);
+ if (!elems)
+ return ERR_PTR(-ENOMEM);
+
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
+ elems, false, conn, &ap_chandef);
+
+ mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
+ cbss->bssid, ieee80211_conn_mode_str(ap_mode));
+
+ /* this should be impossible since parsing depends on our mode */
+ if (WARN_ON(ap_mode > conn->mode)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ sband = sdata->local->hw.wiphy->bands[channel->band];
+
+ switch (channel->band) {
+ case NL80211_BAND_S1GHZ:
+ if (WARN_ON(ap_mode != IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ return elems;
+ case NL80211_BAND_6GHZ:
+ if (ap_mode < IEEE80211_CONN_MODE_HE) {
+ sdata_info(sdata,
+ "Rejecting non-HE 6/7 GHz connection");
+ ret = -EINVAL;
+ goto free;
+ }
+ break;
+ default:
+ if (WARN_ON(ap_mode == IEEE80211_CONN_MODE_S1G)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+
+ switch (ap_mode) {
+ case IEEE80211_CONN_MODE_S1G:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto free;
+ case IEEE80211_CONN_MODE_LEGACY:
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
+ case IEEE80211_CONN_MODE_HT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_40);
+ break;
+ case IEEE80211_CONN_MODE_VHT:
+ case IEEE80211_CONN_MODE_HE:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ break;
+ case IEEE80211_CONN_MODE_EHT:
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_320);
+ break;
+ }
+
+ conn->mode = ap_mode;
+ chanreq->oper = ap_chandef;
+
+ /* wider-bandwidth OFDMA is only done in EHT */
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW))
+ chanreq->ap = ap_chandef;
+ else
+ chanreq->ap.chan = NULL;
+
+ while (!ieee80211_chandef_usable(sdata, &chanreq->oper,
+ IEEE80211_CHAN_DISABLED)) {
+ if (WARN_ON(chanreq->oper.width == NL80211_CHAN_WIDTH_20_NOHT)) {
+ ret = -EINVAL;
+ goto free;
}
- ret |= ieee80211_chandef_downgrade(chandef);
+ ieee80211_chanreq_downgrade(chanreq, conn);
}
- if (!he_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_HE))
- ret |= IEEE80211_CONN_DISABLE_HE | IEEE80211_CONN_DISABLE_EHT;
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_HE)) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
- if (!eht_oper || !cfg80211_chandef_usable(sdata->wdev.wiphy, chandef,
- IEEE80211_CHAN_NO_EHT))
- ret |= IEEE80211_CONN_DISABLE_EHT;
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !cfg80211_chandef_usable(sdata->wdev.wiphy, &chanreq->oper,
+ IEEE80211_CHAN_NO_EHT)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ }
- if (chandef->width != vht_chandef.width && !tracking)
+ if (chanreq->oper.width != ap_chandef.width || ap_mode != conn->mode)
sdata_info(sdata,
- "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
+ "regulatory prevented using AP config, downgraded\n");
- WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
- return ret;
+ if (conn->mode >= IEEE80211_CONN_MODE_HE &&
+ (!ieee80211_verify_peer_he_mcs_support(sdata, (void *)elems->he_cap,
+ elems->he_operation) ||
+ !ieee80211_verify_sta_he_mcs_support(sdata, sband,
+ elems->he_operation))) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ sdata_info(sdata, "required MCSes not supported, disabling HE\n");
+ }
+
+ if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
+ !ieee80211_verify_sta_eht_mcs_support(sdata, sband,
+ elems->eht_operation)) {
+ conn->mode = IEEE80211_CONN_MODE_HE;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_160);
+ sdata_info(sdata, "required MCSes not supported, disabling EHT\n");
+ }
+
+ /* the mode can only decrease, so this must terminate */
+ if (ap_mode != conn->mode)
+ goto again;
+
+ mlme_link_id_dbg(sdata, link_id,
+ "connecting with %s mode, max bandwidth %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
+
+ if (WARN_ON_ONCE(!cfg80211_chandef_valid(&chanreq->oper))) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ return elems;
+free:
+ kfree(elems);
+ return ERR_PTR(ret);
}
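/*
 * The while loop above keeps narrowing the requested channel until it is
 * usable; the debug print later derives the width as 20 * (1 << bw_limit)
 * MHz. A minimal sketch of the same downgrade idea on a bare bandwidth
 * value (illustrative; the real code downgrades a full chandef and the
 * connection mode together):
 */
static int widest_usable_bw_mhz(int start_mhz, int (*usable)(int mhz))
{
	int mhz = start_mhz;	/* e.g. 320 */

	while (mhz > 20 && !usable(mhz))
		mhz /= 2;	/* 320 -> 160 -> 80 -> 40 -> 20 */

	return mhz;
}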
static int ieee80211_config_bw(struct ieee80211_link_data *link,
- const struct ieee80211_ht_cap *ht_cap,
- const struct ieee80211_vht_cap *vht_cap,
- const struct ieee80211_ht_operation *ht_oper,
- const struct ieee80211_vht_operation *vht_oper,
- const struct ieee80211_he_operation *he_oper,
- const struct ieee80211_eht_operation *eht_oper,
- const struct ieee80211_s1g_oper_ie *s1g_oper,
- const u8 *bssid, u64 *changed)
+ struct ieee802_11_elems *elems,
+ bool update, u64 *changed)
{
+ struct ieee80211_channel *channel = link->conf->chanreq.oper.chan;
struct ieee80211_sub_if_data *sdata = link->sdata;
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_channel *chan = link->conf->chandef.chan;
- struct ieee80211_supported_band *sband =
- local->hw.wiphy->bands[chan->band];
- struct cfg80211_chan_def chandef;
- u16 ht_opmode;
- ieee80211_conn_flags_t flags;
+ struct ieee80211_chan_req chanreq = {};
+ enum ieee80211_conn_mode ap_mode;
u32 vht_cap_info = 0;
+ u16 ht_opmode;
int ret;
- /* if HT was/is disabled, don't track any bandwidth changes */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT || !ht_oper)
+ /* don't track any bandwidth changes in legacy/S1G modes */
+ if (link->u.mgd.conn.mode == IEEE80211_CONN_MODE_LEGACY ||
+ link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G)
return 0;
- /* don't check VHT if we associated as non-VHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
- vht_oper = NULL;
+ if (elems->vht_cap_elem)
+ vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info);
+
+ ap_mode = ieee80211_determine_ap_chan(sdata, channel, vht_cap_info,
+ elems, true, &link->u.mgd.conn,
+ &chanreq.ap);
- /* don't check HE if we associated as non-HE station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE ||
- !ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- he_oper = NULL;
- eht_oper = NULL;
+ if (ap_mode != link->u.mgd.conn.mode) {
+ link_info(link,
+ "AP appears to change mode (expected %s, found %s), disconnect\n",
+ ieee80211_conn_mode_str(link->u.mgd.conn.mode),
+ ieee80211_conn_mode_str(ap_mode));
+ return -EINVAL;
}
- /* don't check EHT if we associated as non-EHT station */
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT ||
- !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif))
- eht_oper = NULL;
+ chanreq.oper = chanreq.ap;
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT ||
+ sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW)
+ chanreq.ap.chan = NULL;
/*
- * if bss configuration changed store the new one -
+ * if HT operation mode changed store the new one -
* this may be applicable even if channel is identical
*/
- ht_opmode = le16_to_cpu(ht_oper->operation_mode);
- if (link->conf->ht_operation_mode != ht_opmode) {
- *changed |= BSS_CHANGED_HT;
- link->conf->ht_operation_mode = ht_opmode;
+ if (elems->ht_operation) {
+ ht_opmode = le16_to_cpu(elems->ht_operation->operation_mode);
+ if (link->conf->ht_operation_mode != ht_opmode) {
+ *changed |= BSS_CHANGED_HT;
+ link->conf->ht_operation_mode = ht_opmode;
+ }
}
- if (vht_cap)
- vht_cap_info = le32_to_cpu(vht_cap->vht_cap_info);
-
- /* calculate new channel (type) based on HT/VHT/HE operation IEs */
- flags = ieee80211_determine_chantype(sdata, link,
- link->u.mgd.conn_flags,
- sband, chan, vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper, &chandef, true);
-
/*
* Downgrade the new channel if we associated with restricted
- * capabilities. For example, if we associated as a 20 MHz STA
- * to a 40 MHz AP (due to regulatory, capabilities or config
- * reasons) then switching to a 40 MHz channel now won't do us
- * any good -- we couldn't use it with the AP.
+ * bandwidth capabilities. For example, if we associated as a
+ * 20 MHz STA to a 40 MHz AP (due to regulatory, capabilities
+ * or config reasons) then switching to a 40 MHz channel now
+ * won't do us any good -- we couldn't use it with the AP.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_80P80)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- chandef.width == NL80211_CHAN_WIDTH_160)
- flags |= ieee80211_chandef_downgrade(&chandef);
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_40MHZ &&
- chandef.width > NL80211_CHAN_WIDTH_20)
- flags |= ieee80211_chandef_downgrade(&chandef);
-
- if (cfg80211_chandef_identical(&chandef, &link->conf->chandef))
+ while (link->u.mgd.conn.bw_limit <
+ ieee80211_min_bw_limit_from_chandef(&chanreq.oper))
+ ieee80211_chandef_downgrade(&chanreq.oper, NULL);
+
+ if (ieee80211_chanreq_identical(&chanreq, &link->conf->chanreq))
return 0;
link_info(link,
- "AP %pM changed bandwidth, new config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
- link->u.mgd.bssid, chandef.chan->center_freq,
- chandef.chan->freq_offset, chandef.width,
- chandef.center_freq1, chandef.freq1_offset,
- chandef.center_freq2);
-
- if (flags != (link->u.mgd.conn_flags &
- (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT |
- IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ |
- IEEE80211_CONN_DISABLE_320MHZ)) ||
- !cfg80211_chandef_valid(&chandef)) {
+ "AP %pM changed bandwidth, new used config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
+ link->u.mgd.bssid, chanreq.oper.chan->center_freq,
+ chanreq.oper.chan->freq_offset, chanreq.oper.width,
+ chanreq.oper.center_freq1, chanreq.oper.freq1_offset,
+ chanreq.oper.center_freq2);
+
+ if (!cfg80211_chandef_valid(&chanreq.oper)) {
sdata_info(sdata,
- "AP %pM changed caps/bw in a way we can't support (0x%x/0x%x) - disconnect\n",
- link->u.mgd.bssid, flags, ifmgd->flags);
+ "AP %pM changed caps/bw in a way we can't support - disconnect\n",
+ link->u.mgd.bssid);
return -EINVAL;
}
- ret = ieee80211_link_change_bandwidth(link, &chandef, changed);
+ if (!update) {
+ link->conf->chanreq = chanreq;
+ return 0;
+ }
+
+ /*
+ * We're tracking the current AP here, so don't do any further checks
+ * here. This keeps us from playing ping-pong with regulatory, without
+ * it the following can happen (for example):
+ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
+ * - AP advertises regdom US
+ * - CRDA loads regdom US with 80 MHz prohibited (old database)
+ * - we detect an unsupported channel and disconnect
+ * - disconnect causes CRDA to reload world regdomain and the game
+ * starts anew.
+ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ *
+ * It seems possible that there are still scenarios with CSA or real
+ * bandwidth changes where this could happen, but those cases are
+ * less common and wouldn't completely prevent using the AP.
+ */
+ ret = ieee80211_link_change_chanreq(link, &chanreq, changed);
if (ret) {
sdata_info(sdata,
"AP %pM changed bandwidth to incompatible one - disconnect\n",
@@ -612,7 +890,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
u8 *pos;
u32 flags = channel->flags;
@@ -647,7 +925,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
* capable of 40 MHz -- some broken APs will never fall
* back to trying to transmit in 20 MHz.
*/
- if (conn_flags & IEEE80211_CONN_DISABLE_40MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_20) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
@@ -686,7 +964,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_supported_band *sband,
struct ieee80211_vht_cap *ap_vht_cap,
- ieee80211_conn_flags_t conn_flags)
+ const struct ieee80211_conn_settings *conn)
{
struct ieee80211_local *local = sdata->local;
u8 *pos;
@@ -703,16 +981,7 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
/* determine capability flags */
cap = vht_cap.cap;
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ) {
- u32 bw = cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
-
- cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- if (bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ ||
- bw == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
- cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
- }
-
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ) {
+ if (conn->bw_limit <= IEEE80211_CONN_BW_LIMIT_80) {
cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
}
@@ -769,79 +1038,12 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
return mu_mimo_owner;
}
-/* This function determines HE capability flags for the association
- * and builds the IE.
- */
-static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband,
- enum ieee80211_smps_mode smps_mode,
- ieee80211_conn_flags_t conn_flags)
-{
- u8 *pos, *pre_he_pos;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 he_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (WARN_ON(!he_cap))
- return;
-
- /* get a max size estimate */
- he_cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, he_cap_size);
- pre_he_pos = pos;
- pos = ieee80211_ie_build_he_cap(conn_flags,
- pos, he_cap, pos + he_cap_size);
- /* trim excess if any */
- skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos));
-
- ieee80211_ie_build_he_6ghz_cap(sdata, smps_mode, skb);
-}
-
-static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_supported_band *sband)
-{
- u8 *pos;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 eht_cap_size;
-
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- /*
- * EHT capabilities element is only added if the HE capabilities element
- * was added so assume that 'he_cap' is valid and don't check it.
- */
- if (WARN_ON(!he_cap || !eht_cap))
- return;
-
- eht_cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, eht_cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + eht_cap_size,
- false);
-}
-
static void ieee80211_assoc_add_rates(struct sk_buff *skb,
enum nl80211_chan_width width,
struct ieee80211_supported_band *sband,
struct ieee80211_mgd_assoc_data *assoc_data)
{
- unsigned int rates_len, supp_rates_len;
- u32 rates = 0;
- int i, count;
- u8 *pos;
+ u32 rates;
if (assoc_data->supp_rates_len) {
/*
@@ -850,53 +1052,23 @@ static void ieee80211_assoc_add_rates(struct sk_buff *skb,
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
- rates_len = ieee80211_parse_bitrates(width, sband,
- assoc_data->supp_rates,
- assoc_data->supp_rates_len,
- &rates);
+ ieee80211_parse_bitrates(width, sband,
+ assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ &rates);
} else {
/*
* In case the AP did not provide any supported rates
* information before association, we send information
* element(s) with all rates that we support.
*/
- rates_len = sband->n_bitrates;
- for (i = 0; i < sband->n_bitrates; i++)
- rates |= BIT(i);
- }
-
- supp_rates_len = rates_len;
- if (supp_rates_len > 8)
- supp_rates_len = 8;
-
- pos = skb_put(skb, supp_rates_len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
-
- count = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- if (++count == 8)
- break;
- }
+ rates = ~0;
}
- if (rates_len > count) {
- pos = skb_put(skb, rates_len - count + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = rates_len - count;
-
- for (i++; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate;
-
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = (u8)rate;
- }
- }
- }
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, ~rates,
+ WLAN_EID_EXT_SUPP_RATES);
}
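/*
 * The removed open-coded version above shows what ieee80211_put_srates_elem()
 * now encapsulates: at most eight rates go into the Supported Rates element,
 * any remainder into Extended Supported Rates, each octet encoding the rate
 * in 500 kb/s units. A self-contained sketch of that split, with rates[] in
 * 100 kb/s units as in struct ieee80211_rate and buffer checks omitted:
 */
#include <stdint.h>

#define EID_SUPP_RATES		1	/* WLAN_EID_SUPP_RATES */
#define EID_EXT_SUPP_RATES	50	/* WLAN_EID_EXT_SUPP_RATES */

static uint8_t *put_rates(uint8_t *pos, const uint16_t *rates, int n)
{
	int first = n > 8 ? 8 : n, i;

	*pos++ = EID_SUPP_RATES;
	*pos++ = first;
	for (i = 0; i < first; i++)
		*pos++ = (rates[i] + 4) / 5;	/* 100 kb/s -> 500 kb/s units */

	if (n > 8) {
		*pos++ = EID_EXT_SUPP_RATES;
		*pos++ = n - 8;
		for (i = 8; i < n; i++)
			*pos++ = (rates[i] + 4) / 5;
	}
	return pos;
}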
static size_t ieee80211_add_before_ht_elems(struct sk_buff *skb,
@@ -1133,11 +1305,11 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HT) {
ieee80211_add_ht_ie(sdata, skb,
assoc_data->link[link_id].ap_ht_param,
sband, chan, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_ELEM(WLAN_EID_HT_CAPABILITY);
}
@@ -1147,37 +1319,27 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
offset);
if (sband->band != NL80211_BAND_6GHZ &&
- !(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_VHT) {
bool mu_mimo_owner =
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->link[link_id].ap_vht_cap,
- assoc_data->link[link_id].conn_flags);
+ &assoc_data->link[link_id].conn);
if (link)
link->conf->mu_mimo_owner = mu_mimo_owner;
ADD_PRESENT_ELEM(WLAN_EID_VHT_CAPABILITY);
}
- /*
- * If AP doesn't support HT, mark HE and EHT as disabled.
- * If on the 5GHz band, make sure it supports VHT.
- */
- if (assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HT ||
- (sband->band == NL80211_BAND_5GHZ &&
- assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_VHT))
- assoc_data->link[link_id].conn_flags |=
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
-
/* if present, add any custom IEs that go before HE */
offset = ieee80211_add_before_he_elems(skb, extra_elems,
extra_elems_len,
offset);
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- ieee80211_add_he_ie(sdata, skb, sband, smps_mode,
- assoc_data->link[link_id].conn_flags);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_HE) {
+ ieee80211_put_he_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY);
+ ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
}
/*
@@ -1185,7 +1347,7 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
* calling ieee80211_assoc_add_ml_elem(), so add this one if
* we're going to put it after the ML element
*/
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_EHT_CAPABILITY);
if (link_id == assoc_data->assoc_link_id)
@@ -1195,8 +1357,9 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
/* crash if somebody gets it wrong */
present_elems = NULL;
- if (!(assoc_data->link[link_id].conn_flags & IEEE80211_CONN_DISABLE_EHT))
- ieee80211_add_eht_ie(sdata, skb, sband);
+ if (assoc_data->link[link_id].conn.mode >= IEEE80211_CONN_MODE_EHT)
+ ieee80211_put_eht_cap(skb, sdata, sband,
+ &assoc_data->link[link_id].conn);
if (sband->band == NL80211_BAND_S1GHZ) {
ieee80211_add_aid_request_ie(sdata, skb);
@@ -1206,9 +1369,6 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
if (iftd && iftd->vendor_elems.data && iftd->vendor_elems.len)
skb_put_data(skb, iftd->vendor_elems.data, iftd->vendor_elems.len);
- if (link)
- link->u.mgd.conn_flags = assoc_data->link[link_id].conn_flags;
-
return offset;
}
@@ -1318,8 +1478,6 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EML_CAPA);
skb_put_data(skb, &eml_capa, sizeof(eml_capa));
}
- /* need indication from userspace to support this */
- mld_capa_ops &= ~cpu_to_le16(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP);
skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
@@ -1499,7 +1657,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
/* Set MBSSID support for HE AP if needed */
if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID) &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
ext_capa && ext_capa->datalen >= 3)
ext_capa->data[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
@@ -1544,7 +1702,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* for some reason check it and want it to be set, set the bit for all
* pre-EHT connections as we used to do.
*/
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT)
capab |= WLAN_CAPABILITY_ESS;
/* add the elements for the assoc (main) link */
@@ -1741,8 +1899,8 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy,
return;
}
- if (!cfg80211_chandef_identical(&link->conf->chandef,
- &link->csa_chandef)) {
+ if (!ieee80211_chanreq_identical(&link->conf->chanreq,
+ &link->csa_chanreq)) {
sdata_info(sdata,
"failed to finalize channel switch, disconnecting\n");
wiphy_work_queue(sdata->local->hw.wiphy,
@@ -1790,8 +1948,8 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
return;
}
- cfg80211_ch_switch_notify(sdata->dev, &link->reserved_chandef,
- link->link_id, 0);
+ cfg80211_ch_switch_notify(sdata->dev, &link->reserved.oper,
+ link->link_id);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
@@ -1857,7 +2015,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
struct ieee80211_sub_if_data *sdata = link->sdata;
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct cfg80211_bss *cbss = link->u.mgd.bss;
+ struct cfg80211_bss *cbss = link->conf->bss;
struct ieee80211_chanctx_conf *conf;
struct ieee80211_chanctx *chanctx;
enum nl80211_band current_band;
@@ -1876,14 +2034,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
bss = (void *)cbss->priv;
res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
bss->vht_cap_info,
- link->u.mgd.conn_flags,
+ &link->u.mgd.conn,
link->u.mgd.bssid, &csa_ie);
if (!res) {
ch_switch.timestamp = timestamp;
ch_switch.device_timestamp = device_timestamp;
ch_switch.block_tx = csa_ie.mode;
- ch_switch.chandef = csa_ie.chandef;
+ ch_switch.chandef = csa_ie.chanreq.oper;
ch_switch.count = csa_ie.count;
ch_switch.delay = csa_ie.max_switch_time;
}
@@ -1903,34 +2061,36 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
return;
}
- if (link->conf->chandef.chan->band !=
- csa_ie.chandef.chan->band) {
+ if (link->conf->chanreq.oper.chan->band !=
+ csa_ie.chanreq.oper.chan->band) {
sdata_info(sdata,
"AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.center_freq2);
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef,
+ if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chanreq.oper,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
"AP %pM switches to unsupported channel "
"(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), "
"disconnecting\n",
link->u.mgd.bssid,
- csa_ie.chandef.chan->center_freq,
- csa_ie.chandef.chan->freq_offset,
- csa_ie.chandef.width, csa_ie.chandef.center_freq1,
- csa_ie.chandef.freq1_offset,
- csa_ie.chandef.center_freq2);
+ csa_ie.chanreq.oper.chan->center_freq,
+ csa_ie.chanreq.oper.chan->freq_offset,
+ csa_ie.chanreq.oper.width,
+ csa_ie.chanreq.oper.center_freq1,
+ csa_ie.chanreq.oper.freq1_offset,
+ csa_ie.chanreq.oper.center_freq2);
goto drop_connection;
}
- if (cfg80211_chandef_identical(&csa_ie.chandef,
- &link->conf->chandef) &&
+ if (cfg80211_chandef_identical(&csa_ie.chanreq.oper,
+ &link->conf->chanreq.oper) &&
(!csa_ie.mode || !beacon)) {
if (link->u.mgd.csa_ignored_same_chan)
return;
@@ -1959,8 +2119,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (local->use_chanctx &&
- !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
+ if (!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
sdata_info(sdata,
"driver doesn't support chan-switch with channel contexts\n");
goto drop_connection;
@@ -1972,7 +2131,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
goto drop_connection;
}
- res = ieee80211_link_reserve_chanctx(link, &csa_ie.chandef,
+ res = ieee80211_link_reserve_chanctx(link, &csa_ie.chanreq,
chanctx->mode, false);
if (res) {
sdata_info(sdata,
@@ -1982,7 +2141,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
}
link->conf->csa_active = true;
- link->csa_chandef = csa_ie.chandef;
+ link->csa_chanreq = csa_ie.chanreq;
link->csa_block_tx = csa_ie.mode;
link->u.mgd.csa_ignored_same_chan = false;
link->u.mgd.beacon_crc_valid = false;
@@ -1991,9 +2150,9 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef,
+ cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper,
link->link_id, csa_ie.count,
- csa_ie.mode, 0);
+ csa_ie.mode);
if (local->ops->channel_switch) {
/* use driver's channel switch callback */
@@ -2414,7 +2573,7 @@ void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
struct ieee80211_link_data *link =
container_of(work, struct ieee80211_link_data,
dfs_cac_timer_work.work);
- struct cfg80211_chan_def chandef = link->conf->chandef;
+ struct cfg80211_chan_def chandef = link->conf->chanreq.oper;
struct ieee80211_sub_if_data *sdata = link->sdata;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -2769,7 +2928,7 @@ static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link,
ieee80211_check_rate_mask(link);
- link->u.mgd.bss = cbss;
+ link->conf->bss = cbss;
memcpy(link->u.mgd.bssid, cbss->bssid, ETH_ALEN);
if (sdata->vif.p2p ||
@@ -2917,7 +3076,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ifmgd->associated = false;
/* other links will be destroyed */
- sdata->deflink.u.mgd.bss = NULL;
+ sdata->deflink.conf->bss = NULL;
+ sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
netif_carrier_off(sdata->dev);
@@ -2991,7 +3151,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->vif.cfg.ssid_len = 0;
/* remove AP and TDLS peers */
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
/* finally reset all BSS / config parameters */
if (!ieee80211_vif_is_mld(&sdata->vif))
@@ -3057,7 +3217,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->deflink.u.mgd.disable_wmm_tracking = false;
ifmgd->flags = 0;
- sdata->deflink.u.mgd.conn_flags = 0;
for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
struct ieee80211_link_data *link;
@@ -3081,15 +3240,25 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec));
wiphy_delayed_work_cancel(local->hw.wiphy, &ifmgd->tx_tspec_wk);
+ sdata->vif.bss_conf.power_type = IEEE80211_REG_UNSET_AP;
sdata->vif.bss_conf.pwr_reduction = 0;
sdata->vif.bss_conf.tx_pwr_env_num = 0;
memset(sdata->vif.bss_conf.tx_pwr_env, 0,
sizeof(sdata->vif.bss_conf.tx_pwr_env));
+ sdata->vif.cfg.eml_cap = 0;
+ sdata->vif.cfg.eml_med_sync_delay = 0;
+ sdata->vif.cfg.mld_capa_op = 0;
+
memset(&sdata->u.mgd.ttlm_info, 0,
sizeof(sdata->u.mgd.ttlm_info));
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->neg_ttlm_timeout_work);
ieee80211_vif_set_links(sdata, 0, 0);
+
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
}
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
@@ -3237,7 +3406,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
ieee80211_mlme_send_probe_req(sdata, sdata->vif.addr, dst,
sdata->vif.cfg.ssid,
sdata->vif.cfg.ssid_len,
- sdata->deflink.u.mgd.bss->channel);
+ sdata->deflink.conf->bss->channel);
}
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
@@ -3320,7 +3489,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
return NULL;
if (ifmgd->associated)
- cbss = sdata->deflink.u.mgd.bss;
+ cbss = sdata->deflink.conf->bss;
else if (ifmgd->auth_data)
cbss = ifmgd->auth_data->bss;
else if (ifmgd->assoc_data && ifmgd->assoc_data->link[0].bss)
@@ -3399,8 +3568,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
- cfg80211_unlink_bss(local->hw.wiphy, link->u.mgd.bss);
- link->u.mgd.bss = NULL;
+ cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss);
+ link->conf->bss = NULL;
}
}
@@ -3517,7 +3686,6 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
sta_info_destroy_addr(sdata, auth_data->ap_addr);
/* other links are destroyed */
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -3555,7 +3723,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.timer);
sta_info_destroy_addr(sdata, assoc_data->ap_addr);
- sdata->deflink.u.mgd.conn_flags = 0;
eth_zero_addr(sdata->deflink.u.mgd.bssid);
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
@@ -4005,11 +4172,13 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
struct ieee80211_local *local = sdata->local;
unsigned int link_id = link->link_id;
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.start = elem_start,
.len = elem_len,
.link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id,
.from_ap = true,
};
+ bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
const struct cfg80211_bss_ies *bss_ies = NULL;
@@ -4033,15 +4202,17 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS;
if (elems->ml_basic) {
- if (!(elems->ml_basic->control &
- cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) {
+ int bss_param_ch_cnt =
+ ieee80211_mle_get_bss_param_ch_cnt((const void *)elems->ml_basic);
+
+ if (bss_param_ch_cnt < 0) {
ret = false;
goto out;
}
- link->u.mgd.bss_param_ch_cnt =
- ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic);
+ link->u.mgd.bss_param_ch_cnt = bss_param_ch_cnt;
}
- } else if (!elems->prof ||
+ } else if (elems->parse_error & IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC ||
+ !elems->prof ||
!(elems->prof->control & prof_bss_param_ch_present)) {
ret = false;
goto out;
@@ -4085,9 +4256,9 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
*/
if (!is_6ghz &&
((assoc_data->wmm && !elems->wmm_param) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->ht_cap_elem || !elems->ht_operation)) ||
- (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)))) {
const struct cfg80211_bss_ies *ies;
struct ieee802_11_elems *bss_elems;
@@ -4124,25 +4295,25 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
* have to include the IEs in the (re)association response.
*/
if (!elems->ht_cap_elem && bss_elems->ht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_cap_elem = bss_elems->ht_cap_elem;
sdata_info(sdata,
"AP bug: HT capability missing from AssocResp\n");
}
if (!elems->ht_operation && bss_elems->ht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT) {
elems->ht_operation = bss_elems->ht_operation;
sdata_info(sdata,
"AP bug: HT operation missing from AssocResp\n");
}
if (!elems->vht_cap_elem && bss_elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
elems->vht_cap_elem = bss_elems->vht_cap_elem;
sdata_info(sdata,
"AP bug: VHT capa missing from AssocResp\n");
}
if (!elems->vht_operation && bss_elems->vht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
elems->vht_operation = bss_elems->vht_operation;
sdata_info(sdata,
"AP bug: VHT operation missing from AssocResp\n");
@@ -4154,8 +4325,10 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
/*
* We previously checked these in the beacon/probe response, so
* they should be present here. This is just a safety net.
+ * Note that the ieee80211_config_bw() below would also check
+ * for this (and more), but this has better error reporting.
*/
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT) &&
+ if (!is_6ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->wmm_param || !elems->ht_cap_elem || !elems->ht_operation)) {
sdata_info(sdata,
"HT AP is missing WMM params or HT capability/operation\n");
@@ -4163,7 +4336,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (!is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT) &&
+ if (is_5ghz && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT &&
(!elems->vht_cap_elem || !elems->vht_operation)) {
sdata_info(sdata,
"VHT AP is missing VHT capability/operation\n");
@@ -4171,36 +4344,28 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
goto out;
}
- if (is_6ghz && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- !elems->he_6ghz_capa) {
- sdata_info(sdata,
- "HE 6 GHz AP is missing HE 6 GHz band capability\n");
+ /* check/update if AP changed anything in assoc response vs. scan */
+ if (ieee80211_config_bw(link, elems,
+ link_id == assoc_data->assoc_link_id,
+ changed)) {
ret = false;
goto out;
}
- if (WARN_ON(!link->conf->chandef.chan)) {
- ret = false;
- goto out;
- }
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
-
- if (!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
- (!elems->he_cap || !elems->he_operation)) {
- sdata_info(sdata,
- "HE AP is missing HE capability/operation\n");
+ if (WARN_ON(!link->conf->chanreq.oper.chan)) {
ret = false;
goto out;
}
+ sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
/* Set up internal HT/VHT capabilities */
- if (elems->ht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ if (elems->ht_cap_elem && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
elems->ht_cap_elem,
link_sta);
if (elems->vht_cap_elem &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_VHT) {
const struct ieee80211_vht_cap *bss_vht_cap = NULL;
const struct cfg80211_bss_ies *ies;
@@ -4227,14 +4392,41 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
rcu_read_unlock();
}
- if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
+ if (elems->he_operation &&
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE &&
elems->he_cap) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
elems->he_6ghz_capa,
link_sta);
+ he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation);
+
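+		/*
+		 * Reviewer note (background, not part of this change): the
+		 * regulatory info subfield of the HE 6 GHz Operation control
+		 * byte advertises the AP's power category - LPI (low power
+		 * indoor), SP (standard power) or VLP (very low power) - and
+		 * any unknown value is recorded as IEEE80211_REG_UNSET_AP.
+		 */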
+ if (is_6ghz && he_6ghz_oper) {
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ bss_conf->power_type = IEEE80211_REG_LPI_AP;
+ break;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ bss_conf->power_type = IEEE80211_REG_SP_AP;
+ break;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ bss_conf->power_type = IEEE80211_REG_VLP_AP;
+ break;
+ default:
+ bss_conf->power_type = IEEE80211_REG_UNSET_AP;
+ break;
+ }
+ } else if (is_6ghz) {
+ link_info(link,
+ "HE 6 GHz operation missing (on %d MHz), expect issues\n",
+ bss_conf->chanreq.oper.chan->center_freq);
+ }
+
bss_conf->he_support = link_sta->pub->he_cap.has_he;
if (elems->rsnx && elems->rsnx_len &&
(elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) &&
@@ -4248,7 +4440,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta, elems);
if (elems->eht_operation && elems->eht_cap &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
+ link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_EHT) {
ieee80211_eht_cap_ie_to_sta_eht_cap(sdata, sband,
elems->he_cap,
elems->he_cap_len,
@@ -4257,7 +4449,6 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
link_sta);
bss_conf->eht_support = link_sta->pub->eht_cap.has_eht;
- *changed |= BSS_CHANGED_EHT_PUNCTURING;
} else {
bss_conf->eht_support = false;
}
@@ -4455,7 +4646,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
bool support_160;
u8 chains = 1;
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HT)
return chains;
ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY);
@@ -4468,7 +4659,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
*/
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_VHT)
return chains;
vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
@@ -4487,7 +4678,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
chains = max(chains, nss);
}
- if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE)
+ if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_HE)
return chains;
ies = rcu_dereference(cbss->ies);
@@ -4538,536 +4729,331 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
return chains;
}
-static bool
-ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_bss_ies *ies,
- const struct ieee80211_he_operation *he_op)
+static void
+ieee80211_determine_our_sta_mode(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
{
- const struct element *he_cap_elem;
- const struct ieee80211_he_cap_elem *he_cap;
- struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
- u16 mcs_80_map_tx, mcs_80_map_rx;
- u16 ap_min_req_set;
- int mcs_nss_size;
- int nss;
-
- he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
- ies->data, ies->len);
-
- if (!he_cap_elem)
- return false;
+ struct ieee80211_sta_ht_cap sta_ht_cap = sband->ht_cap;
+ bool is_5ghz = sband->band == NL80211_BAND_5GHZ;
+ bool is_6ghz = sband->band == NL80211_BAND_6GHZ;
+ const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
- sdata_info(sdata,
- "Invalid HE elem, Disable HE\n");
- return false;
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_dbg(sdata, "operating as S1G STA\n");
+ return;
}
- /* skip one byte ext_tag_id */
- he_cap = (void *)(he_cap_elem->data + 1);
- mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
- /* invalid HE IE */
- if (he_cap_elem->datalen < 1 + sizeof(*he_cap) + mcs_nss_size) {
- sdata_info(sdata,
- "Invalid HE elem with nss size, Disable HE\n");
- return false;
+ ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+ if (req && req->flags & ASSOC_REQ_DISABLE_HT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT disabled by flag, limiting to legacy\n");
+ goto out;
}
- /* mcs_nss is right after he_cap info */
- he_mcs_nss_supp = (void *)(he_cap + 1);
+ if (!wmm_used) {
+ mlme_link_id_dbg(sdata, link_id,
+ "WMM/QoS not supported, limiting to legacy\n");
+ goto out;
+ }
- mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
- mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+ if (req) {
+ unsigned int i;
- /* P802.11-REVme/D0.3
- * 27.1.1 Introduction to the HE PHY
- * ...
- * An HE STA shall support the following features:
- * ...
- * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
- * supported channel widths for HE SU PPDUs
- */
- if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
- sdata_info(sdata,
- "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
- mcs_80_map_tx, mcs_80_map_rx);
- return false;
+ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
+ if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
+ netdev_info(sdata->dev,
+					    "WEP/TKIP in use, limiting to legacy\n");
+ goto out;
+ }
+ }
}
- if (!he_op)
- return true;
-
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+ if (!sta_ht_cap.ht_supported && !is_6ghz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HT not supported (and not on 6 GHz), limiting to legacy\n");
+ goto out;
+ }
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
+ /* HT is fine */
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ?
+ IEEE80211_CONN_BW_LIMIT_40 :
+ IEEE80211_CONN_BW_LIMIT_20;
- /* make sure the AP is consistent with itself
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
- * A STA that is operating in an HE BSS shall be able to receive and
- * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
- * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
- * MLME-START.request primitive and shall be able to receive at each of
- * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
- * NSS Set field in the HE Capabilities parameter of the MLMESTART.request
- * primitive
- */
- for (nss = 8; nss > 0; nss--) {
- u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
- u8 ap_rx_val;
- u8 ap_tx_val;
+ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+ ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
- if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ if (req && req->flags & ASSOC_REQ_DISABLE_VHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "VHT disabled by flag, limiting to HT\n");
+ goto out;
+ }
- ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
- ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+ if (vht_cap.vht_supported && is_5ghz) {
+ bool have_80mhz = false;
+ unsigned int i;
- if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
- sdata_info(sdata,
- "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
- nss, ap_rx_val, ap_rx_val, ap_op_val);
- return false;
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz support on 5 GHz, limiting to HT\n");
+ goto out;
}
- }
- return true;
-}
+ /* Allow VHT if at least one channel on the sband supports 80 MHz */
+ for (i = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_NO_80MHZ))
+ continue;
-static bool
-ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_he_operation *he_op)
-{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- u16 ap_min_req_set;
- int i;
+ have_80mhz = true;
+ break;
+ }
- if (!sta_he_cap || !he_op)
- return false;
+ if (!have_80mhz) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no 80 MHz channel support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
+ } else if (is_5ghz) { /* !vht_supported but on 5 GHz */
+ mlme_link_id_dbg(sdata, link_id,
+ "no VHT support on 5 GHz, limiting to HT\n");
+ goto out;
+ }
- ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+	/* VHT - if we have it - is fine, including 80 MHz; 160 MHz is checked again below */
+ if (sband->band != NL80211_BAND_2GHZ) {
+ conn->mode = IEEE80211_CONN_MODE_VHT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
+ }
- /*
- * Apparently iPhone 13 (at least iOS version 15.3.1) sets this to all
- * zeroes, which is nonsense, and completely inconsistent with itself
- * (it doesn't have 8 streams). Accept the settings in this case anyway.
- */
- if (!ap_min_req_set)
- return true;
+ if (is_5ghz &&
+ !(vht_cap.cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
+ mlme_link_id_dbg(sdata, link_id,
+				 "no VHT 160 MHz capability on 5 GHz, limiting to 80 MHz\n");
+ }
- /* Need to go over for 80MHz, 160MHz and for 80+80 */
- for (i = 0; i < 3; i++) {
- const struct ieee80211_he_mcs_nss_supp *sta_mcs_nss_supp =
- &sta_he_cap->he_mcs_nss_supp;
- u16 sta_mcs_map_rx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i]);
- u16 sta_mcs_map_tx =
- le16_to_cpu(((__le16 *)sta_mcs_nss_supp)[2 * i + 1]);
- u8 nss;
- bool verified = true;
+ if (req && req->flags & ASSOC_REQ_DISABLE_HE) {
+ mlme_link_id_dbg(sdata, link_id,
+ "HE disabled by flag, limiting to HT/VHT\n");
+ goto out;
+ }
- /*
- * For each band there is a maximum of 8 spatial streams
- * possible. Each of the sta_mcs_map_* is a 16-bit struct built
- * of 2 bits per NSS (1-8), with the values defined in enum
- * ieee80211_he_mcs_support. Need to make sure STA TX and RX
- * capabilities aren't less than the AP's minimum requirements
- * for this HE BSS per SS.
- * It is enough to find one such band that meets the reqs.
- */
- for (nss = 8; nss > 0; nss--) {
- u8 sta_rx_val = (sta_mcs_map_rx >> (2 * (nss - 1))) & 3;
- u8 sta_tx_val = (sta_mcs_map_tx >> (2 * (nss - 1))) & 3;
- u8 ap_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap) {
+ WARN_ON(is_6ghz);
+ mlme_link_id_dbg(sdata, link_id,
+ "no HE support, limiting to HT/VHT\n");
+ goto out;
+ }
- if (ap_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
- continue;
+ /* so we have HE */
+ conn->mode = IEEE80211_CONN_MODE_HE;
- /*
- * Make sure the HE AP doesn't require MCSs that aren't
- * supported by the client as required by spec
- *
- * P802.11-REVme/D0.3
- * 26.17.1 Basic HE BSS operation
- *
- * An HE STA shall not attempt to join * (MLME-JOIN.request primitive)
- * a BSS, unless it supports (i.e., is able to both transmit and
- * receive using) all of the <HE-MCS, NSS> tuples in the basic
- * HE-MCS and NSS set.
- */
- if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
- (ap_val > sta_rx_val) || (ap_val > sta_tx_val)) {
- verified = false;
- break;
- }
+ /* check bandwidth */
+ switch (sband->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ break;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40 MHz HE cap in 2.4 GHz, limiting to 20 MHz\n");
+ break;
+ case NL80211_BAND_5GHZ:
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) {
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ mlme_link_id_dbg(sdata, link_id,
+ "no 40/80 MHz HE cap in 5 GHz, limiting to 20 MHz\n");
+ break;
}
+ if (!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)) {
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 5 GHz, limiting to 80 MHz\n");
+ }
+ break;
+ case NL80211_BAND_6GHZ:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ break;
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit,
+ IEEE80211_CONN_BW_LIMIT_80);
+ mlme_link_id_dbg(sdata, link_id,
+ "no 160 MHz HE cap in 6 GHz, limiting to 80 MHz\n");
+ break;
+ }
- if (verified)
- return true;
+ if (req && req->flags & ASSOC_REQ_DISABLE_EHT) {
+ mlme_link_id_dbg(sdata, link_id,
+ "EHT disabled by flag, limiting to HE\n");
+ goto out;
}
- /* If here, STA doesn't meet AP's HE min requirements */
- return false;
-}
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ if (!eht_cap) {
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT support, limiting to HE\n");
+ goto out;
+ }
-static u8
-ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap,
- const struct ieee80211_sta_eht_cap *sta_eht_cap,
- unsigned int idx, int bw)
-{
- u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0];
- u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0];
+ /* we have EHT */
- /* handle us being a 20 MHz-only EHT STA - with four values
- * for MCS 0-7, 8-9, 10-11, 12-13.
- */
- if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL))
- return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx];
+ conn->mode = IEEE80211_CONN_MODE_EHT;
- /* the others have MCS 0-9 together, rather than separately from 0-7 */
- if (idx > 0)
- idx--;
+ /* check bandwidth */
+ if (is_6ghz &&
+ eht_cap->eht_cap_elem.phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_320;
+ else if (is_6ghz)
+ mlme_link_id_dbg(sdata, link_id,
+ "no EHT 320 MHz cap in 6 GHz, limiting to 160 MHz\n");
- switch (bw) {
- case 0:
- return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx];
- case 1:
- if (!(he_phy_cap0 &
- (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx];
- case 2:
- if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ))
- return 0xff; /* pass check */
- return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx];
- }
-
- WARN_ON(1);
- return 0;
+out:
+ mlme_link_id_dbg(sdata, link_id,
+ "determined local STA to be %s, BW limited to %d MHz\n",
+ ieee80211_conn_mode_str(conn->mode),
+ 20 * (1 << conn->bw_limit));
}
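+/*
+ * Reviewer sketch (illustration only, not part of this change): the function
+ * above walks the ladder LEGACY -> HT -> VHT -> HE -> EHT, and each
+ * "goto out" freezes conn->mode/bw_limit at the highest level that local
+ * capabilities and the request flags allow. For example, a 2.4 GHz HE STA
+ * with the 40 MHz HE PHY capability ends up with mode == HE and
+ * bw_limit == IEEE80211_CONN_BW_LIMIT_40.
+ */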
-static bool
-ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_supported_band *sband,
- const struct ieee80211_eht_operation *eht_op)
+static void
+ieee80211_determine_our_sta_mode_auth(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_auth_request *req,
+ bool wmm_used,
+ struct ieee80211_conn_settings *conn)
{
- const struct ieee80211_sta_he_cap *sta_he_cap =
- ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_sta_eht_cap *sta_eht_cap =
- ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
- const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req;
- unsigned int i;
-
- if (!sta_he_cap || !sta_eht_cap || !eht_op)
- return false;
-
- req = &eht_op->basic_mcs_nss;
-
- for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) {
- u8 req_rx_nss, req_tx_nss;
- unsigned int bw;
-
- req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_RX);
- req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i],
- IEEE80211_EHT_MCS_NSS_TX);
+ ieee80211_determine_our_sta_mode(sdata, sband, NULL, wmm_used,
+ req->link_id > 0 ? req->link_id : 0,
+ conn);
+}
- for (bw = 0; bw < 3; bw++) {
- u8 have, have_rx_nss, have_tx_nss;
+static void
+ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_assoc_request *req,
+ bool wmm_used, int link_id,
+ struct ieee80211_conn_settings *conn)
+{
+ struct ieee80211_conn_settings tmp;
- have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap,
- sta_eht_cap,
- i, bw);
- have_rx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_RX);
- have_tx_nss = u8_get_bits(have,
- IEEE80211_EHT_MCS_NSS_TX);
+ WARN_ON(!req);
- if (req_rx_nss > have_rx_nss ||
- req_tx_nss > have_tx_nss)
- return false;
- }
- }
+ ieee80211_determine_our_sta_mode(sdata, sband, req, wmm_used, link_id,
+ &tmp);
- return true;
+ conn->mode = min_t(enum ieee80211_conn_mode,
+ conn->mode, tmp.mode);
+ conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
+ conn->bw_limit, tmp.bw_limit);
}
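+/*
+ * Reviewer note (assumption based on the callers above): for association,
+ * conn is seeded with the auth-time settings, so the two min_t() clamps
+ * can only narrow mode and bandwidth - an assoc request never upgrades
+ * past what was determined at authentication time.
+ */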
static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
- struct cfg80211_bss *cbss,
- bool mlo,
- ieee80211_conn_flags_t *conn_flags)
+ int link_id,
+ struct cfg80211_bss *cbss, bool mlo,
+ struct ieee80211_conn_settings *conn)
{
struct ieee80211_local *local = sdata->local;
- const struct ieee80211_ht_cap *ht_cap = NULL;
- const struct ieee80211_ht_operation *ht_oper = NULL;
- const struct ieee80211_vht_operation *vht_oper = NULL;
- const struct ieee80211_he_operation *he_oper = NULL;
- const struct ieee80211_eht_operation *eht_oper = NULL;
- const struct ieee80211_s1g_oper_ie *s1g_oper = NULL;
- struct ieee80211_supported_band *sband;
- struct cfg80211_chan_def chandef;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
- bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- bool supports_mlo = false;
- struct ieee80211_bss *bss = (void *)cbss->priv;
- struct ieee80211_elems_parse_params parse_params = {
- .link_id = -1,
- .from_ap = true,
- };
+ struct ieee80211_chan_req chanreq = {};
struct ieee802_11_elems *elems;
- const struct cfg80211_bss_ies *ies;
int ret;
u32 i;
- bool have_80mhz;
lockdep_assert_wiphy(local->hw.wiphy);
rcu_read_lock();
+ elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id,
+ &chanreq);
- ies = rcu_dereference(cbss->ies);
- parse_params.start = ies->data;
- parse_params.len = ies->len;
- elems = ieee802_11_parse_elems_full(&parse_params);
- if (!elems) {
+ if (IS_ERR(elems)) {
rcu_read_unlock();
- return -ENOMEM;
- }
-
- sband = local->hw.wiphy->bands[cbss->channel->band];
-
- *conn_flags &= ~(IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ);
-
- /* disable HT/VHT/HE if we don't support them */
- if (!sband->ht_cap.ht_supported && !is_6ghz) {
- mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!sband->vht_cap.vht_supported && is_5ghz) {
- mlme_dbg(sdata, "VHT not supported, disabling VHT/HE/EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) {
- mlme_dbg(sdata, "EHT not supported, disabling EHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HT) && !is_6ghz) {
- ht_oper = elems->ht_operation;
- ht_cap = elems->ht_cap_elem;
-
- if (!ht_cap) {
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- ht_oper = NULL;
- }
+ return PTR_ERR(elems);
}
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_VHT) && !is_6ghz) {
- vht_oper = elems->vht_operation;
- if (vht_oper && !ht_oper) {
- vht_oper = NULL;
- sdata_info(sdata,
- "AP advertised VHT without HT, disabling HT/VHT/HE\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_HT;
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- *conn_flags |= IEEE80211_CONN_DISABLE_HE;
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
-
- if (!elems->vht_cap_elem) {
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- vht_oper = NULL;
- }
+ if (mlo && !elems->ml_basic) {
+ sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
+ rcu_read_unlock();
+ kfree(elems);
+ return -EINVAL;
}
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_HE)) {
- he_oper = elems->he_operation;
-
- if (link && is_6ghz) {
- struct ieee80211_bss_conf *bss_conf;
- u8 j = 0;
-
- bss_conf = link->conf;
-
- if (elems->pwr_constr_elem)
- bss_conf->pwr_reduction = *elems->pwr_constr_elem;
-
- BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
- ARRAY_SIZE(elems->tx_pwr_env));
-
- for (i = 0; i < elems->tx_pwr_env_num; i++) {
- if (elems->tx_pwr_env_len[i] >
- sizeof(bss_conf->tx_pwr_env[j]))
- continue;
-
- bss_conf->tx_pwr_env_num++;
- memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
- elems->tx_pwr_env_len[i]);
- j++;
- }
- }
-
- if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) ||
- !ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
+ if (link && is_6ghz && conn->mode >= IEEE80211_CONN_MODE_HE) {
+ struct ieee80211_bss_conf *bss_conf;
+ u8 j = 0;
- /*
- * EHT requires HE to be supported as well. Specifically for 6 GHz
- * channels, the operation channel information can only be deduced from
- * both the 6 GHz operation information (from the HE operation IE) and
- * EHT operation.
- */
- if (!(*conn_flags &
- (IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT)) &&
- he_oper) {
- const struct cfg80211_bss_ies *cbss_ies;
- const struct element *eht_ml_elem;
- const u8 *eht_oper_ie;
-
- cbss_ies = rcu_dereference(cbss->ies);
- eht_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_OPERATION,
- cbss_ies->data, cbss_ies->len);
- if (eht_oper_ie && eht_oper_ie[1] >=
- 1 + sizeof(struct ieee80211_eht_operation))
- eht_oper = (void *)(eht_oper_ie + 3);
- else
- eht_oper = NULL;
+ bss_conf = link->conf;
- if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper))
- *conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ if (elems->pwr_constr_elem)
+ bss_conf->pwr_reduction = *elems->pwr_constr_elem;
- eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
- cbss_ies->data, cbss_ies->len);
+ BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) !=
+ ARRAY_SIZE(elems->tx_pwr_env));
- /* data + 1 / datalen - 1 since it's an extended element */
- if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) &&
- eht_ml_elem &&
- ieee80211_mle_type_ok(eht_ml_elem->data + 1,
- IEEE80211_ML_CONTROL_TYPE_BASIC,
- eht_ml_elem->datalen - 1)) {
- supports_mlo = true;
+ for (i = 0; i < elems->tx_pwr_env_num; i++) {
+ if (elems->tx_pwr_env_len[i] > sizeof(bss_conf->tx_pwr_env[j]))
+ continue;
- sdata->vif.cfg.eml_cap =
- ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1);
- sdata->vif.cfg.eml_med_sync_delay =
- ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1);
+ bss_conf->tx_pwr_env_num++;
+ memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i],
+ elems->tx_pwr_env_len[i]);
+ j++;
}
}
-
- /* Allow VHT if at least one channel on the sband supports 80 MHz */
- have_80mhz = false;
- for (i = 0; i < sband->n_channels; i++) {
- if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
- IEEE80211_CHAN_NO_80MHZ))
- continue;
-
- have_80mhz = true;
- break;
- }
-
- if (!have_80mhz) {
- sdata_info(sdata, "80 MHz not supported, disabling VHT\n");
- *conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
-
- if (sband->band == NL80211_BAND_S1GHZ) {
- s1g_oper = elems->s1g_oper;
- if (!s1g_oper)
- sdata_info(sdata,
- "AP missing S1G operation element?\n");
- }
-
- *conn_flags |=
- ieee80211_determine_chantype(sdata, link, *conn_flags,
- sband,
- cbss->channel,
- bss->vht_cap_info,
- ht_oper, vht_oper,
- he_oper, eht_oper,
- s1g_oper,
- &chandef, false);
-
- if (link)
- link->needed_rx_chains =
- min(ieee80211_max_rx_chains(link, cbss),
- local->rx_chains);
-
rcu_read_unlock();
/* the element data was RCU protected so no longer valid anyway */
kfree(elems);
elems = NULL;
- if (*conn_flags & IEEE80211_CONN_DISABLE_HE && is_6ghz) {
- sdata_info(sdata, "Rejecting non-HE 6/7 GHz connection");
- return -EINVAL;
- }
-
- if (mlo && !supports_mlo) {
- sdata_info(sdata, "Rejecting MLO as it is not supported by AP\n");
- return -EINVAL;
- }
-
if (!link)
return 0;
- /* will change later if needed */
- link->smps_mode = IEEE80211_SMPS_OFF;
+ rcu_read_lock();
+ link->needed_rx_chains = min(ieee80211_max_rx_chains(link, cbss),
+ local->rx_chains);
+ rcu_read_unlock();
/*
* If this fails (possibly due to channel context sharing
* on incompatible channels, e.g. 80+80 and 160 sharing the
* same control channel) try to use a smaller bandwidth.
*/
- ret = ieee80211_link_use_channel(link, &chandef,
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
/* don't downgrade for 5 and 10 MHz channels, though. */
- if (chandef.width == NL80211_CHAN_WIDTH_5 ||
- chandef.width == NL80211_CHAN_WIDTH_10)
- goto out;
+ if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 ||
+ chanreq.oper.width == NL80211_CHAN_WIDTH_10)
+ return ret;
- while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
- *conn_flags |=
- ieee80211_chandef_downgrade(&chandef);
- ret = ieee80211_link_use_channel(link, &chandef,
+ while (ret && chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT) {
+ ieee80211_chanreq_downgrade(&chanreq, conn);
+
+ ret = ieee80211_link_use_channel(link, &chanreq,
IEEE80211_CHANCTX_SHARED);
}
- out:
+
return ret;
}
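+/*
+ * Reviewer sketch of the downgrade loop above (illustration only): each
+ * ieee80211_chanreq_downgrade() step narrows the requested operating width,
+ * e.g. 160 -> 80 -> 40 -> 20 MHz, lowering conn->bw_limit to match, until
+ * ieee80211_link_use_channel() accepts the request or 20 MHz no-HT is
+ * reached.
+ */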
@@ -5128,6 +5114,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sta))
goto out_err;
+ sta->sta.spp_amsdu = assoc_data->spp_amsdu;
+
if (ieee80211_vif_is_mld(&sdata->vif)) {
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (!assoc_data->link[link_id].bss)
@@ -5191,8 +5179,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
if (link_id != assoc_data->assoc_link_id) {
- err = ieee80211_prep_channel(sdata, link, cbss, true,
- &link->u.mgd.conn_flags);
+ link->u.mgd.conn = assoc_data->link[link_id].conn;
+
+ err = ieee80211_prep_channel(sdata, link, link_id, cbss,
+ true, &link->u.mgd.conn);
if (err) {
link_info(link, "prep_channel failed\n");
goto out_err;
@@ -5310,6 +5300,9 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!assoc_data)
return;
+ parse_params.mode =
+ assoc_data->link[assoc_data->assoc_link_id].conn.mode;
+
if (!ether_addr_equal(assoc_data->ap_addr, mgmt->bssid) ||
!ether_addr_equal(assoc_data->ap_addr, mgmt->sa))
return;
@@ -5426,6 +5419,13 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
assoc_data->ap_addr);
goto abandon_assoc;
}
+
+ sdata->vif.cfg.eml_cap =
+ ieee80211_mle_get_eml_cap((const void *)elems->ml_basic);
+ sdata->vif.cfg.eml_med_sync_delay =
+ ieee80211_mle_get_eml_med_sync_delay((const void *)elems->ml_basic);
+ sdata->vif.cfg.mld_capa_op =
+ ieee80211_mle_get_mld_capa_op((const void *)elems->ml_basic);
}
sdata->vif.cfg.aid = aid;
@@ -5688,49 +5688,6 @@ static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
return ether_addr_equal(tx_bssid, bss->transmitted_bss->bssid);
}
-static bool ieee80211_config_puncturing(struct ieee80211_link_data *link,
- const struct ieee80211_eht_operation *eht_oper,
- u64 *changed)
-{
- struct ieee80211_local *local = link->sdata->local;
- u16 bitmap = 0, extracted;
-
- if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- }
-
- extracted = ieee80211_extract_dis_subch_bmap(eht_oper,
- &link->conf->chandef,
- bitmap);
-
- /* accept if there are no changes */
- if (!(*changed & BSS_CHANGED_BANDWIDTH) &&
- extracted == link->conf->eht_puncturing)
- return true;
-
- if (!cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef)) {
- link_info(link,
- "Got an invalid disable subchannel bitmap from AP %pM: bitmap = 0x%x, bw = 0x%x. disconnect\n",
- link->u.mgd.bssid,
- bitmap,
- link->conf->chandef.width);
- return false;
- }
-
- if (bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING))
- return false;
-
- ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed);
- return true;
-}
-
static void ieee80211_ml_reconf_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5890,6 +5847,56 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
TU_TO_JIFFIES(delay));
}
+static int ieee80211_ttlm_set_links(struct ieee80211_sub_if_data *sdata,
+ u16 active_links, u16 dormant_links,
+ u16 suspended_links)
+{
+ u64 changed = 0;
+ int ret;
+
+ if (!active_links) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* If there is an active negotiated TTLM, it must be discarded
+	 * before the new negotiated/advertised TTLM is applied.
+	 */
+ if (sdata->vif.neg_ttlm.valid) {
+ memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm));
+ sdata->vif.suspended_links = 0;
+ changed = BSS_CHANGED_MLD_TTLM;
+ }
+
+ if (sdata->vif.active_links != active_links) {
+ ret = ieee80211_set_active_links(&sdata->vif, active_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM active links\n");
+ goto out;
+ }
+ }
+
+ ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
+ dormant_links);
+ if (ret) {
+ sdata_info(sdata, "Failed to set TTLM dormant links\n");
+ goto out;
+ }
+
+ changed |= BSS_CHANGED_MLD_VALID_LINKS;
+ sdata->vif.suspended_links = suspended_links;
+ if (sdata->vif.suspended_links)
+ changed |= BSS_CHANGED_MLD_TTLM;
+
+ ieee80211_vif_cfg_change_notify(sdata, changed);
+
+out:
+ if (ret)
+ ieee80211_disconnect(&sdata->vif, false);
+
+ return ret;
+}
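+/*
+ * Worked example (reviewer illustration, not part of this change): with
+ * valid_links == 0x7, a TTLM mapping all TIDs to link 0 gives
+ * active_links == 0x1 and dormant_links == 0x6; for a negotiated TTLM the
+ * removed links are additionally tracked as suspended_links so they can be
+ * restored when the mapping is torn down.
+ */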
+
static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct wiphy_work *work)
{
@@ -5897,30 +5904,19 @@ static void ieee80211_tid_to_link_map_work(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data,
u.mgd.ttlm_work.work);
- int ret;
new_active_links = sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
new_dormant_links = ~sdata->u.mgd.ttlm_info.map &
sdata->vif.valid_links;
- if (!new_active_links) {
- ieee80211_disconnect(&sdata->vif, false);
- return;
- }
ieee80211_vif_set_links(sdata, sdata->vif.valid_links, 0);
- new_active_links = BIT(ffs(new_active_links) - 1);
- ieee80211_set_active_links(&sdata->vif, new_active_links);
-
- ret = ieee80211_vif_set_links(sdata, sdata->vif.valid_links,
- new_dormant_links);
+ if (ieee80211_ttlm_set_links(sdata, new_active_links, new_dormant_links,
+ 0))
+ return;
sdata->u.mgd.ttlm_info.active = true;
sdata->u.mgd.ttlm_info.switch_time = 0;
-
- if (!ret)
- ieee80211_vif_cfg_change_notify(sdata,
- BSS_CHANGED_MLD_VALID_LINKS);
}
static u16 ieee80211_get_ttlm(u8 bm_size, u8 *data)
@@ -6130,6 +6126,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
u8 *bssid, *variable = mgmt->u.beacon.variable;
u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
struct ieee80211_elems_parse_params parse_params = {
+ .mode = link->u.mgd.conn.mode,
.link_id = -1,
.from_ap = true,
};
@@ -6212,7 +6209,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(bssid, link->u.mgd.bss))
+ !ieee80211_rx_our_beacon(bssid, link->conf->bss))
return;
bssid = link->u.mgd.bssid;
@@ -6239,7 +6236,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
*/
if (!ieee80211_is_s1g_beacon(hdr->frame_control))
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
- parse_params.bss = link->u.mgd.bss;
+ parse_params.bss = link->conf->bss;
parse_params.filter = care_about_ies;
parse_params.crc = ncrc;
elems = ieee802_11_parse_elems_full(&parse_params);
@@ -6377,21 +6374,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
goto free;
}
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!link->conf->chanreq.oper.chan))
goto free;
- sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
+ sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band];
changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
- if (ieee80211_config_bw(link, elems->ht_cap_elem,
- elems->vht_cap_elem, elems->ht_operation,
- elems->vht_operation, elems->he_operation,
- elems->eht_operation,
- elems->s1g_oper, bssid, &changed)) {
- sdata_info(sdata,
- "failed to follow AP %pM bandwidth change, disconnect\n",
- bssid);
+ if (ieee80211_config_bw(link, elems, true, &changed)) {
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
true, deauth_buf);
@@ -6413,21 +6403,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
elems->pwr_constr_elem,
elems->cisco_dtpc_elem);
- if (elems->eht_operation &&
- !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
- if (!ieee80211_config_puncturing(link, elems->eht_operation,
- &changed)) {
- ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
- WLAN_REASON_DEAUTH_LEAVING,
- true, deauth_buf);
- ieee80211_report_disconnect(sdata, deauth_buf,
- sizeof(deauth_buf), true,
- WLAN_REASON_DEAUTH_LEAVING,
- false);
- goto free;
- }
- }
-
ieee80211_ml_reconfiguration(sdata, elems);
ieee80211_process_adv_ttlm(sdata, elems,
le64_to_cpu(mgmt->u.beacon.timestamp));
@@ -6437,6 +6412,376 @@ free:
kfree(elems);
}
+static void ieee80211_apply_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm neg_ttlm)
+{
+ u16 new_active_links, new_dormant_links, new_suspended_links, map = 0;
+ u8 i;
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++)
+ map |= neg_ttlm.downlink[i] | neg_ttlm.uplink[i];
+
+ /* If there is an active TTLM, unset previously suspended links */
+ if (sdata->vif.neg_ttlm.valid)
+ sdata->vif.dormant_links &= ~sdata->vif.suspended_links;
+
+ /* exclude links that are already disabled by advertised TTLM */
+ new_active_links =
+ map & sdata->vif.valid_links & ~sdata->vif.dormant_links;
+ new_suspended_links =
+ (~map & sdata->vif.valid_links) & ~sdata->vif.dormant_links;
+ new_dormant_links = sdata->vif.dormant_links | new_suspended_links;
+ if (ieee80211_ttlm_set_links(sdata, new_active_links,
+ new_dormant_links, new_suspended_links))
+ return;
+
+ sdata->vif.neg_ttlm = neg_ttlm;
+ sdata->vif.neg_ttlm.valid = true;
+}
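+/*
+ * Reviewer note: suspended links differ from dormant ones only in origin -
+ * they were disabled by this negotiated TTLM rather than by an advertised
+ * one - which is why they are folded into new_dormant_links above and
+ * unset again when the negotiated TTLM is replaced.
+ */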
+
+static void ieee80211_neg_ttlm_timeout_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.neg_ttlm_timeout_work.work);
+
+ sdata_info(sdata,
+ "No negotiated TTLM response from AP, disconnecting.\n");
+
+ __ieee80211_disconnect(sdata);
+}
+
+static void
+ieee80211_neg_ttlm_add_suggested_map(struct sk_buff *skb,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u8 i, direction[IEEE80211_TTLM_MAX_CNT];
+
+ if (memcmp(neg_ttlm->downlink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->downlink))) {
+ direction[0] = IEEE80211_TTLM_DIRECTION_DOWN;
+ direction[1] = IEEE80211_TTLM_DIRECTION_UP;
+ } else {
+ direction[0] = IEEE80211_TTLM_DIRECTION_BOTH;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(direction); i++) {
+ u8 tid, len, map_ind = 0, *len_pos, *map_ind_pos, *pos;
+ __le16 map;
+
+ len = sizeof(struct ieee80211_ttlm_elem) + 1 + 1;
+
+ pos = skb_put(skb, len + 2);
+ *pos++ = WLAN_EID_EXTENSION;
+ len_pos = pos++;
+ *pos++ = WLAN_EID_EXT_TID_TO_LINK_MAPPING;
+ *pos++ = direction[i];
+ map_ind_pos = pos++;
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ map = direction[i] == IEEE80211_TTLM_DIRECTION_UP ?
+ cpu_to_le16(neg_ttlm->uplink[tid]) :
+ cpu_to_le16(neg_ttlm->downlink[tid]);
+ if (!map)
+ continue;
+
+ len += 2;
+ map_ind |= BIT(tid);
+ skb_put_data(skb, &map, sizeof(map));
+ }
+
+ *map_ind_pos = map_ind;
+ *len_pos = len;
+
+ if (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH)
+ break;
+ }
+}
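+/*
+ * Sketch of the element emitted above (reviewer illustration; consult the
+ * 802.11be spec for the authoritative layout):
+ *   ext EID | len | TTLM ext ID | control (direction) | link mapping
+ *   presence bitmap | one __le16 link bitmap per TID flagged present
+ * A single element with direction BOTH is used when the UL and DL maps are
+ * identical, otherwise one element per direction is added.
+ */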
+
+static void
+ieee80211_send_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 dialog_token)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_req);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_req.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ;
+ mgmt->u.action.u.ttlm_req.dialog_token = dialog_token;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ ieee80211_tx_skb(sdata, skb);
+}
+
+int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ttlm_params *params)
+{
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+ u8 i;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ !(sdata->vif.cfg.mld_capa_op &
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP))
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((params->dlink[i] & ~sdata->vif.valid_links) ||
+ (params->ulink[i] & ~sdata->vif.valid_links))
+ return -EINVAL;
+
+ neg_ttlm.downlink[i] = params->dlink[i];
+ neg_ttlm.uplink[i] = params->ulink[i];
+ }
+
+ if (drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm) !=
+ NEG_TTLM_RES_ACCEPT)
+ return -EINVAL;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+ sdata->u.mgd.dialog_token_alloc++;
+ ieee80211_send_neg_ttlm_req(sdata, &sdata->vif.neg_ttlm,
+ sdata->u.mgd.dialog_token_alloc);
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+ wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work,
+ IEEE80211_NEG_TTLM_REQ_TIMEOUT);
+ return 0;
+}
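+/*
+ * Reviewer note on the flow above: the suggested mapping is applied locally
+ * before the AP answers; the dialog token ties the response to this request,
+ * and the delayed work disconnects if no response arrives within
+ * IEEE80211_NEG_TTLM_REQ_TIMEOUT.
+ */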
+
+static void
+ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res ttlm_res,
+ u8 dialog_token,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct sk_buff *skb;
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->tx_headroom);
+ mgmt = skb_put_zero(skb, hdr_len);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+ mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+ mgmt->u.action.u.ttlm_res.action_code =
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES;
+ mgmt->u.action.u.ttlm_res.dialog_token = dialog_token;
+ switch (ttlm_res) {
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case NEG_TTLM_RES_REJECT:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
+ break;
+ case NEG_TTLM_RES_ACCEPT:
+ mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS;
+ break;
+ case NEG_TTLM_RES_SUGGEST_PREFERRED:
+ mgmt->u.action.u.ttlm_res.status_code =
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ break;
+ }
+
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static int
+ieee80211_parse_neg_ttlm(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_ttlm_elem *ttlm,
+ struct ieee80211_neg_ttlm *neg_ttlm,
+ u8 *direction)
+{
+ u8 control, link_map_presence, map_size, tid;
+ u8 *pos;
+
+ /* The element size was already validated in
+ * ieee80211_tid_to_link_map_size_ok()
+ */
+ pos = (void *)ttlm->optional;
+
+ control = ttlm->control;
+
+	/* the mapping switch time and expected duration fields must not
+	 * be present in a negotiated TTLM
+	 */
+ if (control & (IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT |
+ IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)) {
+ mlme_dbg(sdata,
+ "Invalid TTLM element in negotiated TTLM request\n");
+ return -EINVAL;
+ }
+
+ if (control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP) {
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ neg_ttlm->downlink[tid] = sdata->vif.valid_links;
+ neg_ttlm->uplink[tid] = sdata->vif.valid_links;
+ }
+ *direction = IEEE80211_TTLM_DIRECTION_BOTH;
+ return 0;
+ }
+
+ *direction = u8_get_bits(control, IEEE80211_TTLM_CONTROL_DIRECTION);
+ if (*direction != IEEE80211_TTLM_DIRECTION_DOWN &&
+ *direction != IEEE80211_TTLM_DIRECTION_UP &&
+ *direction != IEEE80211_TTLM_DIRECTION_BOTH)
+ return -EINVAL;
+
+ link_map_presence = *pos;
+ pos++;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ map_size = 1;
+ else
+ map_size = 2;
+
+ for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
+ u16 map;
+
+ if (link_map_presence & BIT(tid)) {
+ map = ieee80211_get_ttlm(map_size, pos);
+ if (!map) {
+ mlme_dbg(sdata,
+					 "No active links for TID %d\n", tid);
+ return -EINVAL;
+ }
+ } else {
+ map = 0;
+ }
+
+ switch (*direction) {
+ case IEEE80211_TTLM_DIRECTION_BOTH:
+ neg_ttlm->downlink[tid] = map;
+ neg_ttlm->uplink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_DOWN:
+ neg_ttlm->downlink[tid] = map;
+ break;
+ case IEEE80211_TTLM_DIRECTION_UP:
+ neg_ttlm->uplink[tid] = map;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pos += map_size;
+ }
+ return 0;
+}
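+/*
+ * Parsing example (reviewer illustration): a control byte with DEF_LINK_MAP
+ * set means "all TIDs on all valid links" with no mapping octets at all;
+ * otherwise each bit set in link_map_presence selects a TID whose 1- or
+ * 2-octet link bitmap follows, the size chosen by the LINK_MAP_SIZE control
+ * bit.
+ */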
+
+void ieee80211_process_neg_ttlm_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ u8 dialog_token, direction[IEEE80211_TTLM_MAX_CNT] = {}, i;
+ size_t ies_len;
+ enum ieee80211_neg_ttlm_res ttlm_res = NEG_TTLM_RES_ACCEPT;
+ struct ieee802_11_elems *elems = NULL;
+ struct ieee80211_neg_ttlm neg_ttlm = {};
+
+ BUILD_BUG_ON(ARRAY_SIZE(direction) != ARRAY_SIZE(elems->ttlm));
+
+ if (!ieee80211_vif_is_mld(&sdata->vif))
+ return;
+
+ dialog_token = mgmt->u.action.u.ttlm_req.dialog_token;
+ ies_len = len - offsetof(struct ieee80211_mgmt,
+ u.action.u.ttlm_req.variable);
+ elems = ieee802_11_parse_elems(mgmt->u.action.u.ttlm_req.variable,
+ ies_len, true, NULL);
+ if (!elems) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
+ for (i = 0; i < elems->ttlm_num; i++) {
+ if (ieee80211_parse_neg_ttlm(sdata, elems->ttlm[i],
+ &neg_ttlm, &direction[i]) ||
+ (direction[i] == IEEE80211_TTLM_DIRECTION_BOTH &&
+ elems->ttlm_num != 1)) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ if (!elems->ttlm_num ||
+ (elems->ttlm_num == 2 && direction[0] == direction[1])) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if ((neg_ttlm.downlink[i] &&
+ (neg_ttlm.downlink[i] & ~sdata->vif.valid_links)) ||
+ (neg_ttlm.uplink[i] &&
+ (neg_ttlm.uplink[i] & ~sdata->vif.valid_links))) {
+ ttlm_res = NEG_TTLM_RES_REJECT;
+ goto out;
+ }
+ }
+
+ ttlm_res = drv_can_neg_ttlm(sdata->local, sdata, &neg_ttlm);
+
+ if (ttlm_res != NEG_TTLM_RES_ACCEPT)
+ goto out;
+
+ ieee80211_apply_neg_ttlm(sdata, neg_ttlm);
+out:
+ kfree(elems);
+ ieee80211_send_neg_ttlm_res(sdata, ttlm_res, dialog_token, &neg_ttlm);
+}
+
+void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+	    mgmt->u.action.u.ttlm_res.dialog_token !=
+ sdata->u.mgd.dialog_token_alloc)
+ return;
+
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &sdata->u.mgd.neg_ttlm_timeout_work);
+
+	/* An MLD station sends a TID-to-link mapping request mainly to
+	 * handle a BTM (BSS transition management) request, in which case
+	 * it needs to restrict the active link set.
+	 * In that case the MLD AP is not expected to reject the negotiated
+	 * TTLM request; handling rejections properly is left for future
+	 * work.
+	 */
+ if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS)
+ __ieee80211_disconnect(sdata);
+}
+
void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -7066,6 +7411,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_handle_tspec_ac_params_wk);
wiphy_delayed_work_init(&ifmgd->ttlm_work,
ieee80211_tid_to_link_map_work);
+ wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work,
+ ieee80211_neg_ttlm_timeout_work);
ifmgd->flags = 0;
ifmgd->powersave = sdata->wdev.ps;
@@ -7075,6 +7422,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
spin_lock_init(&ifmgd->teardown_lock);
ifmgd->teardown_skb = NULL;
ifmgd->orig_teardown_skb = NULL;
+ ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
}
static void ieee80211_recalc_smps_work(struct wiphy *wiphy,
@@ -7094,8 +7442,8 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link)
unsigned int link_id = link->link_id;
link->u.mgd.p2p_noa_index = -1;
- link->u.mgd.conn_flags = 0;
link->conf->bssid = link->u.mgd.bssid;
+ link->smps_mode = IEEE80211_SMPS_OFF;
wiphy_work_init(&link->u.mgd.request_smps_work,
ieee80211_request_smps_mgd_work);
@@ -7133,6 +7481,7 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss, s8 link_id,
const u8 *ap_mld_addr, bool assoc,
+ struct ieee80211_conn_settings *conn,
bool override)
{
struct ieee80211_local *local = sdata->local;
@@ -7264,13 +7613,22 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
}
if (new_sta || override) {
- err = ieee80211_prep_channel(sdata, link, cbss, mlo,
- &link->u.mgd.conn_flags);
+ /*
+ * Only set this if we're also going to calculate the AP
+ * settings etc., otherwise this was set before in a
+ * previous call. Note override is set to %true in assoc
+ * if the settings were changed.
+ */
+ link->u.mgd.conn = *conn;
+ err = ieee80211_prep_channel(sdata, link, link->link_id, cbss,
+ mlo, &link->u.mgd.conn);
if (err) {
if (new_sta)
sta_info_free(local, new_sta);
goto out_err;
}
+ /* pass out for use in assoc */
+ *conn = link->u.mgd.conn;
}
if (new_sta) {
@@ -7309,6 +7667,75 @@ out_err:
return err;
}
+static bool ieee80211_mgd_csa_present(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_bss_ies *ies,
+ u8 cur_channel, bool ignore_ecsa)
+{
+ const struct element *csa_elem, *ecsa_elem;
+ struct ieee80211_channel_sw_ie *csa = NULL;
+ struct ieee80211_ext_chansw_ie *ecsa = NULL;
+
+ if (!ies)
+ return false;
+
+ csa_elem = cfg80211_find_elem(WLAN_EID_CHANNEL_SWITCH,
+ ies->data, ies->len);
+ if (csa_elem && csa_elem->datalen == sizeof(*csa))
+ csa = (void *)csa_elem->data;
+
+ ecsa_elem = cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+ ies->data, ies->len);
+ if (ecsa_elem && ecsa_elem->datalen == sizeof(*ecsa))
+ ecsa = (void *)ecsa_elem->data;
+
+ if (csa && csa->count == 0)
+ csa = NULL;
+ if (csa && !csa->mode && csa->new_ch_num == cur_channel)
+ csa = NULL;
+
+ if (ecsa && ecsa->count == 0)
+ ecsa = NULL;
+ if (ecsa && !ecsa->mode && ecsa->new_ch_num == cur_channel)
+ ecsa = NULL;
+
+ if (ignore_ecsa && ecsa) {
+ sdata_info(sdata,
+ "Ignoring ECSA in probe response - was considered stuck!\n");
+ return csa;
+ }
+
+ return csa || ecsa;
+}
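+/*
+ * Reviewer note: a countdown of zero, or a non-disruptive (mode 0) switch
+ * back to the current channel, is treated as "no CSA pending", so auth and
+ * assoc are only rejected while a real channel switch is still in progress.
+ */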
+
+static bool ieee80211_mgd_csa_in_process(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_bss *bss)
+{
+ u8 cur_channel;
+ bool ret;
+
+ cur_channel = ieee80211_frequency_to_channel(bss->channel->center_freq);
+
+ rcu_read_lock();
+ if (ieee80211_mgd_csa_present(sdata,
+ rcu_dereference(bss->beacon_ies),
+ cur_channel, false)) {
+ ret = true;
+ goto out;
+ }
+
+ if (ieee80211_mgd_csa_present(sdata,
+ rcu_dereference(bss->proberesp_ies),
+ cur_channel, bss->proberesp_ecsa_stuck)) {
+ ret = true;
+ goto out;
+ }
+
+ ret = false;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
/* config hooks */
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_auth_request *req)
@@ -7316,11 +7743,13 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_auth_data *auth_data;
+ struct ieee80211_conn_settings conn;
struct ieee80211_link_data *link;
- const struct element *csa_elem, *ecsa_elem;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss;
u16 auth_alg;
int err;
- bool cont_auth;
+ bool cont_auth, wmm_used;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -7360,21 +7789,10 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->assoc_data)
return -EBUSY;
- rcu_read_lock();
- csa_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_CHANNEL_SWITCH);
- ecsa_elem = ieee80211_bss_get_elem(req->bss,
- WLAN_EID_EXT_CHANSWITCH_ANN);
- if ((csa_elem &&
- csa_elem->datalen == sizeof(struct ieee80211_channel_sw_ie) &&
- ((struct ieee80211_channel_sw_ie *)csa_elem->data)->count != 0) ||
- (ecsa_elem &&
- ecsa_elem->datalen == sizeof(struct ieee80211_ext_chansw_ie) &&
- ((struct ieee80211_ext_chansw_ie *)ecsa_elem->data)->count != 0)) {
- rcu_read_unlock();
+ if (ieee80211_mgd_csa_in_process(sdata, req->bss)) {
sdata_info(sdata, "AP is in CSA process, reject auth\n");
return -EINVAL;
}
- rcu_read_unlock();
auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
req->ie_len, GFP_KERNEL);
@@ -7462,15 +7880,24 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
/* needed for transmitting the auth frame(s) properly */
memcpy(sdata->vif.cfg.ap_addr, auth_data->ap_addr, ETH_ALEN);
+ bss = (void *)req->bss->priv;
+ wmm_used = bss->wmm_used && (local->hw.queues >= IEEE80211_NUM_ACS);
+
+ sband = local->hw.wiphy->bands[req->bss->channel->band];
+
+ ieee80211_determine_our_sta_mode_auth(sdata, sband, req, wmm_used,
+ &conn);
+
err = ieee80211_prep_connection(sdata, req->bss, req->link_id,
- req->ap_mld_addr, cont_auth, false);
+ req->ap_mld_addr, cont_auth,
+ &conn, false);
if (err)
goto err_clear;
- if (req->link_id > 0)
+ if (req->link_id >= 0)
link = sdata_dereference(sdata->link[req->link_id], sdata);
else
- link = sdata_dereference(sdata->link[0], sdata);
+ link = &sdata->deflink;
if (WARN_ON(!link)) {
err = -ENOLINK;
@@ -7502,38 +7929,33 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return err;
}
-static ieee80211_conn_flags_t
+static void
ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data,
struct cfg80211_assoc_request *req,
- ieee80211_conn_flags_t conn_flags,
+ struct ieee80211_conn_settings *conn,
unsigned int link_id)
{
struct ieee80211_local *local = sdata->local;
const struct cfg80211_bss_ies *bss_ies;
struct ieee80211_supported_band *sband;
- const struct element *ht_elem, *vht_elem;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
- bool is_5ghz, is_6ghz;
cbss = assoc_data->link[link_id].bss;
if (WARN_ON(!cbss))
- return 0;
+ return;
bss = (void *)cbss->priv;
sband = local->hw.wiphy->bands[cbss->channel->band];
if (WARN_ON(!sband))
- return 0;
+ return;
link = sdata_dereference(sdata->link[link_id], sdata);
if (WARN_ON(!link))
- return 0;
-
- is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
- is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
+ return;
/* for MLO connections assume advertising all rates is OK */
if (!req->ap_mld_addr) {
@@ -7550,40 +7972,18 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
assoc_data->ie_pos += req->links[link_id].elems_len;
}
- rcu_read_lock();
- ht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_OPERATION);
- if (ht_elem && ht_elem->datalen >= sizeof(struct ieee80211_ht_operation))
- assoc_data->link[link_id].ap_ht_param =
- ((struct ieee80211_ht_operation *)(ht_elem->data))->ht_param;
- else if (!is_6ghz)
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- vht_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
- if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) {
- memcpy(&assoc_data->link[link_id].ap_vht_cap, vht_elem->data,
- sizeof(struct ieee80211_vht_cap));
- } else if (is_5ghz) {
- link_info(link,
- "VHT capa missing/short, disabling VHT/HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT |
- IEEE80211_CONN_DISABLE_HE |
- IEEE80211_CONN_DISABLE_EHT;
- }
- rcu_read_unlock();
-
link->u.mgd.beacon_crc_valid = false;
link->u.mgd.dtim_period = 0;
link->u.mgd.have_beacon = false;
- /* override HT/VHT configuration only if the AP and we support it */
- if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ /* override HT configuration only if the AP and we support it */
+ if (conn->mode >= IEEE80211_CONN_MODE_HT) {
struct ieee80211_sta_ht_cap sta_ht_cap;
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
}
- link->conf->eht_puncturing = 0;
-
rcu_read_lock();
bss_ies = rcu_dereference(cbss->beacon_ies);
if (bss_ies) {
@@ -7604,7 +8004,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
}
if (bss_ies) {
- const struct ieee80211_eht_operation *eht_oper;
const struct element *elem;
elem = cfg80211_find_ext_elem(WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION,
@@ -7621,32 +8020,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
link->conf->ema_ap = true;
else
link->conf->ema_ap = false;
-
- elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_OPERATION,
- bss_ies->data, bss_ies->len);
- eht_oper = (const void *)(elem->data + 1);
-
- if (elem &&
- ieee80211_eht_oper_size_ok((const void *)(elem->data + 1),
- elem->datalen - 1) &&
- (eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
- (eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)) {
- const struct ieee80211_eht_operation_info *info =
- (void *)eht_oper->optional;
- const u8 *disable_subchannel_bitmap = info->optional;
- u16 bitmap;
-
- bitmap = get_unaligned_le16(disable_subchannel_bitmap);
- if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
- &link->conf->chandef) &&
- !(bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING)))
- ieee80211_handle_puncturing_bitmap(link,
- eht_oper,
- bitmap,
- NULL);
- else
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
}
rcu_read_unlock();
@@ -7673,8 +8046,6 @@ ieee80211_setup_assoc_link(struct ieee80211_sub_if_data *sdata,
} else {
link->smps_mode = link->u.mgd.req_smps;
}
-
- return conn_flags;
}
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
@@ -7684,13 +8055,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_assoc_data *assoc_data;
- const struct element *ssid_elem, *csa_elem, *ecsa_elem;
+ const struct element *ssid_elem;
struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg;
- ieee80211_conn_flags_t conn_flags = 0;
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss;
- struct ieee80211_bss *bss;
- bool override;
+ bool override, uapsd_supported;
+ bool match_auth;
int i, err;
size_t size = sizeof(*assoc_data) + req->ie_len;
@@ -7707,54 +8077,28 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
cbss = req->link_id < 0 ? req->bss : req->links[req->link_id].bss;
+ if (ieee80211_mgd_csa_in_process(sdata, cbss)) {
+ sdata_info(sdata, "AP is in CSA process, reject assoc\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
rcu_read_lock();
ssid_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_SSID);
if (!ssid_elem || ssid_elem->datalen > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
- kfree(assoc_data);
- return -EINVAL;
- }
-
- csa_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_CHANNEL_SWITCH);
- ecsa_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_EXT_CHANSWITCH_ANN);
- if ((csa_elem &&
- csa_elem->datalen == sizeof(struct ieee80211_channel_sw_ie) &&
- ((struct ieee80211_channel_sw_ie *)csa_elem->data)->count != 0) ||
- (ecsa_elem &&
- ecsa_elem->datalen == sizeof(struct ieee80211_ext_chansw_ie) &&
- ((struct ieee80211_ext_chansw_ie *)ecsa_elem->data)->count != 0)) {
- sdata_info(sdata, "AP is in CSA process, reject assoc\n");
- rcu_read_unlock();
- kfree(assoc_data);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
memcpy(assoc_data->ssid, ssid_elem->data, ssid_elem->datalen);
assoc_data->ssid_len = ssid_elem->datalen;
- memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
- vif_cfg->ssid_len = assoc_data->ssid_len;
rcu_read_unlock();
- if (req->ap_mld_addr) {
- for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- if (!req->links[i].bss)
- continue;
- link = sdata_dereference(sdata->link[i], sdata);
- if (link)
- ether_addr_copy(assoc_data->link[i].addr,
- link->conf->addr);
- else
- eth_random_addr(assoc_data->link[i].addr);
- }
- } else {
- memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
- }
-
- assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
-
- memcpy(assoc_data->ap_addr,
- req->ap_mld_addr ?: req->bss->bssid,
- ETH_ALEN);
+ if (req->ap_mld_addr)
+ memcpy(assoc_data->ap_addr, req->ap_mld_addr, ETH_ALEN);
+ else
+ memcpy(assoc_data->ap_addr, cbss->bssid, ETH_ALEN);
if (ifmgd->associated) {
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@ -7772,98 +8116,134 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
false);
}
- if (ifmgd->auth_data && !ifmgd->auth_data->done) {
- err = -EBUSY;
- goto err_free;
- }
+ memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
+ memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
+ sizeof(ifmgd->ht_capa_mask));
- if (ifmgd->assoc_data) {
- err = -EBUSY;
- goto err_free;
- }
+ memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
+ memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
+ sizeof(ifmgd->vht_capa_mask));
- if (ifmgd->auth_data) {
- bool match;
+ memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
+ memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
+ sizeof(ifmgd->s1g_capa_mask));
- /* keep sta info, bssid if matching */
- match = ether_addr_equal(ifmgd->auth_data->ap_addr,
- assoc_data->ap_addr) &&
- ifmgd->auth_data->link_id == req->link_id;
+ /* keep some setup (AP STA, channel, ...) if matching */
+	match_auth = ifmgd->auth_data &&
+		     ether_addr_equal(ifmgd->auth_data->ap_addr,
+				      assoc_data->ap_addr) &&
+		     ifmgd->auth_data->link_id == req->link_id;
- /* Cleanup is delayed if auth_data matches */
- if (!match)
- ieee80211_destroy_auth_data(sdata, false);
- }
+ if (req->ap_mld_addr) {
+ uapsd_supported = true;
- /* prepare assoc data */
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ struct ieee80211_supported_band *sband;
+ struct cfg80211_bss *link_cbss = req->links[i].bss;
+ struct ieee80211_bss *bss;
- bss = (void *)cbss->priv;
- assoc_data->wmm = bss->wmm_used &&
- (local->hw.queues >= IEEE80211_NUM_ACS);
+ if (!link_cbss)
+ continue;
- /*
- * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
- * We still associate in non-HT mode (11a/b/g) if any one of these
- * ciphers is configured as pairwise.
- * We can set this to true for non-11n hardware, that'll be checked
- * separately along with the peer capabilities.
- */
- for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
- if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE due to WEP/TKIP use\n");
+ bss = (void *)link_cbss->priv;
+
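+			/*
+			 * MLO implies EHT, and EHT requires the AP to
+			 * support QoS/WMM on every link
+			 */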
+ if (!bss->wmm_used) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
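+			/*
+			 * similarly, the DISABLE_* flags make no sense for
+			 * an MLO connection, which requires EHT and thus
+			 * everything below it
+			 */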
+ if (req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE |
+ ASSOC_REQ_DISABLE_EHT)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
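+			/* no MLO on S1G */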
+ if (link_cbss->channel->band == NL80211_BAND_S1GHZ) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ link = sdata_dereference(sdata->link[i], sdata);
+ if (link)
+ ether_addr_copy(assoc_data->link[i].addr,
+ link->conf->addr);
+ else
+ eth_random_addr(assoc_data->link[i].addr);
+ sband = local->hw.wiphy->bands[link_cbss->channel->band];
+
+			if (match_auth && i == assoc_link_id && link)
+ assoc_data->link[i].conn = link->u.mgd.conn;
+ else
+ assoc_data->link[i].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband,
+ req, true, i,
+ &assoc_data->link[i].conn);
+ assoc_data->link[i].bss = link_cbss;
+ assoc_data->link[i].disabled = req->links[i].disabled;
+
+ if (!bss->uapsd_supported)
+ uapsd_supported = false;
+
+ if (assoc_data->link[i].conn.mode < IEEE80211_CONN_MODE_EHT) {
+ err = -EINVAL;
+ req->links[i].error = err;
+ goto err_free;
+ }
}
- }
- /* also disable HT/VHT/HE/EHT if the AP doesn't use WMM */
- if (!bss->wmm_used) {
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- netdev_info(sdata->dev,
- "disabling HT/VHT/HE as WMM/QoS is not supported by the AP\n");
- }
+ assoc_data->wmm = true;
+ } else {
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_bss *bss = (void *)cbss->priv;
- if (req->flags & ASSOC_REQ_DISABLE_HT) {
- mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HT;
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
- }
+ memcpy(assoc_data->link[0].addr, sdata->vif.addr, ETH_ALEN);
+ assoc_data->s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
- if (req->flags & ASSOC_REQ_DISABLE_VHT) {
- mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_VHT;
- }
+ assoc_data->wmm = bss->wmm_used &&
+ (local->hw.queues >= IEEE80211_NUM_ACS);
+
+ if (cbss->channel->band == NL80211_BAND_6GHZ &&
+ req->flags & (ASSOC_REQ_DISABLE_HT |
+ ASSOC_REQ_DISABLE_VHT |
+ ASSOC_REQ_DISABLE_HE)) {
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ sband = local->hw.wiphy->bands[cbss->channel->band];
- if (req->flags & ASSOC_REQ_DISABLE_HE) {
- mlme_dbg(sdata, "HE disabled by flag, disabling HE/EHT\n");
- conn_flags |= IEEE80211_CONN_DISABLE_HE;
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ assoc_data->link[0].bss = cbss;
+
+ if (match_auth)
+ assoc_data->link[0].conn = sdata->deflink.u.mgd.conn;
+ else
+ assoc_data->link[0].conn =
+ ieee80211_conn_settings_unlimited;
+ ieee80211_determine_our_sta_mode_assoc(sdata, sband, req,
+ assoc_data->wmm, 0,
+ &assoc_data->link[0].conn);
+
+ uapsd_supported = bss->uapsd_supported;
}
- if (req->flags & ASSOC_REQ_DISABLE_EHT)
- conn_flags |= IEEE80211_CONN_DISABLE_EHT;
+ assoc_data->spp_amsdu = req->flags & ASSOC_REQ_SPP_AMSDU;
- memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
- memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
- sizeof(ifmgd->ht_capa_mask));
+ if (ifmgd->auth_data && !ifmgd->auth_data->done) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
- memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
- sizeof(ifmgd->vht_capa_mask));
+ if (ifmgd->assoc_data) {
+ err = -EBUSY;
+ goto err_free;
+ }
- memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
- memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
- sizeof(ifmgd->s1g_capa_mask));
+ /* Cleanup is delayed if auth_data matches */
+ if (ifmgd->auth_data && !match_auth)
+ ieee80211_destroy_auth_data(sdata, false);
if (req->ie && req->ie_len) {
memcpy(assoc_data->ie, req->ie, req->ie_len);
@@ -7895,19 +8275,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
assoc_data->assoc_link_id = assoc_link_id;
if (req->ap_mld_addr) {
- for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) {
- assoc_data->link[i].conn_flags = conn_flags;
- assoc_data->link[i].bss = req->links[i].bss;
- assoc_data->link[i].disabled = req->links[i].disabled;
- }
-
/* if there was no authentication, set up the link */
err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0);
if (err)
goto err_clear;
- } else {
- assoc_data->link[0].conn_flags = conn_flags;
- assoc_data->link[0].bss = cbss;
}
link = sdata_dereference(sdata->link[assoc_link_id], sdata);
@@ -7916,19 +8287,21 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
goto err_clear;
}
- /* keep old conn_flags from ieee80211_prep_channel() from auth */
- conn_flags |= link->u.mgd.conn_flags;
- conn_flags |= ieee80211_setup_assoc_link(sdata, assoc_data, req,
- conn_flags, assoc_link_id);
- override = link->u.mgd.conn_flags != conn_flags;
- link->u.mgd.conn_flags |= conn_flags;
+ override = link->u.mgd.conn.mode !=
+ assoc_data->link[assoc_link_id].conn.mode ||
+ link->u.mgd.conn.bw_limit !=
+ assoc_data->link[assoc_link_id].conn.bw_limit;
+ link->u.mgd.conn = assoc_data->link[assoc_link_id].conn;
+
+ ieee80211_setup_assoc_link(sdata, assoc_data, req, &link->u.mgd.conn,
+ assoc_link_id);
if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK),
"U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
- if (bss->wmm_used && bss->uapsd_supported &&
+ if (assoc_data->wmm && uapsd_supported &&
(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD)) {
assoc_data->uapsd = true;
ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
@@ -7972,34 +8345,35 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
continue;
if (i == assoc_data->assoc_link_id)
continue;
- /* only calculate the flags, hence link == NULL */
- err = ieee80211_prep_channel(sdata, NULL,
+ /* only calculate the mode, hence link == NULL */
+ err = ieee80211_prep_channel(sdata, NULL, i,
assoc_data->link[i].bss, true,
- &assoc_data->link[i].conn_flags);
+ &assoc_data->link[i].conn);
if (err) {
req->links[i].error = err;
goto err_clear;
}
}
+ memcpy(vif_cfg->ssid, assoc_data->ssid, assoc_data->ssid_len);
+ vif_cfg->ssid_len = assoc_data->ssid_len;
+
/* needed for transmitting the assoc frames properly */
memcpy(sdata->vif.cfg.ap_addr, assoc_data->ap_addr, ETH_ALEN);
err = ieee80211_prep_connection(sdata, cbss, req->link_id,
- req->ap_mld_addr, true, override);
+ req->ap_mld_addr, true,
+ &assoc_data->link[assoc_link_id].conn,
+ override);
if (err)
goto err_clear;
- assoc_data->link[assoc_data->assoc_link_id].conn_flags =
- link->u.mgd.conn_flags;
-
if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC)) {
const struct cfg80211_bss_ies *beacon_ies;
rcu_read_lock();
beacon_ies = rcu_dereference(req->bss->beacon_ies);
-
- if (beacon_ies) {
+ if (!beacon_ies) {
/*
* Wait up to one beacon interval ...
* should this be more if we miss one?
@@ -8080,6 +8454,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
ieee80211_report_disconnect(sdata, frame_buf,
sizeof(frame_buf), true,
req->reason_code, false);
+ drv_mgd_complete_tx(sdata->local, sdata, &info);
return 0;
}
@@ -8156,6 +8531,8 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
&ifmgd->ml_reconf_work);
wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work);
+ wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+ &ifmgd->neg_ttlm_timeout_work);
if (ifmgd->assoc_data)
ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT);
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 449af4e1cca4..9ef14e475c90 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -168,6 +168,7 @@ void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
struct ocb_setup *setup)
{
+ struct ieee80211_chan_req chanreq = { .oper = setup->chandef };
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
@@ -182,7 +183,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
sdata->deflink.needed_rx_chains = sdata->local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &setup->chandef,
+ err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
return err;
@@ -207,7 +208,7 @@ int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
ifocb->joined = false;
- sta_info_flush(sdata);
+ sta_info_flush(sdata, -1);
spin_lock_bh(&ifocb->incomplete_lock);
while (!list_empty(&ifocb->incomplete_stations)) {
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6c4080202573..221695d841fd 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -86,7 +86,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
/*
@@ -136,7 +136,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
lockdep_assert_wiphy(local->hw.wiphy);
- if (WARN_ON(local->use_chanctx))
+ if (WARN_ON(!local->emulate_chanctx))
return;
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -351,10 +351,13 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
* 20 MHz channel width) don't stop all the operations but still
* treat it as though the ROC operation started properly, so
* other ROC operations won't interfere with this one.
+ *
+ * Note: scan can't run, tmp_channel is what we use, so this
+ * must be the currently active channel.
*/
- roc->on_channel = roc->chan == local->_oper_chandef.chan &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
- local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
+ roc->on_channel = roc->chan == local->hw.conf.chandef.chan &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_5 &&
+ local->hw.conf.chandef.width != NL80211_CHAN_WIDTH_10;
/* start this ROC */
ieee80211_recalc_idle(local);
@@ -363,7 +366,7 @@ static void _ieee80211_start_next_roc(struct ieee80211_local *local)
ieee80211_offchannel_stop_vifs(local);
local->tmp_channel = roc->chan;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
}
wiphy_delayed_work_queue(local->hw.wiphy, &local->roc_work,
@@ -426,7 +429,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
return;
if (!roc->started) {
- WARN_ON(local->use_chanctx);
+ WARN_ON(!local->emulate_chanctx);
_ieee80211_start_next_roc(local);
} else {
on_channel = roc->on_channel;
@@ -439,7 +442,7 @@ static void __ieee80211_roc_work(struct ieee80211_local *local)
ieee80211_flush_queues(local, NULL, false);
local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
ieee80211_offchannel_return(local);
}
@@ -539,7 +542,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* this may work, but is untested */
return -EOPNOTSUPP;
- if (local->use_chanctx && !local->ops->remain_on_channel)
+ if (!local->emulate_chanctx && !local->ops->remain_on_channel)
return -EOPNOTSUPP;
roc = kzalloc(sizeof(*roc), GFP_KERNEL);
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
new file mode 100644
index 000000000000..196a882e4c19
--- /dev/null
+++ b/net/mac80211/parse.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018-2024 Intel Corporation
+ *
+ * element parsing for mac80211
+ */
+
+#include <net/mac80211.h>
+#include <linux/netdevice.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <net/net_namespace.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <kunit/visibility.h>
+
+#include "ieee80211_i.h"
+#include "driver-ops.h"
+#include "rate.h"
+#include "mesh.h"
+#include "wme.h"
+#include "led.h"
+#include "wep.h"
+
+static void
+ieee80211_parse_extension_element(u32 *crc,
+ const struct element *elem,
+ struct ieee802_11_elems *elems,
+ struct ieee80211_elems_parse_params *params)
+{
+ const void *data = elem->data + 1;
+ bool calc_crc = false;
+ u8 len;
+
+ if (!elem->datalen)
+ return;
+
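+	/* the first data octet is the element ID extension */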
+ len = elem->datalen - 1;
+
+ switch (elem->data[0]) {
+ case WLAN_EID_EXT_HE_MU_EDCA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->mu_edca_param_set))
+ elems->mu_edca_param_set = data;
+ break;
+ case WLAN_EID_EXT_HE_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (ieee80211_he_capa_size_ok(data, len)) {
+ elems->he_cap = data;
+ elems->he_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_HE_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ calc_crc = true;
+ if (len >= sizeof(*elems->he_operation) &&
+ len >= ieee80211_he_oper_size(data) - 1)
+ elems->he_operation = data;
+ break;
+ case WLAN_EID_EXT_UORA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= 1)
+ elems->uora_element = data;
+ break;
+ case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
+ if (len == 3)
+ elems->max_channel_switch_time = data;
+ break;
+ case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
+ if (len >= sizeof(*elems->mbssid_config_ie))
+ elems->mbssid_config_ie = data;
+ break;
+ case WLAN_EID_EXT_HE_SPR:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_spr) &&
+ len >= ieee80211_he_spr_size(data))
+ elems->he_spr = data;
+ break;
+ case WLAN_EID_EXT_HE_6GHZ_CAPA:
+ if (params->mode < IEEE80211_CONN_MODE_HE)
+ break;
+ if (len >= sizeof(*elems->he_6ghz_capa))
+ elems->he_6ghz_capa = data;
+ break;
+ case WLAN_EID_EXT_EHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_capa_size_ok(elems->he_cap,
+ data, len,
+ params->from_ap)) {
+ elems->eht_cap = data;
+ elems->eht_cap_len = len;
+ }
+ break;
+ case WLAN_EID_EXT_EHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_eht_oper_size_ok(data, len))
+ elems->eht_operation = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_EHT_MULTI_LINK:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+
+ if (ieee80211_mle_size_ok(data, len)) {
+ const struct ieee80211_multi_link_elem *mle =
+ (void *)data;
+
+ switch (le16_get_bits(mle->control,
+ IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ if (elems->ml_basic) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC;
+ break;
+ }
+ elems->ml_basic_elem = (void *)elem;
+ elems->ml_basic = data;
+ elems->ml_basic_len = len;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ elems->ml_reconf_elem = (void *)elem;
+ elems->ml_reconf = data;
+ elems->ml_reconf_len = len;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ case WLAN_EID_EXT_BANDWIDTH_INDICATION:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ if (ieee80211_bandwidth_indication_size_ok(data, len))
+ elems->bandwidth_indication = data;
+ calc_crc = true;
+ break;
+ case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+ calc_crc = true;
+ if (ieee80211_tid_to_link_map_size_ok(data, len) &&
+ elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
+ elems->ttlm[elems->ttlm_num] = (void *)data;
+ elems->ttlm_num++;
+ }
+ break;
+ }
+
+ if (crc && calc_crc)
+ *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
+}
+
+static u32
+_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
+ struct ieee802_11_elems *elems,
+ const struct element *check_inherit)
+{
+ const struct element *elem;
+ bool calc_crc = params->filter != 0;
+ DECLARE_BITMAP(seen_elems, 256);
+ u32 crc = params->crc;
+
+ bitmap_zero(seen_elems, 256);
+
+ for_each_element(elem, params->start, params->len) {
+ const struct element *subelem;
+ u8 elem_parse_failed;
+ u8 id = elem->id;
+ u8 elen = elem->datalen;
+ const u8 *pos = elem->data;
+
+ if (check_inherit &&
+ !cfg80211_is_element_inherited(elem,
+ check_inherit))
+ continue;
+
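+		/*
+		 * elements that may appear only once; a duplicate is
+		 * recorded as a parse error below
+		 */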
+ switch (id) {
+ case WLAN_EID_SSID:
+ case WLAN_EID_SUPP_RATES:
+ case WLAN_EID_FH_PARAMS:
+ case WLAN_EID_DS_PARAMS:
+ case WLAN_EID_CF_PARAMS:
+ case WLAN_EID_TIM:
+ case WLAN_EID_IBSS_PARAMS:
+ case WLAN_EID_CHALLENGE:
+ case WLAN_EID_RSN:
+ case WLAN_EID_ERP_INFO:
+ case WLAN_EID_EXT_SUPP_RATES:
+ case WLAN_EID_HT_CAPABILITY:
+ case WLAN_EID_HT_OPERATION:
+ case WLAN_EID_VHT_CAPABILITY:
+ case WLAN_EID_VHT_OPERATION:
+ case WLAN_EID_MESH_ID:
+ case WLAN_EID_MESH_CONFIG:
+ case WLAN_EID_PEER_MGMT:
+ case WLAN_EID_PREQ:
+ case WLAN_EID_PREP:
+ case WLAN_EID_PERR:
+ case WLAN_EID_RANN:
+ case WLAN_EID_CHANNEL_SWITCH:
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ case WLAN_EID_COUNTRY:
+ case WLAN_EID_PWR_CONSTRAINT:
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ case WLAN_EID_EXT_CAPABILITY:
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ case WLAN_EID_LINK_ID:
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ case WLAN_EID_RSNX:
+ case WLAN_EID_S1G_BCN_COMPAT:
+ case WLAN_EID_S1G_CAPABILITIES:
+ case WLAN_EID_S1G_OPERATION:
+ case WLAN_EID_AID_RESPONSE:
+ case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
+ /*
+ * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
+ * that if the content gets bigger it might be needed more than once
+ */
+ if (test_bit(id, seen_elems)) {
+ elems->parse_error |=
+ IEEE80211_PARSE_ERR_DUP_ELEM;
+ continue;
+ }
+ break;
+ }
+
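+		/* the filter is a u64 bitmap, so only IDs below 64 can be CRC'ed */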
+ if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elem_parse_failed = 0;
+
+ switch (id) {
+ case WLAN_EID_LINK_ID:
+ if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->lnk_id = (void *)(pos - 2);
+ break;
+ case WLAN_EID_CHAN_SWITCH_TIMING:
+ if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_sw_timing = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ elems->ext_capab = pos;
+ elems->ext_capab_len = elen;
+ break;
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ if (elen >= 1)
+ elems->ds_params = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_TIM:
+ if (elen >= sizeof(struct ieee80211_tim_ie)) {
+ elems->tim = (void *)pos;
+ elems->tim_len = elen;
+ } else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ pos[2] == 0xf2) {
+ /* Microsoft OUI (00:50:F2) */
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ if (elen >= 5 && pos[3] == 2) {
+ /* OUI Type 2 - WMM IE */
+ if (pos[4] == 0) {
+ elems->wmm_info = pos;
+ elems->wmm_info_len = elen;
+ } else if (pos[4] == 1) {
+ elems->wmm_param = pos;
+ elems->wmm_param_len = elen;
+ }
+ }
+ }
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn = pos;
+ elems->rsn_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ if (elen >= 1)
+ elems->erp_info = pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_cap))
+ elems->ht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen >= sizeof(struct ieee80211_ht_operation))
+ elems->ht_operation = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_cap))
+ elems->vht_cap_elem = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen >= sizeof(struct ieee80211_vht_operation)) {
+ elems->vht_operation = (void *)pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_OPMODE_NOTIF:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (elen > 0) {
+ elems->opmode_notif = pos;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+ elems->mesh_id_len = elen;
+ break;
+ case WLAN_EID_MESH_CONFIG:
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_PEER_MGMT:
+ elems->peering = pos;
+ elems->peering_len = elen;
+ break;
+ case WLAN_EID_MESH_AWAKE_WINDOW:
+ if (elen >= 2)
+ elems->awake_window = (void *)pos;
+ break;
+ case WLAN_EID_PREQ:
+ elems->preq = pos;
+ elems->preq_len = elen;
+ break;
+ case WLAN_EID_PREP:
+ elems->prep = pos;
+ elems->prep_len = elen;
+ break;
+ case WLAN_EID_PERR:
+ elems->perr = pos;
+ elems->perr_len = elen;
+ break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH:
+ if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ch_switch_ie = (void *)pos;
+ break;
+ case WLAN_EID_EXT_CHANSWITCH_ANN:
+ if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->ext_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
+ if (params->mode < IEEE80211_CONN_MODE_HT)
+ break;
+ if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->sec_chan_offs = (void *)pos;
+ break;
+ case WLAN_EID_CHAN_SWITCH_PARAM:
+ if (elen <
+ sizeof(*elems->mesh_chansw_params_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->mesh_chansw_params_ie = (void *)pos;
+ break;
+ case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+
+ if (!params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+
+ if (elen < sizeof(*elems->wide_bw_chansw_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->wide_bw_chansw_ie = (void *)pos;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
+ if (params->mode < IEEE80211_CONN_MODE_VHT)
+ break;
+ if (params->action) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_UNEXPECTED_ELEM;
+ break;
+ }
+ /*
+ * This is a bit tricky, but as we only care about
+ * a few elements, parse them out manually.
+ */
+ subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
+ pos, elen);
+ if (subelem) {
+ if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
+ elems->wide_bw_chansw_ie =
+ (void *)subelem->data;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+
+ if (params->mode < IEEE80211_CONN_MODE_EHT)
+ break;
+
+ subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
+ pos, elen);
+ if (subelem) {
+ const void *edata = subelem->data + 1;
+ u8 edatalen = subelem->datalen - 1;
+
+ if (ieee80211_bandwidth_indication_size_ok(edata,
+ edatalen))
+ elems->bandwidth_indication = edata;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ }
+ break;
+ case WLAN_EID_COUNTRY:
+ elems->country_elem = pos;
+ elems->country_elem_len = elen;
+ break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ if (elen != 1) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->pwr_constr_elem = pos;
+ break;
+ case WLAN_EID_CISCO_VENDOR_SPECIFIC:
+ /* Lots of different options exist, but we only care
+ * about the Dynamic Transmit Power Control element.
+ * First check for the Cisco OUI, then for the DTPC
+ * tag (0x00).
+ */
+ if (elen < 4) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (pos[0] != 0x00 || pos[1] != 0x40 ||
+ pos[2] != 0x96 || pos[3] != 0x00)
+ break;
+
+ if (elen != 6) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elems->cisco_dtpc_elem = pos;
+ break;
+ case WLAN_EID_ADDBA_EXT:
+ if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ }
+ elems->addba_ext_ie = (void *)pos;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
+ elems->timeout_int = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_BSS_MAX_IDLE_PERIOD:
+ if (elen >= sizeof(*elems->max_idle_period_ie))
+ elems->max_idle_period_ie = (void *)pos;
+ break;
+ case WLAN_EID_RSNX:
+ elems->rsnx = pos;
+ elems->rsnx_len = elen;
+ break;
+ case WLAN_EID_TX_POWER_ENVELOPE:
+ if (elen < 1 ||
+ elen > sizeof(struct ieee80211_tx_pwr_env))
+ break;
+
+ if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
+ break;
+
+ elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
+ elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
+ elems->tx_pwr_env_num++;
+ break;
+ case WLAN_EID_EXTENSION:
+ ieee80211_parse_extension_element(calc_crc ?
+ &crc : NULL,
+ elem, elems, params);
+ break;
+ case WLAN_EID_S1G_CAPABILITIES:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen >= sizeof(*elems->s1g_capab))
+ elems->s1g_capab = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_OPERATION:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_oper))
+ elems->s1g_oper = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_S1G_BCN_COMPAT:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(*elems->s1g_bcn_compat))
+ elems->s1g_bcn_compat = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ case WLAN_EID_AID_RESPONSE:
+ if (params->mode != IEEE80211_CONN_MODE_S1G)
+ break;
+ if (elen == sizeof(struct ieee80211_aid_response_ie))
+ elems->aid_resp = (void *)pos;
+ else
+ elem_parse_failed =
+ IEEE80211_PARSE_ERR_BAD_ELEM_SIZE;
+ break;
+ default:
+ break;
+ }
+
+ if (elem_parse_failed)
+ elems->parse_error |= elem_parse_failed;
+ else
+ __set_bit(id, seen_elems);
+ }
+
+ if (!for_each_element_completed(elem, params->start, params->len))
+ elems->parse_error |= IEEE80211_PARSE_ERR_INVALID_END;
+
+ return crc;
+}
+
+static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
+ struct ieee802_11_elems *elems,
+ struct cfg80211_bss *bss,
+ u8 *nontransmitted_profile)
+{
+ const struct element *elem, *sub;
+ size_t profile_len = 0;
+ bool found = false;
+
+ if (!bss || !bss->transmitted_bss)
+ return profile_len;
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
+ if (elem->datalen < 2)
+ continue;
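+		/* data[0] is the MaxBSSID indicator; only 1-8 are valid */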
+ if (elem->data[0] < 1 || elem->data[0] > 8)
+ continue;
+
+ for_each_element(sub, elem->data + 1, elem->datalen - 1) {
+ u8 new_bssid[ETH_ALEN];
+ const u8 *index;
+
+ if (sub->id != 0 || sub->datalen < 4) {
+ /* not a valid BSS profile */
+ continue;
+ }
+
+ if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ sub->data[1] != 2) {
+ /* The first element of the
+ * Nontransmitted BSSID Profile is not
+ * the Nontransmitted BSSID Capability
+ * element.
+ */
+ continue;
+ }
+
+ memset(nontransmitted_profile, 0, len);
+ profile_len = cfg80211_merge_profile(start, len,
+ elem,
+ sub,
+ nontransmitted_profile,
+ len);
+
+ /* found a Nontransmitted BSSID Profile */
+ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ nontransmitted_profile,
+ profile_len);
+ if (!index || index[1] < 1 || index[2] == 0) {
+ /* Invalid MBSSID Index element */
+ continue;
+ }
+
+ cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
+ elem->data[0],
+ index[2],
+ new_bssid);
+ if (ether_addr_equal(new_bssid, bss->bssid)) {
+ found = true;
+ elems->bssid_index_len = index[1];
+ elems->bssid_index = (void *)&index[2];
+ break;
+ }
+ }
+ }
+
+ return found ? profile_len : 0;
+}
+
+static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems,
+ u8 link_id)
+{
+ const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
+ ssize_t ml_len = elems->ml_basic_len;
+ const struct element *sub;
+
+ if (!ml || !ml_len)
+ return;
+
+ if (le16_get_bits(ml->control, IEEE80211_ML_CONTROL_TYPE) !=
+ IEEE80211_ML_CONTROL_TYPE_BASIC)
+ return;
+
+ for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+ ssize_t sta_prof_len;
+ u16 control;
+
+ if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
+ continue;
+
+ if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+ sub->datalen))
+ return;
+
+ control = le16_to_cpu(prof->control);
+
+ if (link_id != u16_get_bits(control,
+ IEEE80211_MLE_STA_CONTROL_LINK_ID))
+ continue;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
+ return;
+
+ /* the sub element can be fragmented */
+ sta_prof_len =
+ cfg80211_defragment_element(sub,
+ (u8 *)ml, ml_len,
+ elems->scratch_pos,
+ elems->scratch +
+ elems->scratch_len -
+ elems->scratch_pos,
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+
+ if (sta_prof_len < 0)
+ return;
+
+ elems->prof = (void *)elems->scratch_pos;
+ elems->sta_prof_len = sta_prof_len;
+ elems->scratch_pos += sta_prof_len;
+
+ return;
+ }
+}
+
+static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems,
+ struct ieee80211_elems_parse_params *params)
+{
+ struct ieee80211_mle_per_sta_profile *prof;
+ struct ieee80211_elems_parse_params sub = {
+ .mode = params->mode,
+ .action = params->action,
+ .from_ap = params->from_ap,
+ .link_id = -1,
+ };
+ ssize_t ml_len = elems->ml_basic_len;
+ const struct element *non_inherit = NULL;
+ const u8 *end;
+
+ if (params->link_id == -1)
+ return;
+
+ ml_len = cfg80211_defragment_element(elems->ml_basic_elem,
+ elems->ie_start,
+ elems->total_len,
+ elems->scratch_pos,
+ elems->scratch +
+ elems->scratch_len -
+ elems->scratch_pos,
+ WLAN_EID_FRAGMENT);
+
+ if (ml_len < 0)
+ return;
+
+ elems->ml_basic = (const void *)elems->scratch_pos;
+ elems->ml_basic_len = ml_len;
+
+ ieee80211_mle_get_sta_prof(elems, params->link_id);
+ prof = elems->prof;
+
+ if (!prof)
+ return;
+
+ /* check if we have the 4 bytes for the fixed part in assoc response */
+ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
+ elems->prof = NULL;
+ elems->sta_prof_len = 0;
+ return;
+ }
+
+ /*
+ * Skip the capability information and the status code that are expected
+ * as part of the station profile in association response frames. Note
+	 * the -1 is because the 'sta_info_len' is accounted for as part of
+	 * the per-STA profile, but not as part of the 'u8 variable[]' portion.
+ */
+ sub.start = prof->variable + prof->sta_info_len - 1 + 4;
+ end = (const u8 *)prof + elems->sta_prof_len;
+ sub.len = end - sub.start;
+
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub.start, sub.len);
+ _ieee802_11_parse_elems_full(&sub, elems, non_inherit);
+}
+
+struct ieee802_11_elems *
+ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
+{
+ struct ieee802_11_elems *elems;
+ const struct element *non_inherit = NULL;
+ u8 *nontransmitted_profile;
+ int nontransmitted_profile_len = 0;
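+	/*
+	 * the scratch buffer holds the merged non-transmitted BSSID profile
+	 * as well as defragmented (e.g. multi-link) elements; three times
+	 * the IE length is assumed to be enough for all of them
+	 */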
+ size_t scratch_len = 3 * params->len;
+
+ elems = kzalloc(struct_size(elems, scratch, scratch_len), GFP_ATOMIC);
+ if (!elems)
+ return NULL;
+ elems->ie_start = params->start;
+ elems->total_len = params->len;
+ elems->scratch_len = scratch_len;
+ elems->scratch_pos = elems->scratch;
+
+ nontransmitted_profile = elems->scratch_pos;
+ nontransmitted_profile_len =
+ ieee802_11_find_bssid_profile(params->start, params->len,
+ elems, params->bss,
+ nontransmitted_profile);
+ elems->scratch_pos += nontransmitted_profile_len;
+ elems->scratch_len -= nontransmitted_profile_len;
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ nontransmitted_profile,
+ nontransmitted_profile_len);
+
+ elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit);
+
+ /* Override with nontransmitted profile, if found */
+ if (nontransmitted_profile_len) {
+ struct ieee80211_elems_parse_params sub = {
+ .mode = params->mode,
+ .start = nontransmitted_profile,
+ .len = nontransmitted_profile_len,
+ .action = params->action,
+ .link_id = params->link_id,
+ };
+
+ _ieee802_11_parse_elems_full(&sub, elems, NULL);
+ }
+
+ ieee80211_mle_parse_link(elems, params);
+
+ if (elems->tim && !elems->parse_error) {
+ const struct ieee80211_tim_ie *tim_ie = elems->tim;
+
+ elems->dtim_period = tim_ie->dtim_period;
+ elems->dtim_count = tim_ie->dtim_count;
+ }
+
+ /* Override DTIM period and count if needed */
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_period))
+ elems->dtim_period = elems->bssid_index->dtim_period;
+
+ if (elems->bssid_index &&
+ elems->bssid_index_len >=
+ offsetofend(struct ieee80211_bssid_index, dtim_count))
+ elems->dtim_count = elems->bssid_index->dtim_count;
+
+ return elems;
+}
+EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
+
+int ieee80211_parse_bitrates(enum nl80211_chan_width width,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates)
+{
+ u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
+ struct ieee80211_rate *br;
+ int brate, rate, i, j, count = 0;
+
+ *rates = 0;
+
+ for (i = 0; i < srates_len; i++) {
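+		/* bit 7 marks a rate as part of the basic rate set; mask it off */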
+ rate = srates[i] & 0x7f;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
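+			/* bitrates are in 100 kbps units, supported rates in 500 kbps */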
+ brate = DIV_ROUND_UP(br->bitrate, 5);
+ if (brate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d5ea5f5bcf3a..cfc8eb70e966 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -4,7 +4,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2022 Intel Corporation
+ * Copyright (C) 2019, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -119,7 +119,8 @@ void rate_control_rate_update(struct ieee80211_local *local,
rcu_read_unlock();
}
- drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+ if (sta->uploaded)
+ drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
int ieee80211_rate_control_register(const struct rate_control_ops *ops)
@@ -278,10 +279,10 @@ void ieee80211_check_rate_mask(struct ieee80211_link_data *link)
u32 user_mask, basic_rates = link->conf->basic_rates;
enum nl80211_band band;
- if (WARN_ON(!link->conf->chandef.chan))
+ if (WARN_ON(!link->conf->chanreq.oper.chan))
return;
- band = link->conf->chandef.chan->band;
+ band = link->conf->chanreq.oper.chan->band;
if (band == NL80211_BAND_S1GHZ) {
/* TODO */
return;
@@ -761,7 +762,7 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
u32 i, flags;
*mask = sdata->rc_rateidx_mask[sband->band];
- flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
for (i = 0; i < sband->n_bitrates; i++) {
if ((flags & sband->bitrates[i].flags) != flags)
*mask &= ~BIT(i);
@@ -817,7 +818,7 @@ rate_control_apply_mask_ratetbl(struct sta_info *sta,
mcs_mask, vht_mask))
return;
- chan_width = sta->sdata->vif.bss_conf.chandef.width;
+ chan_width = sta->sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
if (rates->rate[i].idx < 0)
break;
@@ -854,7 +855,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
* included in the configured mask and change the rate indexes
* if needed.
*/
- chan_width = sdata->vif.bss_conf.chandef.width;
+ chan_width = sdata->vif.bss_conf.chanreq.oper.width;
for (i = 0; i < max_rates; i++) {
/* Skip invalid rates */
if (rates[i].idx < 0)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0bf72928ccfc..c1f850138405 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -6,7 +6,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/jiffies.h>
@@ -1251,8 +1251,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- u16 sc = le16_to_cpu(hdr->seq_ctrl);
- u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
+ u16 mpdu_seq_num = ieee80211_get_sn(hdr);
u16 head_seq_num, buf_size;
int index;
bool ret = true;
@@ -1435,13 +1434,31 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_any_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ ieee80211_is_any_nullfunc(hdr->frame_control))
return RX_CONTINUE;
if (!rx->sta)
return RX_CONTINUE;
+ if (unlikely(is_multicast_ether_addr(hdr->addr1))) {
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ u16 sn = ieee80211_get_sn(hdr);
+
+ if (!ieee80211_is_data_present(hdr->frame_control))
+ return RX_CONTINUE;
+
+ if (!ieee80211_vif_is_mld(&sdata->vif) ||
+ sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_CONTINUE;
+
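+		/*
+		 * An MLD AP may deliver the same group-addressed frame on
+		 * more than one link; track the last sequence number seen
+		 * (IEEE80211_SN_MODULO doubles as the "none yet" marker)
+		 * and drop anything at or before it.
+		 */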
+ if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO &&
+ ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last))
+ return RX_DROP_U_DUP;
+
+ sdata->u.mgd.mcast_seq_last = sn;
+ return RX_CONTINUE;
+ }
+
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
@@ -3369,8 +3386,7 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
if (color == bss_conf->he_bss_color.color)
ieee80211_obss_color_collision_notify(&rx->sdata->vif,
- BIT_ULL(color),
- GFP_ATOMIC);
+ BIT_ULL(color));
}
}
@@ -3763,6 +3779,28 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break;
}
break;
+ case WLAN_CATEGORY_PROTECTED_EHT:
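+		/*
+		 * TID-to-link mapping negotiation frames: only length-check
+		 * them here, actual handling happens from the queue
+		 */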
+ switch (mgmt->u.action.u.ttlm_req.action_code) {
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_req))
+ goto invalid;
+ goto queue;
+ case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ break;
+
+ if (len < offsetofend(typeof(*mgmt),
+ u.action.u.ttlm_res))
+ goto invalid;
+ goto queue;
+ default:
+ break;
+ }
+ break;
}
return RX_CONTINUE;
@@ -5192,7 +5230,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
*/
if (!status->link_valid && pubsta->mlo) {
- struct ieee80211_hdr *hdr = (void *)skb->data;
struct link_sta_info *link_sta;
link_sta = link_sta_info_get_bss(rx.sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 645355e5f1bc..bca2a259fda6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/if_arp.h>
@@ -237,14 +237,18 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
}
static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
u32 scan_flags, const u8 *da)
{
if (!sdata)
return false;
- /* accept broadcast for OCE */
- if (scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP &&
- is_broadcast_ether_addr(da))
+
+ /* accept broadcast on 6 GHz and for OCE */
+ if (is_broadcast_ether_addr(da) &&
+ (channel->band == NL80211_BAND_6GHZ ||
+ scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP))
return true;
+
if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
return true;
return ether_addr_equal(da, sdata->vif.addr);
@@ -293,6 +297,12 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
}
+ channel = ieee80211_get_channel_khz(local->hw.wiphy,
+ ieee80211_rx_status_to_khz(rx_status));
+
+ if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+ return;
+
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
struct cfg80211_scan_request *scan_req;
struct cfg80211_sched_scan_request *sched_scan_req;
@@ -310,19 +320,15 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
/* ignore ProbeResp to foreign address or non-bcast (OCE)
* unless scanning with randomised address
*/
- if (!ieee80211_scan_accept_presp(sdata1, scan_req_flags,
+ if (!ieee80211_scan_accept_presp(sdata1, channel,
+ scan_req_flags,
mgmt->da) &&
- !ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags,
+ !ieee80211_scan_accept_presp(sdata2, channel,
+ sched_scan_req_flags,
mgmt->da))
return;
}
- channel = ieee80211_get_channel_khz(local->hw.wiphy,
- ieee80211_rx_status_to_khz(rx_status));
-
- if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
- return;
-
bss = ieee80211_bss_info_update(local, rx_status,
mgmt, skb->len,
channel);
@@ -394,6 +400,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata)
req->ie, req->ie_len,
bands_used, req->rates, &chandef,
flags);
+ if (ielen < 0)
+ return false;
local->hw_scan_req->req.ie_len = ielen;
local->hw_scan_req->req.no_cck = req->no_cck;
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr);
@@ -470,7 +478,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
}
/* Set power back to normal operating levels. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if (!hw_scan && was_scanning) {
ieee80211_configure_filter(local);
@@ -517,7 +525,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
/* Software scan is not supported in multi-channel cases */
- if (local->use_chanctx)
+ if (!local->emulate_chanctx)
return -EOPNOTSUPP;
/*
@@ -547,7 +555,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
ieee80211_configure_filter(local);
/* We need to set power level at maximum rate for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
@@ -671,7 +679,10 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
* After sending probe requests, wait for probe responses
* on the channel.
*/
- *next_delay = IEEE80211_CHANNEL_TIME;
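+	/*
+	 * honor a user-requested dwell time, less the probe delay already
+	 * spent, when it exceeds the default
+	 */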
+ *next_delay = msecs_to_jiffies(scan_req->duration) >
+ IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME ?
+ msecs_to_jiffies(scan_req->duration) - IEEE80211_PROBE_DELAY :
+ IEEE80211_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
}
@@ -781,7 +792,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
- (req->channels[0] == local->_oper_chandef.chan)) {
+ (req->channels[0] == local->hw.conf.chandef.chan)) {
/*
* If we are scanning only on the operating channel
* then we do not need to stop normal activities
@@ -799,7 +810,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_configure_filter(local); /* accept probe-responses */
/* We need to ensure power level is at max for scanning. */
- ieee80211_hw_config(local, 0);
+ ieee80211_hw_conf_chan(local);
if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
IEEE80211_CHAN_RADAR)) ||
@@ -964,13 +975,13 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
/* If scanning on oper channel, use whatever channel-type
* is currently in use.
*/
- if (chan == local->_oper_chandef.chan)
- local->scan_chandef = local->_oper_chandef;
+ if (chan == local->hw.conf.chandef.chan)
+ local->scan_chandef = local->hw.conf.chandef;
else
local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
set_channel:
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ if (ieee80211_hw_conf_chan(local))
skip = 1;
/* advance state machine to next channel/band */
@@ -994,7 +1005,10 @@ set_channel:
*/
if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
!scan_req->n_ssids) {
- *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
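+		/* likewise, allow a longer user-requested dwell on passive channels */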
+ *next_delay = msecs_to_jiffies(scan_req->duration) >
+ IEEE80211_PASSIVE_CHANNEL_TIME ?
+ msecs_to_jiffies(scan_req->duration) :
+ IEEE80211_PASSIVE_CHANNEL_TIME;
local->next_scan_state = SCAN_DECISION;
if (scan_req->n_ssids)
set_bit(SCAN_BEACON_WAIT, &local->scanning);
@@ -1011,7 +1025,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
{
/* switch back to the operating channel */
local->scan_chandef.chan = NULL;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ ieee80211_hw_conf_chan(local);
/* disable PS */
ieee80211_offchannel_return(local);
@@ -1310,10 +1324,12 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ieee80211_prepare_scan_chandef(&chandef);
- ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
- &sched_scan_ies, req->ie,
- req->ie_len, bands_used, rate_masks, &chandef,
- flags);
+ ret = ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz,
+ &sched_scan_ies, req->ie,
+ req->ie_len, bands_used, rate_masks,
+ &chandef, flags);
+ if (ret < 0)
+ goto error;
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
@@ -1321,8 +1337,8 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(local->sched_scan_req, req);
}
+error:
kfree(ie);
-
out:
if (ret) {
/* Clean in case of failure after HW restart or upon resume. */
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 55959b0b24c5..327c74e296e2 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -19,21 +19,222 @@
#include "sta_info.h"
#include "wme.h"
+static bool
+wbcs_elem_to_chandef(const struct ieee80211_wide_bw_chansw_ie *wbcs_elem,
+ struct cfg80211_chan_def *chandef)
+{
+ u8 ccfs0 = wbcs_elem->new_center_freq_seg0;
+ u8 ccfs1 = wbcs_elem->new_center_freq_seg1;
+ u32 cf0 = ieee80211_channel_to_frequency(ccfs0, chandef->chan->band);
+ u32 cf1 = ieee80211_channel_to_frequency(ccfs1, chandef->chan->band);
+
+ switch (wbcs_elem->new_channel_width) {
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf0;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ /* deprecated encoding */
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq1 = cf0;
+ chandef->center_freq2 = cf1;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ chandef->center_freq1 = cf0;
+
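+		/*
+		 * a nonzero ccfs1 upgrades the 80 MHz encoding: centers
+		 * 8 channels apart mean 160 MHz, anything farther apart
+		 * means 80+80
+		 */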
+ if (ccfs1) {
+ u8 diff = abs(ccfs0 - ccfs1);
+
+ if (diff == 8) {
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 = cf1;
+ } else if (diff > 8) {
+ chandef->width = NL80211_CHAN_WIDTH_80P80;
+ chandef->center_freq2 = cf1;
+ }
+ }
+ break;
+ case IEEE80211_VHT_CHANWIDTH_USE_HT:
+ default:
+ /* If the WBCS Element is present, new channel bandwidth is
+ * at least 40 MHz.
+ */
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ chandef->center_freq1 = cf0;
+ break;
+ }
+
+ return cfg80211_chandef_valid(chandef);
+}
+
+static void
+validate_chandef_by_ht_vht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ u32 vht_cap_info,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee80211_vht_operation vht_oper;
+
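+	/*
+	 * Rebuild synthetic HT/VHT operation elements from the new chandef
+	 * and run them back through the regular parsing helpers; if the
+	 * round trip fails, the chandef isn't usable in this connection
+	 * mode and the channel is cleared.
+	 */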
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ ht_oper.primary_chan = ieee80211_frequency_to_channel(control_freq);
+ if (control_freq != center_freq1)
+ ht_oper.ht_param = control_freq > center_freq1 ?
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW :
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ else
+ ht_oper.ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+ ieee80211_chandef_ht_oper(&ht_oper, chandef);
+
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
+ return;
+
+ vht_oper.center_freq_seg0_idx =
+ ieee80211_frequency_to_channel(center_freq1);
+ vht_oper.center_freq_seg1_idx = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ WARN_ON(1);
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ vht_oper.center_freq_seg1_idx = vht_oper.center_freq_seg0_idx;
+ vht_oper.center_freq_seg0_idx +=
+ control_freq < center_freq1 ? -8 : 8;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ vht_oper.chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ ht_oper.operation_mode =
+ le16_encode_bits(vht_oper.center_freq_seg1_idx,
+ IEEE80211_HT_OP_MODE_CCFS2_MASK);
+
+ if (!ieee80211_chandef_vht_oper(&sdata->local->hw, vht_cap_info,
+ &vht_oper, &ht_oper, chandef))
+ chandef->chan = NULL;
+}
+
+static void
+validate_chandef_by_6ghz_he_eht_oper(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_conn_settings *conn,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = sdata->local;
+ u32 control_freq, center_freq1, center_freq2;
+ enum nl80211_chan_width chan_width;
+ struct {
+ struct ieee80211_he_operation _oper;
+ struct ieee80211_he_6ghz_oper _6ghz_oper;
+ } __packed he;
+ struct {
+ struct ieee80211_eht_operation _oper;
+ struct ieee80211_eht_operation_info _oper_info;
+ } __packed eht;
+
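+	/*
+	 * same round-trip validation as the HT/VHT variant above, but via
+	 * the HE 6 GHz operation (plus EHT operation when in EHT mode)
+	 */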
+ if (conn->mode < IEEE80211_CONN_MODE_HE) {
+ chandef->chan = NULL;
+ return;
+ }
+
+ control_freq = chandef->chan->center_freq;
+ center_freq1 = chandef->center_freq1;
+ center_freq2 = chandef->center_freq2;
+ chan_width = chandef->width;
+
+ he._oper.he_oper_params =
+ le32_encode_bits(1, IEEE80211_HE_OPERATION_6GHZ_OP_INFO);
+ he._6ghz_oper.primary =
+ ieee80211_frequency_to_channel(control_freq);
+ he._6ghz_oper.ccfs0 = ieee80211_frequency_to_channel(center_freq1);
+ he._6ghz_oper.ccfs1 = center_freq2 ?
+ ieee80211_frequency_to_channel(center_freq2) : 0;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_320:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -16 : 16;
+ he._6ghz_oper.control = IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ he._6ghz_oper.ccfs1 = he._6ghz_oper.ccfs0;
+ he._6ghz_oper.ccfs0 += control_freq < center_freq1 ? -8 : 8;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_80P80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ;
+ break;
+ default:
+ he._6ghz_oper.control =
+ IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ;
+ break;
+ }
+
+ if (conn->mode < IEEE80211_CONN_MODE_EHT) {
+ if (!ieee80211_chandef_he_6ghz_oper(local, &he._oper,
+ NULL, chandef))
+ chandef->chan = NULL;
+ } else {
+ eht._oper.params = IEEE80211_EHT_OPER_INFO_PRESENT;
+ eht._oper_info.control = he._6ghz_oper.control;
+ eht._oper_info.ccfs0 = he._6ghz_oper.ccfs0;
+ eht._oper_info.ccfs1 = he._6ghz_oper.ccfs1;
+
+ if (!ieee80211_chandef_he_6ghz_oper(local, &he._oper,
+ &eht._oper, chandef))
+ chandef->chan = NULL;
+ }
+}
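
For 160 and 320 MHz, the 6 GHz operation encoding does not carry the wide center directly in ccfs0; instead ccfs1 takes the full-width center and ccfs0 is shifted to the 80/160 MHz segment containing the control channel. A toy sketch of that folding, mirroring the +/-8 and +/-16 adjustments above (standalone C, hypothetical helper name):

#include <stdio.h>

/* Hypothetical standalone mirror of the ccfs0/ccfs1 folding above: for
 * 160/320 MHz, ccfs1 carries the full-width center while ccfs0 moves to
 * the 80/160 MHz segment holding the control channel. In channel-number
 * units, 8 steps = 40 MHz and 16 steps = 80 MHz. */
static void fold_ccfs(int ctrl_chan, int center_chan, int width_mhz,
		      int *ccfs0, int *ccfs1)
{
	int step = width_mhz == 320 ? 16 : 8;

	*ccfs1 = center_chan;
	*ccfs0 = center_chan + (ctrl_chan < center_chan ? -step : step);
}

int main(void)
{
	int ccfs0, ccfs1;

	/* 160 MHz, primary channel 1, wide center at channel 15 */
	fold_ccfs(1, 15, 160, &ccfs0, &ccfs1);
	printf("160 MHz: ccfs0=%d ccfs1=%d\n", ccfs0, ccfs1);	/* 7, 15 */

	/* 320 MHz, primary channel 1, wide center at channel 31 */
	fold_ccfs(1, 31, 320, &ccfs0, &ccfs1);
	printf("320 MHz: ccfs0=%d ccfs1=%d\n", ccfs0, ccfs1);	/* 15, 31 */
	return 0;
}
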
+
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum nl80211_band current_band,
u32 vht_cap_info,
- ieee80211_conn_flags_t conn_flags, u8 *bssid,
+ struct ieee80211_conn_settings *conn,
+ u8 *bssid,
struct ieee80211_csa_ie *csa_ie)
{
enum nl80211_band new_band = current_band;
int new_freq;
- u8 new_chan_no;
+ u8 new_chan_no = 0, new_op_class = 0;
struct ieee80211_channel *new_chan;
- struct cfg80211_chan_def new_vht_chandef = {};
+ struct cfg80211_chan_def new_chandef = {};
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
const struct ieee80211_bandwidth_indication *bwi;
+ const struct ieee80211_ext_chansw_ie *ext_chansw_elem;
int secondary_channel_offset = -1;
memset(csa_ie, 0, sizeof(*csa_ie));
@@ -41,36 +242,41 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
sec_chan_offs = elems->sec_chan_offs;
wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
bwi = elems->bandwidth_indication;
+ ext_chansw_elem = elems->ext_chansw_ie;
- if (conn_flags & (IEEE80211_CONN_DISABLE_HT |
- IEEE80211_CONN_DISABLE_40MHZ)) {
+ if (conn->mode < IEEE80211_CONN_MODE_HT ||
+ conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
sec_chan_offs = NULL;
wide_bw_chansw_ie = NULL;
}
- if (conn_flags & IEEE80211_CONN_DISABLE_VHT)
+ if (conn->mode < IEEE80211_CONN_MODE_VHT)
wide_bw_chansw_ie = NULL;
- if (elems->ext_chansw_ie) {
- if (!ieee80211_operating_class_to_band(
- elems->ext_chansw_ie->new_operating_class,
- &new_band)) {
- sdata_info(sdata,
- "cannot understand ECSA IE operating class, %d, ignoring\n",
- elems->ext_chansw_ie->new_operating_class);
+ if (ext_chansw_elem) {
+ new_op_class = ext_chansw_elem->new_operating_class;
+
+ if (!ieee80211_operating_class_to_band(new_op_class, &new_band)) {
+ new_op_class = 0;
+ sdata_info(sdata, "cannot understand ECSA IE operating class, %d, ignoring\n",
+ ext_chansw_elem->new_operating_class);
+ } else {
+ new_chan_no = ext_chansw_elem->new_ch_num;
+ csa_ie->count = ext_chansw_elem->count;
+ csa_ie->mode = ext_chansw_elem->mode;
}
- new_chan_no = elems->ext_chansw_ie->new_ch_num;
- csa_ie->count = elems->ext_chansw_ie->count;
- csa_ie->mode = elems->ext_chansw_ie->mode;
- } else if (elems->ch_switch_ie) {
+ }
+
+ if (!new_op_class && elems->ch_switch_ie) {
new_chan_no = elems->ch_switch_ie->new_ch_num;
csa_ie->count = elems->ch_switch_ie->count;
csa_ie->mode = elems->ch_switch_ie->mode;
- } else {
- /* nothing here we understand */
- return 1;
}
+ /* nothing here we understand */
+ if (!new_chan_no)
+ return 1;
+
/* Mesh Channel Switch Parameters Element */
if (elems->mesh_chansw_params_ie) {
csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
@@ -95,7 +301,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
if (sec_chan_offs) {
secondary_channel_offset = sec_chan_offs->sec_chan_offs;
- } else if (!(conn_flags & IEEE80211_CONN_DISABLE_HT)) {
+ } else if (conn->mode >= IEEE80211_CONN_MODE_HT) {
/* If the secondary channel offset IE is not present,
* we can't know what the post-CSA offset is, so the
* best we can do is use 20 MHz.
@@ -107,26 +313,26 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
default:
/* secondary_channel_offset was present but is invalid */
case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT20);
break;
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40PLUS);
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_HT40MINUS);
break;
case -1:
- cfg80211_chandef_create(&csa_ie->chandef, new_chan,
+ cfg80211_chandef_create(&csa_ie->chanreq.oper, new_chan,
NL80211_CHAN_NO_HT);
/* keep width for 5/10 MHz channels */
- switch (sdata->vif.bss_conf.chandef.width) {
+ switch (sdata->vif.bss_conf.chanreq.oper.width) {
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
- csa_ie->chandef.width =
- sdata->vif.bss_conf.chandef.width;
+ csa_ie->chanreq.oper.width =
+ sdata->vif.bss_conf.chanreq.oper.width;
break;
default:
break;
@@ -134,59 +340,48 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
break;
}
+ /* parse one of the Elements to build a new chandef */
+ memset(&new_chandef, 0, sizeof(new_chandef));
+ new_chandef.chan = new_chan;
if (bwi) {
/* start with the CSA one */
- new_vht_chandef = csa_ie->chandef;
+ new_chandef = csa_ie->chanreq.oper;
/* and update the width accordingly */
- /* FIXME: support 160/320 */
- ieee80211_chandef_eht_oper(&bwi->info, true, true,
- &new_vht_chandef);
- } else if (wide_bw_chansw_ie) {
- u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
- struct ieee80211_vht_operation vht_oper = {
- .chan_width =
- wide_bw_chansw_ie->new_channel_width,
- .center_freq_seg0_idx =
- wide_bw_chansw_ie->new_center_freq_seg0,
- .center_freq_seg1_idx = new_seg1,
- /* .basic_mcs_set doesn't matter */
- };
- struct ieee80211_ht_operation ht_oper = {
- .operation_mode =
- cpu_to_le16(new_seg1 <<
- IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
- };
-
- /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
- * to the previously parsed chandef
- */
- new_vht_chandef = csa_ie->chandef;
-
- /* ignore if parsing fails */
- if (!ieee80211_chandef_vht_oper(&sdata->local->hw,
- vht_cap_info,
- &vht_oper, &ht_oper,
- &new_vht_chandef))
- new_vht_chandef.chan = NULL;
-
- if (conn_flags & IEEE80211_CONN_DISABLE_80P80MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
- ieee80211_chandef_downgrade(&new_vht_chandef);
- if (conn_flags & IEEE80211_CONN_DISABLE_160MHZ &&
- new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
- ieee80211_chandef_downgrade(&new_vht_chandef);
+ ieee80211_chandef_eht_oper(&bwi->info, &new_chandef);
+ } else if (!wide_bw_chansw_ie || !wbcs_elem_to_chandef(wide_bw_chansw_ie,
+ &new_chandef)) {
+ if (!ieee80211_operating_class_to_chandef(new_op_class, new_chan,
+ &new_chandef))
+ new_chandef = csa_ie->chanreq.oper;
}
- /* if VHT data is there validate & use it */
- if (new_vht_chandef.chan) {
- if (!cfg80211_chandef_compatible(&new_vht_chandef,
- &csa_ie->chandef)) {
+ /* check if the new chandef fits the capabilities */
+ if (new_band == NL80211_BAND_6GHZ)
+ validate_chandef_by_6ghz_he_eht_oper(sdata, conn, &new_chandef);
+ else
+ validate_chandef_by_ht_vht_oper(sdata, conn, vht_cap_info,
+ &new_chandef);
+
+ /* if data is there validate the bandwidth & use it */
+ if (new_chandef.chan) {
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320 &&
+ new_chandef.width == NL80211_CHAN_WIDTH_320)
+ ieee80211_chandef_downgrade(&new_chandef, NULL);
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160 &&
+ (new_chandef.width == NL80211_CHAN_WIDTH_80P80 ||
+ new_chandef.width == NL80211_CHAN_WIDTH_160))
+ ieee80211_chandef_downgrade(&new_chandef, NULL);
+
+ if (!cfg80211_chandef_compatible(&new_chandef,
+ &csa_ie->chanreq.oper)) {
sdata_info(sdata,
"BSS %pM: CSA has inconsistent channel data, disconnecting\n",
bssid);
return -EINVAL;
}
- csa_ie->chandef = new_vht_chandef;
+
+ csa_ie->chanreq.oper = new_chandef;
}
if (elems->max_channel_switch_time)
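
The validated chandef is then clamped to the connection's bandwidth limit by repeated downgrading before the compatibility check. A toy model of that clamping (standalone C; the real ieee80211_chandef_downgrade() also fixes up center frequencies, which this sketch omits):

#include <stdio.h>

/* Toy model of the bandwidth clamping above: a chandef wider than the
 * connection's bw_limit is stepped down one width at a time until it
 * fits. Only the width is tracked here. */
enum width { W20, W40, W80, W160, W320 };

static enum width apply_bw_limit(enum width w, enum width limit)
{
	while (w > limit)
		w--;			/* one downgrade step per iteration */
	return w;
}

int main(void)
{
	static const char *name[] = { "20", "40", "80", "160", "320" };

	/* CSA announces 320 MHz but the connection is limited to 160 MHz */
	printf("result: %s MHz\n", name[apply_bw_limit(W320, W160)]);
	return 0;
}
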
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index bf1adcd96b41..da5fdd6f5c85 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -404,7 +404,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
int i;
for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
- if (!(sta->sta.valid_links & BIT(i)))
+ struct link_sta_info *link_sta;
+
+ link_sta = rcu_access_pointer(sta->link[i]);
+ if (!link_sta)
continue;
sta_remove_link(sta, i, false);
@@ -910,6 +913,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
+ ieee80211_check_fast_xmit(sta);
+
return 0;
out_remove:
if (sta->sta.valid_links)
@@ -1561,7 +1566,8 @@ void sta_info_stop(struct ieee80211_local *local)
}
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
@@ -1575,12 +1581,18 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
WARN_ON(vlans && !sdata->bss);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- if (sdata == sta->sdata ||
- (vlans && sdata->bss == sta->sdata->bss)) {
- if (!WARN_ON(__sta_info_destroy_part1(sta)))
- list_add(&sta->free_list, &free_list);
- ret++;
- }
+ if (sdata != sta->sdata &&
+ (!vlans || sdata->bss != sta->sdata->bss))
+ continue;
+
+ if (link_id >= 0 && sta->sta.valid_links &&
+ !(sta->sta.valid_links & BIT(link_id)))
+ continue;
+
+ if (!WARN_ON(__sta_info_destroy_part1(sta)))
+ list_add(&sta->free_list, &free_list);
+
+ ret++;
}
if (!list_empty(&free_list)) {
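
The new filter only skips entries when a specific link was requested and the station is an MLD that does not use that link; legacy stations with an empty valid_links bitmap are always flushed. A standalone mirror of that predicate (plain C):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the flush filter above: with link_id >= 0, only MLD stations
 * actually using that link are removed; non-MLD stations
 * (valid_links == 0) and link_id == -1 always flush. */
static bool should_flush(unsigned int valid_links, int link_id)
{
	if (link_id < 0)
		return true;			/* flush everything */
	if (!valid_links)
		return true;			/* legacy STA: no link bitmap */
	return valid_links & (1U << link_id);	/* MLD: must use this link */
}

int main(void)
{
	printf("%d %d %d\n",
	       should_flush(0x0, 1),	/* legacy STA       -> 1 */
	       should_flush(0x5, 1),	/* uses links 0,2   -> 0 */
	       should_flush(0x5, 2));	/* uses links 0,2   -> 1 */
	return 0;
}
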
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5ef1554f991f..f03731a5bbee 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -886,8 +886,12 @@ void sta_info_stop(struct ieee80211_local *local);
*
* @sdata: sdata to remove all stations from
* @vlans: if the given interface is an AP interface, also flush VLANs
+ * @link_id: if given (>= 0), only remove STA entries that use @link_id;
+ * pass -1 to remove all STA entries.
*/
-int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans);
+int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
+ int link_id);
/**
* sta_info_flush - flush matching STA entries from the STA table
@@ -895,10 +899,14 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans);
* Returns the number of removed STA entries.
*
* @sdata: sdata to remove all stations from
+ * @link_id: if given (>= 0), only remove STA entries that use @link_id;
+ * pass -1 to remove all STA entries.
*/
-static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata)
+static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata,
+ int link_id)
{
- return __sta_info_flush(sdata, false);
+ return __sta_info_flush(sdata, false, link_id);
}
void sta_set_rate_info_tx(struct sta_info *sta,
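
With the signature change, every caller now states its intent explicitly. Hypothetical call sites (assuming an sdata and link_id in scope; this fragment only illustrates the new parameter's contract as documented above):

static void flush_examples(struct ieee80211_sub_if_data *sdata, int link_id)
{
	sta_info_flush(sdata, -1);		/* pre-MLO behavior: every STA */
	sta_info_flush(sdata, link_id);		/* only STAs using link_id */
}
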
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 49730b424141..42d9c06cbb84 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -6,7 +6,7 @@
* Copyright 2014, Intel Corporation
* Copyright 2014 Intel Mobile Communications GmbH
* Copyright 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2019, 2021-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -159,7 +159,7 @@ static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link,
u8 *pos;
u8 op_class;
- if (!ieee80211_chandef_to_operating_class(&link->conf->chandef,
+ if (!ieee80211_chandef_to_operating_class(&link->conf->chanreq.oper,
&op_class))
return;
@@ -347,7 +347,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
(uc.width > sta->tdls_chandef.width &&
!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
sdata->wdev.iftype)))
- ieee80211_chandef_downgrade(&uc);
+ ieee80211_chandef_downgrade(&uc, NULL);
if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
@@ -382,8 +382,8 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sband))
return;
- ieee80211_add_srates_ie(sdata, skb, false, sband->band);
- ieee80211_add_ext_srates_ie(sdata, skb, false, sband->band);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_SUPP_RATES);
+ ieee80211_put_srates_elem(skb, sband, 0, 0, 0, WLAN_EID_EXT_SUPP_RATES);
ieee80211_tdls_add_supp_channels(sdata, skb);
/* add any custom IEs that go before Extended Capabilities */
@@ -438,7 +438,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
}
ieee80211_tdls_add_oper_classes(link, skb);
@@ -548,30 +548,14 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the HE-cap from sband */
- if (he_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- __le16 he_6ghz_capa;
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(he_cap->he_cap_elem) +
- ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
- ieee80211_he_ppe_size(he_cap->ppe_thres[0],
- he_cap->he_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size);
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
+ ieee80211_put_he_cap(skb, sdata, sband, NULL);
/* Build HE 6 GHz capa IE from sband */
- if (sband->band == NL80211_BAND_6GHZ) {
- cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
- pos = skb_put(skb, cap_size);
- he_6ghz_capa =
- ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif);
- pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa,
- pos + cap_size);
- }
+ if (sband->band == NL80211_BAND_6GHZ)
+ ieee80211_put_he_6ghz_cap(skb, sdata, link->smps_mode);
}
/* add any custom IEs that go before EHT capabilities */
@@ -591,21 +575,10 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link,
}
/* build the EHT-cap from sband */
- if (he_cap && eht_cap &&
- (action_code == WLAN_TDLS_SETUP_REQUEST ||
- action_code == WLAN_TDLS_SETUP_RESPONSE ||
- action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) {
- u8 cap_size;
-
- cap_size =
- 2 + 1 + sizeof(eht_cap->eht_cap_elem) +
- ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem, false) +
- ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
- pos = skb_put(skb, cap_size);
- ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false);
- }
+ if (action_code == WLAN_TDLS_SETUP_REQUEST ||
+ action_code == WLAN_TDLS_SETUP_RESPONSE ||
+ action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
+ ieee80211_put_eht_cap(skb, sdata, sband, NULL);
/* add any remaining IEs */
if (extra_ies_len) {
@@ -638,7 +611,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
if (WARN_ON_ONCE(!sta || !ap_sta))
return;
- sta->tdls_chandef = link->conf->chandef;
+ sta->tdls_chandef = link->conf->chanreq.oper;
/* add any custom IEs that go before the QoS IE */
if (extra_ies_len) {
@@ -684,7 +657,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link,
pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap,
- &link->conf->chandef, prot,
+ &link->conf->chanreq.oper, prot,
true);
}
@@ -1413,8 +1386,8 @@ iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
u16 opmode;
- /* Nothing to do if the BSS connection uses HT */
- if (!(sdata->deflink.u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HT))
+ /* Nothing to do if the BSS connection uses (at least) HT */
+ if (sdata->deflink.u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT)
return;
tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) ||
diff --git a/net/mac80211/tests/elems.c b/net/mac80211/tests/elems.c
index 997d0cd27b2d..30fc0acb7ac2 100644
--- a/net/mac80211/tests/elems.c
+++ b/net/mac80211/tests/elems.c
@@ -14,6 +14,7 @@ static void mle_defrag(struct kunit *test)
struct ieee80211_elems_parse_params parse_params = {
.link_id = 12,
.from_ap = true,
+ .mode = IEEE80211_CONN_MODE_EHT,
};
struct ieee802_11_elems *parsed;
struct sk_buff *skb;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 06835ed4c44f..478b32d2520a 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2,7 +2,7 @@
/*
* Portions of this file
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -50,7 +50,7 @@
__entry->center_freq1 = (c) ? (c)->center_freq1 : 0; \
__entry->freq1_offset = (c) ? (c)->freq1_offset : 0; \
__entry->center_freq2 = (c) ? (c)->center_freq2 : 0;
-#define CHANDEF_PR_FMT " control:%d.%03d MHz width:%d center: %d.%03d/%d MHz"
+#define CHANDEF_PR_FMT " chandef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define CHANDEF_PR_ARG __entry->control_freq, __entry->freq_offset, __entry->chan_width, \
__entry->center_freq1, __entry->freq1_offset, __entry->center_freq2
@@ -69,22 +69,45 @@
__entry->min_center_freq1 = (c)->center_freq1; \
__entry->min_freq1_offset = (c)->freq1_offset; \
__entry->min_center_freq2 = (c)->center_freq2;
-#define MIN_CHANDEF_PR_FMT " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
+#define MIN_CHANDEF_PR_FMT " mindef(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
#define MIN_CHANDEF_PR_ARG __entry->min_control_freq, __entry->min_freq_offset, \
__entry->min_chan_width, \
__entry->min_center_freq1, __entry->min_freq1_offset, \
__entry->min_center_freq2
+#define AP_CHANDEF_ENTRY \
+ __field(u32, ap_control_freq) \
+ __field(u32, ap_freq_offset) \
+ __field(u32, ap_chan_width) \
+ __field(u32, ap_center_freq1) \
+ __field(u32, ap_freq1_offset) \
+ __field(u32, ap_center_freq2)
+
+#define AP_CHANDEF_ASSIGN(c) \
+ __entry->ap_control_freq = (c)->chan ? (c)->chan->center_freq : 0;\
+ __entry->ap_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;\
+ __entry->ap_chan_width = (c)->chan ? (c)->width : 0; \
+ __entry->ap_center_freq1 = (c)->chan ? (c)->center_freq1 : 0; \
+ __entry->ap_freq1_offset = (c)->chan ? (c)->freq1_offset : 0; \
+ __entry->ap_center_freq2 = (c)->chan ? (c)->center_freq2 : 0;
+#define AP_CHANDEF_PR_FMT " ap(%d.%03d MHz,width:%d,center: %d.%03d/%d MHz)"
+#define AP_CHANDEF_PR_ARG __entry->ap_control_freq, __entry->ap_freq_offset, \
+ __entry->ap_chan_width, \
+ __entry->ap_center_freq1, __entry->ap_freq1_offset, \
+ __entry->ap_center_freq2
+
#define CHANCTX_ENTRY CHANDEF_ENTRY \
MIN_CHANDEF_ENTRY \
+ AP_CHANDEF_ENTRY \
__field(u8, rx_chains_static) \
__field(u8, rx_chains_dynamic)
#define CHANCTX_ASSIGN CHANDEF_ASSIGN(&ctx->conf.def) \
MIN_CHANDEF_ASSIGN(&ctx->conf.min_def) \
+ AP_CHANDEF_ASSIGN(&ctx->conf.ap) \
__entry->rx_chains_static = ctx->conf.rx_chains_static; \
__entry->rx_chains_dynamic = ctx->conf.rx_chains_dynamic
-#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT " chains:%d/%d"
-#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, \
+#define CHANCTX_PR_FMT CHANDEF_PR_FMT MIN_CHANDEF_PR_FMT AP_CHANDEF_PR_FMT " chains:%d/%d"
+#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, AP_CHANDEF_PR_ARG, \
__entry->rx_chains_static, __entry->rx_chains_dynamic
#define KEY_ENTRY __field(u32, cipher) \
@@ -503,9 +526,9 @@ TRACE_EVENT(drv_link_info_changed,
__entry->ht_operation_mode = link_conf->ht_operation_mode;
__entry->cqm_rssi_thold = link_conf->cqm_rssi_thold;
__entry->cqm_rssi_hyst = link_conf->cqm_rssi_hyst;
- __entry->channel_width = link_conf->chandef.width;
- __entry->channel_cfreq1 = link_conf->chandef.center_freq1;
- __entry->channel_cfreq1_offset = link_conf->chandef.freq1_offset;
+ __entry->channel_width = link_conf->chanreq.oper.width;
+ __entry->channel_cfreq1 = link_conf->chanreq.oper.center_freq1;
+ __entry->channel_cfreq1_offset = link_conf->chanreq.oper.freq1_offset;
__entry->qos = link_conf->qos;
__entry->hidden_ssid = link_conf->hidden_ssid;
__entry->txpower = link_conf->txpower;
@@ -3035,6 +3058,34 @@ TRACE_EVENT(api_radar_detected,
)
);
+TRACE_EVENT(api_request_smps,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ enum ieee80211_smps_mode smps_mode),
+
+ TP_ARGS(local, sdata, link, smps_mode),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(int, link_id)
+ __field(u32, smps_mode)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->link_id = link->link_id;
+ __entry->smps_mode = smps_mode;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " " VIF_PR_FMT " link:%d, smps_mode:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->smps_mode
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
@@ -3088,6 +3139,58 @@ TRACE_EVENT(stop_queue,
)
);
+TRACE_EVENT(drv_can_neg_ttlm,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT ", " VIF_PR_FMT, LOCAL_PR_ARG, VIF_PR_ARG)
+);
+
+TRACE_EVENT(drv_neg_ttlm_res,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_neg_ttlm_res res,
+ struct ieee80211_neg_ttlm *neg_ttlm),
+
+ TP_ARGS(local, sdata, res, neg_ttlm),
+
+ TP_STRUCT__entry(LOCAL_ENTRY
+ VIF_ENTRY
+ __field(u32, res)
+ __array(u16, downlink, sizeof(u16) * 8)
+ __array(u16, uplink, sizeof(u16) * 8)
+ ),
+
+ TP_fast_assign(LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->res = res;
+ memcpy(__entry->downlink, neg_ttlm->downlink,
+ sizeof(neg_ttlm->downlink));
+ memcpy(__entry->uplink, neg_ttlm->uplink,
+ sizeof(neg_ttlm->uplink));
+ ),
+
+ TP_printk(LOCAL_PR_FMT " " VIF_PR_FMT " response: %d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->res
+ )
+);
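
Both TTLM trace events copy the negotiated TID-to-link maps verbatim: one u16 link bitmap per TID, eight TIDs per direction, matching the __array(u16, ..., sizeof(u16) * 8) entries. A toy illustration of that layout (standalone C; struct name and values are made up for the example):

#include <stdio.h>

#define TTLM_NUM_TIDS 8	/* assumption: matches the sizeof(u16) * 8 arrays */

/* Toy negotiated TID-to-link map: per TID, a u16 bitmap of link IDs,
 * one map per direction, as copied by the trace events above. */
struct toy_neg_ttlm {
	unsigned short downlink[TTLM_NUM_TIDS];
	unsigned short uplink[TTLM_NUM_TIDS];
};

int main(void)
{
	struct toy_neg_ttlm t;
	int tid;

	/* map voice TIDs (6, 7) to link 0 only, the rest to links 0 and 1 */
	for (tid = 0; tid < TTLM_NUM_TIDS; tid++) {
		t.downlink[tid] = tid >= 6 ? 0x1 : 0x3;
		t.uplink[tid] = t.downlink[tid];
	}

	for (tid = 0; tid < TTLM_NUM_TIDS; tid++)
		printf("tid %d: dl=0x%x ul=0x%x\n",
		       tid, t.downlink[tid], t.uplink[tid]);
	return 0;
}
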
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index c9dbe9aab7bd..aea4ce55c5ac 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -16,8 +16,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
-#define MAX_MSG_LEN 120
-
DECLARE_EVENT_CLASS(mac80211_msg_event,
TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 314998fdb1a5..6bf223e6cd1a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*
* Transmit and frame generation functions.
*/
@@ -133,6 +133,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
mrate = sband->bitrates[0].bitrate;
for (i = 0; i < sband->n_bitrates; i++) {
struct ieee80211_rate *r = &sband->bitrates[i];
+ u32 flag;
if (r->bitrate > txrate->bitrate)
break;
@@ -145,28 +146,24 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
switch (sband->band) {
case NL80211_BAND_2GHZ:
- case NL80211_BAND_LC: {
- u32 flag;
+ case NL80211_BAND_LC:
if (tx->sdata->deflink.operating_11g_mode)
flag = IEEE80211_RATE_MANDATORY_G;
else
flag = IEEE80211_RATE_MANDATORY_B;
- if (r->flags & flag)
- mrate = r->bitrate;
break;
- }
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
- if (r->flags & IEEE80211_RATE_MANDATORY_A)
- mrate = r->bitrate;
+ flag = IEEE80211_RATE_MANDATORY_A;
break;
- case NL80211_BAND_S1GHZ:
- case NL80211_BAND_60GHZ:
- /* TODO, for now fall through */
- case NUM_NL80211_BANDS:
+ default:
+ flag = 0;
WARN_ON(1);
break;
}
+
+ if (r->flags & flag)
+ mrate = r->bitrate;
}
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
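
The refactor hoists the band-specific mandatory-rate flag out of the switch so the mrate update happens in exactly one place. A standalone sketch of the resulting scan, which remembers the highest mandatory bitrate not exceeding the TX rate (plain C, toy flag values):

#include <stdio.h>

#define MANDATORY_B 0x1	/* toy flag values, not the kernel's */
#define MANDATORY_G 0x2
#define MANDATORY_A 0x4

struct rate {
	int bitrate;	/* units of 100 kbps, as in mac80211 */
	unsigned int flags;
};

/* Sketch of the refactored scan: pick the band's mandatory flag once,
 * then keep the highest mandatory bitrate not exceeding the TX rate. */
static int mandatory_fallback(const struct rate *r, int n, int txrate,
			      unsigned int flag)
{
	int i, mrate = r[0].bitrate;

	for (i = 0; i < n; i++) {
		if (r[i].bitrate > txrate)
			break;
		if (r[i].flags & flag)
			mrate = r[i].bitrate;
	}
	return mrate;
}

int main(void)
{
	static const struct rate g_band[] = {
		{ 10, MANDATORY_B }, { 20, MANDATORY_B | MANDATORY_G },
		{ 55, MANDATORY_B }, { 60, MANDATORY_G }, { 90, 0 },
	};

	printf("%d\n", mandatory_fallback(g_band, 5, 90, MANDATORY_G)); /* 60 */
	return 0;
}
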
@@ -2393,12 +2390,18 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (chanctx_conf)
chandef = &chanctx_conf->def;
- else if (!local->use_chanctx)
- chandef = &local->_oper_chandef;
else
goto fail_rcu;
/*
+ * If driver/HW supports IEEE80211_CHAN_CAN_MONITOR we still
+ * shouldn't transmit on disabled channels.
+ */
+ if (!cfg80211_chandef_usable(local->hw.wiphy, chandef,
+ IEEE80211_CHAN_DISABLED))
+ goto fail_rcu;
+
+ /*
* Frame injection is not allowed if beaconing is not allowed
* or if we need radar detection. Beaconing is usually not allowed when
* the mode or operation (Adhoc, AP, Mesh) does not support DFS.
@@ -3048,7 +3051,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
sdata->vif.type == NL80211_IFTYPE_STATION)
goto out;
- if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
goto out;
if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
@@ -3100,10 +3103,11 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
/* DA SA BSSID */
build.da_offs = offsetof(struct ieee80211_hdr, addr1);
build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+ rcu_read_lock();
link = rcu_dereference(sdata->link[tdls_link_id]);
- if (WARN_ON_ONCE(!link))
- break;
- memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
+ if (!WARN_ON_ONCE(!link))
+ memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN);
+ rcu_read_unlock();
build.hdr_len = 24;
break;
}
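
The fast-xmit fix adopts the usual RCU read-side pattern: take the lock, load the protected link pointer once, and only touch it inside the critical section, instead of warning and breaking out with no protection. A userspace analogy of that pattern (C11 atomics standing in for the RCU accessors; an illustration, not kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

/* Userspace analogy of the pattern above, with C11 atomics standing in
 * for rcu_read_lock()/rcu_dereference(): load the protected pointer once
 * inside the critical section and cope with it being NULL. */
struct toy_link {
	unsigned char bssid[6];
};

static _Atomic(struct toy_link *) current_link;

static void copy_bssid(unsigned char *dst)
{
	/* rcu_read_lock() would go here */
	struct toy_link *l = atomic_load(&current_link);

	if (l)	/* the link may be gone; don't fall through with stale data */
		memcpy(dst, l->bssid, sizeof(l->bssid));
	/* rcu_read_unlock() would go here */
}

int main(void)
{
	static struct toy_link l = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
	unsigned char bssid[6] = { 0 };

	atomic_store(&current_link, &l);
	copy_bssid(bssid);
	printf("%02x:...:%02x\n", bssid[0], bssid[5]);
	return 0;
}
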
@@ -3926,6 +3930,7 @@ begin:
goto begin;
skb = __skb_dequeue(&tx.skbs);
+ info = IEEE80211_SKB_CB(skb);
if (!skb_queue_empty(&tx.skbs)) {
spin_lock_bh(&fq->lock);
@@ -3957,7 +3962,8 @@ begin:
ieee80211_free_txskb(&local->hw, skb);
goto begin;
} else {
- vif = NULL;
+ info->control.vif = NULL;
+ return skb;
}
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -3970,7 +3976,7 @@ begin:
}
encap_out:
- IEEE80211_SKB_CB(skb)->control.vif = vif;
+ info->control.vif = vif;
if (tx.sta &&
wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) {
@@ -5030,16 +5036,24 @@ static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
return beacon->cntdwn_current_counter;
}
-u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 count = 0;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return 0;
+
rcu_read_lock();
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto unlock;
+
if (sdata->vif.type == NL80211_IFTYPE_AP)
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
beacon = rcu_dereference(sdata->u.ibss.presp);
else if (ieee80211_vif_is_mesh(&sdata->vif))
@@ -5081,9 +5095,11 @@ unlock:
}
EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
-bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif,
+ unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link;
struct beacon_data *beacon = NULL;
u8 *beacon_data;
size_t beacon_data_len;
@@ -5092,9 +5108,17 @@ bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
if (!ieee80211_sdata_running(sdata))
return false;
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return false;
+
rcu_read_lock();
+
+ link = rcu_dereference(sdata->link[link_id]);
+ if (!link)
+ goto out;
+
if (vif->type == NL80211_IFTYPE_AP) {
- beacon = rcu_dereference(sdata->deflink.u.ap.beacon);
+ beacon = rcu_dereference(link->u.ap.beacon);
if (WARN_ON(!beacon || !beacon->tail))
goto out;
beacon_data = beacon->tail;
@@ -5280,7 +5304,7 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, link->link_id);
ieee80211_set_beacon_cntdwn(sdata, beacon, link);
}
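
Both countdown helpers now follow the same per-link shape: bounds-check the link ID, look up the link under RCU, and use that link's beacon rather than deflink's. A condensed standalone model of the lookup (plain C; the types and MLD link limit are stand-ins):

#include <stdio.h>

#define MAX_NUM_LINKS 15	/* stand-in for IEEE80211_MLD_MAX_NUM_LINKS */

struct toy_beacon {
	int cntdwn;
};

struct toy_link {
	struct toy_beacon *beacon;
};

/* Condensed model of the per-link lookup above: validate the link ID,
 * fetch the link (rcu_dereference() in the kernel), use its beacon. */
static int update_cntdwn(struct toy_link **links, unsigned int link_id)
{
	struct toy_link *link;

	if (link_id >= MAX_NUM_LINKS)
		return 0;

	link = links[link_id];
	if (!link || !link->beacon)
		return 0;

	return --link->beacon->cntdwn;
}

int main(void)
{
	struct toy_beacon b = { .cntdwn = 3 };
	struct toy_link l = { .beacon = &b };
	struct toy_link *links[MAX_NUM_LINKS] = { [2] = &l };

	printf("%d\n", update_cntdwn(links, 2));	/* 2 */
	printf("%d\n", update_cntdwn(links, 5));	/* 0: no such link */
	return 0;
}
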
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 643c54855be6..627bd5a8bda5 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -6,7 +6,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*
* utilities for mac80211
*/
@@ -46,6 +46,11 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
+const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited = {
+ .mode = IEEE80211_CONN_MODE_EHT,
+ .bw_limit = IEEE80211_CONN_BW_LIMIT_320,
+};
+
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type)
{
@@ -912,776 +917,6 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
-static void
-ieee80211_parse_extension_element(u32 *crc,
- const struct element *elem,
- struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- const void *data = elem->data + 1;
- bool calc_crc = false;
- u8 len;
-
- if (!elem->datalen)
- return;
-
- len = elem->datalen - 1;
-
- switch (elem->data[0]) {
- case WLAN_EID_EXT_HE_MU_EDCA:
- calc_crc = true;
- if (len >= sizeof(*elems->mu_edca_param_set))
- elems->mu_edca_param_set = data;
- break;
- case WLAN_EID_EXT_HE_CAPABILITY:
- if (ieee80211_he_capa_size_ok(data, len)) {
- elems->he_cap = data;
- elems->he_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_HE_OPERATION:
- calc_crc = true;
- if (len >= sizeof(*elems->he_operation) &&
- len >= ieee80211_he_oper_size(data) - 1)
- elems->he_operation = data;
- break;
- case WLAN_EID_EXT_UORA:
- if (len >= 1)
- elems->uora_element = data;
- break;
- case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME:
- if (len == 3)
- elems->max_channel_switch_time = data;
- break;
- case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION:
- if (len >= sizeof(*elems->mbssid_config_ie))
- elems->mbssid_config_ie = data;
- break;
- case WLAN_EID_EXT_HE_SPR:
- if (len >= sizeof(*elems->he_spr) &&
- len >= ieee80211_he_spr_size(data))
- elems->he_spr = data;
- break;
- case WLAN_EID_EXT_HE_6GHZ_CAPA:
- if (len >= sizeof(*elems->he_6ghz_capa))
- elems->he_6ghz_capa = data;
- break;
- case WLAN_EID_EXT_EHT_CAPABILITY:
- if (ieee80211_eht_capa_size_ok(elems->he_cap,
- data, len,
- params->from_ap)) {
- elems->eht_cap = data;
- elems->eht_cap_len = len;
- }
- break;
- case WLAN_EID_EXT_EHT_OPERATION:
- if (ieee80211_eht_oper_size_ok(data, len))
- elems->eht_operation = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_EHT_MULTI_LINK:
- calc_crc = true;
-
- if (ieee80211_mle_size_ok(data, len)) {
- const struct ieee80211_multi_link_elem *mle =
- (void *)data;
-
- switch (le16_get_bits(mle->control,
- IEEE80211_ML_CONTROL_TYPE)) {
- case IEEE80211_ML_CONTROL_TYPE_BASIC:
- elems->ml_basic_elem = (void *)elem;
- elems->ml_basic = data;
- elems->ml_basic_len = len;
- break;
- case IEEE80211_ML_CONTROL_TYPE_RECONF:
- elems->ml_reconf_elem = (void *)elem;
- elems->ml_reconf = data;
- elems->ml_reconf_len = len;
- break;
- default:
- break;
- }
- }
- break;
- case WLAN_EID_EXT_BANDWIDTH_INDICATION:
- if (ieee80211_bandwidth_indication_size_ok(data, len))
- elems->bandwidth_indication = data;
- calc_crc = true;
- break;
- case WLAN_EID_EXT_TID_TO_LINK_MAPPING:
- calc_crc = true;
- if (ieee80211_tid_to_link_map_size_ok(data, len) &&
- elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) {
- elems->ttlm[elems->ttlm_num] = (void *)data;
- elems->ttlm_num++;
- }
- break;
- }
-
- if (crc && calc_crc)
- *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2);
-}
-
-static u32
-_ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params,
- struct ieee802_11_elems *elems,
- const struct element *check_inherit)
-{
- const struct element *elem;
- bool calc_crc = params->filter != 0;
- DECLARE_BITMAP(seen_elems, 256);
- u32 crc = params->crc;
-
- bitmap_zero(seen_elems, 256);
-
- for_each_element(elem, params->start, params->len) {
- const struct element *subelem;
- bool elem_parse_failed;
- u8 id = elem->id;
- u8 elen = elem->datalen;
- const u8 *pos = elem->data;
-
- if (check_inherit &&
- !cfg80211_is_element_inherited(elem,
- check_inherit))
- continue;
-
- switch (id) {
- case WLAN_EID_SSID:
- case WLAN_EID_SUPP_RATES:
- case WLAN_EID_FH_PARAMS:
- case WLAN_EID_DS_PARAMS:
- case WLAN_EID_CF_PARAMS:
- case WLAN_EID_TIM:
- case WLAN_EID_IBSS_PARAMS:
- case WLAN_EID_CHALLENGE:
- case WLAN_EID_RSN:
- case WLAN_EID_ERP_INFO:
- case WLAN_EID_EXT_SUPP_RATES:
- case WLAN_EID_HT_CAPABILITY:
- case WLAN_EID_HT_OPERATION:
- case WLAN_EID_VHT_CAPABILITY:
- case WLAN_EID_VHT_OPERATION:
- case WLAN_EID_MESH_ID:
- case WLAN_EID_MESH_CONFIG:
- case WLAN_EID_PEER_MGMT:
- case WLAN_EID_PREQ:
- case WLAN_EID_PREP:
- case WLAN_EID_PERR:
- case WLAN_EID_RANN:
- case WLAN_EID_CHANNEL_SWITCH:
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- case WLAN_EID_COUNTRY:
- case WLAN_EID_PWR_CONSTRAINT:
- case WLAN_EID_TIMEOUT_INTERVAL:
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- case WLAN_EID_CHAN_SWITCH_PARAM:
- case WLAN_EID_EXT_CAPABILITY:
- case WLAN_EID_CHAN_SWITCH_TIMING:
- case WLAN_EID_LINK_ID:
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- case WLAN_EID_RSNX:
- case WLAN_EID_S1G_BCN_COMPAT:
- case WLAN_EID_S1G_CAPABILITIES:
- case WLAN_EID_S1G_OPERATION:
- case WLAN_EID_AID_RESPONSE:
- case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
- /*
- * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
- * that if the content gets bigger it might be needed more than once
- */
- if (test_bit(id, seen_elems)) {
- elems->parse_error = true;
- continue;
- }
- break;
- }
-
- if (calc_crc && id < 64 && (params->filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elem_parse_failed = false;
-
- switch (id) {
- case WLAN_EID_LINK_ID:
- if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) {
- elem_parse_failed = true;
- break;
- }
- elems->lnk_id = (void *)(pos - 2);
- break;
- case WLAN_EID_CHAN_SWITCH_TIMING:
- if (elen < sizeof(struct ieee80211_ch_switch_timing)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_sw_timing = (void *)pos;
- break;
- case WLAN_EID_EXT_CAPABILITY:
- elems->ext_capab = pos;
- elems->ext_capab_len = elen;
- break;
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- if (elen >= 1)
- elems->ds_params = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- } else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- if (elen >= 1)
- elems->erp_info = pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_HT_OPERATION:
- if (elen >= sizeof(struct ieee80211_ht_operation))
- elems->ht_operation = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_vht_cap))
- elems->vht_cap_elem = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_VHT_OPERATION:
- if (elen >= sizeof(struct ieee80211_vht_operation)) {
- elems->vht_operation = (void *)pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_OPMODE_NOTIF:
- if (elen > 0) {
- elems->opmode_notif = pos;
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
- break;
- }
- elem_parse_failed = true;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_PEER_MGMT:
- elems->peering = pos;
- elems->peering_len = elen;
- break;
- case WLAN_EID_MESH_AWAKE_WINDOW:
- if (elen >= 2)
- elems->awake_window = (void *)pos;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- if (elen != sizeof(struct ieee80211_channel_sw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ch_switch_ie = (void *)pos;
- break;
- case WLAN_EID_EXT_CHANSWITCH_ANN:
- if (elen != sizeof(struct ieee80211_ext_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->ext_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_SECONDARY_CHANNEL_OFFSET:
- if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->sec_chan_offs = (void *)pos;
- break;
- case WLAN_EID_CHAN_SWITCH_PARAM:
- if (elen <
- sizeof(*elems->mesh_chansw_params_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->mesh_chansw_params_ie = (void *)pos;
- break;
- case WLAN_EID_WIDE_BW_CHANNEL_SWITCH:
- if (!params->action ||
- elen < sizeof(*elems->wide_bw_chansw_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->wide_bw_chansw_ie = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH_WRAPPER:
- if (params->action) {
- elem_parse_failed = true;
- break;
- }
- /*
- * This is a bit tricky, but as we only care about
- * a few elements, parse them out manually.
- */
- subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH,
- pos, elen);
- if (subelem) {
- if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie))
- elems->wide_bw_chansw_ie =
- (void *)subelem->data;
- else
- elem_parse_failed = true;
- }
-
- subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION,
- pos, elen);
- if (subelem) {
- const void *edata = subelem->data + 1;
- u8 edatalen = subelem->datalen - 1;
-
- if (ieee80211_bandwidth_indication_size_ok(edata,
- edatalen))
- elems->bandwidth_indication = edata;
- else
- elem_parse_failed = true;
- }
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- if (elen != 1) {
- elem_parse_failed = true;
- break;
- }
- elems->pwr_constr_elem = pos;
- break;
- case WLAN_EID_CISCO_VENDOR_SPECIFIC:
- /* Lots of different options exist, but we only care
- * about the Dynamic Transmit Power Control element.
- * First check for the Cisco OUI, then for the DTPC
- * tag (0x00).
- */
- if (elen < 4) {
- elem_parse_failed = true;
- break;
- }
-
- if (pos[0] != 0x00 || pos[1] != 0x40 ||
- pos[2] != 0x96 || pos[3] != 0x00)
- break;
-
- if (elen != 6) {
- elem_parse_failed = true;
- break;
- }
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- elems->cisco_dtpc_elem = pos;
- break;
- case WLAN_EID_ADDBA_EXT:
- if (elen < sizeof(struct ieee80211_addba_ext_ie)) {
- elem_parse_failed = true;
- break;
- }
- elems->addba_ext_ie = (void *)pos;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
- elems->timeout_int = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_BSS_MAX_IDLE_PERIOD:
- if (elen >= sizeof(*elems->max_idle_period_ie))
- elems->max_idle_period_ie = (void *)pos;
- break;
- case WLAN_EID_RSNX:
- elems->rsnx = pos;
- elems->rsnx_len = elen;
- break;
- case WLAN_EID_TX_POWER_ENVELOPE:
- if (elen < 1 ||
- elen > sizeof(struct ieee80211_tx_pwr_env))
- break;
-
- if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env))
- break;
-
- elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos;
- elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen;
- elems->tx_pwr_env_num++;
- break;
- case WLAN_EID_EXTENSION:
- ieee80211_parse_extension_element(calc_crc ?
- &crc : NULL,
- elem, elems, params);
- break;
- case WLAN_EID_S1G_CAPABILITIES:
- if (elen >= sizeof(*elems->s1g_capab))
- elems->s1g_capab = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_OPERATION:
- if (elen == sizeof(*elems->s1g_oper))
- elems->s1g_oper = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_S1G_BCN_COMPAT:
- if (elen == sizeof(*elems->s1g_bcn_compat))
- elems->s1g_bcn_compat = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- case WLAN_EID_AID_RESPONSE:
- if (elen == sizeof(struct ieee80211_aid_response_ie))
- elems->aid_resp = (void *)pos;
- else
- elem_parse_failed = true;
- break;
- default:
- break;
- }
-
- if (elem_parse_failed)
- elems->parse_error = true;
- else
- __set_bit(id, seen_elems);
- }
-
- if (!for_each_element_completed(elem, params->start, params->len))
- elems->parse_error = true;
-
- return crc;
-}
-
-static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- struct cfg80211_bss *bss,
- u8 *nontransmitted_profile)
-{
- const struct element *elem, *sub;
- size_t profile_len = 0;
- bool found = false;
-
- if (!bss || !bss->transmitted_bss)
- return profile_len;
-
- for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) {
- if (elem->datalen < 2)
- continue;
- if (elem->data[0] < 1 || elem->data[0] > 8)
- continue;
-
- for_each_element(sub, elem->data + 1, elem->datalen - 1) {
- u8 new_bssid[ETH_ALEN];
- const u8 *index;
-
- if (sub->id != 0 || sub->datalen < 4) {
- /* not a valid BSS profile */
- continue;
- }
-
- if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
- sub->data[1] != 2) {
- /* The first element of the
- * Nontransmitted BSSID Profile is not
- * the Nontransmitted BSSID Capability
- * element.
- */
- continue;
- }
-
- memset(nontransmitted_profile, 0, len);
- profile_len = cfg80211_merge_profile(start, len,
- elem,
- sub,
- nontransmitted_profile,
- len);
-
- /* found a Nontransmitted BSSID Profile */
- index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
- nontransmitted_profile,
- profile_len);
- if (!index || index[1] < 1 || index[2] == 0) {
- /* Invalid MBSSID Index element */
- continue;
- }
-
- cfg80211_gen_new_bssid(bss->transmitted_bss->bssid,
- elem->data[0],
- index[2],
- new_bssid);
- if (ether_addr_equal(new_bssid, bss->bssid)) {
- found = true;
- elems->bssid_index_len = index[1];
- elems->bssid_index = (void *)&index[2];
- break;
- }
- }
- }
-
- return found ? profile_len : 0;
-}
-
-static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems,
- u8 link_id)
-{
- const struct ieee80211_multi_link_elem *ml = elems->ml_basic;
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *sub;
-
- if (!ml || !ml_len)
- return;
-
- if (le16_get_bits(ml->control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
-
- for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
- struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
- ssize_t sta_prof_len;
- u16 control;
-
- if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
- continue;
-
- if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
- sub->datalen))
- return;
-
- control = le16_to_cpu(prof->control);
-
- if (link_id != u16_get_bits(control,
- IEEE80211_MLE_STA_CONTROL_LINK_ID))
- continue;
-
- if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
- return;
-
- /* the sub element can be fragmented */
- sta_prof_len =
- cfg80211_defragment_element(sub,
- (u8 *)ml, ml_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- IEEE80211_MLE_SUBELEM_FRAGMENT);
-
- if (sta_prof_len < 0)
- return;
-
- elems->prof = (void *)elems->scratch_pos;
- elems->sta_prof_len = sta_prof_len;
- elems->scratch_pos += sta_prof_len;
-
- return;
- }
-}
-
-static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems,
- struct ieee80211_elems_parse_params *params)
-{
- struct ieee80211_mle_per_sta_profile *prof;
- struct ieee80211_elems_parse_params sub = {
- .action = params->action,
- .from_ap = params->from_ap,
- .link_id = -1,
- };
- ssize_t ml_len = elems->ml_basic_len;
- const struct element *non_inherit = NULL;
- const u8 *end;
-
- if (params->link_id == -1)
- return;
-
- ml_len = cfg80211_defragment_element(elems->ml_basic_elem,
- elems->ie_start,
- elems->total_len,
- elems->scratch_pos,
- elems->scratch +
- elems->scratch_len -
- elems->scratch_pos,
- WLAN_EID_FRAGMENT);
-
- if (ml_len < 0)
- return;
-
- elems->ml_basic = (const void *)elems->scratch_pos;
- elems->ml_basic_len = ml_len;
-
- ieee80211_mle_get_sta_prof(elems, params->link_id);
- prof = elems->prof;
-
- if (!prof)
- return;
-
- /* check if we have the 4 bytes for the fixed part in assoc response */
- if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
- elems->prof = NULL;
- elems->sta_prof_len = 0;
- return;
- }
-
- /*
- * Skip the capability information and the status code that are expected
- * as part of the station profile in association response frames. Note
- * the -1 is because the 'sta_info_len' is accounted to as part of the
- * per-STA profile, but not part of the 'u8 variable[]' portion.
- */
- sub.start = prof->variable + prof->sta_info_len - 1 + 4;
- end = (const u8 *)prof + elems->sta_prof_len;
- sub.len = end - sub.start;
-
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- sub.start, sub.len);
- _ieee802_11_parse_elems_full(&sub, elems, non_inherit);
-}
-
-struct ieee802_11_elems *
-ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
-{
- struct ieee802_11_elems *elems;
- const struct element *non_inherit = NULL;
- u8 *nontransmitted_profile;
- int nontransmitted_profile_len = 0;
- size_t scratch_len = 3 * params->len;
-
- elems = kzalloc(struct_size(elems, scratch, scratch_len), GFP_ATOMIC);
- if (!elems)
- return NULL;
- elems->ie_start = params->start;
- elems->total_len = params->len;
- elems->scratch_len = scratch_len;
- elems->scratch_pos = elems->scratch;
-
- nontransmitted_profile = elems->scratch_pos;
- nontransmitted_profile_len =
- ieee802_11_find_bssid_profile(params->start, params->len,
- elems, params->bss,
- nontransmitted_profile);
- elems->scratch_pos += nontransmitted_profile_len;
- elems->scratch_len -= nontransmitted_profile_len;
- non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
- nontransmitted_profile,
- nontransmitted_profile_len);
-
- elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit);
-
- /* Override with nontransmitted profile, if found */
- if (nontransmitted_profile_len) {
- struct ieee80211_elems_parse_params sub = {
- .start = nontransmitted_profile,
- .len = nontransmitted_profile_len,
- .action = params->action,
- .link_id = params->link_id,
- };
-
- _ieee802_11_parse_elems_full(&sub, elems, NULL);
- }
-
- ieee80211_mle_parse_link(elems, params);
-
- if (elems->tim && !elems->parse_error) {
- const struct ieee80211_tim_ie *tim_ie = elems->tim;
-
- elems->dtim_period = tim_ie->dtim_period;
- elems->dtim_count = tim_ie->dtim_count;
- }
-
- /* Override DTIM period and count if needed */
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_period))
- elems->dtim_period = elems->bssid_index->dtim_period;
-
- if (elems->bssid_index &&
- elems->bssid_index_len >=
- offsetofend(struct ieee80211_bssid_index, dtim_count))
- elems->dtim_count = elems->bssid_index->dtim_count;
-
- return elems;
-}
-EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full);
-
void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_queue_params
*qparam, int ac)
@@ -1938,37 +1173,34 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
}
}
-u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end)
+static int ieee80211_put_s1g_cap(struct sk_buff *skb,
+ struct ieee80211_sta_s1g_cap *s1g_cap)
{
- if ((end - pos) < 5)
- return pos;
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_s1g_cap))
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = 1 + sizeof(cap);
- *pos++ = WLAN_EID_EXT_HE_6GHZ_CAPA;
- memcpy(pos, &cap, sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_S1G_CAPABILITIES);
+ skb_put_u8(skb, sizeof(struct ieee80211_s1g_cap));
- return pos + 2;
+ skb_put_data(skb, &s1g_cap->cap, sizeof(s1g_cap->cap));
+ skb_put_data(skb, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
+
+ return 0;
}
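
This is the shape all the converted builders share: check skb tailroom up front and return -ENOBUFS, rather than writing through a bare pos pointer and warning on overflow. A userspace sketch of the same append-with-room-check pattern (plain C, toy buffer type):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Userspace sketch of the append pattern: check the remaining room up
 * front and fail with -ENOBUFS, then write ID, length, payload. */
struct toy_buf {
	unsigned char data[64];
	size_t len;
};

static int put_elem(struct toy_buf *b, unsigned char eid,
		    const void *payload, size_t plen)
{
	if (sizeof(b->data) - b->len < 2 + plen)
		return -ENOBUFS;	/* caller decides how to react */

	b->data[b->len++] = eid;
	b->data[b->len++] = (unsigned char)plen;
	memcpy(&b->data[b->len], payload, plen);
	b->len += plen;
	return 0;
}

int main(void)
{
	struct toy_buf b = { .len = 0 };
	unsigned char cap[2] = { 0x01, 0x02 };

	/* 217 is WLAN_EID_S1G_CAPABILITIES */
	printf("ret=%d len=%zu\n", put_elem(&b, 217, cap, sizeof(cap)), b.len);
	return 0;
}
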
-static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
- u8 *buffer, size_t buffer_len,
- const u8 *ie, size_t ie_len,
- enum nl80211_band band,
- u32 rate_mask,
- struct cfg80211_chan_def *chandef,
- size_t *offset, u32 flags)
+static int ieee80211_put_preq_ies_band(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const u8 *ie, size_t ie_len,
+ size_t *offset,
+ enum nl80211_band band,
+ u32 rate_mask,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
- u8 *pos = buffer, *end = buffer + buffer_len;
+ int i, err;
size_t noffset;
- int supp_rates_len, i;
- u8 rates[32];
- int num_rates;
- int ext_rates_len;
u32 rate_flags;
bool have_80mhz = false;
@@ -1981,32 +1213,13 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
rate_flags = ieee80211_chandef_rate_flags(chandef);
/* For direct scan add S1G IE and consider its override bits */
- if (band == NL80211_BAND_S1GHZ) {
- if (end - pos < 2 + sizeof(struct ieee80211_s1g_cap))
- goto out_err;
- pos = ieee80211_ie_build_s1g_cap(pos, &sband->s1g_cap);
- goto done;
- }
+ if (band == NL80211_BAND_S1GHZ)
+ return ieee80211_put_s1g_cap(skb, &sband->s1g_cap);
- num_rates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if ((BIT(i) & rate_mask) == 0)
- continue; /* skip rate */
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
-
- rates[num_rates++] =
- (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- }
-
- supp_rates_len = min_t(int, num_rates, 8);
-
- if (end - pos < 2 + supp_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
- memcpy(pos, rates, supp_rates_len);
- pos += supp_rates_len;
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags, 0,
+ WLAN_EID_SUPP_RATES);
+ if (err)
+ return err;
/* insert "request information" if in custom IEs */
if (ie && ie_len) {
@@ -2019,34 +1232,28 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
before_extrates,
ARRAY_SIZE(before_extrates),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- ext_rates_len = num_rates - supp_rates_len;
- if (ext_rates_len > 0) {
- if (end - pos < 2 + ext_rates_len)
- goto out_err;
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = ext_rates_len;
- memcpy(pos, rates + supp_rates_len, ext_rates_len);
- pos += ext_rates_len;
- }
+ err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags, 0,
+ WLAN_EID_EXT_SUPP_RATES);
+ if (err)
+ return err;
if (chandef->chan && sband->band == NL80211_BAND_2GHZ) {
- if (end - pos < 3)
- goto out_err;
- *pos++ = WLAN_EID_DS_PARAMS;
- *pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(
- chandef->chan->center_freq);
+ if (skb_tailroom(skb) < 3)
+ return -ENOBUFS;
+ skb_put_u8(skb, WLAN_EID_DS_PARAMS);
+ skb_put_u8(skb, 1);
+ skb_put_u8(skb,
+ ieee80211_frequency_to_channel(chandef->chan->center_freq));
}
if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT)
- goto done;
+ return 0;
/* insert custom IEs that go before HT */
if (ie && ie_len) {
@@ -2061,18 +1268,21 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_ht, ARRAY_SIZE(before_ht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
if (sband->ht_cap.ht_supported) {
- if (end - pos < 2 + sizeof(struct ieee80211_ht_cap))
- goto out_err;
- pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
- sband->ht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+ ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+ sband->ht_cap.cap);
}
/* insert custom IEs that go before VHT */
@@ -2093,10 +1303,9 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_vht, ARRAY_SIZE(before_vht),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
@@ -2111,10 +1320,14 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
}
if (sband->vht_cap.vht_supported && have_80mhz) {
- if (end - pos < 2 + sizeof(struct ieee80211_vht_cap))
- goto out_err;
- pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
- sband->vht_cap.cap);
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
+ return -ENOBUFS;
+
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
+ ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
}
/* insert custom IEs that go before HE */
@@ -2131,107 +1344,128 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata,
noffset = ieee80211_ie_split(ie, ie_len,
before_he, ARRAY_SIZE(before_he),
*offset);
- if (end - pos < noffset - *offset)
- goto out_err;
- memcpy(pos, ie + *offset, noffset - *offset);
- pos += noffset - *offset;
+ if (skb_tailroom(skb) < noffset - *offset)
+ return -ENOBUFS;
+ skb_put_data(skb, ie + *offset, noffset - *offset);
*offset = noffset;
}
- he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
- if (he_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE)) {
- pos = ieee80211_ie_build_he_cap(0, pos, he_cap, end);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_he_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
-
- if (eht_cap &&
- cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
+ if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE |
IEEE80211_CHAN_NO_EHT)) {
- pos = ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, end,
- sdata->vif.type == NL80211_IFTYPE_AP);
- if (!pos)
- goto out_err;
+ err = ieee80211_put_eht_cap(skb, sdata, sband, NULL);
+ if (err)
+ return err;
}
- if (cfg80211_any_usable_channels(local->hw.wiphy,
- BIT(NL80211_BAND_6GHZ),
- IEEE80211_CHAN_NO_HE)) {
- struct ieee80211_supported_band *sband6;
-
- sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
- he_cap = ieee80211_get_he_iftype_cap_vif(sband6, &sdata->vif);
-
- if (he_cap) {
- enum nl80211_iftype iftype =
- ieee80211_vif_type_p2p(&sdata->vif);
- __le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype);
-
- pos = ieee80211_write_he_6ghz_cap(pos, cap, end);
- }
- }
+ err = ieee80211_put_he_6ghz_cap(skb, sdata, IEEE80211_SMPS_OFF);
+ if (err)
+ return err;
/*
* If adding more here, adjust code in main.c
* that calculates local->scan_ies_len.
*/
- return pos - buffer;
- out_err:
- WARN_ONCE(1, "not enough space for preq IEs\n");
- done:
- return pos - buffer;
+ return 0;
}
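Editorial note: the conversion in this hunk repeats one pattern for every element it writes — check skb_tailroom() before touching the skb, because skb_put()/skb_put_data() have no error return and panic (skb_over_panic()) on overflow. A minimal sketch of the pattern, with elem/elem_len as illustrative names:

	/* check first, then write: the tailroom test turns a too-small
	 * buffer into a clean -ENOBUFS instead of a kernel panic
	 */
	if (skb_tailroom(skb) < elem_len)
		return -ENOBUFS;
	skb_put_data(skb, elem, elem_len);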
-int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
- size_t buffer_len,
- struct ieee80211_scan_ies *ie_desc,
- const u8 *ie, size_t ie_len,
- u8 bands_used, u32 *rate_masks,
- struct cfg80211_chan_def *chandef,
- u32 flags)
+static int ieee80211_put_preq_ies(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
{
- size_t pos = 0, old_pos = 0, custom_ie_offset = 0;
- int i;
+ size_t custom_ie_offset = 0;
+ int i, err;
memset(ie_desc, 0, sizeof(*ie_desc));
for (i = 0; i < NUM_NL80211_BANDS; i++) {
if (bands_used & BIT(i)) {
- pos += ieee80211_build_preq_ies_band(sdata,
- buffer + pos,
- buffer_len - pos,
- ie, ie_len, i,
- rate_masks[i],
- chandef,
- &custom_ie_offset,
- flags);
- ie_desc->ies[i] = buffer + old_pos;
- ie_desc->len[i] = pos - old_pos;
- old_pos = pos;
+ ie_desc->ies[i] = skb_tail_pointer(skb);
+ err = ieee80211_put_preq_ies_band(skb, sdata,
+ ie, ie_len,
+ &custom_ie_offset,
+ i, rate_masks[i],
+ chandef, flags);
+ if (err)
+ return err;
+ ie_desc->len[i] = skb_tail_pointer(skb) -
+ ie_desc->ies[i];
}
}
/* add any remaining custom IEs */
if (ie && ie_len) {
- if (WARN_ONCE(buffer_len - pos < ie_len - custom_ie_offset,
+ if (WARN_ONCE(skb_tailroom(skb) < ie_len - custom_ie_offset,
"not enough space for preq custom IEs\n"))
- return pos;
- memcpy(buffer + pos, ie + custom_ie_offset,
- ie_len - custom_ie_offset);
- ie_desc->common_ies = buffer + pos;
- ie_desc->common_ie_len = ie_len - custom_ie_offset;
- pos += ie_len - custom_ie_offset;
+ return -ENOBUFS;
+ ie_desc->common_ies = skb_tail_pointer(skb);
+ skb_put_data(skb, ie + custom_ie_offset,
+ ie_len - custom_ie_offset);
+ ie_desc->common_ie_len = skb_tail_pointer(skb) -
+ ie_desc->common_ies;
}
- return pos;
+ return 0;
}
+int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
+ size_t buffer_len,
+ struct ieee80211_scan_ies *ie_desc,
+ const u8 *ie, size_t ie_len,
+ u8 bands_used, u32 *rate_masks,
+ struct cfg80211_chan_def *chandef,
+ u32 flags)
+{
+ struct sk_buff *skb = alloc_skb(buffer_len, GFP_KERNEL);
+ uintptr_t offs;
+ int ret, i;
+ u8 *start;
+
+ if (!skb)
+ return -ENOMEM;
+
+ start = skb_tail_pointer(skb);
+ memset(start, 0, skb_tailroom(skb));
+ ret = ieee80211_put_preq_ies(skb, sdata, ie_desc, ie, ie_len,
+ bands_used, rate_masks, chandef,
+ flags);
+ if (ret < 0)
+ goto out;
+
+ if (skb->len > buffer_len) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ memcpy(buffer, start, skb->len);
+
+ /* adjust ie_desc for copy */
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ offs = ie_desc->ies[i] - start;
+ ie_desc->ies[i] = buffer + offs;
+ }
+ offs = ie_desc->common_ies - start;
+ ie_desc->common_ies = buffer + offs;
+
+ ret = skb->len;
+out:
+ consume_skb(skb);
+ return ret;
+}
+
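Editorial note: the wrapper above keeps the legacy flat-buffer signature but gains two failure modes — alloc_skb() returning NULL, and the built IEs overflowing buffer_len. A hedged sketch of the caller-side check (variable names illustrative, not from this patch):

	ies_len = ieee80211_build_preq_ies(sdata, buf, buf_len, &ie_desc,
					   ie, ie_len, bands, rate_masks,
					   &chandef, flags);
	if (ies_len < 0)	/* -ENOMEM or -ENOBUFS are now possible */
		return ies_len;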
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
const u8 *src, const u8 *dst,
u32 ratemask,
@@ -2244,7 +1478,6 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
struct cfg80211_chan_def chandef;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- int ies_len;
u32 rate_masks[NUM_NL80211_BANDS] = {};
struct ieee80211_scan_ies dummy_ie_desc;
@@ -2253,7 +1486,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* in order to maximize the chance that we get a response. Some
* badly-behaved APs don't respond when this parameter is included.
*/
- chandef.width = sdata->vif.bss_conf.chandef.width;
+ chandef.width = sdata->vif.bss_conf.chanreq.oper.width;
if (flags & IEEE80211_PROBE_FLAG_DIRECTED)
chandef.chan = NULL;
else
@@ -2265,11 +1498,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
return NULL;
rate_masks[chan->band] = ratemask;
- ies_len = ieee80211_build_preq_ies(sdata, skb_tail_pointer(skb),
- skb_tailroom(skb), &dummy_ie_desc,
- ie, ie_len, BIT(chan->band),
- rate_masks, &chandef, flags);
- skb_put(skb, ies_len);
+ ieee80211_put_preq_ies(skb, sdata, &dummy_ie_desc,
+ ie, ie_len, BIT(chan->band),
+ rate_masks, &chandef, flags);
if (dst) {
mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -2295,7 +1526,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sband))
return 1;
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);
num_rates = sband->n_bitrates;
supp_rates = 0;
@@ -2416,9 +1648,6 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!local->use_chanctx)
- return;
-
conf = rcu_dereference_protected(link->conf->chanctx_conf,
lockdep_is_held(&local->hw.wiphy->mtx));
if (conf) {
@@ -2648,20 +1877,20 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
/* add channel contexts */
- if (local->use_chanctx) {
- list_for_each_entry(ctx, &local->chanctx_list, list)
- if (ctx->replace_state !=
- IEEE80211_CHANCTX_REPLACES_OTHER)
- WARN_ON(drv_add_chanctx(local, ctx));
-
- sdata = wiphy_dereference(local->hw.wiphy,
- local->monitor_sdata);
- if (sdata && ieee80211_sdata_running(sdata))
- ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
- }
+ list_for_each_entry(ctx, &local->chanctx_list, list)
+ if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+ WARN_ON(drv_add_chanctx(local, ctx));
+
+ sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
+ if (sdata && ieee80211_sdata_running(sdata))
+ ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
/* reconfigure hardware */
- ieee80211_hw_config(local, ~0);
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
+ IEEE80211_CONF_CHANGE_MONITOR |
+ IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS |
+ IEEE80211_CONF_CHANGE_IDLE);
ieee80211_configure_filter(local);
@@ -2756,9 +1985,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
sdata->vif.bss_conf.protected_keep_alive)
changed |= BSS_CHANGED_KEEP_ALIVE;
- if (sdata->vif.bss_conf.eht_puncturing)
- changed |= BSS_CHANGED_EHT_PUNCTURING;
-
ieee80211_bss_info_change_notify(sdata,
changed);
} else if (!WARN_ON(!link)) {
@@ -3109,21 +2335,6 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
return pos;
}
-u8 *ieee80211_ie_build_s1g_cap(u8 *pos, struct ieee80211_sta_s1g_cap *s1g_cap)
-{
- *pos++ = WLAN_EID_S1G_CAPABILITIES;
- *pos++ = sizeof(struct ieee80211_s1g_cap);
- memset(pos, 0, sizeof(struct ieee80211_s1g_cap));
-
- memcpy(pos, &s1g_cap->cap, sizeof(s1g_cap->cap));
- pos += sizeof(s1g_cap->cap);
-
- memcpy(pos, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));
- pos += sizeof(s1g_cap->nss_mcs);
-
- return pos;
-}
-
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 cap)
{
@@ -3180,7 +2391,8 @@ u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
return pos;
}
-u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_he_cap() will need */
+u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_supported_band *sband;
@@ -3190,7 +2402,7 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap)
return 0;
@@ -3201,38 +2413,75 @@ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
he_cap->he_cap_elem.phy_cap_info);
}
-u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
+static void
+ieee80211_get_adjusted_he_cap(const struct ieee80211_conn_settings *conn,
const struct ieee80211_sta_he_cap *he_cap,
- u8 *end)
+ struct ieee80211_he_cap_elem *elem)
{
- struct ieee80211_he_cap_elem elem;
- u8 n;
- u8 ie_len;
- u8 *orig_pos = pos;
+ u8 ru_limit, max_ru;
- /* Make sure we have place for the IE */
- /*
- * TODO: the 1 added is because this temporarily is under the EXTENSION
- * IE. Get rid of it when it moves.
- */
- if (!he_cap)
- return orig_pos;
+ *elem = he_cap->he_cap_elem;
- /* modify on stack first to calculate 'n' and 'ie_len' correctly */
- elem = he_cap->he_cap_elem;
+ switch (conn->bw_limit) {
+ case IEEE80211_CONN_BW_LIMIT_20:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_40:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ break;
+ case IEEE80211_CONN_BW_LIMIT_80:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ break;
+ default:
+ ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996;
+ break;
+ }
+
+ max_ru = elem->phy_cap_info[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ max_ru = min(max_ru, ru_limit);
+ elem->phy_cap_info[8] &= ~IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK;
+ elem->phy_cap_info[8] |= max_ru;
- if (disable_flags & IEEE80211_CONN_DISABLE_40MHZ)
- elem.phy_cap_info[0] &=
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) {
+ elem->phy_cap_info[0] &=
~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G);
+ elem->phy_cap_info[9] &=
+ ~IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM;
+ }
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ elem->phy_cap_info[0] &=
+ ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G);
+ elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[7] &=
+ ~(IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ);
+ }
+}
- if (disable_flags & IEEE80211_CONN_DISABLE_160MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+int ieee80211_put_he_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
+{
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_he_cap_elem elem;
+ u8 *len;
+ u8 n;
+ u8 ie_len;
- if (disable_flags & IEEE80211_CONN_DISABLE_80P80MHZ)
- elem.phy_cap_info[0] &=
- ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ if (!he_cap)
+ return 0;
+
+ /* modify on stack first to calculate 'n' and 'ie_len' correctly */
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &elem);
n = ieee80211_he_mcs_nss_size(&elem);
ie_len = 2 + 1 +
@@ -3240,19 +2489,17 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
he_cap->he_cap_elem.phy_cap_info);
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- pos++; /* We'll set the size later below */
- *pos++ = WLAN_EID_EXT_HE_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ len = skb_put(skb, 1); /* We'll set the size later below */
+ skb_put_u8(skb, WLAN_EID_EXT_HE_CAPABILITY);
/* Fixed data */
- memcpy(pos, &elem, sizeof(elem));
- pos += sizeof(elem);
+ skb_put_data(skb, &elem, sizeof(elem));
- memcpy(pos, &he_cap->he_mcs_nss_supp, n);
- pos += n;
+ skb_put_data(skb, &he_cap->he_mcs_nss_supp, n);
/* Check if PPE Threshold should be present */
if ((he_cap->he_cap_elem.phy_cap_info[6] &
@@ -3276,41 +2523,39 @@ u8 *ieee80211_ie_build_he_cap(ieee80211_conn_flags_t disable_flags, u8 *pos,
n = DIV_ROUND_UP(n, 8);
/* Copy PPE Thresholds */
- memcpy(pos, &he_cap->ppe_thres, n);
- pos += n;
+ skb_put_data(skb, &he_cap->ppe_thres, n);
end:
- orig_pos[1] = (pos - orig_pos) - 2;
- return pos;
+ *len = skb_tail_pointer(skb) - len - 1;
+ return 0;
}
-void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
- enum ieee80211_smps_mode smps_mode,
- struct sk_buff *skb)
+int ieee80211_put_he_6ghz_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
{
struct ieee80211_supported_band *sband;
const struct ieee80211_sband_iftype_data *iftd;
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- u8 *pos;
- u16 cap;
+ __le16 cap;
if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy,
BIT(NL80211_BAND_6GHZ),
IEEE80211_CHAN_NO_HE))
- return;
+ return 0;
sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ];
iftd = ieee80211_get_sband_iftype_data(sband, iftype);
if (!iftd)
- return;
+ return 0;
/* Check for device HE 6 GHz capability before adding element */
if (!iftd->he_6ghz_capa.capa)
- return;
+ return 0;
- cap = le16_to_cpu(iftd->he_6ghz_capa.capa);
- cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS;
+ cap = iftd->he_6ghz_capa.capa;
+ cap &= cpu_to_le16(~IEEE80211_HE_6GHZ_CAP_SM_PS);
switch (smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
@@ -3318,22 +2563,27 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
WARN_ON(1);
fallthrough;
case IEEE80211_SMPS_OFF:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_STATIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
case IEEE80211_SMPS_DYNAMIC:
- cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
- IEEE80211_HE_6GHZ_CAP_SM_PS);
+ cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
break;
}
- pos = skb_put(skb, 2 + 1 + sizeof(cap));
- ieee80211_write_he_6ghz_cap(pos, cpu_to_le16(cap),
- pos + 2 + 1 + sizeof(cap));
+ if (skb_tailroom(skb) < 2 + 1 + sizeof(cap))
+ return -ENOBUFS;
+
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, 1 + sizeof(cap));
+ skb_put_u8(skb, WLAN_EID_EXT_HE_6GHZ_CAPA);
+ skb_put_data(skb, &cap, sizeof(cap));
+ return 0;
}
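Editorial note: the bytes emitted above follow the generic Element ID Extension layout (ID 255, length, extension ID, payload). A minimal sketch of that layout as a hypothetical helper — put_ext_elem() is not a mac80211 API, it only names the pattern:

static int put_ext_elem(struct sk_buff *skb, u8 ext_id,
			const void *payload, u8 payload_len)
{
	if (skb_tailroom(skb) < 2 + 1 + payload_len)
		return -ENOBUFS;

	skb_put_u8(skb, WLAN_EID_EXTENSION);
	skb_put_u8(skb, 1 + payload_len);	/* length covers ext ID + payload */
	skb_put_u8(skb, ext_id);
	skb_put_data(skb, payload, payload_len);
	return 0;
}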
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
@@ -3785,7 +3035,6 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
}
void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
- bool support_160, bool support_320,
struct cfg80211_chan_def *chandef)
{
chandef->center_freq1 =
@@ -3804,90 +3053,38 @@ void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info,
chandef->width = NL80211_CHAN_WIDTH_80;
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ:
- if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
- }
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
case IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ:
- if (support_320) {
- chandef->width = NL80211_CHAN_WIDTH_320;
- chandef->center_freq1 =
- ieee80211_channel_to_frequency(info->ccfs1,
- chandef->chan->band);
- } else if (support_160) {
- chandef->width = NL80211_CHAN_WIDTH_160;
- } else {
- chandef->width = NL80211_CHAN_WIDTH_80;
-
- if (chandef->center_freq1 > chandef->chan->center_freq)
- chandef->center_freq1 -= 40;
- else
- chandef->center_freq1 += 40;
- }
+ chandef->width = NL80211_CHAN_WIDTH_320;
+ chandef->center_freq1 =
+ ieee80211_channel_to_frequency(info->ccfs1,
+ chandef->chan->band);
break;
}
}
-bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
+bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local,
const struct ieee80211_he_operation *he_oper,
const struct ieee80211_eht_operation *eht_oper,
struct cfg80211_chan_def *chandef)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
- const struct ieee80211_sta_he_cap *he_cap;
- const struct ieee80211_sta_eht_cap *eht_cap;
struct cfg80211_chan_def he_chandef = *chandef;
const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- bool support_80_80, support_160, support_320;
- u8 he_phy_cap, eht_phy_cap;
u32 freq;
if (chandef->chan->band != NL80211_BAND_6GHZ)
return true;
- sband = local->hw.wiphy->bands[NL80211_BAND_6GHZ];
-
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- if (!he_cap) {
- sdata_info(sdata, "Missing iftype sband data/HE cap");
- return false;
- }
-
- he_phy_cap = he_cap->he_cap_elem.phy_cap_info[0];
- support_160 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- support_80_80 =
- he_phy_cap &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
-
- if (!he_oper) {
- sdata_info(sdata,
- "HE is not advertised on (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
+ if (!he_oper)
return false;
- }
-
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
- if (!eht_cap)
- eht_oper = NULL;
he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper) {
- sdata_info(sdata,
- "HE 6GHz operation missing (on %d MHz), expect issues\n",
- chandef->chan->center_freq);
+ if (!he_6ghz_oper)
return false;
- }
/*
* The EHT operation IE does not contain the primary channel so the
@@ -3896,20 +3093,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
*/
freq = ieee80211_channel_to_frequency(he_6ghz_oper->primary,
NL80211_BAND_6GHZ);
- he_chandef.chan = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+ he_chandef.chan = ieee80211_get_channel(local->hw.wiphy, freq);
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- bss_conf->power_type = IEEE80211_REG_LPI_AP;
- break;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- bss_conf->power_type = IEEE80211_REG_SP_AP;
- break;
- default:
- bss_conf->power_type = IEEE80211_REG_UNSET_AP;
- break;
- }
+ if (!he_chandef.chan)
+ return false;
if (!eht_oper ||
!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) {
@@ -3928,13 +3115,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.width = NL80211_CHAN_WIDTH_80;
if (!he_6ghz_oper->ccfs1)
break;
- if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) {
- if (support_160)
- he_chandef.width = NL80211_CHAN_WIDTH_160;
- } else {
- if (support_80_80)
- he_chandef.width = NL80211_CHAN_WIDTH_80P80;
- }
+ if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8)
+ he_chandef.width = NL80211_CHAN_WIDTH_160;
+ else
+ he_chandef.width = NL80211_CHAN_WIDTH_80P80;
break;
}
@@ -3946,30 +3130,17 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.center_freq1 =
ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
NL80211_BAND_6GHZ);
- if (support_80_80 || support_160)
- he_chandef.center_freq2 =
- ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
- NL80211_BAND_6GHZ);
+ he_chandef.center_freq2 =
+ ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
+ NL80211_BAND_6GHZ);
}
} else {
- eht_phy_cap = eht_cap->eht_cap_elem.phy_cap_info[0];
- support_320 =
- eht_phy_cap & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
-
ieee80211_chandef_eht_oper((const void *)eht_oper->optional,
- support_160, support_320,
&he_chandef);
}
- if (!cfg80211_chandef_valid(&he_chandef)) {
- sdata_info(sdata,
- "HE 6GHz operation resulted in invalid chandef: %d MHz/%d/%d MHz/%d MHz\n",
- he_chandef.chan ? he_chandef.chan->center_freq : 0,
- he_chandef.width,
- he_chandef.center_freq1,
- he_chandef.center_freq2);
+ if (!cfg80211_chandef_valid(&he_chandef))
return false;
- }
*chandef = he_chandef;
@@ -4012,121 +3183,62 @@ bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
return true;
}
-int ieee80211_parse_bitrates(enum nl80211_chan_width width,
- const struct ieee80211_supported_band *sband,
- const u8 *srates, int srates_len, u32 *rates)
-{
- u32 rate_flags = ieee80211_chanwidth_rate_flags(width);
- struct ieee80211_rate *br;
- int brate, rate, i, j, count = 0;
-
- *rates = 0;
-
- for (i = 0; i < srates_len; i++) {
- rate = srates[i] & 0x7f;
-
- for (j = 0; j < sband->n_bitrates; j++) {
- br = &sband->bitrates[j];
- if ((rate_flags & br->flags) != rate_flags)
- continue;
-
- brate = DIV_ROUND_UP(br->bitrate, 5);
- if (brate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
- }
- return count;
-}
-
-int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
+int ieee80211_put_srates_elem(struct sk_buff *skb,
+ const struct ieee80211_supported_band *sband,
+ u32 basic_rates, u32 rate_flags, u32 masked_rates,
+ u8 element_id)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, rates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
+ u8 i, rates, skip;
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
- sband = local->hw.wiphy->bands[band];
rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
+ if (masked_rates & BIT(i))
+ continue;
rates++;
}
- if (rates > 8)
- rates = 8;
-
- if (skb_tailroom(skb) < rates + 2)
- return -ENOMEM;
-
- pos = skb_put(skb, rates + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = rates;
- for (i = 0; i < rates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
- continue;
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
+ if (element_id == WLAN_EID_SUPP_RATES) {
+ rates = min_t(u8, rates, 8);
+ skip = 0;
+ } else {
+ skip = 8;
+ if (rates <= skip)
+ return 0;
+ rates -= skip;
}
- return 0;
-}
+ if (skb_tailroom(skb) < rates + 2)
+ return -ENOBUFS;
-int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, bool need_basic,
- enum nl80211_band band)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- int rate;
- u8 i, exrates, *pos;
- u32 basic_rates = sdata->vif.bss_conf.basic_rates;
- u32 rate_flags;
+ skb_put_u8(skb, element_id);
+ skb_put_u8(skb, rates);
- rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates && rates; i++) {
+ int rate;
+ u8 basic;
- sband = local->hw.wiphy->bands[band];
- exrates = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
- exrates++;
- }
+ if (masked_rates & BIT(i))
+ continue;
- if (exrates > 8)
- exrates -= 8;
- else
- exrates = 0;
+ if (skip > 0) {
+ skip--;
+ continue;
+ }
- if (skb_tailroom(skb) < exrates + 2)
- return -ENOMEM;
+ basic = basic_rates & BIT(i) ? 0x80 : 0;
- if (exrates) {
- pos = skb_put(skb, exrates + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = exrates;
- for (i = 8; i < sband->n_bitrates; i++) {
- u8 basic = 0;
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
- if (need_basic && basic_rates & BIT(i))
- basic = 0x80;
- rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
- *pos++ = basic | (u8) rate;
- }
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5);
+ skb_put_u8(skb, basic | (u8)rate);
+ rates--;
}
+
+ WARN(rates > 0, "rates confused: rates:%d, element:%d\n",
+ rates, element_id);
+
return 0;
}
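Editorial note: the unified helper caps WLAN_EID_SUPP_RATES at eight rates and makes WLAN_EID_EXT_SUPP_RATES skip the first eight (returning 0 when nothing remains), so a caller emits the pair back to back. A hedged usage sketch:

	err = ieee80211_put_srates_elem(skb, sband, basic_rates, rate_flags,
					masked_rates, WLAN_EID_SUPP_RATES);
	if (err)
		return err;

	err = ieee80211_put_srates_elem(skb, sband, basic_rates, rate_flags,
					masked_rates, WLAN_EID_EXT_SUPP_RATES);
	if (err)
		return err;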
@@ -4338,7 +3450,7 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
&sdata->deflink.dfs_cac_timer_work);
if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chandef;
+ chandef = sdata->vif.bss_conf.chanreq.oper;
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev,
&chandef,
@@ -4386,78 +3498,92 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_radar_detected);
-ieee80211_conn_flags_t ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
+void ieee80211_chandef_downgrade(struct cfg80211_chan_def *c,
+ struct ieee80211_conn_settings *conn)
{
- ieee80211_conn_flags_t ret;
- int tmp;
+ enum nl80211_chan_width new_primary_width;
+ struct ieee80211_conn_settings _ignored = {};
+
+ /* allow passing NULL if caller doesn't care */
+ if (!conn)
+ conn = &_ignored;
+
+again:
+ /* no-HT indicates nothing to do */
+ new_primary_width = NL80211_CHAN_WIDTH_20_NOHT;
switch (c->width) {
+ default:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ WARN_ON_ONCE(1);
+ fallthrough;
case NL80211_CHAN_WIDTH_20:
c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_40:
c->width = NL80211_CHAN_WIDTH_20;
c->center_freq1 = c->chan->center_freq;
- ret = IEEE80211_CONN_DISABLE_40MHZ |
- IEEE80211_CONN_DISABLE_VHT;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ c->punctured = 0;
break;
case NL80211_CHAN_WIDTH_80:
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
- c->width = NL80211_CHAN_WIDTH_40;
- ret = IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_40;
+ if (conn->mode == IEEE80211_CONN_MODE_VHT)
+ conn->mode = IEEE80211_CONN_MODE_HT;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_40;
break;
case NL80211_CHAN_WIDTH_80P80:
c->center_freq2 = 0;
c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_160:
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P80 */
- tmp /= 4;
- c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
- c->width = NL80211_CHAN_WIDTH_80;
- ret = IEEE80211_CONN_DISABLE_80P80MHZ |
- IEEE80211_CONN_DISABLE_160MHZ;
+ new_primary_width = NL80211_CHAN_WIDTH_80;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80;
break;
case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P160 */
- tmp /= 8;
- c->center_freq1 = c->center_freq1 - 80 + 160 * tmp;
- c->width = NL80211_CHAN_WIDTH_160;
- ret = IEEE80211_CONN_DISABLE_320MHZ;
- break;
- default:
- case NL80211_CHAN_WIDTH_20_NOHT:
- WARN_ON_ONCE(1);
- c->width = NL80211_CHAN_WIDTH_20_NOHT;
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ new_primary_width = NL80211_CHAN_WIDTH_160;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160;
break;
case NL80211_CHAN_WIDTH_1:
case NL80211_CHAN_WIDTH_2:
case NL80211_CHAN_WIDTH_4:
case NL80211_CHAN_WIDTH_8:
case NL80211_CHAN_WIDTH_16:
+ WARN_ON_ONCE(1);
+ /* keep c->width */
+ conn->mode = IEEE80211_CONN_MODE_S1G;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
+ break;
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
WARN_ON_ONCE(1);
/* keep c->width */
- ret = IEEE80211_CONN_DISABLE_HT | IEEE80211_CONN_DISABLE_VHT;
+ conn->mode = IEEE80211_CONN_MODE_LEGACY;
+ conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20;
break;
}
- WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+ if (new_primary_width != NL80211_CHAN_WIDTH_20_NOHT) {
+ c->center_freq1 = cfg80211_chandef_primary(c, new_primary_width,
+ &c->punctured);
+ c->width = new_primary_width;
+ }
- return ret;
+ /*
+ * With an 80 MHz channel, we might have the puncturing in the primary
+ * 40 MHz channel, but that's not valid when downgraded to 40 MHz width.
+ * In that case, downgrade again.
+ */
+ if (!cfg80211_chandef_valid(c) && c->punctured)
+ goto again;
+
+ WARN_ON_ONCE(!cfg80211_chandef_valid(c));
}
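Editorial note: callers of ieee80211_chandef_downgrade() typically narrow in a loop until the chandef passes whatever check gates the connection. A sketch assuming cfg80211_chandef_usable() is that check (the real gating condition is caller-specific):

	while (!cfg80211_chandef_usable(wiphy, &chandef, prohibited_flags)) {
		if (chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
			return -EINVAL;	/* nothing left to downgrade to */
		ieee80211_chandef_downgrade(&chandef, &conn);
	}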
/*
@@ -4773,7 +3899,7 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list)
if (link->reserved_radar_required)
- radar_detect |= BIT(link->reserved_chandef.width);
+ radar_detect |= BIT(link->reserved.oper.width);
/*
* An in-place reservation context should not have any assigned vifs
@@ -4787,7 +3913,7 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
continue;
radar_detect |=
- BIT(link->conf->chandef.width);
+ BIT(link->conf->chanreq.oper.width);
}
return radar_detect;
@@ -5037,7 +4163,8 @@ u16 ieee80211_encode_usf(int listen_interval)
return (u16) listen_interval;
}
-u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+/* this may return more than ieee80211_put_eht_cap() will need */
+u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata)
{
const struct ieee80211_sta_he_cap *he_cap;
const struct ieee80211_sta_eht_cap *eht_cap;
@@ -5049,13 +4176,12 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
if (!sband)
return 0;
- he_cap = ieee80211_get_he_iftype_cap(sband, iftype);
- eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype);
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
if (!he_cap || !eht_cap)
return 0;
- is_ap = iftype == NL80211_IFTYPE_AP ||
- iftype == NL80211_IFTYPE_P2P_GO;
+ is_ap = sdata->vif.type == NL80211_IFTYPE_AP;
n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
&eht_cap->eht_cap_elem,
@@ -5067,45 +4193,134 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
return 0;
}
-u8 *ieee80211_ie_build_eht_cap(u8 *pos,
- const struct ieee80211_sta_he_cap *he_cap,
- const struct ieee80211_sta_eht_cap *eht_cap,
- u8 *end,
- bool for_ap)
+int ieee80211_put_eht_cap(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_supported_band *sband,
+ const struct ieee80211_conn_settings *conn)
{
+ const struct ieee80211_sta_he_cap *he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif);
+ const struct ieee80211_sta_eht_cap *eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif);
+ bool for_ap = sdata->vif.type == NL80211_IFTYPE_AP;
+ struct ieee80211_eht_cap_elem_fixed fixed;
+ struct ieee80211_he_cap_elem he;
u8 mcs_nss_len, ppet_len;
+ u8 orig_mcs_nss_len;
u8 ie_len;
- u8 *orig_pos = pos;
+
+ if (!conn)
+ conn = &ieee80211_conn_settings_unlimited;
/* Make sure we have place for the IE */
if (!he_cap || !eht_cap)
- return orig_pos;
+ return 0;
+
+ orig_mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ for_ap);
- mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
- &eht_cap->eht_cap_elem,
- for_ap);
+ ieee80211_get_adjusted_he_cap(conn, he_cap, &he);
+
+ fixed = eht_cap->eht_cap_elem;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_80)
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ;
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) {
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ;
+ }
+
+ if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320) {
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+ fixed.phy_cap_info[1] &=
+ ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK;
+ fixed.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ fixed.phy_cap_info[6] &=
+ ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ;
+ }
+
+ if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20)
+ fixed.phy_cap_info[0] &=
+ ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ;
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he, &fixed, for_ap);
ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
- eht_cap->eht_cap_elem.phy_cap_info);
+ fixed.phy_cap_info);
ie_len = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + mcs_nss_len + ppet_len;
- if ((end - pos) < ie_len)
- return orig_pos;
+ if (skb_tailroom(skb) < ie_len)
+ return -ENOBUFS;
- *pos++ = WLAN_EID_EXTENSION;
- *pos++ = ie_len - 2;
- *pos++ = WLAN_EID_EXT_EHT_CAPABILITY;
+ skb_put_u8(skb, WLAN_EID_EXTENSION);
+ skb_put_u8(skb, ie_len - 2);
+ skb_put_u8(skb, WLAN_EID_EXT_EHT_CAPABILITY);
+ skb_put_data(skb, &fixed, sizeof(fixed));
- /* Fixed data */
- memcpy(pos, &eht_cap->eht_cap_elem, sizeof(eht_cap->eht_cap_elem));
- pos += sizeof(eht_cap->eht_cap_elem);
+ if (mcs_nss_len == 4 && orig_mcs_nss_len != 4) {
+ /*
+ * If the (non-AP) STA became 20 MHz only, then convert from
+ * <=80 to 20-MHz-only format, where MCSes are indicated in
+ * the groups 0-7, 8-9, 10-11, 12-13 rather than just 0-9,
+ * 10-11, 12-13. Thus, reuse the 0-9 value for both the 0-7
+ * and 8-9 groups.
+ */
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs11_max_nss);
+ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss);
+ } else {
+ skb_put_data(skb, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
+ }
- memcpy(pos, &eht_cap->eht_mcs_nss_supp, mcs_nss_len);
- pos += mcs_nss_len;
+ if (ppet_len)
+ skb_put_data(skb, &eht_cap->eht_ppe_thres, ppet_len);
- if (ppet_len) {
- memcpy(pos, &eht_cap->eht_ppe_thres, ppet_len);
- pos += ppet_len;
- }
+ return 0;
+}
- return pos;
+const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode)
+{
+ static const char * const modes[] = {
+ [IEEE80211_CONN_MODE_S1G] = "S1G",
+ [IEEE80211_CONN_MODE_LEGACY] = "legacy",
+ [IEEE80211_CONN_MODE_HT] = "HT",
+ [IEEE80211_CONN_MODE_VHT] = "VHT",
+ [IEEE80211_CONN_MODE_HE] = "HE",
+ [IEEE80211_CONN_MODE_EHT] = "EHT",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(modes)))
+ return "<out of range>";
+
+ return modes[mode] ?: "<missing string>";
+}
+
+enum ieee80211_conn_bw_limit
+ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IEEE80211_CONN_BW_LIMIT_20;
+ case NL80211_CHAN_WIDTH_40:
+ return IEEE80211_CONN_BW_LIMIT_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_CONN_BW_LIMIT_80;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return IEEE80211_CONN_BW_LIMIT_160;
+ case NL80211_CHAN_WIDTH_320:
+ return IEEE80211_CONN_BW_LIMIT_320;
+ default:
+ WARN(1, "unhandled chandef width %d\n", chandef->width);
+ return IEEE80211_CONN_BW_LIMIT_20;
+ }
}
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index b3a5c3e96a72..2c475c439ba9 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -369,7 +369,7 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
link_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
if (eht_cap->has_eht &&
- link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
info = eht_cap->eht_cap_elem.phy_cap_info[0];
if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) {
@@ -380,7 +380,7 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta)
info = he_cap->he_cap_elem.phy_cap_info[0];
- if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
ret = IEEE80211_STA_RX_BW_40;
else
@@ -515,7 +515,7 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
if (WARN_ON(!link_conf))
bss_width = NL80211_CHAN_WIDTH_20_NOHT;
else
- bss_width = link_conf->chandef.width;
+ bss_width = link_conf->chanreq.oper.width;
rcu_read_unlock();
bw = ieee80211_sta_cap_rx_bw(link_sta);
diff --git a/net/mac80211/wbrf.c b/net/mac80211/wbrf.c
index a05c5b971789..3a8612309137 100644
--- a/net/mac80211/wbrf.c
+++ b/net/mac80211/wbrf.c
@@ -23,8 +23,6 @@ void ieee80211_check_wbrf_support(struct ieee80211_local *local)
return;
local->wbrf_supported = acpi_amd_wbrf_supported_producer(dev);
- dev_dbg(dev, "WBRF is %s supported\n",
- local->wbrf_supported ? "" : "not");
}
static void get_chan_freq_boundary(u32 center_freq, u32 bandwidth, u64 *start, u64 *end)
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 94dae7cb6dbd..e40529b8c5c9 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -315,7 +315,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
* Calculate AAD for CCMP/GCMP, returning qos_tid since we
* need that in CCMP also for b_0.
*/
-static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
+static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad, bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 mask_fc;
@@ -340,7 +340,14 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
len_a += 6;
if (ieee80211_is_data_qos(hdr->frame_control)) {
- qos_tid = ieee80211_get_tid(hdr);
+ qos_tid = *ieee80211_get_qos_ctl(hdr);
+
+ if (spp_amsdu)
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK |
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ else
+ qos_tid &= IEEE80211_QOS_CTL_TID_MASK;
+
mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
len_a += 2;
} else {
@@ -369,10 +376,11 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
return qos_tid;
}
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- u8 qos_tid = ccmp_gcmp_aad(skb, aad);
+ u8 qos_tid = ccmp_gcmp_aad(skb, aad, spp_amsdu);
/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
* mode authentication are not allowed to collide, yet both are derived
@@ -479,7 +487,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
return 0;
pos += IEEE80211_CCMP_HDR_LEN;
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, mic_len));
}
@@ -557,7 +566,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
u8 aad[2 * AES_BLOCK_SIZE];
u8 b_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad);
+ ccmp_special_blocks(skb, pn, b_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_ccm_decrypt(
key->u.ccmp.tfm, b_0, aad,
@@ -581,7 +591,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
return RX_CONTINUE;
}
-static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
+static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad,
+ bool spp_amsdu)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -591,7 +602,7 @@ static void gcmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *j_0, u8 *aad)
j_0[14] = 0;
j_0[AES_BLOCK_SIZE - 1] = 0x01;
- ccmp_gcmp_aad(skb, aad);
+ ccmp_gcmp_aad(skb, aad, spp_amsdu);
}
static inline void gcmp_pn2hdr(u8 *hdr, const u8 *pn, int key_id)
@@ -680,7 +691,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
pos += IEEE80211_GCMP_HDR_LEN;
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
@@ -753,7 +765,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
u8 aad[2 * AES_BLOCK_SIZE];
u8 j_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- gcmp_special_blocks(skb, pn, j_0, aad);
+ gcmp_special_blocks(skb, pn, j_0, aad,
+ key->conf.flags & IEEE80211_KEY_FLAG_SPP_AMSDU);
if (ieee80211_aes_gcm_decrypt(
key->u.gcmp.tfm, j_0, aad,
diff --git a/net/mctp/Kconfig b/net/mctp/Kconfig
index 3a5c0e70da77..d8d3413a37f7 100644
--- a/net/mctp/Kconfig
+++ b/net/mctp/Kconfig
@@ -14,6 +14,7 @@ menuconfig MCTP
config MCTP_TEST
bool "MCTP core tests" if !KUNIT_ALL_TESTS
+ select MCTP_FLOWS
depends on MCTP=y && KUNIT=y
default KUNIT_ALL_TESTS
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index f6be58b68c6f..de52a9191da0 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -350,30 +350,102 @@ static int mctp_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
}
-static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
+/* helpers for reading/writing the tag ioctl argument, handling compatibility
+ * across the two versions, and some basic API error checking
+ */
+static int mctp_ioctl_tag_copy_from_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ size = sizeof(*ctl);
+ ptr = ctl;
+ } else {
+ size = sizeof(ctl_compat);
+ ptr = &ctl_compat;
+ }
+
+ rc = copy_from_user(ptr, (void __user *)arg, size);
+ if (rc)
+ return -EFAULT;
+
+ if (!tagv2) {
+ /* compat, using defaults for new fields */
+ ctl->net = MCTP_INITIAL_DEFAULT_NET;
+ ctl->peer_addr = ctl_compat.peer_addr;
+ ctl->local_addr = MCTP_ADDR_ANY;
+ ctl->flags = ctl_compat.flags;
+ ctl->tag = ctl_compat.tag;
+ }
+
+ if (ctl->flags)
+ return -EINVAL;
+
+ if (ctl->local_addr != MCTP_ADDR_ANY &&
+ ctl->local_addr != MCTP_ADDR_NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mctp_ioctl_tag_copy_to_user(unsigned long arg,
+ struct mctp_ioc_tag_ctl2 *ctl,
+ bool tagv2)
+{
+ struct mctp_ioc_tag_ctl ctl_compat;
+ unsigned long size;
+ void *ptr;
+ int rc;
+
+ if (tagv2) {
+ ptr = ctl;
+ size = sizeof(*ctl);
+ } else {
+ ctl_compat.peer_addr = ctl->peer_addr;
+ ctl_compat.tag = ctl->tag;
+ ctl_compat.flags = ctl->flags;
+
+ ptr = &ctl_compat;
+ size = sizeof(ctl_compat);
+ }
+
+ rc = copy_to_user((void __user *)arg, ptr, size);
+ if (rc)
+ return -EFAULT;
+
+ return 0;
+}
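Editorial note: a hypothetical userspace sketch of the v2 interface added here, assuming struct mctp_ioc_tag_ctl2 carries exactly the fields handled above (net, peer_addr, local_addr, flags, tag):

#include <sys/ioctl.h>
#include <linux/mctp.h>

/* allocate a tag for a peer on a specific MCTP network */
int alloc_tag(int sd, unsigned int net, mctp_eid_t peer, __u8 *tagp)
{
	struct mctp_ioc_tag_ctl2 ctl = {
		.net = net,
		.peer_addr = peer,
		.local_addr = MCTP_ADDR_ANY,	/* no specific local EID yet */
	};

	if (ioctl(sd, SIOCMCTPALLOCTAG2, &ctl))
		return -1;

	*tagp = ctl.tag;	/* includes OWNER and PREALLOC bits */
	return 0;
}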
+
+static int mctp_ioctl_alloctag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
struct mctp_sk_key *key = NULL;
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags;
u8 tag;
+ int rc;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
if (ctl.tag)
return -EINVAL;
- if (ctl.flags)
- return -EINVAL;
-
- key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,
- true, &tag);
+ key = mctp_alloc_local_tag(msk, ctl.net, MCTP_ADDR_ANY,
+ ctl.peer_addr, true, &tag);
if (IS_ERR(key))
return PTR_ERR(key);
ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
- if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
+ rc = mctp_ioctl_tag_copy_to_user(arg, &ctl, tagv2);
+ if (rc) {
unsigned long fl2;
/* Unwind our key allocation: the keys list lock needs to be
* taken before the individual key locks, and we need a valid
@@ -385,28 +457,27 @@ static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED);
mctp_key_unref(key);
spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
- return -EFAULT;
+ return rc;
}
mctp_key_unref(key);
return 0;
}
-static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
+static int mctp_ioctl_droptag(struct mctp_sock *msk, bool tagv2,
+ unsigned long arg)
{
struct net *net = sock_net(&msk->sk);
- struct mctp_ioc_tag_ctl ctl;
+ struct mctp_ioc_tag_ctl2 ctl;
unsigned long flags, fl2;
struct mctp_sk_key *key;
struct hlist_node *tmp;
int rc;
u8 tag;
- if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
- return -EFAULT;
-
- if (ctl.flags)
- return -EINVAL;
+ rc = mctp_ioctl_tag_copy_from_user(arg, &ctl, tagv2);
+ if (rc)
+ return rc;
/* Must be a local tag, TO set, preallocated */
if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
@@ -422,6 +493,7 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
*/
spin_lock_irqsave(&key->lock, fl2);
if (key->manual_alloc &&
+ ctl.net == key->net &&
ctl.peer_addr == key->peer_addr &&
tag == key->tag) {
__mctp_key_remove(key, net, fl2,
@@ -439,12 +511,17 @@ static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
+ bool tagv2 = false;
switch (cmd) {
+ case SIOCMCTPALLOCTAG2:
case SIOCMCTPALLOCTAG:
- return mctp_ioctl_alloctag(msk, arg);
+ tagv2 = cmd == SIOCMCTPALLOCTAG2;
+ return mctp_ioctl_alloctag(msk, tagv2, arg);
case SIOCMCTPDROPTAG:
- return mctp_ioctl_droptag(msk, arg);
+ case SIOCMCTPDROPTAG2:
+ tagv2 = cmd == SIOCMCTPDROPTAG2;
+ return mctp_ioctl_droptag(msk, tagv2, arg);
}
return -EINVAL;
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 7a47a58aa54b..eefd7834d9a0 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -73,13 +73,50 @@ static struct mctp_sock *mctp_lookup_bind(struct net *net, struct sk_buff *skb)
return NULL;
}
-static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
- mctp_eid_t peer, u8 tag)
+/* A note on the key allocations.
+ *
+ * struct net->mctp.keys contains our set of currently-allocated keys for
+ * MCTP tag management. The lookup tuple for these is the peer EID,
+ * local EID and MCTP tag.
+ *
+ * In some cases, the peer EID may be MCTP_EID_ANY: for example, when a
+ * broadcast message is sent, we may receive responses from any peer EID.
+ * Because the broadcast dest address is equivalent to ANY, we create
+ * a key with (local = local-eid, peer = ANY). This allows a match on the
+ * incoming broadcast responses from any peer.
+ *
+ * We perform lookups when packets are received, and when tags are allocated
+ * in two scenarios:
+ *
+ * - when a packet is sent, with a locally-owned tag: we need to find an
+ * unused tag value for the (local, peer) EID pair.
+ *
+ * - when a tag is manually allocated: we need to find an unused tag value
+ * for the peer EID, but don't have a specific local EID at that stage.
+ *
+ * In the latter case, on successful allocation, we end up with a key with
+ * (local = ANY, peer = peer-eid).
+ *
+ * So, the key set allows both a local EID of ANY and a peer EID of ANY in
+ * the lookup tuple. Both may be ANY if we prealloc for a broadcast.
+ * The matching (in mctp_key_match()) during lookup allows the match value to
+ * be ANY in either the dest or source addresses.
+ *
+ * When allocating (+ inserting) a tag, we need to check for conflicts amongst
+ * the existing tag set. This requires matching either exactly on the local
+ * and peer addresses, or either being ANY.
+ */
+
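Editorial note: the wildcard semantics this note relies on come from mctp_address_matches(); a sketch of its assumed behaviour (the real helper lives in include/net/mctp.h):

/* assumed semantics: the key-side address may be the ANY wildcard */
static bool mctp_address_matches(mctp_eid_t match, mctp_eid_t eid)
{
	return match == eid || match == MCTP_ADDR_ANY;
}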
+static bool mctp_key_match(struct mctp_sk_key *key, unsigned int net,
+ mctp_eid_t local, mctp_eid_t peer, u8 tag)
{
+ if (key->net != net)
+ return false;
+
if (!mctp_address_matches(key->local_addr, local))
return false;
- if (key->peer_addr != peer)
+ if (!mctp_address_matches(key->peer_addr, peer))
return false;
if (key->tag != tag)
@@ -92,7 +129,7 @@ static bool mctp_key_match(struct mctp_sk_key *key, mctp_eid_t local,
* key exists.
*/
static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
- mctp_eid_t peer,
+ unsigned int netid, mctp_eid_t peer,
unsigned long *irqflags)
__acquires(&key->lock)
{
@@ -108,7 +145,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
spin_lock_irqsave(&net->mctp.keys_lock, flags);
hlist_for_each_entry(key, &net->mctp.keys, hlist) {
- if (!mctp_key_match(key, mh->dest, peer, tag))
+ if (!mctp_key_match(key, netid, mh->dest, peer, tag))
continue;
spin_lock(&key->lock);
@@ -131,6 +168,7 @@ static struct mctp_sk_key *mctp_lookup_key(struct net *net, struct sk_buff *skb,
}
static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
+ unsigned int net,
mctp_eid_t local, mctp_eid_t peer,
u8 tag, gfp_t gfp)
{
@@ -140,6 +178,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk,
if (!key)
return NULL;
+ key->net = net;
key->peer_addr = peer;
key->local_addr = local;
key->tag = tag;
@@ -185,8 +224,8 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk)
}
hlist_for_each_entry(tmp, &net->mctp.keys, hlist) {
- if (mctp_key_match(tmp, key->local_addr, key->peer_addr,
- key->tag)) {
+ if (mctp_key_match(tmp, key->net, key->local_addr,
+ key->peer_addr, key->tag)) {
spin_lock(&tmp->lock);
if (tmp->valid)
rc = -EEXIST;
@@ -327,6 +366,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
struct mctp_sock *msk;
struct mctp_hdr *mh;
+ unsigned int netid;
unsigned long f;
u8 tag, flags;
int rc;
@@ -345,6 +385,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* grab header, advance data ptr */
mh = mctp_hdr(skb);
+ netid = mctp_cb(skb)->net;
skb_pull(skb, sizeof(struct mctp_hdr));
if (mh->ver != 1)
@@ -358,7 +399,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
/* lookup socket / reasm context, exactly matching (src,dest,tag).
* we hold a ref on the key, and key->lock held.
*/
- key = mctp_lookup_key(net, skb, mh->src, &f);
+ key = mctp_lookup_key(net, skb, netid, mh->src, &f);
if (flags & MCTP_HDR_FLAG_SOM) {
if (key) {
@@ -368,8 +409,12 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* key lookup to find the socket, but don't use this
* key for reassembly - we'll create a more specific
* one for future packets if required (ie, !EOM).
+ *
+ * this lookup requires key->peer to be MCTP_ADDR_ANY;
+ * it doesn't match just any key->peer.
*/
- any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f);
+ any_key = mctp_lookup_key(net, skb, netid,
+ MCTP_ADDR_ANY, &f);
if (any_key) {
msk = container_of(any_key->sk,
struct mctp_sock, sk);
@@ -406,7 +451,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* packets for this message
*/
if (!key) {
- key = mctp_key_alloc(msk, mh->dest, mh->src,
+ key = mctp_key_alloc(msk, netid, mh->dest, mh->src,
tag, GFP_ATOMIC);
if (!key) {
rc = -ENOMEM;
@@ -596,11 +641,12 @@ static void mctp_reserve_tag(struct net *net, struct mctp_sk_key *key,
refcount_inc(&key->refs);
}
-/* Allocate a locally-owned tag value for (saddr, daddr), and reserve
+/* Allocate a locally-owned tag value for (local, peer), and reserve
* it for the socket msk
*/
struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
- mctp_eid_t daddr, mctp_eid_t saddr,
+ unsigned int netid,
+ mctp_eid_t local, mctp_eid_t peer,
bool manual, u8 *tagp)
{
struct net *net = sock_net(&msk->sk);
@@ -610,11 +656,11 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
u8 tagbits;
/* for NULL destination EIDs, we may get a response from any peer */
- if (daddr == MCTP_ADDR_NULL)
- daddr = MCTP_ADDR_ANY;
+ if (peer == MCTP_ADDR_NULL)
+ peer = MCTP_ADDR_ANY;
/* be optimistic, alloc now */
- key = mctp_key_alloc(msk, saddr, daddr, 0, GFP_KERNEL);
+ key = mctp_key_alloc(msk, netid, local, peer, 0, GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
@@ -631,12 +677,24 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
* lock held, they don't change over the lifetime of the key.
*/
+ /* tags are net-specific */
+ if (tmp->net != netid)
+ continue;
+
/* if we don't own the tag, it can't conflict */
if (tmp->tag & MCTP_HDR_FLAG_TO)
continue;
- if (!(mctp_address_matches(tmp->peer_addr, daddr) &&
- mctp_address_matches(tmp->local_addr, saddr)))
+ /* Since we're avoiding conflicting entries, match peer and
+ * local addresses, including with a wildcard on ANY. See
+ * 'A note on key allocations' for background.
+ */
+ if (peer != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->peer_addr, peer))
+ continue;
+
+ if (local != MCTP_ADDR_ANY &&
+ !mctp_address_matches(tmp->local_addr, local))
continue;
spin_lock(&tmp->lock);
@@ -663,7 +721,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
spin_unlock_irqrestore(&mns->keys_lock, flags);
if (!tagbits) {
- kfree(key);
+ mctp_key_unref(key);
return ERR_PTR(-EBUSY);
}
@@ -671,6 +729,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
}
static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
+ unsigned int netid,
mctp_eid_t daddr,
u8 req_tag, u8 *tagp)
{
@@ -685,6 +744,9 @@ static struct mctp_sk_key *mctp_lookup_prealloc_tag(struct mctp_sock *msk,
spin_lock_irqsave(&mns->keys_lock, flags);
hlist_for_each_entry(tmp, &mns->keys, hlist) {
+ if (tmp->net != netid)
+ continue;
+
if (tmp->tag != req_tag)
continue;
@@ -843,6 +905,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
/* copy message payload */
skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
+ /* we need to copy the extensions, for MCTP flow data */
+ skb_ext_copy(skb2, skb);
+
/* do route */
rc = rt->output(rt, skb2);
if (rc)
@@ -865,6 +930,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct mctp_sk_key *key;
struct mctp_hdr *hdr;
unsigned long flags;
+ unsigned int netid;
unsigned int mtu;
mctp_eid_t saddr;
bool ext_rt;
@@ -888,7 +954,7 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
dev = dev_get_by_index_rcu(sock_net(sk), cb->ifindex);
if (!dev) {
rcu_read_unlock();
- return rc;
+ goto out_free;
}
rt->dev = __mctp_dev_get(dev);
rcu_read_unlock();
@@ -903,7 +969,8 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
rt->mtu = 0;
} else {
- return -EINVAL;
+ rc = -EINVAL;
+ goto out_free;
}
spin_lock_irqsave(&rt->dev->addrs_lock, flags);
@@ -915,16 +982,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
rc = 0;
}
spin_unlock_irqrestore(&rt->dev->addrs_lock, flags);
+ netid = READ_ONCE(rt->dev->net);
if (rc)
goto out_release;
if (req_tag & MCTP_TAG_OWNER) {
if (req_tag & MCTP_TAG_PREALLOC)
- key = mctp_lookup_prealloc_tag(msk, daddr,
+ key = mctp_lookup_prealloc_tag(msk, netid, daddr,
req_tag, &tag);
else
- key = mctp_alloc_local_tag(msk, daddr, saddr,
+ key = mctp_alloc_local_tag(msk, netid, saddr, daddr,
false, &tag);
if (IS_ERR(key)) {
@@ -966,12 +1034,17 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt,
rc = mctp_do_fragment_route(rt, skb, mtu, tag);
}
+ /* route output functions consume the skb, even on error */
+ skb = NULL;
+
out_release:
if (!ext_rt)
mctp_route_release(rt);
mctp_dev_put(tmp_rt.dev);
+out_free:
+ kfree_skb(skb);
return rc;
}
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 92ea4158f7fc..77e5dd422258 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -79,6 +79,16 @@ static void mctp_test_route_destroy(struct kunit *test,
kfree_rcu(&rt->rt, rcu);
}
+static void mctp_test_skb_set_dev(struct sk_buff *skb,
+ struct mctp_test_dev *dev)
+{
+ struct mctp_skb_cb *cb;
+
+ cb = mctp_cb(skb);
+ cb->net = READ_ONCE(dev->mdev->net);
+ skb->dev = dev->ndev;
+}
+
static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
unsigned int data_len)
{
@@ -91,6 +101,7 @@ static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
buf = skb_put(skb, data_len);
@@ -111,6 +122,7 @@ static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
if (!skb)
return NULL;
+ __mctp_cb(skb);
memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
memcpy(skb_put(skb, data_len), data, data_len);
@@ -249,8 +261,6 @@ static void mctp_test_rx_input(struct kunit *test)
skb = mctp_test_create_skb(&params->hdr, 1);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- __mctp_cb(skb);
-
mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);
KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);
@@ -283,7 +293,8 @@ KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
static void __mctp_route_test_init(struct kunit *test,
struct mctp_test_dev **devp,
struct mctp_test_route **rtp,
- struct socket **sockp)
+ struct socket **sockp,
+ unsigned int netid)
{
struct sockaddr_mctp addr = {0};
struct mctp_test_route *rt;
@@ -293,6 +304,8 @@ static void __mctp_route_test_init(struct kunit *test,
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ if (netid != MCTP_NET_ANY)
+ WRITE_ONCE(dev->mdev->net, netid);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -301,7 +314,7 @@ static void __mctp_route_test_init(struct kunit *test,
KUNIT_ASSERT_EQ(test, rc, 0);
addr.smctp_family = AF_MCTP;
- addr.smctp_network = MCTP_NET_ANY;
+ addr.smctp_network = netid;
addr.smctp_addr.s_addr = 8;
addr.smctp_type = 0;
rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
@@ -339,13 +352,12 @@ static void mctp_test_route_input_sk(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
skb = mctp_test_create_skb_data(&params->hdr, &params->type);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -410,15 +422,14 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test)
params = test->param_value;
- __mctp_route_test_init(test, &dev, &rt, &sock);
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
for (i = 0; i < params->n_hdrs; i++) {
c = i;
skb = mctp_test_create_skb_data(&params->hdrs[i], &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
}
@@ -544,6 +555,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
struct mctp_sock *msk;
struct socket *sock;
unsigned long flags;
+ unsigned int net;
int rc;
u8 c;
@@ -551,6 +563,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
dev = mctp_test_create_dev();
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ net = READ_ONCE(dev->mdev->net);
rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
@@ -562,8 +575,9 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
mns = &sock_net(sock->sk)->mctp;
/* set the incoming tag according to test params */
- key = mctp_key_alloc(msk, params->key_local_addr, params->key_peer_addr,
- params->key_tag, GFP_KERNEL);
+ key = mctp_key_alloc(msk, net, params->key_local_addr,
+ params->key_peer_addr, params->key_tag,
+ GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, key);
@@ -576,8 +590,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test)
skb = mctp_test_create_skb_data(&params->hdr, &c);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
- skb->dev = dev->ndev;
- __mctp_cb(skb);
+ mctp_test_skb_set_dev(skb, dev);
rc = mctp_route_input(&rt->rt, skb);
@@ -665,6 +678,373 @@ static void mctp_route_input_sk_keys_to_desc(
KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
mctp_route_input_sk_keys_to_desc);
+struct test_net {
+ unsigned int netid;
+ struct mctp_test_dev *dev;
+ struct mctp_test_route *rt;
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct mctp_sk_key *key;
+ struct {
+ u8 type;
+ unsigned int data;
+ } msg;
+};
+
+static void
+mctp_test_route_input_multiple_nets_bind_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO);
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test,
+ struct test_net *t)
+{
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* Test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sockets' bind()
+ */
+static void mctp_test_route_input_multiple_nets_bind(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ t1.msg.type = 0;
+ t2.msg.type = 0;
+
+ mctp_test_route_input_multiple_nets_bind_init(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_bind_fini(test, &t2);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_init(struct kunit *test,
+ struct test_net *t)
+{
+ struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1));
+ struct mctp_sock *msk;
+ struct netns_mctp *mns;
+ unsigned long flags;
+
+ t->msg.data = t->netid;
+
+ __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
+
+ msk = container_of(t->sock->sk, struct mctp_sock, sk);
+
+ t->key = mctp_key_alloc(msk, t->netid, hdr.dest, hdr.src, 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+
+ mns = &sock_net(t->sock->sk)->mctp;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ mctp_reserve_tag(&init_net, t->key, msk);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
+ t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
+ mctp_test_skb_set_dev(t->skb, t->dev);
+}
+
+static void
+mctp_test_route_input_multiple_nets_key_fini(struct kunit *test,
+ struct test_net *t)
+{
+ mctp_key_unref(t->key);
+ __mctp_route_test_fini(test, t->dev, t->rt, t->sock);
+}
+
+/* Test that skbs from different nets (otherwise identical) get routed to their
+ * corresponding socket via the sk_key
+ */
+static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
+{
+ struct sk_buff *rx_skb1, *rx_skb2;
+ struct test_net t1, t2;
+ int rc;
+
+ t1.netid = 1;
+ t2.netid = 2;
+
+ /* use type 1 which is not bound */
+ t1.msg.type = 1;
+ t2.msg.type = 1;
+
+ mctp_test_route_input_multiple_nets_key_init(test, &t1);
+ mctp_test_route_input_multiple_nets_key_init(test, &t2);
+
+ rc = mctp_route_input(&t1.rt->rt, t1.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+ rc = mctp_route_input(&t2.rt->rt, t2.skb);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
+ KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
+ t1.netid);
+ kfree_skb(rx_skb1);
+
+ rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
+ KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
+ KUNIT_EXPECT_EQ(test,
+ *(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
+ t2.netid);
+ kfree_skb(rx_skb2);
+
+ mctp_test_route_input_multiple_nets_key_fini(test, &t1);
+ mctp_test_route_input_multiple_nets_key_fini(test, &t2);
+}
+
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+
+static void mctp_test_flow_init(struct kunit *test,
+ struct mctp_test_dev **devp,
+ struct mctp_test_route **rtp,
+ struct socket **sock,
+ struct sk_buff **skbp,
+ unsigned int len)
+{
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+
+ /* we have a slightly odd routing setup here; the test route
+ * is for EID 8, which is our local EID. We don't do a routing
+ * lookup, so that's fine - all we require is a path through
+ * mctp_local_output, which will call rt->output on whatever
+ * route we provide
+ */
+ __mctp_route_test_init(test, &dev, &rt, sock, MCTP_NET_ANY);
+
+ /* Assign a single EID. ->addrs is freed on mctp netdev release */
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = 8;
+
+ skb = alloc_skb(len + sizeof(struct mctp_hdr) + 1, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1);
+ memset(skb_put(skb, len), 0, len);
+
+	/* take a ref for the route; we'll decrement it in local output */
+ refcount_inc(&rt->rt.refs);
+
+ *devp = dev;
+ *rtp = rt;
+ *skbp = skb;
+}
+
+static void mctp_test_flow_fini(struct kunit *test,
+ struct mctp_test_dev *dev,
+ struct mctp_test_route *rt,
+ struct socket *sock)
+{
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* Test that an outgoing skb has the correct MCTP extension data set */
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ struct sk_buff *skb, *skb2;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_flow *flow;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 30);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 1);
+
+ skb2 = skb_dequeue(&rt->pkts);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);
+
+ flow = skb_ext_find(skb2, SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow->key);
+ KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk);
+
+ kfree_skb(skb2);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+/* Test that outgoing skbs, after fragmentation, all have the correct MCTP
+ * extension data set.
+ */
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ struct mctp_flow *flows[2];
+ struct sk_buff *tx_skbs[2];
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skb;
+ struct socket *sock;
+ u8 dst = 8;
+ int n, rc;
+
+ mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 100);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ n = rt->pkts.qlen;
+ KUNIT_ASSERT_EQ(test, n, 2);
+
+ /* both resulting packets should have the same flow data */
+ tx_skbs[0] = skb_dequeue(&rt->pkts);
+ tx_skbs[1] = skb_dequeue(&rt->pkts);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]);
+
+ flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);
+ KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk);
+
+ flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);
+ KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);
+
+ kfree_skb(tx_skbs[0]);
+ kfree_skb(tx_skbs[1]);
+ mctp_test_flow_fini(test, dev, rt, sock);
+}
+
+#else
+static void mctp_test_packet_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+
+static void mctp_test_fragment_flow(struct kunit *test)
+{
+ kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
+}
+#endif
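This is the standard KUnit idiom for config-gated tests: the kunit_case table further down stays unconditional, and with the option disabled the bodies compile to an explicit skip, so the cases still appear in the results as skipped rather than vanishing. Generically (CONFIG_FOO and foo_test are placeholders):

#if IS_ENABLED(CONFIG_FOO)
static void foo_test(struct kunit *test)
{
	/* real assertions */
}
#else
static void foo_test(struct kunit *test)
{
	kunit_skip(test, "Requires CONFIG_FOO=y");
}
#endif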
+
+/* Test that outgoing skbs cause a suitable tag to be created */
+static void mctp_test_route_output_key_create(struct kunit *test)
+{
+ const unsigned int netid = 50;
+ const u8 dst = 26, src = 15;
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct mctp_sk_key *key;
+ struct netns_mctp *mns;
+ unsigned long flags;
+ struct socket *sock;
+ struct sk_buff *skb;
+ bool empty, single;
+ const int len = 2;
+ int rc;
+
+ dev = mctp_test_create_dev();
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ WRITE_ONCE(dev->mdev->net, netid);
+
+ rt = mctp_test_create_route(&init_net, dev->mdev, dst, 68);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
+
+ rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
+ dev->mdev->num_addrs = 1;
+ dev->mdev->addrs[0] = src;
+
+ skb = alloc_skb(sizeof(struct mctp_hdr) + 1 + len, GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, skb);
+ __mctp_cb(skb);
+ skb_reserve(skb, sizeof(struct mctp_hdr) + 1 + len);
+ memset(skb_put(skb, len), 0, len);
+
+ refcount_inc(&rt->rt.refs);
+
+ mns = &sock_net(sock->sk)->mctp;
+
+ /* We assume we're starting from an empty keys list, which requires
+ * preceding tests to clean up correctly!
+ */
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ empty = hlist_empty(&mns->keys);
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+ KUNIT_ASSERT_TRUE(test, empty);
+
+ rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
+ KUNIT_ASSERT_EQ(test, rc, 0);
+
+ key = NULL;
+ single = false;
+ spin_lock_irqsave(&mns->keys_lock, flags);
+ if (!hlist_empty(&mns->keys)) {
+ key = hlist_entry(mns->keys.first, struct mctp_sk_key, hlist);
+ single = hlist_is_singular_node(&key->hlist, &mns->keys);
+ }
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ KUNIT_ASSERT_NOT_NULL(test, key);
+ KUNIT_ASSERT_TRUE(test, single);
+
+ KUNIT_EXPECT_EQ(test, key->net, netid);
+ KUNIT_EXPECT_EQ(test, key->local_addr, src);
+ KUNIT_EXPECT_EQ(test, key->peer_addr, dst);
+ /* key has incoming tag, so inverse of what we sent */
+ KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER);
+
+ sock_release(sock);
+ mctp_test_route_destroy(test, rt);
+ mctp_test_destroy_dev(dev);
+}
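Note the snapshot-then-assert shape in the key checks above: a failing KUNIT_ASSERT aborts the test thread immediately, so asserting while still holding keys_lock would leak the spinlock. The test therefore records key and single under the lock and runs the assertions only after dropping it; as a sketch:

	spin_lock_irqsave(&lock, flags);
	/* record state into locals only; no assertions here */
	spin_unlock_irqrestore(&lock, flags);

	KUNIT_ASSERT_NOT_NULL(test, key);	/* safe: lock released */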
+
static struct kunit_case mctp_test_cases[] = {
KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
@@ -673,6 +1053,11 @@ static struct kunit_case mctp_test_cases[] = {
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
+ KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
+ KUNIT_CASE(mctp_test_packet_flow),
+ KUNIT_CASE(mctp_test_fragment_flow),
+ KUNIT_CASE(mctp_test_route_output_key_create),
{}
};
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index e03ba66bbe18..565763eb0211 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -4,6 +4,7 @@
#include <linux/mctp.h>
#include <linux/if_arp.h>
+#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/pkt_sched.h>
@@ -54,6 +55,7 @@ struct mctp_test_dev *mctp_test_create_dev(void)
rcu_read_lock();
dev->mdev = __mctp_dev_get(ndev);
+ dev->mdev->net = mctp_default_net(dev_net(ndev));
rcu_read_unlock();
return dev;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1af29af65388..6dab883a08dd 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2179,7 +2179,9 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct mpls_route __rcu **platform_label;
- struct fib_dump_filter filter = {};
+ struct fib_dump_filter filter = {
+ .rtnl_held = true,
+ };
unsigned int flags = NLM_F_MULTI;
size_t platform_labels;
unsigned int index;
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 533d082f0701..45d1e6a157fc 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -27,6 +27,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
__be16 mpls_protocol;
unsigned int mpls_hlen;
+ if (!skb_inner_network_header_was_set(skb))
+ goto out;
+
skb_reset_network_header(skb);
mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
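The new guard returns early when no inner network header was ever set; without it, the subtraction below computes mpls_hlen from an unset offset and the GSO path operates on garbage. Reduced to the decision that matters (out is the function's existing fallback label):

	if (!skb_inner_network_header_was_set(skb))
		goto out;	/* nothing sane to segment */

	mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);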
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index ef59e25dc482..8fc790f2a01b 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -55,8 +55,6 @@ static int mpls_xmit(struct sk_buff *skb)
out_dev = dst->dev;
net = dev_net(out_dev);
- skb_orphan(skb);
-
if (!mpls_output_possible(out_dev) ||
!dst->lwtstate || skb_warn_if_lro(skb))
goto drop;
diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
index a536586742f2..3ae46b545d2c 100644
--- a/net/mptcp/diag.c
+++ b/net/mptcp/diag.c
@@ -10,20 +10,24 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
-static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
+static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *sf;
struct nlattr *start;
u32 flags = 0;
+ bool slow;
int err;
+ if (inet_sk_state_load(sk) == TCP_LISTEN)
+ return 0;
+
start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
if (!start)
return -EMSGSIZE;
+ slow = lock_sock_fast(sk);
rcu_read_lock();
sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
if (!sf) {
@@ -63,17 +67,19 @@ static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
sf->map_data_len) ||
nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
- nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, sf->local_id)) {
+ nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) {
err = -EMSGSIZE;
goto nla_failure;
}
rcu_read_unlock();
+ unlock_sock_fast(sk, slow);
nla_nest_end(skb, start);
return 0;
nla_failure:
rcu_read_unlock();
+ unlock_sock_fast(sk, slow);
nla_nest_cancel(skb, start);
return err;
}
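subflow_get_info() now brackets its work with the fast socket lock; the saved slow token has to reach unlock_sock_fast() on every exit, which is why the nla_failure path above gained its own unlock. The general shape, as a sketch (do_fill() stands in for the nla_put() chain):

	bool slow = lock_sock_fast(sk);

	err = do_fill(skb);
	if (err)
		goto fail;

	unlock_sock_fast(sk, slow);
	return 0;

fail:
	unlock_sock_fast(sk, slow);
	return err;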
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index 74698582a285..ad28da655f8b 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -59,13 +59,12 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
mptcp_data_unlock(sk);
}
-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
- const struct mptcp_options_received *mp_opt)
+void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+ const struct mptcp_options_received *mp_opt)
{
struct sock *sk = (struct sock *)msk;
struct sk_buff *skb;
- mptcp_data_lock(sk);
skb = skb_peek_tail(&sk->sk_receive_queue);
if (skb) {
WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
@@ -77,5 +76,4 @@ void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_
}
pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
- mptcp_data_unlock(sk);
}
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index bd8ff5950c8d..0566dd793810 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -10,7 +10,6 @@
#include <linux/net.h>
#include <linux/inet_diag.h>
#include <net/netlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
diff --git a/net/mptcp/mptcp_pm_gen.c b/net/mptcp/mptcp_pm_gen.c
index 670da7822e6c..c30a2a90a192 100644
--- a/net/mptcp/mptcp_pm_gen.c
+++ b/net/mptcp/mptcp_pm_gen.c
@@ -32,8 +32,9 @@ const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1]
};
/* MPTCP_PM_CMD_GET_ADDR - do */
-const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
- [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
};
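A policy array and its operation's .maxattr must move together: genetlink only parses attribute types up to .maxattr, so a table sized for MPTCP_PM_ATTR_TOKEN paired with a .maxattr still at MPTCP_PM_ENDPOINT_ADDR would reject or ignore the token before the handler ever runs. Schematically (MY_* names are placeholders):

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_ADDR]  = NLA_POLICY_NESTED(my_addr_policy),
	[MY_ATTR_TOKEN] = { .type = NLA_U32, },
};

/* the genl_ops entry must agree: .policy = my_policy, .maxattr = MY_ATTR_MAX */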
/* MPTCP_PM_CMD_FLUSH_ADDRS - do */
@@ -110,7 +111,7 @@ const struct genl_ops mptcp_pm_nl_ops[11] = {
.doit = mptcp_pm_nl_get_addr_doit,
.dumpit = mptcp_pm_nl_get_addr_dumpit,
.policy = mptcp_pm_get_addr_nl_policy,
- .maxattr = MPTCP_PM_ENDPOINT_ADDR,
+ .maxattr = MPTCP_PM_ATTR_TOKEN,
.flags = GENL_UNS_ADMIN_PERM,
},
{
diff --git a/net/mptcp/mptcp_pm_gen.h b/net/mptcp/mptcp_pm_gen.h
index ac9fc7225b6a..e24258f6f819 100644
--- a/net/mptcp/mptcp_pm_gen.h
+++ b/net/mptcp/mptcp_pm_gen.h
@@ -18,7 +18,7 @@ extern const struct nla_policy mptcp_pm_add_addr_nl_policy[MPTCP_PM_ENDPOINT_ADD
extern const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
-extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ATTR_TOKEN + 1];
extern const struct nla_policy mptcp_pm_flush_addrs_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index d2527d189a79..27ca42c77b02 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -689,8 +689,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
if (!echo) {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX);
- opts->ahmac = add_addr_generate_hmac(msk->local_key,
- msk->remote_key,
+ opts->ahmac = add_addr_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
&opts->addr);
} else {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
@@ -792,7 +792,7 @@ static bool mptcp_established_options_fastclose(struct sock *sk,
*size = TCPOLEN_MPTCP_FASTCLOSE;
opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
- opts->rcvr_key = msk->remote_key;
+ opts->rcvr_key = READ_ONCE(msk->remote_key);
pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
@@ -962,9 +962,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
/* subflows are fully established as soon as we get any
* additional ack, including ADD_ADDR.
*/
- subflow->fully_established = 1;
- WRITE_ONCE(msk->fully_established, true);
- goto check_notify;
+ goto set_fully_established;
}
/* If the first established packet does not contain MP_CAPABLE + data
@@ -983,10 +981,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
if (mp_opt->deny_join_id0)
WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
-set_fully_established:
if (unlikely(!READ_ONCE(msk->pm.server_side)))
pr_warn_once("bogus mpc option on established client sk");
- mptcp_subflow_fully_established(subflow, mp_opt);
+
+set_fully_established:
+ mptcp_data_lock((struct sock *)msk);
+ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
+ mptcp_data_unlock((struct sock *)msk);
check_notify:
/* if the subflow is not already linked into the conn_list, we can't
@@ -1030,7 +1031,7 @@ u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
{
msk->bytes_acked += new_snd_una - msk->snd_una;
- msk->snd_una = new_snd_una;
+ WRITE_ONCE(msk->snd_una, new_snd_una);
}
static void ack_update_msk(struct mptcp_sock *msk,
@@ -1057,10 +1058,10 @@ static void ack_update_msk(struct mptcp_sock *msk,
new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
if (after64(new_wnd_end, msk->wnd_end))
- msk->wnd_end = new_wnd_end;
+ WRITE_ONCE(msk->wnd_end, new_wnd_end);
/* this assumes mptcp_incoming_options() is invoked after tcp_ack() */
- if (after64(msk->wnd_end, READ_ONCE(msk->snd_nxt)))
+ if (after64(msk->wnd_end, snd_nxt))
__mptcp_check_push(sk, ssk);
if (after64(new_snd_una, old_snd_una)) {
@@ -1071,7 +1072,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
trace_ack_update_msk(mp_opt->data_ack,
old_snd_una, new_snd_una,
- new_wnd_end, msk->wnd_end);
+ new_wnd_end, READ_ONCE(msk->wnd_end));
}
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit)
@@ -1099,8 +1100,8 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
if (mp_opt->echo)
return true;
- hmac = add_addr_generate_hmac(msk->remote_key,
- msk->local_key,
+ hmac = add_addr_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
&mp_opt->addr);
pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
@@ -1147,7 +1148,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) {
if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) &&
- msk->local_key == mp_opt.rcvr_key) {
+ READ_ONCE(msk->local_key) == mp_opt.rcvr_key) {
WRITE_ONCE(msk->rcv_fastclose, true);
mptcp_schedule_work((struct sock *)msk);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSERX);
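The READ_ONCE()/WRITE_ONCE() churn through this file applies the usual pairing rule to msk fields such as local_key, remote_key, snd_una and wnd_end: once any reader loads a field without holding the msk-level locks, every store must use WRITE_ONCE() and every lockless load READ_ONCE(), preventing the compiler from tearing, caching or refetching the value. In miniature:

	/* writer, under the msk data lock */
	WRITE_ONCE(msk->snd_una, new_snd_una);

	/* lockless reader elsewhere */
	u64 snd_una = READ_ONCE(msk->snd_una);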
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 4ae19113b8eb..55406720c607 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -6,7 +6,6 @@
#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
@@ -77,7 +76,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
+ pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side);
WRITE_ONCE(pm->server_side, server_side);
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
@@ -441,13 +440,27 @@ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id
return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex);
}
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info)
{
- if (token)
- return mptcp_userspace_pm_set_flags(net, token, loc, rem, bkup);
- return mptcp_pm_nl_set_flags(net, loc, bkup);
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_get_addr(skb, info);
+ return mptcp_pm_nl_get_addr(skb, info);
+}
+
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ const struct genl_info *info = genl_info_dump(cb);
+
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_dump_addr(msg, cb);
+ return mptcp_pm_nl_dump_addr(msg, cb);
+}
+
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[MPTCP_PM_ATTR_TOKEN])
+ return mptcp_userspace_pm_set_flags(skb, info);
+ return mptcp_pm_nl_set_flags(skb, info);
}
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 287a60381eae..5c17d39146ea 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -8,19 +8,13 @@
#include <linux/inet.h>
#include <linux/kernel.h>
-#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/netns/generic.h>
#include <net/mptcp.h>
-#include <net/genetlink.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
-/* forward declaration */
-static struct genl_family mptcp_genl_family;
-
static int pm_nl_pernet_id;
struct mptcp_pm_add_entry {
@@ -396,19 +390,6 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
}
}
-static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
- const struct mptcp_addr_info *addr)
-{
- int i;
-
- for (i = 0; i < nr; i++) {
- if (addrs[i].id == addr->id)
- return true;
- }
-
- return false;
-}
-
/* Fill all the remote addresses into the array addrs[],
* and return the array size.
*/
@@ -440,18 +421,34 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
msk->pm.subflows++;
addrs[i++] = remote;
} else {
+ DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+
+ /* Forbid creation of new subflows matching existing
+ * ones, possibly already created by incoming ADD_ADDR
+ */
+ bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
+ mptcp_for_each_subflow(msk, subflow)
+ if (READ_ONCE(subflow->local_id) == local->id)
+ __set_bit(subflow->remote_id, unavail_id);
+
mptcp_for_each_subflow(msk, subflow) {
ssk = mptcp_subflow_tcp_sock(subflow);
remote_address((struct sock_common *)ssk, &addrs[i]);
- addrs[i].id = subflow->remote_id;
+ addrs[i].id = READ_ONCE(subflow->remote_id);
if (deny_id0 && !addrs[i].id)
continue;
+ if (test_bit(addrs[i].id, unavail_id))
+ continue;
+
if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
continue;
- if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
- msk->pm.subflows < subflows_max) {
+ if (msk->pm.subflows < subflows_max) {
+				/* forbid creating multiple addresses towards
+ * this id
+ */
+ __set_bit(addrs[i].id, unavail_id);
msk->pm.subflows++;
i++;
}
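The dropped lookup_address_in_vec() re-scanned the output array for every candidate; the bitmap replaces that quadratic check and, because it is seeded from the subflows already attached to the msk (filtered to the matching local id in the real code), it also blocks duplicates of subflows created earlier by an incoming ADD_ADDR. The dedup skeleton, simplified:

	DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);

	bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
	mptcp_for_each_subflow(msk, subflow)
		__set_bit(READ_ONCE(subflow->remote_id), unavail_id);

	/* then, per candidate id: */
	if (test_bit(id, unavail_id))
		continue;		/* already in use */
	__set_bit(id, unavail_id);	/* claim it for this pass */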
@@ -502,15 +499,12 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
}
static struct mptcp_pm_addr_entry *
-__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
- bool lookup_by_id)
+__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info)
{
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if ((!lookup_by_id &&
- mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) ||
- (lookup_by_id && entry->addr.id == info->id))
+ if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port))
return entry;
}
return NULL;
@@ -540,7 +534,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
rcu_read_lock();
- entry = __lookup_addr(pernet, &mpc_addr, false);
+ entry = __lookup_addr(pernet, &mpc_addr);
if (entry) {
__clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
msk->mpc_endpoint_id = entry->addr.id;
@@ -799,18 +793,18 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
mptcp_for_each_subflow_safe(msk, subflow, tmp) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ u8 remote_id = READ_ONCE(subflow->remote_id);
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
- u8 id = subflow->local_id;
+ u8 id = subflow_get_local_id(subflow);
- if (rm_type == MPTCP_MIB_RMADDR && subflow->remote_id != rm_id)
+ if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
continue;
if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
continue;
pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
- i, rm_id, subflow->local_id, subflow->remote_id,
- msk->mpc_endpoint_id);
+ i, rm_id, id, remote_id, msk->mpc_endpoint_id);
spin_unlock_bh(&msk->pm.lock);
mptcp_subflow_shutdown(sk, ssk, how);
@@ -901,7 +895,8 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
}
static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
- struct mptcp_pm_addr_entry *entry)
+ struct mptcp_pm_addr_entry *entry,
+ bool needs_id)
{
struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
unsigned int addr_max;
@@ -949,7 +944,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
}
}
- if (!entry->addr.id) {
+ if (!entry->addr.id && needs_id) {
find_next:
entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
MPTCP_PM_MAX_ADDR_ID + 1,
@@ -960,7 +955,7 @@ find_next:
}
}
- if (!entry->addr.id)
+ if (!entry->addr.id && needs_id)
goto out;
__set_bit(entry->addr.id, pernet->id_bitmap);
@@ -1092,7 +1087,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
entry->ifindex = 0;
entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
entry->lsk = NULL;
- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
if (ret < 0)
kfree(entry);
@@ -1285,6 +1280,18 @@ next:
return 0;
}
+static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
+ struct genl_info *info)
+{
+ struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
+
+ if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
+ mptcp_pm_address_nl_policy, info->extack) &&
+ tb[MPTCP_PM_ADDR_ATTR_ID])
+ return true;
+ return false;
+}
+
int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
@@ -1326,7 +1333,8 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
goto out_free;
}
}
- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
+ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
+ !mptcp_pm_has_addr_attr_id(attr, info));
if (ret < 0) {
GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
goto out_free;
@@ -1533,8 +1541,8 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
}
}
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list)
+static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
+ struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
@@ -1619,8 +1627,8 @@ int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static int mptcp_nl_fill_addr(struct sk_buff *skb,
- struct mptcp_pm_addr_entry *entry)
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry)
{
struct mptcp_addr_info *addr = &entry->addr;
struct nlattr *attr;
@@ -1658,7 +1666,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
@@ -1708,8 +1716,13 @@ fail:
return ret;
}
-int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ return mptcp_pm_get_addr(skb, info);
+}
+
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
{
struct net *net = sock_net(msg->sk);
struct mptcp_pm_addr_entry *entry;
@@ -1751,6 +1764,12 @@ int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
return msg->len;
}
+int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ return mptcp_pm_dump_addr(msg, cb);
+}
+
static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
{
struct nlattr *attr = info->attrs[id];
@@ -1865,66 +1884,63 @@ next:
return ret;
}
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup)
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info)
{
- struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
+ struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
MPTCP_PM_ADDR_FLAG_FULLMESH;
+ struct net *net = sock_net(skb->sk);
struct mptcp_pm_addr_entry *entry;
+ struct pm_nl_pernet *pernet;
u8 lookup_by_id = 0;
+ u8 bkup = 0;
+ int ret;
- if (addr->addr.family == AF_UNSPEC) {
+ pernet = pm_nl_get_pernet(net);
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ return ret;
+
+ if (addr.addr.family == AF_UNSPEC) {
lookup_by_id = 1;
- if (!addr->addr.id)
+ if (!addr.addr.id) {
+ GENL_SET_ERR_MSG(info, "missing required inputs");
return -EOPNOTSUPP;
+ }
}
+ if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
+
spin_lock_bh(&pernet->lock);
- entry = __lookup_addr(pernet, &addr->addr, lookup_by_id);
+ entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) :
+ __lookup_addr(pernet, &addr.addr);
if (!entry) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "address not found");
return -EINVAL;
}
- if ((addr->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
+ if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
spin_unlock_bh(&pernet->lock);
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
return -EINVAL;
}
- changed = (addr->flags ^ entry->flags) & mask;
- entry->flags = (entry->flags & ~mask) | (addr->flags & mask);
- *addr = *entry;
+ changed = (addr.flags ^ entry->flags) & mask;
+ entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
+ addr = *entry;
spin_unlock_bh(&pernet->lock);
- mptcp_nl_set_flags(net, &addr->addr, bkup, changed);
+ mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
return 0;
}
int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, };
- struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
- struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
- struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
- struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
- struct net *net = sock_net(skb->sk);
- u8 bkup = 0;
- int ret;
-
- ret = mptcp_pm_parse_entry(attr, info, false, &addr);
- if (ret < 0)
- return ret;
-
- if (attr_rem) {
- ret = mptcp_pm_parse_entry(attr_rem, info, false, &remote);
- if (ret < 0)
- return ret;
- }
-
- if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
- bkup = 1;
-
- return mptcp_pm_set_flags(net, token, &addr, &remote, bkup);
+ return mptcp_pm_set_flags(skb, info);
}
static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
@@ -1980,7 +1996,7 @@ static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
if (WARN_ON_ONCE(!sf))
return -EINVAL;
- if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, sf->local_id))
+ if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
return -EMSGSIZE;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
@@ -1997,7 +2013,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
const struct mptcp_subflow_context *sf;
u8 sk_err;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
return -EMSGSIZE;
if (mptcp_event_add_subflow(skb, ssk))
@@ -2055,7 +2071,7 @@ static int mptcp_event_created(struct sk_buff *skb,
const struct mptcp_sock *msk,
const struct sock *ssk)
{
- int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
+ int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
if (err)
return err;
@@ -2083,7 +2099,7 @@ void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
@@ -2118,7 +2134,7 @@ void mptcp_event_addr_announced(const struct sock *ssk,
if (!nlh)
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)))
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
@@ -2234,7 +2250,7 @@ void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
goto nla_put_failure;
break;
case MPTCP_EVENT_CLOSED:
- if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
+ if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token)) < 0)
goto nla_put_failure;
break;
case MPTCP_EVENT_ANNOUNCED:
@@ -2264,7 +2280,7 @@ nla_put_failure:
nlmsg_free(skb);
}
-static struct genl_family mptcp_genl_family __ro_after_init = {
+struct genl_family mptcp_genl_family __ro_after_init = {
.name = MPTCP_PM_NAME,
.version = MPTCP_PM_VER,
.netnsok = true,
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index efecbe3cf415..9f5d422d5ef6 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -26,7 +26,8 @@ void mptcp_free_local_addr_list(struct mptcp_sock *msk)
}
static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
- struct mptcp_pm_addr_entry *entry)
+ struct mptcp_pm_addr_entry *entry,
+ bool needs_id)
{
DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
struct mptcp_pm_addr_entry *match = NULL;
@@ -41,7 +42,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
spin_lock_bh(&msk->pm.lock);
list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
- if (addr_match && entry->addr.id == 0)
+ if (addr_match && entry->addr.id == 0 && needs_id)
entry->addr.id = e->addr.id;
id_match = (e->addr.id == entry->addr.id);
if (addr_match && id_match) {
@@ -64,7 +65,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
}
*e = *entry;
- if (!e->addr.id)
+ if (!e->addr.id && needs_id)
e->addr.id = find_next_zero_bit(id_bitmap,
MPTCP_PM_MAX_ADDR_ID + 1,
1);
@@ -105,19 +106,26 @@ static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
return -EINVAL;
}
+static struct mptcp_pm_addr_entry *
+mptcp_userspace_pm_lookup_addr_by_id(struct mptcp_sock *msk, unsigned int id)
+{
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (entry->addr.id == id)
+ return entry;
+ }
+ return NULL;
+}
+
int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
unsigned int id,
u8 *flags, int *ifindex)
{
- struct mptcp_pm_addr_entry *entry, *match = NULL;
+ struct mptcp_pm_addr_entry *match;
spin_lock_bh(&msk->pm.lock);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (id == entry->addr.id) {
- match = entry;
- break;
- }
- }
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id);
spin_unlock_bh(&msk->pm.lock);
if (match) {
*flags = match->flags;
@@ -130,10 +138,21 @@ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
struct mptcp_addr_info *skc)
{
- struct mptcp_pm_addr_entry new_entry;
+ struct mptcp_pm_addr_entry *entry = NULL, *e, new_entry;
__be16 msk_sport = ((struct inet_sock *)
inet_sk((struct sock *)msk))->inet_sport;
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (mptcp_addresses_equal(&e->addr, skc, false)) {
+ entry = e;
+ break;
+ }
+ }
+ spin_unlock_bh(&msk->pm.lock);
+ if (entry)
+ return entry->addr.id;
+
memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry));
new_entry.addr = *skc;
new_entry.addr.id = 0;
@@ -142,7 +161,7 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
if (new_entry.addr.port == msk_sport)
new_entry.addr.port = 0;
- return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
+ return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
}
int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
@@ -187,7 +206,7 @@ int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
goto announce_err;
}
- err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val);
+ err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val, false);
if (err < 0) {
GENL_SET_ERR_MSG(info, "did not match address and id");
goto announce_err;
@@ -222,7 +241,7 @@ static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk,
lock_sock(sk);
mptcp_for_each_subflow(msk, subflow) {
- if (subflow->local_id == 0) {
+ if (READ_ONCE(subflow->local_id) == 0) {
has_id_0 = true;
break;
}
@@ -249,7 +268,7 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID];
- struct mptcp_pm_addr_entry *match = NULL;
+ struct mptcp_pm_addr_entry *match;
struct mptcp_pm_addr_entry *entry;
struct mptcp_sock *msk;
LIST_HEAD(free_list);
@@ -286,13 +305,7 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
lock_sock(sk);
- list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
- if (entry->addr.id == id_val) {
- match = entry;
- break;
- }
- }
-
+ match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val);
if (!match) {
GENL_SET_ERR_MSG(info, "address with specified id not found");
release_sock(sk);
@@ -322,7 +335,6 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
struct mptcp_pm_addr_entry local = { 0 };
struct mptcp_addr_info addr_r;
- struct mptcp_addr_info addr_l;
struct mptcp_sock *msk;
int err = -EINVAL;
struct sock *sk;
@@ -348,26 +360,32 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
goto create_err;
}
- err = mptcp_pm_parse_addr(laddr, info, &addr_l);
+ err = mptcp_pm_parse_entry(laddr, info, true, &local);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr");
goto create_err;
}
+ if (local.flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
+ GENL_SET_ERR_MSG(info, "invalid addr flags");
+ err = -EINVAL;
+ goto create_err;
+ }
+ local.flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW;
+
err = mptcp_pm_parse_addr(raddr, info, &addr_r);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
goto create_err;
}
- if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) {
+ if (!mptcp_pm_addr_families_match(sk, &local.addr, &addr_r)) {
GENL_SET_ERR_MSG(info, "families mismatch");
err = -EINVAL;
goto create_err;
}
- local.addr = addr_l;
- err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
+ err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
if (err < 0) {
GENL_SET_ERR_MSG(info, "did not match address and id");
goto create_err;
@@ -375,7 +393,7 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
lock_sock(sk);
- err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+ err = __mptcp_subflow_connect(sk, &local.addr, &addr_r);
release_sock(sk);
@@ -483,6 +501,16 @@ int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info
goto destroy_err;
}
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (addr_l.family == AF_INET && ipv6_addr_v4mapped(&addr_r.addr6)) {
+ ipv6_addr_set_v4mapped(addr_l.addr.s_addr, &addr_l.addr6);
+ addr_l.family = AF_INET6;
+ }
+ if (addr_r.family == AF_INET && ipv6_addr_v4mapped(&addr_l.addr6)) {
+ ipv6_addr_set_v4mapped(addr_r.addr.s_addr, &addr_r.addr6);
+ addr_r.family = AF_INET6;
+ }
+#endif
if (addr_l.family != addr_r.family) {
GENL_SET_ERR_MSG(info, "address families do not match");
err = -EINVAL;
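The new CONFIG_MPTCP_IPV6 block normalizes mixed-family pairs before the family comparison that follows: when one endpoint is AF_INET and the other is a v4-mapped AF_INET6 address, the AF_INET side is promoted to its ::ffff:a.b.c.d form, so logically-equal endpoints no longer fail the check. One direction of the promotion, as a sketch with illustrative names:

	if (local->family == AF_INET && ipv6_addr_v4mapped(&remote->addr6)) {
		ipv6_addr_set_v4mapped(local->addr.s_addr, &local->addr6);
		local->family = AF_INET6;
	}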
@@ -518,35 +546,194 @@ destroy_err:
return err;
}
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup)
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info)
{
+ struct mptcp_pm_addr_entry loc = { .addr = { .family = AF_UNSPEC }, };
+ struct mptcp_pm_addr_entry rem = { .addr = { .family = AF_UNSPEC }, };
+ struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct net *net = sock_net(skb->sk);
struct mptcp_sock *msk;
int ret = -EINVAL;
struct sock *sk;
u32 token_val;
+ u8 bkup = 0;
token_val = nla_get_u32(token);
msk = mptcp_token_get_sock(net, token_val);
- if (!msk)
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
return ret;
+ }
sk = (struct sock *)msk;
- if (!mptcp_pm_is_userspace(msk))
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "userspace PM not selected");
+ goto set_flags_err;
+ }
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &loc);
+ if (ret < 0)
goto set_flags_err;
- if (loc->addr.family == AF_UNSPEC ||
- rem->addr.family == AF_UNSPEC)
+ if (attr_rem) {
+ ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem);
+ if (ret < 0)
+ goto set_flags_err;
+ }
+
+ if (loc.addr.family == AF_UNSPEC ||
+ rem.addr.family == AF_UNSPEC) {
+ GENL_SET_ERR_MSG(info, "invalid address families");
+ ret = -EINVAL;
goto set_flags_err;
+ }
+
+ if (loc.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
+ bkup = 1;
lock_sock(sk);
- ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc->addr, &rem->addr, bkup);
+ ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc.addr, &rem.addr, bkup);
release_sock(sk);
set_flags_err:
sock_put(sk);
return ret;
}
+
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct id_bitmap {
+ DECLARE_BITMAP(map, MPTCP_PM_MAX_ADDR_ID + 1);
+ } *bitmap;
+ const struct genl_info *info = genl_info_dump(cb);
+ struct net *net = sock_net(msg->sk);
+ struct mptcp_pm_addr_entry *entry;
+ struct mptcp_sock *msk;
+ struct nlattr *token;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *hdr;
+
+ bitmap = (struct id_bitmap *)cb->ctx;
+ token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+
+ msk = mptcp_token_get_sock(net, nla_get_u32(token));
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return ret;
+ }
+
+ sk = (struct sock *)msk;
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto out;
+ }
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (test_bit(entry->addr.id, bitmap->map))
+ continue;
+
+ hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &mptcp_genl_family,
+ NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
+ if (!hdr)
+ break;
+
+ if (mptcp_nl_fill_addr(msg, entry) < 0) {
+ genlmsg_cancel(msg, hdr);
+ break;
+ }
+
+ __set_bit(entry->addr.id, bitmap->map);
+ genlmsg_end(msg, hdr);
+ }
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ ret = msg->len;
+
+out:
+ sock_put(sk);
+ return ret;
+}
+
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+ struct mptcp_pm_addr_entry addr, *entry;
+ struct net *net = sock_net(skb->sk);
+ struct mptcp_sock *msk;
+ struct sk_buff *msg;
+ int ret = -EINVAL;
+ struct sock *sk;
+ void *reply;
+
+ msk = mptcp_token_get_sock(net, nla_get_u32(token));
+ if (!msk) {
+ NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token");
+ return ret;
+ }
+
+ sk = (struct sock *)msk;
+
+ if (!mptcp_pm_is_userspace(msk)) {
+ GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected");
+ goto out;
+ }
+
+ ret = mptcp_pm_parse_entry(attr, info, false, &addr);
+ if (ret < 0)
+ goto out;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
+ info->genlhdr->cmd);
+ if (!reply) {
+ GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
+ ret = -EMSGSIZE;
+ goto fail;
+ }
+
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_userspace_pm_lookup_addr_by_id(msk, addr.addr.id);
+ if (!entry) {
+ GENL_SET_ERR_MSG(info, "address not found");
+ ret = -EINVAL;
+ goto unlock_fail;
+ }
+
+ ret = mptcp_nl_fill_addr(msg, entry);
+ if (ret)
+ goto unlock_fail;
+
+ genlmsg_end(msg, reply);
+ ret = genlmsg_reply(msg, info);
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+ sock_put(sk);
+ return ret;
+
+unlock_fail:
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+fail:
+ nlmsg_free(msg);
+out:
+ sock_put(sk);
+ return ret;
+}
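mptcp_userspace_pm_get_addr() follows the canonical generic-netlink reply sequence, with one ownership subtlety: genlmsg_reply() consumes the message, so only the failure paths free it. Stripped to its skeleton (fill() stands in for mptcp_nl_fill_addr()):

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &family, 0, info->genlhdr->cmd);
	if (!reply)
		goto free;			/* -EMSGSIZE */

	if (fill(msg, entry) < 0)
		goto free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);	/* consumes msg */

free:
	nlmsg_free(msg);
	return -EMSGSIZE;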
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3ed4709a7509..3a1967bc7bad 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -15,7 +15,6 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
@@ -85,7 +84,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
subflow->subflow_id = msk->subflow_id++;
/* This is the first subflow, always with id 0 */
- subflow->local_id_valid = 1;
+ WRITE_ONCE(subflow->local_id, 0);
mptcp_sock_graft(msk->first, sk->sk_socket);
iput(SOCK_INODE(ssock));
@@ -410,6 +409,7 @@ static void mptcp_close_wake_up(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -441,16 +441,17 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
}
}
+/* can be called with no lock acquired */
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
struct mptcp_sock *msk = mptcp_sk(sk);
if (READ_ONCE(msk->rcv_data_fin) &&
- ((1 << sk->sk_state) &
+ ((1 << inet_sk_state_load(sk)) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
- if (msk->ack_seq == rcv_data_fin_seq) {
+ if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
if (seq)
*seq = rcv_data_fin_seq;
@@ -748,7 +749,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
msk->bytes_received += end_seq - msk->ack_seq;
- msk->ack_seq = end_seq;
+ WRITE_ONCE(msk->ack_seq, end_seq);
moved = true;
}
return moved;
@@ -985,6 +986,7 @@ static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
put_page(dfrag->page);
}
+/* called under both the msk socket lock and the data lock */
static void __mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1033,13 +1035,15 @@ static void __mptcp_clean_una(struct sock *sk)
msk->recovery = false;
out:
- if (snd_una == READ_ONCE(msk->snd_nxt) &&
- snd_una == READ_ONCE(msk->write_seq)) {
+ if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) {
if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
mptcp_stop_rtx_timer(sk);
} else {
mptcp_reset_rtx_timer(sk);
}
+
+ if (mptcp_pending_data_fin_ack(sk))
+ mptcp_schedule_work(sk);
}
static void __mptcp_clean_una_wakeup(struct sock *sk)
@@ -1260,6 +1264,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
mpext = mptcp_get_ext(skb);
if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) {
TCP_SKB_CB(skb)->eor = 1;
+ tcp_mark_push(tcp_sk(ssk), skb);
goto alloc_skb;
}
@@ -1499,14 +1504,17 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
*/
if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
- msk->snd_nxt = snd_nxt_new;
+ WRITE_ONCE(msk->snd_nxt, snd_nxt_new);
}
}
void mptcp_check_and_set_pending(struct sock *sk)
{
- if (mptcp_send_head(sk))
- mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
+ if (mptcp_send_head(sk)) {
+ mptcp_data_lock(sk);
+ mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING);
+ mptcp_data_unlock(sk);
+ }
}
static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
@@ -1683,15 +1691,6 @@ out:
}
}
-static void mptcp_set_nospace(struct sock *sk)
-{
- /* enable autotune */
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-
- /* will be cleared on avail space */
- set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
-}
-
static int mptcp_disconnect(struct sock *sk, int flags);
static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
@@ -1762,6 +1761,30 @@ static int do_copy_data_nocache(struct sock *sk, int copy,
return 0;
}
+/* open-code sk_stream_memory_free() plus sent limit computation to
+ * avoid indirect calls in the fast path.
+ * Called under the msk socket lock, so we can avoid a bunch of ONCE
+ * annotations.
+ */
+static u32 mptcp_send_limit(const struct sock *sk)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 limit, not_sent;
+
+ if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf))
+ return 0;
+
+ limit = mptcp_notsent_lowat(sk);
+ if (limit == UINT_MAX)
+ return UINT_MAX;
+
+ not_sent = msk->write_seq - msk->snd_nxt;
+ if (not_sent >= limit)
+ return 0;
+
+ return limit - not_sent;
+}
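A worked example of the limit, assuming a notsent_lowat of 128 KiB: with 96 KiB accepted into the msk write queue but not yet pushed to any subflow, the next sendmsg() pass may copy at most 32 KiB before taking the wait_for_memory path:

	limit      = 131072;		/* mptcp_notsent_lowat(sk), assumed */
	not_sent   =  98304;		/* msk->write_seq - msk->snd_nxt */
	copy_limit = limit - not_sent;	/* 32768 bytes allowed this round */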
+
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1806,6 +1829,12 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct mptcp_data_frag *dfrag;
bool dfrag_collapsed;
size_t psize, offset;
+ u32 copy_limit;
+
+		/* ensure we stay within the notsent_lowat() constraint */
+ copy_limit = mptcp_send_limit(sk);
+ if (!copy_limit)
+ goto wait_for_memory;
/* reuse tail pfrag, if possible, or carve a new one from the
* page allocator
@@ -1813,9 +1842,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
dfrag = mptcp_pending_tail(sk);
dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
if (!dfrag_collapsed) {
- if (!sk_stream_memory_free(sk))
- goto wait_for_memory;
-
if (!mptcp_page_frag_refill(sk, pfrag))
goto wait_for_memory;
@@ -1830,6 +1856,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
offset = dfrag->offset + dfrag->data_len;
psize = pfrag->size - offset;
psize = min_t(size_t, psize, msg_data_left(msg));
+ psize = min_t(size_t, psize, copy_limit);
total_ts = psize + frag_truesize;
if (!sk_wmem_schedule(sk, total_ts))
@@ -1865,7 +1892,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
continue;
wait_for_memory:
- mptcp_set_nospace(sk);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
__mptcp_push_pending(sk, msg->msg_flags);
ret = sk_stream_wait_memory(sk, &timeo);
if (ret)
@@ -1960,6 +1987,9 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
if (copied <= 0)
return;
+ if (!msk->rcvspace_init)
+ mptcp_rcv_space_init(msk, msk->first);
+
msk->rcvq_space.copied += copied;
mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
@@ -2108,7 +2138,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk)
skb = skb_peek(&msk->receive_queue);
if (skb) {
- u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+ u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq;
if (hint_val >= INT_MAX)
return INT_MAX;
@@ -2314,9 +2344,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
if (__mptcp_check_fallback(msk))
return false;
- if (tcp_rtx_and_write_queues_empty(sk))
- return false;
-
/* the closing socket has some data untransmitted and/or unacked:
* some data in the mptcp rtx queue has not really xmitted yet.
* keep it simple and re-inject the whole mptcp level rtx queue
@@ -2755,7 +2782,7 @@ static void __mptcp_init_sock(struct sock *sk)
__skb_queue_head_init(&msk->receive_queue);
msk->out_of_order_queue = RB_ROOT;
msk->first_pending = NULL;
- msk->rmem_fwd_alloc = 0;
+ WRITE_ONCE(msk->rmem_fwd_alloc, 0);
WRITE_ONCE(msk->rmem_released, 0);
msk->timer_ival = TCP_RTO_MIN;
msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
@@ -2971,7 +2998,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk->sk_prot->destroy(sk);
- WARN_ON_ONCE(msk->rmem_fwd_alloc);
+ WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc));
WARN_ON_ONCE(msk->rmem_released);
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
@@ -3145,22 +3172,22 @@ static int mptcp_disconnect(struct sock *sk, int flags)
mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
WRITE_ONCE(msk->flags, 0);
msk->cb_flags = 0;
- msk->push_pending = 0;
msk->recovery = false;
- msk->can_ack = false;
- msk->fully_established = false;
- msk->rcv_data_fin = false;
- msk->snd_data_fin_enable = false;
- msk->rcv_fastclose = false;
- msk->use_64bit_ack = false;
- msk->bytes_consumed = 0;
+ WRITE_ONCE(msk->can_ack, false);
+ WRITE_ONCE(msk->fully_established, false);
+ WRITE_ONCE(msk->rcv_data_fin, false);
+ WRITE_ONCE(msk->snd_data_fin_enable, false);
+ WRITE_ONCE(msk->rcv_fastclose, false);
+ WRITE_ONCE(msk->use_64bit_ack, false);
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
mptcp_pm_data_reset(msk);
mptcp_ca_reset(sk);
+ msk->bytes_consumed = 0;
msk->bytes_acked = 0;
msk->bytes_received = 0;
msk->bytes_sent = 0;
msk->bytes_retrans = 0;
+ msk->rcvspace_init = 0;
WRITE_ONCE(sk->sk_shutdown, 0);
sk_error_report(sk);
@@ -3174,8 +3201,50 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
+
+static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk)
+{
+ const struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_txoptions *opt;
+ struct ipv6_pinfo *newnp;
+
+ newnp = inet6_sk(newsk);
+
+ rcu_read_lock();
+ opt = rcu_dereference(np->opt);
+ if (opt) {
+ opt = ipv6_dup_options(newsk, opt);
+ if (!opt)
+ net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__);
+ }
+ RCU_INIT_POINTER(newnp->opt, opt);
+ rcu_read_unlock();
+}
#endif
+static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk)
+{
+ struct ip_options_rcu *inet_opt, *newopt = NULL;
+ const struct inet_sock *inet = inet_sk(sk);
+ struct inet_sock *newinet;
+
+ newinet = inet_sk(newsk);
+
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt) {
+ newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
+ inet_opt->opt.optlen, GFP_ATOMIC);
+ if (newopt)
+ memcpy(newopt, inet_opt, sizeof(*inet_opt) +
+ inet_opt->opt.optlen);
+ else
+ net_warn_ratelimited("%s: Failed to copy ip options\n", __func__);
+ }
+ RCU_INIT_POINTER(newinet->inet_opt, newopt);
+ rcu_read_unlock();
+}
+
struct sock *mptcp_sk_clone_init(const struct sock *sk,
const struct mptcp_options_received *mp_opt,
struct sock *ssk,
@@ -3183,6 +3252,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
+ struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk;
if (!nsk)
@@ -3195,18 +3265,25 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
__mptcp_init_sock(nsk);
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (nsk->sk_family == AF_INET6)
+ mptcp_copy_ip6_options(nsk, sk);
+ else
+#endif
+ mptcp_copy_ip_options(nsk, sk);
+
msk = mptcp_sk(nsk);
- msk->local_key = subflow_req->local_key;
- msk->token = subflow_req->token;
+ WRITE_ONCE(msk->local_key, subflow_req->local_key);
+ WRITE_ONCE(msk->token, subflow_req->token);
msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
WRITE_ONCE(msk->csum_enabled, true);
- msk->write_seq = subflow_req->idsn + 1;
- msk->snd_nxt = msk->write_seq;
- msk->snd_una = msk->write_seq;
- msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
+ WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1);
+ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+ WRITE_ONCE(msk->snd_una, msk->write_seq);
+ WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
mptcp_init_sched(msk, mptcp_sk(sk)->sched);
@@ -3223,7 +3300,8 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
/* The msk maintain a ref to each subflow in the connections list */
WRITE_ONCE(msk->first, ssk);
- list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list);
+ subflow = mptcp_subflow_ctx(ssk);
+ list_add(&subflow->node, &msk->conn_list);
sock_hold(ssk);
/* new mpc subflow takes ownership of the newly
@@ -3238,6 +3316,9 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
__mptcp_propagate_sndbuf(nsk, ssk);
mptcp_rcv_space_init(msk, ssk);
+
+ if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
+ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
bh_unlock_sock(nsk);
/* note: the newly allocated socket refcount is 2 now */
@@ -3248,6 +3329,7 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
const struct tcp_sock *tp = tcp_sk(ssk);
+ msk->rcvspace_init = 1;
msk->rcvq_space.copied = 0;
msk->rcvq_space.rtt_us = 0;
@@ -3258,8 +3340,6 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
TCP_INIT_CWND * tp->advmss);
if (msk->rcvq_space.space == 0)
msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
-
- WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
}
void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
@@ -3306,9 +3386,6 @@ void __mptcp_data_acked(struct sock *sk)
__mptcp_clean_una(sk);
else
__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
-
- if (mptcp_pending_data_fin_ack(sk))
- mptcp_schedule_work(sk);
}
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
@@ -3333,8 +3410,7 @@ static void mptcp_release_cb(struct sock *sk)
struct mptcp_sock *msk = mptcp_sk(sk);
for (;;) {
- unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
- msk->push_pending;
+ unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
struct list_head join_list;
if (!flags)
@@ -3350,7 +3426,6 @@ static void mptcp_release_cb(struct sock *sk)
 * datapath acquires the msk socket spinlock while holding
* the subflow socket lock
*/
- msk->push_pending = 0;
msk->cb_flags &= ~flags;
spin_unlock_bh(&sk->sk_lock.slock);
@@ -3478,13 +3553,8 @@ void mptcp_finish_connect(struct sock *ssk)
* accessing the field below
*/
WRITE_ONCE(msk->local_key, subflow->local_key);
- WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
- WRITE_ONCE(msk->snd_nxt, msk->write_seq);
- WRITE_ONCE(msk->snd_una, msk->write_seq);
mptcp_pm_new_connection(msk, ssk, 0);
-
- mptcp_rcv_space_init(msk, ssk);
}
void mptcp_sock_graft(struct sock *sk, struct socket *parent)
@@ -3717,6 +3787,7 @@ static struct proto mptcp_prot = {
.unhash = mptcp_unhash,
.get_port = mptcp_get_port,
.forward_alloc_get = mptcp_forward_alloc_get,
+ .stream_memory_free = mptcp_stream_memory_free,
.sockets_allocated = &mptcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
@@ -3890,12 +3961,12 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
- if (sk_stream_is_writeable(sk))
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
- mptcp_set_nospace(sk);
- smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
- if (sk_stream_is_writeable(sk))
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */
+ if (__mptcp_stream_is_writeable(sk, 1))
return EPOLLOUT | EPOLLWRNORM;
return 0;
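The rewritten poll check follows the classic pattern for avoiding a lost wakeup: test, publish SOCK_NOSPACE, issue a full barrier, then test again so a write-space event that raced with the first test is still observed. A hedged user-space sketch of that pattern with C11 atomics (all names hypothetical):

```c
#include <stdatomic.h>
#include <stdbool.h>

atomic_bool nospace;	/* analog of the SOCK_NOSPACE bit */
atomic_int  wspace;	/* analog of the writeable budget */

/* poll() side: would the socket report EPOLLOUT? */
bool check_writeable(void)
{
	if (atomic_load(&wspace) > 0)
		return true;

	atomic_store(&nospace, true);		   /* announce we will sleep */
	atomic_thread_fence(memory_order_seq_cst); /* pairs with write_space() */
	return atomic_load(&wspace) > 0;	   /* re-check: no lost wakeup */
}

/* transmit side: space was just released */
void write_space(int freed)
{
	atomic_fetch_add(&wspace, freed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_exchange(&nospace, false)) {
		/* wake the poll waiter here */
	}
}
```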
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3517f2d24a22..f16edef6026a 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -113,10 +113,9 @@
#define MPTCP_RST_TRANSIENT BIT(0)
/* MPTCP socket atomic flags */
-#define MPTCP_NOSPACE 1
-#define MPTCP_WORK_RTX 2
-#define MPTCP_FALLBACK_DONE 4
-#define MPTCP_WORK_CLOSE_SUBFLOW 5
+#define MPTCP_WORK_RTX 1
+#define MPTCP_FALLBACK_DONE 2
+#define MPTCP_WORK_CLOSE_SUBFLOW 3
/* MPTCP socket release cb flags */
#define MPTCP_PUSH_PENDING 1
@@ -260,8 +259,10 @@ struct mptcp_data_frag {
struct mptcp_sock {
/* inet_connection_sock must be the first member */
struct inet_connection_sock sk;
- u64 local_key;
- u64 remote_key;
+ u64 local_key; /* protected by the first subflow socket lock;
+ * lockless read access
+ */
+ u64 remote_key; /* same as above */
u64 write_seq;
u64 bytes_sent;
u64 snd_nxt;
@@ -286,7 +287,6 @@ struct mptcp_sock {
int rmem_released;
unsigned long flags;
unsigned long cb_flags;
- unsigned long push_pending;
bool recovery; /* closing subflow write queue reinjected */
bool can_ack;
bool fully_established;
@@ -305,7 +305,9 @@ struct mptcp_sock {
nodelay:1,
fastopening:1,
in_accept_queue:1,
- free_first:1;
+ free_first:1,
+ rcvspace_init:1;
+ u32 notsent_lowat;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
@@ -341,12 +343,30 @@ struct mptcp_sock {
#define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp) \
list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node)
+extern struct genl_family mptcp_genl_family;
+
static inline void msk_owned_by_me(const struct mptcp_sock *msk)
{
sock_owned_by_me((const struct sock *)msk);
}
+#ifdef CONFIG_DEBUG_NET
+/* MPTCP-specific: we might (indirectly) call this helper with the wrong sk */
+#undef tcp_sk
+#define tcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_TCP); \
+ container_of_const(_ptr, struct tcp_sock, inet_conn.icsk_inet.sk); \
+})
+#define mptcp_sk(ptr) ({ \
+ typeof(ptr) _ptr = (ptr); \
+ WARN_ON(_ptr->sk_protocol != IPPROTO_MPTCP); \
+ container_of_const(_ptr, struct mptcp_sock, sk.icsk_inet.sk); \
+})
+
+#else /* !CONFIG_DEBUG_NET */
#define mptcp_sk(ptr) container_of_const(ptr, struct mptcp_sock, sk.icsk_inet.sk)
+#endif
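Under CONFIG_DEBUG_NET the casts above gain a runtime check that the socket really carries the expected protocol before the downcast. A hedged user-space illustration of the same container_of-plus-assert idea (types and names are made up; 262 matches IPPROTO_MPTCP):

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sock_base { int protocol; };		/* analog of struct sock */
struct mptcp_like { struct sock_base sk; int token; };

#define PROTO_MPTCP 262

static struct mptcp_like *mptcp_cast(struct sock_base *sk)
{
	assert(sk->protocol == PROTO_MPTCP);	/* analog of WARN_ON() */
	return container_of(sk, struct mptcp_like, sk);
}

int main(void)
{
	struct mptcp_like m = { .sk = { .protocol = PROTO_MPTCP }, .token = 7 };

	printf("%d\n", mptcp_cast(&m.sk)->token);	/* prints 7 */
	return 0;
}
```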
 /* the msk socket doesn't use the backlog; also account for the bulk
* free memory
@@ -400,7 +420,7 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (msk->snd_una == READ_ONCE(msk->snd_nxt))
+ if (msk->snd_una == msk->snd_nxt)
return NULL;
return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
@@ -491,10 +511,9 @@ struct mptcp_subflow_context {
remote_key_valid : 1, /* received the peer key from */
disposable : 1, /* ctx can be free at ulp release time */
stale : 1, /* unable to snd/rcv data, do not use for xmit */
- local_id_valid : 1, /* local_id is correctly initialized */
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
- __unused : 9;
+ __unused : 10;
bool data_avail;
bool scheduled;
u32 remote_nonce;
@@ -505,7 +524,7 @@ struct mptcp_subflow_context {
u8 hmac[MPTCPOPT_HMAC_LEN]; /* MPJ subflow only */
u64 iasn; /* initial ack sequence number, MPC subflows only */
};
- u8 local_id;
+ s16 local_id; /* if negative not initialized yet */
u8 remote_id;
u8 reset_seen:1;
u8 reset_transient:1;
@@ -556,6 +575,7 @@ mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
{
memset(&subflow->reset, 0, sizeof(subflow->reset));
subflow->request_mptcp = 1;
+ WRITE_ONCE(subflow->local_id, -1);
}
static inline u64
@@ -622,8 +642,9 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net);
unsigned int mptcp_close_timeout(const struct sock *sk);
int mptcp_get_pm_type(const struct net *net);
const char *mptcp_get_scheduler(const struct net *net);
-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
- const struct mptcp_options_received *mp_opt);
+void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow,
+ const struct mptcp_options_received *mp_opt);
bool __mptcp_retransmit_pending_data(struct sock *sk);
void mptcp_check_and_set_pending(struct sock *sk);
void __mptcp_push_pending(struct sock *sk, unsigned int flags);
@@ -789,6 +810,38 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}
+static inline u32 mptcp_notsent_lowat(const struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ u32 val;
+
+ val = READ_ONCE(mptcp_sk(sk)->notsent_lowat);
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+}
+
+static inline bool mptcp_stream_memory_free(const struct sock *sk, int wake)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ u32 notsent_bytes;
+
+ notsent_bytes = READ_ONCE(msk->write_seq) - READ_ONCE(msk->snd_nxt);
+ return (notsent_bytes << wake) < mptcp_notsent_lowat(sk);
+}
+
+static inline bool __mptcp_stream_is_writeable(const struct sock *sk, int wake)
+{
+ return mptcp_stream_memory_free(sk, wake) &&
+ __sk_stream_is_writeable(sk, wake);
+}
+
+static inline void mptcp_write_space(struct sock *sk)
+{
+ /* pairs with memory barrier in mptcp_poll */
+ smp_mb();
+ if (mptcp_stream_memory_free(sk, 1))
+ sk_stream_write_space(sk);
+}
+
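mptcp_stream_memory_free() mirrors its TCP counterpart: with wake == 1 the unsent byte count is doubled before the low-water comparison, so poll() only reports writeability when comfortably more than half the budget remains. Worked numbers under that assumption:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Analog of the (notsent_bytes << wake) < lowat test above. */
static bool stream_memory_free(uint32_t notsent_bytes, int wake,
			       uint32_t lowat)
{
	return ((uint64_t)notsent_bytes << wake) < lowat;
}

int main(void)
{
	/* 16 KiB lowat: the plain check passes with 10000 unsent bytes, */
	/* the wake==1 check used for poll() does not (20000 >= 16384).  */
	printf("%d %d\n",
	       stream_memory_free(10000, 0, 16384),	/* 1 */
	       stream_memory_free(10000, 1, 16384));	/* 0 */
	return 0;
}
```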
static inline void __mptcp_sync_sndbuf(struct sock *sk)
{
struct mptcp_subflow_context *subflow;
@@ -807,6 +860,7 @@ static inline void __mptcp_sync_sndbuf(struct sock *sk)
/* the msk max wmem limit is <nr_subflows> * tcp wmem[2] */
WRITE_ONCE(sk->sk_sndbuf, new_sndbuf);
+ mptcp_write_space(sk);
}
 /* The caller holds both the msk socket and the subflow socket locks,
@@ -837,16 +891,6 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
local_bh_enable();
}
-static inline void mptcp_write_space(struct sock *sk)
-{
- if (sk_stream_is_writeable(sk)) {
- /* pairs with memory barrier in mptcp_poll */
- smp_mb();
- if (test_and_clear_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags))
- sk_stream_write_space(sk);
- }
-}
-
void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
#define MPTCP_TOKEN_MAX_RETRIES 4
@@ -926,21 +970,15 @@ int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int
int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
unsigned int id,
u8 *flags, int *ifindex);
-int mptcp_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
-int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup);
-int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token,
- struct mptcp_pm_addr_entry *loc,
- struct mptcp_pm_addr_entry *rem, u8 bkup);
+int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
-void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
- struct list_head *rm_list);
void mptcp_free_local_addr_list(struct mptcp_sock *msk);
@@ -952,10 +990,12 @@ void mptcp_event_pm_listener(const struct sock *ssk,
enum mptcp_event_type event);
bool mptcp_userspace_pm_active(const struct mptcp_sock *msk);
-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
- const struct mptcp_options_received *mp_opt);
+void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+ const struct mptcp_options_received *mp_opt);
void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
struct request_sock *req);
+int mptcp_nl_fill_addr(struct sk_buff *skb,
+ struct mptcp_pm_addr_entry *entry);
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
{
@@ -1020,6 +1060,24 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
+int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_userspace_pm_dump_addr(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info);
+int mptcp_userspace_pm_get_addr(struct sk_buff *skb,
+ struct genl_info *info);
+
+static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
+{
+ int local_id = READ_ONCE(subflow->local_id);
+
+ if (local_id < 0)
+ return 0;
+ return local_id;
+}
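Storing local_id as a signed 16-bit value lets -1 stand in for "not yet assigned", which is why the separate local_id_valid bit disappears from the subflow context above. The shape of the pattern, with hypothetical names:

```c
#include <stdint.h>

struct subflow_like {
	int16_t local_id;	/* negative: not initialized yet */
};

static void subflow_init(struct subflow_like *sf)
{
	sf->local_id = -1;	/* one store replaces a separate valid flag */
}

static uint8_t subflow_get_local_id(const struct subflow_like *sf)
{
	return sf->local_id < 0 ? 0 : (uint8_t)sf->local_id;
}
```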
void __init mptcp_pm_nl_init(void);
void mptcp_pm_nl_work(struct mptcp_sock *msk);
@@ -1128,7 +1186,8 @@ static inline bool subflow_simultaneous_connect(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1) &&
+ return (1 << sk->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING) &&
is_active_ssk(subflow) &&
!subflow->conn_finished;
}
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index c40f1428e602..dcd1c76d2a3b 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -624,20 +624,11 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
return ret;
}
-static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- int val;
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
-
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->cork = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -649,25 +640,15 @@ static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optva
}
if (!val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
return 0;
}
-static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int __mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- int val;
-
- if (optlen < sizeof(int))
- return -EINVAL;
-
- if (copy_from_sockptr(&val, optval, sizeof(val)))
- return -EFAULT;
- lock_sock(sk);
sockopt_seq_inc(msk);
msk->nodelay = !!val;
mptcp_for_each_subflow(msk, subflow) {
@@ -679,8 +660,6 @@ static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t op
}
if (val)
mptcp_check_and_set_pending(sk);
- release_sock(sk);
-
return 0;
}
@@ -803,25 +782,10 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
int ret, val;
switch (optname) {
- case TCP_INQ:
- ret = mptcp_get_int_option(msk, optval, optlen, &val);
- if (ret)
- return ret;
- if (val < 0 || val > 1)
- return -EINVAL;
-
- lock_sock(sk);
- msk->recvmsg_inq = !!val;
- release_sock(sk);
- return 0;
case TCP_ULP:
return -EOPNOTSUPP;
case TCP_CONGESTION:
return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
- case TCP_CORK:
- return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
- case TCP_NODELAY:
- return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
case TCP_DEFER_ACCEPT:
/* See tcp.c: TCP_DEFER_ACCEPT does not fail */
mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen);
@@ -834,7 +798,34 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
optval, optlen);
}
- return -EOPNOTSUPP;
+ ret = mptcp_get_int_option(msk, optval, optlen, &val);
+ if (ret)
+ return ret;
+
+ lock_sock(sk);
+ switch (optname) {
+ case TCP_INQ:
+ if (val < 0 || val > 1)
+ ret = -EINVAL;
+ else
+ msk->recvmsg_inq = !!val;
+ break;
+ case TCP_NOTSENT_LOWAT:
+ WRITE_ONCE(msk->notsent_lowat, val);
+ mptcp_write_space(sk);
+ break;
+ case TCP_CORK:
+ ret = __mptcp_setsockopt_sol_tcp_cork(msk, val);
+ break;
+ case TCP_NODELAY:
+ ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
+ break;
+ default:
+ ret = -ENOPROTOOPT;
+ }
+
+ release_sock(sk);
+ return ret;
}
int mptcp_setsockopt(struct sock *sk, int level, int optname,
@@ -942,7 +933,7 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info)
mptcp_data_unlock(sk);
slow = lock_sock_fast(sk);
- info->mptcpi_csum_enabled = msk->csum_enabled;
+ info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled);
info->mptcpi_token = msk->token;
info->mptcpi_write_seq = msk->write_seq;
info->mptcpi_retransmits = inet_csk(sk)->icsk_retransmits;
@@ -1349,6 +1340,8 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_put_int_option(msk, optval, optlen, msk->cork);
case TCP_NODELAY:
return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
+ case TCP_NOTSENT_LOWAT:
+ return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
}
return -EOPNOTSUPP;
}
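After this change TCP_NOTSENT_LOWAT is accepted on MPTCP sockets through the shared integer-option path above and mirrored by getsockopt(). A small usage sketch, assuming a kernel that carries this series (error handling trimmed):

```c
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
	int lowat = 16 * 1024;	/* wake writers only below 16 KiB unsent */
	socklen_t len = sizeof(lowat);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat)))
		perror("setsockopt");	/* pre-series kernels reject this */
	if (!getsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, &len))
		printf("notsent_lowat=%d\n", lowat);
	return 0;
}
```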
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 0dcb721c89d1..1626dd20c68f 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -15,13 +15,11 @@
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
-#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/ip6_route.h>
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
-#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
@@ -75,7 +73,8 @@ static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_
get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
- subflow_generate_hmac(msk->local_key, msk->remote_key,
+ subflow_generate_hmac(READ_ONCE(msk->local_key),
+ READ_ONCE(msk->remote_key),
subflow_req->local_nonce,
subflow_req->remote_nonce, hmac);
@@ -421,29 +420,26 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
void __mptcp_sync_state(struct sock *sk, int state)
{
+ struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sock *ssk = msk->first;
+
+ subflow = mptcp_subflow_ctx(ssk);
+ __mptcp_propagate_sndbuf(sk, ssk);
+ if (!msk->rcvspace_init)
+ mptcp_rcv_space_init(msk, ssk);
- __mptcp_propagate_sndbuf(sk, msk->first);
if (sk->sk_state == TCP_SYN_SENT) {
+ /* subflow->idsn is always available in TCP_SYN_SENT state,
+ * even for the FASTOPEN scenarios
+ */
+ WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
+ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
mptcp_set_state(sk, state);
sk->sk_state_change(sk);
}
}
-static void mptcp_propagate_state(struct sock *sk, struct sock *ssk)
-{
- struct mptcp_sock *msk = mptcp_sk(sk);
-
- mptcp_data_lock(sk);
- if (!sock_owned_by_user(sk)) {
- __mptcp_sync_state(sk, ssk->sk_state);
- } else {
- msk->pending_state = ssk->sk_state;
- __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
- }
- mptcp_data_unlock(sk);
-}
-
static void subflow_set_remote_key(struct mptcp_sock *msk,
struct mptcp_subflow_context *subflow,
const struct mptcp_options_received *mp_opt)
@@ -465,6 +461,31 @@ static void subflow_set_remote_key(struct mptcp_sock *msk,
atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
}
+static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ const struct mptcp_options_received *mp_opt)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+ mptcp_data_lock(sk);
+ if (mp_opt) {
+ /* Options are available only in the non-fallback cases;
+ * avoid updating rx path fields otherwise
+ */
+ WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
+ WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
+ subflow_set_remote_key(msk, subflow, mp_opt);
+ }
+
+ if (!sock_owned_by_user(sk)) {
+ __mptcp_sync_state(sk, ssk->sk_state);
+ } else {
+ msk->pending_state = ssk->sk_state;
+ __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
+ }
+ mptcp_data_unlock(sk);
+}
+
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -499,10 +520,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
if (mp_opt.deny_join_id0)
WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
subflow->mp_capable = 1;
- subflow_set_remote_key(msk, subflow, &mp_opt);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
- mptcp_propagate_state(parent, sk);
+ mptcp_propagate_state(parent, sk, subflow, &mp_opt);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -514,7 +534,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->backup = mp_opt.backup;
subflow->thmac = mp_opt.thmac;
subflow->remote_nonce = mp_opt.nonce;
- subflow->remote_id = mp_opt.join_id;
+ WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
subflow, subflow->thmac, subflow->remote_nonce,
subflow->backup);
@@ -545,8 +565,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
}
} else if (mptcp_check_fallback(sk)) {
fallback:
- mptcp_rcv_space_init(msk, sk);
- mptcp_propagate_state(parent, sk);
+ mptcp_propagate_state(parent, sk, subflow, NULL);
}
return;
@@ -557,8 +576,8 @@ do_reset:
static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
{
- subflow->local_id = local_id;
- subflow->local_id_valid = 1;
+ WARN_ON_ONCE(local_id < 0 || local_id > 255);
+ WRITE_ONCE(subflow->local_id, local_id);
}
static int subflow_chk_local_id(struct sock *sk)
@@ -567,7 +586,7 @@ static int subflow_chk_local_id(struct sock *sk)
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
int err;
- if (likely(subflow->local_id_valid))
+ if (likely(subflow->local_id >= 0))
return 0;
err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
@@ -694,7 +713,8 @@ static bool subflow_hmac_valid(const struct request_sock *req,
if (!msk)
return false;
- subflow_generate_hmac(msk->remote_key, msk->local_key,
+ subflow_generate_hmac(READ_ONCE(msk->remote_key),
+ READ_ONCE(msk->local_key),
subflow_req->remote_nonce,
subflow_req->local_nonce, hmac);
@@ -731,17 +751,16 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
kfree_rcu(ctx, rcu);
}
-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
- const struct mptcp_options_received *mp_opt)
+void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow,
+ const struct mptcp_options_received *mp_opt)
{
- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-
subflow_set_remote_key(msk, subflow, mp_opt);
subflow->fully_established = 1;
WRITE_ONCE(msk->fully_established, true);
if (subflow->is_mptfo)
- mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
+ __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
}
static struct sock *subflow_syn_recv_sock(const struct sock *sk,
@@ -834,7 +853,6 @@ create_child:
* mpc option
*/
if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
- mptcp_subflow_fully_established(ctx, &mp_opt);
mptcp_pm_fully_established(owner, child);
ctx->pm_notified = 1;
}
@@ -1530,8 +1548,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
&flags, &ifindex);
subflow->remote_key_valid = 1;
- subflow->remote_key = msk->remote_key;
- subflow->local_key = msk->local_key;
+ subflow->remote_key = READ_ONCE(msk->remote_key);
+ subflow->local_key = READ_ONCE(msk->local_key);
subflow->token = msk->token;
mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
@@ -1549,7 +1567,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
remote_token, local_id, remote_id);
subflow->remote_token = remote_token;
- subflow->remote_id = remote_id;
+ WRITE_ONCE(subflow->remote_id, remote_id);
subflow->request_join = 1;
subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
subflow->subflow_id = msk->subflow_id++;
@@ -1713,6 +1731,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
pr_debug("subflow=%p", ctx);
ctx->tcp_sock = sk;
+ WRITE_ONCE(ctx->local_id, -1);
return ctx;
}
@@ -1744,10 +1763,9 @@ static void subflow_state_change(struct sock *sk)
msk = mptcp_sk(parent);
if (subflow_simultaneous_connect(sk)) {
mptcp_do_fallback(sk);
- mptcp_rcv_space_init(msk, sk);
pr_fallback(msk);
subflow->conn_finished = 1;
- mptcp_propagate_state(parent, sk);
+ mptcp_propagate_state(parent, sk, subflow, NULL);
}
/* as recvmsg() does not acquire the subflow socket for ssk selection
@@ -1949,14 +1967,14 @@ static void subflow_ulp_clone(const struct request_sock *req,
new_ctx->idsn = subflow_req->idsn;
/* this is the first subflow, id is always 0 */
- new_ctx->local_id_valid = 1;
+ subflow_set_local_id(new_ctx, 0);
} else if (subflow_req->mp_join) {
new_ctx->ssn_offset = subflow_req->ssn_offset;
new_ctx->mp_join = 1;
new_ctx->fully_established = 1;
new_ctx->remote_key_valid = 1;
new_ctx->backup = subflow_req->backup;
- new_ctx->remote_id = subflow_req->remote_id;
+ WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
new_ctx->token = subflow_req->token;
new_ctx->thmac = subflow_req->thmac;
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index bfff53e668da..4fc39fa2e262 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -52,14 +52,19 @@ static struct mptcp_subflow_context *build_ctx(struct kunit *test)
static struct mptcp_sock *build_msk(struct kunit *test)
{
struct mptcp_sock *msk;
+ struct sock *sk;
msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
sock_net_set((struct sock *)msk, &init_net);
+ sk = (struct sock *)msk;
+
/* be sure the token helpers can dereference sk->sk_prot */
- ((struct sock *)msk)->sk_prot = &tcp_prot;
+ sk->sk_prot = &tcp_prot;
+ sk->sk_protocol = IPPROTO_MPTCP;
+
return msk;
}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 441d1f134110..df2dc21304ef 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -818,7 +818,7 @@ config NETFILTER_XT_TARGET_AUDIT
config NETFILTER_XT_TARGET_CHECKSUM
tristate "CHECKSUM target support"
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `CHECKSUM' target, which can be used in the iptables mangle
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK
config NETFILTER_XT_TARGET_CT
tristate '"CT" target support'
depends on NF_CONNTRACK
- depends on IP_NF_RAW || IP6_NF_RAW
+ depends on IP_NF_RAW || IP6_NF_RAW || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
 This option adds a `CT' target, which allows you to specify initial
@@ -880,7 +880,7 @@ config NETFILTER_XT_TARGET_CT
config NETFILTER_XT_TARGET_DSCP
tristate '"DSCP" and "TOS" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a `DSCP' target, which allows you to manipulate
@@ -896,7 +896,7 @@ config NETFILTER_XT_TARGET_DSCP
config NETFILTER_XT_TARGET_HL
tristate '"HL" hoplimit target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds the "HL" (for IPv6) and "TTL" (for IPv4)
@@ -1080,7 +1080,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
- depends on IP_NF_MANGLE
+ depends on IP_NF_MANGLE || NFT_COMPAT
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
select NF_TPROXY_IPV4
@@ -1147,7 +1147,7 @@ config NETFILTER_XT_TARGET_TCPMSS
config NETFILTER_XT_TARGET_TCPOPTSTRIP
tristate '"TCPOPTSTRIP" target support'
- depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE || NFT_COMPAT
depends on NETFILTER_ADVANCED
help
This option adds a "TCPOPTSTRIP" target, which allows you to strip
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 21f7860e8fa1..cb48a2b9cb9f 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -30,6 +30,7 @@
#define mtype_del IPSET_TOKEN(MTYPE, _del)
#define mtype_list IPSET_TOKEN(MTYPE, _list)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
+#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc)
#define mtype MTYPE
#define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id)))
@@ -59,9 +60,6 @@ mtype_destroy(struct ip_set *set)
{
struct mtype *map = set->data;
- if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&map->gc);
-
if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
ip_set_free(map->members);
@@ -290,6 +288,15 @@ mtype_gc(struct timer_list *t)
add_timer(&map->gc);
}
+static void
+mtype_cancel_gc(struct ip_set *set)
+{
+ struct mtype *map = set->data;
+
+ if (SET_WITH_TIMEOUT(set))
+ del_timer_sync(&map->gc);
+}
+
static const struct ip_set_type_variant mtype = {
.kadt = mtype_kadt,
.uadt = mtype_uadt,
@@ -303,6 +310,7 @@ static const struct ip_set_type_variant mtype = {
.head = mtype_head,
.list = mtype_list,
.same_set = mtype_same_set,
+ .cancel_gc = mtype_cancel_gc,
};
#endif /* __IP_SET_BITMAP_IP_GEN_H */
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 4c133e06be1d..3184cc6be4c9 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1154,6 +1154,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
return ret;
cleanup:
+ set->variant->cancel_gc(set);
set->variant->destroy(set);
put_out:
module_put(set->type->me);
@@ -1182,6 +1183,14 @@ ip_set_destroy_set(struct ip_set *set)
kfree(set);
}
+static void
+ip_set_destroy_set_rcu(struct rcu_head *head)
+{
+ struct ip_set *set = container_of(head, struct ip_set, rcu);
+
+ ip_set_destroy_set(set);
+}
+
static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const attr[])
{
@@ -1193,8 +1202,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
- /* Must wait for flush to be really finished in list:set */
- rcu_barrier();
/* Commands are serialized and references are
* protected by the ip_set_ref_lock.
@@ -1206,8 +1213,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
* counter, so if it's already zero, we can proceed
* without holding the lock.
*/
- read_lock_bh(&ip_set_ref_lock);
if (!attr[IPSET_ATTR_SETNAME]) {
+ /* Must wait for flush to be really finished in list:set */
+ rcu_barrier();
+ read_lock_bh(&ip_set_ref_lock);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
if (s && (s->ref || s->ref_netlink)) {
@@ -1221,6 +1230,8 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
s = ip_set(inst, i);
if (s) {
ip_set(inst, i) = NULL;
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
ip_set_destroy_set(s);
}
}
@@ -1228,6 +1239,9 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
inst->is_destroyed = false;
} else {
u32 flags = flag_exist(info->nlh);
+ u16 features = 0;
+
+ read_lock_bh(&ip_set_ref_lock);
s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
&i);
if (!s) {
@@ -1238,10 +1252,16 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
ret = -IPSET_ERR_BUSY;
goto out;
}
+ features = s->type->features;
ip_set(inst, i) = NULL;
read_unlock_bh(&ip_set_ref_lock);
-
- ip_set_destroy_set(s);
+ if (features & IPSET_TYPE_NAME) {
+ /* Must wait for flush to be really finished */
+ rcu_barrier();
+ }
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
+ call_rcu(&s->rcu, ip_set_destroy_set_rcu);
}
return 0;
out:
@@ -1394,9 +1414,6 @@ static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info,
ip_set(inst, to_id) = from;
write_unlock_bh(&ip_set_ref_lock);
- /* Make sure all readers of the old set pointers are completed. */
- synchronize_rcu();
-
return 0;
}
@@ -2362,6 +2379,7 @@ ip_set_net_exit(struct net *net)
set = ip_set(inst, i);
if (set) {
ip_set(inst, i) = NULL;
+ set->variant->cancel_gc(set);
ip_set_destroy_set(set);
}
}
@@ -2409,8 +2427,11 @@ ip_set_fini(void)
{
nf_unregister_sockopt(&so_set);
nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-
unregister_pernet_subsys(&ip_set_net_ops);
+
+ /* Wait for call_rcu() in destroy */
+ rcu_barrier();
+
pr_debug("these are the famous last words\n");
}
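The recurring theme of these ipset hunks is ordering: the garbage-collection timer must be stopped and awaited before the set's memory is (possibly RCU-deferred) freed, or the timer can fire on freed data. A hedged user-space analog with a worker thread standing in for the kernel timer (all names illustrative):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

struct set_like {
	pthread_t gc;
	_Atomic bool stop;
	int *members;		/* data the GC walks */
};

static void *gc_worker(void *arg)
{
	struct set_like *set = arg;

	while (!set->stop)	/* periodic expiry pass */
		usleep(1000);
	return NULL;
}

static void cancel_gc(struct set_like *set)
{
	set->stop = true;
	pthread_join(set->gc, NULL);	/* analog of del_timer_sync() */
}

int main(void)
{
	struct set_like set = { .members = calloc(16, sizeof(int)) };

	pthread_create(&set.gc, NULL, gc_worker, &set);
	/* destroy path: cancel FIRST, only then free */
	cancel_gc(&set);
	free(set.members);
	return 0;
}
```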
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index cbf80da9a01c..cf3ce72c3de6 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -222,6 +222,7 @@ static const union nf_inet_addr zeromask = {};
#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
+#undef mtype_cancel_gc
#undef mtype_variant
#undef mtype_data_match
@@ -266,6 +267,7 @@ static const union nf_inet_addr zeromask = {};
#define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc)
#define mtype_variant IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match IPSET_TOKEN(MTYPE, _data_match)
@@ -430,7 +432,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
u32 i;
for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference(hbucket(t, i));
+ n = (__force struct hbucket *)hbucket(t, i);
if (!n)
continue;
if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
@@ -450,10 +452,7 @@ mtype_destroy(struct ip_set *set)
struct htype *h = set->data;
struct list_head *l, *lt;
- if (SET_WITH_TIMEOUT(set))
- cancel_delayed_work_sync(&h->gc.dwork);
-
- mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
+ mtype_ahash_destroy(set, (__force struct htable *)h->table, true);
list_for_each_safe(l, lt, &h->ad) {
list_del(l);
kfree(l);
@@ -599,6 +598,15 @@ mtype_gc_init(struct htable_gc *gc)
queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}
+static void
+mtype_cancel_gc(struct ip_set *set)
+{
+ struct htype *h = set->data;
+
+ if (SET_WITH_TIMEOUT(set))
+ cancel_delayed_work_sync(&h->gc.dwork);
+}
+
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 flags);
@@ -1441,6 +1449,7 @@ static const struct ip_set_type_variant mtype_variant = {
.uref = mtype_uref,
.resize = mtype_resize,
.same_set = mtype_same_set,
+ .cancel_gc = mtype_cancel_gc,
.region_lock = true,
};
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index e162636525cf..6c3f28bc59b3 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -426,9 +426,6 @@ list_set_destroy(struct ip_set *set)
struct list_set *map = set->data;
struct set_elem *e, *n;
- if (SET_WITH_TIMEOUT(set))
- timer_shutdown_sync(&map->gc);
-
list_for_each_entry_safe(e, n, &map->members, list) {
list_del(&e->list);
ip_set_put_byindex(map->net, e->id);
@@ -545,6 +542,15 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
a->extensions == b->extensions;
}
+static void
+list_set_cancel_gc(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ if (SET_WITH_TIMEOUT(set))
+ timer_shutdown_sync(&map->gc);
+}
+
static const struct ip_set_type_variant set_variant = {
.kadt = list_set_kadt,
.uadt = list_set_uadt,
@@ -558,6 +564,7 @@ static const struct ip_set_type_variant set_variant = {
.head = list_set_head,
.list = list_set_list,
.same_set = list_set_same_set,
+ .cancel_gc = list_set_cancel_gc,
};
static void
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a743db073887..98d7dbe3d787 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1511,9 +1511,7 @@ int __init ip_vs_conn_init(void)
return -ENOMEM;
/* Allocate ip_vs_conn slab cache */
- ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
- sizeof(struct ip_vs_conn), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ ip_vs_conn_cachep = KMEM_CACHE(ip_vs_conn, SLAB_HWCACHE_ALIGN);
if (!ip_vs_conn_cachep) {
kvfree(ip_vs_conn_tab);
return -ENOMEM;
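These conversions rely on the KMEM_CACHE() helper, which derives the cache name, object size and alignment from the struct tag itself, so the call site cannot drift out of sync with the type. Paraphrased from <linux/slab.h>, the helper is essentially:

```c
/* Paraphrase of KMEM_CACHE() from <linux/slab.h>: the name comes from
 * stringizing the struct tag, size and alignment from the type itself.
 */
#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)
```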
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index 0e4beae421f8..5257d5e7eb09 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -314,7 +314,7 @@ static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
static const struct bpf_func_proto *
bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
- return bpf_base_func_proto(func_id);
+ return bpf_base_func_proto(func_id, prog);
}
const struct bpf_verifier_ops netfilter_verifier_ops = {
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 5d8ed6c90b7e..8715617b02fe 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -605,15 +605,11 @@ static int __init nf_conncount_modinit(void)
for (i = 0; i < CONNCOUNT_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]);
- conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
- sizeof(struct nf_conncount_tuple),
- 0, 0, NULL);
+ conncount_conn_cachep = KMEM_CACHE(nf_conncount_tuple, 0);
if (!conncount_conn_cachep)
return -ENOMEM;
- conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
- sizeof(struct nf_conncount_rb),
- 0, 0, NULL);
+ conncount_rb_cachep = KMEM_CACHE(nf_conncount_rb, 0);
if (!conncount_rb_cachep) {
kmem_cache_destroy(conncount_conn_cachep);
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 475358ec8212..d2492d050fe6 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -467,7 +467,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_ct_kfunc_set)
+BTF_KFUNCS_START(nf_ct_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_skb_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
@@ -478,7 +478,7 @@ BTF_ID_FLAGS(func, bpf_ct_set_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_timeout, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_set_status, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_ct_change_status, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_ct_kfunc_set)
+BTF_KFUNCS_END(nf_ct_kfunc_set)
static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2e5f3864d353..c63868666bd9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2530,7 +2530,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
* netfilter framework. Roll on, two-stage module
* delete...
*/
- synchronize_net();
+ synchronize_rcu_expedited();
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
@@ -2756,6 +2756,7 @@ static const struct nf_ct_hook nf_conntrack_hook = {
.get_tuple_skb = nf_conntrack_get_tuple_skb,
.attach = nf_conntrack_attach,
.set_closing = nf_conntrack_set_closing,
+ .confirm = __nf_conntrack_confirm,
};
void nf_conntrack_init_end(void)
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 81ca348915c9..21fa550966f0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -722,9 +722,7 @@ int nf_conntrack_expect_init(void)
nf_ct_expect_hsize = 1;
}
nf_ct_expect_max = nf_ct_expect_hsize * 4;
- nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
- sizeof(struct nf_conntrack_expect),
- 0, 0, NULL);
+ nf_ct_expect_cachep = KMEM_CACHE(nf_conntrack_expect, 0);
if (!nf_ct_expect_cachep)
return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index e697a824b001..540d97715bd2 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -533,6 +533,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
/* Get fields bitmap */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
+ if (f->sz > 32)
+ return H323_ERROR_RANGE;
bmp = get_bitmap(bs, f->sz);
if (base)
*(unsigned int *)base = bmp;
@@ -589,6 +591,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
bmp2_len = get_bits(bs, 7) + 1;
if (nf_h323_error_boundary(bs, 0, bmp2_len))
return H323_ERROR_BOUND;
+ if (bmp2_len > 32)
+ return H323_ERROR_RANGE;
bmp2 = get_bitmap(bs, bmp2_len);
bmp |= bmp2 >> f->sz;
if (base)
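Both added checks reject field widths above 32 before get_bitmap() runs, because shifting a 32-bit quantity by 32 or more is undefined behaviour in C. A bounds-checked reader sketch (hypothetical helper, not the actual decoder):

```c
#include <stdint.h>

#define ERANGE_H323 (-2)	/* analog of H323_ERROR_RANGE */

/* Extract the top 'len' bits of 'word' as a left-aligned bitmap. */
static int get_bitmap_checked(uint32_t word, unsigned int len,
			      uint32_t *bmp)
{
	uint32_t mask;

	if (len > 32)		/* shifting a u32 by >= 32 is UB */
		return ERANGE_H323;

	mask = len == 32 ? ~0u : ~(~0u >> len);	/* top 'len' bits set */
	*bmp = word & mask;
	return 0;
}
```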
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0c22a02c2035..3b846cbdc050 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -876,6 +876,7 @@ struct ctnetlink_filter_u32 {
struct ctnetlink_filter {
u8 family;
+ bool zone_filter;
u_int32_t orig_flags;
u_int32_t reply_flags;
@@ -992,9 +993,12 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
if (err)
goto err_filter;
- err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
- if (err < 0)
- goto err_filter;
+ if (cda[CTA_ZONE]) {
+ err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
+ if (err < 0)
+ goto err_filter;
+ filter->zone_filter = true;
+ }
if (!cda[CTA_FILTER])
return filter;
@@ -1148,7 +1152,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
if (filter->family && nf_ct_l3num(ct) != filter->family)
goto ignore_entry;
- if (filter->zone.id != NF_CT_DEFAULT_ZONE_ID &&
+ if (filter->zone_filter &&
!nf_ct_zone_equal_any(ct, &filter->zone))
goto ignore_entry;
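The fix swaps a sentinel comparison (zone id different from the default) for an explicit presence flag, so that filtering on the default zone 0 becomes expressible. The general shape of the pattern, with illustrative names:

```c
#include <stdbool.h>
#include <stdint.h>

struct filter_like {
	bool     zone_set;	/* attribute was present in the request */
	uint16_t zone;		/* meaningful only if zone_set */
};

/* Return true when the entry should be skipped by the dump. */
static bool zone_mismatch(const struct filter_like *f, uint16_t entry_zone)
{
	/* With a bare sentinel, "filter on zone 0" was indistinguishable
	 * from "no zone filter"; the presence flag removes the ambiguity.
	 */
	return f->zone_set && f->zone != entry_zone;
}
```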
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index c6bd533983c1..4cc97f971264 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -283,7 +283,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
pr_debug("Setting vtag %x for secondary conntrack\n",
sh->vtag);
ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
- } else {
+ } else if (sch->type == SCTP_CID_SHUTDOWN_ACK) {
/* If it is a shutdown ack OOTB packet, we expect a return
shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
pr_debug("Setting vtag %x for new conn OOTB\n",
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index e573be5afde7..ae493599a3ef 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -457,7 +457,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
- u32 end, u32 win)
+ u32 end, u32 win,
+ enum ip_conntrack_dir dir)
{
/* SYN-ACK in reply to a SYN
* or SYN from reply direction in simultaneous open.
@@ -471,7 +472,8 @@ static void tcp_init_sender(struct ip_ct_tcp_state *sender,
* Both sides must send the Window Scale option
* to enable window scaling in either direction.
*/
- if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+ if (dir == IP_CT_DIR_REPLY &&
+ !(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
sender->td_scale = 0;
receiver->td_scale = 0;
@@ -542,7 +544,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
if (tcph->syn) {
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
- end, win);
+ end, win, dir);
if (!tcph->ack)
/* Simultaneous open */
return NFCT_TCP_ACCEPT;
@@ -585,7 +587,7 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
*/
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
- end, win);
+ end, win, dir);
if (dir == IP_CT_DIR_REPLY && !tcph->ack)
return NFCT_TCP_ACCEPT;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 920a5a29ae1d..a0571339239c 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -87,12 +87,22 @@ static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
return 0;
}
+static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route,
+ enum flow_offload_tuple_dir dir)
+{
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+ route->tuple[dir].dst = NULL;
+
+ return dst;
+}
+
static int flow_offload_fill_route(struct flow_offload *flow,
- const struct nf_flow_route *route,
+ struct nf_flow_route *route,
enum flow_offload_tuple_dir dir)
{
struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
- struct dst_entry *dst = route->tuple[dir].dst;
+ struct dst_entry *dst = nft_route_dst_fetch(route, dir);
int i, j = 0;
switch (flow_tuple->l3proto) {
@@ -122,6 +132,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
ETH_ALEN);
flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
+ dst_release(dst);
break;
case FLOW_OFFLOAD_XMIT_XFRM:
case FLOW_OFFLOAD_XMIT_NEIGH:
@@ -146,7 +157,7 @@ static void nft_flow_dst_release(struct flow_offload *flow,
}
void flow_offload_route_init(struct flow_offload *flow,
- const struct nf_flow_route *route)
+ struct nf_flow_route *route)
{
flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 8cc52d2bd31b..370f8231385c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -31,10 +31,10 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
int i;
for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
- if (loggers[pf][i] == NULL)
+ log = nft_log_dereference(loggers[pf][i]);
+ if (!log)
continue;
- log = nft_log_dereference(loggers[pf][i]);
if (!strncasecmp(str_logger, log->name, strlen(log->name)))
return log;
}
@@ -156,6 +156,11 @@ int nf_logger_find_get(int pf, enum nf_log_type type)
struct nf_logger *logger;
int ret = -ENOENT;
+ if (pf >= ARRAY_SIZE(loggers))
+ return -EINVAL;
+ if (type >= NF_LOG_TYPE_MAX)
+ return -EINVAL;
+
if (pf == NFPROTO_INET) {
ret = nf_logger_find_get(NFPROTO_IPV4, type);
if (ret < 0)
@@ -193,11 +198,12 @@ void nf_logger_put(int pf, enum nf_log_type type)
return;
}
- BUG_ON(loggers[pf][type] == NULL);
-
rcu_read_lock();
logger = rcu_dereference(loggers[pf][type]);
- module_put(logger->me);
+ if (!logger)
+ WARN_ON_ONCE(1);
+ else
+ module_put(logger->me);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_logger_put);
diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c
index 6e3b2f58855f..481be15609b1 100644
--- a/net/netfilter/nf_nat_bpf.c
+++ b/net/netfilter/nf_nat_bpf.c
@@ -54,9 +54,9 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
__bpf_kfunc_end_defs();
-BTF_SET8_START(nf_nat_kfunc_set)
+BTF_KFUNCS_START(nf_nat_kfunc_set)
BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
-BTF_SET8_END(nf_nat_kfunc_set)
+BTF_KFUNCS_END(nf_nat_kfunc_set)
static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index c3d7ecbc777c..016c816d91cb 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -551,8 +551,11 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
find_free_id:
if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
- else
+ else if ((range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL) ||
+ maniptype != NF_NAT_MANIP_DST)
off = get_random_u16();
+ else
+ off = 0;
attempts = range_size;
if (attempts > NF_NAT_MAX_ATTEMPTS)
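The reworked branch randomises the first candidate offset only when randomisation was explicitly requested or when the source port is being rewritten; plain destination NAT now starts the search at offset 0, i.e. it tries to keep the original port. A compact restatement with hypothetical names:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

enum manip { MANIP_SRC, MANIP_DST };

/* Pick the first candidate offset into the port range. */
static uint16_t pick_offset(bool proto_offset, bool random_all,
			    enum manip mt, uint16_t cur, uint16_t base)
{
	if (proto_offset)
		return (uint16_t)(cur - base);	/* caller-fixed mapping */
	if (random_all || mt != MANIP_DST)
		return (uint16_t)rand();	/* randomised start */
	return 0;	/* plain DNAT: prefer the original port */
}
```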
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index e2f334f70281..7f12e56e6e52 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -248,109 +248,3 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
-
-static unsigned int nf_iterate(struct sk_buff *skb,
- struct nf_hook_state *state,
- const struct nf_hook_entries *hooks,
- unsigned int *index)
-{
- const struct nf_hook_entry *hook;
- unsigned int verdict, i = *index;
-
- while (i < hooks->num_hook_entries) {
- hook = &hooks->hooks[i];
-repeat:
- verdict = nf_hook_entry_hookfn(hook, skb, state);
- if (verdict != NF_ACCEPT) {
- *index = i;
- if (verdict != NF_REPEAT)
- return verdict;
- goto repeat;
- }
- i++;
- }
-
- *index = i;
- return NF_ACCEPT;
-}
-
-static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
-{
- switch (pf) {
-#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
- case NFPROTO_BRIDGE:
- return rcu_dereference(net->nf.hooks_bridge[hooknum]);
-#endif
- case NFPROTO_IPV4:
- return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
- case NFPROTO_IPV6:
- return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
- default:
- WARN_ON_ONCE(1);
- return NULL;
- }
-
- return NULL;
-}
-
-/* Caller must hold rcu read-side lock */
-void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
-{
- const struct nf_hook_entry *hook_entry;
- const struct nf_hook_entries *hooks;
- struct sk_buff *skb = entry->skb;
- const struct net *net;
- unsigned int i;
- int err;
- u8 pf;
-
- net = entry->state.net;
- pf = entry->state.pf;
-
- hooks = nf_hook_entries_head(net, pf, entry->state.hook);
-
- i = entry->hook_index;
- if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
- kfree_skb(skb);
- nf_queue_entry_free(entry);
- return;
- }
-
- hook_entry = &hooks->hooks[i];
-
- /* Continue traversal iff userspace said ok... */
- if (verdict == NF_REPEAT)
- verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
-
- if (verdict == NF_ACCEPT) {
- if (nf_reroute(skb, entry) < 0)
- verdict = NF_DROP;
- }
-
- if (verdict == NF_ACCEPT) {
-next_hook:
- ++i;
- verdict = nf_iterate(skb, &entry->state, hooks, &i);
- }
-
- switch (verdict & NF_VERDICT_MASK) {
- case NF_ACCEPT:
- case NF_STOP:
- local_bh_disable();
- entry->state.okfn(entry->state.net, entry->state.sk, skb);
- local_bh_enable();
- break;
- case NF_QUEUE:
- err = nf_queue(skb, &entry->state, i, verdict);
- if (err == 1)
- goto next_hook;
- break;
- case NF_STOLEN:
- break;
- default:
- kfree_skb(skb);
- }
-
- nf_queue_entry_free(entry);
-}
-EXPORT_SYMBOL(nf_reinject);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index fbbc4fd37349..5b140c12b7df 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -800,7 +800,7 @@ synproxy_build_ip_ipv6(struct net *net, struct sk_buff *skb,
skb_reset_network_header(skb);
iph = skb_put(skb, sizeof(*iph));
ip6_flow_hdr(iph, 0, 0);
- iph->hop_limit = net->ipv6.devconf_all->hop_limit;
+ iph->hop_limit = READ_ONCE(net->ipv6.devconf_all->hop_limit);
iph->nexthdr = IPPROTO_TCP;
iph->saddr = *saddr;
iph->daddr = *daddr;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4b55533ce5ca..e93f905e60b6 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -24,6 +24,7 @@
#include <net/sock.h>
#define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+#define NFT_SET_MAX_ANONLEN 16
unsigned int nf_tables_net_id __read_mostly;
@@ -683,15 +684,16 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
return err;
}
-static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
- struct nft_flowtable *flowtable)
+static struct nft_trans *
+nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+ struct nft_flowtable *flowtable)
{
struct nft_trans *trans;
trans = nft_trans_alloc(ctx, msg_type,
sizeof(struct nft_trans_flowtable));
if (trans == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
if (msg_type == NFT_MSG_NEWFLOWTABLE)
nft_activate_next(ctx->net, flowtable);
@@ -700,22 +702,22 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
nft_trans_flowtable(trans) = flowtable;
nft_trans_commit_list_add_tail(ctx->net, trans);
- return 0;
+ return trans;
}
static int nft_delflowtable(struct nft_ctx *ctx,
struct nft_flowtable *flowtable)
{
- int err;
+ struct nft_trans *trans;
- err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
- if (err < 0)
- return err;
+ trans = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
nft_deactivate_next(ctx->net, flowtable);
nft_use_dec(&ctx->table->use);
- return err;
+ return 0;
}
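
nft_trans_flowtable_add() now hands back the transaction object rather than an errno, which the NEWFLOWTABLE hunk further down needs for unwinding; failures travel inside the returned pointer via the ERR_PTR()/IS_ERR()/PTR_ERR() convention. A self-contained sketch of that convention, with simplified definitions in the spirit of include/linux/err.h:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct trans { int msg_type; };

static struct trans *trans_alloc(int msg_type)
{
    struct trans *t = malloc(sizeof(*t));

    if (!t)
        return ERR_PTR(-ENOMEM);   /* errno rides inside the pointer */
    t->msg_type = msg_type;
    return t;
}

int main(void)
{
    struct trans *t = trans_alloc(1);

    if (IS_ERR(t))
        return (int)-PTR_ERR(t);
    printf("allocated transaction type %d\n", t->msg_type);
    free(t);
    return 0;
}
```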
static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
@@ -1193,8 +1195,10 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
#define __NFT_TABLE_F_INTERNAL (NFT_TABLE_F_MASK + 1)
#define __NFT_TABLE_F_WAS_DORMANT (__NFT_TABLE_F_INTERNAL << 0)
#define __NFT_TABLE_F_WAS_AWAKEN (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_WAS_ORPHAN (__NFT_TABLE_F_INTERNAL << 2)
#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
- __NFT_TABLE_F_WAS_AWAKEN)
+ __NFT_TABLE_F_WAS_AWAKEN | \
+ __NFT_TABLE_F_WAS_ORPHAN)
static int nf_tables_updtable(struct nft_ctx *ctx)
{
@@ -1214,8 +1218,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if ((nft_table_has_owner(ctx->table) &&
!(flags & NFT_TABLE_F_OWNER)) ||
- (!nft_table_has_owner(ctx->table) &&
- flags & NFT_TABLE_F_OWNER))
+ (flags & NFT_TABLE_F_OWNER &&
+ !nft_table_is_orphan(ctx->table)))
+ return -EOPNOTSUPP;
+
+ if ((flags ^ ctx->table->flags) & NFT_TABLE_F_PERSIST)
return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */
@@ -1244,12 +1251,20 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
}
}
+ if ((flags & NFT_TABLE_F_OWNER) &&
+ !nft_table_has_owner(ctx->table)) {
+ ctx->table->nlpid = ctx->portid;
+ ctx->table->flags |= NFT_TABLE_F_OWNER |
+ __NFT_TABLE_F_WAS_ORPHAN;
+ }
+
nft_trans_table_update(trans) = true;
nft_trans_commit_list_add_tail(ctx->net, trans);
return 0;
err_register_hooks:
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
nft_trans_destroy(trans);
return ret;
}
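
__NFT_TABLE_F_WAS_ORPHAN is a private flag parked just above the UAPI bit mask; it records that this transaction attached an owner to a previously orphaned table, so the abort hunk further down can undo exactly that change. A toy model of the "internal bits above a UAPI mask" bookkeeping; all names here are hypothetical:

```c
#include <assert.h>
#include <stdint.h>

#define F_DORMANT    0x1u
#define F_OWNER      0x2u
#define F_MASK       0x3u               /* UAPI-visible bits */

#define F_INTERNAL   (F_MASK + 1)       /* first private bit */
#define F_WAS_ORPHAN (F_INTERNAL << 2)

struct table { uint32_t flags; uint32_t nlpid; };

static void adopt(struct table *t, uint32_t portid)
{
    t->nlpid = portid;
    t->flags |= F_OWNER | F_WAS_ORPHAN; /* remember it for abort */
}

static void abort_update(struct table *t)
{
    if (t->flags & F_WAS_ORPHAN) {
        t->flags &= ~F_OWNER;           /* undo the adoption */
        t->nlpid = 0;
    }
    t->flags &= ~F_WAS_ORPHAN;          /* drop private state */
}

int main(void)
{
    struct table t = { .flags = 0 };

    adopt(&t, 1234);
    abort_update(&t);
    assert(!(t.flags & F_OWNER) && t.nlpid == 0);
    return 0;
}
```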
@@ -2079,7 +2094,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
struct nft_hook *hook;
int err;
- hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
+ hook = kzalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
if (!hook) {
err = -ENOMEM;
goto err_hook_alloc;
@@ -2502,19 +2517,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
RCU_INIT_POINTER(chain->blob_gen_0, blob);
RCU_INIT_POINTER(chain->blob_gen_1, blob);
- err = nf_tables_register_hook(net, table, chain);
- if (err < 0)
- goto err_destroy_chain;
-
if (!nft_use_inc(&table->use)) {
err = -EMFILE;
- goto err_use;
+ goto err_destroy_chain;
}
trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- goto err_unregister_hook;
+ goto err_trans;
}
nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
@@ -2522,17 +2533,22 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
nft_trans_chain_policy(trans) = policy;
err = nft_chain_add(table, chain);
- if (err < 0) {
- nft_trans_destroy(trans);
- goto err_unregister_hook;
- }
+ if (err < 0)
+ goto err_chain_add;
+
+ /* This must be LAST to ensure no packets are walking over this chain. */
+ err = nf_tables_register_hook(net, table, chain);
+ if (err < 0)
+ goto err_register_hook;
return 0;
-err_unregister_hook:
+err_register_hook:
+ nft_chain_del(chain);
+err_chain_add:
+ nft_trans_destroy(trans);
+err_trans:
nft_use_dec_restore(&table->use);
-err_use:
- nf_tables_unregister_hook(net, table, chain);
err_destroy_chain:
nf_tables_chain_destroy(ctx);
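
The reordering above makes nf_tables_addchain() register hooks only after every other fallible step has succeeded, and the relabeled goto ladder unwinds strictly in reverse order of construction. A compact, compilable sketch of the same error-ladder shape; every helper here is hypothetical:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool use_inc(void)   { return true; }
static void use_dec(void)   { }
static int  trans_add(void) { return 0; }
static void trans_del(void) { }
static int  list_add(void)  { return 0; }
static void list_del(void)  { }
static int  hook_reg(void)  { return -EBUSY; } /* simulate a late failure */
static void destroy(void)   { }

static int addchain(void)
{
    int err;

    if (!use_inc()) {
        err = -EMFILE;
        goto err_destroy;
    }
    err = trans_add();
    if (err < 0)
        goto err_trans;
    err = list_add();
    if (err < 0)
        goto err_list;
    /* LAST: only now may packets start walking the chain */
    err = hook_reg();
    if (err < 0)
        goto err_hook;
    return 0;

err_hook:
    list_del();
err_list:
    trans_del();
err_trans:
    use_dec();
err_destroy:
    destroy();
    return err;
}

int main(void)
{
    printf("addchain() = %d\n", addchain());
    return 0;
}
```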
@@ -4234,23 +4250,18 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
* given, in that case the amount of memory per element is used.
*/
static const struct nft_set_ops *
-nft_select_set_ops(const struct nft_ctx *ctx,
- const struct nlattr * const nla[],
+nft_select_set_ops(const struct nft_ctx *ctx, u32 flags,
const struct nft_set_desc *desc)
{
struct nftables_pernet *nft_net = nft_pernet(ctx->net);
const struct nft_set_ops *ops, *bops;
struct nft_set_estimate est, best;
const struct nft_set_type *type;
- u32 flags = 0;
int i;
lockdep_assert_held(&nft_net->commit_mutex);
lockdep_nfnl_nft_mutex_not_held();
- if (nla[NFTA_SET_FLAGS] != NULL)
- flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
-
bops = NULL;
best.size = ~0;
best.lookup = ~0;
@@ -4413,6 +4424,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
if (p[1] != 'd' || strchr(p + 2, '%'))
return -EINVAL;
+ if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
+ return -EINVAL;
+
inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (inuse == NULL)
return -ENOMEM;
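
Anonymous set templates such as "__set%d" are expanded into a fixed-size buffer, so the new strnlen() guard rejects overlong templates before any expansion happens; NFT_SET_MAX_ANONLEN is the 16-byte bound added near the top of this file's diff. A minimal sketch of the guard:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ANONLEN 16  /* mirrors NFT_SET_MAX_ANONLEN */

static int check_template(const char *name)
{
    /* strnlen() never scans past MAX_ANONLEN bytes, so an overlong
     * template is rejected cheaply and without reading far. */
    if (strnlen(name, MAX_ANONLEN) >= MAX_ANONLEN)
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("%d\n", check_template("__set%d"));                 /* 0 */
    printf("%d\n", check_template("averyveryverylongname%d")); /* -22 */
    return 0;
}
```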
@@ -4994,6 +5008,12 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
(NFT_SET_EVAL | NFT_SET_OBJECT))
return -EOPNOTSUPP;
+ if ((flags & (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT | NFT_SET_EVAL)) ==
+ (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT))
+ return -EOPNOTSUPP;
+ if ((flags & (NFT_SET_CONSTANT | NFT_SET_TIMEOUT)) ==
+ (NFT_SET_CONSTANT | NFT_SET_TIMEOUT))
+ return -EOPNOTSUPP;
}
desc.dtype = 0;
@@ -5133,7 +5153,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
- ops = nft_select_set_ops(&ctx, nla, &desc);
+ ops = nft_select_set_ops(&ctx, flags, &desc);
if (IS_ERR(ops))
return PTR_ERR(ops);
@@ -5417,6 +5437,7 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
list_del_rcu(&set->list);
+ set->dead = 1;
if (event)
nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
GFP_KERNEL);
@@ -7547,11 +7568,15 @@ nla_put_failure:
return -1;
}
-static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
+static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
{
const struct nft_object_type *type;
list_for_each_entry(type, &nf_tables_objects, list) {
+ if (type->family != NFPROTO_UNSPEC &&
+ type->family != family)
+ continue;
+
if (objtype == type->type)
return type;
}
@@ -7559,11 +7584,11 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype)
}
static const struct nft_object_type *
-nft_obj_type_get(struct net *net, u32 objtype)
+nft_obj_type_get(struct net *net, u32 objtype, u8 family)
{
const struct nft_object_type *type;
- type = __nft_obj_type_get(objtype);
+ type = __nft_obj_type_get(objtype, family);
if (type != NULL && try_module_get(type->owner))
return type;
@@ -7656,7 +7681,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
- type = __nft_obj_type_get(objtype);
+ type = __nft_obj_type_get(objtype, family);
if (WARN_ON_ONCE(!type))
return -ENOENT;
@@ -7670,7 +7695,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
if (!nft_use_inc(&table->use))
return -EMFILE;
- type = nft_obj_type_get(net, objtype);
+ type = nft_obj_type_get(net, objtype, family);
if (IS_ERR(type)) {
err = PTR_ERR(type);
goto err_type;
@@ -8447,9 +8472,9 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
u8 family = info->nfmsg->nfgen_family;
const struct nf_flowtable_type *type;
struct nft_flowtable *flowtable;
- struct nft_hook *hook, *next;
struct net *net = info->net;
struct nft_table *table;
+ struct nft_trans *trans;
struct nft_ctx ctx;
int err;
@@ -8529,34 +8554,34 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
extack, true);
if (err < 0)
- goto err4;
+ goto err_flowtable_parse_hooks;
list_splice(&flowtable_hook.list, &flowtable->hook_list);
flowtable->data.priority = flowtable_hook.priority;
flowtable->hooknum = flowtable_hook.num;
+ trans = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto err_flowtable_trans;
+ }
+
+ /* This must be LAST to ensure no packets are walking over this flowtable. */
err = nft_register_flowtable_net_hooks(ctx.net, table,
&flowtable->hook_list,
flowtable);
- if (err < 0) {
- nft_hooks_destroy(&flowtable->hook_list);
- goto err4;
- }
-
- err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
if (err < 0)
- goto err5;
+ goto err_flowtable_hooks;
list_add_tail_rcu(&flowtable->list, &table->flowtables);
return 0;
-err5:
- list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
- nft_unregister_flowtable_hook(net, flowtable, hook);
- list_del_rcu(&hook->list);
- kfree_rcu(hook, rcu);
- }
-err4:
+
+err_flowtable_hooks:
+ nft_trans_destroy(trans);
+err_flowtable_trans:
+ nft_hooks_destroy(&flowtable->hook_list);
+err_flowtable_parse_hooks:
flowtable->data.type->free(&flowtable->data);
err3:
module_put(type->owner);
@@ -9819,6 +9844,7 @@ dead_elem:
struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
{
struct nft_set_elem_catchall *catchall, *next;
+ u64 tstamp = nft_net_tstamp(gc->net);
const struct nft_set *set = gc->set;
struct nft_elem_priv *elem_priv;
struct nft_set_ext *ext;
@@ -9828,7 +9854,7 @@ struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc)
list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
ext = nft_set_elem_ext(set, catchall->elem);
- if (!nft_set_elem_expired(ext))
+ if (!__nft_set_elem_expired(ext, tstamp))
continue;
gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
@@ -10416,6 +10442,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
} else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
}
+ if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_ORPHAN) {
+ trans->ctx.table->flags &= ~NFT_TABLE_F_OWNER;
+ trans->ctx.table->nlpid = 0;
+ }
trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
nft_trans_destroy(trans);
} else {
@@ -10614,6 +10644,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
bool genid_ok;
mutex_lock(&nft_net->commit_mutex);
+ nft_net->tstamp = get_jiffies_64();
genid_ok = genid == 0 || nft_net->base_seq == genid;
if (!genid_ok)
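
The added line stores get_jiffies_64() into nft_net->tstamp when the commit mutex is taken, so every expiry test in the batch (the rhash, pipapo and rbtree hunks elsewhere in this patch compare against nft_net_tstamp()) shares one notion of "now", and an element cannot flip from live to expired mid-transaction. A small wraparound-safe sketch of checking expiry against such a captured timestamp; names are illustrative:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct elem { uint64_t expires; };

/* Signed difference handles jiffies wraparound, like time_after_eq64(). */
static bool expired_at(const struct elem *e, uint64_t tstamp)
{
    return (int64_t)(tstamp - e->expires) >= 0;
}

int main(void)
{
    uint64_t tstamp = 1000;            /* captured once per batch */
    struct elem a = { .expires =  999 };
    struct elem b = { .expires = 2000 };

    printf("a expired: %d\n", expired_at(&a, tstamp)); /* 1 */
    printf("b expired: %d\n", expired_at(&b, tstamp)); /* 0 */
    return 0;
}
```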
@@ -10988,16 +11019,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
switch (data->verdict.code) {
- default:
- switch (data->verdict.code & NF_VERDICT_MASK) {
- case NF_ACCEPT:
- case NF_DROP:
- case NF_QUEUE:
- break;
- default:
- return -EINVAL;
- }
- fallthrough;
+ case NF_ACCEPT:
+ case NF_DROP:
+ case NF_QUEUE:
+ break;
case NFT_CONTINUE:
case NFT_BREAK:
case NFT_RETURN:
@@ -11032,6 +11057,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
data->verdict.chain = chain;
break;
+ default:
+ return -EINVAL;
}
desc->len = sizeof(data->verdict);
@@ -11345,6 +11372,10 @@ again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
n->portid == table->nlpid) {
+ if (table->flags & NFT_TABLE_F_PERSIST) {
+ table->flags &= ~NFT_TABLE_F_OWNER;
+ continue;
+ }
__nft_release_hook(net, table);
list_del_rcu(&table->list);
to_delete[deleted++] = table;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 171d1f52d3dd..00f4bd21c59b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -225,6 +225,148 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
return entry;
}
+static unsigned int nf_iterate(struct sk_buff *skb,
+ struct nf_hook_state *state,
+ const struct nf_hook_entries *hooks,
+ unsigned int *index)
+{
+ const struct nf_hook_entry *hook;
+ unsigned int verdict, i = *index;
+
+ while (i < hooks->num_hook_entries) {
+ hook = &hooks->hooks[i];
+repeat:
+ verdict = nf_hook_entry_hookfn(hook, skb, state);
+ if (verdict != NF_ACCEPT) {
+ *index = i;
+ if (verdict != NF_REPEAT)
+ return verdict;
+ goto repeat;
+ }
+ i++;
+ }
+
+ *index = i;
+ return NF_ACCEPT;
+}
+
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+ switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+ case NFPROTO_BRIDGE:
+ return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+ case NFPROTO_IPV4:
+ return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+ case NFPROTO_IPV6:
+ return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
+{
+#ifdef CONFIG_INET
+ const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+ if (entry->state.hook == NF_INET_LOCAL_OUT) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ if (!(iph->tos == rt_info->tos &&
+ skb->mark == rt_info->mark &&
+ iph->daddr == rt_info->daddr &&
+ iph->saddr == rt_info->saddr))
+ return ip_route_me_harder(entry->state.net, entry->state.sk,
+ skb, RTN_UNSPEC);
+ }
+#endif
+ return 0;
+}
+
+static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+ const struct nf_ipv6_ops *v6ops;
+ int ret = 0;
+
+ switch (entry->state.pf) {
+ case AF_INET:
+ ret = nf_ip_reroute(skb, entry);
+ break;
+ case AF_INET6:
+ v6ops = rcu_dereference(nf_ipv6_ops);
+ if (v6ops)
+ ret = v6ops->reroute(skb, entry);
+ break;
+ }
+ return ret;
+}
+
+/* caller must hold rcu read-side lock */
+static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
+{
+ const struct nf_hook_entry *hook_entry;
+ const struct nf_hook_entries *hooks;
+ struct sk_buff *skb = entry->skb;
+ const struct net *net;
+ unsigned int i;
+ int err;
+ u8 pf;
+
+ net = entry->state.net;
+ pf = entry->state.pf;
+
+ hooks = nf_hook_entries_head(net, pf, entry->state.hook);
+
+ i = entry->hook_index;
+ if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
+ nf_queue_entry_free(entry);
+ return;
+ }
+
+ hook_entry = &hooks->hooks[i];
+
+ /* Continue traversal iff userspace said ok... */
+ if (verdict == NF_REPEAT)
+ verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
+
+ if (verdict == NF_ACCEPT) {
+ if (nf_reroute(skb, entry) < 0)
+ verdict = NF_DROP;
+ }
+
+ if (verdict == NF_ACCEPT) {
+next_hook:
+ ++i;
+ verdict = nf_iterate(skb, &entry->state, hooks, &i);
+ }
+
+ switch (verdict & NF_VERDICT_MASK) {
+ case NF_ACCEPT:
+ case NF_STOP:
+ local_bh_disable();
+ entry->state.okfn(entry->state.net, entry->state.sk, skb);
+ local_bh_enable();
+ break;
+ case NF_QUEUE:
+ err = nf_queue(skb, &entry->state, i, verdict);
+ if (err == 1)
+ goto next_hook;
+ break;
+ case NF_STOLEN:
+ break;
+ default:
+ kfree_skb(skb);
+ }
+
+ nf_queue_entry_free(entry);
+}
+
static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
const struct nf_ct_hook *ct_hook;
@@ -232,18 +374,25 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
if (verdict == NF_ACCEPT ||
verdict == NF_REPEAT ||
verdict == NF_STOP) {
+ unsigned int ct_verdict = verdict;
+
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
if (ct_hook)
- verdict = ct_hook->update(entry->state.net, entry->skb);
+ ct_verdict = ct_hook->update(entry->state.net, entry->skb);
rcu_read_unlock();
- switch (verdict & NF_VERDICT_MASK) {
+ switch (ct_verdict & NF_VERDICT_MASK) {
+ case NF_ACCEPT:
+ /* follow userspace verdict, could be REPEAT */
+ break;
case NF_STOLEN:
nf_queue_entry_free(entry);
return;
+ default:
+ verdict = ct_verdict & NF_VERDICT_MASK;
+ break;
}
-
}
nf_reinject(entry, verdict);
}
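
Keeping the conntrack result in a separate ct_verdict means an NF_ACCEPT from the conntrack update can no longer clobber a userspace NF_REPEAT verdict; only NF_STOLEN or an explicit non-accept result overrides it. A standalone sketch of that resolution logic, with a stubbed conntrack hook:

```c
#include <stdio.h>

#define NF_DROP         0
#define NF_ACCEPT       1
#define NF_STOLEN       2
#define NF_REPEAT       4
#define NF_VERDICT_MASK 0x000000ff

/* Simulated conntrack update; returns its own verdict. */
static unsigned int ct_update(void) { return NF_ACCEPT; }

static unsigned int resolve(unsigned int verdict)
{
    unsigned int ct_verdict = ct_update();

    switch (ct_verdict & NF_VERDICT_MASK) {
    case NF_ACCEPT:
        break;                          /* keep userspace verdict (may be REPEAT) */
    case NF_STOLEN:
        return NF_STOLEN;               /* entry consumed by conntrack */
    default:
        verdict = ct_verdict & NF_VERDICT_MASK;
        break;
    }
    return verdict;
}

int main(void)
{
    /* NF_REPEAT from userspace survives conntrack's NF_ACCEPT. */
    printf("verdict = %u (NF_REPEAT = %u)\n",
           resolve(NF_REPEAT), (unsigned)NF_REPEAT);
    return 0;
}
```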
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 680fe557686e..274b6f7e6bb5 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -357,9 +357,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct nft_base_chain *basechain;
struct nftables_pernet *nft_net;
- struct nft_table *table;
struct nft_chain *chain, *nr;
+ struct nft_table *table;
struct nft_ctx ctx = {
.net = dev_net(dev),
};
@@ -371,7 +372,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
nft_net = nft_pernet(ctx.net);
mutex_lock(&nft_net->commit_mutex);
list_for_each_entry(table, &nft_net->tables, list) {
- if (table->family != NFPROTO_NETDEV)
+ if (table->family != NFPROTO_NETDEV &&
+ table->family != NFPROTO_INET)
continue;
ctx.family = table->family;
@@ -380,6 +382,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
if (!nft_is_base_chain(chain))
continue;
+ basechain = nft_base_chain(chain);
+ if (table->family == NFPROTO_INET &&
+ basechain->ops.hooknum != NF_INET_INGRESS)
+ continue;
+
ctx.chain = chain;
nft_netdev_event(event, dev, &ctx);
}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 5284cd2ad532..d3d11dede545 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -135,7 +135,7 @@ static void nft_target_eval_bridge(const struct nft_expr *expr,
static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
[NFTA_TARGET_NAME] = { .type = NLA_NUL_STRING },
- [NFTA_TARGET_REV] = { .type = NLA_U32 },
+ [NFTA_TARGET_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_TARGET_INFO] = { .type = NLA_BINARY },
};
@@ -200,6 +200,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1]
static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
{
struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
+ u32 l4proto;
u32 flags;
int err;
@@ -212,12 +213,18 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
return -EINVAL;
flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
- if (flags & ~NFT_RULE_COMPAT_F_MASK)
+ if (flags & NFT_RULE_COMPAT_F_UNUSED ||
+ flags & ~NFT_RULE_COMPAT_F_MASK)
return -EINVAL;
if (flags & NFT_RULE_COMPAT_F_INV)
*inv = true;
- *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+ l4proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+ if (l4proto > U16_MAX)
+ return -EINVAL;
+
+ *proto = l4proto;
+
return 0;
}
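
NFTA_RULE_COMPAT_PROTO is a 32-bit netlink attribute while *proto is only 16 bits wide; parsing into a u32 and rejecting anything above U16_MAX makes the narrowing explicit instead of silently truncating. The same guard in miniature:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int parse_proto(uint32_t attr, uint16_t *proto)
{
    if (attr > UINT16_MAX)      /* would truncate on assignment */
        return -EINVAL;
    *proto = (uint16_t)attr;
    return 0;
}

int main(void)
{
    uint16_t proto;

    printf("%d\n", parse_proto(6, &proto));       /* 0, TCP */
    printf("%d\n", parse_proto(0x10006, &proto)); /* -22 */
    return 0;
}
```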
@@ -350,6 +357,22 @@ static int nft_target_validate(const struct nft_ctx *ctx,
unsigned int hook_mask = 0;
int ret;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET &&
+ ctx->family != NFPROTO_BRIDGE &&
+ ctx->family != NFPROTO_ARP)
+ return -EOPNOTSUPP;
+
+ ret = nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING));
+ if (ret)
+ return ret;
+
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
@@ -413,7 +436,7 @@ static void nft_match_eval(const struct nft_expr *expr,
static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
[NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
- [NFTA_MATCH_REV] = { .type = NLA_U32 },
+ [NFTA_MATCH_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_MATCH_INFO] = { .type = NLA_BINARY },
};
@@ -595,6 +618,22 @@ static int nft_match_validate(const struct nft_ctx *ctx,
unsigned int hook_mask = 0;
int ret;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET &&
+ ctx->family != NFPROTO_BRIDGE &&
+ ctx->family != NFPROTO_ARP)
+ return -EOPNOTSUPP;
+
+ ret = nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING));
+ if (ret)
+ return ret;
+
if (nft_is_base_chain(ctx->chain)) {
const struct nft_base_chain *basechain =
nft_base_chain(ctx->chain);
@@ -712,7 +751,7 @@ out_put:
static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
[NFTA_COMPAT_NAME] = { .type = NLA_NUL_STRING,
.len = NFT_COMPAT_NAME_MAX-1 },
- [NFTA_COMPAT_REV] = { .type = NLA_U32 },
+ [NFTA_COMPAT_REV] = NLA_POLICY_MAX(NLA_BE32, 255),
[NFTA_COMPAT_TYPE] = { .type = NLA_U32 },
};
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 86bb9d7797d9..255640013ab8 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -476,6 +476,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
break;
#endif
case NFT_CT_ID:
+ if (tb[NFTA_CT_DIRECTION])
+ return -EINVAL;
+
len = sizeof(u32);
break;
default:
@@ -1250,7 +1253,30 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
if (tb[NFTA_CT_EXPECT_L3PROTO])
priv->l3num = ntohs(nla_get_be16(tb[NFTA_CT_EXPECT_L3PROTO]));
+ switch (priv->l3num) {
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET)
+ break;
+
+ return -EINVAL;
+ case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */
+ default:
+ return -EAFNOSUPPORT;
+ }
+
priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
+ switch (priv->l4proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ case IPPROTO_DCCP:
+ case IPPROTO_SCTP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
priv->dport = nla_get_be16(tb[NFTA_CT_EXPECT_DPORT]);
priv->timeout = nla_get_u32(tb[NFTA_CT_EXPECT_TIMEOUT]);
priv->size = nla_get_u8(tb[NFTA_CT_EXPECT_SIZE]);
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index ab3362c483b4..ab9576098701 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -361,6 +361,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
}
+ __set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
ret = flow_offload_add(flowtable, flow);
if (ret < 0)
goto err_flow_add;
@@ -384,6 +385,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
{
unsigned int hook_mask = (1 << NF_INET_FORWARD);
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
return nft_chain_validate_hooks(ctx->chain, hook_mask);
}
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 79039afde34e..cefa25e0dbb0 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -58,17 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
static int nft_limit_init(struct nft_limit_priv *priv,
const struct nlattr * const tb[], bool pkts)
{
+ u64 unit, tokens, rate_with_burst;
bool invert = false;
- u64 unit, tokens;
if (tb[NFTA_LIMIT_RATE] == NULL ||
tb[NFTA_LIMIT_UNIT] == NULL)
return -EINVAL;
priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+ if (priv->rate == 0)
+ return -EINVAL;
+
unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
- priv->nsecs = unit * NSEC_PER_SEC;
- if (priv->rate == 0 || priv->nsecs < unit)
+ if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
return -EOVERFLOW;
if (tb[NFTA_LIMIT_BURST])
@@ -77,18 +79,25 @@ static int nft_limit_init(struct nft_limit_priv *priv,
if (pkts && priv->burst == 0)
priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
- if (priv->rate + priv->burst < priv->rate)
+ if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
return -EOVERFLOW;
if (pkts) {
- tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
+ u64 tmp = div64_u64(priv->nsecs, priv->rate);
+
+ if (check_mul_overflow(tmp, priv->burst, &tokens))
+ return -EOVERFLOW;
} else {
+ u64 tmp;
+
/* The token bucket size limits the number of tokens that can be
* accumulated. tokens_max specifies the bucket size.
* tokens_max = unit * (rate + burst) / rate.
*/
- tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
- priv->rate);
+ if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
+ return -EOVERFLOW;
+
+ tokens = div64_u64(tmp, priv->rate);
}
if (tb[NFTA_LIMIT_FLAGS]) {
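
The limit init path drops its hand-rolled wraparound tests (the old "a + b < a" style) in favour of check_add_overflow()/check_mul_overflow(). Those kernel helpers wrap the GCC/Clang type-generic builtins, which a userspace sketch can call directly; this reproduces the byte-mode tokens_max computation under the same checks:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* tokens_max = unit * (rate + burst) / rate, each step overflow-checked */
static int limit_tokens(uint64_t rate, uint64_t unit, uint64_t burst,
                        uint64_t *tokens)
{
    uint64_t nsecs, rate_with_burst, tmp;

    if (rate == 0)
        return -EINVAL;
    if (__builtin_mul_overflow(unit, NSEC_PER_SEC, &nsecs))
        return -EOVERFLOW;
    if (__builtin_add_overflow(rate, burst, &rate_with_burst))
        return -EOVERFLOW;
    if (__builtin_mul_overflow(nsecs, rate_with_burst, &tmp))
        return -EOVERFLOW;
    *tokens = tmp / rate;
    return 0;
}

int main(void)
{
    uint64_t tokens;
    int err = limit_tokens(1024, 1, 128, &tokens);

    printf("err=%d tokens=%llu\n", err, (unsigned long long)tokens);
    return 0;
}
```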
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 583885ce7232..808f5802c270 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -143,6 +143,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
struct nft_nat *priv = nft_expr_priv(expr);
int err;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
if (err < 0)
return err;
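
nft_nat is one of several expressions in this patch (nft_rt, nft_flow_offload and nft_compat get the same treatment) that now reject unsupported families with -EOPNOTSUPP at validate time instead of relying on later chain registration to fail. The whitelist test in isolation; NFPROTO values as in include/uapi/linux/netfilter.h:

```c
#include <errno.h>
#include <stdio.h>

/* Values as in include/uapi/linux/netfilter.h */
enum { NFPROTO_INET = 1, NFPROTO_IPV4 = 2, NFPROTO_ARP = 3,
       NFPROTO_BRIDGE = 7, NFPROTO_IPV6 = 10 };

static int validate_family(int family)
{
    if (family != NFPROTO_IPV4 &&
        family != NFPROTO_IPV6 &&
        family != NFPROTO_INET)
        return -EOPNOTSUPP;
    return 0;
}

int main(void)
{
    printf("ipv4:   %d\n", validate_family(NFPROTO_IPV4));   /* 0 */
    printf("bridge: %d\n", validate_family(NFPROTO_BRIDGE)); /* -95 */
    return 0;
}
```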
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 7f61506e5b44..7fec57ff736f 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -63,7 +63,6 @@ static int nft_osf_init(const struct nft_ctx *ctx,
{
struct nft_osf *priv = nft_expr_priv(expr);
u32 flags;
- int err;
u8 ttl;
if (!tb[NFTA_OSF_DREG])
@@ -83,13 +82,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
priv->flags = flags;
}
- err = nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
- NULL, NFT_DATA_VALUE,
- NFT_OSF_MAXGENRELEN);
- if (err < 0)
- return err;
-
- return 0;
+ return nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE,
+ NFT_OSF_MAXGENRELEN);
}
static int nft_osf_dump(struct sk_buff *skb,
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index 35a2c28caa60..24d977138572 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -166,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
const struct nft_rt *priv = nft_expr_priv(expr);
unsigned int hooks;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
switch (priv->key) {
case NFT_RT_NEXTHOP4:
case NFT_RT_NEXTHOP6:
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 6c2061bfdae6..6968a3b34236 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -36,6 +36,7 @@ struct nft_rhash_cmp_arg {
const struct nft_set *set;
const u32 *key;
u8 genmask;
+ u64 tstamp;
};
static inline u32 nft_rhash_key(const void *data, u32 len, u32 seed)
@@ -62,7 +63,7 @@ static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg,
return 1;
if (nft_set_elem_is_dead(&he->ext))
return 1;
- if (nft_set_elem_expired(&he->ext))
+ if (__nft_set_elem_expired(&he->ext, x->tstamp))
return 1;
if (!nft_set_elem_active(&he->ext, x->genmask))
return 1;
@@ -87,6 +88,7 @@ bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
.genmask = nft_genmask_cur(net),
.set = set,
.key = key,
+ .tstamp = get_jiffies_64(),
};
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
@@ -106,6 +108,7 @@ nft_rhash_get(const struct net *net, const struct nft_set *set,
.genmask = nft_genmask_cur(net),
.set = set,
.key = elem->key.val.data,
+ .tstamp = get_jiffies_64(),
};
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
@@ -131,6 +134,7 @@ static bool nft_rhash_update(struct nft_set *set, const u32 *key,
.genmask = NFT_GENMASK_ANY,
.set = set,
.key = key,
+ .tstamp = get_jiffies_64(),
};
he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params);
@@ -175,6 +179,7 @@ static int nft_rhash_insert(const struct net *net, const struct nft_set *set,
.genmask = nft_genmask_next(net),
.set = set,
.key = elem->key.val.data,
+ .tstamp = nft_net_tstamp(net),
};
struct nft_rhash_elem *prev;
@@ -216,6 +221,7 @@ nft_rhash_deactivate(const struct net *net, const struct nft_set *set,
.genmask = nft_genmask_next(net),
.set = set,
.key = elem->key.val.data,
+ .tstamp = nft_net_tstamp(net),
};
rcu_read_lock();
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index efd523496be4..c0ceea068936 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -342,9 +342,6 @@
#include "nft_set_pipapo_avx2.h"
#include "nft_set_pipapo.h"
-/* Current working bitmap index, toggled between field matches */
-static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
-
/**
* pipapo_refill() - For each set bit, set bits from selected mapping table item
* @map: Bitmap to be scanned for set bits
@@ -362,11 +359,13 @@ static DEFINE_PER_CPU(bool, nft_pipapo_scratch_index);
*
* Return: -1 on no match, bit position on 'match_only', 0 otherwise.
*/
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only)
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only)
{
unsigned long bitset;
- int k, ret = -1;
+ unsigned int k;
+ int ret = -1;
for (k = 0; k < len; k++) {
bitset = map[k];
@@ -412,25 +411,28 @@ bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_scratch *scratch;
unsigned long *res_map, *fill_map;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
bool map_index;
int i;
local_bh_disable();
- map_index = raw_cpu_read(nft_pipapo_scratch_index);
-
m = rcu_dereference(priv->match);
if (unlikely(!m || !*raw_cpu_ptr(m->scratch)))
goto out;
- res_map = *raw_cpu_ptr(m->scratch) + (map_index ? m->bsize_max : 0);
- fill_map = *raw_cpu_ptr(m->scratch) + (map_index ? 0 : m->bsize_max);
+ scratch = *raw_cpu_ptr(m->scratch);
+
+ map_index = scratch->map_index;
+
+ res_map = scratch->map + (map_index ? m->bsize_max : 0);
+ fill_map = scratch->map + (map_index ? 0 : m->bsize_max);
memset(res_map, 0xff, m->bsize_max * sizeof(*res_map));
@@ -460,7 +462,7 @@ next_match:
b = pipapo_refill(res_map, f->bsize, f->rules, fill_map, f->mt,
last);
if (b < 0) {
- raw_cpu_write(nft_pipapo_scratch_index, map_index);
+ scratch->map_index = map_index;
local_bh_enable();
return false;
@@ -477,7 +479,7 @@ next_match:
* current inactive bitmap is clean and can be reused as
* *next* bitmap (not initial) for the next packet.
*/
- raw_cpu_write(nft_pipapo_scratch_index, map_index);
+ scratch->map_index = map_index;
local_bh_enable();
return true;
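
The lookup no longer consults the global per-CPU nft_pipapo_scratch_index bool removed above; the toggle now lives inside the scratch allocation itself (struct nft_pipapo_scratch, defined in the header hunk below), so an index can never be paired with a map from a different allocation. A toy model of the layout and the double-buffer swap; sizes are made up:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirrors struct nft_pipapo_scratch: index + flexible map array. */
struct scratch {
    uint8_t map_index;      /* which half currently holds results */
    uint32_t align_off;
    unsigned long map[];
};

int main(void)
{
    size_t bsize_max = 4;   /* longs per map half */
    struct scratch *s = calloc(1, sizeof(*s) +
                               2 * bsize_max * sizeof(unsigned long));

    if (!s)
        return 1;

    unsigned long *res  = s->map + (s->map_index ? bsize_max : 0);
    unsigned long *fill = s->map + (s->map_index ? 0 : bsize_max);

    memset(res, 0xff, bsize_max * sizeof(*res));
    s->map_index = !s->map_index;   /* swap halves for the next lookup */
    printf("res=%p fill=%p index=%u\n", (void *)res, (void *)fill,
           (unsigned)s->map_index);
    free(s);
    return 0;
}
```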
@@ -504,6 +506,8 @@ out:
* @set: nftables API set representation
* @data: Key data to be matched against existing elements
* @genmask: If set, check that element is active in given genmask
+ * @tstamp: timestamp to check for expired elements
+ * @gfp: the type of memory to allocate (see kmalloc).
*
* This is essentially the same as the lookup function, except that it matches
* key data against the uncommitted copy and doesn't use preallocated maps for
@@ -513,22 +517,27 @@ out:
*/
static struct nft_pipapo_elem *pipapo_get(const struct net *net,
const struct nft_set *set,
- const u8 *data, u8 genmask)
+ const u8 *data, u8 genmask,
+ u64 tstamp, gfp_t gfp)
{
struct nft_pipapo_elem *ret = ERR_PTR(-ENOENT);
struct nft_pipapo *priv = nft_set_priv(set);
- struct nft_pipapo_match *m = priv->clone;
unsigned long *res_map, *fill_map = NULL;
- struct nft_pipapo_field *f;
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
int i;
- res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ m = priv->clone;
+ if (m->bsize_max == 0)
+ return ret;
+
+ res_map = kmalloc_array(m->bsize_max, sizeof(*res_map), gfp);
if (!res_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
}
- fill_map = kcalloc(m->bsize_max, sizeof(*res_map), GFP_ATOMIC);
+ fill_map = kcalloc(m->bsize_max, sizeof(*res_map), gfp);
if (!fill_map) {
ret = ERR_PTR(-ENOMEM);
goto out;
@@ -566,7 +575,7 @@ next_match:
goto out;
if (last) {
- if (nft_set_elem_expired(&f->mt[b].e->ext))
+ if (__nft_set_elem_expired(&f->mt[b].e->ext, tstamp))
goto next_match;
if ((genmask &&
!nft_set_elem_active(&f->mt[b].e->ext, genmask)))
@@ -603,10 +612,11 @@ static struct nft_elem_priv *
nft_pipapo_get(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem, unsigned int flags)
{
- static struct nft_pipapo_elem *e;
+ struct nft_pipapo_elem *e;
e = pipapo_get(net, set, (const u8 *)elem->key.val.data,
- nft_genmask_cur(net));
+ nft_genmask_cur(net), get_jiffies_64(),
+ GFP_ATOMIC);
if (IS_ERR(e))
return ERR_CAST(e);
@@ -614,6 +624,65 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
}
/**
+ * pipapo_realloc_mt() - Reallocate mapping table if needed upon resize
+ * @f: Field containing mapping table
+ * @old_rules: Amount of existing mapped rules
+ * @rules: Amount of new rules to map
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int pipapo_realloc_mt(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
+{
+ union nft_pipapo_map_bucket *new_mt = NULL, *old_mt = f->mt;
+ const unsigned int extra = PAGE_SIZE / sizeof(*new_mt);
+ unsigned int rules_alloc = rules;
+
+ might_sleep();
+
+ if (unlikely(rules == 0))
+ goto out_free;
+
+ /* growing and enough space left, no action needed */
+ if (rules > old_rules && f->rules_alloc > rules)
+ return 0;
+
+ /* downsize and extra slack has not grown too large */
+ if (rules < old_rules) {
+ unsigned int remove = f->rules_alloc - rules;
+
+ if (remove < (2u * extra))
+ return 0;
+ }
+
+ /* If the set needs more than one page of memory for rules then
+ * allocate an extra page to avoid frequent reallocation.
+ */
+ if (rules > extra &&
+ check_add_overflow(rules, extra, &rules_alloc))
+ return -EOVERFLOW;
+
+ new_mt = kvmalloc_array(rules_alloc, sizeof(*new_mt), GFP_KERNEL);
+ if (!new_mt)
+ return -ENOMEM;
+
+ if (old_mt)
+ memcpy(new_mt, old_mt, min(old_rules, rules) * sizeof(*new_mt));
+
+ if (rules > old_rules) {
+ memset(new_mt + old_rules, 0,
+ (rules - old_rules) * sizeof(*new_mt));
+ }
+out_free:
+ f->rules_alloc = rules_alloc;
+ f->mt = new_mt;
+
+ kvfree(old_mt);
+
+ return 0;
+}
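
pipapo_realloc_mt() factors the mapping-table reallocation out of pipapo_resize() and adds hysteresis: growth past a page gains a page of slack, and shrinking only reallocates once at least two pages of slack would be recovered. A rough userspace model of that policy; the EXTRA constant stands in for the page-derived slack:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EXTRA 64    /* slack entries; the kernel uses PAGE_SIZE / entry size */

struct tbl { unsigned int *mt; unsigned int rules_alloc; };

static int realloc_mt(struct tbl *t, unsigned int old_rules,
                      unsigned int rules)
{
    unsigned int rules_alloc = rules;
    unsigned int *new_mt;

    if (rules > old_rules && t->rules_alloc > rules)
        return 0;                       /* growing into existing slack */
    if (rules < old_rules && t->rules_alloc - rules < 2 * EXTRA)
        return 0;                       /* shrink not yet worth a realloc */
    if (rules > EXTRA)
        rules_alloc = rules + EXTRA;    /* add slack for future growth */

    new_mt = calloc(rules_alloc, sizeof(*new_mt));
    if (!new_mt)
        return -1;
    if (t->mt)
        memcpy(new_mt, t->mt,
               (old_rules < rules ? old_rules : rules) * sizeof(*new_mt));
    free(t->mt);
    t->mt = new_mt;
    t->rules_alloc = rules_alloc;
    return 0;
}

int main(void)
{
    struct tbl t = { 0 };
    unsigned int n = 0;

    for (unsigned int i = 1; i <= 200; i++) {
        realloc_mt(&t, n, i);
        n = i;
    }
    printf("rules=%u rules_alloc=%u\n", n, t.rules_alloc);
    free(t.mt);
    return 0;
}
```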
+
+/**
* pipapo_resize() - Resize lookup or mapping table, or both
* @f: Field containing lookup and mapping tables
* @old_rules: Previous amount of rules in field
@@ -625,12 +694,15 @@ nft_pipapo_get(const struct net *net, const struct nft_set *set,
*
* Return: 0 on success, -ENOMEM on allocation failure.
*/
-static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
+static int pipapo_resize(struct nft_pipapo_field *f,
+ unsigned int old_rules, unsigned int rules)
{
long *new_lt = NULL, *new_p, *old_lt = f->lt, *old_p;
- union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
- size_t new_bucket_size, copy;
- int group, bucket;
+ unsigned int new_bucket_size, copy;
+ int group, bucket, err;
+
+ if (rules >= NFT_PIPAPO_RULE0_MAX)
+ return -ENOSPC;
new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
#ifdef NFT_PIPAPO_ALIGN
@@ -670,27 +742,18 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
}
mt:
- new_mt = kvmalloc(rules * sizeof(*new_mt), GFP_KERNEL);
- if (!new_mt) {
+ err = pipapo_realloc_mt(f, old_rules, rules);
+ if (err) {
kvfree(new_lt);
- return -ENOMEM;
- }
-
- memcpy(new_mt, f->mt, min(old_rules, rules) * sizeof(*new_mt));
- if (rules > old_rules) {
- memset(new_mt + old_rules, 0,
- (rules - old_rules) * sizeof(*new_mt));
+ return err;
}
if (new_lt) {
f->bsize = new_bucket_size;
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
kvfree(old_lt);
}
- f->mt = new_mt;
- kvfree(old_mt);
-
return 0;
}
@@ -841,8 +904,8 @@ static void pipapo_lt_8b_to_4b(int old_groups, int bsize,
*/
static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
{
+ unsigned int groups, bb;
unsigned long *new_lt;
- int groups, bb;
size_t lt_size;
lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
@@ -892,7 +955,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
f->groups = groups;
f->bb = bb;
kvfree(f->lt);
- NFT_PIPAPO_LT_ASSIGN(f, new_lt);
+ f->lt = new_lt;
}
/**
@@ -909,7 +972,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
int mask_bits)
{
- int rule = f->rules, group, ret, bit_offset = 0;
+ unsigned int rule = f->rules, group, ret, bit_offset = 0;
ret = pipapo_resize(f, f->rules, f->rules + 1);
if (ret)
@@ -1109,6 +1172,25 @@ static void pipapo_map(struct nft_pipapo_match *m,
}
/**
+ * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
+ * @m: Matching data
+ * @cpu: CPU number
+ */
+static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
+{
+ struct nft_pipapo_scratch *s;
+ void *mem;
+
+ s = *per_cpu_ptr(m->scratch, cpu);
+ if (!s)
+ return;
+
+ mem = s;
+ mem -= s->align_off;
+ kfree(mem);
+}
+
+/**
* pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
* @clone: Copy of matching data with pending insertions and deletions
* @bsize_max: Maximum bucket size, scratch maps cover two buckets
@@ -1121,12 +1203,13 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
int i;
for_each_possible_cpu(i) {
- unsigned long *scratch;
+ struct nft_pipapo_scratch *scratch;
#ifdef NFT_PIPAPO_ALIGN
- unsigned long *scratch_aligned;
+ void *scratch_aligned;
+ u32 align_off;
#endif
-
- scratch = kzalloc_node(bsize_max * sizeof(*scratch) * 2 +
+ scratch = kzalloc_node(struct_size(scratch, map,
+ bsize_max * 2) +
NFT_PIPAPO_ALIGN_HEADROOM,
GFP_KERNEL, cpu_to_node(i));
if (!scratch) {
@@ -1140,14 +1223,25 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
return -ENOMEM;
}
- kfree(*per_cpu_ptr(clone->scratch, i));
-
- *per_cpu_ptr(clone->scratch, i) = scratch;
+ pipapo_free_scratch(clone, i);
#ifdef NFT_PIPAPO_ALIGN
- scratch_aligned = NFT_PIPAPO_LT_ALIGN(scratch);
- *per_cpu_ptr(clone->scratch_aligned, i) = scratch_aligned;
+ /* Align &scratch->map (not the struct itself): the extra
+ * %NFT_PIPAPO_ALIGN_HEADROOM bytes passed to kzalloc_node()
+ * above guarantee we can waste up to those bytes in order
+ * to align the map field regardless of its offset within
+ * the struct.
+ */
+ BUILD_BUG_ON(offsetof(struct nft_pipapo_scratch, map) > NFT_PIPAPO_ALIGN_HEADROOM);
+
+ scratch_aligned = NFT_PIPAPO_LT_ALIGN(&scratch->map);
+ scratch_aligned -= offsetof(struct nft_pipapo_scratch, map);
+ align_off = scratch_aligned - (void *)scratch;
+
+ scratch = scratch_aligned;
+ scratch->align_off = align_off;
#endif
+ *per_cpu_ptr(clone->scratch, i) = scratch;
}
return 0;
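
With map_index inside the struct, the separately tracked scratch_aligned per-CPU pointer disappears; instead the struct pointer itself is advanced until the map member is NFT_PIPAPO_ALIGN-aligned, and align_off records the shift so pipapo_free_scratch() can recover the address the allocator really returned. A userspace sketch of the same over-allocate, align, record-offset dance:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_TO 32u    /* stands in for NFT_PIPAPO_ALIGN */

struct scratch {
    uint8_t map_index;
    uint32_t align_off; /* distance back to the raw allocation */
    unsigned long map[];
};

static struct scratch *scratch_alloc(size_t map_longs)
{
    /* Headroom so we can slide forward to align the map member. */
    char *mem = calloc(1, sizeof(struct scratch) +
                          map_longs * sizeof(unsigned long) +
                          ALIGN_TO - 1);
    if (!mem)
        return NULL;

    char *aligned = (char *)(((uintptr_t)(mem +
            offsetof(struct scratch, map)) + ALIGN_TO - 1) &
            ~(uintptr_t)(ALIGN_TO - 1));
    struct scratch *s = (struct scratch *)(aligned -
            offsetof(struct scratch, map));

    s->align_off = (uint32_t)((char *)s - mem);
    return s;
}

static void scratch_free(struct scratch *s)
{
    if (s)
        free((char *)s - s->align_off); /* original allocation address */
}

int main(void)
{
    struct scratch *s = scratch_alloc(8);

    if (!s)
        return 1;
    printf("map %% %u = %u\n", ALIGN_TO,
           (unsigned)((uintptr_t)s->map % ALIGN_TO));
    scratch_free(s);
    return 0;
}
```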
@@ -1173,6 +1267,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
struct nft_pipapo_match *m = priv->clone;
u8 genmask = nft_genmask_next(net);
struct nft_pipapo_elem *e, *dup;
+ u64 tstamp = nft_net_tstamp(net);
struct nft_pipapo_field *f;
const u8 *start_p, *end_p;
int i, bsize_max, err = 0;
@@ -1182,7 +1277,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
else
end = start;
- dup = pipapo_get(net, set, start, genmask);
+ dup = pipapo_get(net, set, start, genmask, tstamp, GFP_KERNEL);
if (!IS_ERR(dup)) {
/* Check if we already have the same exact entry */
const struct nft_data *dup_key, *dup_end;
@@ -1204,7 +1299,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
if (PTR_ERR(dup) == -ENOENT) {
/* Look for partially overlapping entries */
- dup = pipapo_get(net, set, end, nft_genmask_next(net));
+ dup = pipapo_get(net, set, end, nft_genmask_next(net), tstamp,
+ GFP_KERNEL);
}
if (PTR_ERR(dup) != -ENOENT) {
@@ -1217,8 +1313,14 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
/* Validate */
start_p = start;
end_p = end;
+
+ /* some helpers return -1, or a value >= 0 for a valid rule position,
+ * so we cannot support more than INT_MAX rules at this time.
+ */
+ BUILD_BUG_ON(NFT_PIPAPO_RULE0_MAX > INT_MAX);
+
nft_pipapo_for_each_field(f, i, m) {
- if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
+ if (f->rules >= NFT_PIPAPO_RULE0_MAX)
return -ENOSPC;
if (memcmp(start_p, end_p,
@@ -1301,11 +1403,6 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (!new->scratch)
goto out_scratch;
-#ifdef NFT_PIPAPO_ALIGN
- new->scratch_aligned = alloc_percpu(*new->scratch_aligned);
- if (!new->scratch_aligned)
- goto out_scratch;
-#endif
for_each_possible_cpu(i)
*per_cpu_ptr(new->scratch, i) = NULL;
@@ -1329,18 +1426,25 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (!new_lt)
goto out_lt;
- NFT_PIPAPO_LT_ASSIGN(dst, new_lt);
+ dst->lt = new_lt;
memcpy(NFT_PIPAPO_LT_ALIGN(new_lt),
NFT_PIPAPO_LT_ALIGN(src->lt),
src->bsize * sizeof(*dst->lt) *
src->groups * NFT_PIPAPO_BUCKETS(src->bb));
- dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
- if (!dst->mt)
- goto out_mt;
+ if (src->rules > 0) {
+ dst->mt = kvmalloc_array(src->rules_alloc,
+ sizeof(*src->mt), GFP_KERNEL);
+ if (!dst->mt)
+ goto out_mt;
+
+ memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
+ } else {
+ dst->mt = NULL;
+ dst->rules_alloc = 0;
+ }
- memcpy(dst->mt, src->mt, src->rules * sizeof(*src->mt));
src++;
dst++;
}
@@ -1357,10 +1461,7 @@ out_lt:
}
out_scratch_realloc:
for_each_possible_cpu(i)
- kfree(*per_cpu_ptr(new->scratch, i));
-#ifdef NFT_PIPAPO_ALIGN
- free_percpu(new->scratch_aligned);
-#endif
+ pipapo_free_scratch(new, i);
out_scratch:
free_percpu(new->scratch);
kfree(new);
@@ -1397,10 +1498,10 @@ out_scratch:
*
* Return: Number of rules that originated from the same entry as @first.
*/
-static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
+static unsigned int pipapo_rules_same_key(struct nft_pipapo_field *f, unsigned int first)
{
struct nft_pipapo_elem *e = NULL; /* Keep gcc happy */
- int r;
+ unsigned int r;
for (r = first; r < f->rules; r++) {
if (r != first && e != f->mt[r].e)
@@ -1453,8 +1554,9 @@ static int pipapo_rules_same_key(struct nft_pipapo_field *f, int first)
* 0 1 2
* element pointers: 0x42 0x42 0x44
*/
-static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
- int start, int n, int to_offset, bool is_last)
+static void pipapo_unmap(union nft_pipapo_map_bucket *mt, unsigned int rules,
+ unsigned int start, unsigned int n,
+ unsigned int to_offset, bool is_last)
{
int i;
@@ -1560,7 +1662,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
- int rules_f0, first_rule = 0;
+ unsigned int rules_f0, first_rule = 0;
+ u64 tstamp = nft_net_tstamp(net);
struct nft_pipapo_elem *e;
struct nft_trans_gc *gc;
@@ -1570,8 +1673,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
- struct nft_pipapo_field *f;
- int i, start, rules_fx;
+ const struct nft_pipapo_field *f;
+ unsigned int i, start, rules_fx;
start = first_rule;
rules_fx = rules_f0;
@@ -1594,7 +1697,7 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
/* synchronous gc never fails, there is no need to set on
* NFT_SET_ELEM_DEAD_BIT.
*/
- if (nft_set_elem_expired(&e->ext)) {
+ if (__nft_set_elem_expired(&e->ext, tstamp)) {
priv->dirty = true;
gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
@@ -1640,13 +1743,9 @@ static void pipapo_free_match(struct nft_pipapo_match *m)
int i;
for_each_possible_cpu(i)
- kfree(*per_cpu_ptr(m->scratch, i));
+ pipapo_free_scratch(m, i);
-#ifdef NFT_PIPAPO_ALIGN
- free_percpu(m->scratch_aligned);
-#endif
free_percpu(m->scratch);
-
pipapo_free_fields(m);
kfree(m);
@@ -1769,7 +1868,8 @@ static void *pipapo_deactivate(const struct net *net, const struct nft_set *set,
{
struct nft_pipapo_elem *e;
- e = pipapo_get(net, set, data, nft_genmask_next(net));
+ e = pipapo_get(net, set, data, nft_genmask_next(net),
+ nft_net_tstamp(net), GFP_KERNEL);
if (IS_ERR(e))
return NULL;
@@ -1953,7 +2053,7 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
- int rules_f0, first_rule = 0;
+ unsigned int rules_f0, first_rule = 0;
struct nft_pipapo_elem *e;
const u8 *data;
@@ -2016,9 +2116,9 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
{
struct nft_pipapo *priv = nft_set_priv(set);
struct net *net = read_pnet(&set->net);
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
- int i, r;
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
+ unsigned int i, r;
rcu_read_lock();
if (iter->genmask == nft_genmask_cur(net))
@@ -2122,6 +2222,9 @@ static int nft_pipapo_init(const struct nft_set *set,
field_count = desc->field_count ? : 1;
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS > 255);
+ BUILD_BUG_ON(NFT_PIPAPO_MAX_FIELDS != NFT_REG32_COUNT);
+
if (field_count > NFT_PIPAPO_MAX_FIELDS)
return -EINVAL;
@@ -2132,7 +2235,7 @@ static int nft_pipapo_init(const struct nft_set *set,
m->field_count = field_count;
m->bsize_max = 0;
- m->scratch = alloc_percpu(unsigned long *);
+ m->scratch = alloc_percpu(struct nft_pipapo_scratch *);
if (!m->scratch) {
err = -ENOMEM;
goto out_scratch;
@@ -2140,20 +2243,14 @@ static int nft_pipapo_init(const struct nft_set *set,
for_each_possible_cpu(i)
*per_cpu_ptr(m->scratch, i) = NULL;
-#ifdef NFT_PIPAPO_ALIGN
- m->scratch_aligned = alloc_percpu(unsigned long *);
- if (!m->scratch_aligned) {
- err = -ENOMEM;
- goto out_free;
- }
- for_each_possible_cpu(i)
- *per_cpu_ptr(m->scratch_aligned, i) = NULL;
-#endif
-
rcu_head_init(&m->rcu);
nft_pipapo_for_each_field(f, i, m) {
- int len = desc->field_len[i] ? : set->klen;
+ unsigned int len = desc->field_len[i] ? : set->klen;
+
+ /* f->groups is u8 */
+ BUILD_BUG_ON((NFT_PIPAPO_MAX_BYTES *
+ BITS_PER_BYTE / NFT_PIPAPO_GROUP_BITS_LARGE_SET) >= 256);
f->bb = NFT_PIPAPO_GROUP_BITS_INIT;
f->groups = len * NFT_PIPAPO_GROUPS_PER_BYTE(f);
@@ -2162,7 +2259,8 @@ static int nft_pipapo_init(const struct nft_set *set,
f->bsize = 0;
f->rules = 0;
- NFT_PIPAPO_LT_ASSIGN(f, NULL);
+ f->rules_alloc = 0;
+ f->lt = NULL;
f->mt = NULL;
}
@@ -2180,9 +2278,6 @@ static int nft_pipapo_init(const struct nft_set *set,
return 0;
out_free:
-#ifdef NFT_PIPAPO_ALIGN
- free_percpu(m->scratch_aligned);
-#endif
free_percpu(m->scratch);
out_scratch:
kfree(m);
@@ -2201,7 +2296,7 @@ static void nft_set_pipapo_match_destroy(const struct nft_ctx *ctx,
struct nft_pipapo_match *m)
{
struct nft_pipapo_field *f;
- int i, r;
+ unsigned int i, r;
for (i = 0, f = m->f; i < m->field_count - 1; i++, f++)
;
@@ -2236,11 +2331,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
nft_set_pipapo_match_destroy(ctx, set, m);
-#ifdef NFT_PIPAPO_ALIGN
- free_percpu(m->scratch_aligned);
-#endif
for_each_possible_cpu(cpu)
- kfree(*per_cpu_ptr(m->scratch, cpu));
+ pipapo_free_scratch(m, cpu);
free_percpu(m->scratch);
pipapo_free_fields(m);
kfree(m);
@@ -2253,11 +2345,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
if (priv->dirty)
nft_set_pipapo_match_destroy(ctx, set, m);
-#ifdef NFT_PIPAPO_ALIGN
- free_percpu(priv->clone->scratch_aligned);
-#endif
for_each_possible_cpu(cpu)
- kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
+ pipapo_free_scratch(priv->clone, cpu);
free_percpu(priv->clone->scratch);
pipapo_free_fields(priv->clone);
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 1040223da5fa..24cd1ff73f98 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -70,15 +70,9 @@
#define NFT_PIPAPO_ALIGN_HEADROOM \
(NFT_PIPAPO_ALIGN - ARCH_KMALLOC_MINALIGN)
#define NFT_PIPAPO_LT_ALIGN(lt) (PTR_ALIGN((lt), NFT_PIPAPO_ALIGN))
-#define NFT_PIPAPO_LT_ASSIGN(field, x) \
- do { \
- (field)->lt_aligned = NFT_PIPAPO_LT_ALIGN(x); \
- (field)->lt = (x); \
- } while (0)
#else
#define NFT_PIPAPO_ALIGN_HEADROOM 0
#define NFT_PIPAPO_LT_ALIGN(lt) (lt)
-#define NFT_PIPAPO_LT_ASSIGN(field, x) ((field)->lt = (x))
#endif /* NFT_PIPAPO_ALIGN */
#define nft_pipapo_for_each_field(field, index, match) \
@@ -110,42 +104,48 @@ union nft_pipapo_map_bucket {
/**
* struct nft_pipapo_field - Lookup, mapping tables and related data for a field
- * @groups: Amount of bit groups
* @rules: Number of inserted rules
* @bsize: Size of each bucket in lookup table, in longs
+ * @rules_alloc: Number of allocated rules, always >= rules
+ * @groups: Amount of bit groups
* @bb: Number of bits grouped together in lookup table buckets
* @lt: Lookup table: 'groups' rows of buckets
- * @lt_aligned: Version of @lt aligned to NFT_PIPAPO_ALIGN bytes
* @mt: Mapping table: one bucket per rule
*/
struct nft_pipapo_field {
- int groups;
- unsigned long rules;
- size_t bsize;
- int bb;
-#ifdef NFT_PIPAPO_ALIGN
- unsigned long *lt_aligned;
-#endif
+ unsigned int rules;
+ unsigned int bsize;
+ unsigned int rules_alloc;
+ u8 groups;
+ u8 bb;
unsigned long *lt;
union nft_pipapo_map_bucket *mt;
};
/**
+ * struct nft_pipapo_scratch - percpu data used for lookup and matching
+ * @map_index: Current working bitmap index, toggled between field matches
+ * @align_off: Offset to get the originally allocated address
+ * @map: Storage for partial matching results during lookup
+ */
+struct nft_pipapo_scratch {
+ u8 map_index;
+ u32 align_off;
+ unsigned long map[];
+};
+
+/**
* struct nft_pipapo_match - Data used for lookup and matching
- * @field_count Amount of fields in set
- * @scratch: Preallocated per-CPU maps for partial matching results
- * @scratch_aligned: Version of @scratch aligned to NFT_PIPAPO_ALIGN bytes
+ * @field_count: Amount of fields in set
* @bsize_max: Maximum lookup table bucket size of all fields, in longs
- * @rcu Matching data is swapped on commits
+ * @scratch: Preallocated per-CPU maps for partial matching results
+ * @rcu: Matching data is swapped on commits
* @f: Fields, with lookup and mapping tables
*/
struct nft_pipapo_match {
- int field_count;
-#ifdef NFT_PIPAPO_ALIGN
- unsigned long * __percpu *scratch_aligned;
-#endif
- unsigned long * __percpu *scratch;
- size_t bsize_max;
+ u8 field_count;
+ unsigned int bsize_max;
+ struct nft_pipapo_scratch * __percpu *scratch;
struct rcu_head rcu;
struct nft_pipapo_field f[] __counted_by(field_count);
};
@@ -178,8 +178,9 @@ struct nft_pipapo_elem {
struct nft_set_ext ext;
};
-int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
- union nft_pipapo_map_bucket *mt, bool match_only);
+int pipapo_refill(unsigned long *map, unsigned int len, unsigned int rules,
+ unsigned long *dst,
+ const union nft_pipapo_map_bucket *mt, bool match_only);
/**
* pipapo_and_field_buckets_4bit() - Intersect 4-bit buckets
@@ -187,7 +188,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_4bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
@@ -215,7 +216,7 @@ static inline void pipapo_and_field_buckets_4bit(struct nft_pipapo_field *f,
* @dst: Area to store result
* @data: Input data selecting table buckets
*/
-static inline void pipapo_and_field_buckets_8bit(struct nft_pipapo_field *f,
+static inline void pipapo_and_field_buckets_8bit(const struct nft_pipapo_field *f,
unsigned long *dst,
const u8 *data)
{
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index 52e0d026d30a..d08407d589ea 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -57,7 +57,7 @@
/* Jump to label if @reg is zero */
#define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label) \
- asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \
+ asm goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \
"je %l[" #label "]" : : : : label)
/* Store 256 bits from YMM register into memory. Contrary to bucket load
@@ -71,9 +71,6 @@
#define NFT_PIPAPO_AVX2_ZERO(reg) \
asm volatile("vpxor %ymm" #reg ", %ymm" #reg ", %ymm" #reg)
-/* Current working bitmap index, toggled between field matches */
-static DEFINE_PER_CPU(bool, nft_pipapo_avx2_scratch_index);
-
/**
* nft_pipapo_avx2_prepare() - Prepare before main algorithm body
*
@@ -215,8 +212,9 @@ static int nft_pipapo_avx2_refill(int offset, unsigned long *map,
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[2] = { pkt[0] >> 4, pkt[0] & 0xf };
@@ -277,8 +275,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
u8 pg[4] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf };
@@ -353,8 +352,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_8(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[8] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -448,8 +448,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_12(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[12] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -537,8 +538,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_4b_32(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
u8 pg[32] = { pkt[0] >> 4, pkt[0] & 0xf, pkt[1] >> 4, pkt[1] & 0xf,
pkt[2] >> 4, pkt[2] & 0xf, pkt[3] >> 4, pkt[3] & 0xf,
@@ -672,8 +674,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_1(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -729,8 +732,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_2(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -793,8 +797,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_4(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -868,8 +873,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -953,8 +959,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
int i, ret = -1, m256_size = f->bsize / NFT_PIPAPO_LONGS_PER_M256, b;
unsigned long *lt = f->lt, bsize = f->bsize;
@@ -1045,8 +1052,9 @@ nothing:
* word index to be checked next (i.e. first filled word).
*/
static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
- struct nft_pipapo_field *f, int offset,
- const u8 *pkt, bool first, bool last)
+ const struct nft_pipapo_field *f,
+ int offset, const u8 *pkt,
+ bool first, bool last)
{
unsigned long bsize = f->bsize;
int i, ret = -1, b;
@@ -1120,11 +1128,12 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
const u32 *key, const struct nft_set_ext **ext)
{
struct nft_pipapo *priv = nft_set_priv(set);
- unsigned long *res, *fill, *scratch;
+ struct nft_pipapo_scratch *scratch;
u8 genmask = nft_genmask_cur(net);
+ const struct nft_pipapo_match *m;
+ const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- struct nft_pipapo_match *m;
- struct nft_pipapo_field *f;
+ unsigned long *res, *fill;
bool map_index;
int i, ret = 0;
@@ -1141,15 +1150,16 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
*/
kernel_fpu_begin_mask(0);
- scratch = *raw_cpu_ptr(m->scratch_aligned);
+ scratch = *raw_cpu_ptr(m->scratch);
if (unlikely(!scratch)) {
kernel_fpu_end();
return false;
}
- map_index = raw_cpu_read(nft_pipapo_avx2_scratch_index);
- res = scratch + (map_index ? m->bsize_max : 0);
- fill = scratch + (map_index ? 0 : m->bsize_max);
+ map_index = scratch->map_index;
+
+ res = scratch->map + (map_index ? m->bsize_max : 0);
+ fill = scratch->map + (map_index ? 0 : m->bsize_max);
/* Starting map doesn't need to be set for this implementation */
@@ -1221,7 +1231,7 @@ next_match:
out:
if (i % 2)
- raw_cpu_write(nft_pipapo_avx2_scratch_index, !map_index);
+ scratch->map_index = !map_index;
kernel_fpu_end();
return ret >= 0;
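The pipapo AVX2 hunks above also stop tracking the result/fill toggle in a separate per-CPU variable (nft_pipapo_avx2_scratch_index) and keep it inside the scratch area itself, so the index can never go out of sync with the maps it selects between. A minimal sketch of the double-buffer scheme, with an assumed field layout rather than the kernel's exact struct:

    /* Double-buffered lookup bitmaps plus their toggle, kept together
     * as in struct nft_pipapo_scratch.  Layout here is illustrative.
     */
    struct scratch {
            u8 map_index;           /* which half acts as "res" this round */
            unsigned long map[];    /* 2 * bsize_max words */
    };

    static void lookup_one_field(struct scratch *s, unsigned long bsize_max)
    {
            unsigned long *res  = s->map + (s->map_index ? bsize_max : 0);
            unsigned long *fill = s->map + (s->map_index ? 0 : bsize_max);

            /* ... AND the field's match bits: read res, write fill ... */

            s->map_index = !s->map_index;   /* swap roles for next field */
    }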
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index baa3fea4fe65..9944fe479e53 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -234,7 +234,7 @@ static void nft_rbtree_gc_elem_remove(struct net *net, struct nft_set *set,
static const struct nft_rbtree_elem *
nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
- struct nft_rbtree_elem *rbe, u8 genmask)
+ struct nft_rbtree_elem *rbe)
{
struct nft_set *set = (struct nft_set *)__set;
struct rb_node *prev = rb_prev(&rbe->node);
@@ -253,7 +253,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
while (prev) {
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
if (nft_rbtree_interval_end(rbe_prev) &&
- nft_set_elem_active(&rbe_prev->ext, genmask))
+ nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
break;
prev = rb_prev(prev);
@@ -313,6 +313,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree *priv = nft_set_priv(set);
u8 cur_genmask = nft_genmask_cur(net);
u8 genmask = nft_genmask_next(net);
+ u64 tstamp = nft_net_tstamp(net);
int d;
/* Descend the tree to search for an existing element greater than the
@@ -360,11 +361,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
/* perform garbage collection to avoid bogus overlap reports
* but skip new elements in this transaction.
*/
- if (nft_set_elem_expired(&rbe->ext) &&
+ if (__nft_set_elem_expired(&rbe->ext, tstamp) &&
nft_set_elem_active(&rbe->ext, cur_genmask)) {
const struct nft_rbtree_elem *removed_end;
- removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+ removed_end = nft_rbtree_gc_elem(set, priv, rbe);
if (IS_ERR(removed_end))
return PTR_ERR(removed_end);
@@ -551,6 +552,7 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
const struct nft_rbtree *priv = nft_set_priv(set);
const struct rb_node *parent = priv->root.rb_node;
u8 genmask = nft_genmask_next(net);
+ u64 tstamp = nft_net_tstamp(net);
int d;
while (parent != NULL) {
@@ -571,7 +573,7 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
- } else if (nft_set_elem_expired(&rbe->ext)) {
+ } else if (__nft_set_elem_expired(&rbe->ext, tstamp)) {
break;
} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
@@ -624,9 +626,10 @@ static void nft_rbtree_gc(struct nft_set *set)
{
struct nft_rbtree *priv = nft_set_priv(set);
struct nft_rbtree_elem *rbe, *rbe_end = NULL;
+ struct net *net = read_pnet(&set->net);
+ u64 tstamp = nft_net_tstamp(net);
struct rb_node *node, *next;
struct nft_trans_gc *gc;
- struct net *net;
set = nft_set_container_of(priv);
net = read_pnet(&set->net);
@@ -648,7 +651,7 @@ static void nft_rbtree_gc(struct nft_set *set)
rbe_end = rbe;
continue;
}
- if (!nft_set_elem_expired(&rbe->ext))
+ if (!__nft_set_elem_expired(&rbe->ext, tstamp))
continue;
gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
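The rbtree changes funnel every expiry check through a single timestamp captured once per transaction (nft_net_tstamp()), so an element cannot appear live at the start of a walk and expired by its end. The idea behind __nft_set_elem_expired(ext, tstamp), sketched with stand-in types and helpers:

    static bool expired_at(u64 expires, u64 tstamp)
    {
            return expires != 0 && expires <= tstamp;   /* absolute times */
    }

    /* In a GC walk: capture the timestamp once, before iterating. */
    u64 tstamp = nft_net_tstamp(net);

    for (node = rb_first(&priv->root); node; node = rb_next(node)) {
            struct elem *e = rb_entry(node, struct elem, node); /* stand-in */

            if (expired_at(e->expires, tstamp))
                    queue_removal(e);                   /* hypothetical */
    }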
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 9ed85be79452..f30163e2ca62 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -242,6 +242,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
return nft_chain_validate_hooks(ctx->chain,
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
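nft_socket, nft_synproxy, nft_tproxy and nft_xfrm below all gain the same guard, so these expressions can no longer be loaded from families other than IPv4, IPv6 and inet. The repeated shape, expressed once as a hypothetical shared helper (not part of this patch):

    static int nft_validate_inet_family(const struct nft_ctx *ctx)
    {
            if (ctx->family != NFPROTO_IPV4 &&
                ctx->family != NFPROTO_IPV6 &&
                ctx->family != NFPROTO_INET)
                    return -EOPNOTSUPP;
            return 0;
    }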
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index 13da882669a4..1d737f89dfc1 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
break;
#endif
case NFPROTO_INET:
- case NFPROTO_BRIDGE:
err = nf_synproxy_ipv4_init(snet, ctx->net);
if (err)
goto nf_ct_failure;
@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
break;
#endif
case NFPROTO_INET:
- case NFPROTO_BRIDGE:
nf_synproxy_ipv4_fini(snet, ctx->net);
nf_synproxy_ipv6_fini(snet, ctx->net);
break;
@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_FORWARD));
}
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index ae15cd693f0e..71412adb73d4 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -316,6 +316,11 @@ static int nft_tproxy_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
}
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 9f21953c7433..f735d79d8be5 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -713,6 +713,7 @@ static const struct nft_object_ops nft_tunnel_obj_ops = {
static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
.type = NFT_OBJECT_TUNNEL,
+ .family = NFPROTO_NETDEV,
.ops = &nft_tunnel_obj_ops,
.maxattr = NFTA_TUNNEL_KEY_MAX,
.policy = nft_tunnel_key_policy,
diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
index 452f8587adda..1c866757db55 100644
--- a/net/netfilter/nft_xfrm.c
+++ b/net/netfilter/nft_xfrm.c
@@ -235,6 +235,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
const struct nft_xfrm *priv = nft_expr_priv(expr);
unsigned int hooks;
+ if (ctx->family != NFPROTO_IPV4 &&
+ ctx->family != NFPROTO_IPV6 &&
+ ctx->family != NFPROTO_INET)
+ return -EOPNOTSUPP;
+
switch (priv->dir) {
case XFRM_POLICY_IN:
hooks = (1 << NF_INET_FORWARD) |
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index acef4155f0da..008419db815a 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -179,43 +179,6 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
}
EXPORT_SYMBOL_GPL(nf_route);
-static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
-{
-#ifdef CONFIG_INET
- const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
- if (entry->state.hook == NF_INET_LOCAL_OUT) {
- const struct iphdr *iph = ip_hdr(skb);
-
- if (!(iph->tos == rt_info->tos &&
- skb->mark == rt_info->mark &&
- iph->daddr == rt_info->daddr &&
- iph->saddr == rt_info->saddr))
- return ip_route_me_harder(entry->state.net, entry->state.sk,
- skb, RTN_UNSPEC);
- }
-#endif
- return 0;
-}
-
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
-{
- const struct nf_ipv6_ops *v6ops;
- int ret = 0;
-
- switch (entry->state.pf) {
- case AF_INET:
- ret = nf_ip_reroute(skb, entry);
- break;
- case AF_INET6:
- v6ops = rcu_dereference(nf_ipv6_ops);
- if (v6ops)
- ret = v6ops->reroute(skb, entry);
- break;
- }
- return ret;
-}
-
/* Only get and check the lengths, not do any hop-by-hop stuff. */
int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen)
{
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 21624d68314f..da5d929c7c85 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1142,7 +1142,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
if (target->compat_from_user)
target->compat_from_user(t->data, ct->data);
else
- memcpy(t->data, ct->data, tsize - sizeof(*ct));
+ unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct),
+ /* UAPI 0-sized destination */);
tsize += off;
t->u.user.target_size = tsize;
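FORTIFY_SOURCE cannot bound a copy into a UAPI structure that ends in a 0-sized array, so the copy above is annotated with unsafe_memcpy(), whose fourth argument is a justification comment rather than a value. The shape in isolation, with a made-up stand-in for the UAPI type:

    struct legacy {                         /* hypothetical UAPI layout */
            u16 size;
            unsigned char data[0];          /* FORTIFY can't see a bound */
    };

    static void copy_payload(struct legacy *dst, const void *src, size_t n)
    {
            unsafe_memcpy(dst->data, src, n,
                          /* UAPI 0-sized destination, bounded by caller */);
    }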
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 27511c90a26f..1ba4f58e1d35 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -610,7 +610,7 @@ int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *iter;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -666,8 +666,8 @@ int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset)
struct netlbl_lsm_catmap *prev = NULL;
u32 idx;
u32 bit;
- NETLBL_CATMAP_MAPTYPE bitmask;
- NETLBL_CATMAP_MAPTYPE bitmap;
+ u64 bitmask;
+ u64 bitmap;
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
if (iter == NULL)
@@ -857,7 +857,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
offset -= iter->startbit;
idx = offset / NETLBL_CATMAP_MAPSIZE;
- iter->bitmap[idx] |= (NETLBL_CATMAP_MAPTYPE)bitmap
+ iter->bitmap[idx] |= (u64)bitmap
<< (offset % NETLBL_CATMAP_MAPSIZE);
return 0;
@@ -876,7 +876,7 @@ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
* Description:
* Starting at @offset, walk the bitmap from left to right until either the
* desired bit is found or we reach the end. Return the bit offset, -1 if
- * not found, or -2 if error.
+ * not found.
*/
int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
u32 offset, u8 state)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4ed8ffd58ff3..da846212fb9b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -130,7 +130,7 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
"nlk_cb_mutex-MAX_LINKS"
};
-static int netlink_dump(struct sock *sk);
+static int netlink_dump(struct sock *sk, bool lock_taken);
/* nl_table locking explained:
* Lookup and traversal are protected with an RCU read-side lock. Insertion
@@ -167,7 +167,7 @@ static inline u32 netlink_group_mask(u32 group)
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
gfp_t gfp_mask)
{
- unsigned int len = skb_end_offset(skb);
+ unsigned int len = skb->len;
struct sk_buff *new;
new = alloc_skb(len, gfp_mask);
@@ -374,7 +374,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
if (is_vmalloc_addr(skb->head)) {
if (!skb->cloned ||
!atomic_dec_return(&(skb_shinfo(skb)->dataref)))
- vfree(skb->head);
+ vfree_atomic(skb->head);
skb->head = NULL;
}
@@ -636,7 +636,7 @@ static struct proto netlink_proto = {
};
static int __netlink_create(struct net *net, struct socket *sock,
- struct mutex *cb_mutex, int protocol,
+ struct mutex *dump_cb_mutex, int protocol,
int kern)
{
struct sock *sk;
@@ -651,15 +651,11 @@ static int __netlink_create(struct net *net, struct socket *sock,
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
- if (cb_mutex) {
- nlk->cb_mutex = cb_mutex;
- } else {
- nlk->cb_mutex = &nlk->cb_def_mutex;
- mutex_init(nlk->cb_mutex);
- lockdep_set_class_and_name(nlk->cb_mutex,
+ mutex_init(&nlk->nl_cb_mutex);
+ lockdep_set_class_and_name(&nlk->nl_cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
- }
+ nlk->dump_cb_mutex = dump_cb_mutex;
init_waitqueue_head(&nlk->wait);
sk->sk_destruct = netlink_sock_destruct;
@@ -1206,23 +1202,21 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
{
+ size_t head_size = SKB_HEAD_ALIGN(size);
struct sk_buff *skb;
void *data;
- if (size <= NLMSG_GOODSIZE || broadcast)
+ if (head_size <= PAGE_SIZE || broadcast)
return alloc_skb(size, GFP_KERNEL);
- size = SKB_DATA_ALIGN(size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- data = vmalloc(size);
- if (data == NULL)
+ data = kvmalloc(head_size, GFP_KERNEL);
+ if (!data)
return NULL;
- skb = __build_skb(data, size);
- if (skb == NULL)
- vfree(data);
- else
+ skb = __build_skb(data, head_size);
+ if (!skb)
+ kvfree(data);
+ else if (is_vmalloc_addr(data))
skb->destructor = netlink_skb_destructor;
return skb;
@@ -1987,7 +1981,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (READ_ONCE(nlk->cb_running) &&
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, false);
if (ret) {
WRITE_ONCE(sk->sk_err, -ret);
sk_error_report(sk);
@@ -2196,7 +2190,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
return 0;
}
-static int netlink_dump(struct sock *sk)
+static int netlink_dump(struct sock *sk, bool lock_taken)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ext_ack extack = {};
@@ -2208,7 +2202,8 @@ static int netlink_dump(struct sock *sk)
int alloc_min_size;
int alloc_size;
- mutex_lock(nlk->cb_mutex);
+ if (!lock_taken)
+ mutex_lock(&nlk->nl_cb_mutex);
if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
@@ -2260,14 +2255,33 @@ static int netlink_dump(struct sock *sk)
netlink_skb_set_owner_r(skb, sk);
if (nlk->dump_done_errno > 0) {
+ struct mutex *extra_mutex = nlk->dump_cb_mutex;
+
cb->extack = &extack;
+
+ if (cb->flags & RTNL_FLAG_DUMP_UNLOCKED)
+ extra_mutex = NULL;
+ if (extra_mutex)
+ mutex_lock(extra_mutex);
nlk->dump_done_errno = cb->dump(skb, cb);
+ if (extra_mutex)
+ mutex_unlock(extra_mutex);
+
+ /* EMSGSIZE plus something already in the skb means
+ * that there's more to dump but the current skb has filled up.
+ * If the callback really wants to return EMSGSIZE to user space,
+ * it needs to do so again, on the next cb->dump() call,
+ * without putting data in the skb.
+ */
+ if (nlk->dump_done_errno == -EMSGSIZE && skb->len)
+ nlk->dump_done_errno = skb->len;
+
cb->extack = NULL;
}
if (nlk->dump_done_errno > 0 ||
skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
if (sk_filter(sk, skb))
kfree_skb(skb);
@@ -2301,13 +2315,13 @@ static int netlink_dump(struct sock *sk)
WRITE_ONCE(nlk->cb_running, false);
module = cb->module;
skb = cb->skb;
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
module_put(module);
consume_skb(skb);
return 0;
errout_skb:
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
kfree_skb(skb);
return err;
}
@@ -2330,7 +2344,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
}
nlk = nlk_sk(sk);
- mutex_lock(nlk->cb_mutex);
+ mutex_lock(&nlk->nl_cb_mutex);
/* A dump is in progress... */
if (nlk->cb_running) {
ret = -EBUSY;
@@ -2350,6 +2364,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->data = control->data;
cb->module = control->module;
cb->min_dump_alloc = control->min_dump_alloc;
+ cb->flags = control->flags;
cb->skb = skb;
cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
@@ -2365,9 +2380,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
WRITE_ONCE(nlk->cb_running, true);
nlk->dump_done_errno = INT_MAX;
- mutex_unlock(nlk->cb_mutex);
-
- ret = netlink_dump(sk);
+ ret = netlink_dump(sk, true);
sock_put(sk);
@@ -2383,7 +2396,7 @@ error_put:
module_put(control->module);
error_unlock:
sock_put(sk);
- mutex_unlock(nlk->cb_mutex);
+ mutex_unlock(&nlk->nl_cb_mutex);
error_free:
kfree_skb(skb);
return ret;
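One allocation detail in the af_netlink.c hunks above is worth spelling out: netlink_alloc_large_skb() now sizes the head with SKB_HEAD_ALIGN() and lets kvmalloc() pick between kmalloc and vmalloc; only a head that really came from vmalloc needs the custom destructor, and that destructor switches to vfree_atomic() because skbs may be freed with BHs disabled. A sketch of the pairing under those assumptions:

    static struct sk_buff *alloc_big_skb(unsigned int size)
    {
            size_t head_size = SKB_HEAD_ALIGN(size);
            void *head = kvmalloc(head_size, GFP_KERNEL);
            struct sk_buff *skb;

            if (!head)
                    return NULL;
            skb = __build_skb(head, head_size);
            if (!skb)
                    kvfree(head);   /* right call for either backend */
            else if (is_vmalloc_addr(head))
                    skb->destructor = netlink_skb_destructor; /* vfree_atomic() */
            return skb;
    }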
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 2145979b9986..9751e29d4bbb 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -39,8 +39,9 @@ struct netlink_sock {
bool cb_running;
int dump_done_errno;
struct netlink_callback cb;
- struct mutex *cb_mutex;
- struct mutex cb_def_mutex;
+ struct mutex nl_cb_mutex;
+
+ struct mutex *dump_cb_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
int (*netlink_bind)(struct net *net, int group);
void (*netlink_unbind)(struct net *net, int group);
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index e12c90d5f6ad..61981e01fd6f 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -207,7 +207,7 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
}
- return err < 0 ? err : skb->len;
+ return err <= 0 ? err : skb->len;
}
static int netlink_diag_dump_done(struct netlink_callback *cb)
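The diag fix relies on the long-standing netlink dump contract: a callback returns a positive count (conventionally skb->len) while records remain, 0 once the dump is complete, and a negative errno on failure. Treating 0 as done (err <= 0 rather than err < 0) saves an empty final recvmsg() round trip. A hypothetical callback honouring the contract:

    static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            int err = fill_some_records(skb, cb);   /* made-up helper */

            if (err < 0)
                    return err;             /* abort the dump */
            if (dump_finished(cb))          /* made-up helper */
                    return 0;               /* NLMSG_DONE can go out now */
            return skb->len;                /* call me again */
    }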
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8c7af02f8454..3b7666944b11 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1232,7 +1232,7 @@ static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
if (hdr == NULL)
- return -1;
+ return -EMSGSIZE;
if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
@@ -1355,6 +1355,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int fams_to_skip = cb->args[0];
unsigned int id;
+ int err = 0;
idr_for_each_entry(&genl_fam_idr, rt, id) {
if (!rt->netnsok && !net_eq(net, &init_net))
@@ -1363,16 +1364,17 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
if (n++ < fams_to_skip)
continue;
- if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
- skb, CTRL_CMD_NEWFAMILY) < 0) {
+ err = ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ skb, CTRL_CMD_NEWFAMILY);
+ if (err) {
n--;
break;
}
}
cb->args[0] = n;
- return skb->len;
+ return err;
}
static struct sk_buff *ctrl_build_family_msg(const struct genl_family *family,
@@ -1836,6 +1838,9 @@ static int genl_bind(struct net *net, int group)
!ns_capable(net->user_ns, CAP_SYS_ADMIN))
ret = -EPERM;
+ if (family->bind)
+ family->bind(i);
+
break;
}
@@ -1843,12 +1848,39 @@ static int genl_bind(struct net *net, int group)
return ret;
}
+static void genl_unbind(struct net *net, int group)
+{
+ const struct genl_family *family;
+ unsigned int id;
+
+ down_read(&cb_lock);
+
+ idr_for_each_entry(&genl_fam_idr, family, id) {
+ int i;
+
+ if (family->n_mcgrps == 0)
+ continue;
+
+ i = group - family->mcgrp_offset;
+ if (i < 0 || i >= family->n_mcgrps)
+ continue;
+
+ if (family->unbind)
+ family->unbind(i);
+
+ break;
+ }
+
+ up_read(&cb_lock);
+}
+
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
.bind = genl_bind,
+ .unbind = genl_unbind,
.release = genl_release,
};
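genl_bind() now forwards multicast group joins to the owning family, and the new genl_unbind() mirrors it for leaves, so a family can cheaply track whether anyone is listening. A sketch of a consumer, assuming the callbacks this series introduces take the group index relative to mcgrp_offset and return nothing; the family and its bookkeeping are made up:

    static atomic_t myfam_listeners;                /* hypothetical */

    static void myfam_bind(int mcgrp)
    {
            if (mcgrp == MYFAM_MCGRP_EVENTS)        /* hypothetical */
                    atomic_inc(&myfam_listeners);
    }

    static void myfam_unbind(int mcgrp)
    {
            if (mcgrp == MYFAM_MCGRP_EVENTS)
                    atomic_dec(&myfam_listeners);
    }

    static struct genl_family myfam_family = {
            .name   = "myfam",
            .bind   = myfam_bind,
            .unbind = myfam_unbind,
            /* ... */
    };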
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 0eed00184adf..104a80b75477 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -453,16 +453,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
nr_init_timers(sk);
nr->t1 =
- msecs_to_jiffies(sysctl_netrom_transport_timeout);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
nr->t2 =
- msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
nr->n2 =
- msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
nr->t4 =
- msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
nr->idle =
- msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
- nr->window = sysctl_netrom_transport_requested_window_size;
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
+ nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
nr->bpqext = 1;
nr->state = NR_STATE_0;
@@ -954,7 +954,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
- if (sysctl_netrom_reset_circuit &&
+ if (READ_ONCE(sysctl_netrom_reset_circuit) &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
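Every netrom sysctl read on a fast path is now wrapped in READ_ONCE() because the sysctl handler can store a new value concurrently with no shared lock; the annotation pairs with the writer, prevents load tearing and refetching, and documents the intentional data race for KCSAN. The pattern in miniature:

    static int example_sysctl = 10;         /* stand-in knob */

    void sysctl_writer(int v)
    {
            WRITE_ONCE(example_sysctl, v);  /* handler side */
    }

    int fast_path_reader(void)
    {
            return READ_ONCE(example_sysctl);
    }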
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 3aaac4a22b38..2c34389c3ce6 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -81,7 +81,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
- *buff++ = sysctl_netrom_network_ttl_initialiser;
+ *buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
*buff++ = NR_PROTO_IP;
*buff++ = NR_PROTO_IP;
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 2f084b6f69d7..97944db6b5ac 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -97,7 +97,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -128,7 +128,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -262,7 +262,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 44929657f5b7..5e531394a724 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -204,7 +204,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
- *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (!nr_route_frame(skb, NULL)) {
kfree_skb(skb);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index baea3cbd76ca..70480869ad1c 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -153,7 +153,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
nr_neigh->digipeat = NULL;
nr_neigh->ax25 = NULL;
nr_neigh->dev = dev;
- nr_neigh->quality = sysctl_netrom_default_path_quality;
+ nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality);
nr_neigh->locked = 0;
nr_neigh->count = 0;
nr_neigh->number = nr_neigh_no++;
@@ -728,7 +728,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
nr_neigh->ax25 = NULL;
ax25_cb_put(ax25);
- if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
+ if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
nr_neigh_put(nr_neigh);
return;
}
@@ -766,7 +766,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if (ax25 != NULL) {
ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
ax25->ax25_dev->dev, 0,
- sysctl_netrom_obsolescence_count_initialiser);
+ READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
if (ret)
return ret;
}
@@ -780,7 +780,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
return ret;
}
- if (!sysctl_netrom_routing_control && ax25 != NULL)
+ if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
return 0;
/* Its Time-To-Live has expired */
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index e2d2af924cff..c3bbd5880850 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -182,7 +182,8 @@ void nr_write_internal(struct sock *sk, int frametype)
*dptr++ = nr->my_id;
*dptr++ = frametype;
*dptr++ = nr->window;
- if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ if (nr->bpqext)
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
break;
case NR_DISCREQ:
@@ -236,7 +237,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
- *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (mine) {
*dptr++ = 0;
diff --git a/net/nfc/core.c b/net/nfc/core.c
index eb2c0959e5b6..e58dc6405054 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -1015,7 +1015,7 @@ static void nfc_check_pres_timeout(struct timer_list *t)
schedule_work(&dev->check_pres_work);
}
-struct class nfc_class = {
+const struct class nfc_class = {
.name = "nfc",
.dev_release = nfc_release,
};
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
index 2140f6724644..ba91284f4086 100644
--- a/net/nfc/hci/llc.c
+++ b/net/nfc/hci/llc.c
@@ -30,15 +30,19 @@ exit:
return r;
}
+static void nfc_llc_del_engine(struct nfc_llc_engine *llc_engine)
+{
+ list_del(&llc_engine->entry);
+ kfree_const(llc_engine->name);
+ kfree(llc_engine);
+}
+
void nfc_llc_exit(void)
{
struct nfc_llc_engine *llc_engine, *n;
- list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
- }
+ list_for_each_entry_safe(llc_engine, n, &llc_engines, entry)
+ nfc_llc_del_engine(llc_engine);
}
int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
@@ -49,7 +53,7 @@ int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
if (llc_engine == NULL)
return -ENOMEM;
- llc_engine->name = kstrdup(name, GFP_KERNEL);
+ llc_engine->name = kstrdup_const(name, GFP_KERNEL);
if (llc_engine->name == NULL) {
kfree(llc_engine);
return -ENOMEM;
@@ -82,9 +86,7 @@ void nfc_llc_unregister(const char *name)
if (llc_engine == NULL)
return;
- list_del(&llc_engine->entry);
- kfree(llc_engine->name);
- kfree(llc_engine);
+ nfc_llc_del_engine(llc_engine);
}
struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
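kstrdup_const() avoids copying when its argument lives in .rodata (as the engine names passed by in-kernel callers typically do) and kfree_const() only frees pointers that were really allocated, which is why both teardown paths are folded into one nfc_llc_del_engine() helper. The pairing in isolation, with a stand-in struct:

    struct engine {                         /* stand-in */
            struct list_head entry;
            const char *name;
    };

    static struct engine *engine_new(const char *name)
    {
            struct engine *e = kzalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return NULL;
            e->name = kstrdup_const(name, GFP_KERNEL); /* may alias name */
            if (!e->name) {
                    kfree(e);
                    return NULL;
            }
            return e;
    }

    static void engine_del(struct engine *e)
    {
            kfree_const(e->name);   /* no-op for .rodata pointers */
            kfree(e);
    }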
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 97348cedb16b..cdad47b140fa 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1208,6 +1208,10 @@ void nci_free_device(struct nci_dev *ndev)
{
nfc_free_device(ndev->nfc_dev);
nci_hci_deallocate(ndev);
+
+ /* drop partial rx data packet if present */
+ if (ndev->rx_data_reassembly)
+ kfree_skb(ndev->rx_data_reassembly);
kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 88965e2068ac..ebc5728aab4e 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -48,6 +48,7 @@ struct ovs_len_tbl {
#define OVS_ATTR_NESTED -1
#define OVS_ATTR_VARIABLE -2
+#define OVS_COPY_ACTIONS_MAX_DEPTH 16
static bool actions_may_change_flow(const struct nlattr *actions)
{
@@ -2545,13 +2546,15 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- u32 mpls_label_count, bool log);
+ u32 mpls_label_count, bool log,
+ u32 depth);
static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- u32 mpls_label_count, bool log, bool last)
+ u32 mpls_label_count, bool log, bool last,
+ u32 depth)
{
const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
const struct nlattr *probability, *actions;
@@ -2602,7 +2605,8 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
return err;
err = __ovs_nla_copy_actions(net, actions, key, sfa,
- eth_type, vlan_tci, mpls_label_count, log);
+ eth_type, vlan_tci, mpls_label_count, log,
+ depth + 1);
if (err)
return err;
@@ -2617,7 +2621,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- u32 mpls_label_count, bool log)
+ u32 mpls_label_count, bool log,
+ u32 depth)
{
const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1];
int start, action_start, err, rem;
@@ -2660,7 +2665,8 @@ static int validate_and_copy_dec_ttl(struct net *net,
return action_start;
err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type,
- vlan_tci, mpls_label_count, log);
+ vlan_tci, mpls_label_count, log,
+ depth + 1);
if (err)
return err;
@@ -2674,7 +2680,8 @@ static int validate_and_copy_clone(struct net *net,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- u32 mpls_label_count, bool log, bool last)
+ u32 mpls_label_count, bool log, bool last,
+ u32 depth)
{
int start, err;
u32 exec;
@@ -2694,7 +2701,8 @@ static int validate_and_copy_clone(struct net *net,
return err;
err = __ovs_nla_copy_actions(net, attr, key, sfa,
- eth_type, vlan_tci, mpls_label_count, log);
+ eth_type, vlan_tci, mpls_label_count, log,
+ depth + 1);
if (err)
return err;
@@ -3063,7 +3071,7 @@ static int validate_and_copy_check_pkt_len(struct net *net,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
u32 mpls_label_count,
- bool log, bool last)
+ bool log, bool last, u32 depth)
{
const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
@@ -3111,7 +3119,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
- eth_type, vlan_tci, mpls_label_count, log);
+ eth_type, vlan_tci, mpls_label_count, log,
+ depth + 1);
if (err)
return err;
@@ -3124,7 +3133,8 @@ static int validate_and_copy_check_pkt_len(struct net *net,
return nested_acts_start;
err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
- eth_type, vlan_tci, mpls_label_count, log);
+ eth_type, vlan_tci, mpls_label_count, log,
+ depth + 1);
if (err)
return err;
@@ -3152,12 +3162,16 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
const struct sw_flow_key *key,
struct sw_flow_actions **sfa,
__be16 eth_type, __be16 vlan_tci,
- u32 mpls_label_count, bool log)
+ u32 mpls_label_count, bool log,
+ u32 depth)
{
u8 mac_proto = ovs_key_mac_proto(key);
const struct nlattr *a;
int rem, err;
+ if (depth > OVS_COPY_ACTIONS_MAX_DEPTH)
+ return -EOVERFLOW;
+
nla_for_each_nested(a, attr, rem) {
/* Expected argument lengths, (u32)-1 for variable length. */
static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
@@ -3355,7 +3369,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_sample(net, a, key, sfa,
eth_type, vlan_tci,
mpls_label_count,
- log, last);
+ log, last, depth);
if (err)
return err;
skip_copy = true;
@@ -3426,7 +3440,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
err = validate_and_copy_clone(net, a, key, sfa,
eth_type, vlan_tci,
mpls_label_count,
- log, last);
+ log, last, depth);
if (err)
return err;
skip_copy = true;
@@ -3440,7 +3454,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
eth_type,
vlan_tci,
mpls_label_count,
- log, last);
+ log, last,
+ depth);
if (err)
return err;
skip_copy = true;
@@ -3450,7 +3465,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
case OVS_ACTION_ATTR_DEC_TTL:
err = validate_and_copy_dec_ttl(net, a, key, sfa,
eth_type, vlan_tci,
- mpls_label_count, log);
+ mpls_label_count, log,
+ depth);
if (err)
return err;
skip_copy = true;
@@ -3495,7 +3511,8 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
(*sfa)->orig_len = nla_len(attr);
err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
- key->eth.vlan.tci, mpls_label_count, log);
+ key->eth.vlan.tci, mpls_label_count, log,
+ 0);
if (err)
ovs_nla_free_flow_actions(*sfa);
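__ovs_nla_copy_actions() recurses for sample, clone, dec_ttl and check_pkt_len actions, so a crafted flow could nest actions deeply enough to exhaust the kernel stack; threading an explicit depth through the recursion and failing with -EOVERFLOW past 16 levels bounds the worst case. The generic shape of the fix:

    #define MAX_DEPTH 16

    static int walk_actions(const struct nlattr *attr, u32 depth)
    {
            const struct nlattr *a;
            int rem, err;

            if (depth > MAX_DEPTH)
                    return -EOVERFLOW;      /* nested too deep */

            nla_for_each_nested(a, attr, rem) {
                    if (action_is_nested(a)) {      /* hypothetical test */
                            err = walk_actions(a, depth + 1);
                            if (err)
                                    return err;
                    }
            }
            return 0;
    }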
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9bbc2686690..0db31ca4982d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2057,7 +2057,7 @@ retry:
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = READ_ONCE(sk->sk_mark);
skb->tstamp = sockc.transmit_time;
-
+ skb->mono_delivery_time = !!skb->tstamp;
skb_setup_tx_timestamp(skb, sockc.tsflags);
if (unlikely(extra_len == 4))
@@ -2586,6 +2586,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->priority = READ_ONCE(po->sk.sk_priority);
skb->mark = READ_ONCE(po->sk.sk_mark);
skb->tstamp = sockc->transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
skb_setup_tx_timestamp(skb, sockc->tsflags);
skb_zcopy_set_nouarg(skb, ph.raw);
@@ -3064,6 +3065,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc.mark;
skb->tstamp = sockc.transmit_time;
+ skb->mono_delivery_time = !!skb->tstamp;
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
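All three af_packet transmit paths stamp skb->mono_delivery_time whenever a user-supplied transmit_time was copied into skb->tstamp, recording which clock the field holds so later layers do not mistake it for a receive timestamp. The two lines always travel together:

    skb->tstamp = sockc.transmit_time;
    skb->mono_delivery_time = !!skb->tstamp;    /* 0 means no delivery time */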
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 3aa50dc7535b..976fe250b509 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -34,10 +34,10 @@ static int pn_ioctl(struct sock *sk, int cmd, int *karg)
switch (cmd) {
case SIOCINQ:
- lock_sock(sk);
+ spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
*karg = skb ? skb->len : 0;
- release_sock(sk);
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
return 0;
case SIOCPNADDRESOURCE:
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index faba31f2eff2..3dd5f52bc1b5 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -917,6 +917,37 @@ static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
return 0;
}
+static unsigned int pep_first_packet_length(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct sk_buff_head *q;
+ struct sk_buff *skb;
+ unsigned int len = 0;
+ bool found = false;
+
+ if (sock_flag(sk, SOCK_URGINLINE)) {
+ q = &pn->ctrlreq_queue;
+ spin_lock_bh(&q->lock);
+ skb = skb_peek(q);
+ if (skb) {
+ len = skb->len;
+ found = true;
+ }
+ spin_unlock_bh(&q->lock);
+ }
+
+ if (likely(!found)) {
+ q = &sk->sk_receive_queue;
+ spin_lock_bh(&q->lock);
+ skb = skb_peek(q);
+ if (skb)
+ len = skb->len;
+ spin_unlock_bh(&q->lock);
+ }
+
+ return len;
+}
+
static int pep_ioctl(struct sock *sk, int cmd, int *karg)
{
struct pep_sock *pn = pep_sk(sk);
@@ -929,15 +960,7 @@ static int pep_ioctl(struct sock *sk, int cmd, int *karg)
break;
}
- lock_sock(sk);
- if (sock_flag(sk, SOCK_URGINLINE) &&
- !skb_queue_empty(&pn->ctrlreq_queue))
- *karg = skb_peek(&pn->ctrlreq_queue)->len;
- else if (!skb_queue_empty(&sk->sk_receive_queue))
- *karg = skb_peek(&sk->sk_receive_queue)->len;
- else
- *karg = 0;
- release_sock(sk);
+ *karg = pep_first_packet_length(sk);
ret = 0;
break;
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 01c4cdfef45d..8435a20968ef 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
rs->rs_rx_traces = trace.rx_traces;
for (i = 0; i < rs->rs_rx_traces; i++) {
- if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
+ if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
rs->rs_rx_traces = 0;
return -EFAULT;
}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index b4cc699c5fad..c749c5525b40 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -829,9 +829,7 @@ int rds_conn_init(void)
if (ret)
return ret;
- rds_conn_slab = kmem_cache_create("rds_connection",
- sizeof(struct rds_connection),
- 0, 0, NULL);
+ rds_conn_slab = KMEM_CACHE(rds_connection, 0);
if (!rds_conn_slab) {
rds_loop_net_exit();
return -ENOMEM;
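KMEM_CACHE() derives both the cache name and the object size from the type, so the string and the sizeof() can no longer drift apart. The two forms are equivalent:

    slab = kmem_cache_create("rds_connection",
                             sizeof(struct rds_connection), 0, 0, NULL);

    slab = KMEM_CACHE(rds_connection, 0);   /* same cache, less to mistype */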
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index fba82d36593a..a4e3c5de998b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -301,6 +301,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
kfree(sg);
}
ret = PTR_ERR(trans_private);
+ /* Trigger connection so that it's ready for the next retry */
+ if (ret == -ENODEV)
+ rds_conn_connect_if_down(cp->cp_conn);
goto out;
}
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c71b923764fd..5627f80013f8 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -425,6 +425,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
struct sock *sk = rds_rs_to_sk(rs);
int ret = 0;
unsigned long flags;
+ struct rds_incoming *to_drop = NULL;
write_lock_irqsave(&rs->rs_recv_lock, flags);
if (!list_empty(&inc->i_item)) {
@@ -435,11 +436,14 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
-be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
list_del_init(&inc->i_item);
- rds_inc_put(inc);
+ to_drop = inc;
}
}
write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ if (to_drop)
+ rds_inc_put(to_drop);
+
rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
return ret;
}
@@ -758,16 +762,21 @@ void rds_clear_recv_queue(struct rds_sock *rs)
struct sock *sk = rds_rs_to_sk(rs);
struct rds_incoming *inc, *tmp;
unsigned long flags;
+ LIST_HEAD(to_drop);
write_lock_irqsave(&rs->rs_recv_lock, flags);
list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
-be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
+ list_move(&inc->i_item, &to_drop);
+ }
+ write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+
+ list_for_each_entry_safe(inc, tmp, &to_drop, i_item) {
list_del_init(&inc->i_item);
rds_inc_put(inc);
}
- write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
/*
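rds_inc_put() can drop the final reference and free the incoming message, which must not happen under the bh-disabled rs_recv_lock; both hunks above therefore unlink under the lock and release only after it is dropped. The general unlink-then-free pattern, with hypothetical names:

    LIST_HEAD(to_drop);

    write_lock_irqsave(&lock, flags);
    list_for_each_entry_safe(obj, tmp, &live, item)
            if (should_drop(obj))           /* hypothetical predicate */
                    list_move(&obj->item, &to_drop);
    write_unlock_irqrestore(&lock, flags);

    list_for_each_entry_safe(obj, tmp, &to_drop, item) {
            list_del_init(&obj->item);
            put_object(obj);                /* may free; safe here */
    }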
diff --git a/net/rds/send.c b/net/rds/send.c
index 5e57a1581dc6..2899def23865 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1313,12 +1313,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* Parse any control messages the user may have included. */
ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
- if (ret) {
- /* Trigger connection so that its ready for the next retry */
- if (ret == -EAGAIN)
- rds_conn_connect_if_down(conn);
+ if (ret)
goto out;
- }
if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 465bfe5eb061..5222bc97d192 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
* rxrpc_kernel_set_max_life - Set maximum lifespan on a call
* @sock: The socket the call is on
* @call: The call to configure
- * @hard_timeout: The maximum lifespan of the call in jiffies
+ * @hard_timeout: The maximum lifespan of the call in ms
*
* Set the maximum lifespan of a call. The call will end with ETIME or
* ETIMEDOUT if it takes longer than this.
@@ -495,14 +495,14 @@ EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
unsigned long hard_timeout)
{
- unsigned long now;
+ ktime_t delay = ms_to_ktime(hard_timeout), expect_term_by;
mutex_lock(&call->user_mutex);
- now = jiffies;
- hard_timeout += now;
- WRITE_ONCE(call->expect_term_by, hard_timeout);
- rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);
+ expect_term_by = ktime_add(ktime_get_real(), delay);
+ WRITE_ONCE(call->expect_term_by, expect_term_by);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
mutex_unlock(&call->user_mutex);
}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index dbeb75c29857..08c0a32db8c7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -198,12 +198,23 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
- u16 offset; /* Offset of data */
- u16 len; /* Length of data */
- u8 flags;
+ union {
+ struct rxrpc_connection *conn; /* Connection referred to (poke packet) */
+ struct {
+ u16 offset; /* Offset of data */
+ u16 len; /* Length of data */
+ u8 flags;
#define RXRPC_RX_VERIFIED 0x01
-
+ };
+ struct {
+ rxrpc_seq_t first_ack; /* First packet in acks table */
+ rxrpc_seq_t prev_ack; /* Highest seq seen */
+ rxrpc_serial_t acked_serial; /* Packet in response to (or 0) */
+ u8 reason; /* Reason for ack */
+ u8 nr_acks; /* Number of acks+nacks */
+ u8 nr_nacks; /* Number of nacks */
+ } ack;
+ };
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
};
@@ -240,10 +251,9 @@ struct rxrpc_security {
struct rxrpc_key_token *);
/* Work out how much data we can store in a packet, given an estimate
- * of the amount of data remaining.
+ * of the amount of data remaining, and allocate a data buffer.
*/
- int (*how_much_data)(struct rxrpc_call *, size_t,
- size_t *, size_t *, size_t *);
+ struct rxrpc_txbuf *(*alloc_txbuf)(struct rxrpc_call *call, size_t remaining, gfp_t gfp);
/* impose security on a packet */
int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);
@@ -284,6 +294,7 @@ struct rxrpc_local {
struct socket *socket; /* my UDP socket */
struct task_struct *io_thread;
struct completion io_thread_ready; /* Indication that the I/O thread started */
+ struct page_frag_cache tx_alloc; /* Tx control packet allocation (I/O thread only) */
struct rxrpc_sock *service; /* Service(s) listening on this endpoint */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
struct sk_buff_head rx_delay_queue; /* Delay injection queue */
@@ -344,8 +355,8 @@ struct rxrpc_peer {
u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
- u32 rto_j; /* Retransmission timeout in jiffies */
- u8 backoff; /* Backoff timeout */
+ u32 rto_us; /* Retransmission timeout in usec */
+ u8 backoff; /* Backoff timeout (as shift) */
u8 cong_ssthresh; /* Congestion slow-start threshold */
};
@@ -492,6 +503,8 @@ struct rxrpc_connection {
struct list_head proc_link; /* link in procfs list */
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
+ struct page_frag_cache tx_data_alloc; /* Tx DATA packet allocation */
+ struct mutex tx_data_alloc_lock;
struct mutex security_lock; /* Lock for security management */
const struct rxrpc_security *security; /* applied security module */
@@ -510,7 +523,7 @@ struct rxrpc_connection {
enum rxrpc_call_completion completion; /* Completion condition */
s32 abort_code; /* Abort code of connection abort */
int debug_id; /* debug ID for printks */
- atomic_t serial; /* packet serial number counter */
+ rxrpc_serial_t tx_serial; /* Outgoing packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
u32 service_id; /* Service ID, possibly upgraded */
u32 security_level; /* Security level selected */
@@ -610,17 +623,17 @@ struct rxrpc_call {
const struct rxrpc_security *security; /* applied security module */
struct mutex user_mutex; /* User access mutex */
struct sockaddr_rxrpc dest_srx; /* Destination address */
- unsigned long delay_ack_at; /* When DELAY ACK needs to happen */
- unsigned long ack_lost_at; /* When ACK is figured as lost */
- unsigned long resend_at; /* When next resend needs to happen */
- unsigned long ping_at; /* When next to send a ping */
- unsigned long keepalive_at; /* When next to send a keepalive ping */
- unsigned long expect_rx_by; /* When we expect to get a packet by */
- unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
- unsigned long expect_term_by; /* When we expect call termination by */
- u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
- u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
- u32 hard_timo; /* Maximum lifetime or 0 (jif) */
+ ktime_t delay_ack_at; /* When DELAY ACK needs to happen */
+ ktime_t ack_lost_at; /* When ACK is figured as lost */
+ ktime_t resend_at; /* When next resend needs to happen */
+ ktime_t ping_at; /* When next to send a ping */
+ ktime_t keepalive_at; /* When next to send a keepalive ping */
+ ktime_t expect_rx_by; /* When we expect to get a packet by */
+ ktime_t expect_req_by; /* When we expect to get a request DATA packet by */
+ ktime_t expect_term_by; /* When we expect call termination by */
+ u32 next_rx_timo; /* Timeout for next Rx packet (ms) */
+ u32 next_req_timo; /* Timeout for next Rx request packet (ms) */
+ u32 hard_timo; /* Maximum lifetime or 0 (s) */
struct timer_list timer; /* Combined event timer */
struct work_struct destroyer; /* In-process-context destroyer */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
@@ -665,7 +678,7 @@ struct rxrpc_call {
rxrpc_seq_t tx_transmitted; /* Highest packet transmitted */
rxrpc_seq_t tx_prepared; /* Highest Tx slot prepared. */
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
- u16 tx_backoff; /* Delay to insert due to Tx failure */
+ u16 tx_backoff; /* Delay to insert due to Tx failure (ms) */
u8 tx_winsize; /* Maximum size of Tx window */
#define RXRPC_TX_MAX_WINDOW 128
ktime_t tx_last_sent; /* Last time a transmission occurred */
@@ -692,11 +705,11 @@ struct rxrpc_call {
u8 cong_dup_acks; /* Count of ACKs showing missing packets */
u8 cong_cumul_acks; /* Cumulative ACK count */
ktime_t cong_tstamp; /* Last time cwnd was changed */
+ struct sk_buff *cong_last_nack; /* Last ACK with nacks received */
/* Receive-phase ACK management (ACKs we send). */
u8 ackr_reason; /* reason to ACK */
u16 ackr_sack_base; /* Starting slot in SACK table ring */
- rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
rxrpc_seq_t ackr_window; /* Base of SACK window */
rxrpc_seq_t ackr_wtop; /* Base of SACK window */
unsigned int ackr_nr_unacked; /* Number of unacked packets */
@@ -730,7 +743,8 @@ struct rxrpc_call {
struct rxrpc_ack_summary {
u16 nr_acks; /* Number of ACKs in packet */
u16 nr_new_acks; /* Number of new ACKs in packet */
- u16 nr_rot_new_acks; /* Number of rotated new ACKs */
+ u16 nr_new_nacks; /* Number of new nacks in packet */
+ u16 nr_retained_nacks; /* Number of nacks retained between ACKs */
u8 ack_reason;
bool saw_nacks; /* Saw NACKs in packet */
bool new_low_nack; /* T if new low NACK found */
@@ -779,40 +793,30 @@ struct rxrpc_send_params {
* Buffer of data to be output as a packet.
*/
struct rxrpc_txbuf {
- struct rcu_head rcu;
struct list_head call_link; /* Link in call->tx_sendmsg/tx_buffer */
struct list_head tx_link; /* Link in live Enc queue or Tx queue */
ktime_t last_sent; /* Time at which last transmitted */
refcount_t ref;
rxrpc_seq_t seq; /* Sequence number of this packet */
+ rxrpc_serial_t serial; /* Last serial number transmitted with */
unsigned int call_debug_id;
unsigned int debug_id;
unsigned int len; /* Amount of data in buffer */
unsigned int space; /* Remaining data space */
unsigned int offset; /* Offset of fill point */
- unsigned long flags;
-#define RXRPC_TXBUF_LAST 0 /* Set if last packet in Tx phase */
-#define RXRPC_TXBUF_RESENT 1 /* Set if has been resent */
+ unsigned int flags;
+#define RXRPC_TXBUF_WIRE_FLAGS 0xff /* The wire protocol flags */
+#define RXRPC_TXBUF_RESENT 0x100 /* Set if has been resent */
+ __be16 cksum; /* Checksum to go in header */
+ unsigned short ack_rwind; /* ACK receive window */
u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */
- struct {
- /* The packet for encrypting and DMA'ing. We align it such
- * that data[] aligns correctly for any crypto blocksize.
- */
- u8 pad[64 - sizeof(struct rxrpc_wire_header)];
- struct rxrpc_wire_header wire; /* Network-ready header */
- union {
- u8 data[RXRPC_JUMBO_DATALEN]; /* Data packet */
- struct {
- struct rxrpc_ackpacket ack;
- DECLARE_FLEX_ARRAY(u8, acks);
- };
- };
- } __aligned(64);
+ u8 nr_kvec; /* Amount of kvec[] used */
+ struct kvec kvec[3];
};
static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
{
- return txb->wire.flags & RXRPC_CLIENT_INITIATED;
+ return txb->flags & RXRPC_CLIENT_INITIATED;
}
static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
@@ -823,6 +827,20 @@ static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
#include <trace/events/rxrpc.h>
/*
+ * Allocate the next serial number on a connection. 0 must be skipped.
+ */
+static inline rxrpc_serial_t rxrpc_get_next_serial(struct rxrpc_connection *conn)
+{
+ rxrpc_serial_t serial;
+
+ serial = conn->tx_serial;
+ if (serial == 0)
+ serial = 1;
+ conn->tx_serial = serial + 1;
+ return serial;
+}
+
+/*
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_rx_skbs;
@@ -846,17 +864,11 @@ int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
*/
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why);
-void rxrpc_send_ACK(struct rxrpc_call *, u8, rxrpc_serial_t, enum rxrpc_propose_ack_trace);
void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
enum rxrpc_propose_ack_trace);
void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb);
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why);
-
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
/*
@@ -1137,9 +1149,9 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why);
int rxrpc_send_abort_packet(struct rxrpc_call *);
-int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
@@ -1200,7 +1212,7 @@ static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
*/
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);
/*
@@ -1272,8 +1284,9 @@ static inline void rxrpc_sysctl_exit(void) {}
* txbuf.c
*/
extern atomic_t rxrpc_nr_txbuf;
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp);
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp);
+struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size);
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
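The ar-internal.h churn above converts every call deadline from absolute jiffies to ktime_t, with the relative timeouts now documented in ms. Converting a relative timeout to an absolute deadline and testing it then uses the ktime helpers seen throughout the patch:

    static ktime_t deadline_in(u32 timeout_ms)
    {
            return ktime_add(ktime_get_real(), ms_to_ktime(timeout_ms));
    }

    static bool deadline_passed(ktime_t deadline)
    {
            return ktime_after(ktime_get_real(), deadline);
    }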
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index e363f21a2014..7bbb68504766 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -23,14 +23,14 @@
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long now = jiffies;
- unsigned long ping_at = now + rxrpc_idle_ack_delay;
-
- if (time_before(ping_at, call->ping_at)) {
- WRITE_ONCE(call->ping_at, ping_at);
- rxrpc_reduce_call_timer(call, ping_at, now,
- rxrpc_timer_set_for_ping);
- trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ ktime_t delay = ms_to_ktime(READ_ONCE(rxrpc_idle_ack_delay));
+ ktime_t now = ktime_get_real();
+ ktime_t ping_at = ktime_add(now, delay);
+
+ trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
+ if (ktime_before(ping_at, call->ping_at)) {
+ call->ping_at = ping_at;
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_ping);
}
}
@@ -40,64 +40,18 @@ void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
enum rxrpc_propose_ack_trace why)
{
- unsigned long expiry = rxrpc_soft_ack_delay;
- unsigned long now = jiffies, ack_at;
-
- call->ackr_serial = serial;
-
- if (rxrpc_soft_ack_delay < expiry)
- expiry = rxrpc_soft_ack_delay;
- if (call->peer->srtt_us != 0)
- ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
- else
- ack_at = expiry;
-
- ack_at += READ_ONCE(call->tx_backoff);
- ack_at += now;
- if (time_before(ack_at, call->delay_ack_at)) {
- WRITE_ONCE(call->delay_ack_at, ack_at);
- rxrpc_reduce_call_timer(call, ack_at, now,
- rxrpc_timer_set_for_ack);
- }
+ ktime_t now = ktime_get_real(), delay;
trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
-}
-
-/*
- * Queue an ACK for immediate transmission.
- */
-void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
- rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
-{
- struct rxrpc_txbuf *txb;
-
- if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return;
- rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
-
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
- rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
- if (!txb) {
- kleave(" = -ENOMEM");
- return;
- }
+ if (call->peer->srtt_us)
+ delay = (call->peer->srtt_us >> 3) * NSEC_PER_USEC;
+ else
+ delay = ms_to_ktime(READ_ONCE(rxrpc_soft_ack_delay));
+ delay = ktime_add_ms(delay, call->tx_backoff);
- txb->ack_why = why;
- txb->wire.seq = 0;
- txb->wire.type = RXRPC_PACKET_TYPE_ACK;
- txb->wire.flags |= RXRPC_SLOW_START_OK;
- txb->ack.bufferSpace = 0;
- txb->ack.maxSkew = 0;
- txb->ack.firstPacket = 0;
- txb->ack.previousPacket = 0;
- txb->ack.serial = htonl(serial);
- txb->ack.reason = ack_reason;
- txb->ack.nAcks = 0;
-
- trace_rxrpc_send_ack(call, why, ack_reason, serial);
- rxrpc_send_ack_packet(call, txb);
- rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
+ call->delay_ack_at = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_delayed_ack);
}
/*
@@ -114,26 +68,21 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
{
struct rxrpc_ackpacket *ack = NULL;
+ struct rxrpc_skb_priv *sp;
struct rxrpc_txbuf *txb;
- unsigned long resend_at;
- rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
- ktime_t now, max_age, oldest, ack_ts;
- bool unacked = false;
+ rxrpc_seq_t transmitted = call->tx_transmitted;
+ ktime_t next_resend = KTIME_MAX, rto = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
+ ktime_t resend_at = KTIME_MAX, now, delay;
+ bool unacked = false, did_send = false;
unsigned int i;
- LIST_HEAD(retrans_queue);
_enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
now = ktime_get_real();
- max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
- oldest = now;
if (list_empty(&call->tx_buffer))
goto no_resend;
- if (list_empty(&call->tx_buffer))
- goto no_further_resend;
-
trace_rxrpc_resend(call, ack_skb);
txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
@@ -141,14 +90,15 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
* explicitly NAK'd packets.
*/
if (ack_skb) {
+ sp = rxrpc_skb(ack_skb);
ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
- for (i = 0; i < ack->nAcks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
rxrpc_seq_t seq;
if (ack->acks[i] & 1)
continue;
- seq = ntohl(ack->firstPacket) + i;
+ seq = sp->ack.first_ack + i;
if (after(txb->seq, transmitted))
break;
if (after(txb->seq, seq))
@@ -160,19 +110,23 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
goto no_further_resend;
found_txb:
- if (after(ntohl(txb->wire.serial), call->acks_highest_serial))
+ resend_at = ktime_add(txb->last_sent, rto);
+ if (after(txb->serial, call->acks_highest_serial)) {
+ if (ktime_after(resend_at, now) &&
+ ktime_before(resend_at, next_resend))
+ next_resend = resend_at;
continue; /* Ack point not yet reached */
+ }
rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- }
+ trace_rxrpc_retransmit(call, txb->seq, txb->serial,
+ ktime_sub(resend_at, now));
- trace_rxrpc_retransmit(call, txb->seq,
- ktime_to_ns(ktime_sub(txb->last_sent,
- max_age)));
+ txb->flags |= RXRPC_TXBUF_RESENT;
+ rxrpc_transmit_one(call, txb);
+ did_send = true;
+ now = ktime_get_real();
if (list_is_last(&txb->call_link, &call->tx_buffer))
goto no_further_resend;
@@ -184,43 +138,46 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
* seen. Anything between the soft-ACK table and that point will get
* ACK'd or NACK'd in due course, so don't worry about it here; here we
* need to consider retransmitting anything beyond that point.
- *
- * Note that ACK for a packet can beat the update of tx_transmitted.
*/
- if (after_eq(READ_ONCE(call->acks_prev_seq), READ_ONCE(call->tx_transmitted)))
+ if (after_eq(call->acks_prev_seq, call->tx_transmitted))
goto no_further_resend;
list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
- if (before_eq(txb->seq, READ_ONCE(call->acks_prev_seq)))
+ resend_at = ktime_add(txb->last_sent, rto);
+
+ if (before_eq(txb->seq, call->acks_prev_seq))
continue;
- if (after(txb->seq, READ_ONCE(call->tx_transmitted)))
+ if (after(txb->seq, call->tx_transmitted))
break; /* Not transmitted yet */
if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
- before(ntohl(txb->wire.serial), ntohl(ack->serial)))
+ before(txb->serial, ntohl(ack->serial)))
goto do_resend; /* Wasn't accounted for by a more recent ping. */
- if (ktime_after(txb->last_sent, max_age)) {
- if (ktime_before(txb->last_sent, oldest))
- oldest = txb->last_sent;
+ if (ktime_after(resend_at, now)) {
+ if (ktime_before(resend_at, next_resend))
+ next_resend = resend_at;
continue;
}
do_resend:
unacked = true;
- if (list_empty(&txb->tx_link)) {
- list_add_tail(&txb->tx_link, &retrans_queue);
- set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
- }
+
+ txb->flags |= RXRPC_TXBUF_RESENT;
+ rxrpc_transmit_one(call, txb);
+ did_send = true;
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
+ now = ktime_get_real();
}
no_further_resend:
no_resend:
- resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
- resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
- !list_empty(&retrans_queue));
- WRITE_ONCE(call->resend_at, resend_at);
+ if (resend_at < KTIME_MAX) {
+ delay = rxrpc_get_rto_backoff(call->peer, did_send);
+ resend_at = ktime_add(resend_at, delay);
+ trace_rxrpc_timer_set(call, resend_at - now, rxrpc_timer_trace_resend_reset);
+ }
+ call->resend_at = resend_at;
if (unacked)
rxrpc_congestion_timeout(call);
@@ -229,25 +186,15 @@ no_resend:
* that an ACK got lost somewhere. Send a ping to find out instead of
* retransmitting data.
*/
- if (list_empty(&retrans_queue)) {
- rxrpc_reduce_call_timer(call, resend_at, jiffies,
- rxrpc_timer_set_for_resend);
- ack_ts = ktime_sub(now, call->acks_latest_ts);
- if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
- goto out;
- rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
- rxrpc_propose_ack_ping_for_lost_ack);
- goto out;
- }
+ if (!did_send) {
+ ktime_t next_ping = ktime_add_us(call->acks_latest_ts,
+ call->peer->srtt_us >> 3);
- /* Retransmit the queue */
- while ((txb = list_first_entry_or_null(&retrans_queue,
- struct rxrpc_txbuf, tx_link))) {
- list_del_init(&txb->tx_link);
- rxrpc_transmit_one(call, txb);
+ if (ktime_sub(next_ping, now) <= 0)
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_0_retrans);
}
-out:
_leave("");
}
@@ -257,13 +204,11 @@ out:
*/
static void rxrpc_begin_service_reply(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
-
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
- WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
- trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
/*
@@ -320,7 +265,7 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
call->tx_top = txb->seq;
list_add_tail(&txb->call_link, &call->tx_buffer);
- if (txb->wire.flags & RXRPC_LAST_PACKET)
+ if (txb->flags & RXRPC_LAST_PACKET)
rxrpc_close_tx_phase(call);
rxrpc_transmit_one(call, txb);
@@ -372,9 +317,8 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
*/
bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
{
- unsigned long now, next, t;
- rxrpc_serial_t ackr_serial;
- bool resend = false, expired = false;
+ ktime_t now, t;
+ bool resend = false;
s32 abort_code;
rxrpc_see_call(call, rxrpc_call_see_input);
@@ -398,71 +342,73 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
goto out;
+ if (skb)
+ rxrpc_input_call_packet(call, skb);
+
/* If we see our async-event poke, check for timeout trippage. */
- now = jiffies;
- t = READ_ONCE(call->expect_rx_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
- expired = true;
+ now = ktime_get_real();
+ t = ktime_sub(call->expect_rx_by, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_expect_rx);
+ goto expired;
}
- t = READ_ONCE(call->expect_req_by);
- if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
- time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
- expired = true;
+ t = ktime_sub(call->expect_req_by, now);
+ if (t <= 0) {
+ call->expect_req_by = KTIME_MAX;
+ if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_idle);
+ goto expired;
+ }
}
- t = READ_ONCE(call->expect_term_by);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
- expired = true;
+ t = ktime_sub(READ_ONCE(call->expect_term_by), now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_hard);
+ goto expired;
}
- t = READ_ONCE(call->delay_ack_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
- cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
- ackr_serial = xchg(&call->ackr_serial, 0);
- rxrpc_send_ACK(call, RXRPC_ACK_DELAY, ackr_serial,
- rxrpc_propose_ack_ping_for_lost_ack);
+ t = ktime_sub(call->delay_ack_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_delayed_ack);
+ call->delay_ack_at = KTIME_MAX;
+ rxrpc_send_ACK(call, RXRPC_ACK_DELAY, 0,
+ rxrpc_propose_ack_delayed_ack);
}
- t = READ_ONCE(call->ack_lost_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
- cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->ack_lost_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_lost_ack);
+ call->ack_lost_at = KTIME_MAX;
set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
}
- t = READ_ONCE(call->keepalive_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
- cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->ping_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_ping);
+ call->ping_at = KTIME_MAX;
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_keepalive);
}
- t = READ_ONCE(call->ping_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
- cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
- rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
- rxrpc_propose_ack_ping_for_keepalive);
- }
-
- t = READ_ONCE(call->resend_at);
- if (time_after_eq(now, t)) {
- trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
- cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+ t = ktime_sub(call->resend_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_resend);
+ call->resend_at = KTIME_MAX;
resend = true;
}
- if (skb)
- rxrpc_input_call_packet(call, skb);
-
rxrpc_transmit_some_data(call);
+ now = ktime_get_real();
+ t = ktime_sub(call->keepalive_at, now);
+ if (t <= 0) {
+ trace_rxrpc_timer_exp(call, t, rxrpc_timer_trace_keepalive);
+ call->keepalive_at = KTIME_MAX;
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_keepalive);
+ }
+
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -474,24 +420,13 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_send_initial_ping(call);
/* Process events */
- if (expired) {
- if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
- (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
- trace_rxrpc_call_reset(call);
- rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
- rxrpc_abort_call_reset);
- } else {
- rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
- rxrpc_abort_call_timeout);
- }
- goto out;
- }
-
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
rxrpc_propose_ack_ping_for_lost_ack);
- if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
+ if (resend &&
+ __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY &&
+ !test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
rxrpc_resend(call, NULL);
if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
@@ -513,23 +448,33 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
/* Make sure the timer is restarted */
if (!__rxrpc_call_is_complete(call)) {
- next = call->expect_rx_by;
+ ktime_t next = READ_ONCE(call->expect_term_by), delay;
-#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+#define set(T) { ktime_t _t = (T); if (ktime_before(_t, next)) next = _t; }
set(call->expect_req_by);
- set(call->expect_term_by);
+ set(call->expect_rx_by);
set(call->delay_ack_at);
set(call->ack_lost_at);
set(call->resend_at);
set(call->keepalive_at);
set(call->ping_at);
- now = jiffies;
- if (time_after_eq(now, next))
+ now = ktime_get_real();
+ delay = ktime_sub(next, now);
+ if (delay <= 0) {
rxrpc_poke_call(call, rxrpc_call_poke_timer_now);
-
- rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
+ } else {
+ unsigned long nowj = jiffies, delayj, nextj;
+
+ delayj = max(nsecs_to_jiffies(delay), 1);
+ nextj = nowj + delayj;
+ if (time_before(nextj, call->timer.expires) ||
+ !timer_pending(&call->timer)) {
+ trace_rxrpc_timer_restart(call, delay, delayj);
+ timer_reduce(&call->timer, nextj);
+ }
+ }
}
out:
@@ -544,4 +489,16 @@ out:
rxrpc_shrink_call_tx_buffer(call);
_leave("");
return true;
+
+expired:
+ if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
+ (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
+ trace_rxrpc_call_reset(call);
+ rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
+ rxrpc_abort_call_reset);
+ } else {
+ rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
+ rxrpc_abort_call_timeout);
+ }
+ goto out;
}
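For context on the call_event.c hunks above: the rework replaces per-timer jiffies bookkeeping with one absolute ktime_t deadline per event source, uses KTIME_MAX as the disarmed state, and re-arms a single timer for the earliest pending deadline on each pass. A minimal userspace sketch of that selection pattern follows; the type and field names are illustrative, not the kernel API.

/* Standalone model of the deadline-aggregation pattern in
 * rxrpc_input_call_event(); not kernel code. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;
#define KTIME_MAX INT64_MAX

struct call_deadlines {
	ktime_t expect_rx_by;	/* peer must send something by here */
	ktime_t delay_ack_at;	/* deferred DELAY ACK due */
	ktime_t resend_at;	/* retransmission due */
	ktime_t keepalive_at;	/* keepalive ping due */
};

/* Earliest armed deadline; KTIME_MAX means nothing is pending. */
static ktime_t next_deadline(const struct call_deadlines *d)
{
	ktime_t next = d->expect_rx_by;

	if (d->delay_ack_at < next)
		next = d->delay_ack_at;
	if (d->resend_at < next)
		next = d->resend_at;
	if (d->keepalive_at < next)
		next = d->keepalive_at;
	return next;
}

int main(void)
{
	struct call_deadlines d = {
		.expect_rx_by = KTIME_MAX,		/* disarmed */
		.delay_ack_at = 2 * 1000 * 1000,	/* 2ms, in ns */
		.resend_at    = KTIME_MAX,
		.keepalive_at = 5LL * 1000 * 1000 * 1000,
	};
	ktime_t next = next_deadline(&d);

	if (next == KTIME_MAX)
		printf("leave the timer idle\n");
	else
		printf("arm the timer for now + %lld ns\n", (long long)next);
	return 0;
}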
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 0943e54370ba..01fa71e8b1f7 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -70,20 +70,11 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
_enter("%d", call->debug_id);
if (!__rxrpc_call_is_complete(call)) {
- trace_rxrpc_timer_expired(call, jiffies);
+ trace_rxrpc_timer_expired(call);
rxrpc_poke_call(call, rxrpc_call_poke_timer);
}
}
-void rxrpc_reduce_call_timer(struct rxrpc_call *call,
- unsigned long expire_at,
- unsigned long now,
- enum rxrpc_timer_trace why)
-{
- trace_rxrpc_timer(call, why, now);
- timer_reduce(&call->timer, expire_at);
-}
-
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
static void rxrpc_destroy_call(struct work_struct *);
@@ -163,12 +154,20 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
spin_lock_init(&call->notify_lock);
spin_lock_init(&call->tx_lock);
refcount_set(&call->ref, 1);
- call->debug_id = debug_id;
- call->tx_total_len = -1;
- call->next_rx_timo = 20 * HZ;
- call->next_req_timo = 1 * HZ;
- call->ackr_window = 1;
- call->ackr_wtop = 1;
+ call->debug_id = debug_id;
+ call->tx_total_len = -1;
+ call->next_rx_timo = 20 * HZ;
+ call->next_req_timo = 1 * HZ;
+ call->ackr_window = 1;
+ call->ackr_wtop = 1;
+ call->delay_ack_at = KTIME_MAX;
+ call->ack_lost_at = KTIME_MAX;
+ call->resend_at = KTIME_MAX;
+ call->ping_at = KTIME_MAX;
+ call->keepalive_at = KTIME_MAX;
+ call->expect_rx_by = KTIME_MAX;
+ call->expect_req_by = KTIME_MAX;
+ call->expect_term_by = KTIME_MAX;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
@@ -226,11 +225,11 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
if (p->timeouts.normal)
- call->next_rx_timo = min(msecs_to_jiffies(p->timeouts.normal), 1UL);
+		call->next_rx_timo = max(p->timeouts.normal, 1);
if (p->timeouts.idle)
- call->next_req_timo = min(msecs_to_jiffies(p->timeouts.idle), 1UL);
+		call->next_req_timo = max(p->timeouts.idle, 1);
if (p->timeouts.hard)
- call->hard_timo = p->timeouts.hard * HZ;
+ call->hard_timo = p->timeouts.hard;
ret = rxrpc_init_client_call_security(call);
if (ret < 0) {
@@ -253,18 +252,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
*/
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- unsigned long now = jiffies;
- unsigned long j = now + MAX_JIFFY_OFFSET;
-
- call->delay_ack_at = j;
- call->ack_lost_at = j;
- call->resend_at = j;
- call->ping_at = j;
- call->keepalive_at = j;
- call->expect_rx_by = j;
- call->expect_req_by = j;
- call->expect_term_by = j + call->hard_timo;
- call->timer.expires = now;
+ if (call->hard_timo) {
+ ktime_t delay = ms_to_ktime(call->hard_timo * 1000);
+
+ call->expect_term_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ }
+ call->timer.expires = jiffies;
}
/*
@@ -686,6 +680,7 @@ static void rxrpc_destroy_call(struct work_struct *work)
del_timer_sync(&call->timer);
+ rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
rxrpc_cleanup_ring(call);
while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
struct rxrpc_txbuf, call_link))) {
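The call_object.c hunks above seed every per-call deadline with KTIME_MAX and only arm the hard timeout when the user actually supplied one, now kept in seconds rather than jiffies. A standalone sketch of that conversion, with invented names:

/* Arming an absolute hard-call deadline from a user timeout given in
 * seconds; mirrors the shape of rxrpc_start_call_timer(). */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t ktime_t;
#define KTIME_MAX INT64_MAX
#define NSEC_PER_SEC 1000000000LL

static ktime_t ktime_get_real(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (ktime_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static ktime_t arm_hard_timeout(unsigned int hard_timo_sec)
{
	if (!hard_timo_sec)
		return KTIME_MAX;	/* no hard timeout: keep the sentinel */
	return ktime_get_real() + (ktime_t)hard_timo_sec * NSEC_PER_SEC;
}

int main(void)
{
	printf("deadline: %lld ns\n", (long long)arm_hard_timeout(30));
	return 0;
}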
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 3b9b267a4431..d25bf1cf3670 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -636,7 +636,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
unsigned long final_ack_at = jiffies + 2;
- WRITE_ONCE(chan->final_ack_at, final_ack_at);
+ chan->final_ack_at = final_ack_at;
smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
rxrpc_reduce_conn_timer(conn, final_ack_at);
@@ -770,7 +770,7 @@ next:
conn_expires_at = conn->idle_timestamp + expiry;
- now = READ_ONCE(jiffies);
+ now = jiffies;
if (time_after(conn_expires_at, now))
goto not_yet_expired;
}
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 95f4bc206b3d..598b4ee389fc 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -88,13 +88,21 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
struct rxrpc_ackpacket ack;
};
} __attribute__((packed)) pkt;
- struct rxrpc_ackinfo ack_info;
+ struct rxrpc_acktrailer trailer;
size_t len;
int ret, ioc;
u32 serial, mtu, call_id, padding;
_enter("%d", conn->debug_id);
+ if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
+ if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+ &pkt.ack, sizeof(pkt.ack)) < 0)
+ return;
+ if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ return;
+ }
+
chan = &conn->channels[channel];
/* If the last call got moved on whilst we were waiting to run, just
@@ -114,10 +122,10 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
iov[0].iov_len = sizeof(pkt.whdr);
iov[1].iov_base = &padding;
iov[1].iov_len = 3;
- iov[2].iov_base = &ack_info;
- iov[2].iov_len = sizeof(ack_info);
+ iov[2].iov_base = &trailer;
+ iov[2].iov_len = sizeof(trailer);
- serial = atomic_inc_return(&conn->serial);
+ serial = rxrpc_get_next_serial(conn);
pkt.whdr.epoch = htonl(conn->proto.epoch);
pkt.whdr.cid = htonl(conn->proto.cid | channel);
@@ -150,14 +158,14 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
- ack_info.rxMTU = htonl(rxrpc_rx_mtu);
- ack_info.maxMTU = htonl(mtu);
- ack_info.rwind = htonl(rxrpc_rx_window_size);
- ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
+ trailer.maxMTU = htonl(rxrpc_rx_mtu);
+ trailer.ifMTU = htonl(mtu);
+ trailer.rwind = htonl(rxrpc_rx_window_size);
+ trailer.jumbo_max = htonl(rxrpc_rx_jumbo_max);
pkt.whdr.flags |= RXRPC_SLOW_START_OK;
padding = 0;
iov[0].iov_len += sizeof(pkt.ack);
- len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
+ len += sizeof(pkt.ack) + 3 + sizeof(trailer);
ioc = 3;
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
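The retransmitted-ACK builder above appends the renamed trailer after the sack table plus three bytes of padding, which is also why the input side computes ioffset = offset + nr_acks + 3. A small offset-arithmetic sketch; the byte counts are the on-wire sizes assumed from the rx protocol structs:

/* Standalone sketch of the ACK packet layout:
 * wire header | ack header | nAcks sack bytes | 3 pad bytes | trailer */
#include <stdio.h>

#define WIRE_HDR_LEN	28	/* struct rxrpc_wire_header on the wire */
#define ACK_HDR_LEN	18	/* struct rxrpc_ackpacket, packed */
#define TRAILER_LEN	16	/* struct rxrpc_acktrailer: four __be32 */

int main(void)
{
	unsigned int nr_acks = 5;
	unsigned int offset  = WIRE_HDR_LEN + ACK_HDR_LEN; /* first sack byte */
	unsigned int ioffset = offset + nr_acks + 3;	   /* trailer start */

	printf("sacks at %u, trailer at %u, total %u bytes\n",
	       offset, ioffset, ioffset + TRAILER_LEN);
	return 0;
}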
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index df8a271948a1..0af4642aeec4 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -68,6 +68,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
mutex_init(&conn->security_lock);
+ mutex_init(&conn->tx_data_alloc_lock);
skb_queue_head_init(&conn->rx_queue);
conn->rxnet = rxnet;
conn->security = &rxrpc_no_security;
@@ -341,6 +342,9 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
*/
rxrpc_purge_queue(&conn->rx_queue);
+ if (conn->tx_data_alloc.va)
+ __page_frag_cache_drain(virt_to_page(conn->tx_data_alloc.va),
+ conn->tx_data_alloc.pagecnt_bias);
call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 92495e73b869..3dedb8c0618c 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -45,11 +45,9 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
}
cumulative_acks += summary->nr_new_acks;
- cumulative_acks += summary->nr_rot_new_acks;
if (cumulative_acks > 255)
cumulative_acks = 255;
- summary->mode = call->cong_mode;
summary->cwnd = call->cong_cwnd;
summary->ssthresh = call->cong_ssthresh;
summary->cumulative_acks = cumulative_acks;
@@ -151,6 +149,7 @@ out_no_clear_ca:
cwnd = RXRPC_TX_MAX_WINDOW;
call->cong_cwnd = cwnd;
call->cong_cumul_acks = cumulative_acks;
+ summary->mode = call->cong_mode;
trace_rxrpc_congest(call, summary, acked_serial, change);
if (resend)
rxrpc_resend(call, skb);
@@ -213,8 +212,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
if (before_eq(txb->seq, call->acks_hard_ack))
continue;
- summary->nr_rot_new_acks++;
- if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
+ if (txb->flags & RXRPC_LAST_PACKET) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
rot_last = true;
}
@@ -254,6 +252,14 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
{
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
+ call->resend_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_resend);
+
+ if (unlikely(call->cong_last_nack)) {
+ rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
+ call->cong_last_nack = NULL;
+ }
+
switch (__rxrpc_call_state(call)) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
@@ -285,15 +291,11 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
- unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
- now = jiffies;
- timo = now + MAX_JIFFY_OFFSET;
-
- WRITE_ONCE(call->delay_ack_at, timo);
- trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
+ call->delay_ack_at = KTIME_MAX;
+ trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
@@ -326,7 +328,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
case RXRPC_CALL_SERVER_RECV_REQUEST:
rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
- call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+ call->expect_req_by = KTIME_MAX;
rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
break;
@@ -586,14 +588,12 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
case RXRPC_CALL_SERVER_RECV_REQUEST: {
unsigned long timo = READ_ONCE(call->next_req_timo);
- unsigned long now, expect_req_by;
if (timo) {
- now = jiffies;
- expect_req_by = now + timo;
- WRITE_ONCE(call->expect_req_by, expect_req_by);
- rxrpc_reduce_call_timer(call, expect_req_by, now,
- rxrpc_timer_set_for_idle);
+ ktime_t delay = ms_to_ktime(timo);
+
+ call->expect_req_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);
}
break;
}
@@ -667,14 +667,14 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
/*
* Process the extra information that may be appended to an ACK packet
*/
-static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
- struct rxrpc_ackinfo *ackinfo)
+static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
+ struct rxrpc_acktrailer *trailer)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_peer *peer;
unsigned int mtu;
bool wake = false;
- u32 rwind = ntohl(ackinfo->rwind);
+ u32 rwind = ntohl(trailer->rwind);
if (rwind > RXRPC_TX_MAX_WINDOW)
rwind = RXRPC_TX_MAX_WINDOW;
@@ -688,7 +688,7 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
if (call->cong_ssthresh > rwind)
call->cong_ssthresh = rwind;
- mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
+ mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
peer = call->peer;
if (mtu < peer->maxdata) {
@@ -703,6 +703,42 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
}
/*
+ * Determine how many nacks from the previous ACK have now been satisfied.
+ */
+static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ rxrpc_seq_t seq)
+{
+ struct sk_buff *skb = call->cong_last_nack;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned int i, new_acks = 0, retained_nacks = 0;
+ rxrpc_seq_t old_seq = sp->ack.first_ack;
+ u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
+
+ if (after_eq(seq, old_seq + sp->ack.nr_acks)) {
+ summary->nr_new_acks += sp->ack.nr_nacks;
+ summary->nr_new_acks += seq - (old_seq + sp->ack.nr_acks);
+ summary->nr_retained_nacks = 0;
+ } else if (seq == old_seq) {
+ summary->nr_retained_nacks = sp->ack.nr_nacks;
+ } else {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
+ if (acks[i] == RXRPC_ACK_TYPE_NACK) {
+ if (before(old_seq + i, seq))
+ new_acks++;
+ else
+ retained_nacks++;
+ }
+ }
+
+ summary->nr_new_acks += new_acks;
+ summary->nr_retained_nacks = retained_nacks;
+ }
+
+ return old_seq + sp->ack.nr_acks;
+}
+
+/*
* Process individual soft ACKs.
*
* Each ACK in the array corresponds to one packet and can be either an ACK or
@@ -711,25 +747,51 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
* the timer on the basis that the peer might just not have processed them at
* the time the ACK was sent.
*/
-static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
- rxrpc_seq_t seq, int nr_acks,
- struct rxrpc_ack_summary *summary)
+static void rxrpc_input_soft_acks(struct rxrpc_call *call,
+ struct rxrpc_ack_summary *summary,
+ struct sk_buff *skb,
+ rxrpc_seq_t seq,
+ rxrpc_seq_t since)
{
- unsigned int i;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned int i, old_nacks = 0;
+ rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
+ u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
- for (i = 0; i < nr_acks; i++) {
+ for (i = 0; i < sp->ack.nr_acks; i++) {
if (acks[i] == RXRPC_ACK_TYPE_ACK) {
summary->nr_acks++;
- summary->nr_new_acks++;
+ if (after_eq(seq, since))
+ summary->nr_new_acks++;
} else {
- if (!summary->saw_nacks &&
- call->acks_lowest_nak != seq + i) {
- call->acks_lowest_nak = seq + i;
- summary->new_low_nack = true;
- }
summary->saw_nacks = true;
+ if (before(seq, since)) {
+ /* Overlap with previous ACK */
+ old_nacks++;
+ } else {
+ summary->nr_new_nacks++;
+ sp->ack.nr_nacks++;
+ }
+
+ if (before(seq, lowest_nak))
+ lowest_nak = seq;
}
+ seq++;
}
+
+ if (lowest_nak != call->acks_lowest_nak) {
+ call->acks_lowest_nak = lowest_nak;
+ summary->new_low_nack = true;
+ }
+
+ /* We *can* have more nacks than we did - the peer is permitted to drop
+ * packets it has soft-acked and re-request them. Further, it is
+ * possible for the nack distribution to change whilst the number of
+ * nacks stays the same or goes down.
+ */
+ if (old_nacks < summary->nr_retained_nacks)
+ summary->nr_new_acks += summary->nr_retained_nacks - old_nacks;
+ summary->nr_retained_nacks = old_nacks;
}
/*
@@ -769,36 +831,32 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
- struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_ackinfo info;
+ struct rxrpc_acktrailer trailer;
rxrpc_serial_t ack_serial, acked_serial;
- rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
+ rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since;
int nr_acks, offset, ioffset;
_enter("");
- offset = sizeof(struct rxrpc_wire_header);
- if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
- offset += sizeof(ack);
+ offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
- ack_serial = sp->hdr.serial;
- acked_serial = ntohl(ack.serial);
- first_soft_ack = ntohl(ack.firstPacket);
- prev_pkt = ntohl(ack.previousPacket);
- hard_ack = first_soft_ack - 1;
- nr_acks = ack.nAcks;
- summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
- ack.reason : RXRPC_ACK__INVALID);
+ ack_serial = sp->hdr.serial;
+ acked_serial = sp->ack.acked_serial;
+ first_soft_ack = sp->ack.first_ack;
+ prev_pkt = sp->ack.prev_ack;
+ nr_acks = sp->ack.nr_acks;
+ hard_ack = first_soft_ack - 1;
+ summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
+ sp->ack.reason : RXRPC_ACK__INVALID);
trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
- rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
+ rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);
if (acked_serial != 0) {
- switch (ack.reason) {
+ switch (summary.ack_reason) {
case RXRPC_ACK_PING_RESPONSE:
rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
rxrpc_rtt_rx_ping_response);
@@ -818,7 +876,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicates that the client address changed due to NAT. The server
* lost the call because it switched to a different peer.
*/
- if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
rxrpc_is_client_call(call)) {
@@ -831,7 +889,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicate a change of address. However, we can retransmit the call
* if we still have it buffered to the beginning.
*/
- if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+ if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
call->acks_hard_ack == 0 &&
@@ -849,31 +907,41 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
goto send_response;
}
- info.rxMTU = 0;
+ trailer.maxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(info) &&
- skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
- return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
+ if (skb->len >= ioffset + sizeof(trailer) &&
+ skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0)
+ return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer);
if (nr_acks > 0)
skb_condense(skb);
+ if (call->cong_last_nack) {
+ since = rxrpc_input_check_prev_ack(call, &summary, first_soft_ack);
+ rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
+ call->cong_last_nack = NULL;
+ } else {
+ summary.nr_new_acks = first_soft_ack - call->acks_first_seq;
+ call->acks_lowest_nak = first_soft_ack + nr_acks;
+ since = first_soft_ack;
+ }
+
call->acks_latest_ts = skb->tstamp;
call->acks_first_seq = first_soft_ack;
call->acks_prev_seq = prev_pkt;
- switch (ack.reason) {
+ switch (summary.ack_reason) {
case RXRPC_ACK_PING:
break;
default:
- if (after(acked_serial, call->acks_highest_serial))
+ if (acked_serial && after(acked_serial, call->acks_highest_serial))
call->acks_highest_serial = acked_serial;
break;
}
/* Parse rwind and mtu sizes if provided. */
- if (info.rxMTU)
- rxrpc_input_ackinfo(call, skb, &info);
+ if (trailer.maxMTU)
+ rxrpc_input_ack_trailer(call, skb, &trailer);
if (first_soft_ack == 0)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
@@ -905,8 +973,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
if (nr_acks > 0) {
if (offset > (int)skb->len - nr_acks)
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
- rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
- nr_acks, &summary);
+ rxrpc_input_soft_acks(call, &summary, skb, first_soft_ack, since);
+ rxrpc_get_skb(skb, rxrpc_skb_get_last_nack);
+ call->cong_last_nack = skb;
}
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
@@ -918,7 +987,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
rxrpc_congestion_management(call, skb, &summary, acked_serial);
send_response:
- if (ack.reason == RXRPC_ACK_PING)
+ if (summary.ack_reason == RXRPC_ACK_PING)
rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
rxrpc_propose_ack_respond_to_ping);
else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
@@ -969,12 +1038,10 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
timo = READ_ONCE(call->next_rx_timo);
if (timo) {
- unsigned long now = jiffies, expect_rx_by;
+ ktime_t delay = ms_to_ktime(timo);
- expect_rx_by = now + timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, now,
- rxrpc_timer_set_for_normal);
+ call->expect_rx_by = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
}
switch (sp->hdr.type) {
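To make the rxrpc_input_check_prev_ack() accounting above concrete: NACKs from the previous soft-ACK table that now fall below the new hard-ACK point count as newly acked, while the rest are retained NACKs. A worked standalone example; the sequence numbers are chosen for illustration and only the arithmetic mirrors the patch:

/* Standalone model of the new-ack/retained-nack split. */
#include <stdio.h>

int main(void)
{
	unsigned int old_first = 10, old_nr = 4;     /* old ACK covered 10..13 */
	unsigned char old_sacks[4] = { 0, 1, 0, 1 }; /* 0 = NACK, 1 = ACK */
	unsigned int new_first = 12;		     /* new hard-ACK point */
	unsigned int new_acks = 0, retained_nacks = 0;

	for (unsigned int i = 0; i < old_nr; i++) {
		if (old_sacks[i] != 0)
			continue;		/* previously ACK'd: no change */
		if (old_first + i < new_first)
			new_acks++;		/* NACK now hard-acked */
		else
			retained_nacks++;	/* NACK still outstanding */
	}
	printf("new acks %u, retained nacks %u\n", new_acks, retained_nacks);
	/* -> new acks 1 (seq 10 resolved), retained nacks 1 (seq 12) */
	return 0;
}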
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index 34353b6e584b..f2701068ed9e 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -15,14 +15,11 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
}
/*
- * Work out how much data we can put in an unsecured packet.
+ * Allocate an appropriately sized buffer for the amount of data remaining.
*/
-static int none_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- *_buf_size = *_data_size = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- *_offset = 0;
- return 0;
+	return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 1, gfp);
}
static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
@@ -79,7 +76,7 @@ const struct rxrpc_security rxrpc_no_security = {
.exit = none_exit,
.init_connection_security = none_init_connection_security,
.free_call_crypto = none_free_call_crypto,
- .how_much_data = none_how_much_data,
+ .alloc_txbuf = none_alloc_txbuf,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
.respond_to_challenge = none_respond_to_challenge,
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 4a3a08a0e2cd..0300baa9afcd 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -124,6 +124,7 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
struct sk_buff *skb)
{
struct rxrpc_wire_header whdr;
+ struct rxrpc_ackpacket ack;
/* dig out the RxRPC connection details */
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
@@ -141,6 +142,16 @@ static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
+
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
+ if (skb_copy_bits(skb, sizeof(whdr), &ack, sizeof(ack)) < 0)
+ return rxrpc_bad_message(skb, rxrpc_badmsg_short_ack);
+ sp->ack.first_ack = ntohl(ack.firstPacket);
+ sp->ack.prev_ack = ntohl(ack.previousPacket);
+ sp->ack.acked_serial = ntohl(ack.serial);
+ sp->ack.reason = ack.reason;
+ sp->ack.nr_acks = ack.nAcks;
+ }
return true;
}
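The io_thread.c hunk above parses the ACK header once at extraction time into the skb's private data, so later consumers work with host-endian fields instead of re-copying from the packet. A schematic of that parse-once pattern; the struct layouts here are illustrative:

/* Standalone sketch: copy the wire ACK header once and cache it in
 * host byte order, as rxrpc_extract_header() now does. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wire_ack {			/* network byte order, packed */
	uint16_t bufferSpace;
	uint16_t maxSkew;
	uint32_t firstPacket;
	uint32_t previousPacket;
	uint32_t serial;
	uint8_t  reason;
	uint8_t  nAcks;
} __attribute__((packed));

struct ack_cache {			/* host byte order, parsed once */
	uint32_t first_ack, prev_ack, acked_serial;
	uint8_t  reason, nr_acks;
};

static void cache_ack(const struct wire_ack *a, struct ack_cache *c)
{
	c->first_ack    = ntohl(a->firstPacket);
	c->prev_ack     = ntohl(a->previousPacket);
	c->acked_serial = ntohl(a->serial);
	c->reason       = a->reason;
	c->nr_acks      = a->nAcks;
}

int main(void)
{
	struct wire_ack a = { .firstPacket = htonl(7), .nAcks = 3 };
	struct ack_cache c;

	cache_ack(&a, &c);
	printf("first %u, nr %u\n", c.first_ack, c.nr_acks);
	return 0;
}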
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 34d307368135..504453c688d7 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -452,6 +452,9 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
#endif
rxrpc_purge_queue(&local->rx_queue);
rxrpc_purge_client_connections(local);
+ if (local->tx_alloc.va)
+ __page_frag_cache_drain(virt_to_page(local->tx_alloc.va),
+ local->tx_alloc.pagecnt_bias);
}
/*
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 825b81183046..657cf35089a6 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -17,22 +17,22 @@
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ * How long to wait before scheduling an ACK with subtype DELAY (in ms).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned long rxrpc_soft_ack_delay = HZ;
+unsigned long rxrpc_soft_ack_delay = 1000;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ * How long to wait before scheduling an ACK with subtype IDLE (in ms).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
 * ACK to let the other end know that it can free up its Tx buffer space.
*/
-unsigned long rxrpc_idle_ack_delay = HZ / 2;
+unsigned long rxrpc_idle_ack_delay = 500;
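With the two delays above now held in milliseconds, their users convert to ktime at the point of use instead of threading jiffies through; a one-function sketch of that pattern (names illustrative):

/* Standalone sketch: millisecond tunable -> absolute ktime deadline. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;
#define NSEC_PER_MSEC 1000000LL

static unsigned long soft_ack_delay_ms = 1000;	/* the tunable above */

static ktime_t delay_ack_deadline(ktime_t now_ns)
{
	return now_ns + (ktime_t)soft_ack_delay_ms * NSEC_PER_MSEC;
}

int main(void)
{
	printf("deadline %lld ns\n", (long long)delay_ack_deadline(0));
	return 0;
}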
/*
* Receive window size in packets. This indicates the maximum number of
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index a0906145e829..5ea9601efd05 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -48,12 +48,10 @@ static const char rxrpc_keepalive_string[] = "";
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
if (ret < 0) {
- u16 tx_backoff = READ_ONCE(call->tx_backoff);
-
- if (tx_backoff < HZ)
- WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ if (call->tx_backoff < 1000)
+ call->tx_backoff += 100;
} else {
- WRITE_ONCE(call->tx_backoff, 0);
+ call->tx_backoff = 0;
}
}
@@ -65,84 +63,92 @@ static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
* Receiving a response to the ping will prevent the ->expect_rx_by timer from
* expiring.
*/
-static void rxrpc_set_keepalive(struct rxrpc_call *call)
+static void rxrpc_set_keepalive(struct rxrpc_call *call, ktime_t now)
{
- unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6);
- keepalive_at += now;
- WRITE_ONCE(call->keepalive_at, keepalive_at);
- rxrpc_reduce_call_timer(call, keepalive_at, now,
- rxrpc_timer_set_for_keepalive);
+ call->keepalive_at = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive);
}
/*
* Fill out an ACK packet.
*/
-static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
- struct rxrpc_call *call,
- struct rxrpc_txbuf *txb,
- u16 *_rwind)
+static void rxrpc_fill_out_ack(struct rxrpc_call *call,
+ struct rxrpc_txbuf *txb,
+ u8 ack_reason,
+ rxrpc_serial_t serial)
{
- struct rxrpc_ackinfo ackinfo;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxrpc_acktrailer *trailer = txb->kvec[2].iov_base + 3;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
unsigned int qsize, sack, wrap, to;
rxrpc_seq_t window, wtop;
int rsize;
u32 mtu, jmax;
- u8 *ackp = txb->acks;
+ u8 *filler = txb->kvec[2].iov_base;
+ u8 *sackp = txb->kvec[1].iov_base;
- call->ackr_nr_unacked = 0;
- atomic_set(&call->ackr_nr_consumed, 0);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);
- clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
window = call->ackr_window;
wtop = call->ackr_wtop;
sack = call->ackr_sack_base % RXRPC_SACK_SIZE;
- txb->ack.firstPacket = htonl(window);
- txb->ack.nAcks = wtop - window;
+
+ whdr->seq = 0;
+ whdr->type = RXRPC_PACKET_TYPE_ACK;
+ txb->flags |= RXRPC_SLOW_START_OK;
+ ack->bufferSpace = 0;
+ ack->maxSkew = 0;
+ ack->firstPacket = htonl(window);
+ ack->previousPacket = htonl(call->rx_highest_seq);
+ ack->serial = htonl(serial);
+ ack->reason = ack_reason;
+ ack->nAcks = wtop - window;
+ filler[0] = 0;
+ filler[1] = 0;
+ filler[2] = 0;
+
+ if (ack_reason == RXRPC_ACK_PING)
+ txb->flags |= RXRPC_REQUEST_ACK;
if (after(wtop, window)) {
+ txb->len += ack->nAcks;
+ txb->kvec[1].iov_base = sackp;
+ txb->kvec[1].iov_len = ack->nAcks;
+
wrap = RXRPC_SACK_SIZE - sack;
- to = min_t(unsigned int, txb->ack.nAcks, RXRPC_SACK_SIZE);
+ to = min_t(unsigned int, ack->nAcks, RXRPC_SACK_SIZE);
- if (sack + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
- memcpy(txb->acks, call->ackr_sack_table + sack, txb->ack.nAcks);
+ if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
+ memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
} else {
- memcpy(txb->acks, call->ackr_sack_table + sack, wrap);
- memcpy(txb->acks + wrap, call->ackr_sack_table,
- to - wrap);
+ memcpy(sackp, call->ackr_sack_table + sack, wrap);
+ memcpy(sackp + wrap, call->ackr_sack_table, to - wrap);
}
-
- ackp += to;
} else if (before(wtop, window)) {
pr_warn("ack window backward %x %x", window, wtop);
- } else if (txb->ack.reason == RXRPC_ACK_DELAY) {
- txb->ack.reason = RXRPC_ACK_IDLE;
+ } else if (ack->reason == RXRPC_ACK_DELAY) {
+ ack->reason = RXRPC_ACK_IDLE;
}
- mtu = conn->peer->if_mtu;
- mtu -= conn->peer->hdrsize;
+ mtu = call->peer->if_mtu;
+ mtu -= call->peer->hdrsize;
jmax = rxrpc_rx_jumbo_max;
qsize = (window - 1) - call->rx_consumed;
rsize = max_t(int, call->rx_winsize - qsize, 0);
- *_rwind = rsize;
- ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
- ackinfo.maxMTU = htonl(mtu);
- ackinfo.rwind = htonl(rsize);
- ackinfo.jumbo_max = htonl(jmax);
-
- *ackp++ = 0;
- *ackp++ = 0;
- *ackp++ = 0;
- memcpy(ackp, &ackinfo, sizeof(ackinfo));
- return txb->ack.nAcks + 3 + sizeof(ackinfo);
+ txb->ack_rwind = rsize;
+ trailer->maxMTU = htonl(rxrpc_rx_mtu);
+ trailer->ifMTU = htonl(mtu);
+ trailer->rwind = htonl(rsize);
+ trailer->jumbo_max = htonl(jmax);
}
/*
* Record the beginning of an RTT probe.
*/
-static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
- enum rxrpc_rtt_tx_trace why)
+static void rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ ktime_t now, enum rxrpc_rtt_tx_trace why)
{
unsigned long avail = call->rtt_avail;
int rtt_slot = 9;
@@ -155,47 +161,31 @@ static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
goto no_slot;
call->rtt_serial[rtt_slot] = serial;
- call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ call->rtt_sent_at[rtt_slot] = now;
smp_wmb(); /* Write data before avail bit */
set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
- return rtt_slot;
+ return;
no_slot:
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
- return -1;
-}
-
-/*
- * Cancel an RTT probe.
- */
-static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
- rxrpc_serial_t serial, int rtt_slot)
-{
- if (rtt_slot != -1) {
- clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
- smp_wmb(); /* Clear pending bit before setting slot */
- set_bit(rtt_slot, &call->rtt_avail);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
- }
}
/*
* Transmit an ACK packet.
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static void rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
struct rxrpc_connection *conn;
+ struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len, n;
- int ret, rtt_slot = -1;
- u16 rwind;
+ ktime_t now;
+ int ret;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
- return -ECONNRESET;
+ return;
conn = call->conn;
@@ -203,55 +193,68 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- if (txb->ack.reason == RXRPC_ACK_PING)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
-
- n = rxrpc_fill_out_ack(conn, call, txb, &rwind);
- if (n == 0)
- return 0;
-
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n;
- len = iov[0].iov_len;
+ msg.msg_flags = MSG_SPLICE_PAGES;
- serial = atomic_inc_return(&conn->serial);
- txb->wire.serial = htonl(serial);
- trace_rxrpc_tx_ack(call->debug_id, serial,
- ntohl(txb->ack.firstPacket),
- ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks,
- rwind);
+ whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
- if (txb->ack.reason == RXRPC_ACK_PING)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
+ txb->serial = rxrpc_get_next_serial(conn);
+ whdr->serial = htonl(txb->serial);
+ trace_rxrpc_tx_ack(call->debug_id, txb->serial,
+ ntohl(ack->firstPacket),
+ ntohl(ack->serial), ack->reason, ack->nAcks,
+ txb->ack_rwind);
rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);
- /* Grab the highest received seq as late as possible */
- txb->ack.previousPacket = htonl(call->rx_highest_seq);
-
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
- ret = do_udp_sendmsg(conn->local->socket, &msg, len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, txb->len);
+ rxrpc_local_dont_fragment(conn->local, false);
+ ret = do_udp_sendmsg(conn->local->socket, &msg, txb->len);
call->peer->last_tx_at = ktime_get_seconds();
if (ret < 0) {
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
+ trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret,
rxrpc_tx_point_call_ack);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
+ trace_rxrpc_tx_packet(call->debug_id, whdr,
rxrpc_tx_point_call_ack);
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- call->peer->rtt_last_req = ktime_get_real();
+ now = ktime_get_real();
+ if (ack->reason == RXRPC_ACK_PING)
+ rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_ping);
+ if (txb->flags & RXRPC_REQUEST_ACK)
+ call->peer->rtt_last_req = now;
+ rxrpc_set_keepalive(call, now);
}
rxrpc_tx_backoff(call, ret);
+}
- if (!__rxrpc_call_is_complete(call)) {
- if (ret < 0)
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- rxrpc_set_keepalive(call);
+/*
+ * Queue an ACK for immediate transmission.
+ */
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
+{
+ struct rxrpc_txbuf *txb;
+
+ if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+ return;
+
+ rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
+
+ txb = rxrpc_alloc_ack_txbuf(call, call->ackr_wtop - call->ackr_window);
+ if (!txb) {
+ kleave(" = -ENOMEM");
+ return;
}
- return ret;
+ txb->ack_why = why;
+
+ rxrpc_fill_out_ack(call, txb, ack_reason, serial);
+ call->ackr_nr_unacked = 0;
+ atomic_set(&call->ackr_nr_consumed, 0);
+ clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
+
+ trace_rxrpc_send_ack(call, why, ack_reason, serial);
+ rxrpc_send_ack_packet(call, txb);
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
}
/*
@@ -302,7 +305,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
iov[0].iov_base = &pkt;
iov[0].iov_len = sizeof(pkt);
- serial = atomic_inc_return(&conn->serial);
+ serial = rxrpc_get_next_serial(conn);
pkt.whdr.serial = htonl(serial);
iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
@@ -319,38 +322,22 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
}
/*
- * send a packet through the transport endpoint
+ * Prepare a (sub)packet for transmission.
*/
-int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+static void rxrpc_prepare_data_subpacket(struct rxrpc_call *call, struct rxrpc_txbuf *txb,
+ rxrpc_serial_t serial)
{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
enum rxrpc_req_ack_trace why;
struct rxrpc_connection *conn = call->conn;
- struct msghdr msg;
- struct kvec iov[1];
- rxrpc_serial_t serial;
- size_t len;
- int ret, rtt_slot = -1;
_enter("%x,{%d}", txb->seq, txb->len);
- /* Each transmission of a Tx packet needs a new serial number */
- serial = atomic_inc_return(&conn->serial);
- txb->wire.serial = htonl(serial);
+ txb->serial = serial;
if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
txb->seq == 1)
- txb->wire.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
-
- iov[0].iov_base = &txb->wire;
- iov[0].iov_len = sizeof(txb->wire) + txb->len;
- len = iov[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
-
- msg.msg_name = &call->peer->srx.transport;
- msg.msg_namelen = call->peer->srx.transport_len;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ whdr->userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
@@ -359,13 +346,13 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
* service call, lest OpenAFS incorrectly send us an ACK with some
* soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
+ if (txb->flags & RXRPC_REQUEST_ACK)
why = rxrpc_reqack_already_on;
- else if (test_bit(RXRPC_TXBUF_LAST, &txb->flags) && rxrpc_sending_to_client(txb))
+ else if ((txb->flags & RXRPC_LAST_PACKET) && rxrpc_sending_to_client(txb))
why = rxrpc_reqack_no_srv_last;
else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
why = rxrpc_reqack_ack_lost;
- else if (test_bit(RXRPC_TXBUF_RESENT, &txb->flags))
+ else if (txb->flags & RXRPC_TXBUF_RESENT)
why = rxrpc_reqack_retrans;
else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
why = rxrpc_reqack_slow_start;
@@ -381,42 +368,116 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
if (why != rxrpc_reqack_no_srv_last)
- txb->wire.flags |= RXRPC_REQUEST_ACK;
+ txb->flags |= RXRPC_REQUEST_ACK;
dont_set_request_ack:
+ whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
+ whdr->serial = htonl(txb->serial);
+ whdr->cksum = txb->cksum;
+
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags, false);
+}
+
+/*
+ * Prepare a packet for transmission.
+ */
+static size_t rxrpc_prepare_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ rxrpc_serial_t serial;
+
+ /* Each transmission of a Tx packet needs a new serial number */
+ serial = rxrpc_get_next_serial(call->conn);
+
+ rxrpc_prepare_data_subpacket(call, txb, serial);
+
+ return txb->len;
+}
+
+/*
+ * Set timeouts after transmitting a packet.
+ */
+static void rxrpc_tstamp_data_packets(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ ktime_t now = ktime_get_real();
+ bool ack_requested = txb->flags & RXRPC_REQUEST_ACK;
+
+ call->tx_last_sent = now;
+ txb->last_sent = now;
+
+ if (ack_requested) {
+ rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_data);
+
+ call->peer->rtt_last_req = now;
+ if (call->peer->rtt_count > 1) {
+ ktime_t delay = rxrpc_get_rto_backoff(call->peer, false);
+
+ call->ack_lost_at = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_lost_ack);
+ }
+ }
+
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags)) {
+ ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo));
+
+ call->expect_rx_by = ktime_add(now, delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
+ }
+
+ rxrpc_set_keepalive(call, now);
+}
+
+/*
+ * send a packet through the transport endpoint
+ */
+static int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
+{
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxrpc_connection *conn = call->conn;
+ enum rxrpc_tx_point frag;
+ struct msghdr msg;
+ size_t len;
+ int ret;
+
+ _enter("%x,{%d}", txb->seq, txb->len);
+
+ len = rxrpc_prepare_data_packet(call, txb);
+
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
static int lose;
if ((lose++ & 7) == 7) {
ret = 0;
- trace_rxrpc_tx_data(call, txb->seq, serial,
- txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags),
- true);
+ trace_rxrpc_tx_data(call, txb->seq, txb->serial,
+ txb->flags, true);
goto done;
}
}
- trace_rxrpc_tx_data(call, txb->seq, serial, txb->wire.flags,
- test_bit(RXRPC_TXBUF_RESENT, &txb->flags), false);
+ iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, len);
+
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = MSG_SPLICE_PAGES;
/* Track what we've attempted to transmit at least once so that the
* retransmission algorithm doesn't try to resend what we haven't sent
- * yet. However, this can race as we can receive an ACK before we get
- * to this point. But, OTOH, if we won't get an ACK mentioning this
- * packet unless the far side received it (though it could have
- * discarded it anyway and NAK'd it).
+ * yet.
*/
- cmpxchg(&call->tx_transmitted, txb->seq - 1, txb->seq);
+ if (txb->seq == call->tx_transmitted + 1)
+ call->tx_transmitted = txb->seq;
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
- if (txb->len >= call->peer->maxdata)
- goto send_fragmentable;
-
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
+ if (txb->len >= call->peer->maxdata) {
+ rxrpc_local_dont_fragment(conn->local, false);
+ frag = rxrpc_tx_point_call_data_frag;
+ } else {
+ rxrpc_local_dont_fragment(conn->local, true);
+ frag = rxrpc_tx_point_call_data_nofrag;
+ }
+retry:
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
* to go out of the interface
@@ -429,46 +490,21 @@ dont_set_request_ack:
if (ret < 0) {
rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret, frag);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_nofrag);
+ trace_rxrpc_tx_packet(call->debug_id, whdr, frag);
}
rxrpc_tx_backoff(call, ret);
- if (ret == -EMSGSIZE)
- goto send_fragmentable;
+	if (ret == -EMSGSIZE && frag == rxrpc_tx_point_call_data_nofrag) {
+ rxrpc_local_dont_fragment(conn->local, false);
+ frag = rxrpc_tx_point_call_data_frag;
+ goto retry;
+ }
done:
if (ret >= 0) {
- call->tx_last_sent = txb->last_sent;
- if (txb->wire.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = txb->last_sent;
- if (call->peer->rtt_count > 1) {
- unsigned long nowj = jiffies, ack_lost_at;
-
- ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
- ack_lost_at += nowj;
- WRITE_ONCE(call->ack_lost_at, ack_lost_at);
- rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
- rxrpc_timer_set_for_lost_ack);
- }
- }
-
- if (txb->seq == 1 &&
- !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
- &call->flags)) {
- unsigned long nowj = jiffies, expect_rx_by;
-
- expect_rx_by = nowj + call->next_rx_timo;
- WRITE_ONCE(call->expect_rx_by, expect_rx_by);
- rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
- rxrpc_timer_set_for_normal);
- }
-
- rxrpc_set_keepalive(call);
+ rxrpc_tstamp_data_packets(call, txb);
} else {
/* Cancel the call if the initial transmission fails,
* particularly if that's due to network routing issues that
@@ -482,41 +518,6 @@ done:
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
-
-send_fragmentable:
- /* attempt to send this message with fragmentation enabled */
- _debug("send fragment");
-
- txb->last_sent = ktime_get_real();
- if (txb->wire.flags & RXRPC_REQUEST_ACK)
- rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
-
- switch (conn->local->srx.transport.family) {
- case AF_INET6:
- case AF_INET:
- rxrpc_local_dont_fragment(conn->local, false);
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
- ret = do_udp_sendmsg(conn->local->socket, &msg, len);
- conn->peer->last_tx_at = ktime_get_seconds();
-
- rxrpc_local_dont_fragment(conn->local, true);
- break;
-
- default:
- BUG();
- }
-
- if (ret < 0) {
- rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
- rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- trace_rxrpc_tx_fail(call->debug_id, serial, ret,
- rxrpc_tx_point_call_data_frag);
- } else {
- trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
- rxrpc_tx_point_call_data_frag);
- }
- rxrpc_tx_backoff(call, ret);
- goto done;
}
/*
@@ -558,7 +559,7 @@ void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
len = iov[0].iov_len + iov[1].iov_len;
- serial = atomic_inc_return(&conn->serial);
+ serial = rxrpc_get_next_serial(conn);
whdr.serial = htonl(serial);
iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
@@ -723,11 +724,9 @@ void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
rxrpc_instant_resend(call, txb);
}
} else {
- unsigned long now = jiffies;
- unsigned long resend_at = now + call->peer->rto_j;
+ ktime_t delay = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);
- WRITE_ONCE(call->resend_at, resend_at);
- rxrpc_reduce_call_timer(call, resend_at, now,
- rxrpc_timer_set_for_send);
+ call->resend_at = ktime_add(ktime_get_real(), delay);
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_resend_tx);
}
}
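The data-send path above now chooses the don't-fragment setting up front and retries exactly once without DF when the kernel returns -EMSGSIZE. A schematic model of that control flow; the socket helper is invented for the example:

/* Standalone model of the DF-then-fallback send; not the kernel API. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int udp_send(bool dont_fragment, unsigned int len, unsigned int path_mtu)
{
	if (dont_fragment && len > path_mtu)
		return -EMSGSIZE;	/* would need fragmentation */
	return (int)len;		/* "sent" */
}

int main(void)
{
	unsigned int len = 1390, maxdata = 1412, path_mtu = 1380;
	bool df = len < maxdata;	/* mirrors the txb->len >= maxdata test */
	int ret;

retry:
	ret = udp_send(df, len, path_mtu);
	if (ret == -EMSGSIZE && df) {
		df = false;		/* allow fragmentation and retry once */
		goto retry;
	}
	printf("ret=%d, df=%d\n", ret, df);
	return 0;
}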
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 6c86cbb98d1d..263a2251e3d2 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -52,9 +52,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
enum rxrpc_call_state state;
- unsigned long timeout = 0;
rxrpc_seq_t acks_hard_ack;
char lbuff[50], rbuff[50];
+ long timeout = 0;
if (v == &rxnet->calls) {
seq_puts(seq,
@@ -76,10 +76,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
sprintf(rbuff, "%pISpc", &call->dest_srx.transport);
state = rxrpc_call_state(call);
- if (state != RXRPC_CALL_SERVER_PREALLOC) {
- timeout = READ_ONCE(call->expect_rx_by);
- timeout -= jiffies;
- }
+ if (state != RXRPC_CALL_SERVER_PREALLOC)
+ timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real());
acks_hard_ack = READ_ONCE(call->acks_hard_ack);
seq_printf(seq,
@@ -181,7 +179,7 @@ print:
atomic_read(&conn->active),
state,
key_serial(conn->key),
- atomic_read(&conn->serial),
+ conn->tx_serial,
conn->hi_serial,
conn->channels[0].call_id,
conn->channels[1].call_id,
@@ -309,7 +307,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
peer->mtu,
now - peer->last_tx_at,
peer->srtt_us >> 3,
- jiffies_to_usecs(peer->rto_j));
+ peer->rto_us);
return 0;
}
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index e8ee4af43ca8..4fe6b4d20ada 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -135,9 +135,9 @@ struct rxrpc_ackpacket {
/*
* ACK packets can have a further piece of information tagged on the end
*/
-struct rxrpc_ackinfo {
- __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
- __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
+struct rxrpc_acktrailer {
+ __be32 maxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */
+ __be32 ifMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */
__be32 rwind; /* Rx window size (packets) [AFS 3.4] */
__be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */
};
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index be61d6f5be8d..cdab7b7d08a0 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -11,8 +11,8 @@
#include <linux/net.h>
#include "ar-internal.h"
-#define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
-#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
+#define RXRPC_RTO_MAX (120 * USEC_PER_SEC)
+#define RXRPC_TIMEOUT_INIT ((unsigned int)(1 * MSEC_PER_SEC)) /* RFC6298 2.1 initial RTO value */
#define rxrpc_jiffies32 ((u32)jiffies) /* As tcp_jiffies32 */
static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
@@ -22,7 +22,7 @@ static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
{
- return usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
+ return (peer->srtt_us >> 3) + peer->rttvar_us;
}
static u32 rxrpc_bound_rto(u32 rto)
@@ -124,7 +124,7 @@ static void rxrpc_set_rto(struct rxrpc_peer *peer)
/* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo
* guarantees that rto is higher.
*/
- peer->rto_j = rxrpc_bound_rto(rto);
+ peer->rto_us = rxrpc_bound_rto(rto);
}
static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
@@ -163,33 +163,33 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
spin_unlock(&peer->rtt_input_lock);
trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
- peer->srtt_us >> 3, peer->rto_j);
+ peer->srtt_us >> 3, peer->rto_us);
}
/*
- * Get the retransmission timeout to set in jiffies, backing it off each time
- * we retransmit.
+ * Get the retransmission timeout to set in nanoseconds, backing it off each
+ * time we retransmit.
*/
-unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
+ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
{
- u64 timo_j;
- u8 backoff = READ_ONCE(peer->backoff);
+ u64 timo_us;
+ u32 backoff = READ_ONCE(peer->backoff);
- timo_j = peer->rto_j;
- timo_j <<= backoff;
- if (retrans && timo_j * 2 <= RXRPC_RTO_MAX)
+ timo_us = peer->rto_us;
+ timo_us <<= backoff;
+ if (retrans && timo_us * 2 <= RXRPC_RTO_MAX)
WRITE_ONCE(peer->backoff, backoff + 1);
- if (timo_j < 1)
- timo_j = 1;
+ if (timo_us < 1)
+ timo_us = 1;
- return timo_j;
+ return ns_to_ktime(timo_us * NSEC_PER_USEC);
}
void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
{
- peer->rto_j = RXRPC_TIMEOUT_INIT;
- peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
+ peer->rto_us = RXRPC_TIMEOUT_INIT;
+ peer->mdev_us = RXRPC_TIMEOUT_INIT;
peer->backoff = 0;
//minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
}
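
Note: rxrpc_get_rto_backoff() above is a plain binary exponential backoff in microseconds: the base RTO is left-shifted by the per-peer backoff counter, and the counter only advances while the doubled timeout stays within RXRPC_RTO_MAX, mirroring RFC 6298's doubling rule. A standalone sketch of the arithmetic, using illustrative values:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC  1000000ULL
#define NSEC_PER_USEC 1000ULL
#define RTO_MAX       (120 * USEC_PER_SEC)

static uint64_t rto_backoff_ns(uint64_t rto_us, uint32_t *backoff, int retrans)
{
        uint64_t timo_us = rto_us << *backoff;

        if (retrans && timo_us * 2 <= RTO_MAX)
                (*backoff)++;            /* next retransmit waits twice as long */
        if (timo_us < 1)
                timo_us = 1;
        return timo_us * NSEC_PER_USEC;  /* ns_to_ktime() equivalent */
}

int main(void)
{
        uint32_t backoff = 0;

        for (int i = 0; i < 4; i++)      /* prints 1s, 2s, 4s, 8s in ns */
                printf("%llu ns\n",
                       (unsigned long long)rto_backoff_ns(USEC_PER_SEC, &backoff, 1));
        return 0;
}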
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index b52dedcebce0..f1a68270862d 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -145,16 +145,17 @@ error:
/*
* Work out how much data we can put in a packet.
*/
-static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
- size_t *_buf_size, size_t *_data_size, size_t *_offset)
+static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- size_t shdr, buf_size, chunk;
+ struct rxrpc_txbuf *txb;
+ size_t shdr, space;
+
+ remain = min(remain, 65535 - sizeof(struct rxrpc_wire_header));
switch (call->conn->security_level) {
default:
- buf_size = chunk = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- shdr = 0;
- goto out;
+ space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
+ return rxrpc_alloc_data_txbuf(call, space, 0, gfp);
case RXRPC_SECURITY_AUTH:
shdr = sizeof(struct rxkad_level1_hdr);
break;
@@ -163,17 +164,16 @@ static int rxkad_how_much_data(struct rxrpc_call *call, size_t remain,
break;
}
- buf_size = round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN);
+ space = min_t(size_t, round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN), remain + shdr);
+ space = round_up(space, RXKAD_ALIGN);
- chunk = buf_size - shdr;
- if (remain < chunk)
- buf_size = round_up(shdr + remain, RXKAD_ALIGN);
+ txb = rxrpc_alloc_data_txbuf(call, space, RXKAD_ALIGN, gfp);
+ if (!txb)
+ return NULL;
-out:
- *_buf_size = buf_size;
- *_data_size = chunk;
- *_offset = shdr;
- return 0;
+ txb->offset += shdr;
+ txb->space -= shdr;
+ return txb;
}
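
Note: the sizing logic in rxkad_alloc_txbuf() keeps encrypted payloads on cipher-block boundaries: clamp to the jumbo payload limit rounded down to RXKAD_ALIGN, take the smaller of that and the data still to send plus the security header, then round the result up to whole blocks. A runnable model of just that arithmetic (constants copied from the kernel, shdr value illustrative):

#include <stddef.h>
#include <stdio.h>

#define RXRPC_JUMBO_DATALEN 1412
#define RXKAD_ALIGN         8

#define round_down(x, a) ((x) & ~((size_t)(a) - 1))
#define round_up(x, a)   round_down((x) + (a) - 1, a)
#define min_sz(a, b)     ((a) < (b) ? (a) : (b))

int main(void)
{
        size_t shdr = 4;       /* e.g. sizeof(struct rxkad_level1_hdr) */
        size_t remain = 100;   /* caller has 100 bytes left to send */
        size_t space;

        space = min_sz(round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN),
                       remain + shdr);
        space = round_up(space, RXKAD_ALIGN);  /* 104: whole cipher blocks */

        printf("alloc %zu, offset +%zu, usable %zu\n",
               space, shdr, space - shdr);
        return 0;
}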
/*
@@ -251,7 +251,8 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
struct rxrpc_txbuf *txb,
struct skcipher_request *req)
{
- struct rxkad_level1_hdr *hdr = (void *)txb->data;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxkad_level1_hdr *hdr = (void *)(whdr + 1);
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -259,7 +260,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
hdr->data_size = htonl((u32)check << 16 | txb->len);
txb->len += sizeof(struct rxkad_level1_hdr);
@@ -267,14 +268,14 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
if (pad) {
- memset(txb->data + txb->offset, 0, pad);
+ memset(txb->kvec[0].iov_base + txb->offset, 0, pad);
txb->len += pad;
}
/* start the encryption afresh */
memset(&iv, 0, sizeof(iv));
- sg_init_one(&sg, txb->data, 8);
+ sg_init_one(&sg, hdr, 8);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
@@ -293,7 +294,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
- struct rxkad_level2_hdr *rxkhdr = (void *)txb->data;
+ struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
+ struct rxkad_level2_hdr *rxkhdr = (void *)(whdr + 1);
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -302,7 +304,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
_enter("");
- check = txb->seq ^ ntohl(txb->wire.callNumber);
+ check = txb->seq ^ call->call_id;
rxkhdr->data_size = htonl(txb->len | (u32)check << 16);
rxkhdr->checksum = 0;
@@ -312,7 +314,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
if (pad) {
- memset(txb->data + txb->offset, 0, pad);
+ memset(txb->kvec[0].iov_base + txb->offset, 0, pad);
txb->len += pad;
}
@@ -320,7 +322,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
token = call->conn->key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- sg_init_one(&sg, txb->data, txb->len);
+ sg_init_one(&sg, rxkhdr, txb->len);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, txb->len, iv.x);
@@ -362,9 +364,9 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
/* calculate the security checksum */
- x = (ntohl(txb->wire.cid) & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
+ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= txb->seq & 0x3fffffff;
- crypto.buf[0] = txb->wire.callNumber;
+ crypto.buf[0] = htonl(call->call_id);
crypto.buf[1] = htonl(x);
sg_init_one(&sg, crypto.buf, 8);
@@ -378,7 +380,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
y = (y >> 16) & 0xffff;
if (y == 0)
y = 1; /* zero checksums are not permitted */
- txb->wire.cksum = htons(y);
+ txb->cksum = htons(y);
switch (call->conn->security_level) {
case RXRPC_SECURITY_PLAIN:
@@ -664,7 +666,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
len = iov[0].iov_len + iov[1].iov_len;
- serial = atomic_inc_return(&conn->serial);
+ serial = rxrpc_get_next_serial(conn);
whdr.serial = htonl(serial);
ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len);
@@ -721,12 +723,11 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
- serial = atomic_inc_return(&conn->serial);
+ serial = rxrpc_get_next_serial(conn);
whdr.serial = htonl(serial);
rxrpc_local_dont_fragment(conn->local, false);
ret = kernel_sendmsg(conn->local->socket, &msg, iov, 3, len);
- rxrpc_local_dont_fragment(conn->local, true);
if (ret < 0) {
trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
rxrpc_tx_point_rxkad_response);
@@ -1256,7 +1257,7 @@ const struct rxrpc_security rxkad = {
.free_preparse_server_key = rxkad_free_preparse_server_key,
.destroy_server_key = rxkad_destroy_server_key,
.init_connection_security = rxkad_init_connection_security,
- .how_much_data = rxkad_how_much_data,
+ .alloc_txbuf = rxkad_alloc_txbuf,
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
.free_call_crypto = rxkad_free_call_crypto,
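
Note: the checksum hunk above now derives the seed from call->cid and call->call_id rather than a cached wire header, packing the channel number (the bottom RXRPC_CIDSHIFT bits of the connection ID) into the top of the word and the low 30 bits of the sequence underneath. A small demonstration of the packing, assuming the kernel's RXRPC_CIDSHIFT of 2:

#include <stdint.h>
#include <stdio.h>

#define RXRPC_CIDSHIFT    2
#define RXRPC_CHANNELMASK ((1 << RXRPC_CIDSHIFT) - 1)

int main(void)
{
        uint32_t cid = 0x12345678 | 0x2;   /* connection ID, channel 2 */
        uint32_t seq = 0x4000000f;         /* only 30 bits may survive */
        uint32_t x;

        x  = (cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
        x |= seq & 0x3fffffff;

        printf("seed=%08x (channel=%u, seq=%07x)\n",
               x, x >> 30, x & 0x3fffffff);
        return 0;
}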
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 5677d5690a02..6f765768c49c 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -240,7 +240,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_notify_end_tx_t notify_end_tx)
{
rxrpc_seq_t seq = txb->seq;
- bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
+ bool poke, last = txb->flags & RXRPC_LAST_PACKET;
rxrpc_inc_stat(call->rxnet, stat_tx_data);
@@ -336,7 +336,7 @@ reload:
do {
if (!txb) {
- size_t remain, bufsize, chunk, offset;
+ size_t remain;
_debug("alloc");
@@ -348,23 +348,11 @@ reload:
* region (enc blocksize), but the trailer is not.
*/
remain = more ? INT_MAX : msg_data_left(msg);
- ret = call->conn->security->how_much_data(call, remain,
- &bufsize, &chunk, &offset);
- if (ret < 0)
+ txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
+ if (IS_ERR(txb)) {
+ ret = PTR_ERR(txb);
goto maybe_error;
-
- _debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);
-
- /* create a buffer that we can retain until it's ACK'd */
- ret = -ENOMEM;
- txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_DATA,
- GFP_KERNEL);
- if (!txb)
- goto maybe_error;
-
- txb->offset = offset;
- txb->space -= offset;
- txb->space = min_t(size_t, chunk, txb->space);
+ }
}
_debug("append");
@@ -374,8 +362,8 @@ reload:
size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
_debug("add %zu", copy);
- if (!copy_from_iter_full(txb->data + txb->offset, copy,
- &msg->msg_iter))
+ if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
+ copy, &msg->msg_iter))
goto efault;
_debug("added");
txb->space -= copy;
@@ -394,18 +382,18 @@ reload:
/* add the packet to the send queue if it's now full */
if (!txb->space ||
(msg_data_left(msg) == 0 && !more)) {
- if (msg_data_left(msg) == 0 && !more) {
- txb->wire.flags |= RXRPC_LAST_PACKET;
- __set_bit(RXRPC_TXBUF_LAST, &txb->flags);
- }
+ if (msg_data_left(msg) == 0 && !more)
+ txb->flags |= RXRPC_LAST_PACKET;
else if (call->tx_top - call->acks_hard_ack <
call->tx_winsize)
- txb->wire.flags |= RXRPC_MORE_PACKETS;
+ txb->flags |= RXRPC_MORE_PACKETS;
ret = call->security->secure_packet(call, txb);
if (ret < 0)
goto out;
+ txb->kvec[0].iov_len += txb->len;
+ txb->len = txb->kvec[0].iov_len;
rxrpc_queue_packet(rx, call, txb, notify_end_tx);
txb = NULL;
}
@@ -621,7 +609,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call;
- unsigned long now, j;
bool dropped_lock = false;
int ret;
@@ -699,25 +686,21 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
switch (p.call.nr_timeouts) {
case 3:
- j = msecs_to_jiffies(p.call.timeouts.normal);
- if (p.call.timeouts.normal > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_rx_timo, j);
+ WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
fallthrough;
case 2:
- j = msecs_to_jiffies(p.call.timeouts.idle);
- if (p.call.timeouts.idle > 0 && j == 0)
- j = 1;
- WRITE_ONCE(call->next_req_timo, j);
+ WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
- j = p.call.timeouts.hard * HZ;
- now = jiffies;
- j += now;
- WRITE_ONCE(call->expect_term_by, j);
- rxrpc_reduce_call_timer(call, j, now,
- rxrpc_timer_set_for_hard);
+ ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);
+
+ WRITE_ONCE(call->expect_term_by,
+ ktime_add(delay,
+ ktime_get_real()));
+ trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
+ rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
+
}
break;
}
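
Note: the hard-timeout path above turns a user-supplied second count into an absolute ktime deadline: the seconds become a millisecond delay via ms_to_ktime(), which is then added to the current real time. A userspace stand-in for those two helpers, with placeholder values:

#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;                 /* nanoseconds */
#define MSEC_PER_SEC 1000LL

static ktime_t ms_to_ktime(int64_t ms)          { return ms * 1000000; }
static ktime_t ktime_add(ktime_t a, ktime_t b)  { return a + b; }

int main(void)
{
        int64_t hard_secs = 30;          /* p.call.timeouts.hard */
        ktime_t now = 1000000000;        /* pretend ktime_get_real() */
        ktime_t delay = ms_to_ktime(hard_secs * MSEC_PER_SEC);
        ktime_t expect_term_by = ktime_add(now, delay);

        printf("terminate by t=%lld ns\n", (long long)expect_term_by);
        return 0;
}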
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index ecaeb4ecfb58..c9bedd0e2d86 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -15,6 +15,8 @@ static const unsigned int four = 4;
static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = 255;
+static const unsigned long one_ms = 1;
+static const unsigned long max_ms = 1000;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
@@ -28,24 +30,24 @@ static const unsigned long max_500 = 500;
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
- /* Values measured in milliseconds but used in jiffies */
+ /* Values measured in milliseconds */
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = (void *)&one_ms,
+ .extra2 = (void *)&max_ms,
},
{
.procname = "idle_conn_expiry",
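
Note: switching from proc_doulongvec_ms_jiffies_minmax to proc_doulongvec_minmax means the value is kept as plain milliseconds, bounds-checked against extra1/extra2 (1..1000 ms here) with no jiffies conversion. A rough userspace model of that bounds check, assuming the minmax handler refuses out-of-range writes:

#include <stdio.h>

static int sysctl_write_ms(unsigned long *slot, unsigned long val,
                           unsigned long min, unsigned long max)
{
        if (val < min || val > max)
                return -22;              /* -EINVAL: write rejected outright */
        *slot = val;                     /* stored as plain milliseconds */
        return 0;
}

int main(void)
{
        unsigned long soft_ack_delay = 100;

        printf("%d\n", sysctl_write_ms(&soft_ack_delay, 5000, 1, 1000)); /* -22 */
        printf("%d (now %lu ms)\n",
               sysctl_write_ms(&soft_ack_delay, 200, 1, 1000), soft_ack_delay);
        return 0;
}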
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index d43be8512386..b2a82ab756c2 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -14,45 +14,146 @@ static atomic_t rxrpc_txbuf_debug_ids;
atomic_t rxrpc_nr_txbuf;
/*
- * Allocate and partially initialise an I/O request structure.
+ * Allocate and partially initialise a data transmission buffer.
*/
-struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
- gfp_t gfp)
+struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
+ size_t data_align, gfp_t gfp)
{
+ struct rxrpc_wire_header *whdr;
struct rxrpc_txbuf *txb;
+ size_t total, hoff = 0;
+ void *buf;
txb = kmalloc(sizeof(*txb), gfp);
- if (txb) {
- INIT_LIST_HEAD(&txb->call_link);
- INIT_LIST_HEAD(&txb->tx_link);
- refcount_set(&txb->ref, 1);
- txb->call_debug_id = call->debug_id;
- txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
- txb->space = sizeof(txb->data);
- txb->len = 0;
- txb->offset = 0;
- txb->flags = 0;
- txb->ack_why = 0;
- txb->seq = call->tx_prepared + 1;
- txb->wire.epoch = htonl(call->conn->proto.epoch);
- txb->wire.cid = htonl(call->cid);
- txb->wire.callNumber = htonl(call->call_id);
- txb->wire.seq = htonl(txb->seq);
- txb->wire.type = packet_type;
- txb->wire.flags = call->conn->out_clientflag;
- txb->wire.userStatus = 0;
- txb->wire.securityIndex = call->security_ix;
- txb->wire._rsvd = 0;
- txb->wire.serviceId = htons(call->dest_srx.srx_service);
-
- trace_rxrpc_txbuf(txb->debug_id,
- txb->call_debug_id, txb->seq, 1,
- packet_type == RXRPC_PACKET_TYPE_DATA ?
- rxrpc_txbuf_alloc_data :
- rxrpc_txbuf_alloc_ack);
- atomic_inc(&rxrpc_nr_txbuf);
+ if (!txb)
+ return NULL;
+
+ if (data_align)
+ hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
+ total = hoff + sizeof(*whdr) + data_size;
+
+ mutex_lock(&call->conn->tx_data_alloc_lock);
+ buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+ ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+ mutex_unlock(&call->conn->tx_data_alloc_lock);
+ if (!buf) {
+ kfree(txb);
+ return NULL;
+ }
+
+ whdr = buf + hoff;
+
+ INIT_LIST_HEAD(&txb->call_link);
+ INIT_LIST_HEAD(&txb->tx_link);
+ refcount_set(&txb->ref, 1);
+ txb->last_sent = KTIME_MIN;
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->space = data_size;
+ txb->len = 0;
+ txb->offset = sizeof(*whdr);
+ txb->flags = call->conn->out_clientflag;
+ txb->ack_why = 0;
+ txb->seq = call->tx_prepared + 1;
+ txb->serial = 0;
+ txb->cksum = 0;
+ txb->nr_kvec = 1;
+ txb->kvec[0].iov_base = whdr;
+ txb->kvec[0].iov_len = sizeof(*whdr);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->seq = htonl(txb->seq);
+ whdr->type = RXRPC_PACKET_TYPE_DATA;
+ whdr->flags = 0;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->dest_srx.srx_service);
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
+ rxrpc_txbuf_alloc_data);
+
+ atomic_inc(&rxrpc_nr_txbuf);
+ return txb;
+}
+
+/*
+ * Allocate and partially initialise an ACK packet.
+ */
+struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size)
+{
+ struct rxrpc_wire_header *whdr;
+ struct rxrpc_acktrailer *trailer;
+ struct rxrpc_ackpacket *ack;
+ struct rxrpc_txbuf *txb;
+ gfp_t gfp = rcu_read_lock_held() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS;
+ void *buf, *buf2 = NULL;
+ u8 *filler;
+
+ txb = kmalloc(sizeof(*txb), gfp);
+ if (!txb)
+ return NULL;
+
+ buf = page_frag_alloc(&call->local->tx_alloc,
+ sizeof(*whdr) + sizeof(*ack) + 1 + 3 + sizeof(*trailer), gfp);
+ if (!buf) {
+ kfree(txb);
+ return NULL;
+ }
+
+ if (sack_size) {
+ buf2 = page_frag_alloc(&call->local->tx_alloc, sack_size, gfp);
+ if (!buf2) {
+ page_frag_free(buf);
+ kfree(txb);
+ return NULL;
+ }
}
+ whdr = buf;
+ ack = buf + sizeof(*whdr);
+ filler = buf + sizeof(*whdr) + sizeof(*ack) + 1;
+ trailer = buf + sizeof(*whdr) + sizeof(*ack) + 1 + 3;
+
+ INIT_LIST_HEAD(&txb->call_link);
+ INIT_LIST_HEAD(&txb->tx_link);
+ refcount_set(&txb->ref, 1);
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->space = 0;
+ txb->len = sizeof(*whdr) + sizeof(*ack) + 3 + sizeof(*trailer);
+ txb->offset = 0;
+ txb->flags = call->conn->out_clientflag;
+ txb->ack_rwind = 0;
+ txb->seq = 0;
+ txb->serial = 0;
+ txb->cksum = 0;
+ txb->nr_kvec = 3;
+ txb->kvec[0].iov_base = whdr;
+ txb->kvec[0].iov_len = sizeof(*whdr) + sizeof(*ack);
+ txb->kvec[1].iov_base = buf2;
+ txb->kvec[1].iov_len = sack_size;
+ txb->kvec[2].iov_base = filler;
+ txb->kvec[2].iov_len = 3 + sizeof(*trailer);
+
+ whdr->epoch = htonl(call->conn->proto.epoch);
+ whdr->cid = htonl(call->cid);
+ whdr->callNumber = htonl(call->call_id);
+ whdr->seq = 0;
+ whdr->type = RXRPC_PACKET_TYPE_ACK;
+ whdr->flags = 0;
+ whdr->userStatus = 0;
+ whdr->securityIndex = call->security_ix;
+ whdr->_rsvd = 0;
+ whdr->serviceId = htons(call->dest_srx.srx_service);
+
+ get_page(virt_to_head_page(trailer));
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
+ rxrpc_txbuf_alloc_ack);
+ atomic_inc(&rxrpc_nr_txbuf);
return txb;
}
@@ -71,12 +172,15 @@ void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r, what);
}
-static void rxrpc_free_txbuf(struct rcu_head *rcu)
+static void rxrpc_free_txbuf(struct rxrpc_txbuf *txb)
{
- struct rxrpc_txbuf *txb = container_of(rcu, struct rxrpc_txbuf, rcu);
+ int i;
trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 0,
rxrpc_txbuf_free);
+ for (i = 0; i < txb->nr_kvec; i++)
+ if (txb->kvec[i].iov_base)
+ page_frag_free(txb->kvec[i].iov_base);
kfree(txb);
atomic_dec(&rxrpc_nr_txbuf);
}
@@ -95,7 +199,7 @@ void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
dead = __refcount_dec_and_test(&txb->ref, &r);
trace_rxrpc_txbuf(debug_id, call_debug_id, seq, r - 1, what);
if (dead)
- call_rcu(&txb->rcu, rxrpc_free_txbuf);
+ rxrpc_free_txbuf(txb);
}
}
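
Note: rxrpc_alloc_data_txbuf() above places the wire header at an offset inside the page fragment so that the payload that follows lands on the requested alignment boundary; the kvec initially covers just the header and grows as data is appended. A malloc-based model of the offset calculation (sizes illustrative; the kernel uses page_frag_alloc_align()):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define round_up(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t whdr = 28;                /* e.g. sizeof(struct rxrpc_wire_header) */
        size_t data_align = 8, data_size = 128;
        size_t hoff = round_up(whdr, data_align) - whdr;  /* 4 pad bytes */
        size_t total = hoff + whdr + data_size;
        unsigned char *buf, *hdr, *data;

        buf = malloc(total);
        if (!buf)
                return 1;
        hdr  = buf + hoff;               /* wire header sits here */
        data = hdr + whdr;               /* payload starts 8-byte aligned */

        printf("total=%zu hoff=%zu data@+%zu\n",
               total, hoff, (size_t)(data - buf));
        free(buf);
        return 0;
}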
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 470c70deffe2..8180d0c12fce 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -737,16 +737,6 @@ config NET_ACT_SAMPLE
To compile this code as a module, choose M here: the
module will be called act_sample.
-config NET_ACT_IPT
- tristate "IPtables targets"
- depends on NET_CLS_ACT && NETFILTER && NETFILTER_XTABLES
- help
- Say Y here to be able to invoke iptables targets after successful
- classification.
-
- To compile this code as a module, choose M here: the
- module will be called act_ipt.
-
config NET_ACT_NAT
tristate "Stateless NAT"
depends on NET_CLS_ACT
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3e30d7260493..9ee622fb1160 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1363,7 +1363,7 @@ struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
if (rtnl_held)
rtnl_unlock();
- request_module("act_%s", act_name);
+ request_module(NET_ACT_ALIAS_PREFIX "%s", act_name);
if (rtnl_held)
rtnl_lock();
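
Note: the request_module() change above, together with the MODULE_ALIAS_NET_ACT() stamps added to each action module below, namespaces autoloading: the loader asks for a prefixed alias and only modules declaring that alias can be pulled in, so a netlink-supplied action name can no longer load unrelated modules. A hedged sketch of the string handed to modprobe; the prefix spelling is assumed from the matching include/net/act_api.h change, which is not part of this section:

#include <stdio.h>

#define NET_ACT_ALIAS_PREFIX "net-act-"  /* assumed spelling */

int main(void)
{
        const char *act_name = "bpf";    /* from the netlink request */
        char request[64];

        /* Mirrors request_module(NET_ACT_ALIAS_PREFIX "%s", act_name):
         * only a module carrying MODULE_ALIAS("net-act-bpf") resolves,
         * rather than any module named "act_<string>".
         */
        snprintf(request, sizeof(request), NET_ACT_ALIAS_PREFIX "%s", act_name);
        printf("modprobe alias lookup: %s\n", request);
        return 0;
}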
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 6cfee6658103..0e3cf11ae5fc 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -401,6 +401,7 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
.init = tcf_bpf_init,
.size = sizeof(struct tcf_bpf),
};
+MODULE_ALIAS_NET_ACT("bpf");
static __net_init int bpf_init_net(struct net *net)
{
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index f8762756657d..0fce631e7c91 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -242,6 +242,7 @@ static struct tc_action_ops act_connmark_ops = {
.cleanup = tcf_connmark_cleanup,
.size = sizeof(struct tcf_connmark_info),
};
+MODULE_ALIAS_NET_ACT("connmark");
static __net_init int connmark_init_net(struct net *net)
{
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 7f8b1f2f2ed9..5cc8e407e791 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -709,6 +709,7 @@ static struct tc_action_ops act_csum_ops = {
.offload_act_setup = tcf_csum_offload_act_setup,
.size = sizeof(struct tcf_csum),
};
+MODULE_ALIAS_NET_ACT("csum");
static __net_init int csum_init_net(struct net *net)
{
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6124d8b128d1..baac083fd8f1 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1600,6 +1600,7 @@ static struct tc_action_ops act_ct_ops = {
.offload_act_setup = tcf_ct_offload_act_setup,
.size = sizeof(struct tcf_ct),
};
+MODULE_ALIAS_NET_ACT("ct");
static __net_init int ct_init_net(struct net *net)
{
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index e620f9a84afe..5dd41a012110 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -363,6 +363,7 @@ static struct tc_action_ops act_ctinfo_ops = {
.cleanup= tcf_ctinfo_cleanup,
.size = sizeof(struct tcf_ctinfo),
};
+MODULE_ALIAS_NET_ACT("ctinfo");
static __net_init int ctinfo_init_net(struct net *net)
{
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 4af3b7ec249f..e949280eb800 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -296,6 +296,7 @@ static struct tc_action_ops act_gact_ops = {
.offload_act_setup = tcf_gact_offload_act_setup,
.size = sizeof(struct tcf_gact),
};
+MODULE_ALIAS_NET_ACT("gact");
static __net_init int gact_init_net(struct net *net)
{
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index c681cd011afd..1dd74125398a 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -645,6 +645,7 @@ static struct tc_action_ops act_gate_ops = {
.offload_act_setup = tcf_gate_offload_act_setup,
.size = sizeof(struct tcf_gate),
};
+MODULE_ALIAS_NET_ACT("gate");
static __net_init int gate_init_net(struct net *net)
{
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 0e867d13beb5..107c6d83dc5c 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -889,6 +889,7 @@ static struct tc_action_ops act_ife_ops = {
.init = tcf_ife_init,
.size = sizeof(struct tcf_ife_info),
};
+MODULE_ALIAS_NET_ACT("ife");
static __net_init int ife_init_net(struct net *net)
{
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 12386f590b0f..5b3814365924 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -232,18 +232,14 @@ release_idr:
return err;
}
-static bool is_mirred_nested(void)
-{
- return unlikely(__this_cpu_read(mirred_nest_level) > 1);
-}
-
-static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+static int
+tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
{
int err;
if (!want_ingress)
err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
- else if (is_mirred_nested())
+ else if (!at_ingress)
err = netif_rx(skb);
else
err = netif_receive_skb(skb);
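
Note: the rewritten tcf_mirred_forward() above keys the receive path on where the packet currently is (at_ingress) instead of on mirred nesting depth: egress targets still go through dev_queue_xmit(), an egress-to-ingress redirect defers to the backlog via netif_rx(), and ingress-to-ingress recurses inline via netif_receive_skb(). The decision table as a runnable stub:

#include <stdbool.h>
#include <stdio.h>

static const char *mirred_path(bool at_ingress, bool want_ingress)
{
        if (!want_ingress)
                return "dev_queue_xmit";    /* egress redirect/mirror */
        if (!at_ingress)
                return "netif_rx";          /* egress->ingress: defer to backlog */
        return "netif_receive_skb";         /* ingress->ingress: recurse inline */
}

int main(void)
{
        printf("%s\n", mirred_path(true, true));    /* netif_receive_skb */
        printf("%s\n", mirred_path(false, true));   /* netif_rx */
        printf("%s\n", mirred_path(true, false));   /* dev_queue_xmit */
        return 0;
}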
@@ -270,8 +266,7 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
dev->name);
- err = -ENODEV;
- goto out;
+ goto err_cant_do;
}
/* we could easily avoid the clone only if called by ingress and clsact;
@@ -283,10 +278,8 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
tcf_mirred_can_reinsert(retval);
if (!dont_clone) {
skb_to_send = skb_clone(skb, GFP_ATOMIC);
- if (!skb_to_send) {
- err = -ENOMEM;
- goto out;
- }
+ if (!skb_to_send)
+ goto err_cant_do;
}
want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
@@ -319,19 +312,20 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
- err = tcf_mirred_forward(want_ingress, skb_to_send);
+ err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
} else {
- err = tcf_mirred_forward(want_ingress, skb_to_send);
+ err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
}
-
- if (err) {
-out:
+ if (err)
tcf_action_inc_overlimit_qstats(&m->common);
- if (is_redirect)
- retval = TC_ACT_SHOT;
- }
return retval;
+
+err_cant_do:
+ if (is_redirect)
+ retval = TC_ACT_SHOT;
+ tcf_action_inc_overlimit_qstats(&m->common);
+ return retval;
}
static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
@@ -533,8 +527,6 @@ static int mirred_device_event(struct notifier_block *unused,
* net_device are already rcu protected.
*/
RCU_INIT_POINTER(m->tcfm_dev, NULL);
- } else if (m->tcfm_blockid) {
- m->tcfm_blockid = 0;
}
spin_unlock_bh(&m->tcf_lock);
}
@@ -643,6 +635,7 @@ static struct tc_action_ops act_mirred_ops = {
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
};
+MODULE_ALIAS_NET_ACT("mirred");
static __net_init int mirred_init_net(struct net *net)
{
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 34b8edb6cc77..44a37a71ae92 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -452,6 +452,7 @@ static struct tc_action_ops act_mpls_ops = {
.offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
+MODULE_ALIAS_NET_ACT("mpls");
static __net_init int mpls_init_net(struct net *net)
{
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index a180e724634e..d541f553805f 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -324,6 +324,7 @@ static struct tc_action_ops act_nat_ops = {
.cleanup = tcf_nat_cleanup,
.size = sizeof(struct tcf_nat),
};
+MODULE_ALIAS_NET_ACT("nat");
static __net_init int nat_init_net(struct net *net)
{
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 2ef22969f274..fc0a35a7b62a 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -515,11 +515,11 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
spin_unlock_bh(&p->tcf_lock);
return -ENOBUFS;
}
+ opt->nkeys = parms->tcfp_nkeys;
memcpy(opt->keys, parms->tcfp_keys,
flex_array_size(opt, keys, parms->tcfp_nkeys));
opt->index = p->tcf_index;
- opt->nkeys = parms->tcfp_nkeys;
opt->flags = parms->tcfp_flags;
opt->action = p->tcf_action;
opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
@@ -620,6 +620,7 @@ static struct tc_action_ops act_pedit_ops = {
.offload_act_setup = tcf_pedit_offload_act_setup,
.size = sizeof(struct tcf_pedit),
};
+MODULE_ALIAS_NET_ACT("pedit");
static __net_init int pedit_init_net(struct net *net)
{
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e119b4a3db9f..8555125ed34d 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -502,6 +502,7 @@ static struct tc_action_ops act_police_ops = {
.offload_act_setup = tcf_police_offload_act_setup,
.size = sizeof(struct tcf_police),
};
+MODULE_ALIAS_NET_ACT("police");
static __net_init int police_init_net(struct net *net)
{
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index c5c61efe6db4..a69b53d54039 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -316,6 +316,7 @@ static struct tc_action_ops act_sample_ops = {
.offload_act_setup = tcf_sample_offload_act_setup,
.size = sizeof(struct tcf_sample),
};
+MODULE_ALIAS_NET_ACT("sample");
static __net_init int sample_init_net(struct net *net)
{
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 0a3e92888295..f3abe0545989 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -209,6 +209,7 @@ static struct tc_action_ops act_simp_ops = {
.init = tcf_simp_init,
.size = sizeof(struct tcf_defact),
};
+MODULE_ALIAS_NET_ACT("simple");
static __net_init int simp_init_net(struct net *net)
{
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 754f78b35bb8..1f1d9ce3e968 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -426,6 +426,7 @@ static struct tc_action_ops act_skbedit_ops = {
.offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
+MODULE_ALIAS_NET_ACT("skbedit");
static __net_init int skbedit_init_net(struct net *net)
{
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index bcb673ab0008..39945b139c48 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -287,6 +287,7 @@ static struct tc_action_ops act_skbmod_ops = {
.cleanup = tcf_skbmod_cleanup,
.size = sizeof(struct tcf_skbmod),
};
+MODULE_ALIAS_NET_ACT("skbmod");
static __net_init int skbmod_init_net(struct net *net)
{
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 300b08aa8283..1536f8b16f1b 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -842,6 +842,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
.offload_act_setup = tcf_tunnel_key_offload_act_setup,
.size = sizeof(struct tcf_tunnel_key),
};
+MODULE_ALIAS_NET_ACT("tunnel_key");
static __net_init int tunnel_key_init_net(struct net *net)
{
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 836183011a7c..22f4b1e8ade9 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -427,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = {
.offload_act_setup = tcf_vlan_offload_act_setup,
.size = sizeof(struct tcf_vlan),
};
+MODULE_ALIAS_NET_ACT("vlan");
static __net_init int vlan_init_net(struct net *net)
{
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 92a12e3d0fe6..ca5676b2668e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -257,7 +257,7 @@ tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
#ifdef CONFIG_MODULES
if (rtnl_held)
rtnl_unlock();
- request_module("cls_%s", kind);
+ request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
if (rtnl_held)
rtnl_lock();
ops = __tcf_proto_lookup_ops(kind);
@@ -1560,6 +1560,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
chain_prev = chain,
chain = __tcf_get_next_chain(block, chain),
tcf_chain_put(chain_prev)) {
+ if (chain->tmplt_ops && add)
+ chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
+ cb_priv);
for (tp = __tcf_get_next_proto(chain, NULL); tp;
tp_prev = tp,
tp = __tcf_get_next_proto(chain, tp),
@@ -1575,6 +1578,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
goto err_playback_remove;
}
}
+ if (chain->tmplt_ops && !add)
+ chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
+ cb_priv);
}
return 0;
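
Note: the playback hunk above fixes ordering for chain templates: on bind (add) a chain's template is offloaded before any of its filters are replayed, and on unbind it is destroyed only after the filters are gone, so the hardware never sees a filter without its template. A runnable toy model of that ordering:

#include <stdbool.h>
#include <stdio.h>

static void tmplt_reoffload(int chain, bool add)
{
        printf("chain %d: template %s\n", chain, add ? "create" : "destroy");
}

static void filter_reoffload(int chain, int f)
{
        printf("chain %d: filter %d\n", chain, f);
}

static void playback(bool add)
{
        for (int chain = 0; chain < 2; chain++) {
                if (add)
                        tmplt_reoffload(chain, true);  /* template before its filters */
                for (int f = 0; f < 2; f++)
                        filter_reoffload(chain, f);
                if (!add)
                        tmplt_reoffload(chain, false); /* template after its filters */
        }
}

int main(void)
{
        playback(true);   /* binding a callback: FLOW_CLS_TMPLT_CREATE first */
        playback(false);  /* unbinding: FLOW_CLS_TMPLT_DESTROY last */
        return 0;
}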
@@ -3000,7 +3006,8 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
ops = tcf_proto_lookup_ops(name, true, extack);
if (IS_ERR(ops))
return PTR_ERR(ops);
- if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+ if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
+ !ops->tmplt_reoffload) {
NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
module_put(ops->owner);
return -EOPNOTSUPP;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index a1f56931330c..ecfaa4f9a04e 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -328,6 +328,7 @@ static struct tcf_proto_ops cls_basic_ops __read_mostly = {
.bind_class = basic_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("basic");
static int __init init_basic(void)
{
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 382c7a71f81f..5e83e890f6a4 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -693,6 +693,7 @@ static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
.dump = cls_bpf_dump,
.bind_class = cls_bpf_bind_class,
};
+MODULE_ALIAS_NET_CLS("bpf");
static int __init cls_bpf_init_mod(void)
{
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 7ee8dbf49ed0..424252982d6a 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -209,6 +209,7 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
.dump = cls_cgroup_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("cgroup");
static int __init init_cgroup_cls(void)
{
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6ab317b48d6c..5502998aace7 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -702,6 +702,7 @@ static struct tcf_proto_ops cls_flow_ops __read_mostly = {
.walk = flow_walk,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("flow");
static int __init cls_flow_init(void)
{
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index e5314a31f75a..e1314674b4a9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2460,8 +2460,11 @@ unbind_filter:
}
errout_idr:
- if (!fold)
+ if (!fold) {
+ spin_lock(&tp->lock);
idr_remove(&head->handle_idr, fnew->handle);
+ spin_unlock(&tp->lock);
+ }
__fl_put(fnew);
errout_tb:
kfree(tb);
@@ -2721,6 +2724,28 @@ static void fl_tmplt_destroy(void *tmplt_priv)
kfree(tmplt);
}
+static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
+ struct flow_cls_offload cls_flower = {};
+
+ cls_flower.rule = flow_rule_alloc(0);
+ if (!cls_flower.rule)
+ return;
+
+ cls_flower.common.chain_index = chain->index;
+ cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
+ FLOW_CLS_TMPLT_DESTROY;
+ cls_flower.cookie = (unsigned long) tmplt;
+ cls_flower.rule->match.dissector = &tmplt->dissector;
+ cls_flower.rule->match.mask = &tmplt->mask;
+ cls_flower.rule->match.key = &tmplt->dummy_key;
+
+ cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+ kfree(cls_flower.rule);
+}
+
static int fl_dump_key_val(struct sk_buff *skb,
void *val, int val_type,
void *mask, int mask_type, int len)
@@ -3628,11 +3653,13 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
.bind_class = fl_bind_class,
.tmplt_create = fl_tmplt_create,
.tmplt_destroy = fl_tmplt_destroy,
+ .tmplt_reoffload = fl_tmplt_reoffload,
.tmplt_dump = fl_tmplt_dump,
.get_exts = fl_get_exts,
.owner = THIS_MODULE,
.flags = TCF_PROTO_OPS_DOIT_UNLOCKED,
};
+MODULE_ALIAS_NET_CLS("flower");
static int __init cls_fl_init(void)
{
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index afc534ee0a18..cdddc8695228 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -433,6 +433,7 @@ static struct tcf_proto_ops cls_fw_ops __read_mostly = {
.bind_class = fw_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("fw");
static int __init init_fw(void)
{
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index c4ed11df6254..9f1e62ca508d 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -398,6 +398,7 @@ static struct tcf_proto_ops cls_mall_ops __read_mostly = {
.bind_class = mall_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("matchall");
static int __init cls_mall_init(void)
{
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 12a505db4183..b9c58c040c30 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -671,6 +671,7 @@ static struct tcf_proto_ops cls_route4_ops __read_mostly = {
.bind_class = route4_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("route");
static int __init init_route4(void)
{
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 289e1755c26b..9412d88a99bc 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -1453,6 +1453,7 @@ static struct tcf_proto_ops cls_u32_ops __read_mostly = {
.bind_class = u32_bind_class,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_CLS("u32");
static int __init init_u32(void)
{
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
index 5ea84decec19..5337bc462755 100644
--- a/net/sched/em_canid.c
+++ b/net/sched/em_canid.c
@@ -222,6 +222,7 @@ static void __exit exit_em_canid(void)
tcf_em_unregister(&em_canid_ops);
}
+MODULE_DESCRIPTION("ematch classifier to match CAN IDs embedded in skb CAN frames");
MODULE_LICENSE("GPL");
module_init(init_em_canid);
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index f17b049ea530..c90ad7ea26b4 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -87,6 +87,7 @@ static void __exit exit_em_cmp(void)
tcf_em_unregister(&em_cmp_ops);
}
+MODULE_DESCRIPTION("ematch classifier for basic data types(8/16/32 bit) against skb data");
MODULE_LICENSE("GPL");
module_init(init_em_cmp);
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 09d8afd04a2a..8996c73c9779 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -1006,6 +1006,7 @@ static void __exit exit_em_meta(void)
tcf_em_unregister(&em_meta_ops);
}
+MODULE_DESCRIPTION("ematch classifier for various internal kernel metadata, skb metadata and sk metadata");
MODULE_LICENSE("GPL");
module_init(init_em_meta);
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index a83b237cbeb0..4f9f21a05d5e 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -68,6 +68,7 @@ static void __exit exit_em_nbyte(void)
tcf_em_unregister(&em_nbyte_ops);
}
+MODULE_DESCRIPTION("ematch classifier for arbitrary skb multi-bytes");
MODULE_LICENSE("GPL");
module_init(init_em_nbyte);
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index f176afb70559..420c66203b17 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -147,6 +147,7 @@ static void __exit exit_em_text(void)
tcf_em_unregister(&em_text_ops);
}
+MODULE_DESCRIPTION("ematch classifier for embedded text in skbs");
MODULE_LICENSE("GPL");
module_init(init_em_text);
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 71b070da0437..fdec4db5ec89 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -52,6 +52,7 @@ static void __exit exit_em_u32(void)
tcf_em_unregister(&em_u32_ops);
}
+MODULE_DESCRIPTION("ematch skb classifier using 32 bit chunks of data");
MODULE_LICENSE("GPL");
module_init(init_em_u32);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 36b025cc4fd2..9d928f6a473a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -228,7 +228,7 @@ int qdisc_set_default(const char *name)
if (!ops) {
/* Not found, drop lock and try to load module */
write_unlock(&qdisc_mod_lock);
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
@@ -1275,7 +1275,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
* go away in the mean time.
*/
rtnl_unlock();
- request_module("sch_%s", name);
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
rtnl_lock();
ops = qdisc_lookup_ops(kind);
if (ops != NULL) {
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 9cff99558694..edee926ccde8 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -3103,6 +3103,7 @@ static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
.dump_stats = cake_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cake");
static int __init cake_module_init(void)
{
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index beece8e82c23..69001eff0315 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -546,6 +546,7 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
.dump = cbs_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("cbs");
static struct notifier_block cbs_device_notifier = {
.notifier_call = cbs_dev_notifier,
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index ae1da08e268f..ea108030c6b4 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -498,6 +498,7 @@ static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
.dump_stats = choke_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("choke");
static int __init choke_module_init(void)
{
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index d7a4874543de..ecb3f164bb25 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Codel - The Controlled-Delay Active Queue Management algorithm
*
@@ -7,37 +8,6 @@
* Implemented on linux by :
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
* Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the authors may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Alternatively, provided that this notice is retained in full, this
- * software may be distributed under the terms of the GNU General
- * Public License ("GPL") version 2, in which case the provisions of the
- * GPL apply INSTEAD OF those given above.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
*/
#include <linux/module.h>
@@ -287,6 +257,7 @@ static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
.dump_stats = codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("codel");
static int __init codel_module_init(void)
{
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 097740a9afea..c69b999fae17 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -481,6 +481,7 @@ static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
.destroy = drr_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("drr");
static int __init drr_init(void)
{
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 4808159a5466..2e4bef713b6a 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -500,6 +500,7 @@ static struct Qdisc_ops etf_qdisc_ops __read_mostly = {
.dump = etf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("etf");
static int __init etf_module_init(void)
{
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index f7c88495946b..835b4460b448 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -812,6 +812,7 @@ static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
.dump = ets_qdisc_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ets");
static int __init ets_init(void)
{
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3a31c47fea9b..cdf23ff16f40 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -1264,6 +1264,7 @@ static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
.dump_stats = fq_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq");
static int __init fq_module_init(void)
{
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 8c4fee063436..79f9d6de6c85 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -717,6 +717,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
.dump_stats = fq_codel_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("fq_codel");
static int __init fq_codel_module_init(void)
{
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9b3e9262040b..ff5336493777 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -27,6 +27,7 @@
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
+#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>
@@ -409,7 +410,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
void __qdisc_run(struct Qdisc *q)
{
- int quota = READ_ONCE(dev_tx_weight);
+ int quota = READ_ONCE(net_hotdata.dev_tx_weight);
int packets;
while (qdisc_restart(q, &packets)) {
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8c61eb3dc943..79ba9dc70254 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -930,6 +930,7 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
.dump = gred_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("gred");
static int __init gred_module_init(void)
{
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 16c45da4036a..4e626df742d7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1679,6 +1679,7 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct hfsc_sched),
.owner = THIS_MODULE
};
+MODULE_ALIAS_NET_SCH("hfsc");
static int __init
hfsc_init(void)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index d26cd436cbe3..3f906df1435b 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -702,6 +702,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.dump_stats = hhf_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("hhf");
static int __init hhf_module_init(void)
{
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 7349233eaa9b..93e6fb56f3b5 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -2166,6 +2166,7 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.dump = htb_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("htb");
static int __init htb_module_init(void)
{
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 5fa9eaa79bfc..c2ef9dcf91d2 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -168,6 +168,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.ingress_block_get = ingress_ingress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("ingress");
struct clsact_sched_data {
struct tcf_block *ingress_block;
@@ -344,6 +345,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.egress_block_get = clsact_egress_block_get,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("clsact");
static int __init ingress_module_init(void)
{
@@ -368,6 +370,5 @@ static void __exit ingress_module_exit(void)
module_init(ingress_module_init);
module_exit(ingress_module_exit);
-MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 43e53ee00a56..225353fbb3f1 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -774,6 +774,7 @@ static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
.dump = mqprio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("mqprio");
static int __init mqprio_module_init(void)
{
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index d66d5f0ec080..79e93a19d5fa 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -395,6 +395,7 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
.dump = multiq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("multiq");
static int __init multiq_module_init(void)
{
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fa678eb88528..edc72962ae63 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -1293,6 +1293,7 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.dump = netem_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("netem");
static int __init netem_module_init(void)
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 2da6250ec346..1764059b0635 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -556,6 +556,7 @@ static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
.dump_stats = pie_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("pie");
static int __init pie_module_init(void)
{
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 992f0c8d7988..cefb65201e17 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -213,6 +213,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.reset = qdisc_reset_queue,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("plug");
static int __init plug_module_init(void)
{
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8ecdd3ef6f8e..cc30f7a32f1a 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -418,6 +418,7 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
.dump = prio_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("prio");
static int __init prio_module_init(void)
{
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 48a604c320c7..d584c0c25899 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1521,6 +1521,7 @@ static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
.destroy = qfq_destroy_qdisc,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("qfq");
static int __init qfq_init(void)
{
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 607b6c8b3a9b..b5f096588fae 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -548,6 +548,7 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
.dump_stats = red_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("red");
static int __init red_module_init(void)
{
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 1871a1c0224d..b717e15a3a17 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -709,6 +709,7 @@ static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
.dump_stats = sfb_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfb");
static int __init sfb_module_init(void)
{
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index eb77558fa367..e66f4afb920d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -925,6 +925,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
.dump = sfq_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("sfq");
static int __init sfq_module_init(void)
{
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 28beb11762d8..b4dd626c309c 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -292,6 +292,7 @@ static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
.destroy = skbprio_destroy,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("skbprio");
static int __init skbprio_module_init(void)
{
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 31a8252bd09c..c5de70efdc86 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -40,6 +40,8 @@ static struct static_key_false taprio_have_working_mqprio;
#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+#define TAPRIO_SUPPORTED_FLAGS \
+ (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
@@ -408,19 +410,6 @@ static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
return entry;
}
-static bool taprio_flags_valid(u32 flags)
-{
- /* Make sure no other flag bits are set. */
- if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
- TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- /* txtime-assist and full offload are mutually exclusive */
- if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
- (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
- return false;
- return true;
-}
-
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
@@ -1031,7 +1020,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] =
NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range),
[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
- [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
+ [TCA_TAPRIO_ATTR_FLAGS] =
+ NLA_POLICY_MASK(NLA_U32, TAPRIO_SUPPORTED_FLAGS),
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
[TCA_TAPRIO_ATTR_TC_ENTRY] = { .type = NLA_NESTED },
};
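
Note: replacing the open-coded flags check with NLA_POLICY_MASK moves the "no unknown bits" validation into netlink attribute parsing itself, so taprio_flags_valid() could be deleted and only the mutual-exclusion test remains in taprio_change(). A minimal model of what the mask policy enforces (flag values mirror the two taprio bits):

#include <stdint.h>
#include <stdio.h>

#define FLAG_TXTIME_ASSIST 0x1
#define FLAG_FULL_OFFLOAD  0x2
#define SUPPORTED_FLAGS    (FLAG_TXTIME_ASSIST | FLAG_FULL_OFFLOAD)

static int validate_mask(uint32_t value, uint32_t mask)
{
        return (value & ~mask) ? -1 /* -EINVAL from nla_parse */ : 0;
}

int main(void)
{
        printf("%d\n", validate_mask(FLAG_FULL_OFFLOAD, SUPPORTED_FLAGS)); /* 0 */
        printf("%d\n", validate_mask(0x4, SUPPORTED_FLAGS));               /* -1 */
        return 0;
}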
@@ -1815,33 +1805,6 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
return 0;
}
-/* The semantics of the 'flags' argument in relation to 'change()'
- * requests, are interpreted following two rules (which are applied in
- * this order): (1) an omitted 'flags' argument is interpreted as
- * zero; (2) the 'flags' of a "running" taprio instance cannot be
- * changed.
- */
-static int taprio_new_flags(const struct nlattr *attr, u32 old,
- struct netlink_ext_ack *extack)
-{
- u32 new = 0;
-
- if (attr)
- new = nla_get_u32(attr);
-
- if (old != TAPRIO_FLAGS_INVALID && old != new) {
- NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
- return -EOPNOTSUPP;
- }
-
- if (!taprio_flags_valid(new)) {
- NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
- return -EINVAL;
- }
-
- return new;
-}
-
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -1852,6 +1815,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
struct net_device *dev = qdisc_dev(sch);
struct tc_mqprio_qopt *mqprio = NULL;
unsigned long flags;
+ u32 taprio_flags;
ktime_t start;
int i, err;
@@ -1863,12 +1827,28 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
- err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
- q->flags, extack);
- if (err < 0)
- return err;
+ /* The semantics of the 'flags' argument in relation to 'change()'
+ * requests, are interpreted following two rules (which are applied in
+ * this order): (1) an omitted 'flags' argument is interpreted as
+ * zero; (2) the 'flags' of a "running" taprio instance cannot be
+ * changed.
+ */
+ taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0;
- q->flags = err;
+ /* txtime-assist and full offload are mutually exclusive */
+ if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
+ (taprio_flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_TAPRIO_ATTR_FLAGS],
+ "TXTIME_ASSIST and FULL_OFFLOAD are mutually exclusive");
+ return -EINVAL;
+ }
+
+ if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing 'flags' of a running schedule is not supported");
+ return -EOPNOTSUPP;
+ }
+ q->flags = taprio_flags;
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
@@ -2548,6 +2528,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
.dump_stats = taprio_dump_stats,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("taprio");
static struct notifier_block taprio_device_notifier = {
.notifier_call = taprio_dev_notifier,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index dd6b1a723bf7..f1d09183ae63 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -608,6 +608,7 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
.dump = tbf_dump,
.owner = THIS_MODULE,
};
+MODULE_ALIAS_NET_SCH("tbf");
static int __init tbf_module_init(void)
{
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 7182c5a450fb..5c1652181805 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -38,6 +38,14 @@ void sctp_inq_init(struct sctp_inq *queue)
INIT_WORK(&queue->immediate, NULL);
}
+/* Properly release the chunk which is being worked on. */
+static inline void sctp_inq_chunk_free(struct sctp_chunk *chunk)
+{
+ if (chunk->head_skb)
+ chunk->skb = chunk->head_skb;
+ sctp_chunk_free(chunk);
+}
+
/* Release the memory associated with an SCTP inqueue. */
void sctp_inq_free(struct sctp_inq *queue)
{
@@ -53,7 +61,7 @@ void sctp_inq_free(struct sctp_inq *queue)
* free it as well.
*/
if (queue->in_progress) {
- sctp_chunk_free(queue->in_progress);
+ sctp_inq_chunk_free(queue->in_progress);
queue->in_progress = NULL;
}
}
@@ -130,9 +138,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
goto new_skb;
}
- if (chunk->head_skb)
- chunk->skb = chunk->head_skb;
- sctp_chunk_free(chunk);
+ sctp_inq_chunk_free(chunk);
chunk = queue->in_progress = NULL;
} else {
/* Nothing to do. Next chunk in the packet, please. */
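sctp_inq_chunk_free() exists because a chunk carved out of a GSO aggregate points its ->skb into the fragment list while ->head_skb owns the whole buffer; restoring head_skb before sctp_chunk_free() ensures the owning skb is the one released. A toy model with simplified stand-in types (not the kernel's structures; fragment lifetime is glossed over, as fragments are owned by the head buffer):

#include <stdlib.h>

struct skb { void *data; };

struct chunk {
	struct skb *skb;	/* may point into head_skb's fragment list */
	struct skb *head_skb;	/* owner of the whole GSO buffer, or NULL */
};

static void chunk_free(struct chunk *c)
{
	if (c->head_skb)
		c->skb = c->head_skb;	/* release the owner, not a fragment */
	free(c->skb);			/* stand-in for sctp_chunk_free() */
	free(c);
}

int main(void)
{
	struct chunk *c = calloc(1, sizeof(*c));

	c->head_skb = calloc(1, sizeof(*c->head_skb));
	c->skb = c->head_skb;	/* trivial case: no separate fragment */
	chunk_free(c);
	return 0;
}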
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 94c6dd53cd62..e849f368ed91 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1495,17 +1495,11 @@ static __init int sctp_init(void)
/* Allocate bind_bucket and chunk caches. */
status = -ENOBUFS;
- sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket",
- sizeof(struct sctp_bind_bucket),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_bucket_cachep = KMEM_CACHE(sctp_bind_bucket, SLAB_HWCACHE_ALIGN);
if (!sctp_bucket_cachep)
goto out;
- sctp_chunk_cachep = kmem_cache_create("sctp_chunk",
- sizeof(struct sctp_chunk),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
+ sctp_chunk_cachep = KMEM_CACHE(sctp_chunk, SLAB_HWCACHE_ALIGN);
if (!sctp_chunk_cachep)
goto err_chunk_cachep;
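KMEM_CACHE() is shorthand that derives the cache name, object size, and alignment from the struct itself, so the two conversions above are functionally equivalent here (the caches keep their "sctp_bind_bucket" and "sctp_chunk" names, since the old names matched the struct names). The helper in include/linux/slab.h is approximately:

#define KMEM_CACHE(__struct, __flags)					\
	kmem_cache_create(#__struct, sizeof(struct __struct),		\
			  __alignof__(struct __struct), (__flags), NULL)

The only behavioral nuance is the alignment argument changing from 0 to the struct's natural alignment, which is harmless under SLAB_HWCACHE_ALIGN.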
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6b9fcdb0952a..c67679a41044 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -67,6 +67,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
+#include <net/rps.h>
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index a2cb30af46cb..4b52b3b159c0 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -924,6 +924,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
smc->clcsock->file->private_data = smc->clcsock;
smc->clcsock->wq.fasync_list =
smc->sk.sk_socket->wq.fasync_list;
+ smc->sk.sk_socket->wq.fasync_list = NULL;
/* There might be some wait entries remaining
* in smc sk->sk_wq and they should be woken up
@@ -1045,7 +1046,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
int rc = SMC_CLC_DECL_NOSMCDDEV;
struct smcd_dev *smcd;
int i = 1, entry = 1;
- bool is_virtual;
+ bool is_emulated;
u16 chid;
if (smcd_indicated(ini->smc_type_v1))
@@ -1057,12 +1058,12 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
chid = smc_ism_get_chid(smcd);
if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
continue;
- is_virtual = __smc_ism_is_virtual(chid);
+ is_emulated = __smc_ism_is_emulated(chid);
if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
- if (is_virtual && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
+ if (is_emulated && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
/* It's the last GID-CHID entry left in CLC
- * Proposal SMC-Dv2 extension, but a virtual
+ * Proposal SMC-Dv2 extension, but an Emulated-
* ISM device will take two entries. So skip
* it and try the next potential ISM device.
*/
@@ -1072,7 +1073,7 @@ static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
ini->is_smcd = true;
rc = 0;
i++;
- entry = is_virtual ? entry + 2 : entry + 1;
+ entry = is_emulated ? entry + 2 : entry + 1;
if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
break;
}
@@ -1413,10 +1414,10 @@ static int smc_connect_ism(struct smc_sock *smc,
if (rc)
return rc;
- if (__smc_ism_is_virtual(ini->ism_chid[ini->ism_selected]))
+ if (__smc_ism_is_emulated(ini->ism_chid[ini->ism_selected]))
ini->ism_peer_gid[ini->ism_selected].gid_ext =
ntohll(aclc->d1.gid_ext);
- /* for non-virtual ISM devices, peer gid_ext remains 0. */
+ /* for non-Emulated-ISM devices, peer gid_ext remains 0. */
}
ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
@@ -2117,10 +2118,10 @@ static void smc_check_ism_v2_match(struct smc_init_info *ini,
if (smc_ism_get_chid(smcd) == proposed_chid &&
!smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
- if (__smc_ism_is_virtual(proposed_chid))
+ if (__smc_ism_is_emulated(proposed_chid))
ini->ism_peer_gid[*matches].gid_ext =
proposed_gid->gid_ext;
- /* non-virtual ISM's peer gid_ext remains 0. */
+ /* non-Emulated-ISM's peer gid_ext remains 0. */
ini->ism_dev[*matches] = smcd;
(*matches)++;
break;
@@ -2170,10 +2171,10 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
smcd_gid.gid_ext = 0;
chid = ntohs(smcd_v2_ext->gidchid[i].chid);
- if (__smc_ism_is_virtual(chid)) {
+ if (__smc_ism_is_emulated(chid)) {
if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
- /* each virtual ISM device takes two GID-CHID
+ /* each Emulated-ISM device takes two GID-CHID
* entries and CHID of the second entry repeats
* that of the first entry.
*
diff --git a/net/smc/smc.h b/net/smc/smc.h
index df64efd2dee8..18c8b7870198 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -56,11 +56,11 @@ enum smc_state { /* possible states of an SMC socket */
};
enum smc_supplemental_features {
- SMC_SPF_VIRT_ISM_DEV = 0,
+ SMC_SPF_EMULATED_ISM_DEV = 0,
};
#define SMC_FEATURE_MASK \
- (BIT(SMC_SPF_VIRT_ISM_DEV))
+ (BIT(SMC_SPF_EMULATED_ISM_DEV))
struct smc_link_group;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 9a13709bea1c..e55026c7529c 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -952,8 +952,8 @@ int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
gidchids[entry].chid =
htons(smc_ism_get_chid(ini->ism_dev[i]));
gidchids[entry].gid = htonll(smcd_gid.gid);
- if (smc_ism_is_virtual(smcd)) {
- /* a virtual ISM device takes two
+ if (smc_ism_is_emulated(smcd)) {
+ /* an Emulated-ISM device takes two
* entries. CHID of the second entry
* repeats that of the first entry.
*/
@@ -1055,7 +1055,7 @@ smcd_clc_prep_confirm_accept(struct smc_connection *conn,
clc->d1.chid = htons(chid);
if (eid && eid[0])
memcpy(clc->d1.eid, eid, SMC_MAX_EID_LEN);
- if (__smc_ism_is_virtual(chid))
+ if (__smc_ism_is_emulated(chid))
clc->d1.gid_ext = htonll(smcd_gid.gid_ext);
len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
if (first_contact) {
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index a9f9bdd26dcd..7cc7070b9772 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -175,7 +175,7 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
#define SMCD_CLC_MAX_V2_GID_ENTRIES 8 /* max # of CHID-GID entries in CLC
* proposal SMC-Dv2 extension.
* each ISM device takes one entry and
- * each virtual ISM takes two entries.
+ * each Emulated-ISM takes two entries.
*/
struct smc_clc_msg_proposal_area {
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 95cc95458e2d..9b84d5897aa5 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1535,7 +1535,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid,
list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
if ((!peer_gid->gid ||
(lgr->peer_gid.gid == peer_gid->gid &&
- !smc_ism_is_virtual(dev) ? 1 :
+ !smc_ism_is_emulated(dev) ? 1 :
lgr->peer_gid.gid_ext == peer_gid->gid_ext)) &&
(vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
if (peer_gid->gid) /* peer triggered termination */
@@ -1877,9 +1877,15 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
struct smcd_dev *smcismdev,
struct smcd_gid *peer_gid)
{
- return lgr->peer_gid.gid == peer_gid->gid && lgr->smcd == smcismdev &&
- smc_ism_is_virtual(smcismdev) ?
- (lgr->peer_gid.gid_ext == peer_gid->gid_ext) : 1;
+ if (lgr->peer_gid.gid != peer_gid->gid ||
+ lgr->smcd != smcismdev)
+ return false;
+
+ if (smc_ism_is_emulated(smcismdev) &&
+ lgr->peer_gid.gid_ext != peer_gid->gid_ext)
+ return false;
+
+ return true;
}
/* create a new SMC connection (and a new link group if necessary) */
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 32bad267fa3e..6fdb2d96777a 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -164,7 +164,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
}
if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd &&
(req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
- !list_empty(&smc->conn.lgr->list)) {
+ !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
struct smc_connection *conn = &smc->conn;
struct smcd_diag_dmbinfo dinfo;
struct smcd_dev *smcd = conn->lgr->smcd;
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index ffff40c30a06..165cd013404b 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -15,7 +15,7 @@
#include "smc.h"
-#define SMC_VIRTUAL_ISM_CHID_MASK 0xFF00
+#define SMC_EMULATED_ISM_CHID_MASK 0xFF00
#define SMC_ISM_IDENT_MASK 0x00FFFF
struct smcd_dev_list { /* List of SMCD devices */
@@ -66,10 +66,10 @@ static inline int smc_ism_write(struct smcd_dev *smcd, u64 dmb_tok,
return rc < 0 ? rc : 0;
}
-static inline bool __smc_ism_is_virtual(u16 chid)
+static inline bool __smc_ism_is_emulated(u16 chid)
{
/* CHIDs in range of 0xFF00 to 0xFFFF are reserved
- * for virtual ISM device.
+ * for Emulated-ISM device.
*
* loopback-ism: 0xFFFF
* virtio-ism: 0xFF00 ~ 0xFFFE
@@ -77,11 +77,11 @@ static inline bool __smc_ism_is_virtual(u16 chid)
return ((chid & 0xFF00) == 0xFF00);
}
-static inline bool smc_ism_is_virtual(struct smcd_dev *smcd)
+static inline bool smc_ism_is_emulated(struct smcd_dev *smcd)
{
u16 chid = smcd->ops->get_chid(smcd);
- return __smc_ism_is_virtual(chid);
+ return __smc_ism_is_emulated(chid);
}
#endif
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 9f2c58c5a86b..2adb92b8c469 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -806,6 +806,16 @@ static void smc_pnet_create_pnetids_list(struct net *net)
u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
struct net_device *dev;
+ /* Newly created netns do not have devices.
+ * Do not even acquire rtnl.
+ */
+ if (list_empty(&net->dev_base_head))
+ return;
+
+ /* Note: This might not be needed, because smc_pnet_netdev_event()
+ * is also calling smc_pnet_add_base_pnetid() when handling
+ * NETDEV_UP event.
+ */
rtnl_lock();
for_each_netdev(net, dev)
smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
diff --git a/net/socket.c b/net/socket.c
index ed3df2f749bf..7e9c8fc9a5b4 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -343,7 +343,7 @@ static void init_inodecache(void)
0,
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ SLAB_ACCOUNT),
init_once);
BUG_ON(sock_inode_cachep == NULL);
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index f60c93e5a25d..b969e505c7b7 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1598,10 +1598,10 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
/* Finally, send the reply synchronously */
if (rqstp->bc_to_initval > 0) {
timeout.to_initval = rqstp->bc_to_initval;
- timeout.to_retries = rqstp->bc_to_initval;
+ timeout.to_retries = rqstp->bc_to_retries;
} else {
timeout.to_initval = req->rq_xprt->timeout->to_initval;
- timeout.to_initval = req->rq_xprt->timeout->to_retries;
+ timeout.to_retries = req->rq_xprt->timeout->to_retries;
}
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req, &timeout);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index bfb2f78523a8..545017a3daa4 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -717,12 +717,12 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
ARRAY_SIZE(rqstp->rq_bvec), xdr);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
- count, 0);
+ count, rqstp->rq_res.len);
err = sock_sendmsg(svsk->sk_sock, &msg);
if (err == -ECONNREFUSED) {
/* ICMP error on earlier request. */
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
- count, 0);
+ count, rqstp->rq_res.len);
err = sock_sendmsg(svsk->sk_sock, &msg);
}
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 5b045284849e..c9189a970eec 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -19,6 +19,35 @@
#include <linux/rtnetlink.h>
#include <net/switchdev.h>
+static bool switchdev_obj_eq(const struct switchdev_obj *a,
+ const struct switchdev_obj *b)
+{
+ const struct switchdev_obj_port_vlan *va, *vb;
+ const struct switchdev_obj_port_mdb *ma, *mb;
+
+ if (a->id != b->id || a->orig_dev != b->orig_dev)
+ return false;
+
+ switch (a->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ va = SWITCHDEV_OBJ_PORT_VLAN(a);
+ vb = SWITCHDEV_OBJ_PORT_VLAN(b);
+ return va->flags == vb->flags &&
+ va->vid == vb->vid &&
+ va->changed == vb->changed;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ ma = SWITCHDEV_OBJ_PORT_MDB(a);
+ mb = SWITCHDEV_OBJ_PORT_MDB(b);
+ return ma->vid == mb->vid &&
+ ether_addr_equal(ma->addr, mb->addr);
+ default:
+ break;
+ }
+
+ BUG();
+}
+
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);
@@ -307,6 +336,50 @@ int switchdev_port_obj_del(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
+/**
+ * switchdev_port_obj_act_is_deferred - Is object action pending?
+ *
+ * @dev: port device
+ * @nt: type of action; add or delete
+ * @obj: object to test
+ *
+ * Returns true if a deferred item equivalent to performing
+ * action @nt on object @obj is still pending.
+ *
+ * rtnl_lock must be held.
+ */
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+ enum switchdev_notifier_type nt,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_deferred_item *dfitem;
+ bool found = false;
+
+ ASSERT_RTNL();
+
+ spin_lock_bh(&deferred_lock);
+
+ list_for_each_entry(dfitem, &deferred, list) {
+ if (dfitem->dev != dev)
+ continue;
+
+ if ((dfitem->func == switchdev_port_obj_add_deferred &&
+ nt == SWITCHDEV_PORT_OBJ_ADD) ||
+ (dfitem->func == switchdev_port_obj_del_deferred &&
+ nt == SWITCHDEV_PORT_OBJ_DEL)) {
+ if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&deferred_lock);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
+
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
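switchdev_port_obj_act_is_deferred() answers "is an equivalent add/del of this object already queued?" by walking the deferred list under its spinlock and comparing the queued item's handler (add vs. del) plus the object fields switchdev_obj_eq() treats as identity. A condensed userspace sketch with hypothetical stand-in types, keeping only one identity field per object:

#include <stdbool.h>
#include <stddef.h>

enum act { OBJ_ADD, OBJ_DEL };

struct deferred {		/* stand-in for switchdev_deferred_item */
	const void *dev;
	enum act act;		/* which handler was queued */
	int obj_id;		/* e.g. PORT_VLAN vs. PORT_MDB */
	unsigned short vid;	/* identity field per object type */
	struct deferred *next;
};

static bool act_is_deferred(const struct deferred *q, const void *dev,
			    enum act act, int obj_id, unsigned short vid)
{
	for (; q; q = q->next)	/* the kernel walks this under deferred_lock */
		if (q->dev == dev && q->act == act &&
		    q->obj_id == obj_id && q->vid == vid)
			return true;
	return false;
}

int main(void)
{
	struct deferred d = { .dev = (void *)1, .act = OBJ_DEL,
			      .obj_id = 0, .vid = 10, .next = NULL };

	return !act_is_deferred(&d, (void *)1, OBJ_DEL, 0, 10);	/* exit 0 */
}

The BUG() in switchdev_obj_eq() is deliberate: callers may only ask about object types the comparison knows how to identify.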
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index be1c4003d67d..bb0d71eb02a6 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -32,16 +32,17 @@ config TIPC_MEDIA_UDP
bool "IP/UDP media type support"
depends on TIPC
select NET_UDP_TUNNEL
+ default y
help
Saying Y here will enable support for running TIPC over IP/UDP
- bool
- default y
+
config TIPC_CRYPTO
bool "TIPC encryption support"
depends on TIPC
select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
+ default y
help
Saying Y here will enable support for TIPC encryption.
All TIPC messages will be encrypted/decrypted by using the currently most
@@ -49,8 +50,6 @@ config TIPC_CRYPTO
entering the TIPC stack.
Key setting from user-space is performed via netlink by a user program
(e.g. the iproute2 'tipc' tool).
- bool
- default y
config TIPC_DIAG
tristate "TIPC: socket monitoring interface"
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index ee49a9f1dd4f..18e1636aa036 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -18,5 +18,5 @@ tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
-
-obj-$(CONFIG_TIPC_DIAG) += diag.o
+obj-$(CONFIG_TIPC_DIAG) += tipc_diag.o
+tipc_diag-y += diag.o
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 2cde375477e3..5a526ebafeb4 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1079,24 +1079,27 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
b = tipc_bearer_find(net, name);
if (!b) {
- rtnl_unlock();
NL_SET_ERR_MSG(info->extack, "Bearer not found");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
+ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
+ NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
+ err = -EINVAL;
+ goto out;
+ }
+
err = tipc_udp_nl_bearer_add(b,
attrs[TIPC_NLA_BEARER_UDP_OPTS]);
- if (err) {
- rtnl_unlock();
- return err;
- }
}
#endif
+out:
rtnl_unlock();
- return 0;
+ return err;
}
int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 1c2c6800949d..b4674f03d71a 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -1003,7 +1003,7 @@ static u16 tls_user_config(struct tls_context *ctx, bool tx)
return 0;
}
-static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
+static int tls_get_info(struct sock *sk, struct sk_buff *skb)
{
u16 version, cipher_type;
struct tls_context *ctx;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 31e8a94dfc11..211f57164cb6 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -52,6 +52,7 @@ struct tls_decrypt_arg {
struct_group(inargs,
bool zc;
bool async;
+ bool async_done;
u8 tail;
);
@@ -63,6 +64,7 @@ struct tls_decrypt_ctx {
u8 iv[TLS_MAX_IV_SIZE];
u8 aad[TLS_MAX_AAD_SIZE];
u8 tail;
+ bool free_sgout;
struct scatterlist sg[];
};
@@ -187,7 +189,6 @@ static void tls_decrypt_done(void *data, int err)
struct aead_request *aead_req = data;
struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
struct scatterlist *sgout = aead_req->dst;
- struct scatterlist *sgin = aead_req->src;
struct tls_sw_context_rx *ctx;
struct tls_decrypt_ctx *dctx;
struct tls_context *tls_ctx;
@@ -196,6 +197,17 @@ static void tls_decrypt_done(void *data, int err)
struct sock *sk;
int aead_size;
+ /* If requests get too backlogged, the crypto API returns -EBUSY and
+ * calls ->complete(-EINPROGRESS) immediately, followed by ->complete(0),
+ * to make waiting for the backlog to flush with crypto_wait_req() easier.
+ * The first wait converts -EBUSY -> -EINPROGRESS, and the second one
+ * -EINPROGRESS -> 0.
+ * We have a single struct crypto_async_request per direction, so this
+ * scheme doesn't help us; just ignore the first ->complete().
+ */
+ if (err == -EINPROGRESS)
+ return;
+
aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
aead_size = ALIGN(aead_size, __alignof__(*dctx));
dctx = (void *)((u8 *)aead_req + aead_size);
@@ -213,7 +225,7 @@ static void tls_decrypt_done(void *data, int err)
}
/* Free the destination pages if skb was not decrypted inplace */
- if (sgout != sgin) {
+ if (dctx->free_sgout) {
/* Skip the first S/G entry as it points to AAD */
for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
if (!sg)
@@ -224,10 +236,17 @@ static void tls_decrypt_done(void *data, int err)
kfree(aead_req);
- spin_lock_bh(&ctx->decrypt_compl_lock);
- if (!atomic_dec_return(&ctx->decrypt_pending))
+ if (atomic_dec_and_test(&ctx->decrypt_pending))
complete(&ctx->async_wait.completion);
- spin_unlock_bh(&ctx->decrypt_compl_lock);
+}
+
+static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
+{
+ if (!atomic_dec_and_test(&ctx->decrypt_pending))
+ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ atomic_inc(&ctx->decrypt_pending);
+
+ return ctx->async_wait.err;
}
static int tls_do_decryption(struct sock *sk,
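The backlog rule that comment describes can be modeled standalone: a request accepted into the backlog (submit returned -EBUSY) completes twice, first with -EINPROGRESS when it leaves the backlog and then with its real status, and TLS simply ignores the first call. A minimal sketch of that callback discipline:

#include <errno.h>
#include <stdio.h>

static int done_calls;

static void decrypt_done(void *data, int err)
{
	(void)data;
	if (err == -EINPROGRESS)	/* left the backlog; final call follows */
		return;
	done_calls++;			/* only the real completion counts */
}

int main(void)
{
	/* a backlogged request completes twice */
	decrypt_done(NULL, -EINPROGRESS);
	decrypt_done(NULL, 0);
	printf("completions counted: %d\n", done_calls);	/* 1 */
	return 0;
}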
@@ -253,20 +272,33 @@ static int tls_do_decryption(struct sock *sk,
aead_request_set_callback(aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
tls_decrypt_done, aead_req);
+ DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
atomic_inc(&ctx->decrypt_pending);
} else {
+ DECLARE_CRYPTO_WAIT(wait);
+
aead_request_set_callback(aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &ctx->async_wait);
+ crypto_req_done, &wait);
+ ret = crypto_aead_decrypt(aead_req);
+ if (ret == -EINPROGRESS || ret == -EBUSY)
+ ret = crypto_wait_req(ret, &wait);
+ return ret;
}
ret = crypto_aead_decrypt(aead_req);
- if (ret == -EINPROGRESS) {
- if (darg->async)
- return 0;
+ if (ret == -EINPROGRESS)
+ return 0;
- ret = crypto_wait_req(ret, &ctx->async_wait);
+ if (ret == -EBUSY) {
+ ret = tls_decrypt_async_wait(ctx);
+ darg->async_done = true;
+ /* all completions have run, we're not doing async anymore */
+ darg->async = false;
+ return ret;
}
+
+ atomic_dec(&ctx->decrypt_pending);
darg->async = false;
return ret;
@@ -439,9 +471,10 @@ static void tls_encrypt_done(void *data, int err)
struct tls_rec *rec = data;
struct scatterlist *sge;
struct sk_msg *msg_en;
- bool ready = false;
struct sock *sk;
- int pending;
+
+ if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
+ return;
msg_en = &rec->msg_encrypted;
@@ -476,23 +509,25 @@ static void tls_encrypt_done(void *data, int err)
/* If received record is at head of tx_list, schedule tx */
first_rec = list_first_entry(&ctx->tx_list,
struct tls_rec, list);
- if (rec == first_rec)
- ready = true;
+ if (rec == first_rec) {
+ /* Schedule the transmission */
+ if (!test_and_set_bit(BIT_TX_SCHEDULED,
+ &ctx->tx_bitmask))
+ schedule_delayed_work(&ctx->tx_work.work, 1);
+ }
}
- spin_lock_bh(&ctx->encrypt_compl_lock);
- pending = atomic_dec_return(&ctx->encrypt_pending);
-
- if (!pending && ctx->async_notify)
+ if (atomic_dec_and_test(&ctx->encrypt_pending))
complete(&ctx->async_wait.completion);
- spin_unlock_bh(&ctx->encrypt_compl_lock);
+}
- if (!ready)
- return;
+static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
+{
+ if (!atomic_dec_and_test(&ctx->encrypt_pending))
+ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ atomic_inc(&ctx->encrypt_pending);
- /* Schedule the transmission */
- if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
- schedule_delayed_work(&ctx->tx_work.work, 1);
+ return ctx->async_wait.err;
}
static int tls_do_encryption(struct sock *sk,
@@ -541,9 +576,14 @@ static int tls_do_encryption(struct sock *sk,
/* Add the record in tx_list */
list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
+ DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
atomic_inc(&ctx->encrypt_pending);
rc = crypto_aead_encrypt(aead_req);
+ if (rc == -EBUSY) {
+ rc = tls_encrypt_async_wait(ctx);
+ rc = rc ?: -EINPROGRESS;
+ }
if (!rc || rc != -EINPROGRESS) {
atomic_dec(&ctx->encrypt_pending);
sge->offset -= prot->prepend_size;
@@ -984,7 +1024,6 @@ static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
int num_zc = 0;
int orig_size;
int ret = 0;
- int pending;
if (!eor && (msg->msg_flags & MSG_EOR))
return -EINVAL;
@@ -1163,24 +1202,12 @@ trim_sgl:
if (!num_async) {
goto send_end;
} else if (num_zc) {
- /* Wait for pending encryptions to get completed */
- spin_lock_bh(&ctx->encrypt_compl_lock);
- ctx->async_notify = true;
-
- pending = atomic_read(&ctx->encrypt_pending);
- spin_unlock_bh(&ctx->encrypt_compl_lock);
- if (pending)
- crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
- else
- reinit_completion(&ctx->async_wait.completion);
-
- /* There can be no concurrent accesses, since we have no
- * pending encrypt operations
- */
- WRITE_ONCE(ctx->async_notify, false);
+ int err;
- if (ctx->async_wait.err) {
- ret = ctx->async_wait.err;
+ /* Wait for pending encryptions to get completed */
+ err = tls_encrypt_async_wait(ctx);
+ if (err) {
+ ret = err;
copied = 0;
}
}
@@ -1229,7 +1256,6 @@ void tls_sw_splice_eof(struct socket *sock)
ssize_t copied = 0;
bool retrying = false;
int ret = 0;
- int pending;
if (!ctx->open_rec)
return;
@@ -1264,22 +1290,7 @@ retry:
}
/* Wait for pending encryptions to get completed */
- spin_lock_bh(&ctx->encrypt_compl_lock);
- ctx->async_notify = true;
-
- pending = atomic_read(&ctx->encrypt_pending);
- spin_unlock_bh(&ctx->encrypt_compl_lock);
- if (pending)
- crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
- else
- reinit_completion(&ctx->async_wait.completion);
-
- /* There can be no concurrent accesses, since we have no pending
- * encrypt operations
- */
- WRITE_ONCE(ctx->async_notify, false);
-
- if (ctx->async_wait.err)
+ if (tls_encrypt_async_wait(ctx))
goto unlock;
/* Transmit if any encryptions have completed */
@@ -1581,12 +1592,16 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
} else if (out_sg) {
memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
}
+ dctx->free_sgout = !!pages;
/* Prepare and submit AEAD request */
err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
data_len + prot->tail_size, aead_req, darg);
- if (err)
+ if (err) {
+ if (darg->async_done)
+ goto exit_free_skb;
goto exit_free_pages;
+ }
darg->skb = clear_skb ?: tls_strp_msg(ctx);
clear_skb = NULL;
@@ -1598,6 +1613,9 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
return err;
}
+ if (unlikely(darg->async_done))
+ return 0;
+
if (prot->tail_size)
darg->tail = dctx->tail;
@@ -1769,7 +1787,8 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
u8 *control,
size_t skip,
size_t len,
- bool is_peek)
+ bool is_peek,
+ bool *more)
{
struct sk_buff *skb = skb_peek(&ctx->rx_list);
struct tls_msg *tlm;
@@ -1782,7 +1801,7 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
err = tls_record_content_type(msg, tlm, control);
if (err <= 0)
- goto out;
+ goto more;
if (skip < rxm->full_len)
break;
@@ -1800,12 +1819,12 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
err = tls_record_content_type(msg, tlm, control);
if (err <= 0)
- goto out;
+ goto more;
err = skb_copy_datagram_msg(skb, rxm->offset + skip,
msg, chunk);
if (err < 0)
- goto out;
+ goto more;
len = len - chunk;
copied = copied + chunk;
@@ -1841,6 +1860,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
out:
return copied ? : err;
+more:
+ if (more)
+ *more = true;
+ goto out;
}
static bool
@@ -1940,10 +1963,12 @@ int tls_sw_recvmsg(struct sock *sk,
struct strp_msg *rxm;
struct tls_msg *tlm;
ssize_t copied = 0;
+ ssize_t peeked = 0;
bool async = false;
int target, err;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
+ bool rx_more = false;
bool released = true;
bool bpf_strp_enabled;
bool zc_capable;
@@ -1963,12 +1988,12 @@ int tls_sw_recvmsg(struct sock *sk,
goto end;
/* Process pending decrypted records. It must be non-zero-copy */
- err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
+ err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
if (err < 0)
goto end;
copied = err;
- if (len <= copied)
+ if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
@@ -2061,6 +2086,8 @@ put_on_rx_list:
decrypted += chunk;
len -= chunk;
__skb_queue_tail(&ctx->rx_list, skb);
+ if (unlikely(control != TLS_RECORD_TYPE_DATA))
+ break;
continue;
}
@@ -2084,8 +2111,10 @@ put_on_rx_list:
if (err < 0)
goto put_on_rx_list_err;
- if (is_peek)
+ if (is_peek) {
+ peeked += chunk;
goto put_on_rx_list;
+ }
if (partially_consumed) {
rxm->offset += chunk;
@@ -2109,16 +2138,10 @@ put_on_rx_list:
recv_end:
if (async) {
- int ret, pending;
+ int ret;
/* Wait for all previously submitted records to be decrypted */
- spin_lock_bh(&ctx->decrypt_compl_lock);
- reinit_completion(&ctx->async_wait.completion);
- pending = atomic_read(&ctx->decrypt_pending);
- spin_unlock_bh(&ctx->decrypt_compl_lock);
- ret = 0;
- if (pending)
- ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ ret = tls_decrypt_async_wait(ctx);
__skb_queue_purge(&ctx->async_hold);
if (ret) {
@@ -2130,12 +2153,11 @@ recv_end:
/* Drain records from the rx_list & copy if required */
if (is_peek || is_kvec)
- err = process_rx_list(ctx, msg, &control, copied,
- decrypted, is_peek);
+ err = process_rx_list(ctx, msg, &control, copied + peeked,
+ decrypted - peeked, is_peek, NULL);
else
err = process_rx_list(ctx, msg, &control, 0,
- async_copy_bytes, is_peek);
- decrypted += max(err, 0);
+ async_copy_bytes, is_peek, NULL);
}
copied += decrypted;
@@ -2435,16 +2457,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec, *tmp;
- int pending;
/* Wait for any pending async encryptions to complete */
- spin_lock_bh(&ctx->encrypt_compl_lock);
- ctx->async_notify = true;
- pending = atomic_read(&ctx->encrypt_pending);
- spin_unlock_bh(&ctx->encrypt_compl_lock);
-
- if (pending)
- crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ tls_encrypt_async_wait(ctx);
tls_tx_records(sk, -1);
@@ -2607,7 +2622,7 @@ static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct soc
}
crypto_init_wait(&sw_ctx_tx->async_wait);
- spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
+ atomic_set(&sw_ctx_tx->encrypt_pending, 1);
INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
sw_ctx_tx->tx_work.sk = sk;
@@ -2628,7 +2643,7 @@ static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
}
crypto_init_wait(&sw_ctx_rx->async_wait);
- spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
+ atomic_set(&sw_ctx_rx->decrypt_pending, 1);
init_waitqueue_head(&sw_ctx_rx->wq);
skb_queue_head_init(&sw_ctx_rx->rx_list);
skb_queue_head_init(&sw_ctx_rx->async_hold);
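The atomic_set(..., 1) initializations above are the heart of the new completion scheme: encrypt_pending/decrypt_pending carry a permanent +1 bias, submitters increment per request, completions use atomic_dec_and_test(), and a waiter briefly drops the bias so the counter can only reach zero while someone is actually collecting. A single-threaded C11 walk-through of the counter moves (the real code sleeps via crypto_wait_req() instead of printing):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending = 1;	/* the +1 bias: "no waiter collecting" */

static void submit(void)
{
	atomic_fetch_add(&pending, 1);	/* one per in-flight crypto request */
}

static bool complete_one(void)	/* returns true if the waiter must wake */
{
	return atomic_fetch_sub(&pending, 1) == 1;	/* dec_and_test */
}

static void wait_all(void)
{
	if (atomic_fetch_sub(&pending, 1) != 1)	/* drop the bias */
		puts("would crypto_wait_req() until the last completion");
	atomic_fetch_add(&pending, 1);		/* restore the bias */
}

int main(void)
{
	submit();	/* pending: 2 */
	complete_one();	/* pending: 1 -- bias still held, no wake needed */
	wait_all();	/* 1 -> 0: nothing outstanding, no sleep */
	return 0;
}

This removes the encrypt_compl_lock/decrypt_compl_lock spinlocks and the async_notify flag: the bias guarantees a completion can never observe zero unless a waiter is parked, so no extra synchronization is needed.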
diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 28b232f281ab..8b5d04210d7c 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -16,11 +16,6 @@ config UNIX
Say Y unless you know what you are doing.
-config UNIX_SCM
- bool
- depends on UNIX
- default y
-
config AF_UNIX_OOB
bool
depends on UNIX
diff --git a/net/unix/Makefile b/net/unix/Makefile
index 20491825b4d0..4ddd125c4642 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -11,5 +11,3 @@ unix-$(CONFIG_BPF_SYSCALL) += unix_bpf.o
obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
unix_diag-y := diag.o
-
-obj-$(CONFIG_UNIX_SCM) += scm.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ac1f2bc18fc9..5b41e2321209 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -118,8 +118,6 @@
#include <linux/btf_ids.h>
#include <linux/bpf-cgroup.h>
-#include "scm.h"
-
static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];
@@ -782,19 +780,6 @@ static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
int);
-static int unix_set_peek_off(struct sock *sk, int val)
-{
- struct unix_sock *u = unix_sk(sk);
-
- if (mutex_lock_interruptible(&u->iolock))
- return -EINTR;
-
- WRITE_ONCE(sk->sk_peek_off, val);
- mutex_unlock(&u->iolock);
-
- return 0;
-}
-
#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
@@ -862,7 +847,7 @@ static const struct proto_ops unix_stream_ops = {
.read_skb = unix_stream_read_skb,
.mmap = sock_no_mmap,
.splice_read = unix_stream_splice_read,
- .set_peek_off = unix_set_peek_off,
+ .set_peek_off = sk_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
@@ -886,7 +871,7 @@ static const struct proto_ops unix_dgram_ops = {
.read_skb = unix_read_skb,
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
- .set_peek_off = unix_set_peek_off,
+ .set_peek_off = sk_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
@@ -909,7 +894,7 @@ static const struct proto_ops unix_seqpacket_ops = {
.sendmsg = unix_seqpacket_sendmsg,
.recvmsg = unix_seqpacket_recvmsg,
.mmap = sock_no_mmap,
- .set_peek_off = unix_set_peek_off,
+ .set_peek_off = sk_set_peek_off,
.show_fdinfo = unix_show_fdinfo,
};
@@ -993,11 +978,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
- u = unix_sk(sk);
+ u = unix_sk(sk);
+ u->inflight = 0;
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
- atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
@@ -1344,13 +1329,11 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
unix_state_lock(sk1);
return;
}
- if (sk1 < sk2) {
- unix_state_lock(sk1);
- unix_state_lock_nested(sk2);
- } else {
- unix_state_lock(sk2);
- unix_state_lock_nested(sk1);
- }
+ if (sk1 > sk2)
+ swap(sk1, sk2);
+
+ unix_state_lock(sk1);
+ unix_state_lock_nested(sk2, U_LOCK_SECOND);
}
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
@@ -1591,7 +1574,7 @@ restart:
goto out_unlock;
}
- unix_state_lock_nested(sk);
+ unix_state_lock_nested(sk, U_LOCK_SECOND);
if (sk->sk_state != st) {
unix_state_unlock(sk);
@@ -1790,6 +1773,52 @@ out:
return err;
}
+/* The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+ struct user_struct *user = current_user();
+
+ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
+ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ return false;
+}
+
+static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+
+ if (too_many_unix_fds(current))
+ return -ETOOMANYREFS;
+
+ /* Need to duplicate file references for the sake of garbage
+ * collection. Otherwise a socket in the fps might become a
+ * candidate for GC while the skb is not yet queued.
+ */
+ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+ if (!UNIXCB(skb).fp)
+ return -ENOMEM;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
+
+ return 0;
+}
+
+static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+
+ scm->fp = UNIXCB(skb).fp;
+ UNIXCB(skb).fp = NULL;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+}
+
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->fp = scm_fp_dup(UNIXCB(skb).fp);
@@ -1837,6 +1866,21 @@ static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
spin_unlock(&unix_gc_lock);
}
+static void unix_destruct_scm(struct sk_buff *skb)
+{
+ struct scm_cookie scm;
+
+ memset(&scm, 0, sizeof(scm));
+ scm.pid = UNIXCB(skb).pid;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
+
+ /* Alas, it calls VFS */
+ /* So fscking what? fput() had been SMP-safe since the last Summer */
+ scm_destroy(&scm);
+ sock_wfree(skb);
+}
+
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
@@ -1923,11 +1967,12 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
long timeo;
int err;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
+ wait_for_unix_gc(scm.fp);
+
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
@@ -2199,11 +2244,12 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
bool fds_sent = false;
int data_len;
- wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
+ wait_for_unix_gc(scm.fp);
+
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
diff --git a/net/unix/diag.c b/net/unix/diag.c
index c3648b706509..ae39538c5042 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -84,7 +84,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
* queue lock. With the other's queue locked it's
* OK to lock the state.
*/
- unix_state_lock_nested(req);
+ unix_state_lock_nested(req, U_LOCK_DIAG);
peer = unix_sk(req)->peer;
buf[i++] = (peer ? sock_i_ino(peer) : 0);
unix_state_unlock(req);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2405f0f9af31..fa39b6265238 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -81,12 +81,80 @@
#include <net/scm.h>
#include <net/tcp_states.h>
-#include "scm.h"
+struct unix_sock *unix_get_socket(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+
+ /* Socket ? */
+ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+ struct socket *sock = SOCKET_I(inode);
+ const struct proto_ops *ops;
+ struct sock *sk = sock->sk;
+
+ ops = READ_ONCE(sock->ops);
-/* Internal data structures and random procedures: */
+ /* PF_UNIX ? */
+ if (sk && ops && ops->family == PF_UNIX)
+ return unix_sk(sk);
+ }
+
+ return NULL;
+}
+DEFINE_SPINLOCK(unix_gc_lock);
+unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
-static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
+static LIST_HEAD(gc_inflight_list);
+
+/* Keep an in-flight count for the file descriptor if it
+ * refers to an AF_UNIX socket.
+ */
+void unix_inflight(struct user_struct *user, struct file *filp)
+{
+ struct unix_sock *u = unix_get_socket(filp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (u) {
+ if (!u->inflight) {
+ WARN_ON_ONCE(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ WARN_ON_ONCE(list_empty(&u->link));
+ }
+ u->inflight++;
+
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
+ }
+
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
+
+ spin_unlock(&unix_gc_lock);
+}
+
+void unix_notinflight(struct user_struct *user, struct file *filp)
+{
+ struct unix_sock *u = unix_get_socket(filp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (u) {
+ WARN_ON_ONCE(!u->inflight);
+ WARN_ON_ONCE(list_empty(&u->link));
+
+ u->inflight--;
+ if (!u->inflight)
+ list_del_init(&u->link);
+
+ /* Paired with READ_ONCE() in wait_for_unix_gc() */
+ WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
+ }
+
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
+
+ spin_unlock(&unix_gc_lock);
+}
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
@@ -105,20 +173,15 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
while (nfd--) {
/* Get the socket the fd matches if it indeed does so */
- struct sock *sk = unix_get_socket(*fp++);
-
- if (sk) {
- struct unix_sock *u = unix_sk(sk);
+ struct unix_sock *u = unix_get_socket(*fp++);
- /* Ignore non-candidates, they could
- * have been added to the queues after
- * starting the garbage collection
- */
- if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
- hit = true;
+ /* Ignore non-candidates; they could have been added
+ * to the queues after starting the garbage collection
+ */
+ if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
+ hit = true;
- func(u);
- }
+ func(u);
}
}
if (hit && hitlist != NULL) {
@@ -151,7 +214,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
/* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
- BUG_ON(!list_empty(&u->link));
+ WARN_ON_ONCE(!list_empty(&u->link));
list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);
@@ -166,17 +229,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
static void dec_inflight(struct unix_sock *usk)
{
- atomic_long_dec(&usk->inflight);
+ usk->inflight--;
}
static void inc_inflight(struct unix_sock *usk)
{
- atomic_long_inc(&usk->inflight);
+ usk->inflight++;
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
- atomic_long_inc(&u->inflight);
+ u->inflight++;
+
/* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
@@ -186,40 +250,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
}
static bool gc_in_progress;
-#define UNIX_INFLIGHT_TRIGGER_GC 16000
-
-void wait_for_unix_gc(void)
-{
- /* If number of inflight sockets is insane,
- * force a garbage collect right now.
- * Paired with the WRITE_ONCE() in unix_inflight(),
- * unix_notinflight() and gc_in_progress().
- */
- if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
- !READ_ONCE(gc_in_progress))
- unix_gc();
- wait_event(unix_gc_wait, gc_in_progress == false);
-}
-/* The external entry point: unix_gc() */
-void unix_gc(void)
+static void __unix_gc(struct work_struct *work)
{
- struct sk_buff *next_skb, *skb;
- struct unix_sock *u;
- struct unix_sock *next;
struct sk_buff_head hitlist;
- struct list_head cursor;
+ struct unix_sock *u, *next;
LIST_HEAD(not_cycle_list);
+ struct list_head cursor;
spin_lock(&unix_gc_lock);
- /* Avoid a recursive GC. */
- if (gc_in_progress)
- goto out;
-
- /* Paired with READ_ONCE() in wait_for_unix_gc(). */
- WRITE_ONCE(gc_in_progress, true);
-
/* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
@@ -237,14 +277,12 @@ void unix_gc(void)
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
- long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
- inflight_refs = atomic_long_read(&u->inflight);
- BUG_ON(inflight_refs < 1);
- BUG_ON(total_refs < inflight_refs);
- if (total_refs == inflight_refs) {
+ WARN_ON_ONCE(!u->inflight);
+ WARN_ON_ONCE(total_refs < u->inflight);
+ if (total_refs == u->inflight) {
list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
@@ -271,7 +309,7 @@ void unix_gc(void)
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
- if (atomic_long_read(&u->inflight) > 0) {
+ if (u->inflight) {
list_move_tail(&u->link, &not_cycle_list);
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
scan_children(&u->sk, inc_inflight_move_tail, NULL);
@@ -284,9 +322,17 @@ void unix_gc(void)
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link)
+ list_for_each_entry(u, &gc_candidates, link) {
scan_children(&u->sk, inc_inflight, &hitlist);
+#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ if (u->oob_skb) {
+ kfree_skb(u->oob_skb);
+ u->oob_skb = NULL;
+ }
+#endif
+ }
+
/* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
@@ -298,38 +344,50 @@ void unix_gc(void)
spin_unlock(&unix_gc_lock);
- /* We need io_uring to clean its registered files, ignore all io_uring
- * originated skbs. It's fine as io_uring doesn't keep references to
- * other io_uring instances and so killing all other files in the cycle
- * will put all io_uring references forcing it to go through normal
- * release.path eventually putting registered files.
- */
- skb_queue_walk_safe(&hitlist, skb, next_skb) {
- if (skb->destructor == io_uring_destruct_scm) {
- __skb_unlink(skb, &hitlist);
- skb_queue_tail(&skb->sk->sk_receive_queue, skb);
- }
- }
-
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
- /* There could be io_uring registered files, just push them back to
- * the inflight list
- */
- list_for_each_entry_safe(u, next, &gc_candidates, link)
- list_move_tail(&u->link, &gc_inflight_list);
-
/* All candidates should have been detached by now. */
- BUG_ON(!list_empty(&gc_candidates));
+ WARN_ON_ONCE(!list_empty(&gc_candidates));
/* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, false);
- wake_up(&unix_gc_wait);
-
- out:
spin_unlock(&unix_gc_lock);
}
+
+static DECLARE_WORK(unix_gc_work, __unix_gc);
+
+void unix_gc(void)
+{
+ WRITE_ONCE(gc_in_progress, true);
+ queue_work(system_unbound_wq, &unix_gc_work);
+}
+
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
+#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
+
+void wait_for_unix_gc(struct scm_fp_list *fpl)
+{
+ /* If the number of inflight sockets is insane,
+ * force a garbage collect right now.
+ *
+ * Paired with the WRITE_ONCE() in unix_inflight(),
+ * unix_notinflight(), and __unix_gc().
+ */
+ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
+ !READ_ONCE(gc_in_progress))
+ unix_gc();
+
+ /* Penalise users who want to send AF_UNIX sockets
+ * but whose sockets have not been received yet.
+ */
+ if (!fpl || !fpl->count_unix ||
+ READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
+ return;
+
+ if (READ_ONCE(gc_in_progress))
+ flush_work(&unix_gc_work);
+}
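The reworked wait_for_unix_gc() throttles on two levels: any sender may kick an asynchronous collection once the global in-flight count passes 16000, but only senders who themselves hold many unreceived fds in flight are made to block on it. A sketch of that decision, with hypothetical stand-in helpers in place of queue_work()/flush_work() (SCM_MAX_FD is 253 in the kernel, so the per-user threshold is 2024):

#include <stdbool.h>
#include <stdio.h>

#define TRIGGER_GC	16000		/* UNIX_INFLIGHT_TRIGGER_GC */
#define SANE_USER	(253 * 8)	/* UNIX_INFLIGHT_SANE_USER */

static void kick_gc(void) { puts("gc queued"); }	/* stand-in: queue_work() */
static void wait_gc(void) { puts("gc flushed"); }	/* stand-in: flush_work() */

static void throttle(unsigned int tot_inflight, unsigned int user_inflight,
		     unsigned int fds_in_msg, bool gc_running)
{
	if (tot_inflight > TRIGGER_GC && !gc_running)
		kick_gc();		/* async; the sender does not block */

	if (!fds_in_msg || user_inflight < SANE_USER)
		return;			/* well-behaved sender: no penalty */

	if (gc_running)
		wait_gc();		/* heavy user pays for the GC */
}

int main(void)
{
	throttle(20000, 5000, 2, true);	/* penalized sender waits */
	return 0;
}

Moving the collection to system_unbound_wq is what makes the first branch non-blocking; the old code made every sender over the trigger wait for the whole GC.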
diff --git a/net/unix/scm.c b/net/unix/scm.c
deleted file mode 100644
index 822ce0d0d791..000000000000
--- a/net/unix/scm.c
+++ /dev/null
@@ -1,159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/net.h>
-#include <linux/fs.h>
-#include <net/af_unix.h>
-#include <net/scm.h>
-#include <linux/init.h>
-#include <linux/io_uring.h>
-
-#include "scm.h"
-
-unsigned int unix_tot_inflight;
-EXPORT_SYMBOL(unix_tot_inflight);
-
-LIST_HEAD(gc_inflight_list);
-EXPORT_SYMBOL(gc_inflight_list);
-
-DEFINE_SPINLOCK(unix_gc_lock);
-EXPORT_SYMBOL(unix_gc_lock);
-
-struct sock *unix_get_socket(struct file *filp)
-{
- struct sock *u_sock = NULL;
- struct inode *inode = file_inode(filp);
-
- /* Socket ? */
- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
- struct socket *sock = SOCKET_I(inode);
- const struct proto_ops *ops = READ_ONCE(sock->ops);
- struct sock *s = sock->sk;
-
- /* PF_UNIX ? */
- if (s && ops && ops->family == PF_UNIX)
- u_sock = s;
- }
-
- return u_sock;
-}
-EXPORT_SYMBOL(unix_get_socket);
-
-/* Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
- */
-void unix_inflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- if (atomic_long_inc_return(&u->inflight) == 1) {
- BUG_ON(!list_empty(&u->link));
- list_add_tail(&u->link, &gc_inflight_list);
- } else {
- BUG_ON(list_empty(&u->link));
- }
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
- spin_unlock(&unix_gc_lock);
-}
-
-void unix_notinflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- BUG_ON(!atomic_long_read(&u->inflight));
- BUG_ON(list_empty(&u->link));
-
- if (atomic_long_dec_and_test(&u->inflight))
- list_del_init(&u->link);
- /* Paired with READ_ONCE() in wait_for_unix_gc() */
- WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
- }
- WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
- spin_unlock(&unix_gc_lock);
-}
-
-/*
- * The "user->unix_inflight" variable is protected by the garbage
- * collection lock, and we just read it locklessly here. If you go
- * over the limit, there might be a tiny race in actually noticing
- * it across threads. Tough.
- */
-static inline bool too_many_unix_fds(struct task_struct *p)
-{
- struct user_struct *user = current_user();
-
- if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
- return false;
-}
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- if (too_many_unix_fds(current))
- return -ETOOMANYREFS;
-
- /*
- * Need to duplicate file references for the sake of garbage
- * collection. Otherwise a socket in the fps might become a
- * candidate for GC while the skb is not yet queued.
- */
- UNIXCB(skb).fp = scm_fp_dup(scm->fp);
- if (!UNIXCB(skb).fp)
- return -ENOMEM;
-
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->user, scm->fp->fp[i]);
- return 0;
-}
-EXPORT_SYMBOL(unix_attach_fds);
-
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- scm->fp = UNIXCB(skb).fp;
- UNIXCB(skb).fp = NULL;
-
- for (i = scm->fp->count-1; i >= 0; i--)
- unix_notinflight(scm->fp->user, scm->fp->fp[i]);
-}
-EXPORT_SYMBOL(unix_detach_fds);
-
-void unix_destruct_scm(struct sk_buff *skb)
-{
- struct scm_cookie scm;
-
- memset(&scm, 0, sizeof(scm));
- scm.pid = UNIXCB(skb).pid;
- if (UNIXCB(skb).fp)
- unix_detach_fds(&scm, skb);
-
- /* Alas, it calls VFS */
- /* So fscking what? fput() had been SMP-safe since the last Summer */
- scm_destroy(&scm);
- sock_wfree(skb);
-}
-EXPORT_SYMBOL(unix_destruct_scm);
-
-void io_uring_destruct_scm(struct sk_buff *skb)
-{
- unix_destruct_scm(skb);
-}
-EXPORT_SYMBOL(io_uring_destruct_scm);
diff --git a/net/unix/scm.h b/net/unix/scm.h
deleted file mode 100644
index 5a255a477f16..000000000000
--- a/net/unix/scm.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef NET_UNIX_SCM_H
-#define NET_UNIX_SCM_H
-
-extern struct list_head gc_inflight_list;
-extern spinlock_t unix_gc_lock;
-
-int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
-
-#endif
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index a9ac85e09af3..10345388ad13 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -206,7 +206,6 @@ config CFG80211_KUNIT_TEST
depends on KUNIT
depends on CFG80211
default KUNIT_ALL_TESTS
- depends on !KERNEL_6_2
help
Enable this option to test cfg80211 functions with kunit.
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index ceb9174c5c3d..3414b2c3abcc 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,7 +6,7 @@
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2018-2023 Intel Corporation
+ * Copyright 2018-2024 Intel Corporation
*/
#include <linux/export.h>
@@ -27,11 +27,10 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
if (WARN_ON(!chan))
return;
- chandef->chan = chan;
- chandef->freq1_offset = chan->freq_offset;
- chandef->center_freq2 = 0;
- chandef->edmg.bw_config = 0;
- chandef->edmg.channels = 0;
+ *chandef = (struct cfg80211_chan_def) {
+ .chan = chan,
+ .freq1_offset = chan->freq_offset,
+ };
switch (chan_type) {
case NL80211_CHAN_NO_HT:
@@ -56,6 +55,73 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
}
EXPORT_SYMBOL(cfg80211_chandef_create);
+struct cfg80211_per_bw_puncturing_values {
+ u8 len;
+ const u16 *valid_values;
+};
+
+static const u16 puncturing_values_80mhz[] = {
+ 0x8, 0x4, 0x2, 0x1
+};
+
+static const u16 puncturing_values_160mhz[] = {
+ 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
+};
+
+static const u16 puncturing_values_320mhz[] = {
+ 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
+ 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
+ 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
+};
+
+#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
+ { \
+ .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
+ .valid_values = puncturing_values_ ## _bw ## mhz \
+ }
+
+static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
+ CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
+};
+
+static bool valid_puncturing_bitmap(const struct cfg80211_chan_def *chandef)
+{
+ u32 idx, i, start_freq, primary_center = chandef->chan->center_freq;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_80:
+ idx = 0;
+ start_freq = chandef->center_freq1 - 40;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ idx = 1;
+ start_freq = chandef->center_freq1 - 80;
+ break;
+ case NL80211_CHAN_WIDTH_320:
+ idx = 2;
+ start_freq = chandef->center_freq1 - 160;
+ break;
+ default:
+ return chandef->punctured == 0;
+ }
+
+ if (!chandef->punctured)
+ return true;
+
+ /* check if primary channel is punctured */
+ if (chandef->punctured & (u16)BIT((primary_center - start_freq) / 20))
+ return false;
+
+ for (i = 0; i < per_bw_puncturing[idx].len; i++) {
+ if (per_bw_puncturing[idx].valid_values[i] == chandef->punctured)
+ return true;
+ }
+
+ return false;
+}
+
static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
{
int max_contiguous = 0;
@@ -317,72 +383,81 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
!cfg80211_edmg_chandef_valid(chandef))
return false;
- return true;
+ return valid_puncturing_bitmap(chandef);
}
EXPORT_SYMBOL(cfg80211_chandef_valid);
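valid_puncturing_bitmap() encodes one 20 MHz subchannel per bit, counted from the low edge of the block, so the primary channel's bit index is (primary_center - start_freq) / 20 and that bit must stay clear in chandef->punctured. A userspace check of that rule with illustrative frequencies:

#include <stdint.h>
#include <stdio.h>

static int primary_punctured(uint16_t bitmap, int center_freq1, int width,
			     int primary_center)
{
	int start_freq = center_freq1 - width / 2;	/* low edge */

	return bitmap & (uint16_t)(1u << ((primary_center - start_freq) / 20));
}

int main(void)
{
	/* 80 MHz block centred at 5210 (subchannels at 5180/5200/5220/5240),
	 * primary at 5180 -> bit 0; bitmap 0x8 punctures 5240 only.
	 */
	printf("%d\n", primary_punctured(0x8, 5210, 80, 5180));	/* 0: valid */
	printf("%d\n", primary_punctured(0x1, 5210, 80, 5180));	/* 1: invalid */
	return 0;
}

The per-width tables then whitelist which remaining bit patterns the spec allows; anything else is rejected even if the primary survives.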
-static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
- u32 *pri40, u32 *pri80, u32 *pri160)
+int cfg80211_chandef_primary(const struct cfg80211_chan_def *c,
+ enum nl80211_chan_width primary_chan_width,
+ u16 *punctured)
{
- int tmp;
+ int pri_width = nl80211_chan_width_to_mhz(primary_chan_width);
+ int width = cfg80211_chandef_get_width(c);
+ u32 control = c->chan->center_freq;
+ u32 center = c->center_freq1;
+ u16 _punct = 0;
- switch (c->width) {
- case NL80211_CHAN_WIDTH_40:
- *pri40 = c->center_freq1;
- *pri80 = 0;
- *pri160 = 0;
- break;
- case NL80211_CHAN_WIDTH_80:
- case NL80211_CHAN_WIDTH_80P80:
- *pri160 = 0;
- *pri80 = c->center_freq1;
- /* n_P20 */
- tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 20 + 40 * tmp;
- break;
- case NL80211_CHAN_WIDTH_160:
- *pri160 = c->center_freq1;
- /* n_P20 */
- tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 60 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 40 + 80 * tmp;
- break;
- case NL80211_CHAN_WIDTH_320:
- /* n_P20 */
- tmp = (150 + c->chan->center_freq - c->center_freq1) / 20;
- /* n_P40 */
- tmp /= 2;
- /* freq_P40 */
- *pri40 = c->center_freq1 - 140 + 40 * tmp;
- /* n_P80 */
- tmp /= 2;
- *pri80 = c->center_freq1 - 120 + 80 * tmp;
- /* n_P160 */
- tmp /= 2;
- *pri160 = c->center_freq1 - 80 + 160 * tmp;
- break;
- default:
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(pri_width < 0 || width < 0))
+ return -1;
+
+ /* not intended to be called this way, can't determine */
+ if (WARN_ON_ONCE(pri_width > width))
+ return -1;
+
+ if (!punctured)
+ punctured = &_punct;
+
+ *punctured = c->punctured;
+
+ while (width > pri_width) {
+ unsigned int bits_to_drop = width / 20 / 2;
+
+ if (control > center) {
+ center += width / 4;
+ *punctured >>= bits_to_drop;
+ } else {
+ center -= width / 4;
+ *punctured &= (1 << bits_to_drop) - 1;
+ }
+ width /= 2;
}
+
+ return center;
}
+EXPORT_SYMBOL(cfg80211_chandef_primary);
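The loop in cfg80211_chandef_primary() halves the block repeatedly, each time keeping whichever half contains the control channel: the centre moves by width/4 toward the control frequency, and the puncturing bitmap keeps only the bits of the retained half (shifted down for the upper half, masked for the lower). The same arithmetic extracted into a standalone program, using the common 5 GHz channel-36 layout as the example:

#include <stdint.h>
#include <stdio.h>

static int primary_center(int control, int center, int width, int pri_width,
			  uint16_t *punct)
{
	while (width > pri_width) {
		unsigned int bits_to_drop = width / 20 / 2;

		if (control > center) {		/* keep the upper half */
			center += width / 4;
			*punct >>= bits_to_drop;
		} else {			/* keep the lower half */
			center -= width / 4;
			*punct &= (1u << bits_to_drop) - 1;
		}
		width /= 2;
	}
	return center;
}

int main(void)
{
	uint16_t punct = 0x8;	/* 80 MHz block 5170-5250, top 20 MHz punctured */

	/* control channel 5180 in the 80 MHz block centred at 5210 */
	printf("pri40 %d\n", primary_center(5180, 5210, 80, 40, &punct));
	/* prints 5190; punct becomes 0x0 as the punctured part was dropped */
	printf("punct %#x\n", punct);
	return 0;
}

Folding the old per-width chandef_primary_freqs() tables into this loop is what lets the compatibility check below walk 40/80/160 MHz uniformly and compare puncturing along the way.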
-const struct cfg80211_chan_def *
-cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
- const struct cfg80211_chan_def *c2)
+static const struct cfg80211_chan_def *
+check_chandef_primary_compat(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2,
+ enum nl80211_chan_width primary_chan_width)
{
- u32 c1_pri40, c1_pri80, c2_pri40, c2_pri80, c1_pri160, c2_pri160;
+ u16 punct_c1 = 0, punct_c2 = 0;
+
+ /* check primary is compatible -> error if not */
+ if (cfg80211_chandef_primary(c1, primary_chan_width, &punct_c1) !=
+ cfg80211_chandef_primary(c2, primary_chan_width, &punct_c2))
+ return ERR_PTR(-EINVAL);
+
+ if (punct_c1 != punct_c2)
+ return ERR_PTR(-EINVAL);
+
+ /* c1 is assumed to be the narrower one; if its width was just checked -> done */
+ if (c1->width == primary_chan_width)
+ return c2;
+
+ /* otherwise continue checking the next width */
+ return NULL;
+}
+
+static const struct cfg80211_chan_def *
+_cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
+{
+ const struct cfg80211_chan_def *ret;
/* If they are identical, return */
if (cfg80211_chandef_identical(c1, c2))
- return c1;
+ return c2;
/* otherwise, must have same control channel */
if (c1->chan != c2->chan)
@@ -396,53 +471,76 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
return NULL;
/*
- * can't be compatible if one of them is 5 or 10 MHz,
+ * can't be compatible if one of them is 5/10 MHz or S1G
* and they don't have the same width.
*/
- if (c1->width == NL80211_CHAN_WIDTH_5 ||
- c1->width == NL80211_CHAN_WIDTH_10 ||
- c2->width == NL80211_CHAN_WIDTH_5 ||
- c2->width == NL80211_CHAN_WIDTH_10)
+#define NARROW_OR_S1G(width) ((width) == NL80211_CHAN_WIDTH_5 || \
+ (width) == NL80211_CHAN_WIDTH_10 || \
+ (width) == NL80211_CHAN_WIDTH_1 || \
+ (width) == NL80211_CHAN_WIDTH_2 || \
+ (width) == NL80211_CHAN_WIDTH_4 || \
+ (width) == NL80211_CHAN_WIDTH_8 || \
+ (width) == NL80211_CHAN_WIDTH_16)
+
+ if (NARROW_OR_S1G(c1->width) || NARROW_OR_S1G(c2->width))
return NULL;
- if (c1->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c1->width == NL80211_CHAN_WIDTH_20)
+ /*
+ * Make sure that c1 is always the narrower one, so that later
+ * we either return NULL or c2 and don't have to check both
+ * directions.
+ */
+ if (c1->width > c2->width)
+ swap(c1, c2);
+
+ /*
+ * No further checks needed if the "narrower" one is only 20 MHz.
+ * Here "narrower" includes being a 20 MHz non-HT channel vs. a
+ * 20 MHz HT (or later) one.
+ */
+ if (c1->width <= NL80211_CHAN_WIDTH_20)
return c2;
- if (c2->width == NL80211_CHAN_WIDTH_20_NOHT ||
- c2->width == NL80211_CHAN_WIDTH_20)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_40);
+ if (ret)
+ return ret;
- chandef_primary_freqs(c1, &c1_pri40, &c1_pri80, &c1_pri160);
- chandef_primary_freqs(c2, &c2_pri40, &c2_pri80, &c2_pri160);
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_80);
+ if (ret)
+ return ret;
- if (c1_pri40 != c2_pri40)
+ /*
+ * If c1 is 80+80, then c2 is 160 or higher, but that cannot
+ * match. If c2 was also 80+80 it was already either accepted
+ * or rejected above (identical or not, respectively.)
+ */
+ if (c1->width == NL80211_CHAN_WIDTH_80P80)
return NULL;
- if (c1->width == NL80211_CHAN_WIDTH_40)
- return c2;
-
- if (c2->width == NL80211_CHAN_WIDTH_40)
- return c1;
+ ret = check_chandef_primary_compat(c1, c2, NL80211_CHAN_WIDTH_160);
+ if (ret)
+ return ret;
- if (c1_pri80 != c2_pri80)
- return NULL;
+ /*
+ * Getting here would mean they're both wider than 160, have the
+ * same primary 160, but are not identical - this cannot happen
+ * since they must be 320 (no wider chandefs exist, at least yet.)
+ */
+ WARN_ON_ONCE(1);
- if (c1->width == NL80211_CHAN_WIDTH_80 &&
- c2->width > NL80211_CHAN_WIDTH_80)
- return c2;
+ return NULL;
+}
- if (c2->width == NL80211_CHAN_WIDTH_80 &&
- c1->width > NL80211_CHAN_WIDTH_80)
- return c1;
+const struct cfg80211_chan_def *
+cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
+ const struct cfg80211_chan_def *c2)
+{
+ const struct cfg80211_chan_def *ret;
- WARN_ON(!c1_pri160 && !c2_pri160);
- if (c1_pri160 && c2_pri160 && c1_pri160 != c2_pri160)
+ ret = _cfg80211_chandef_compatible(c1, c2);
+ if (IS_ERR(ret))
return NULL;
-
- if (c1->width > c2->width)
- return c1;
- return c2;
+ return ret;
}
EXPORT_SYMBOL(cfg80211_chandef_compatible);
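
A hedged sketch of how the split functions above combine (values reuse the
example after cfg80211_chandef_primary()):

	/*
	 * c1: 40 MHz, control ch 36, center_freq1 = 5190
	 * c2: 80 MHz, control ch 36, center_freq1 = 5210, punctured = 0x8
	 *
	 * _cfg80211_chandef_compatible() swaps so c1 is the narrower
	 * chandef, then walks the width levels via
	 * check_chandef_primary_compat(): the primary-channel centers and
	 * the puncturing bits falling inside that primary must match at
	 * each level. Here both yield 5190 with no puncturing inside, and
	 * since c1->width == NL80211_CHAN_WIDTH_40 the wider c2 is
	 * returned; the exported wrapper maps any ERR_PTR() mismatch to
	 * NULL.
	 */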
@@ -1047,7 +1145,7 @@ EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time);
static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
u32 center_freq, u32 bandwidth,
- u32 prohibited_flags)
+ u32 prohibited_flags, bool monitor)
{
struct ieee80211_channel *c;
u32 freq, start_freq, end_freq;
@@ -1057,7 +1155,11 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
c = ieee80211_get_channel_khz(wiphy, freq);
- if (!c || c->flags & prohibited_flags)
+ if (!c)
+ return false;
+ if (monitor && c->flags & IEEE80211_CHAN_CAN_MONITOR)
+ continue;
+ if (c->flags & prohibited_flags)
return false;
}
@@ -1117,9 +1219,9 @@ static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
return true;
}
-bool cfg80211_chandef_usable(struct wiphy *wiphy,
- const struct cfg80211_chan_def *chandef,
- u32 prohibited_flags)
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags, bool monitor)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -1281,14 +1383,22 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
if (!cfg80211_secondary_chans_ok(wiphy,
ieee80211_chandef_to_khz(chandef),
- width, prohibited_flags))
+ width, prohibited_flags, monitor))
return false;
if (!chandef->center_freq2)
return true;
return cfg80211_secondary_chans_ok(wiphy,
MHZ_TO_KHZ(chandef->center_freq2),
- width, prohibited_flags);
+ width, prohibited_flags, monitor);
+}
+
+bool cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags)
+{
+ return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags,
+ false);
}
EXPORT_SYMBOL(cfg80211_chandef_usable);
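
One note on the new monitor parameter, as used by the hunks above and by the
nl80211 changes later in this patch:

	/*
	 * With monitor == true, a channel flagged
	 * IEEE80211_CHAN_CAN_MONITOR is accepted even when it also
	 * carries bits from prohibited_flags (e.g.
	 * IEEE80211_CHAN_DISABLED), so monitor interfaces can be parked
	 * on channels that are unusable for normal operation.
	 */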
@@ -1532,72 +1642,3 @@ struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
}
}
EXPORT_SYMBOL(wdev_chandef);
-
-struct cfg80211_per_bw_puncturing_values {
- u8 len;
- const u16 *valid_values;
-};
-
-static const u16 puncturing_values_80mhz[] = {
- 0x8, 0x4, 0x2, 0x1
-};
-
-static const u16 puncturing_values_160mhz[] = {
- 0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
-};
-
-static const u16 puncturing_values_320mhz[] = {
- 0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
- 0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
- 0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
-};
-
-#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
- { \
- .len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
- .valid_values = puncturing_values_ ## _bw ## mhz \
- }
-
-static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
- CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
-};
-
-bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
- const struct cfg80211_chan_def *chandef)
-{
- u32 idx, i, start_freq;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80:
- idx = 0;
- start_freq = chandef->center_freq1 - 40;
- break;
- case NL80211_CHAN_WIDTH_160:
- idx = 1;
- start_freq = chandef->center_freq1 - 80;
- break;
- case NL80211_CHAN_WIDTH_320:
- idx = 2;
- start_freq = chandef->center_freq1 - 160;
- break;
- default:
- *bitmap = 0;
- break;
- }
-
- if (!*bitmap)
- return true;
-
- /* check if primary channel is punctured */
- if (*bitmap & (u16)BIT((chandef->chan->center_freq - start_freq) / 20))
- return false;
-
- for (i = 0; i < per_bw_puncturing[idx].len; i++)
- if (per_bw_puncturing[idx].valid_values[i] == *bitmap)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(cfg80211_valid_disable_subchannel_bitmap);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 409d74c57ca0..3fb1b637352a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1661,6 +1661,7 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy,
unsigned long delay)
{
if (!delay) {
+ del_timer(&dwork->timer);
wiphy_work_queue(wiphy, &dwork->work);
return;
}
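
The added del_timer() closes what appears to be a double-queue window; a
minimal sketch of the sequence (timings assumed for illustration):

	/*
	 *   wiphy_delayed_work_queue(wiphy, dwork, 5 * HZ);  timer armed
	 *   wiphy_delayed_work_queue(wiphy, dwork, 0);       work queued now
	 *   ...5 seconds later the still-armed timer fires and
	 *   queues the same work a second time
	 *
	 * Cancelling the pending timer on the delay == 0 path keeps the
	 * two calls from producing two runs.
	 */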
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 13657a85cf61..118f2f619828 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -362,7 +362,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct cfg80211_auth_request *req);
int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct cfg80211_assoc_request *req);
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack);
int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
struct net_device *dev, const u8 *bssid,
const u8 *ie, int ie_len, u16 reason,
@@ -491,6 +492,9 @@ bool cfg80211_is_sub_chan(struct cfg80211_chan_def *chandef,
bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev,
struct ieee80211_channel *chan,
bool primary_only);
+bool _cfg80211_chandef_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef,
+ u32 prohibited_flags, bool monitor);
static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
{
@@ -549,9 +553,53 @@ int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask);
+/**
+ * struct cfg80211_colocated_ap - colocated AP information
+ *
+ * @list: linked list to all colocated APs
+ * @bssid: BSSID of the reported AP
+ * @ssid: SSID of the reported AP
+ * @ssid_len: length of the ssid
+ * @center_freq: frequency the reported AP is on
+ * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
+ * that operate in the same channel as the reported AP and that might be
+ * detected by a STA receiving this frame, are transmitting unsolicited
+ * Probe Response frames every 20 TUs
+ * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
+ * @same_ssid: the reported AP has the same SSID as the reporting AP
+ * @multi_bss: the reported AP is part of a multiple BSSID set
+ * @transmitted_bssid: the reported AP is the transmitting BSSID
+ * @colocated_ess: all the APs that share the same ESS as the reported AP are
+ * colocated and can be discovered via legacy bands.
+ * @short_ssid_valid: short_ssid is valid and can be used
+ * @short_ssid: the short SSID for this SSID
+ * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
+ */
+struct cfg80211_colocated_ap {
+ struct list_head list;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ u32 short_ssid;
+ u32 center_freq;
+ u8 unsolicited_probe:1,
+ oct_recommended:1,
+ same_ssid:1,
+ multi_bss:1,
+ transmitted_bssid:1,
+ colocated_ess:1,
+ short_ssid_valid:1;
+ s8 psd_20;
+};
+
#if IS_ENABLED(CONFIG_CFG80211_KUNIT_TEST)
#define EXPORT_SYMBOL_IF_CFG80211_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
#define VISIBLE_IF_CFG80211_KUNIT
+void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list);
+
+int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list);
+
size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
const u8 *subie, size_t subie_len,
u8 *new_ie, size_t new_ie_len);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f635a8b6ca2e..4052041a19ea 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2009, Jouni Malinen <j@w1.fi>
* Copyright (c) 2015 Intel Deutschland GmbH
- * Copyright (C) 2019-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2019-2020, 2022-2024 Intel Corporation
*/
#include <linux/kernel.h>
@@ -241,12 +241,12 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
char *buf = kmalloc(128, gfp);
if (buf) {
- sprintf(buf, "MLME-MICHAELMICFAILURE.indication("
- "keyid=%d %scast addr=%pM)", key_id,
- key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni",
- addr);
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
+ wrqu.data.length =
+ sprintf(buf, "MLME-MICHAELMICFAILURE."
+ "indication(keyid=%d %scast addr=%pM)",
+ key_id, key_type == NL80211_KEYTYPE_GROUP
+ ? "broad" : "uni", addr);
wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
kfree(buf);
}
@@ -325,28 +325,136 @@ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
p1[i] &= p2[i];
}
-/* Note: caller must cfg80211_put_bss() regardless of result */
-int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_assoc_request *req)
+static int
+cfg80211_mlme_check_mlo_compat(const struct ieee80211_multi_link_elem *mle_a,
+ const struct ieee80211_multi_link_elem *mle_b,
+ struct netlink_ext_ack *extack)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err, i, j;
+ const struct ieee80211_mle_basic_common_info *common_a, *common_b;
- lockdep_assert_wiphy(wdev->wiphy);
+ common_a = (const void *)mle_a->variable;
+ common_b = (const void *)mle_b->variable;
+
+ if (memcmp(common_a->mld_mac_addr, common_b->mld_mac_addr, ETH_ALEN)) {
+ NL_SET_ERR_MSG(extack, "AP MLD address mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_a) !=
+ ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link EML medium sync delay mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_eml_cap((const u8 *)mle_a) !=
+ ieee80211_mle_get_eml_cap((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link EML capabilities mismatch");
+ return -EINVAL;
+ }
+
+ if (ieee80211_mle_get_mld_capa_op((const u8 *)mle_a) !=
+ ieee80211_mle_get_mld_capa_op((const u8 *)mle_b)) {
+ NL_SET_ERR_MSG(extack, "link MLD capabilities/ops mismatch");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cfg80211_mlme_check_mlo(struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ const struct ieee80211_multi_link_elem *mles[ARRAY_SIZE(req->links)] = {};
+ int i;
+
+ if (req->link_id < 0)
+ return 0;
+
+ if (!req->links[req->link_id].bss) {
+ NL_SET_ERR_MSG(extack, "no BSS for assoc link");
+ return -EINVAL;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ const struct cfg80211_bss_ies *ies;
+ const struct element *ml;
- for (i = 1; i < ARRAY_SIZE(req->links); i++) {
if (!req->links[i].bss)
continue;
- for (j = 0; j < i; j++) {
- if (req->links[i].bss == req->links[j].bss)
- return -EINVAL;
+
+ if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr)) {
+ NL_SET_ERR_MSG(extack, "BSSID must not be our address");
+ req->links[i].error = -EINVAL;
+ goto error;
}
- if (ether_addr_equal(req->links[i].bss->bssid, dev->dev_addr))
- return -EINVAL;
+ ies = rcu_dereference(req->links[i].bss->ies);
+ ml = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK,
+ ies->data, ies->len);
+ if (!ml) {
+ NL_SET_ERR_MSG(extack, "MLO BSS w/o ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ if (!ieee80211_mle_type_ok(ml->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ ml->datalen - 1)) {
+ NL_SET_ERR_MSG(extack, "BSS with invalid ML element");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+
+ mles[i] = (const void *)(ml->data + 1);
+
+ if (ieee80211_mle_get_link_id((const u8 *)mles[i]) != i) {
+ NL_SET_ERR_MSG(extack, "link ID mismatch");
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
+ }
+
+ if (WARN_ON(!mles[req->link_id]))
+ goto error;
+
+ for (i = 0; i < ARRAY_SIZE(req->links); i++) {
+ if (i == req->link_id || !req->links[i].bss)
+ continue;
+
+ if (WARN_ON(!mles[i]))
+ goto error;
+
+ if (cfg80211_mlme_check_mlo_compat(mles[req->link_id], mles[i],
+ extack)) {
+ req->links[i].error = -EINVAL;
+ goto error;
+ }
}
+ rcu_read_unlock();
+ return 0;
+error:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Note: caller must cfg80211_put_bss() regardless of result */
+int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_assoc_request *req,
+ struct netlink_ext_ack *extack)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ lockdep_assert_wiphy(wdev->wiphy);
+
+ err = cfg80211_mlme_check_mlo(dev, req, extack);
+ if (err)
+ return err;
+
if (wdev->connected &&
(!req->prev_bssid ||
!ether_addr_equal(wdev->u.client.connected_addr, req->prev_bssid)))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 60877b532993..b4edba6b0b7b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/if.h>
@@ -581,7 +581,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
[NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
- [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ [NL80211_ATTR_WPA_VERSIONS] =
+ NLA_POLICY_RANGE(NLA_U32, 0,
+ NL80211_WPA_VERSION_1 |
+ NL80211_WPA_VERSION_2 |
+ NL80211_WPA_VERSION_3),
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
[NL80211_ATTR_PMKID] = NLA_POLICY_EXACT_LEN_WARN(WLAN_PMKID_LEN),
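
One worked detail on the policy change above (flag values per nl80211.h):

	/*
	 * NL80211_WPA_VERSION_1 | _2 | _3 = 1 | 2 | 4 = 7, so
	 * NLA_POLICY_RANGE(NLA_U32, 0, 7) accepts exactly the subsets of
	 * those three flags - the same set that the open-coded
	 * nl80211_valid_wpa_versions() helper (removed further down)
	 * used to allow.
	 */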
@@ -821,6 +825,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA] = { .type = NLA_FLAG },
[NL80211_ATTR_MLO_TTLM_DLINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
[NL80211_ATTR_MLO_TTLM_ULINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
+ [NL80211_ATTR_ASSOC_SPP_AMSDU] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -906,22 +911,11 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
};
static const struct nla_policy
-nl80211_match_band_rssi_policy[NUM_NL80211_BANDS] = {
- [NL80211_BAND_2GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_5GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_6GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_60GHZ] = { .type = NLA_S32 },
- [NL80211_BAND_LC] = { .type = NLA_S32 },
-};
-
-static const struct nla_policy
nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
[NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
[NL80211_SCHED_SCAN_MATCH_ATTR_BSSID] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
- [NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI] =
- NLA_POLICY_NESTED(nl80211_match_band_rssi_policy),
};
static const struct nla_policy
@@ -1204,11 +1198,11 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_DFS_CONCURRENT) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DFS_CONCURRENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT))
+ if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT))
goto nla_put_failure;
}
@@ -3224,24 +3218,9 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
-static int nl80211_parse_punct_bitmap(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- const struct cfg80211_chan_def *chandef,
- u16 *punct_bitmap)
-{
- if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_PUNCT))
- return -EINVAL;
-
- *punct_bitmap = nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
- if (!cfg80211_valid_disable_subchannel_bitmap(punct_bitmap, chandef))
- return -EINVAL;
-
- return 0;
-}
-
-int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- struct cfg80211_chan_def *chandef)
+static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info, bool monitor,
+ struct cfg80211_chan_def *chandef)
{
struct netlink_ext_ack *extack = info->extack;
struct nlattr **attrs = info->attrs;
@@ -3266,10 +3245,9 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->freq1_offset = control_freq % 1000;
chandef->center_freq2 = 0;
- /* Primary channel not allowed */
- if (!chandef->chan || chandef->chan->flags & IEEE80211_CHAN_DISABLED) {
+ if (!chandef->chan) {
NL_SET_ERR_MSG_ATTR(extack, attrs[NL80211_ATTR_WIPHY_FREQ],
- "Channel is disabled");
+ "Unknown channel");
return -EINVAL;
}
@@ -3346,13 +3324,27 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
chandef->edmg.channels = 0;
}
+ if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
+ chandef->punctured =
+ nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
+
+ if (chandef->punctured &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_PUNCT)) {
+ NL_SET_ERR_MSG(extack,
+ "driver doesn't support puncturing");
+ return -EINVAL;
+ }
+ }
+
if (!cfg80211_chandef_valid(chandef)) {
NL_SET_ERR_MSG(extack, "invalid channel definition");
return -EINVAL;
}
- if (!cfg80211_chandef_usable(&rdev->wiphy, chandef,
- IEEE80211_CHAN_DISABLED)) {
+ if (!_cfg80211_chandef_usable(&rdev->wiphy, chandef,
+ IEEE80211_CHAN_DISABLED,
+ monitor)) {
NL_SET_ERR_MSG(extack, "(extension) channel is disabled");
return -EINVAL;
}
@@ -3367,6 +3359,13 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
return 0;
}
+int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_chan_def *chandef)
+{
+ return _nl80211_parse_chandef(rdev, info, false, chandef);
+}
+
static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct genl_info *info,
@@ -3391,7 +3390,9 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
link_id = 0;
}
- result = nl80211_parse_chandef(rdev, info, &chandef);
+ result = _nl80211_parse_chandef(rdev, info,
+ iftype == NL80211_IFTYPE_MONITOR,
+ &chandef);
if (result)
return result;
@@ -3822,6 +3823,10 @@ int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *ch
if (chandef->center_freq2 &&
nla_put_u32(msg, NL80211_ATTR_CENTER_FREQ2, chandef->center_freq2))
return -ENOBUFS;
+ if (chandef->punctured &&
+ nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, chandef->punctured))
+ return -ENOBUFS;
+
return 0;
}
EXPORT_SYMBOL(nl80211_send_chandef);
@@ -4020,6 +4025,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
}
wiphy_unlock(&rdev->wiphy);
+ if_start = 0;
wp_idx++;
}
out:
@@ -4196,11 +4202,11 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (ntype != NL80211_IFTYPE_MESH_POINT)
return -EINVAL;
+ if (otype != NL80211_IFTYPE_MESH_POINT)
+ return -EINVAL;
if (netif_running(dev))
return -EBUSY;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -4306,8 +4312,6 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_MESH_POINT:
if (!info->attrs[NL80211_ATTR_MESH_ID])
break;
- BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
- IEEE80211_MAX_MESH_ID_LEN);
wdev->u.mesh.id_up_len =
nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
memcpy(wdev->u.mesh.id,
@@ -6066,14 +6070,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params->chandef,
- &params->punct_bitmap);
- if (err)
- goto out;
- }
-
if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef,
wdev->iftype)) {
err = -EINVAL;
@@ -6873,7 +6869,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (statype) {
case CFG80211_STA_MESH_PEER_KERNEL:
@@ -6933,6 +6929,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
params->link_sta_params.he_capa ||
params->link_sta_params.eht_capa)
return -EINVAL;
+ if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
}
if (statype != CFG80211_STA_AP_CLIENT &&
@@ -6956,7 +6954,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
BIT(NL80211_STA_FLAG_ASSOCIATED) |
BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
BIT(NL80211_STA_FLAG_WME) |
- BIT(NL80211_STA_FLAG_MFP)))
+ BIT(NL80211_STA_FLAG_MFP) |
+ BIT(NL80211_STA_FLAG_SPP_AMSDU)))
return -EINVAL;
/* but authenticated/associated only if driver handles it */
@@ -7515,7 +7514,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
/* When you run into this, adjust the code below for the new flag */
- BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
+ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 8);
switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_AP:
@@ -7539,6 +7538,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.sta_flags_mask & auth_assoc)
return -EINVAL;
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT) &&
+ params.sta_flags_mask & BIT(NL80211_STA_FLAG_SPP_AMSDU))
+ return -EINVAL;
+
/* Older userspace, or userspace wanting to be compatible with
* !NL80211_FEATURE_FULL_AP_CLIENT_STATE, will not set the auth
* and assoc flags in the mask, but assumes the station will be
@@ -7627,14 +7631,16 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
struct station_del_parameters params;
+ int link_id = nl80211_link_id_or_invalid(info->attrs);
memset(&params, 0, sizeof(params));
if (info->attrs[NL80211_ATTR_MAC])
params.mac = nla_data(info->attrs[NL80211_ATTR_MAC]);
- switch (dev->ieee80211_ptr->iftype) {
+ switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MESH_POINT:
@@ -7675,6 +7681,17 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
params.reason_code = WLAN_REASON_PREV_AUTH_NOT_VALID;
}
+ /* Link ID not expected in case of non-ML operation */
+ if (!wdev->valid_links && link_id != -1)
+ return -EINVAL;
+
+ /* If given, a valid link ID should be passed during MLO */
+ if (wdev->valid_links && link_id >= 0 &&
+ !(wdev->valid_links & BIT(link_id)))
+ return -EINVAL;
+
+ params.link_id = link_id;
+
return rdev_del_station(rdev, dev, &params);
}
@@ -9480,41 +9497,6 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
return 0;
}
-static int
-nl80211_parse_sched_scan_per_band_rssi(struct wiphy *wiphy,
- struct cfg80211_match_set *match_sets,
- struct nlattr *tb_band_rssi,
- s32 rssi_thold)
-{
- struct nlattr *attr;
- int i, tmp, ret = 0;
-
- if (!wiphy_ext_feature_isset(wiphy,
- NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD)) {
- if (tb_band_rssi)
- ret = -EOPNOTSUPP;
- else
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] =
- NL80211_SCAN_RSSI_THOLD_OFF;
- return ret;
- }
-
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- match_sets->per_band_rssi_thold[i] = rssi_thold;
-
- nla_for_each_nested(attr, tb_band_rssi, tmp) {
- enum nl80211_band band = nla_type(attr);
-
- if (band < 0 || band >= NUM_NL80211_BANDS)
- return -EINVAL;
-
- match_sets->per_band_rssi_thold[band] = nla_get_s32(attr);
- }
-
- return 0;
-}
-
static struct cfg80211_sched_scan_request *
nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr **attrs, int max_match_sets)
@@ -9789,15 +9771,6 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (rssi)
request->match_sets[i].rssi_thold =
nla_get_s32(rssi);
-
- /* Parse per band RSSI attribute */
- err = nl80211_parse_sched_scan_per_band_rssi(wiphy,
- &request->match_sets[i],
- tb[NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI],
- request->match_sets[i].rssi_thold);
- if (err)
- goto out_free;
-
i++;
}
@@ -10077,6 +10050,42 @@ static int nl80211_notify_radar_detection(struct sk_buff *skb,
return 0;
}
+static int nl80211_parse_counter_offsets(struct cfg80211_registered_device *rdev,
+ const u8 *data, size_t datalen,
+ int first_count, struct nlattr *attr,
+ const u16 **offsets, unsigned int *n_offsets)
+{
+ int i;
+
+ *n_offsets = 0;
+
+ if (!attr)
+ return 0;
+
+ if (!nla_len(attr) || (nla_len(attr) % sizeof(u16)))
+ return -EINVAL;
+
+ *n_offsets = nla_len(attr) / sizeof(u16);
+ if (rdev->wiphy.max_num_csa_counters &&
+ (*n_offsets > rdev->wiphy.max_num_csa_counters))
+ return -EINVAL;
+
+ *offsets = nla_data(attr);
+
+ /* sanity checks - counters should fit and be the same */
+ for (i = 0; i < *n_offsets; i++) {
+ u16 offset = (*offsets)[i];
+
+ if (offset >= datalen)
+ return -EINVAL;
+
+ if (first_count != -1 && data[offset] != first_count)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
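
A usage note for the helper above, matching how the later hunks wire it up:

	/*
	 * first_count == -1 skips the counter-value comparison, and data
	 * may then be NULL: nl80211_tx_mgmt() uses this for
	 * NL80211_ATTR_CSA_C_OFFSETS_TX, where only the
	 * offset-fits-in-frame check (offset < datalen) applies.
	 */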
static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -10088,7 +10097,6 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
int err;
bool need_new_beacon = false;
bool need_handle_dfs_flag = true;
- int len, i;
u32 cs_count;
if (!rdev->ops->channel_switch ||
@@ -10173,72 +10181,23 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
goto free;
}
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.tail,
+ params.beacon_csa.tail_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON],
+ &params.counter_offsets_beacon,
+ &params.n_counter_offsets_beacon);
+ if (err)
goto free;
- }
- params.n_counter_offsets_beacon = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_beacon >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
+ err = nl80211_parse_counter_offsets(rdev, params.beacon_csa.probe_resp,
+ params.beacon_csa.probe_resp_len,
+ params.count,
+ csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP],
+ &params.counter_offsets_presp,
+ &params.n_counter_offsets_presp);
+ if (err)
goto free;
- }
-
- params.counter_offsets_beacon =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_beacon; i++) {
- u16 offset = params.counter_offsets_beacon[i];
-
- if (offset >= params.beacon_csa.tail_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.tail[offset] != params.count) {
- err = -EINVAL;
- goto free;
- }
- }
-
- if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) {
- len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
- if (!len || (len % sizeof(u16))) {
- err = -EINVAL;
- goto free;
- }
-
- params.n_counter_offsets_presp = len / sizeof(u16);
- if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_presp >
- rdev->wiphy.max_num_csa_counters)) {
- err = -EINVAL;
- goto free;
- }
-
- params.counter_offsets_presp =
- nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
-
- /* sanity checks - counters should fit and be the same */
- for (i = 0; i < params.n_counter_offsets_presp; i++) {
- u16 offset = params.counter_offsets_presp[i];
-
- if (offset >= params.beacon_csa.probe_resp_len) {
- err = -EINVAL;
- goto free;
- }
-
- if (params.beacon_csa.probe_resp[offset] !=
- params.count) {
- err = -EINVAL;
- goto free;
- }
- }
- }
skip_beacons:
err = nl80211_parse_chandef(rdev, info, &params.chandef);
@@ -10269,14 +10228,7 @@ skip_beacons:
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
- if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
- err = nl80211_parse_punct_bitmap(rdev, info,
- &params.chandef,
- &params.punct_bitmap);
- if (err)
- goto free;
- }
-
+ params.link_id = link_id;
err = rdev_channel_switch(rdev, dev, &params);
free:
@@ -10649,13 +10601,6 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
return res;
}
-static bool nl80211_valid_wpa_versions(u32 wpa_versions)
-{
- return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
- NL80211_WPA_VERSION_2 |
- NL80211_WPA_VERSION_3));
-}
-
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -10881,12 +10826,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
- if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) {
+ if (info->attrs[NL80211_ATTR_WPA_VERSIONS])
settings->wpa_versions =
nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]);
- if (!nl80211_valid_wpa_versions(settings->wpa_versions))
- return -EINVAL;
- }
if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
void *data;
@@ -11101,6 +11043,15 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
sizeof(req.s1g_capa));
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_ASSOC_SPP_AMSDU])) {
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT)) {
+ GENL_SET_ERR_MSG(info, "SPP A-MSDUs not supported");
+ return -EINVAL;
+ }
+ req.flags |= ASSOC_REQ_SPP_AMSDU;
+ }
+
req.link_id = nl80211_link_id_or_invalid(info->attrs);
if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
@@ -11226,7 +11177,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
struct nlattr *link;
int rem = 0;
- err = cfg80211_mlme_assoc(rdev, dev, &req);
+ err = cfg80211_mlme_assoc(rdev, dev, &req,
+ info->extack);
if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
dev->ieee80211_ptr->conn_owner_nlportid =
@@ -12674,23 +12626,12 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
params.buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
params.len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
- if (info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]) {
- int len = nla_len(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
- int i;
-
- if (len % sizeof(u16))
- return -EINVAL;
-
- params.n_csa_offsets = len / sizeof(u16);
- params.csa_offsets =
- nla_data(info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX]);
-
- /* check that all the offsets fit the frame */
- for (i = 0; i < params.n_csa_offsets; i++) {
- if (params.csa_offsets[i] >= params.len)
- return -EINVAL;
- }
- }
+ err = nl80211_parse_counter_offsets(rdev, NULL, params.len, -1,
+ info->attrs[NL80211_ATTR_CSA_C_OFFSETS_TX],
+ &params.csa_offsets,
+ &params.n_csa_offsets);
+ if (err)
+ return err;
if (!params.dont_wait_for_ack) {
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -16827,6 +16768,10 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_del_station,
.flags = GENL_UNS_ADMIN_PERM,
+ /* cannot use NL80211_FLAG_MLO_VALID_LINK_ID, depends on
+ * whether MAC address is passed or not. If MAC address is
+ * passed, then even during MLO, link ID is not required.
+ */
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
},
{
@@ -19397,7 +19342,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
- u8 count, bool quiet, u16 punct_bitmap)
+ u8 count, bool quiet)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct sk_buff *msg;
@@ -19431,9 +19376,6 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
- if (nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, punct_bitmap))
- goto nla_put_failure;
-
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -19446,7 +19388,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id, u16 punct_bitmap)
+ unsigned int link_id)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19455,7 +19397,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap);
+ trace_cfg80211_ch_switch_notify(dev, chandef, link_id);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -19484,15 +19426,14 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
cfg80211_sched_dfs_chan_update(rdev);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
- NL80211_CMD_CH_SWITCH_NOTIFY, 0, false,
- punct_bitmap);
+ NL80211_CMD_CH_SWITCH_NOTIFY, 0, false);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
- bool quiet, u16 punct_bitmap)
+ bool quiet)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19501,13 +19442,12 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
lockdep_assert_wiphy(wdev->wiphy);
WARN_INVALID_LINK_ID(wdev, link_id);
- trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id,
- punct_bitmap);
+ trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
- count, quiet, punct_bitmap);
+ count, quiet);
}
EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
@@ -19884,6 +19824,11 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS))
goto free_msg;
+ if (wakeup->unprot_deauth_disassoc &&
+ nla_put_flag(msg,
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC))
+ goto free_msg;
+
if (wakeup->packet) {
u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
u32 len_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN;
@@ -20164,9 +20109,26 @@ int cfg80211_external_auth_request(struct net_device *dev,
if (!hdr)
goto nla_put_failure;
+ /* Some historical mistakes in drivers <-> userspace interface (notably
+ * between drivers and wpa_supplicant) led to a big-endian conversion
+ * being needed on NL80211_ATTR_AKM_SUITES _only_ when its value is
+ * WLAN_AKM_SUITE_SAE. This is now fixed on userspace side, but for the
+ * benefit of older wpa_supplicant versions, send this particular value
+ * in big-endian. Note that newer wpa_supplicant will also detect this
+ * particular value in big endian still, so it all continues to work.
+ */
+ if (params->key_mgmt_suite == WLAN_AKM_SUITE_SAE) {
+ if (nla_put_be32(msg, NL80211_ATTR_AKM_SUITES,
+ cpu_to_be32(WLAN_AKM_SUITE_SAE)))
+ goto nla_put_failure;
+ } else {
+ if (nla_put_u32(msg, NL80211_ATTR_AKM_SUITES,
+ params->key_mgmt_suite))
+ goto nla_put_failure;
+ }
+
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) ||
nla_put_u32(msg, NL80211_ATTR_EXTERNAL_AUTH_ACTION,
params->action) ||
nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) ||
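
A worked example of the endianness quirk described above (suite value per
ieee80211.h):

	/*
	 * WLAN_AKM_SUITE_SAE is 0x000fac08. nla_put_be32() always emits
	 * the bytes 00 0f ac 08, which is what the affected older
	 * wpa_supplicant versions expect; a plain nla_put_u32() on a
	 * little-endian host would emit 08 ac 0f 00 instead.
	 */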
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2741b626919a..753f8e9aa4b1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,7 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -57,6 +57,8 @@
#include <linux/verification.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
+#include <linux/units.h>
+
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
@@ -1289,20 +1291,17 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
u32 freq_khz)
{
-#define ONE_GHZ_IN_KHZ 1000000
/*
* From 802.11ad: directional multi-gigabit (DMG):
* Pertaining to operation in a frequency band containing a channel
* with the Channel starting frequency above 45 GHz.
*/
- u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
- 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+ u32 limit = freq_khz > 45 * KHZ_PER_GHZ ? 20 * KHZ_PER_GHZ : 2 * KHZ_PER_GHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
return true;
return false;
-#undef ONE_GHZ_IN_KHZ
}
/*
@@ -1595,10 +1594,10 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_EHT;
if (rd_flags & NL80211_RRF_DFS_CONCURRENT)
channel_flags |= IEEE80211_CHAN_DFS_CONCURRENT;
- if (rd_flags & NL80211_RRF_NO_UHB_VLP_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_VLP_CLIENT;
- if (rd_flags & NL80211_RRF_NO_UHB_AFC_CLIENT)
- channel_flags |= IEEE80211_CHAN_NO_UHB_AFC_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_VLP_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (rd_flags & NL80211_RRF_NO_6GHZ_AFC_CLIENT)
+ channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
if (rd_flags & NL80211_RRF_PSD)
channel_flags |= IEEE80211_CHAN_PSD;
return channel_flags;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2249b1a89d1c..7cf36b8d3ae7 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -77,45 +77,6 @@ MODULE_PARM_DESC(bss_entries_limit,
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
-/**
- * struct cfg80211_colocated_ap - colocated AP information
- *
- * @list: linked list to all colocated aPS
- * @bssid: BSSID of the reported AP
- * @ssid: SSID of the reported AP
- * @ssid_len: length of the ssid
- * @center_freq: frequency the reported AP is on
- * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
- * that operate in the same channel as the reported AP and that might be
- * detected by a STA receiving this frame, are transmitting unsolicited
- * Probe Response frames every 20 TUs
- * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
- * @same_ssid: the reported AP has the same SSID as the reporting AP
- * @multi_bss: the reported AP is part of a multiple BSSID set
- * @transmitted_bssid: the reported AP is the transmitting BSSID
- * @colocated_ess: all the APs that share the same ESS as the reported AP are
- * colocated and can be discovered via legacy bands.
- * @short_ssid_valid: short_ssid is valid and can be used
- * @short_ssid: the short SSID for this SSID
- * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP
- */
-struct cfg80211_colocated_ap {
- struct list_head list;
- u8 bssid[ETH_ALEN];
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- size_t ssid_len;
- u32 short_ssid;
- u32 center_freq;
- u8 unsolicited_probe:1,
- oct_recommended:1,
- same_ssid:1,
- multi_bss:1,
- transmitted_bssid:1,
- colocated_ess:1,
- short_ssid_valid:1;
- s8 psd_20;
-};
-
static void bss_free(struct cfg80211_internal_bss *bss)
{
struct cfg80211_bss_ies *ies;
@@ -566,7 +527,8 @@ static int cfg80211_calc_short_ssid(const struct cfg80211_bss_ies *ies,
return 0;
}
-static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
+VISIBLE_IF_CFG80211_KUNIT void
+cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
{
struct cfg80211_colocated_ap *ap, *tmp_ap;
@@ -575,6 +537,7 @@ static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
kfree(ap);
}
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_free_coloc_ap_list);
static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
const u8 *pos, u8 length,
@@ -648,104 +611,146 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
return 0;
}
-static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
- struct list_head *list)
+enum cfg80211_rnr_iter_ret {
+ RNR_ITER_CONTINUE,
+ RNR_ITER_BREAK,
+ RNR_ITER_ERROR,
+};
+
+static bool
+cfg80211_iter_rnr(const u8 *elems, size_t elems_len,
+ enum cfg80211_rnr_iter_ret
+ (*iter)(void *data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len),
+ void *iter_data)
{
- struct ieee80211_neighbor_ap_info *ap_info;
- const struct element *elem, *ssid_elem;
+ const struct element *rnr;
const u8 *pos, *end;
- u32 s_ssid_tmp;
- int n_coloc = 0, ret;
- LIST_HEAD(ap_list);
- ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
- if (ret)
- return 0;
+ for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
+ elems, elems_len) {
+ const struct ieee80211_neighbor_ap_info *info;
- for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT,
- ies->data, ies->len) {
- pos = elem->data;
- end = elem->data + elem->datalen;
+ pos = rnr->data;
+ end = rnr->data + rnr->datalen;
/* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (pos + sizeof(*ap_info) <= end) {
- enum nl80211_band band;
- int freq;
+ while (sizeof(*info) <= end - pos) {
u8 length, i, count;
+ u8 type;
- ap_info = (void *)pos;
- count = u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = ap_info->tbtt_info_len;
+ info = (void *)pos;
+ count = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_COUNT) +
+ 1;
+ length = info->tbtt_info_len;
- pos += sizeof(*ap_info);
+ pos += sizeof(*info);
- if (!ieee80211_operating_class_to_band(ap_info->op_class,
- &band))
- break;
+ if (count * length > end - pos)
+ return false;
- freq = ieee80211_channel_to_frequency(ap_info->channel,
- band);
+ type = u8_get_bits(info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE);
- if (end - pos < count * length)
- break;
+ for (i = 0; i < count; i++) {
+ switch (iter(iter_data, type, info,
+ pos, length)) {
+ case RNR_ITER_CONTINUE:
+ break;
+ case RNR_ITER_BREAK:
+ return true;
+ case RNR_ITER_ERROR:
+ return false;
+ }
- if (u8_get_bits(ap_info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE) !=
- IEEE80211_TBTT_INFO_TYPE_TBTT) {
- pos += count * length;
- continue;
+ pos += length;
}
+ }
- /* TBTT info must include bss param + BSSID +
- * (short SSID or same_ssid bit to be set).
- * ignore other options, and move to the
- * next AP info
- */
- if (band != NL80211_BAND_6GHZ ||
- !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9,
- bss_params) ||
- length == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
- length >= offsetofend(struct ieee80211_tbtt_info_ge_11,
- bss_params))) {
- pos += count * length;
- continue;
- }
+ if (pos != end)
+ return false;
+ }
- for (i = 0; i < count; i++) {
- struct cfg80211_colocated_ap *entry;
+ return true;
+}
- entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
- GFP_ATOMIC);
+struct colocated_ap_data {
+ const struct element *ssid_elem;
+ struct list_head ap_list;
+ u32 s_ssid_tmp;
+ int n_coloc;
+};
- if (!entry)
- goto error;
+static enum cfg80211_rnr_iter_ret
+cfg80211_parse_colocated_ap_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ struct colocated_ap_data *data = _data;
+ struct cfg80211_colocated_ap *entry;
+ enum nl80211_band band;
- entry->center_freq = freq;
+ if (type != IEEE80211_TBTT_INFO_TYPE_TBTT)
+ return RNR_ITER_CONTINUE;
- if (!cfg80211_parse_ap_info(entry, pos, length,
- ssid_elem,
- s_ssid_tmp)) {
- n_coloc++;
- list_add_tail(&entry->list, &ap_list);
- } else {
- kfree(entry);
- }
+ if (!ieee80211_operating_class_to_band(info->op_class, &band))
+ return RNR_ITER_CONTINUE;
- pos += length;
- }
- }
+ /* TBTT info must include bss param + BSSID + (short SSID or
+ * same_ssid bit to be set). Ignore other options, and move to
+ * the next AP info
+ */
+ if (band != NL80211_BAND_6GHZ ||
+ !(tbtt_info_len == offsetofend(struct ieee80211_tbtt_info_7_8_9,
+ bss_params) ||
+ tbtt_info_len == sizeof(struct ieee80211_tbtt_info_7_8_9) ||
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ bss_params)))
+ return RNR_ITER_CONTINUE;
+
+ entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN, GFP_ATOMIC);
+ if (!entry)
+ return RNR_ITER_ERROR;
+
+ entry->center_freq =
+ ieee80211_channel_to_frequency(info->channel, band);
+
+ if (!cfg80211_parse_ap_info(entry, tbtt_info, tbtt_info_len,
+ data->ssid_elem, data->s_ssid_tmp)) {
+ data->n_coloc++;
+ list_add_tail(&entry->list, &data->ap_list);
+ } else {
+ kfree(entry);
+ }
-error:
- if (pos != end) {
- cfg80211_free_coloc_ap_list(&ap_list);
- return 0;
- }
+ return RNR_ITER_CONTINUE;
+}
+
+VISIBLE_IF_CFG80211_KUNIT int
+cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list)
+{
+ struct colocated_ap_data data = {};
+ int ret;
+
+ INIT_LIST_HEAD(&data.ap_list);
+
+ ret = cfg80211_calc_short_ssid(ies, &data.ssid_elem, &data.s_ssid_tmp);
+ if (ret)
+ return 0;
+
+ if (!cfg80211_iter_rnr(ies->data, ies->len,
+ cfg80211_parse_colocated_ap_iter, &data)) {
+ cfg80211_free_coloc_ap_list(&data.ap_list);
+ return 0;
}
- list_splice_tail(&ap_list, list);
- return n_coloc;
+ list_splice_tail(&data.ap_list, list);
+ return data.n_coloc;
}
+EXPORT_SYMBOL_IF_CFG80211_KUNIT(cfg80211_parse_colocated_ap);
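
For reference, a minimal sketch of the iterator contract introduced above;
the counting callback below is hypothetical, not part of the patch:

	static enum cfg80211_rnr_iter_ret
	count_tbtt_iter(void *data, u8 type,
			const struct ieee80211_neighbor_ap_info *info,
			const u8 *tbtt_info, u8 tbtt_info_len)
	{
		unsigned int *n = data;

		/* count only TBTT-type entries, skip MLD-only ones */
		if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
			(*n)++;
		return RNR_ITER_CONTINUE;
	}

	/* cfg80211_iter_rnr(ies->data, ies->len, count_tbtt_iter, &n)
	 * returns true on a clean walk (RNR_ITER_BREAK also counts as
	 * success, stopping early); RNR_ITER_ERROR or a malformed RNR
	 * element makes it return false.
	 */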
static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
struct ieee80211_channel *chan,
@@ -1731,6 +1736,61 @@ static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known,
}
}
+static void cfg80211_check_stuck_ecsa(struct cfg80211_registered_device *rdev,
+ struct cfg80211_internal_bss *known,
+ const struct cfg80211_bss_ies *old)
+{
+ const struct ieee80211_ext_chansw_ie *ecsa;
+ const struct element *elem_new, *elem_old;
+ const struct cfg80211_bss_ies *new, *bcn;
+
+ if (known->pub.proberesp_ecsa_stuck)
+ return;
+
+ new = rcu_dereference_protected(known->pub.proberesp_ies,
+ lockdep_is_held(&rdev->bss_lock));
+ if (WARN_ON(!new))
+ return;
+
+ if (new->tsf - old->tsf < USEC_PER_SEC)
+ return;
+
+ elem_old = cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+ old->data, old->len);
+ if (!elem_old)
+ return;
+
+ elem_new = cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+ new->data, new->len);
+ if (!elem_new)
+ return;
+
+ bcn = rcu_dereference_protected(known->pub.beacon_ies,
+ lockdep_is_held(&rdev->bss_lock));
+ if (bcn &&
+ cfg80211_find_elem(WLAN_EID_EXT_CHANSWITCH_ANN,
+ bcn->data, bcn->len))
+ return;
+
+ if (elem_new->datalen != elem_old->datalen)
+ return;
+ if (elem_new->datalen < sizeof(struct ieee80211_ext_chansw_ie))
+ return;
+ if (memcmp(elem_new->data, elem_old->data, elem_new->datalen))
+ return;
+
+ ecsa = (void *)elem_new->data;
+
+ if (!ecsa->mode)
+ return;
+
+ if (ecsa->new_ch_num !=
+ ieee80211_frequency_to_channel(known->pub.channel->center_freq))
+ return;
+
+ known->pub.proberesp_ecsa_stuck = 1;
+}
+
static bool
cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
struct cfg80211_internal_bss *known,
@@ -1750,8 +1810,10 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
/* Override possible earlier Beacon frame IEs */
rcu_assign_pointer(known->pub.ies,
new->pub.proberesp_ies);
- if (old)
+ if (old) {
+ cfg80211_check_stuck_ecsa(rdev, known, old);
kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+ }
}
if (rcu_access_pointer(new->pub.beacon_ies)) {
@@ -2065,6 +2127,35 @@ struct cfg80211_inform_single_bss_data {
u64 cannot_use_reasons;
};
+static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
+ const u32 flags)
+{
+ const struct element *tmp;
+ struct ieee80211_he_operation *he_oper;
+
+ tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
+ if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
+ he_oper = (void *)&tmp->data[1];
+ he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+
+ if (!he_6ghz_oper)
+ return false;
+
+ switch (u8_get_bits(he_6ghz_oper->control,
+ IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ return true;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT);
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT);
+ }
+ }
+ return false;
+}
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_bss *
cfg80211_inform_single_bss_data(struct wiphy *wiphy,
@@ -2097,6 +2188,14 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!channel)
return NULL;
+ if (channel->band == NL80211_BAND_6GHZ &&
+ !cfg80211_6ghz_power_type_valid(data->ie, data->ielen,
+ channel->flags)) {
+ data->use_for = 0;
+ data->cannot_use_reasons =
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH;
+ }
+
memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN);
tmp.pub.channel = channel;
if (data->bss_source != BSS_SOURCE_STA_PROFILE)
@@ -2108,6 +2207,9 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
tmp.ts_boottime = drv_data->boottime_ns;
tmp.parent_tsf = drv_data->parent_tsf;
ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid);
+ tmp.pub.chains = drv_data->chains;
+ memcpy(tmp.pub.chain_signal, drv_data->chain_signal,
+ IEEE80211_MAX_CHAINS);
tmp.pub.use_for = data->use_for;
tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
@@ -2151,6 +2253,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
switch (data->ftype) {
case CFG80211_BSS_FTYPE_BEACON:
+ case CFG80211_BSS_FTYPE_S1G_BEACON:
ies->from_beacon = true;
fallthrough;
case CFG80211_BSS_FTYPE_UNKNOWN:
@@ -2544,77 +2647,168 @@ error:
return NULL;
}
+struct tbtt_info_iter_data {
+ const struct ieee80211_neighbor_ap_info *ap_info;
+ u8 param_ch_count;
+ u32 use_for;
+ u8 mld_id, link_id;
+};
+
+static enum cfg80211_rnr_iter_ret
+cfg802121_mld_ap_rnr_iter(void *_data, u8 type,
+ const struct ieee80211_neighbor_ap_info *info,
+ const u8 *tbtt_info, u8 tbtt_info_len)
+{
+ const struct ieee80211_rnr_mld_params *mld_params;
+ struct tbtt_info_iter_data *data = _data;
+ u8 link_id;
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
+ tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
+ mld_params))
+ mld_params = (void *)(tbtt_info +
+ offsetof(struct ieee80211_tbtt_info_ge_11,
+ mld_params));
+ else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
+ tbtt_info_len >= sizeof(struct ieee80211_rnr_mld_params))
+ mld_params = (void *)tbtt_info;
+ else
+ return RNR_ITER_CONTINUE;
+
+ link_id = le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+
+ if (data->mld_id != mld_params->mld_id)
+ return RNR_ITER_CONTINUE;
+
+ if (data->link_id != link_id)
+ return RNR_ITER_CONTINUE;
+
+ data->ap_info = info;
+ data->param_ch_count =
+ le16_get_bits(mld_params->params,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
+ data->use_for = NL80211_BSS_USE_FOR_ALL;
+ else
+ data->use_for = NL80211_BSS_USE_FOR_MLD_LINK;
+ return RNR_ITER_BREAK;
+}
+
static u8
-cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
- const struct ieee80211_neighbor_ap_info **ap_info,
- const u8 **tbtt_info)
+cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
+ const struct ieee80211_neighbor_ap_info **ap_info,
+ u8 *param_ch_count)
{
- const struct ieee80211_neighbor_ap_info *info;
- const struct element *rnr;
- const u8 *pos, *end;
+ struct tbtt_info_iter_data data = {
+ .mld_id = mld_id,
+ .link_id = link_id,
+ };
- for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT, ie, ielen) {
- pos = rnr->data;
- end = rnr->data + rnr->datalen;
+ cfg80211_iter_rnr(ie, ielen, cfg802121_mld_ap_rnr_iter, &data);
- /* RNR IE may contain more than one NEIGHBOR_AP_INFO */
- while (sizeof(*info) <= end - pos) {
- const struct ieee80211_rnr_mld_params *mld_params;
- u16 params;
- u8 length, i, count, mld_params_offset;
- u8 type, lid;
- u32 use_for;
+ *ap_info = data.ap_info;
+ *param_ch_count = data.param_ch_count;
- info = (void *)pos;
- count = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
- length = info->tbtt_info_len;
+ return data.use_for;
+}
- pos += sizeof(*info);
+static struct element *
+cfg80211_gen_reporter_rnr(struct cfg80211_bss *source_bss, bool is_mbssid,
+ bool same_mld, u8 link_id, u8 bss_change_count,
+ gfp_t gfp)
+{
+ const struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info ap_info;
+ struct ieee80211_tbtt_info_ge_11 tbtt_info;
+ u32 short_ssid;
+ const struct element *elem;
+ struct element *res;
- if (count * length > end - pos)
- return 0;
+ /*
+ * We only generate the RNR to permit ML lookups. For that we do not
+ * need an entry for the corresponding transmitting BSS; let's just
+ * skip it even though it would be easy to add.
+ */
+ if (!same_mld)
+ return NULL;
- type = u8_get_bits(info->tbtt_info_hdr,
- IEEE80211_AP_INFO_TBTT_HDR_TYPE);
+ /* We could use tx_data->ies if we change cfg80211_calc_short_ssid */
+ rcu_read_lock();
+ ies = rcu_dereference(source_bss->ies);
- if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
- length >=
- offsetofend(struct ieee80211_tbtt_info_ge_11,
- mld_params)) {
- mld_params_offset =
- offsetof(struct ieee80211_tbtt_info_ge_11, mld_params);
- use_for = NL80211_BSS_USE_FOR_ALL;
- } else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
- length >= sizeof(struct ieee80211_rnr_mld_params)) {
- mld_params_offset = 0;
- use_for = NL80211_BSS_USE_FOR_MLD_LINK;
- } else {
- pos += count * length;
- continue;
- }
+ ap_info.tbtt_info_len = offsetofend(typeof(tbtt_info), mld_params);
+ ap_info.tbtt_info_hdr =
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_TBTT,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE) |
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT);
- for (i = 0; i < count; i++) {
- mld_params = (void *)pos + mld_params_offset;
- params = le16_to_cpu(mld_params->params);
+ ap_info.channel = ieee80211_frequency_to_channel(source_bss->channel->center_freq);
- lid = u16_get_bits(params,
- IEEE80211_RNR_MLD_PARAMS_LINK_ID);
+ /* operating class */
+ elem = cfg80211_find_elem(WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ ies->data, ies->len);
+ if (elem && elem->datalen >= 1) {
+ ap_info.op_class = elem->data[0];
+ } else {
+ struct cfg80211_chan_def chandef;
- if (mld_id == mld_params->mld_id &&
- link_id == lid) {
- *ap_info = info;
- *tbtt_info = pos;
+ /* The AP is not providing us with anything to work with. So
+ * make up a somewhat reasonable operating class, but don't
+ * bother with it too much as no one will ever use the
+ * information.
+ */
+ cfg80211_chandef_create(&chandef, source_bss->channel,
+ NL80211_CHAN_NO_HT);
- return use_for;
- }
+ if (!ieee80211_chandef_to_operating_class(&chandef,
+ &ap_info.op_class))
+ goto out_unlock;
+ }
- pos += length;
- }
- }
+ /* Just set TBTT offset and PSD 20 to invalid/unknown */
+ tbtt_info.tbtt_offset = 255;
+ tbtt_info.psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+ memcpy(tbtt_info.bssid, source_bss->bssid, ETH_ALEN);
+ if (cfg80211_calc_short_ssid(ies, &elem, &short_ssid))
+ goto out_unlock;
+
+ rcu_read_unlock();
+
+ tbtt_info.short_ssid = cpu_to_le32(short_ssid);
+
+ tbtt_info.bss_params = IEEE80211_RNR_TBTT_PARAMS_SAME_SSID;
+
+ if (is_mbssid) {
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID;
+ tbtt_info.bss_params |= IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID;
}
- return 0;
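+	/* MLD ID 0 refers to the reporting AP's own MLD */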
+ tbtt_info.mld_params.mld_id = 0;
+ tbtt_info.mld_params.params =
+ le16_encode_bits(link_id, IEEE80211_RNR_MLD_PARAMS_LINK_ID) |
+ le16_encode_bits(bss_change_count,
+ IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+
+ res = kzalloc(struct_size(res, data,
+ sizeof(ap_info) + ap_info.tbtt_info_len),
+ gfp);
+ if (!res)
+ return NULL;
+
+ /* Copy the data */
+ res->id = WLAN_EID_REDUCED_NEIGHBOR_REPORT;
+ res->datalen = sizeof(ap_info) + ap_info.tbtt_info_len;
+ memcpy(res->data, &ap_info, sizeof(ap_info));
+ memcpy(res->data + sizeof(ap_info), &tbtt_info, ap_info.tbtt_info_len);
+
+ return res;
+
+out_unlock:
+ rcu_read_unlock();
+ return NULL;
}
static void
@@ -2630,25 +2824,25 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
.source_bss = source_bss,
.bss_source = BSS_SOURCE_STA_PROFILE,
};
+ struct element *reporter_rnr = NULL;
struct ieee80211_multi_link_elem *ml_elem;
struct cfg80211_mle *mle;
u16 control;
u8 ml_common_len;
- u8 *new_ie;
+ u8 *new_ie = NULL;
struct cfg80211_bss *bss;
- int mld_id;
+ u8 mld_id, reporter_link_id, bss_change_count;
u16 seen_links = 0;
- const u8 *pos;
u8 i;
- if (!ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1))
+ if (!ieee80211_mle_type_ok(elem->data + 1,
+ IEEE80211_ML_CONTROL_TYPE_BASIC,
+ elem->datalen - 1))
return;
- ml_elem = (void *)elem->data + 1;
+ ml_elem = (void *)(elem->data + 1);
control = le16_to_cpu(ml_elem->control);
- if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) !=
- IEEE80211_ML_CONTROL_TYPE_BASIC)
- return;
+ ml_common_len = ml_elem->variable[0];
/* Must be present when transmitted by an AP (in a probe response) */
if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) ||
@@ -2656,18 +2850,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
return;
- ml_common_len = ml_elem->variable[0];
-
- /* length + MLD MAC address + link ID info + BSS Params Change Count */
- pos = ml_elem->variable + 1 + 6 + 1 + 1;
-
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
- pos += 2;
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_EML_CAPA))
- pos += 2;
-
- /* MLD capabilities and operations */
- pos += 2;
+ reporter_link_id = ieee80211_mle_get_link_id(elem->data + 1);
+ bss_change_count = ieee80211_mle_get_bss_param_ch_cnt(elem->data + 1);
/*
* The MLD ID of the reporting AP is always zero. It is set if the AP
@@ -2675,32 +2859,35 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
 * is part of an MBSSID set and will be non-zero for ML Elements
 * relating to a nontransmitted BSS (matching the Multi-BSSID Index,
* Draft P802.11be_D3.2, 35.3.4.2)
*/
- if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) {
- mld_id = *pos;
- pos += 1;
- } else {
- mld_id = 0;
- }
-
- /* Extended MLD capabilities and operations */
- pos += 2;
+ mld_id = ieee80211_mle_get_mld_id(elem->data + 1);
/* Fully defrag the ML element for sta information/profile iteration */
mle = cfg80211_defrag_mle(elem, tx_data->ie, tx_data->ielen, gfp);
if (!mle)
return;
+ /* No point in doing anything if there is no per-STA profile */
+ if (!mle->sta_prof[0])
+ goto out;
+
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
if (!new_ie)
goto out;
+ reporter_rnr = cfg80211_gen_reporter_rnr(source_bss,
+ u16_get_bits(control,
+ IEEE80211_MLC_BASIC_PRES_MLD_ID),
+ mld_id == 0, reporter_link_id,
+ bss_change_count,
+ gfp);
+
for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) {
const struct ieee80211_neighbor_ap_info *ap_info;
enum nl80211_band band;
u32 freq;
const u8 *profile;
- const u8 *tbtt_info;
ssize_t profile_len;
+ u8 param_ch_count;
u8 link_id, use_for;
if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i],
@@ -2743,10 +2930,11 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
profile_len -= 2;
/* Find in RNR to look up channel information */
- use_for = cfg80211_tbtt_info_for_mld_ap(tx_data->ie,
- tx_data->ielen,
- mld_id, link_id,
- &ap_info, &tbtt_info);
+ use_for = cfg80211_rnr_info_for_mld_ap(tx_data->ie,
+ tx_data->ielen,
+ mld_id, link_id,
+ &ap_info,
+ &param_ch_count);
if (!use_for)
continue;
@@ -2789,7 +2977,8 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
continue;
/* Copy the Basic Multi-Link element including the common
- * information, and then fix up the link ID.
+ * information, and then fix up the link ID and BSS param
+ * change count.
* Note that the ML element length has been verified and we
* also checked that it contains the link ID.
*/
@@ -2800,10 +2989,21 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
sizeof(*ml_elem) + ml_common_len);
new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN] = link_id;
+ new_ie[data.ielen + sizeof(*ml_elem) + 1 + ETH_ALEN + 1] =
+ param_ch_count;
data.ielen += sizeof(*ml_elem) + ml_common_len;
- /* TODO: Add an RNR containing only the reporting AP */
+ if (reporter_rnr && (use_for & NL80211_BSS_USE_FOR_NORMAL)) {
+ if (data.ielen + sizeof(struct element) +
+ reporter_rnr->datalen > IEEE80211_MAX_DATA_LEN)
+ continue;
+
+ memcpy(new_ie + data.ielen, reporter_rnr,
+ sizeof(struct element) + reporter_rnr->datalen);
+ data.ielen += sizeof(struct element) +
+ reporter_rnr->datalen;
+ }
bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp);
if (!bss)
@@ -2812,6 +3012,7 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
}
out:
+ kfree(reporter_rnr);
kfree(new_ie);
kfree(mle);
}
@@ -2864,6 +3065,10 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
if (!res)
return NULL;
+ /* don't do any further MBSSID/ML handling for S1G */
+ if (ftype == CFG80211_BSS_FTYPE_S1G_BEACON)
+ return res;
+
cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
@@ -2872,59 +3077,22 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_inform_bss_data);
-static bool cfg80211_uhb_power_type_valid(const u8 *ie,
- size_t ielen,
- const u32 flags)
-{
- const struct element *tmp;
- struct ieee80211_he_operation *he_oper;
-
- tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
- if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
- const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
-
- he_oper = (void *)&tmp->data[1];
- he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
-
- if (!he_6ghz_oper)
- return false;
-
- switch (u8_get_bits(he_6ghz_oper->control,
- IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
- case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
- return true;
- case IEEE80211_6GHZ_CTRL_REG_SP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_AFC_CLIENT);
- case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
- return !(flags & IEEE80211_CHAN_NO_UHB_VLP_CLIENT);
- }
- }
- return false;
-}
-
-/* cfg80211_inform_bss_width_frame helper */
-static struct cfg80211_bss *
-cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
{
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct cfg80211_internal_bss tmp = {}, *res;
- struct cfg80211_bss_ies *ies;
- struct ieee80211_channel *channel;
- bool signal_valid;
+ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
struct ieee80211_ext *ext = NULL;
- u8 *bssid, *variable;
- u16 capability, beacon_int;
- size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
- int bss_type;
-
- BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
- offsetof(struct ieee80211_mgmt, u.beacon.variable));
-
- trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
+ enum cfg80211_bss_frame_type ftype;
+ u16 beacon_interval;
+ const u8 *bssid;
+ u16 capability;
+ const u8 *ie;
+ size_t ielen;
+ u64 tsf;
if (WARN_ON(!mgmt))
return NULL;
@@ -2932,9 +3100,10 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
if (WARN_ON(!wiphy))
return NULL;
- if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (data->signal < 0 || data->signal > 100)))
- return NULL;
+ BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
+ offsetof(struct ieee80211_mgmt, u.beacon.variable));
+
+ trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
ext = (void *) mgmt;
@@ -2948,32 +3117,17 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
return NULL;
ielen = len - min_hdr_len;
- variable = mgmt->u.probe_resp.variable;
- if (ext) {
- if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
- variable = ext->u.s1g_short_beacon.variable;
- else
- variable = ext->u.s1g_beacon.variable;
- }
-
- channel = cfg80211_get_bss_channel(wiphy, variable, ielen, data->chan);
- if (!channel)
- return NULL;
-
- if (channel->band == NL80211_BAND_6GHZ &&
- !cfg80211_uhb_power_type_valid(variable, ielen, channel->flags)) {
- data->restrict_use = 1;
- data->use_for = 0;
- data->cannot_use_reasons =
- NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH;
- }
-
+ ie = mgmt->u.probe_resp.variable;
if (ext) {
const struct ieee80211_s1g_bcn_compat_ie *compat;
const struct element *elem;
- elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
- variable, ielen);
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ ie = ext->u.s1g_short_beacon.variable;
+ else
+ ie = ext->u.s1g_beacon.variable;
+
+ elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT, ie, ielen);
if (!elem)
return NULL;
if (elem->datalen < sizeof(*compat))
@@ -2981,112 +3135,26 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
compat = (void *)elem->data;
bssid = ext->u.s1g_beacon.sa;
capability = le16_to_cpu(compat->compat_info);
- beacon_int = le16_to_cpu(compat->beacon_int);
+ beacon_interval = le16_to_cpu(compat->beacon_int);
} else {
bssid = mgmt->bssid;
- beacon_int = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
+ beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
}
- if (channel->band == NL80211_BAND_60GHZ) {
- bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK;
- if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP ||
- bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- } else {
- if (capability & WLAN_CAPABILITY_ESS)
- regulatory_hint_found_beacon(wiphy, channel, gfp);
- }
-
- ies = kzalloc(sizeof(*ies) + ielen, gfp);
- if (!ies)
- return NULL;
- ies->len = ielen;
- ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control) ||
- ieee80211_is_s1g_beacon(mgmt->frame_control);
- memcpy(ies->data, variable, ielen);
+ tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
if (ieee80211_is_probe_resp(mgmt->frame_control))
- rcu_assign_pointer(tmp.pub.proberesp_ies, ies);
+ ftype = CFG80211_BSS_FTYPE_PRESP;
+ else if (ext)
+ ftype = CFG80211_BSS_FTYPE_S1G_BEACON;
else
- rcu_assign_pointer(tmp.pub.beacon_ies, ies);
- rcu_assign_pointer(tmp.pub.ies, ies);
-
- memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
- tmp.pub.beacon_interval = beacon_int;
- tmp.pub.capability = capability;
- tmp.pub.channel = channel;
- tmp.pub.signal = data->signal;
- tmp.ts_boottime = data->boottime_ns;
- tmp.parent_tsf = data->parent_tsf;
- tmp.pub.chains = data->chains;
- memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
- ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
- tmp.pub.use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL;
- tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
-
- signal_valid = data->chan == channel;
- spin_lock_bh(&rdev->bss_lock);
- res = __cfg80211_bss_update(rdev, &tmp, signal_valid, jiffies);
- if (!res)
- goto drop;
-
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
-
- spin_unlock_bh(&rdev->bss_lock);
-
- trace_cfg80211_return_bss(&res->pub);
- /* __cfg80211_bss_update gives us a referenced result */
- return &res->pub;
-
-drop:
- spin_unlock_bh(&rdev->bss_lock);
- return NULL;
-}
-
-struct cfg80211_bss *
-cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
-{
- struct cfg80211_inform_single_bss_data inform_data = {
- .drv_data = data,
- .ie = mgmt->u.probe_resp.variable,
- .ielen = len - offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable),
- .use_for = data->restrict_use ?
- data->use_for :
- NL80211_BSS_USE_FOR_ALL,
- .cannot_use_reasons = data->cannot_use_reasons,
- };
- struct cfg80211_bss *res;
-
- res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
- len, gfp);
- if (!res)
- return NULL;
-
- /* don't do any further MBSSID/ML handling for S1G */
- if (ieee80211_is_s1g_beacon(mgmt->frame_control))
- return res;
-
- inform_data.ftype = ieee80211_is_beacon(mgmt->frame_control) ?
- CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
- memcpy(inform_data.bssid, mgmt->bssid, ETH_ALEN);
- inform_data.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- inform_data.beacon_interval =
- le16_to_cpu(mgmt->u.probe_resp.beacon_int);
+ ftype = CFG80211_BSS_FTYPE_BEACON;
- /* process each non-transmitting bss */
- cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp);
-
- cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp);
-
- return res;
+ return cfg80211_inform_bss_data(wiphy, data, ftype,
+ bssid, tsf, capability,
+ beacon_interval, ie, ielen,
+ gfp);
}
EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 195c8532734b..82e3ce42206c 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -209,7 +209,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev,
if (!req.bss) {
err = -ENOENT;
} else {
- err = cfg80211_mlme_assoc(rdev, wdev->netdev, &req);
+ err = cfg80211_mlme_assoc(rdev, wdev->netdev,
+ &req, NULL);
cfg80211_put_bss(&rdev->wiphy, req.bss);
}
diff --git a/net/wireless/tests/Makefile b/net/wireless/tests/Makefile
index 1f6622fcb758..c364e63b508e 100644
--- a/net/wireless/tests/Makefile
+++ b/net/wireless/tests/Makefile
@@ -1,3 +1,3 @@
-cfg80211-tests-y += module.o fragmentation.o scan.o util.o
+cfg80211-tests-y += module.o fragmentation.o scan.o util.o chan.o
obj-$(CONFIG_CFG80211_KUNIT_TEST) += cfg80211-tests.o
diff --git a/net/wireless/tests/chan.c b/net/wireless/tests/chan.c
new file mode 100644
index 000000000000..d02258ac2dab
--- /dev/null
+++ b/net/wireless/tests/chan.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for channel helper functions
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static struct ieee80211_channel chan_6ghz_1 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5955,
+};
+
+static struct ieee80211_channel chan_6ghz_5 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 5975,
+};
+
+static struct ieee80211_channel chan_6ghz_105 = {
+ .band = NL80211_BAND_6GHZ,
+ .center_freq = 6475,
+};
+
+static const struct chandef_compat_case {
+ const char *desc;
+	/* leave c1 empty for tests of identical chandefs */
+ struct cfg80211_chan_def c1, c2;
+ /* we test both ways around, so c2 should always be the compat one */
+ bool compat;
+} chandef_compat_cases[] = {
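+	/*
+	 * The center_freq1 values below follow from the primary 20 MHz
+	 * channel at 5955: each doubling of the width shifts the center by
+	 * a further +10/+20/+40/+80 MHz.
+	 */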
+ {
+ .desc = "identical non-HT",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20_NOHT,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 20 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 40 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_40,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 80 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_80,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 160 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "identical 320 MHz",
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+		.desc = "20 MHz in 320 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955 + 10 + 20 + 40 + 80,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "different 20 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_1,
+ .center_freq1 = 5955,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_20,
+ .chan = &chan_6ghz_5,
+ .center_freq1 = 5975,
+ },
+ },
+ {
+ .desc = "different primary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 150,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ },
+ },
+ {
+ /* similar to previous test but one has lower BW */
+ .desc = "matching primary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ },
+ .compat = true,
+ },
+ {
+		.desc = "matching primary 160 MHz & punctured secondary 160 MHz",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xf,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ .punctured = 0xc0,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xc000,
+ },
+ .compat = true,
+ },
+ {
+ .desc = "matching primary 160 MHz & punctured not matching",
+ .c1 = {
+ .width = NL80211_CHAN_WIDTH_160,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 + 70,
+ .punctured = 0x80,
+ },
+ .c2 = {
+ .width = NL80211_CHAN_WIDTH_320,
+ .chan = &chan_6ghz_105,
+ .center_freq1 = 6475 - 10,
+ .punctured = 0xc000,
+ },
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(chandef_compat, chandef_compat_cases, desc)
+
+static void test_chandef_compat(struct kunit *test)
+{
+ const struct chandef_compat_case *params = test->param_value;
+ const struct cfg80211_chan_def *ret, *expect;
+ struct cfg80211_chan_def c1 = params->c1;
+
+	/* cases that leave c1 empty test two identical chandefs */
+ if (!params->c1.chan)
+ c1 = params->c2;
+
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&c1), true);
+ KUNIT_EXPECT_EQ(test, cfg80211_chandef_valid(&params->c2), true);
+
+ expect = params->compat ? &params->c2 : NULL;
+
+ ret = cfg80211_chandef_compatible(&c1, &params->c2);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+
+ if (!params->c1.chan)
+ expect = &c1;
+
+ ret = cfg80211_chandef_compatible(&params->c2, &c1);
+ KUNIT_EXPECT_PTR_EQ(test, ret, expect);
+}
+
+static struct kunit_case chandef_compat_test_cases[] = {
+ KUNIT_CASE_PARAM(test_chandef_compat, chandef_compat_gen_params),
+ {}
+};
+
+static struct kunit_suite chandef_compat = {
+ .name = "cfg80211-chandef-compat",
+ .test_cases = chandef_compat_test_cases,
+};
+
+kunit_test_suite(chandef_compat);
diff --git a/net/wireless/tests/scan.c b/net/wireless/tests/scan.c
index 77854161cd22..9f458be71659 100644
--- a/net/wireless/tests/scan.c
+++ b/net/wireless/tests/scan.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for inform_bss functions
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
@@ -406,9 +406,32 @@ static struct inform_bss_ml_sta_case {
const char *desc;
int mld_id;
bool sta_prof_vendor_elems;
+ bool include_oper_class;
+ bool nstr;
} inform_bss_ml_sta_cases[] = {
- { .desc = "no_mld_id", .mld_id = 0, .sta_prof_vendor_elems = false },
- { .desc = "mld_id_eq_1", .mld_id = 1, .sta_prof_vendor_elems = true },
+ {
+ .desc = "zero_mld_id",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ }, {
+ .desc = "zero_mld_id_with_oper_class",
+ .mld_id = 0,
+ .sta_prof_vendor_elems = false,
+ .include_oper_class = true,
+ }, {
+ .desc = "mld_id_eq_1",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ }, {
+ .desc = "mld_id_eq_1_with_oper_class",
+ .mld_id = 1,
+ .sta_prof_vendor_elems = true,
+ .include_oper_class = true,
+ }, {
+ .desc = "nstr",
+ .mld_id = 0,
+ .nstr = true,
+ },
};
KUNIT_ARRAY_PARAM_DESC(inform_bss_ml_sta, inform_bss_ml_sta_cases, desc)
@@ -440,7 +463,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
struct {
struct ieee80211_neighbor_ap_info info;
struct ieee80211_tbtt_info_ge_11 ap;
- } __packed rnr = {
+ } __packed rnr_normal = {
.info = {
.tbtt_info_hdr = u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT),
.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_ge_11),
@@ -460,6 +483,28 @@ static void test_inform_bss_ml_sta(struct kunit *test)
}
};
struct {
+ struct ieee80211_neighbor_ap_info info;
+ struct ieee80211_rnr_mld_params mld_params;
+ } __packed rnr_nstr = {
+ .info = {
+ .tbtt_info_hdr =
+ u8_encode_bits(0, IEEE80211_AP_INFO_TBTT_HDR_COUNT) |
+ u8_encode_bits(IEEE80211_TBTT_INFO_TYPE_MLD,
+ IEEE80211_AP_INFO_TBTT_HDR_TYPE),
+ .tbtt_info_len = sizeof(struct ieee80211_rnr_mld_params),
+ .op_class = 81,
+ .channel = 11,
+ },
+ .mld_params = {
+ .mld_id = params->mld_id,
+ .params =
+ le16_encode_bits(link_id,
+ IEEE80211_RNR_MLD_PARAMS_LINK_ID),
+ }
+ };
+ size_t rnr_len = params->nstr ? sizeof(rnr_nstr) : sizeof(rnr_normal);
+ void *rnr = params->nstr ? (void *)&rnr_nstr : (void *)&rnr_normal;
+ struct {
__le16 control;
u8 var_len;
u8 mld_mac_addr[ETH_ALEN];
@@ -498,7 +543,7 @@ static void test_inform_bss_ml_sta(struct kunit *test)
u16_encode_bits(link_id,
IEEE80211_MLE_STA_CONTROL_LINK_ID)),
.var_len = sizeof(sta_prof) - 2 - 2,
- .bssid = { *rnr.ap.bssid },
+ .bssid = { *rnr_normal.ap.bssid },
.beacon_int = cpu_to_le16(101),
.tsf_offset = cpu_to_le64(-123ll),
.capabilities = cpu_to_le16(0xdead),
@@ -515,9 +560,15 @@ static void test_inform_bss_ml_sta(struct kunit *test)
skb_put_u8(input, 4);
skb_put_data(input, "TEST", 4);
+ if (params->include_oper_class) {
+ skb_put_u8(input, WLAN_EID_SUPPORTED_REGULATORY_CLASSES);
+ skb_put_u8(input, 1);
+ skb_put_u8(input, 81);
+ }
+
skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
- skb_put_u8(input, sizeof(rnr));
- skb_put_data(input, &rnr, sizeof(rnr));
+ skb_put_u8(input, rnr_len);
+ skb_put_data(input, rnr, rnr_len);
/* build a multi-link element */
skb_put_u8(input, WLAN_EID_EXTENSION);
@@ -563,9 +614,10 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx.inform_bss_count, 2);
/* Check link_bss *****************************************************/
- link_bss = cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
- IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ link_bss = __cfg80211_get_bss(wiphy, NULL, sta_prof.bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY,
+ 0);
KUNIT_ASSERT_NOT_NULL(test, link_bss);
KUNIT_EXPECT_EQ(test, link_bss->signal, 0);
KUNIT_EXPECT_EQ(test, link_bss->beacon_interval,
@@ -576,21 +628,43 @@ static void test_inform_bss_ml_sta(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, link_bss->channel,
ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(2462)));
+ /* Test wiphy does not set WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY */
+ if (params->nstr) {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for, 0);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons,
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY);
+ KUNIT_EXPECT_NULL(test,
+ cfg80211_get_bss(wiphy, NULL, sta_prof.bssid,
+ NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY));
+ } else {
+ KUNIT_EXPECT_EQ(test, link_bss->use_for,
+ NL80211_BSS_USE_FOR_ALL);
+ KUNIT_EXPECT_EQ(test, link_bss->cannot_use_reasons, 0);
+ }
+
rcu_read_lock();
ies = rcu_dereference(link_bss->ies);
KUNIT_EXPECT_NOT_NULL(test, ies);
KUNIT_EXPECT_EQ(test, ies->tsf, tsf + le64_to_cpu(sta_prof.tsf_offset));
/* Resulting length should be:
* SSID (inherited) + RNR (inherited) + vendor element(s) +
+ * operating class (if requested) +
+ * generated RNR (if MLD ID == 0 and not NSTR) +
* MLE common info + MLE header and control
*/
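+	/* A generated RNR accounts for 22 bytes: the 2-byte element header,
+	 * the 4-byte neighbor AP info header and a 16-byte TBTT entry
+	 * (including the 3-byte MLD parameters).
+	 */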
if (params->sta_prof_vendor_elems)
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 160 + 2 + 165 +
+ 6 + 2 + rnr_len + 2 + 160 + 2 + 165 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
else
KUNIT_EXPECT_EQ(test, ies->len,
- 6 + 2 + sizeof(rnr) + 2 + 155 +
+ 6 + 2 + rnr_len + 2 + 155 +
+ (params->include_oper_class ? 3 : 0) +
+ (!params->mld_id && !params->nstr ? 22 : 0) +
mle_basic_common_info.var_len + 5);
rcu_read_unlock();
@@ -598,6 +672,172 @@ static void test_inform_bss_ml_sta(struct kunit *test)
cfg80211_put_bss(wiphy, link_bss);
}
+static struct cfg80211_parse_colocated_ap_case {
+ const char *desc;
+ u8 op_class;
+ u8 channel;
+ struct ieee80211_neighbor_ap_info info;
+ union {
+ struct ieee80211_tbtt_info_ge_11 tbtt_long;
+ struct ieee80211_tbtt_info_7_8_9 tbtt_short;
+ };
+ bool add_junk;
+ bool same_ssid;
+ bool valid;
+} cfg80211_parse_colocated_ap_cases[] = {
+ {
+ .desc = "wrong_band",
+ .info.op_class = 81,
+ .info.channel = 11,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "wrong_type",
+ /* IEEE80211_AP_INFO_TBTT_HDR_TYPE is in the least significant bits */
+ .info.tbtt_info_hdr = IEEE80211_TBTT_INFO_TYPE_MLD,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short",
+ .info.tbtt_info_len = 6,
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_invalid_len_short_mld",
+ .info.tbtt_info_len = 10,
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = false,
+ },
+ {
+ .desc = "colocated_non_mld",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = true,
+ },
+ {
+ .desc = "colocated_non_mld_invalid_bssid",
+ .info.tbtt_info_len = sizeof(struct ieee80211_tbtt_info_7_8_9),
+ .tbtt_short = {
+ .bssid = { 0xff, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP |
+ IEEE80211_RNR_TBTT_PARAMS_SAME_SSID,
+ },
+ .same_ssid = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .valid = true,
+ },
+ {
+		.desc = "colocated_mld_junk",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ },
+ .add_junk = true,
+ .valid = false,
+ },
+ {
+ .desc = "colocated_disabled_mld",
+ .tbtt_long = {
+ .bssid = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
+ .bss_params = IEEE80211_RNR_TBTT_PARAMS_COLOC_AP,
+ .mld_params.params = cpu_to_le16(IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK),
+ },
+ .valid = false,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(cfg80211_parse_colocated_ap, cfg80211_parse_colocated_ap_cases, desc)
+
+static void test_cfg80211_parse_colocated_ap(struct kunit *test)
+{
+ const struct cfg80211_parse_colocated_ap_case *params = test->param_value;
+ struct sk_buff *input = kunit_zalloc_skb(test, 1024, GFP_KERNEL);
+ struct cfg80211_bss_ies *ies;
+ struct ieee80211_neighbor_ap_info info;
+ LIST_HEAD(coloc_ap_list);
+ int count;
+
+ KUNIT_ASSERT_NOT_NULL(test, input);
+
+ info = params->info;
+
+ /* Reasonable values for a colocated AP */
+ if (!info.tbtt_info_len)
+ info.tbtt_info_len = sizeof(params->tbtt_long);
+ if (!info.op_class)
+ info.op_class = 131;
+ if (!info.channel)
+ info.channel = 33;
+	/* Zero is the correct default for .tbtt_info_hdr (one entry, TBTT type) */
+
+ skb_put_u8(input, WLAN_EID_SSID);
+ skb_put_u8(input, 4);
+ skb_put_data(input, "TEST", 4);
+
+ skb_put_u8(input, WLAN_EID_REDUCED_NEIGHBOR_REPORT);
+ skb_put_u8(input, sizeof(info) + info.tbtt_info_len + (params->add_junk ? 3 : 0));
+ skb_put_data(input, &info, sizeof(info));
+ skb_put_data(input, &params->tbtt_long, info.tbtt_info_len);
+
+ if (params->add_junk)
+ skb_put_data(input, "123", 3);
+
+	ies = kunit_kzalloc(test, struct_size(ies, data, input->len), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, ies);
+ ies->len = input->len;
+ memcpy(ies->data, input->data, input->len);
+
+ count = cfg80211_parse_colocated_ap(ies, &coloc_ap_list);
+
+ KUNIT_EXPECT_EQ(test, count, params->valid);
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&coloc_ap_list), params->valid);
+
+ if (params->valid && !list_empty(&coloc_ap_list)) {
+ struct cfg80211_colocated_ap *ap;
+
+ ap = list_first_entry(&coloc_ap_list, typeof(*ap), list);
+ if (info.tbtt_info_len <= sizeof(params->tbtt_short))
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_short.bssid, ETH_ALEN);
+ else
+ KUNIT_EXPECT_MEMEQ(test, ap->bssid, params->tbtt_long.bssid, ETH_ALEN);
+
+ if (params->same_ssid) {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 4);
+ KUNIT_EXPECT_MEMEQ(test, ap->ssid, "TEST", 4);
+ } else {
+ KUNIT_EXPECT_EQ(test, ap->ssid_len, 0);
+ }
+ }
+
+ cfg80211_free_coloc_ap_list(&coloc_ap_list);
+}
+
static struct kunit_case gen_new_ie_test_cases[] = {
KUNIT_CASE_PARAM(test_gen_new_ie, gen_new_ie_gen_params),
KUNIT_CASE(test_gen_new_ie_malformed),
@@ -623,3 +863,16 @@ static struct kunit_suite inform_bss = {
};
kunit_test_suite(inform_bss);
+
+static struct kunit_case scan_6ghz_cases[] = {
+ KUNIT_CASE_PARAM(test_cfg80211_parse_colocated_ap,
+ cfg80211_parse_colocated_ap_gen_params),
+ {}
+};
+
+static struct kunit_suite scan_6ghz = {
+ .name = "cfg80211-scan-6ghz",
+ .test_cases = scan_6ghz_cases,
+};
+
+kunit_test_suite(scan_6ghz);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 1f374c8a17a5..361331c29116 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
+ */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cfg80211
@@ -135,7 +140,8 @@
__field(u32, width) \
__field(u32, center_freq1) \
__field(u32, freq1_offset) \
- __field(u32, center_freq2)
+ __field(u32, center_freq2) \
+ __field(u16, punctured)
#define CHAN_DEF_ASSIGN(chandef) \
do { \
if ((chandef) && (chandef)->chan) { \
@@ -148,6 +154,7 @@
__entry->center_freq1 = (chandef)->center_freq1;\
__entry->freq1_offset = (chandef)->freq1_offset;\
__entry->center_freq2 = (chandef)->center_freq2;\
+ __entry->punctured = (chandef)->punctured; \
} else { \
__entry->band = 0; \
__entry->control_freq = 0; \
@@ -156,14 +163,15 @@
__entry->center_freq1 = 0; \
__entry->freq1_offset = 0; \
__entry->center_freq2 = 0; \
+ __entry->punctured = 0; \
} \
} while (0)
#define CHAN_DEF_PR_FMT \
- "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u"
+ "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u, punct: 0x%x"
#define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \
__entry->freq_offset, __entry->width, \
__entry->center_freq1, __entry->freq1_offset, \
- __entry->center_freq2
+ __entry->center_freq2, __entry->punctured
#define FILS_AAD_ASSIGN(fa) \
do { \
@@ -859,6 +867,7 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ENTRY(sta_mac)
__field(u8, subtype)
__field(u16, reason_code)
+ __field(int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -866,11 +875,13 @@ DECLARE_EVENT_CLASS(station_del,
MAC_ASSIGN(sta_mac, params->mac);
__entry->subtype = params->subtype;
__entry->reason_code = params->reason_code;
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM"
- ", subtype: %u, reason_code: %u",
+ ", subtype: %u, reason_code: %u, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac,
- __entry->subtype, __entry->reason_code)
+ __entry->subtype, __entry->reason_code,
+ __entry->link_id)
);
DEFINE_EVENT(station_del, rdev_del_station,
@@ -2324,6 +2335,7 @@ TRACE_EVENT(rdev_channel_switch,
__field(u8, count)
__dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon)
__dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp)
+ __field(u8, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -2341,11 +2353,13 @@ TRACE_EVENT(rdev_channel_switch,
memcpy(__get_dynamic_array(pres_ofs),
params->counter_offsets_presp,
params->n_counter_offsets_presp * sizeof(u16));
+ __entry->link_id = params->link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", block_tx: %d, count: %u, radar_required: %d",
+ ", block_tx: %d, count: %u, radar_required: %d, link_id: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->block_tx, __entry->count, __entry->radar_required)
+ __entry->block_tx, __entry->count, __entry->radar_required,
+ __entry->link_id)
);
TRACE_EVENT(rdev_set_qos_map,
@@ -3267,47 +3281,39 @@ TRACE_EVENT(cfg80211_chandef_dfs_required,
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_ch_switch_started_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- unsigned int link_id,
- u16 punct_bitmap),
- TP_ARGS(netdev, chandef, link_id, punct_bitmap),
+ unsigned int link_id),
+ TP_ARGS(netdev, chandef, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
- __field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
- __entry->punct_bitmap = punct_bitmap;
),
- TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
- NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
- __entry->punct_bitmap)
+ TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
+ NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
);
TRACE_EVENT(cfg80211_radar_event,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d1ce3bee2797..379f742fd741 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2073,6 +2073,82 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
}
EXPORT_SYMBOL(ieee80211_operating_class_to_band);
+bool ieee80211_operating_class_to_chandef(u8 operating_class,
+ struct ieee80211_channel *chan,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 control_freq, offset = 0;
+ enum nl80211_band band;
+
+ if (!ieee80211_operating_class_to_band(operating_class, &band) ||
+ !chan || band != chan->band)
+ return false;
+
+ control_freq = chan->center_freq;
+ chandef->chan = chan;
+
+ if (control_freq >= 5955)
+ offset = control_freq - 5955;
+ else if (control_freq >= 5745)
+ offset = control_freq - 5745;
+ else if (control_freq >= 5180)
+ offset = control_freq - 5180;
+ offset /= 20;
+
+ switch (operating_class) {
+ case 81: /* 2 GHz band; 20 MHz; channels 1..13 */
+ case 82: /* 2 GHz band; 20 MHz; channel 14 */
+ case 115: /* 5 GHz band; 20 MHz; channels 36,40,44,48 */
+ case 118: /* 5 GHz band; 20 MHz; channels 52,56,60,64 */
+ case 121: /* 5 GHz band; 20 MHz; channels 100..144 */
+ case 124: /* 5 GHz band; 20 MHz; channels 149,153,157,161 */
+ case 125: /* 5 GHz band; 20 MHz; channels 149..177 */
+	case 131: /* 6 GHz band; 20 MHz; channels 1..233 */
+ case 136: /* 6 GHz band; 20 MHz; channel 2 */
+ chandef->center_freq1 = control_freq;
+ chandef->width = NL80211_CHAN_WIDTH_20;
+ return true;
+ case 83: /* 2 GHz band; 40 MHz; channels 1..9 */
+ case 116: /* 5 GHz band; 40 MHz; channels 36,44 */
+ case 119: /* 5 GHz band; 40 MHz; channels 52,60 */
+ case 122: /* 5 GHz band; 40 MHz; channels 100,108,116,124,132,140 */
+ case 126: /* 5 GHz band; 40 MHz; channels 149,157,165,173 */
+ chandef->center_freq1 = control_freq + 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 84: /* 2 GHz band; 40 MHz; channels 5..13 */
+ case 117: /* 5 GHz band; 40 MHz; channels 40,48 */
+ case 120: /* 5 GHz band; 40 MHz; channels 56,64 */
+ case 123: /* 5 GHz band; 40 MHz; channels 104,112,120,128,136,144 */
+ case 127: /* 5 GHz band; 40 MHz; channels 153,161,169,177 */
+ chandef->center_freq1 = control_freq - 10;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+	case 132: /* 6 GHz band; 40 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 10 - (offset & 1) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_40;
+ return true;
+ case 128: /* 5 GHz band; 80 MHz; channels 36..64,100..144,149..177 */
+ case 133: /* 6 GHz band; 80 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 30 - (offset & 3) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_80;
+ return true;
+ case 129: /* 5 GHz band; 160 MHz; channels 36..64,100..144,149..177 */
+ case 134: /* 6 GHz band; 160 MHz; channels 1,5,..,229 */
+ chandef->center_freq1 = control_freq + 70 - (offset & 7) * 20;
+ chandef->width = NL80211_CHAN_WIDTH_160;
+ return true;
+ case 130: /* 5 GHz band; 80+80 MHz; channels 36..64,100..144,149..177 */
+ case 135: /* 6 GHz band; 80+80 MHz; channels 1,5,..,229 */
+ /* The center_freq2 of 80+80 MHz is unknown */
+ case 137: /* 6 GHz band; 320 MHz; channels 1,5,..,229 */
+ /* 320-1 or 320-2 channelization is unknown */
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(ieee80211_operating_class_to_chandef);
+
bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class)
{
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index 68729aa3a5d5..dc72302cbd07 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -17,8 +17,6 @@ config X25
if you want that) and the lower level data link layer protocol LAPB
(say Y to "LAPB Data Link Driver" below if you want that).
- You can read more about X.25 at <https://www.sangoma.com/tutorials/x25/> and
- <http://docwiki.cisco.com/wiki/X.25>.
Information about X.25 for Linux is contained in the files
<file:Documentation/networking/x25.rst> and
<file:Documentation/networking/x25-iface.rst>.
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 9f13aa3353e3..3404d076a8a3 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -167,8 +167,10 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
contd = XDP_PKT_CONTD;
err = __xsk_rcv_zc(xs, xskb, len, contd);
- if (err || likely(!frags))
- goto out;
+ if (err)
+ goto err;
+ if (likely(!frags))
+ return 0;
xskb_list = &xskb->pool->xskb_list;
list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
@@ -177,11 +179,13 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
len = pos->xdp.data_end - pos->xdp.data;
err = __xsk_rcv_zc(xs, pos, len, contd);
if (err)
- return err;
+ goto err;
list_del(&pos->xskb_list_node);
}
-out:
+ return 0;
+err:
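+	/* drop the whole frame, including any frags already queued */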
+ xsk_buff_free(xdp);
return err;
}
@@ -309,10 +313,13 @@ static bool xsk_is_bound(struct xdp_sock *xs)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
+ struct net_device *dev = xdp->rxq->dev;
+ u32 qid = xdp->rxq->queue_index;
+
if (!xsk_is_bound(xs))
return -ENXIO;
- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
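+	/* the receive queue must have a pool sharing this socket's umem */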
+ if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
return -EINVAL;
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
@@ -718,7 +725,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
memcpy(vaddr, buffer, len);
kunmap_local(vaddr);
- skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
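+		/* account a full page of truesize and charge it to the socket */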
+ skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
+ refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
}
if (first_frag && desc->options & XDP_TX_METADATA) {
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 28711cc44ced..ce60ecd48a4d 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -555,6 +555,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
xskb->xdp.data_meta = xskb->xdp.data;
+ xskb->xdp.flags = 0;
if (pool->dma_need_sync) {
dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index d3b3f9e720b3..fe82e2d07300 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -10,6 +10,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6_stubs.h>
#endif
+#include <net/hotdata.h>
static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
struct sock *sk)
@@ -169,7 +170,8 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&ctx->out_queue) >=
+ READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
__skb_queue_tail(&ctx->out_queue, skb);
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 41533c631431..e6da7e8495c9 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -858,4 +858,5 @@ int xfrm_count_pfkey_enc_supported(void)
}
EXPORT_SYMBOL_GPL(xfrm_count_pfkey_enc_supported);
+MODULE_DESCRIPTION("XFRM Algorithm interface");
MODULE_LICENSE("GPL");
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 3784534c9185..653e51ae3964 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -407,7 +407,7 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct net_device *dev = x->xso.dev;
- if (!x->type_offload || x->encap)
+ if (!x->type_offload)
return false;
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index bd4ce21d76d7..161f535c8b94 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -21,6 +21,7 @@
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/dst_metadata.h>
+#include <net/hotdata.h>
#include "xfrm_inout.h"
@@ -764,7 +765,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
trans = this_cpu_ptr(&xfrm_trans_tasklet);
- if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
+ if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
return -ENOBUFS;
BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c
index 7d5e920141e9..5ea15037ebd1 100644
--- a/net/xfrm/xfrm_interface_bpf.c
+++ b/net/xfrm/xfrm_interface_bpf.c
@@ -93,10 +93,10 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_START(xfrm_ifc_kfunc_set)
BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info)
-BTF_SET8_END(xfrm_ifc_kfunc_set)
+BTF_KFUNCS_END(xfrm_ifc_kfunc_set)
static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 1a1f6613fda2..4df5c06e3ece 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -726,7 +726,7 @@ static int xfrmi_get_iflink(const struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- return xi->p.link;
+ return READ_ONCE(xi->p.link);
}
static const struct net_device_ops xfrmi_netdev_ops = {
@@ -951,12 +951,12 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
.get_link_net = xfrmi_get_link_net,
};
-static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
+ ASSERT_RTNL();
list_for_each_entry(net, net_exit_list, exit_list) {
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
struct xfrm_if __rcu **xip;
@@ -967,18 +967,16 @@ static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
for (xip = &xfrmn->xfrmi[i];
(xi = rtnl_dereference(*xip)) != NULL;
xip = &xi->next)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
if (xi)
- unregister_netdevice_queue(xi->dev, &list);
+ unregister_netdevice_queue(xi->dev, dev_to_kill);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations xfrmi_net_ops = {
- .exit_batch = xfrmi_exit_batch_net,
+ .exit_batch_rtnl = xfrmi_exit_batch_rtnl,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 662c83beb345..e5722c95b8bb 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -704,9 +704,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
struct xfrm_state *x = skb_dst(skb)->xfrm;
+ int family;
int err;
- switch (x->outer_mode.family) {
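+	/* for packet offload, the dst entry's family governs the outer headers */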
+ family = (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) ? x->outer_mode.family
+ : skb_dst(skb)->ops->family;
+
+ switch (family) {
case AF_INET:
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 53b7ce4a4db0..6affe5cd85d8 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2695,7 +2695,9 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
- family = xfrm[i]->props.family;
+ if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ family = xfrm[i]->props.family;
+
oif = fl->flowi_oif ? : fl->flowi_l3mdev;
dst = xfrm_dst_lookup(xfrm[i], tos, oif,
&saddr, &daddr, family, mark);
@@ -3417,7 +3419,7 @@ decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reve
}
fl4->flowi4_proto = flkeys->basic.ip_proto;
- fl4->flowi4_tos = flkeys->ip.tos;
+ fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
}
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fee9b5cf37a7..5f9bf8e5c933 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -52,6 +52,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);
+ xfrm_state_update_stats(net);
snmp_get_cpu_field_batch(buff, xfrm_mib_list,
net->mib.xfrm_statistics);
for (i = 0; xfrm_mib_list[i].name; i++)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index bda5327bf34d..0c306473a79d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -570,7 +570,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
int err = 0;
spin_lock(&x->lock);
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
@@ -1935,7 +1935,7 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
@@ -1957,6 +1957,19 @@ int xfrm_state_check_expire(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_check_expire);
+void xfrm_state_update_stats(struct net *net)
+{
+ struct xfrm_state *x;
+ int i;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
+ xfrm_dev_state_update_stats(x);
+ }
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+}
+
struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family)
diff --git a/net/xfrm/xfrm_state_bpf.c b/net/xfrm/xfrm_state_bpf.c
index 9e20d4a377f7..2248eda741f8 100644
--- a/net/xfrm/xfrm_state_bpf.c
+++ b/net/xfrm/xfrm_state_bpf.c
@@ -117,10 +117,10 @@ __bpf_kfunc void bpf_xdp_xfrm_state_release(struct xfrm_state *x)
__bpf_kfunc_end_defs();
-BTF_SET8_START(xfrm_state_kfunc_set)
+BTF_KFUNCS_START(xfrm_state_kfunc_set)
BTF_ID_FLAGS(func, bpf_xdp_get_xfrm_state, KF_RET_NULL | KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_xdp_xfrm_state_release, KF_RELEASE)
-BTF_SET8_END(xfrm_state_kfunc_set)
+BTF_KFUNCS_END(xfrm_state_kfunc_set)
static const struct btf_kfunc_id_set xfrm_state_xdp_kfunc_set = {
.owner = THIS_MODULE,
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ad01997c3aa9..810b520493f3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -902,7 +902,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
- xfrm_dev_state_update_curlft(x);
+ xfrm_dev_state_update_stats(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay);
@@ -2017,6 +2017,9 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
if (xp->xfrm_nr == 0)
return 0;
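+	/* vec[] above is a stack array of XFRM_MAX_DEPTH entries; reject
+	 * anything larger instead of overflowing it
+	 */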
+ if (xp->xfrm_nr > XFRM_MAX_DEPTH)
+ return -ENOBUFS;
+
for (i = 0; i < xp->xfrm_nr; i++) {
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
@@ -3888,5 +3891,6 @@ static void __exit xfrm_user_exit(void)
module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
+MODULE_DESCRIPTION("XFRM User interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index e457b3c7cb2f..96e09c6e8530 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -4,7 +4,7 @@
//! Network PHY device.
//!
-//! C headers: [`include/linux/phy.h`](../../../../../../../include/linux/phy.h).
+//! C headers: [`include/linux/phy.h`](srctree/include/linux/phy.h).
use crate::{bindings, error::*, prelude::*, str::CStr, types::Opaque};
@@ -16,7 +16,7 @@ use core::marker::PhantomData;
///
/// Some of PHY drivers access to the state of PHY's software state machine.
///
-/// [`enum phy_state`]: ../../../../../../../include/linux/phy.h
+/// [`enum phy_state`]: srctree/include/linux/phy.h
#[derive(PartialEq, Eq)]
pub enum DeviceState {
/// PHY device and driver are not ready for anything.
@@ -61,7 +61,7 @@ pub enum DuplexMode {
/// Referencing a `phy_device` using this struct asserts that you are in
/// a context where all methods defined on this struct are safe to call.
///
-/// [`struct phy_device`]: ../../../../../../../include/linux/phy.h
+/// [`struct phy_device`]: srctree/include/linux/phy.h
// During the calls to most functions in [`Driver`], the C side (`PHYLIB`) holds a lock that is
// unique for every instance of [`Device`]. `PHYLIB` uses a different serialization technique for
// [`Driver::resume`] and [`Driver::suspend`]: `PHYLIB` updates `phy_device`'s state with
@@ -486,7 +486,7 @@ impl<T: Driver> Adapter<T> {
///
/// `self.0` is always in a valid state.
///
-/// [`struct phy_driver`]: ../../../../../../../include/linux/phy.h
+/// [`struct phy_driver`]: srctree/include/linux/phy.h
#[repr(transparent)]
pub struct DriverVTable(Opaque<bindings::phy_driver>);
@@ -580,12 +580,12 @@ pub trait Driver {
/// Issues a PHY software reset.
fn soft_reset(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Probes the hardware to determine what abilities it has.
fn get_features(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Returns true if this is a suitable driver for the given phydev.
@@ -597,32 +597,32 @@ pub trait Driver {
/// Configures the advertisement and resets auto-negotiation
/// if auto-negotiation is enabled.
fn config_aneg(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Determines the negotiated speed and duplex.
fn read_status(_dev: &mut Device) -> Result<u16> {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Suspends the hardware, saving state if needed.
fn suspend(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Resumes the hardware, restoring state if needed.
fn resume(_dev: &mut Device) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD read function for reading a MMD register.
fn read_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16) -> Result<u16> {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Overrides the default MMD write function for writing a MMD register.
fn write_mmd(_dev: &mut Device, _devnum: u8, _regnum: u16, _val: u16) -> Result {
- Err(code::ENOTSUPP)
+ kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Callback for notification of link change.
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
index 7048bb3594d6..634e81d83efd 100644
--- a/samples/bpf/asm_goto_workaround.h
+++ b/samples/bpf/asm_goto_workaround.h
@@ -4,14 +4,14 @@
#define __ASM_GOTO_WORKAROUND_H
/*
- * This will bring in asm_volatile_goto and asm_inline macro definitions
+ * This will bring in asm_goto_output and asm_inline macro definitions
* if enabled by compiler and config options.
*/
#include <linux/types.h>
-#ifdef asm_volatile_goto
-#undef asm_volatile_goto
-#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
+#ifdef asm_goto_output
+#undef asm_goto_output
+#define asm_goto_output(x...) asm volatile("invalid use of asm_goto_output")
#endif
/*
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index d2fbcf963cdf..07ff471ed6ae 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -370,7 +370,7 @@ static void run_perf_test(int tasks)
static void fill_lpm_trie(void)
{
- struct bpf_lpm_trie_key *key;
+ struct bpf_lpm_trie_key_u8 *key;
unsigned long value = 0;
unsigned int i;
int r;
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
index 9d41db09c480..266fdd0b025d 100644
--- a/samples/bpf/xdp_router_ipv4_user.c
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -91,7 +91,7 @@ static int recv_msg(struct sockaddr_nl sock_addr, int sock)
static void read_route(struct nlmsghdr *nh, int nll)
{
char dsts[24], gws[24], ifs[16], dsts_len[24], metrics[24];
- struct bpf_lpm_trie_key *prefix_key;
+ struct bpf_lpm_trie_key_u8 *prefix_key;
struct rtattr *rt_attr;
struct rtmsg *rt_msg;
int rtm_family;
diff --git a/samples/cgroup/.gitignore b/samples/cgroup/.gitignore
new file mode 100644
index 000000000000..3a0161194cce
--- /dev/null
+++ b/samples/cgroup/.gitignore
@@ -0,0 +1,3 @@
+/cgroup_event_listener
+/memcg_event_listener
+
diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
index e2a6a69352df..81220390851a 100644
--- a/samples/ftrace/ftrace-direct-modify.c
+++ b/samples/ftrace/ftrace-direct-modify.c
@@ -24,6 +24,41 @@ extern void my_tramp2(void *);
static unsigned long my_ip = (unsigned long)schedule;
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:\n"
+" addi sp,sp,-2*"SZREG"\n"
+" "REG_S" t0,0*"SZREG"(sp)\n"
+" "REG_S" ra,1*"SZREG"(sp)\n"
+" call my_direct_func1\n"
+" "REG_L" t0,0*"SZREG"(sp)\n"
+" "REG_L" ra,1*"SZREG"(sp)\n"
+" addi sp,sp,2*"SZREG"\n"
+" jr t0\n"
+" .size my_tramp1, .-my_tramp1\n"
+" .type my_tramp2, @function\n"
+" .globl my_tramp2\n"
+
+" my_tramp2:\n"
+" addi sp,sp,-2*"SZREG"\n"
+" "REG_S" t0,0*"SZREG"(sp)\n"
+" "REG_S" ra,1*"SZREG"(sp)\n"
+" call my_direct_func2\n"
+" "REG_L" t0,0*"SZREG"(sp)\n"
+" "REG_L" ra,1*"SZREG"(sp)\n"
+" addi sp,sp,2*"SZREG"\n"
+" jr t0\n"
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
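
The new RISC-V trampolines hook into the same attach path this sample already uses on x86 (elided from the hunk); a sketch of that registration, per the register_ftrace_direct() API the sample is built around:

    static struct ftrace_ops direct;

    static int __init ftrace_direct_init(void)
    {
            /* filter on the traced ip, then point the ops at the trampoline */
            ftrace_set_filter_ip(&direct, my_ip, 0, 0);
            return register_ftrace_direct(&direct, (unsigned long)my_tramp1);
    }

The trampolines here save only t0 and ra because my_direct_func1/2 take no arguments; the -multi and -too variants below additionally preserve a0 (and a1/a2), since their handlers receive arguments in those registers.
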
diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
index 2e349834d63c..f943e40d57fd 100644
--- a/samples/ftrace/ftrace-direct-multi-modify.c
+++ b/samples/ftrace/ftrace-direct-multi-modify.c
@@ -22,6 +22,47 @@ void my_direct_func2(unsigned long ip)
extern void my_tramp1(void *);
extern void my_tramp2(void *);
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp1, @function\n"
+" .globl my_tramp1\n"
+" my_tramp1:\n"
+" addi sp,sp,-3*"SZREG"\n"
+" "REG_S" a0,0*"SZREG"(sp)\n"
+" "REG_S" t0,1*"SZREG"(sp)\n"
+" "REG_S" ra,2*"SZREG"(sp)\n"
+" mv a0,t0\n"
+" call my_direct_func1\n"
+" "REG_L" a0,0*"SZREG"(sp)\n"
+" "REG_L" t0,1*"SZREG"(sp)\n"
+" "REG_L" ra,2*"SZREG"(sp)\n"
+" addi sp,sp,3*"SZREG"\n"
+" jr t0\n"
+" .size my_tramp1, .-my_tramp1\n"
+
+" .type my_tramp2, @function\n"
+" .globl my_tramp2\n"
+" my_tramp2:\n"
+" addi sp,sp,-3*"SZREG"\n"
+" "REG_S" a0,0*"SZREG"(sp)\n"
+" "REG_S" t0,1*"SZREG"(sp)\n"
+" "REG_S" ra,2*"SZREG"(sp)\n"
+" mv a0,t0\n"
+" call my_direct_func2\n"
+" "REG_L" a0,0*"SZREG"(sp)\n"
+" "REG_L" t0,1*"SZREG"(sp)\n"
+" "REG_L" ra,2*"SZREG"(sp)\n"
+" addi sp,sp,3*"SZREG"\n"
+" jr t0\n"
+" .size my_tramp2, .-my_tramp2\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c
index 9243dbfe4d0c..aed6df2927ce 100644
--- a/samples/ftrace/ftrace-direct-multi.c
+++ b/samples/ftrace/ftrace-direct-multi.c
@@ -17,6 +17,31 @@ void my_direct_func(unsigned long ip)
extern void my_tramp(void *);
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:\n"
+" addi sp,sp,-3*"SZREG"\n"
+" "REG_S" a0,0*"SZREG"(sp)\n"
+" "REG_S" t0,1*"SZREG"(sp)\n"
+" "REG_S" ra,2*"SZREG"(sp)\n"
+" mv a0,t0\n"
+" call my_direct_func\n"
+" "REG_L" a0,0*"SZREG"(sp)\n"
+" "REG_L" t0,1*"SZREG"(sp)\n"
+" "REG_L" ra,2*"SZREG"(sp)\n"
+" addi sp,sp,3*"SZREG"\n"
+" jr t0\n"
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
index e39c3563ae4e..6ff546a5d7eb 100644
--- a/samples/ftrace/ftrace-direct-too.c
+++ b/samples/ftrace/ftrace-direct-too.c
@@ -19,6 +19,34 @@ void my_direct_func(struct vm_area_struct *vma, unsigned long address,
extern void my_tramp(void *);
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:\n"
+" addi sp,sp,-5*"SZREG"\n"
+" "REG_S" a0,0*"SZREG"(sp)\n"
+" "REG_S" a1,1*"SZREG"(sp)\n"
+" "REG_S" a2,2*"SZREG"(sp)\n"
+" "REG_S" t0,3*"SZREG"(sp)\n"
+" "REG_S" ra,4*"SZREG"(sp)\n"
+" call my_direct_func\n"
+" "REG_L" a0,0*"SZREG"(sp)\n"
+" "REG_L" a1,1*"SZREG"(sp)\n"
+" "REG_L" a2,2*"SZREG"(sp)\n"
+" "REG_L" t0,3*"SZREG"(sp)\n"
+" "REG_L" ra,4*"SZREG"(sp)\n"
+" addi sp,sp,5*"SZREG"\n"
+" jr t0\n"
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c
index 32c477da1e9a..ef0945670e1e 100644
--- a/samples/ftrace/ftrace-direct.c
+++ b/samples/ftrace/ftrace-direct.c
@@ -16,6 +16,30 @@ void my_direct_func(struct task_struct *p)
extern void my_tramp(void *);
+#ifdef CONFIG_RISCV
+#include <asm/asm.h>
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:\n"
+" addi sp,sp,-3*"SZREG"\n"
+" "REG_S" a0,0*"SZREG"(sp)\n"
+" "REG_S" t0,1*"SZREG"(sp)\n"
+" "REG_S" ra,2*"SZREG"(sp)\n"
+" call my_direct_func\n"
+" "REG_L" a0,0*"SZREG"(sp)\n"
+" "REG_L" t0,1*"SZREG"(sp)\n"
+" "REG_L" ra,2*"SZREG"(sp)\n"
+" addi sp,sp,3*"SZREG"\n"
+" jr t0\n"
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_RISCV */
+
#ifdef CONFIG_X86_64
#include <asm/ibt.h>
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 5a84b6443875..3ee8ecfb8c04 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -33,7 +33,7 @@ ld-option = $(success,$(LD) -v $(1))
# $(as-instr,<instr>)
# Return y if the assembler supports <instr>, n otherwise
-as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler-with-cpp -o /dev/null -)
+as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o /dev/null -)
# check if $(CC) and $(LD) exist
$(error-if,$(failure,command -v $(CC)),C compiler '$(CC)' not found)
diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
index 8fcb427405a6..92be0c9a13ee 100644
--- a/scripts/Makefile.compiler
+++ b/scripts/Makefile.compiler
@@ -38,7 +38,7 @@ as-option = $(call try-run,\
# Usage: aflags-y += $(call as-instr,instr,option1,option2)
as-instr = $(call try-run,\
- printf "%b\n" "$(1)" | $(CC) -Werror $(CLANG_FLAGS) $(KBUILD_AFLAGS) -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
+ printf "%b\n" "$(1)" | $(CC) -Werror $(CLANG_FLAGS) $(KBUILD_AFLAGS) -Wa$(comma)--fatal-warnings -c -x assembler-with-cpp -o "$$TMP" -,$(2),$(3))
# __cc-option
# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
diff --git a/scripts/Makefile.defconf b/scripts/Makefile.defconf
index ab271b2051a2..226ea3df3b4b 100644
--- a/scripts/Makefile.defconf
+++ b/scripts/Makefile.defconf
@@ -9,8 +9,8 @@
# Input config fragments without '.config' suffix
define merge_into_defconfig
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
- -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/$(1) \
- $(foreach config,$(2),$(srctree)/arch/$(ARCH)/configs/$(config).config)
+ -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+ $(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
endef
@@ -23,7 +23,7 @@ endef
# Input config fragments without '.config' suffix
define merge_into_defconfig_override
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
- -Q -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/$(1) \
- $(foreach config,$(2),$(srctree)/arch/$(ARCH)/configs/$(config).config)
+ -Q -m -O $(objtree) $(srctree)/arch/$(SRCARCH)/configs/$(1) \
+ $(foreach config,$(2),$(srctree)/arch/$(SRCARCH)/configs/$(config).config)
+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
endef
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index c9725685aa76..a9e552a1e910 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -82,15 +82,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
# Warn if there is an enum types mismatch
KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
-# backward compatibility
-KBUILD_EXTRA_WARN ?= $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)
-
-ifeq ("$(origin W)", "command line")
- KBUILD_EXTRA_WARN := $(W)
-endif
-
-export KBUILD_EXTRA_WARN
-
#
# W=1 - warnings which may be relevant and do not occur too often
#
@@ -106,7 +97,6 @@ KBUILD_CFLAGS += $(call cc-option, -Wunused-const-variable)
KBUILD_CFLAGS += $(call cc-option, -Wpacked-not-aligned)
KBUILD_CFLAGS += $(call cc-option, -Wformat-overflow)
KBUILD_CFLAGS += $(call cc-option, -Wformat-truncation)
-KBUILD_CFLAGS += $(call cc-option, -Wstringop-overflow)
KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation)
KBUILD_CPPFLAGS += -Wundef
@@ -122,7 +112,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
-KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
ifdef CONFIG_CC_IS_CLANG
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 1a965fe68e01..cd5b181060f1 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -83,8 +83,8 @@ dtb-$(CONFIG_OF_ALL_DTBS) += $(dtb-)
multi-dtb-y := $(call multi-search, $(dtb-y), .dtb, -dtbs)
# Primitive DTB compiled from *.dts
real-dtb-y := $(call real-search, $(dtb-y), .dtb, -dtbs)
-# Base DTB that overlay is applied onto (each first word of $(*-dtbs) expansion)
-base-dtb-y := $(foreach m, $(multi-dtb-y), $(firstword $(call suffix-search, $m, .dtb, -dtbs)))
+# Base DTB that overlay is applied onto
+base-dtb-y := $(filter %.dtb, $(call real-search, $(multi-dtb-y), .dtb, -dtbs))
always-y += $(dtb-y)
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 3addd1c0b989..a81dfb1f5181 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -4,27 +4,6 @@
include $(srctree)/scripts/Kbuild.include
include $(srctree)/scripts/Makefile.lib
-KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
-# Include only those top-level files that are needed by make, plus the GPL copy
-TAR_CONTENT := Documentation LICENSES arch block certs crypto drivers fs \
- include init io_uring ipc kernel lib mm net rust \
- samples scripts security sound tools usr virt \
- .config Makefile \
- Kbuild Kconfig COPYING $(wildcard localversion*)
-
-quiet_cmd_src_tar = TAR $(2).tar.gz
- cmd_src_tar = \
-if test "$(objtree)" != "$(srctree)"; then \
- echo >&2; \
- echo >&2 " ERROR:"; \
- echo >&2 " Building source tarball is not possible outside the"; \
- echo >&2 " kernel source tree. Don't set KBUILD_OUTPUT"; \
- echo >&2; \
- false; \
-fi ; \
-tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
- --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3)
-
# Git
# ---------------------------------------------------------------------------
@@ -130,8 +109,6 @@ debian-orig: linux.tar$(debian-orig-suffix) debian
cp $< ../$(orig-name); \
fi
-KBUILD_PKG_ROOTCMD ?= 'fakeroot -u'
-
PHONY += deb-pkg srcdeb-pkg bindeb-pkg
deb-pkg: private build-type := source,binary
@@ -146,7 +123,7 @@ deb-pkg srcdeb-pkg bindeb-pkg:
$(if $(findstring source, $(build-type)), \
--unsigned-source --compression=$(KDEB_SOURCE_COMPRESS)) \
$(if $(findstring binary, $(build-type)), \
- --rules-file='$(MAKE) -f debian/rules' --jobs=1 -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch), \
+ -R'$(MAKE) -f debian/rules' -j1 -a$$(cat debian/arch), \
--no-check-builddeps) \
$(DPKG_FLAGS))
@@ -157,9 +134,8 @@ snap-pkg:
rm -rf $(objtree)/snap
mkdir $(objtree)/snap
$(MAKE) clean
- $(call cmd,src_tar,$(KERNELPATH))
sed "s@KERNELRELEASE@$(KERNELRELEASE)@; \
- s@SRCTREE@$(shell realpath $(KERNELPATH).tar.gz)@" \
+ s@SRCTREE@$(abs_srctree)@" \
$(srctree)/scripts/package/snapcraft.template > \
$(objtree)/snap/snapcraft.yaml
cd $(objtree)/snap && \
diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
index 61b7dddedc46..4606944984ee 100755
--- a/scripts/bpf_doc.py
+++ b/scripts/bpf_doc.py
@@ -513,7 +513,7 @@ eBPF programs can have an associated license, passed along with the bytecode
instructions to the kernel when the programs are loaded. The format for that
string is identical to the one in use for kernel modules (Dual licenses, such
as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
-programs that are compatible with the GNU Privacy License (GPL).
+programs that are compatible with the GNU General Public License (GNU GPL).
In order to use such helpers, the eBPF program must be loaded with the correct
license string passed (via **attr**) to the **bpf**\\ () system call, and this
@@ -827,7 +827,7 @@ class PrinterHelpers(Printer):
print(' *{}{}'.format(' \t' if line else '', line))
print(' */')
- print('static %s %s(*%s)(' % (self.map_type(proto['ret_type']),
+ print('static %s %s(* const %s)(' % (self.map_type(proto['ret_type']),
proto['ret_star'], proto['name']), end='')
comma = ''
for i, a in enumerate(proto['args']):
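
With the added const, the helper stubs that bpf_doc.py writes into bpf_helper_defs.h become constant function pointers, so a stray assignment to one of them is now a compile error. Hedged example of the generated form (helper ID 1):

    static void *(* const bpf_map_lookup_elem)(void *map, const void *key) = (void *) 1;
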
diff --git a/scripts/check-uapi.sh b/scripts/check-uapi.sh
new file mode 100755
index 000000000000..955581735cb3
--- /dev/null
+++ b/scripts/check-uapi.sh
@@ -0,0 +1,573 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+# Script to check commits for UAPI backwards compatibility
+
+set -o errexit
+set -o pipefail
+
+print_usage() {
+ name=$(basename "$0")
+ cat << EOF
+$name - check for UAPI header stability across Git commits
+
+By default, the script will check to make sure the latest commit (or current
+dirty changes) did not introduce ABI changes when compared to HEAD^1. You can
+check against additional commit ranges with the -b and -p options.
+
+The script will not check UAPI headers for architectures other than the one
+defined in ARCH.
+
+Usage: $name [-b BASE_REF] [-p PAST_REF] [-j N] [-l ERROR_LOG] [-i] [-q] [-v]
+
+Options:
+ -b BASE_REF Base git reference to use for comparison. If unspecified or empty,
+ will use any dirty changes in tree to UAPI files. If there are no
+ dirty changes, HEAD will be used.
+ -p PAST_REF Compare BASE_REF to PAST_REF (e.g. -p v6.1). If unspecified or empty,
+ will use BASE_REF^1. Must be an ancestor of BASE_REF. Only headers
+ that exist on PAST_REF will be checked for compatibility.
+ -j JOBS Number of checks to run in parallel (default: number of CPU cores).
+ -l ERROR_LOG Write error log to file (default: no error log is generated).
+ -i Ignore ambiguous changes that may or may not break UAPI compatibility.
+ -q Quiet operation.
+ -v Verbose operation (print more information about each header being checked).
+
+Environmental args:
+ ABIDIFF Custom path to abidiff binary
+ CC C compiler (default is "gcc")
+ ARCH Target architecture for the UAPI check (default is host arch)
+
+Exit codes:
+ $SUCCESS) Success
+ $FAIL_ABI) ABI difference detected
+ $FAIL_PREREQ) Prerequisite not met
+EOF
+}
+
+readonly SUCCESS=0
+readonly FAIL_ABI=1
+readonly FAIL_PREREQ=2
+
+# Print to stderr
+eprintf() {
+ # shellcheck disable=SC2059
+ printf "$@" >&2
+}
+
+# Expand an array with a specific character (similar to Python string.join())
+join() {
+ local IFS="$1"
+ shift
+ printf "%s" "$*"
+}
+
+# Create abidiff suppressions
+gen_suppressions() {
+ # Common enum variant names which we don't want to worry about
+ # being shifted when new variants are added.
+ local -a enum_regex=(
+ ".*_AFTER_LAST$"
+ ".*_CNT$"
+ ".*_COUNT$"
+ ".*_END$"
+ ".*_LAST$"
+ ".*_MASK$"
+ ".*_MAX$"
+ ".*_MAX_BIT$"
+ ".*_MAX_BPF_ATTACH_TYPE$"
+ ".*_MAX_ID$"
+ ".*_MAX_SHIFT$"
+ ".*_NBITS$"
+ ".*_NETDEV_NUMHOOKS$"
+ ".*_NFT_META_IIFTYPE$"
+ ".*_NL80211_ATTR$"
+ ".*_NLDEV_NUM_OPS$"
+ ".*_NUM$"
+ ".*_NUM_ELEMS$"
+ ".*_NUM_IRQS$"
+ ".*_SIZE$"
+ ".*_TLSMAX$"
+ "^MAX_.*"
+ "^NUM_.*"
+ )
+
+ # Common padding field names which can be expanded into
+ # without worrying about users.
+ local -a padding_regex=(
+ ".*end$"
+ ".*pad$"
+ ".*pad[0-9]?$"
+ ".*pad_[0-9]?$"
+ ".*padding$"
+ ".*padding[0-9]?$"
+ ".*padding_[0-9]?$"
+ ".*res$"
+ ".*resv$"
+ ".*resv[0-9]?$"
+ ".*resv_[0-9]?$"
+ ".*reserved$"
+ ".*reserved[0-9]?$"
+ ".*reserved_[0-9]?$"
+ ".*rsvd[0-9]?$"
+ ".*unused$"
+ )
+
+ cat << EOF
+[suppress_type]
+ type_kind = enum
+ changed_enumerators_regexp = $(join , "${enum_regex[@]}")
+EOF
+
+ for p in "${padding_regex[@]}"; do
+ cat << EOF
+[suppress_type]
+ type_kind = struct
+ has_data_member_inserted_at = offset_of_first_data_member_regexp(${p})
+EOF
+ done
+
+if [ "$IGNORE_AMBIGUOUS_CHANGES" = "true" ]; then
+ cat << EOF
+[suppress_type]
+ type_kind = struct
+ has_data_member_inserted_at = end
+ has_size_change = yes
+EOF
+fi
+}
+
+# Check if git tree is dirty
+tree_is_dirty() {
+ ! git diff --quiet
+}
+
+# Get list of files installed in $ref
+get_file_list() {
+ local -r ref="$1"
+ local -r tree="$(get_header_tree "$ref")"
+
+ # Print all installed headers, filtering out ones that can't be compiled
+ find "$tree" -type f -name '*.h' -printf '%P\n' | grep -v -f "$INCOMPAT_LIST"
+}
+
+# Add to the list of incompatible headers
+add_to_incompat_list() {
+ local -r ref="$1"
+
+ # Start with the usr/include/Makefile to get a list of the headers
+ # that don't compile using this method.
+ if [ ! -f usr/include/Makefile ]; then
+ eprintf "error - no usr/include/Makefile present at %s\n" "$ref"
+ eprintf "Note: usr/include/Makefile was added in the v5.3 kernel release\n"
+ exit "$FAIL_PREREQ"
+ fi
+ {
+ # shellcheck disable=SC2016
+ printf 'all: ; @echo $(no-header-test)\n'
+ cat usr/include/Makefile
+ } | SRCARCH="$ARCH" make --always-make -f - | tr " " "\n" \
+ | grep -v "asm-generic" >> "$INCOMPAT_LIST"
+
+ # The makefile also skips all asm-generic files, but prints "asm-generic/%"
+ # which won't work for our grep match. Instead, print something grep will match.
+ printf "asm-generic/.*\.h\n" >> "$INCOMPAT_LIST"
+}
+
+# Compile the simple test app
+do_compile() {
+ local -r inc_dir="$1"
+ local -r header="$2"
+ local -r out="$3"
+ printf "int main(void) { return 0; }\n" | \
+ "$CC" -c \
+ -o "$out" \
+ -x c \
+ -O0 \
+ -std=c90 \
+ -fno-eliminate-unused-debug-types \
+ -g \
+ "-I${inc_dir}" \
+ -include "$header" \
+ -
+}
+
+# Run make headers_install
+run_make_headers_install() {
+ local -r ref="$1"
+ local -r install_dir="$(get_header_tree "$ref")"
+ make -j "$MAX_THREADS" ARCH="$ARCH" INSTALL_HDR_PATH="$install_dir" \
+ headers_install > /dev/null
+}
+
+# Install headers for both git refs
+install_headers() {
+ local -r base_ref="$1"
+ local -r past_ref="$2"
+
+ for ref in "$base_ref" "$past_ref"; do
+ printf "Installing user-facing UAPI headers from %s... " "${ref:-dirty tree}"
+ if [ -n "$ref" ]; then
+ git archive --format=tar --prefix="${ref}-archive/" "$ref" \
+ | (cd "$TMP_DIR" && tar xf -)
+ (
+ cd "${TMP_DIR}/${ref}-archive"
+ run_make_headers_install "$ref"
+ add_to_incompat_list "$ref" "$INCOMPAT_LIST"
+ )
+ else
+ run_make_headers_install "$ref"
+ add_to_incompat_list "$ref" "$INCOMPAT_LIST"
+ fi
+ printf "OK\n"
+ done
+ sort -u -o "$INCOMPAT_LIST" "$INCOMPAT_LIST"
+ sed -i -e '/^$/d' "$INCOMPAT_LIST"
+}
+
+# Print the path to the headers_install tree for a given ref
+get_header_tree() {
+ local -r ref="$1"
+ printf "%s" "${TMP_DIR}/${ref}/usr"
+}
+
+# Check file list for UAPI compatibility
+check_uapi_files() {
+ local -r base_ref="$1"
+ local -r past_ref="$2"
+ local -r abi_error_log="$3"
+
+ local passed=0;
+ local failed=0;
+ local -a threads=()
+ set -o errexit
+
+ printf "Checking changes to UAPI headers between %s and %s...\n" "$past_ref" "${base_ref:-dirty tree}"
+ # Loop over all UAPI headers that were installed by $past_ref (if they only exist on $base_ref,
+ # there's no way they're broken and no way to compare anyway)
+ while read -r file; do
+ if [ "${#threads[@]}" -ge "$MAX_THREADS" ]; then
+ if wait "${threads[0]}"; then
+ passed=$((passed + 1))
+ else
+ failed=$((failed + 1))
+ fi
+ threads=("${threads[@]:1}")
+ fi
+
+ check_individual_file "$base_ref" "$past_ref" "$file" &
+ threads+=("$!")
+ done < <(get_file_list "$past_ref")
+
+ for t in "${threads[@]}"; do
+ if wait "$t"; then
+ passed=$((passed + 1))
+ else
+ failed=$((failed + 1))
+ fi
+ done
+
+ if [ -n "$abi_error_log" ]; then
+ printf 'Generated by "%s %s" from git ref %s\n\n' \
+ "$0" "$*" "$(git rev-parse HEAD)" > "$abi_error_log"
+ fi
+
+ while read -r error_file; do
+ {
+ cat "$error_file"
+ printf "\n\n"
+ } | tee -a "${abi_error_log:-/dev/null}" >&2
+ done < <(find "$TMP_DIR" -type f -name '*.error' | sort)
+
+ total="$((passed + failed))"
+ if [ "$failed" -gt 0 ]; then
+ eprintf "error - %d/%d UAPI headers compatible with %s appear _not_ to be backwards compatible\n" \
+ "$failed" "$total" "$ARCH"
+ if [ -n "$abi_error_log" ]; then
+ eprintf "Failure summary saved to %s\n" "$abi_error_log"
+ fi
+ else
+ printf "All %d UAPI headers compatible with %s appear to be backwards compatible\n" \
+ "$total" "$ARCH"
+ fi
+
+ return "$failed"
+}
+
+# Check an individual file for UAPI compatibility
+check_individual_file() {
+ local -r base_ref="$1"
+ local -r past_ref="$2"
+ local -r file="$3"
+
+ local -r base_header="$(get_header_tree "$base_ref")/${file}"
+ local -r past_header="$(get_header_tree "$past_ref")/${file}"
+
+ if [ ! -f "$base_header" ]; then
+ mkdir -p "$(dirname "$base_header")"
+ printf "==== UAPI header %s was removed between %s and %s ====" \
+ "$file" "$past_ref" "$base_ref" \
+ > "${base_header}.error"
+ return 1
+ fi
+
+ compare_abi "$file" "$base_header" "$past_header" "$base_ref" "$past_ref"
+}
+
+# Perform the A/B compilation and compare output ABI
+compare_abi() {
+ local -r file="$1"
+ local -r base_header="$2"
+ local -r past_header="$3"
+ local -r base_ref="$4"
+ local -r past_ref="$5"
+ local -r log="${TMP_DIR}/log/${file}.log"
+ local -r error_log="${TMP_DIR}/log/${file}.error"
+
+ mkdir -p "$(dirname "$log")"
+
+ if ! do_compile "$(get_header_tree "$base_ref")/include" "$base_header" "${base_header}.bin" 2> "$log"; then
+ {
+ warn_str=$(printf "==== Could not compile version of UAPI header %s at %s ====\n" \
+ "$file" "$base_ref")
+ printf "%s\n" "$warn_str"
+ cat "$log"
+ printf -- "=%.0s" $(seq 0 ${#warn_str})
+ } > "$error_log"
+ return 1
+ fi
+
+ if ! do_compile "$(get_header_tree "$past_ref")/include" "$past_header" "${past_header}.bin" 2> "$log"; then
+ {
+ warn_str=$(printf "==== Could not compile version of UAPI header %s at %s ====\n" \
+ "$file" "$past_ref")
+ printf "%s\n" "$warn_str"
+ cat "$log"
+ printf -- "=%.0s" $(seq 0 ${#warn_str})
+ } > "$error_log"
+ return 1
+ fi
+
+ local ret=0
+ "$ABIDIFF" --non-reachable-types \
+ --suppressions "$SUPPRESSIONS" \
+ "${past_header}.bin" "${base_header}.bin" > "$log" || ret="$?"
+ if [ "$ret" -eq 0 ]; then
+ if [ "$VERBOSE" = "true" ]; then
+ printf "No ABI differences detected in %s from %s -> %s\n" \
+ "$file" "$past_ref" "$base_ref"
+ fi
+ else
+ # Bits in abidiff's return code can be used to determine the type of error
+ if [ $((ret & 0x2)) -gt 0 ]; then
+ eprintf "error - abidiff did not run properly\n"
+ exit 1
+ fi
+
+ if [ "$IGNORE_AMBIGUOUS_CHANGES" = "true" ] && [ "$ret" -eq 4 ]; then
+ return 0
+ fi
+
+ # If the only changes were additions (not modifications to existing APIs), then
+ # there's no problem. Ignore these diffs.
+ if grep "Unreachable types summary" "$log" | grep -q "0 removed" &&
+ grep "Unreachable types summary" "$log" | grep -q "0 changed"; then
+ return 0
+ fi
+
+ {
+ warn_str=$(printf "==== ABI differences detected in %s from %s -> %s ====" \
+ "$file" "$past_ref" "$base_ref")
+ printf "%s\n" "$warn_str"
+ sed -e '/summary:/d' -e '/changed type/d' -e '/^$/d' -e 's/^/ /g' "$log"
+ printf -- "=%.0s" $(seq 0 ${#warn_str})
+ if cmp "$past_header" "$base_header" > /dev/null 2>&1; then
+ printf "\n%s did not change between %s and %s...\n" "$file" "$past_ref" "${base_ref:-dirty tree}"
+ printf "It's possible a change to one of the headers it includes caused this error:\n"
+ grep '^#include' "$base_header"
+ printf "\n"
+ fi
+ } > "$error_log"
+
+ return 1
+ fi
+}
+
+# Check that a minimum software version number is satisfied
+min_version_is_satisfied() {
+ local -r min_version="$1"
+ local -r version_installed="$2"
+
+ printf "%s\n%s\n" "$min_version" "$version_installed" \
+ | sort -Vc > /dev/null 2>&1
+}
+
+# Make sure we have the tools we need and the arguments make sense
+check_deps() {
+ ABIDIFF="${ABIDIFF:-abidiff}"
+ CC="${CC:-gcc}"
+ ARCH="${ARCH:-$(uname -m)}"
+ if [ "$ARCH" = "x86_64" ]; then
+ ARCH="x86"
+ fi
+
+ local -r abidiff_min_version="2.4"
+ local -r libdw_min_version_if_clang="0.171"
+
+ if ! command -v "$ABIDIFF" > /dev/null 2>&1; then
+ eprintf "error - abidiff not found!\n"
+ eprintf "Please install abigail-tools version %s or greater\n" "$abidiff_min_version"
+ eprintf "See: https://sourceware.org/libabigail/manual/libabigail-overview.html\n"
+ return 1
+ fi
+
+ local -r abidiff_version="$("$ABIDIFF" --version | cut -d ' ' -f 2)"
+ if ! min_version_is_satisfied "$abidiff_min_version" "$abidiff_version"; then
+ eprintf "error - abidiff version too old: %s\n" "$abidiff_version"
+ eprintf "Please install abigail-tools version %s or greater\n" "$abidiff_min_version"
+ eprintf "See: https://sourceware.org/libabigail/manual/libabigail-overview.html\n"
+ return 1
+ fi
+
+ if ! command -v "$CC" > /dev/null 2>&1; then
+ eprintf 'error - %s not found\n' "$CC"
+ return 1
+ fi
+
+ if "$CC" --version | grep -q clang; then
+ local -r libdw_version="$(ldconfig -v 2>/dev/null | grep -v SKIPPED | grep -m 1 -o 'libdw-[0-9]\+.[0-9]\+' | cut -c 7-)"
+ if ! min_version_is_satisfied "$libdw_min_version_if_clang" "$libdw_version"; then
+ eprintf "error - libdw version too old for use with clang: %s\n" "$libdw_version"
+ eprintf "Please install libdw from elfutils version %s or greater\n" "$libdw_min_version_if_clang"
+ eprintf "See: https://sourceware.org/elfutils/\n"
+ return 1
+ fi
+ fi
+
+ if [ ! -d "arch/${ARCH}" ]; then
+ eprintf 'error - ARCH "%s" is not a subdirectory under arch/\n' "$ARCH"
+ eprintf "Please set ARCH to one of:\n%s\n" "$(find arch -maxdepth 1 -mindepth 1 -type d -printf '%f ' | fmt)"
+ return 1
+ fi
+
+ if ! git rev-parse --is-inside-work-tree > /dev/null 2>&1; then
+ eprintf "error - this script requires the kernel tree to be initialized with Git\n"
+ return 1
+ fi
+
+ if ! git rev-parse --verify "$past_ref" > /dev/null 2>&1; then
+ printf 'error - invalid git reference "%s"\n' "$past_ref"
+ return 1
+ fi
+
+ if [ -n "$base_ref" ]; then
+ if ! git merge-base --is-ancestor "$past_ref" "$base_ref" > /dev/null 2>&1; then
+ printf 'error - "%s" is not an ancestor of base ref "%s"\n' "$past_ref" "$base_ref"
+ return 1
+ fi
+ if [ "$(git rev-parse "$base_ref")" = "$(git rev-parse "$past_ref")" ]; then
+ printf 'error - "%s" and "%s" are the same reference\n' "$past_ref" "$base_ref"
+ return 1
+ fi
+ fi
+}
+
+run() {
+ local base_ref="$1"
+ local past_ref="$2"
+ local abi_error_log="$3"
+ shift 3
+
+ if [ -z "$KERNEL_SRC" ]; then
+ KERNEL_SRC="$(realpath "$(dirname "$0")"/..)"
+ fi
+
+ cd "$KERNEL_SRC"
+
+ if [ -z "$base_ref" ] && ! tree_is_dirty; then
+ base_ref=HEAD
+ fi
+
+ if [ -z "$past_ref" ]; then
+ if [ -n "$base_ref" ]; then
+ past_ref="${base_ref}^1"
+ else
+ past_ref=HEAD
+ fi
+ fi
+
+ if ! check_deps; then
+ exit "$FAIL_PREREQ"
+ fi
+
+ TMP_DIR=$(mktemp -d)
+ readonly TMP_DIR
+ trap 'rm -rf "$TMP_DIR"' EXIT
+
+ readonly INCOMPAT_LIST="${TMP_DIR}/incompat_list.txt"
+ touch "$INCOMPAT_LIST"
+
+ readonly SUPPRESSIONS="${TMP_DIR}/suppressions.txt"
+ gen_suppressions > "$SUPPRESSIONS"
+
+ # Run make install_headers for both refs
+ install_headers "$base_ref" "$past_ref"
+
+ # Check for any differences in the installed header trees
+ if diff -r -q "$(get_header_tree "$base_ref")" "$(get_header_tree "$past_ref")" > /dev/null 2>&1; then
+ printf "No changes to UAPI headers were applied between %s and %s\n" "$past_ref" "${base_ref:-dirty tree}"
+ exit "$SUCCESS"
+ fi
+
+ if ! check_uapi_files "$base_ref" "$past_ref" "$abi_error_log"; then
+ exit "$FAIL_ABI"
+ fi
+}
+
+main() {
+ MAX_THREADS=$(nproc)
+ VERBOSE="false"
+ IGNORE_AMBIGUOUS_CHANGES="false"
+ quiet="false"
+ local base_ref=""
+ while getopts "hb:p:j:l:iqv" opt; do
+ case $opt in
+ h)
+ print_usage
+ exit "$SUCCESS"
+ ;;
+ b)
+ base_ref="$OPTARG"
+ ;;
+ p)
+ past_ref="$OPTARG"
+ ;;
+ j)
+ MAX_THREADS="$OPTARG"
+ ;;
+ l)
+ abi_error_log="$OPTARG"
+ ;;
+ i)
+ IGNORE_AMBIGUOUS_CHANGES="true"
+ ;;
+ q)
+ quiet="true"
+ VERBOSE="false"
+ ;;
+ v)
+ VERBOSE="true"
+ quiet="false"
+ ;;
+ *)
+ exit "$FAIL_PREREQ"
+ esac
+ done
+
+ if [ "$quiet" = "true" ]; then
+ exec > /dev/null 2>&1
+ fi
+
+ run "$base_ref" "$past_ref" "$abi_error_log" "$@"
+}
+
+main "$@"
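
For a sense of what the abidiff comparison above catches, a hedged example of a classic UAPI break (struct and field names are made up): inserting a member mid-struct shifts every later offset, so old userspace and new kernels disagree on the layout.

    /* before */
    struct foo_arg {
            __u32 id;
            __u32 flags;    /* offset 4 */
    };

    /* after: ABI break - existing 'flags' users still read offset 4 */
    struct foo_arg {
            __u32 id;
            __u32 mode;     /* newly inserted */
            __u32 flags;    /* now at offset 8 */
    };
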
diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
index 5dea4479240b..e4fb686dfaa9 100755
--- a/scripts/clang-tools/gen_compile_commands.py
+++ b/scripts/clang-tools/gen_compile_commands.py
@@ -170,7 +170,7 @@ def process_line(root_directory, command_prefix, file_path):
# escape the pound sign '#', either as '\#' or '$(pound)' (depending on the
# kernel version). The compile_commands.json file is not interpreted
# by Make, so this code replaces the escaped version with '#'.
- prefix = command_prefix.replace('\#', '#').replace('$(pound)', '#')
+ prefix = command_prefix.replace(r'\#', '#').replace('$(pound)', '#')
# Return the canonical path, eliminating any symbolic links encountered in the path.
abs_path = os.path.realpath(os.path.join(root_directory, file_path))
diff --git a/scripts/coccinelle/api/device_attr_show.cocci b/scripts/coccinelle/api/device_attr_show.cocci
index a28dc061653a..550d1d2fc02a 100644
--- a/scripts/coccinelle/api/device_attr_show.cocci
+++ b/scripts/coccinelle/api/device_attr_show.cocci
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
///
/// From Documentation/filesystems/sysfs.rst:
-/// show() must not use snprintf() when formatting the value to be
-/// returned to user space. If you can guarantee that an overflow
-/// will never happen you can use sprintf() otherwise you must use
-/// scnprintf().
+/// show() should only use sysfs_emit() or sysfs_emit_at() when formatting
+/// the value to be returned to user space.
///
// Confidence: High
// Copyright: (C) 2020 Denis Efremov ISPRAS
@@ -30,15 +28,16 @@ ssize_t show(struct device *dev, struct device_attribute *attr, char *buf)
@rp depends on patch@
identifier show, dev, attr, buf;
+expression BUF, SZ, FORMAT, STR;
@@
ssize_t show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
-- snprintf
-+ scnprintf
- (...);
+- snprintf(BUF, SZ, FORMAT
++ sysfs_emit(BUF, FORMAT
+ ,...);
...>
}
@@ -46,10 +45,10 @@ ssize_t show(struct device *dev, struct device_attribute *attr, char *buf)
p << r.p;
@@
-coccilib.report.print_report(p[0], "WARNING: use scnprintf or sprintf")
+coccilib.report.print_report(p[0], "WARNING: please use sysfs_emit or sysfs_emit_at")
@script: python depends on org@
p << r.p;
@@
-coccilib.org.print_todo(p[0], "WARNING: use scnprintf or sprintf")
+coccilib.org.print_todo(p[0], "WARNING: please use sysfs_emit or sysfs_emit_at")
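
The rewritten rule converts show() callbacks to sysfs_emit(), which knows sysfs hands it a full page and validates the buffer it is given. A minimal sketch of the resulting pattern (foo and foo_value are hypothetical):

    static int foo_value;

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            /* sysfs_emit() bounds the write to PAGE_SIZE internally */
            return sysfs_emit(buf, "%d\n", foo_value);
    }
    static DEVICE_ATTR_RO(foo);
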
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index aa5ab6251f76..6793d6e86e77 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -82,21 +82,12 @@ LxPs()
thread_info_type = utils.CachedType("struct thread_info")
-ia64_task_size = None
-
def get_thread_info(task):
thread_info_ptr_type = thread_info_type.get_type().pointer()
- if utils.is_target_arch("ia64"):
- global ia64_task_size
- if ia64_task_size is None:
- ia64_task_size = gdb.parse_and_eval("sizeof(struct task_struct)")
- thread_info_addr = task.address + ia64_task_size
- thread_info = thread_info_addr.cast(thread_info_ptr_type)
- else:
- if task.type.fields()[0].type == thread_info_type.get_type():
- return task['thread_info']
- thread_info = task['stack'].cast(thread_info_ptr_type)
+ if task.type.fields()[0].type == thread_info_type.get_type():
+ return task['thread_info']
+ thread_info = task['stack'].cast(thread_info_ptr_type)
return thread_info.dereference()
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 3c6cbe2b278d..0da52b548ba5 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -161,6 +161,13 @@ fn main() {
ts.push("features", features);
ts.push("llvm-target", "x86_64-linux-gnu");
ts.push("target-pointer-width", "64");
+ } else if cfg.has("LOONGARCH") {
+ ts.push("arch", "loongarch64");
+ ts.push("data-layout", "e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+ ts.push("features", "-f,-d");
+ ts.push("llvm-target", "loongarch64-linux-gnusf");
+ ts.push("llvm-abiname", "lp64s");
+ ts.push("target-pointer-width", "64");
} else {
panic!("Unsupported architecture");
}
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index f5dfdb9d80e9..f3901c55df23 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -16,9 +16,7 @@
#include <unistd.h>
#include <assert.h>
#include <stdarg.h>
-#ifdef __GNU_LIBRARY__
#include <getopt.h>
-#endif /* __GNU_LIBRARY__ */
#include "genksyms.h"
/*----------------------------------------------------------------------*/
@@ -718,8 +716,6 @@ void error_with_pos(const char *fmt, ...)
static void genksyms_usage(void)
{
fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
-#ifdef __GNU_LIBRARY__
- " -s, --symbol-prefix Select symbol prefix\n"
" -d, --debug Increment the debug level (repeatable)\n"
" -D, --dump Dump expanded symbol defs (for debugging only)\n"
" -r, --reference file Read reference symbols from a file\n"
@@ -729,18 +725,6 @@ static void genksyms_usage(void)
" -q, --quiet Disable warnings (default)\n"
" -h, --help Print this message\n"
" -V, --version Print the release version\n"
-#else /* __GNU_LIBRARY__ */
- " -s Select symbol prefix\n"
- " -d Increment the debug level (repeatable)\n"
- " -D Dump expanded symbol defs (for debugging only)\n"
- " -r file Read reference symbols from a file\n"
- " -T file Dump expanded types into file\n"
- " -p Preserve reference modversions or fail\n"
- " -w Enable warnings\n"
- " -q Disable warnings (default)\n"
- " -h Print this message\n"
- " -V Print the release version\n"
-#endif /* __GNU_LIBRARY__ */
, stderr);
}
@@ -749,7 +733,6 @@ int main(int argc, char **argv)
FILE *dumpfile = NULL, *ref_file = NULL;
int o;
-#ifdef __GNU_LIBRARY__
struct option long_opts[] = {
{"debug", 0, 0, 'd'},
{"warnings", 0, 0, 'w'},
@@ -763,11 +746,8 @@ int main(int argc, char **argv)
{0, 0, 0, 0}
};
- while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+ while ((o = getopt_long(argc, argv, "dwqVDr:T:ph",
&long_opts[0], NULL)) != EOF)
-#else /* __GNU_LIBRARY__ */
- while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
-#endif /* __GNU_LIBRARY__ */
switch (o) {
case 'd':
flag_debug++;
diff --git a/scripts/git.orderFile b/scripts/git.orderFile
new file mode 100644
index 000000000000..5102ba73357f
--- /dev/null
+++ b/scripts/git.orderFile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# order file for git, to produce patches which are easier to review
+# by diffing the important stuff like header changes first.
+#
+# one-off usage:
+# git diff -O scripts/git.orderFile ...
+#
+# add to git config:
+# git config diff.orderFile scripts/git.orderFile
+#
+
+MAINTAINERS
+
+# Documentation
+Documentation/*
+*.rst
+
+# git-specific
+.gitignore
+scripts/git.orderFile
+
+# build system
+Kconfig*
+*/Kconfig*
+Kbuild*
+*/Kbuild*
+Makefile*
+*/Makefile*
+*.mak
+*.mk
+scripts/*
+
+# semantic patches
+*.cocci
+
+# headers
+*types.h
+*.h
+
+# code
+*.c
diff --git a/scripts/head-object-list.txt b/scripts/head-object-list.txt
index 26359968744e..890f69005bab 100644
--- a/scripts/head-object-list.txt
+++ b/scripts/head-object-list.txt
@@ -17,7 +17,6 @@ arch/arm/kernel/head-nommu.o
arch/arm/kernel/head.o
arch/csky/kernel/head.o
arch/hexagon/kernel/head.o
-arch/ia64/kernel/head.o
arch/loongarch/kernel/head.o
arch/m68k/68000/head.o
arch/m68k/coldfire/head.o
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 4eee155121a8..ea1bf3b3dbde 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -27,6 +27,14 @@ KCONFIG_DEFCONFIG_LIST += \
endif
KCONFIG_DEFCONFIG_LIST += arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)
+ifneq ($(findstring c, $(KBUILD_EXTRA_WARN)),)
+export KCONFIG_WARN_UNKNOWN_SYMBOLS=1
+endif
+
+ifneq ($(findstring e, $(KBUILD_EXTRA_WARN)),)
+export KCONFIG_WERROR=1
+endif
+
# We need this, in case the user has it in its environment
unexport CONFIG_
@@ -99,7 +107,7 @@ config-fragments = $(call configfiles,$@)
%.config: $(obj)/conf
$(if $(config-fragments),, $(error $@ fragment does not exist on this architecture))
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(config-fragments)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m $(KCONFIG_CONFIG) $(config-fragments)
$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
PHONY += tinyconfig
@@ -166,7 +174,7 @@ conf-objs := conf.o $(common-objs)
# nconf: Used for the nconfig target based on ncurses
hostprogs += nconf
-nconf-objs := nconf.o nconf.gui.o $(common-objs)
+nconf-objs := nconf.o nconf.gui.o mnconf-common.o $(common-objs)
HOSTLDLIBS_nconf = $(call read-file, $(obj)/nconf-libs)
HOSTCFLAGS_nconf.o = $(call read-file, $(obj)/nconf-cflags)
@@ -179,7 +187,7 @@ $(obj)/nconf.o $(obj)/nconf.gui.o: | $(obj)/nconf-cflags
hostprogs += mconf
lxdialog := $(addprefix lxdialog/, \
checklist.o inputbox.o menubox.o textbox.o util.o yesno.o)
-mconf-objs := mconf.o $(lxdialog) $(common-objs)
+mconf-objs := mconf.o $(lxdialog) mnconf-common.o $(common-objs)
HOSTLDLIBS_mconf = $(call read-file, $(obj)/mconf-libs)
$(foreach f, mconf.o $(lxdialog), \
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 33d19e419908..662a5e7c37c2 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -827,6 +827,9 @@ int main(int ac, char **av)
break;
}
+ if (conf_errors())
+ exit(1);
+
if (sync_kconfig) {
name = getenv("KCONFIG_NOSILENTUPDATE");
if (name && *name) {
@@ -890,6 +893,9 @@ int main(int ac, char **av)
break;
}
+ if (sym_dep_errors())
+ exit(1);
+
if (input_mode == savedefconfig) {
if (conf_write_defconfig(defconfig_file)) {
fprintf(stderr, "n*** Error while saving defconfig to: %s\n\n",
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 4a6811d77d18..f53dcdd44597 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -155,6 +155,13 @@ static void conf_message(const char *fmt, ...)
static const char *conf_filename;
static int conf_lineno, conf_warnings;
+bool conf_errors(void)
+{
+ if (conf_warnings)
+ return getenv("KCONFIG_WERROR");
+ return false;
+}
+
static void conf_warning(const char *fmt, ...)
{
va_list ap;
@@ -289,16 +296,12 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
#define LINE_GROWTH 16
static int add_byte(int c, char **lineptr, size_t slen, size_t *n)
{
- char *nline;
size_t new_size = slen + 1;
+
if (new_size > *n) {
new_size += LINE_GROWTH - 1;
new_size *= 2;
- nline = xrealloc(*lineptr, new_size);
- if (!nline)
- return -1;
-
- *lineptr = nline;
+ *lineptr = xrealloc(*lineptr, new_size);
*n = new_size;
}
@@ -341,19 +344,37 @@ e_out:
return -1;
}
+/* like getline(), but the newline character is stripped away */
+static ssize_t getline_stripped(char **lineptr, size_t *n, FILE *stream)
+{
+ ssize_t len;
+
+ len = compat_getline(lineptr, n, stream);
+
+ if (len > 0 && (*lineptr)[len - 1] == '\n') {
+ len--;
+ (*lineptr)[len] = '\0';
+
+ if (len > 0 && (*lineptr)[len - 1] == '\r') {
+ len--;
+ (*lineptr)[len] = '\0';
+ }
+ }
+
+ return len;
+}
+
int conf_read_simple(const char *name, int def)
{
FILE *in = NULL;
char *line = NULL;
size_t line_asize = 0;
- char *p, *p2;
+ char *p, *val;
struct symbol *sym;
int i, def_flags;
- const char *warn_unknown;
- const char *werror;
+ const char *warn_unknown, *sym_name;
warn_unknown = getenv("KCONFIG_WARN_UNKNOWN_SYMBOLS");
- werror = getenv("KCONFIG_WERROR");
if (name) {
in = zconf_fopen(name);
} else {
@@ -417,8 +438,7 @@ load:
case S_INT:
case S_HEX:
case S_STRING:
- if (sym->def[def].val)
- free(sym->def[def].val);
+ free(sym->def[def].val);
/* fall through */
default:
sym->def[def].val = NULL;
@@ -426,90 +446,68 @@ load:
}
}
- while (compat_getline(&line, &line_asize, in) != -1) {
+ while (getline_stripped(&line, &line_asize, in) != -1) {
conf_lineno++;
- sym = NULL;
+
+ if (!line[0]) /* blank line */
+ continue;
+
if (line[0] == '#') {
- if (memcmp(line + 2, CONFIG_, strlen(CONFIG_)))
+ if (line[1] != ' ')
+ continue;
+ p = line + 2;
+ if (memcmp(p, CONFIG_, strlen(CONFIG_)))
continue;
- p = strchr(line + 2 + strlen(CONFIG_), ' ');
+ sym_name = p + strlen(CONFIG_);
+ p = strchr(sym_name, ' ');
if (!p)
continue;
*p++ = 0;
- if (strncmp(p, "is not set", 10))
+ if (strcmp(p, "is not set"))
continue;
- if (def == S_DEF_USER) {
- sym = sym_find(line + 2 + strlen(CONFIG_));
- if (!sym) {
- if (warn_unknown)
- conf_warning("unknown symbol: %s",
- line + 2 + strlen(CONFIG_));
-
- conf_set_changed(true);
- continue;
- }
- } else {
- sym = sym_lookup(line + 2 + strlen(CONFIG_), 0);
- if (sym->type == S_UNKNOWN)
- sym->type = S_BOOLEAN;
- }
- if (sym->flags & def_flags) {
- conf_warning("override: reassigning to symbol %s", sym->name);
- }
- switch (sym->type) {
- case S_BOOLEAN:
- case S_TRISTATE:
- sym->def[def].tri = no;
- sym->flags |= def_flags;
- break;
- default:
- ;
- }
- } else if (memcmp(line, CONFIG_, strlen(CONFIG_)) == 0) {
- p = strchr(line + strlen(CONFIG_), '=');
- if (!p)
+
+ val = "n";
+ } else {
+ if (memcmp(line, CONFIG_, strlen(CONFIG_))) {
+ conf_warning("unexpected data: %s", line);
continue;
- *p++ = 0;
- p2 = strchr(p, '\n');
- if (p2) {
- *p2-- = 0;
- if (*p2 == '\r')
- *p2 = 0;
}
- sym = sym_find(line + strlen(CONFIG_));
- if (!sym) {
- if (def == S_DEF_AUTO) {
- /*
- * Reading from include/config/auto.conf
- * If CONFIG_FOO previously existed in
- * auto.conf but it is missing now,
- * include/config/FOO must be touched.
- */
- conf_touch_dep(line + strlen(CONFIG_));
- } else {
- if (warn_unknown)
- conf_warning("unknown symbol: %s",
- line + strlen(CONFIG_));
-
- conf_set_changed(true);
- }
+ sym_name = line + strlen(CONFIG_);
+ p = strchr(sym_name, '=');
+ if (!p) {
+ conf_warning("unexpected data: %s", line);
continue;
}
+ *p = 0;
+ val = p + 1;
+ }
- if (sym->flags & def_flags) {
- conf_warning("override: reassigning to symbol %s", sym->name);
- }
- if (conf_set_sym_val(sym, def, def_flags, p))
- continue;
- } else {
- if (line[0] != '\r' && line[0] != '\n')
- conf_warning("unexpected data: %.*s",
- (int)strcspn(line, "\r\n"), line);
+ sym = sym_find(sym_name);
+ if (!sym) {
+ if (def == S_DEF_AUTO) {
+ /*
+ * Reading from include/config/auto.conf.
+ * If CONFIG_FOO previously existed in auto.conf
+ * but it is missing now, include/config/FOO
+ * must be touched.
+ */
+ conf_touch_dep(sym_name);
+ } else {
+ if (warn_unknown)
+ conf_warning("unknown symbol: %s", sym_name);
+ conf_set_changed(true);
+ }
continue;
}
+ if (sym->flags & def_flags)
+ conf_warning("override: reassigning to symbol %s", sym->name);
+
+ if (conf_set_sym_val(sym, def, def_flags, val))
+ continue;
+
if (sym && sym_is_choice_value(sym)) {
struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym));
switch (sym->def[def].tri) {
@@ -533,9 +531,6 @@ load:
free(line);
fclose(in);
- if (conf_warnings && werror)
- exit(1);
-
return 0;
}
@@ -594,7 +589,7 @@ int conf_read(const char *name)
/* Reset a string value if it's out of range */
if (sym_string_within_range(sym, sym->def[S_DEF_USER].val))
break;
- sym->flags &= ~(SYMBOL_VALID|SYMBOL_DEF_USER);
+ sym->flags &= ~SYMBOL_VALID;
conf_unsaved++;
break;
default:
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 81ebf8108ca7..a290de36307b 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -1131,7 +1131,6 @@ static int expr_compare_type(enum expr_type t1, enum expr_type t2)
default:
return -1;
}
- printf("[%dgt%d?]", t1, t2);
return 0;
}
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 471a59acecec..5cdc8f5e6446 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -99,8 +99,6 @@ bool menu_is_visible(struct menu *menu);
bool menu_has_prompt(struct menu *menu);
const char *menu_get_prompt(struct menu *menu);
struct menu *menu_get_parent_menu(struct menu *menu);
-bool menu_has_help(struct menu *menu);
-const char *menu_get_help(struct menu *menu);
int get_jump_key_char(void);
struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head);
void menu_get_ext_help(struct menu *menu, struct gstr *help);
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index edd1e617b25c..a4ae5e9eadad 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LKC_PROTO_H
+#define LKC_PROTO_H
+
#include <stdarg.h>
/* confdata.c */
@@ -12,6 +15,7 @@ void conf_set_changed(bool val);
bool conf_get_changed(void);
void conf_set_changed_callback(void (*fn)(void));
void conf_set_message_callback(void (*fn)(const char *s));
+bool conf_errors(void);
/* symbol.c */
extern struct symbol * symbol_hash[SYMBOL_HASHSIZE];
@@ -22,6 +26,7 @@ void print_symbol_for_listconfig(struct symbol *sym);
struct symbol ** sym_re_search(const char *pattern);
const char * sym_type_name(enum symbol_type type);
void sym_calc_value(struct symbol *sym);
+bool sym_dep_errors(void);
enum symbol_type sym_get_type(struct symbol *sym);
bool sym_tristate_within_range(struct symbol *sym,tristate tri);
bool sym_set_tristate_value(struct symbol *sym,tristate tri);
@@ -50,3 +55,5 @@ char *expand_one_token(const char **str);
/* expr.c */
void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken);
+
+#endif /* LKC_PROTO_H */
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index eccc87a441e7..5df32148a869 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -21,6 +21,7 @@
#include "lkc.h"
#include "lxdialog/dialog.h"
+#include "mnconf-common.h"
static const char mconf_readme[] =
"Overview\n"
@@ -247,7 +248,7 @@ search_help[] =
" -> PCI support (PCI [=y])\n"
"(1) -> PCI access mode (<choice> [=y])\n"
" Defined at drivers/pci/Kconfig:47\n"
- " Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
+ " Depends on: X86_LOCAL_APIC && X86_IO_APIC\n"
" Selects: LIBCRC32\n"
" Selected by: BAR [=n]\n"
"-----------------------------------------------------------------\n"
@@ -286,7 +287,6 @@ static int single_menu_mode;
static int show_all_options;
static int save_and_exit;
static int silent;
-static int jump_key_char;
static void conf(struct menu *menu, struct menu *active_menu);
@@ -378,58 +378,6 @@ static void show_help(struct menu *menu)
str_free(&help);
}
-struct search_data {
- struct list_head *head;
- struct menu *target;
-};
-
-static int next_jump_key(int key)
-{
- if (key < '1' || key > '9')
- return '1';
-
- key++;
-
- if (key > '9')
- key = '1';
-
- return key;
-}
-
-static int handle_search_keys(int key, size_t start, size_t end, void *_data)
-{
- struct search_data *data = _data;
- struct jump_key *pos;
- int index = 0;
-
- if (key < '1' || key > '9')
- return 0;
-
- list_for_each_entry(pos, data->head, entries) {
- index = next_jump_key(index);
-
- if (pos->offset < start)
- continue;
-
- if (pos->offset >= end)
- break;
-
- if (key == index) {
- data->target = pos->target;
- return 1;
- }
- }
-
- return 0;
-}
-
-int get_jump_key_char(void)
-{
- jump_key_char = next_jump_key(jump_key_char);
-
- return jump_key_char;
-}
-
static void search_conf(void)
{
struct symbol **sym_arr;
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 61c442d84aef..2cce8b651f61 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -673,19 +673,6 @@ struct menu *menu_get_parent_menu(struct menu *menu)
return menu;
}
-bool menu_has_help(struct menu *menu)
-{
- return menu->help != NULL;
-}
-
-const char *menu_get_help(struct menu *menu)
-{
- if (menu->help)
- return menu->help;
- else
- return "";
-}
-
static void get_def_str(struct gstr *r, struct menu *menu)
{
str_printf(r, "Defined at %s:%d\n",
@@ -856,10 +843,10 @@ void menu_get_ext_help(struct menu *menu, struct gstr *help)
struct symbol *sym = menu->sym;
const char *help_text = nohelp_text;
- if (menu_has_help(menu)) {
+ if (menu->help) {
if (sym->name)
str_printf(help, "%s%s:\n\n", CONFIG_, sym->name);
- help_text = menu_get_help(menu);
+ help_text = menu->help;
}
str_printf(help, "%s\n", help_text);
if (sym)
diff --git a/scripts/kconfig/mnconf-common.c b/scripts/kconfig/mnconf-common.c
new file mode 100644
index 000000000000..18cb9a6c5aaa
--- /dev/null
+++ b/scripts/kconfig/mnconf-common.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "expr.h"
+#include "list.h"
+#include "mnconf-common.h"
+
+int jump_key_char;
+
+int next_jump_key(int key)
+{
+ if (key < '1' || key > '9')
+ return '1';
+
+ key++;
+
+ if (key > '9')
+ key = '1';
+
+ return key;
+}
+
+int handle_search_keys(int key, size_t start, size_t end, void *_data)
+{
+ struct search_data *data = _data;
+ struct jump_key *pos;
+ int index = 0;
+
+ if (key < '1' || key > '9')
+ return 0;
+
+ list_for_each_entry(pos, data->head, entries) {
+ index = next_jump_key(index);
+
+ if (pos->offset < start)
+ continue;
+
+ if (pos->offset >= end)
+ break;
+
+ if (key == index) {
+ data->target = pos->target;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int get_jump_key_char(void)
+{
+ jump_key_char = next_jump_key(jump_key_char);
+
+ return jump_key_char;
+}
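
The helpers move here verbatim from mconf.c and nconf.c. The jump-key cycle they implement simply wraps the digits '1' through '9', e.g.:

    int key = 0;

    key = next_jump_key(key);       /* anything outside '1'..'9' yields '1' */
    key = next_jump_key('9');       /* '9' wraps back around to '1' */
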
diff --git a/scripts/kconfig/mnconf-common.h b/scripts/kconfig/mnconf-common.h
new file mode 100644
index 000000000000..ab6292cc4bf2
--- /dev/null
+++ b/scripts/kconfig/mnconf-common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef MNCONF_COMMON_H
+#define MNCONF_COMMON_H
+
+#include <stddef.h>
+
+struct search_data {
+ struct list_head *head;
+ struct menu *target;
+};
+
+extern int jump_key_char;
+
+int next_jump_key(int key);
+int handle_search_keys(int key, size_t start, size_t end, void *_data);
+int get_jump_key_char(void);
+
+#endif /* MNCONF_COMMON_H */
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 143a2c351d57..1148163cfa7e 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include "lkc.h"
+#include "mnconf-common.h"
#include "nconf.h"
#include <ctype.h>
@@ -216,7 +217,7 @@ search_help[] =
"Symbol: FOO [ = m]\n"
"Prompt: Foo bus is used to drive the bar HW\n"
"Defined at drivers/pci/Kconfig:47\n"
-"Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
+"Depends on: X86_LOCAL_APIC && X86_IO_APIC\n"
"Location:\n"
" -> Bus options (PCI, PCMCIA, EISA, ISA)\n"
" -> PCI support (PCI [ = y])\n"
@@ -279,7 +280,6 @@ static const char *current_instructions = menu_instructions;
static char *dialog_input_result;
static int dialog_input_result_len;
-static int jump_key_char;
static void selected_conf(struct menu *menu, struct menu *active_menu);
static void conf(struct menu *menu);
@@ -691,57 +691,6 @@ static int do_exit(void)
return 0;
}
-struct search_data {
- struct list_head *head;
- struct menu *target;
-};
-
-static int next_jump_key(int key)
-{
- if (key < '1' || key > '9')
- return '1';
-
- key++;
-
- if (key > '9')
- key = '1';
-
- return key;
-}
-
-static int handle_search_keys(int key, size_t start, size_t end, void *_data)
-{
- struct search_data *data = _data;
- struct jump_key *pos;
- int index = 0;
-
- if (key < '1' || key > '9')
- return 0;
-
- list_for_each_entry(pos, data->head, entries) {
- index = next_jump_key(index);
-
- if (pos->offset < start)
- continue;
-
- if (pos->offset >= end)
- break;
-
- if (key == index) {
- data->target = pos->target;
- return 1;
- }
- }
-
- return 0;
-}
-
-int get_jump_key_char(void)
-{
- jump_key_char = next_jump_key(jump_key_char);
-
- return jump_key_char;
-}
static void search_conf(void)
{
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index a76925b46ce6..e9e9fb8d8674 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -29,14 +29,9 @@ struct symbol symbol_no = {
.flags = SYMBOL_CONST|SYMBOL_VALID,
};
-static struct symbol symbol_empty = {
- .name = "",
- .curr = { "", no },
- .flags = SYMBOL_VALID,
-};
-
struct symbol *modules_sym;
static tristate modules_val;
+static int sym_warnings;
enum symbol_type sym_get_type(struct symbol *sym)
{
@@ -317,6 +312,14 @@ static void sym_warn_unmet_dep(struct symbol *sym)
" Selected by [m]:\n");
fputs(str_get(&gs), stderr);
+ sym_warnings++;
+}
+
+bool sym_dep_errors(void)
+{
+ if (sym_warnings)
+ return getenv("KCONFIG_WERROR");
+ return false;
}
void sym_calc_value(struct symbol *sym)
@@ -342,15 +345,21 @@ void sym_calc_value(struct symbol *sym)
oldval = sym->curr;
+ newval.tri = no;
+
switch (sym->type) {
case S_INT:
+ newval.val = "0";
+ break;
case S_HEX:
+ newval.val = "0x0";
+ break;
case S_STRING:
- newval = symbol_empty.curr;
+ newval.val = "";
break;
case S_BOOLEAN:
case S_TRISTATE:
- newval = symbol_no.curr;
+ newval.val = "n";
break;
default:
sym->curr.val = sym->name;
@@ -697,13 +706,12 @@ const char *sym_get_string_default(struct symbol *sym)
{
struct property *prop;
struct symbol *ds;
- const char *str;
+ const char *str = "";
tristate val;
sym_calc_visibility(sym);
sym_calc_value(modules_sym);
val = symbol_no.curr.tri;
- str = symbol_empty.curr.val;
/* If symbol has a default value look it up */
prop = sym_get_default_prop(sym);
@@ -753,14 +761,17 @@ const char *sym_get_string_default(struct symbol *sym)
case yes: return "y";
}
case S_INT:
+ if (!str[0])
+ str = "0";
+ break;
case S_HEX:
- return str;
- case S_STRING:
- return str;
- case S_UNKNOWN:
+ if (!str[0])
+ str = "0x0";
+ break;
+ default:
break;
}
- return "";
+ return str;
}
const char *sym_get_string_value(struct symbol *sym)
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index b78f114ad48c..92e5b2b9761d 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -42,8 +42,7 @@ struct gstr str_new(void)
/* Free storage for growable string */
void str_free(struct gstr *gs)
{
- if (gs->s)
- free(gs->s);
+ free(gs->s);
gs->s = NULL;
gs->len = 0;
}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index a432b171be82..7862a8101747 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -135,8 +135,13 @@ gen_btf()
${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \
--strip-all ${1} ${2} 2>/dev/null
# Change e_type to ET_REL so that it can be used to link final vmlinux.
- # Unlike GNU ld, lld does not allow an ET_EXEC input.
- printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none
+ # GNU ld 2.35+ and lld do not allow an ET_EXEC input.
+ if is_enabled CONFIG_CPU_BIG_ENDIAN; then
+ et_rel='\0\1'
+ else
+ et_rel='\1\0'
+ fi
+ printf "${et_rel}" | dd of=${2} conv=notrunc bs=1 seek=16 status=none
}
# Create ${2} .S file with all symbols from the ${1} object file
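
The printf|dd pipeline pokes the 16-bit e_type field at offset 16 of the ELF header, which is why byte order now matters. The same operation in C, as a hedged illustration (set_et_rel and its caller are hypothetical):

    /* e_type is a 16-bit field at offset 16; ET_REL == 1, in target byte order */
    static void set_et_rel(unsigned char *ehdr, int big_endian)
    {
            ehdr[16] = big_endian ? 0x00 : 0x01;    /* '\0\1' vs '\1\0' */
            ehdr[17] = big_endian ? 0x01 : 0x00;
    }
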
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index c62066825f53..9faa4d3d91e3 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -26,6 +26,8 @@ gcc)
llvm)
if [ "$SRCARCH" = s390 ]; then
echo 15.0.0
+ elif [ "$SRCARCH" = loongarch ]; then
+ echo 18.0.0
else
echo 11.0.0
fi
diff --git a/scripts/mksysmap b/scripts/mksysmap
index 9ba1c9da0a40..57ff5656d566 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -48,17 +48,8 @@ ${NM} -n ${1} | sed >${2} -e "
/ __kvm_nvhe_\\$/d
/ __kvm_nvhe_\.L/d
-# arm64 lld
-/ __AArch64ADRPThunk_/d
-
-# arm lld
-/ __ARMV5PILongThunk_/d
-/ __ARMV7PILongThunk_/d
-/ __ThumbV7PILongThunk_/d
-
-# mips lld
-/ __LA25Thunk_/d
-/ __microLA25Thunk_/d
+# lld arm/aarch64/mips thunks
+/ __[[:alnum:]]*Thunk_/d
# CFI type identifiers
/ __kcfi_typeid_/d
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index cb6406f485a9..267b9a0a3abc 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -60,8 +60,7 @@ static unsigned int nr_unresolved;
#define MODULE_NAME_LEN (64 - sizeof(Elf_Addr))
-void __attribute__((format(printf, 2, 3)))
-modpost_log(enum loglevel loglevel, const char *fmt, ...)
+void modpost_log(enum loglevel loglevel, const char *fmt, ...)
{
va_list arglist;
@@ -71,9 +70,7 @@ modpost_log(enum loglevel loglevel, const char *fmt, ...)
break;
case LOG_ERROR:
fprintf(stderr, "ERROR: ");
- break;
- case LOG_FATAL:
- fprintf(stderr, "FATAL: ");
+ error_occurred = true;
break;
default: /* invalid loglevel, ignore */
break;
@@ -84,11 +81,6 @@ modpost_log(enum loglevel loglevel, const char *fmt, ...)
va_start(arglist, fmt);
vfprintf(stderr, fmt, arglist);
va_end(arglist);
-
- if (loglevel == LOG_FATAL)
- exit(1);
- if (loglevel == LOG_ERROR)
- error_occurred = true;
}
static inline bool strends(const char *str, const char *postfix)
@@ -474,11 +466,9 @@ static int parse_elf(struct elf_info *info, const char *filename)
fatal("%s: not relocatable object.", filename);
/* Check if file offset is correct */
- if (hdr->e_shoff > info->size) {
+ if (hdr->e_shoff > info->size)
fatal("section header offset=%lu in file '%s' is bigger than filesize=%zu\n",
(unsigned long)hdr->e_shoff, filename, info->size);
- return 0;
- }
if (hdr->e_shnum == SHN_UNDEF) {
/*
@@ -516,12 +506,11 @@ static int parse_elf(struct elf_info *info, const char *filename)
const char *secname;
int nobits = sechdrs[i].sh_type == SHT_NOBITS;
- if (!nobits && sechdrs[i].sh_offset > info->size) {
+ if (!nobits && sechdrs[i].sh_offset > info->size)
fatal("%s is truncated. sechdrs[i].sh_offset=%lu > sizeof(*hrd)=%zu\n",
filename, (unsigned long)sechdrs[i].sh_offset,
sizeof(*hdr));
- return 0;
- }
+
secname = secstrings + sechdrs[i].sh_name;
if (strcmp(secname, ".modinfo") == 0) {
if (nobits)
@@ -807,7 +796,8 @@ static void check_section(const char *modname, struct elf_info *elf,
#define DATA_SECTIONS ".data", ".data.rel"
#define TEXT_SECTIONS ".text", ".text.*", ".sched.text", \
- ".kprobes.text", ".cpuidle.text", ".noinstr.text"
+ ".kprobes.text", ".cpuidle.text", ".noinstr.text", \
+ ".ltext", ".ltext.*"
#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
".fixup", ".entry.text", ".exception.text", \
".coldtext", ".softirqentry.text"
@@ -1346,6 +1336,14 @@ static Elf_Addr addend_mips_rel(uint32_t *location, unsigned int r_type)
#define R_LARCH_SUB32 55
#endif
+#ifndef R_LARCH_RELAX
+#define R_LARCH_RELAX 100
+#endif
+
+#ifndef R_LARCH_ALIGN
+#define R_LARCH_ALIGN 102
+#endif
+
static void get_rel_type_and_sym(struct elf_info *elf, uint64_t r_info,
unsigned int *r_type, unsigned int *r_sym)
{
@@ -1400,9 +1398,16 @@ static void section_rela(struct module *mod, struct elf_info *elf,
continue;
break;
case EM_LOONGARCH:
- if (!strcmp("__ex_table", fromsec) &&
- r_type == R_LARCH_SUB32)
+ switch (r_type) {
+ case R_LARCH_SUB32:
+ if (!strcmp("__ex_table", fromsec))
+ continue;
+ break;
+ case R_LARCH_RELAX:
+ case R_LARCH_ALIGN:
+ /* These relocs do not refer to symbols */
continue;
+ }
break;
}
@@ -1419,7 +1424,7 @@ static void section_rel(struct module *mod, struct elf_info *elf,
for (rel = start; rel < stop; rel++) {
Elf_Sym *tsym;
- Elf_Addr taddr = 0, r_offset;
+ Elf_Addr taddr, r_offset;
unsigned int r_type, r_sym;
void *loc;
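R_LARCH_RELAX and R_LARCH_ALIGN carry linker-relaxation hints rather than symbol references, which is why the new switch skips them outright. A standalone sketch of that dispatch, using the constants defined earlier in the diff:

    #include <string.h>

    #define R_LARCH_SUB32 55
    #define R_LARCH_RELAX 100
    #define R_LARCH_ALIGN 102

    static int loongarch_reloc_skippable(unsigned int r_type, const char *fromsec)
    {
    	switch (r_type) {
    	case R_LARCH_SUB32:
    		/* only benign when annotating the exception table */
    		return strcmp(fromsec, "__ex_table") == 0;
    	case R_LARCH_RELAX:
    	case R_LARCH_ALIGN:
    		return 1;	/* relaxation hints: no symbol reference */
    	default:
    		return 0;
    	}
    }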
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 69baf014da4f..ee43c7950636 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -194,10 +194,10 @@ void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym);
enum loglevel {
LOG_WARN,
LOG_ERROR,
- LOG_FATAL
};
-void modpost_log(enum loglevel loglevel, const char *fmt, ...);
+void __attribute__((format(printf, 2, 3)))
+modpost_log(enum loglevel loglevel, const char *fmt, ...);
/*
* warn - show the given message, then let modpost continue running, still
@@ -214,4 +214,4 @@ void modpost_log(enum loglevel loglevel, const char *fmt, ...);
*/
#define warn(fmt, args...) modpost_log(LOG_WARN, fmt, ##args)
#define error(fmt, args...) modpost_log(LOG_ERROR, fmt, ##args)
-#define fatal(fmt, args...) modpost_log(LOG_FATAL, fmt, ##args)
+#define fatal(fmt, args...) do { error(fmt, ##args); exit(1); } while (1)
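With LOG_FATAL removed, fatal() is just error() plus exit(1), and the do/while wrapper keeps the macro usable as a single statement inside if/else. A minimal user-space analogue (GNU ##__VA_ARGS__ assumed, as in modpost itself):

    #include <stdio.h>
    #include <stdlib.h>

    static int error_occurred;

    #define error(fmt, ...) do { \
    		fprintf(stderr, "ERROR: " fmt, ##__VA_ARGS__); \
    		error_occurred = 1; \
    	} while (0)
    /* while (1) signals to readers and flow analysis: never falls through */
    #define fatal(fmt, ...) do { error(fmt, ##__VA_ARGS__); exit(1); } while (1)

    int main(void)
    {
    	if (error_occurred)
    		error("still parses as one statement\n");
    	else
    		fatal("unrecoverable: %d\n", 42);
    }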
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index 31066bfdba04..dc4878502276 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -326,7 +326,12 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
/* Sum all files in the same dir or subdirs. */
while ((line = get_line(&pos))) {
- char* p = line;
+ char* p;
+
+ /* trim the leading spaces away */
+ while (isspace(*line))
+ line++;
+ p = line;
if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
p = strrchr(line, ' ');
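One caveat when reusing this trim pattern outside the kernel tree: the ctype functions take an int that must be the value of an unsigned char (or EOF), so portable code casts the argument. A hedged standalone version:

    #include <ctype.h>

    /* skip leading blanks/tabs; returns a pointer into the same buffer */
    static char *trim_leading(char *line)
    {
    	while (isspace((unsigned char)*line))
    		line++;
    	return line;
    }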
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index d7dd0d04c70c..bf96a3c24608 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -25,35 +25,20 @@ if_enabled_echo() {
}
create_package() {
- local pname="$1" pdir="$2"
- local dpkg_deb_opts
-
- mkdir -m 755 -p "$pdir/DEBIAN"
- mkdir -p "$pdir/usr/share/doc/$pname"
- cp debian/copyright "$pdir/usr/share/doc/$pname/"
- cp debian/changelog "$pdir/usr/share/doc/$pname/changelog.Debian"
- gzip -n -9 "$pdir/usr/share/doc/$pname/changelog.Debian"
- sh -c "cd '$pdir'; find . -type f ! -path './DEBIAN/*' -printf '%P\0' \
- | xargs -r0 md5sum > DEBIAN/md5sums"
-
- # Fix ownership and permissions
- if [ "$DEB_RULES_REQUIRES_ROOT" = "no" ]; then
- dpkg_deb_opts="--root-owner-group"
- else
- chown -R root:root "$pdir"
- fi
- # a+rX in case we are in a restrictive umask environment like 0077
- # ug-s in case we build in a setuid/setgid directory
- chmod -R go-w,a+rX,ug-s "$pdir"
-
- # Create the package
- dpkg-gencontrol -p$pname -P"$pdir"
- dpkg-deb $dpkg_deb_opts ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS} --build "$pdir" ..
+ export DH_OPTIONS="-p${1}"
+
+ dh_installdocs
+ dh_installchangelogs
+ dh_compress
+ dh_fixperms
+ dh_gencontrol
+ dh_md5sums
+ dh_builddeb -- ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS}
}
install_linux_image () {
- pdir=$1
- pname=$2
+ pname=$1
+ pdir=debian/$1
rm -rf ${pdir}
@@ -62,7 +47,7 @@ install_linux_image () {
${MAKE} -f ${srctree}/Makefile INSTALL_DTBS_PATH="${pdir}/usr/lib/linux-image-${KERNELRELEASE}" dtbs_install
fi
- ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
+ ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" INSTALL_MOD_STRIP=1 modules_install
rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
# Install the kernel
@@ -122,26 +107,22 @@ install_linux_image () {
}
install_linux_image_dbg () {
- pdir=$1
- image_pdir=$2
+ pdir=debian/$1
rm -rf ${pdir}
- for module in $(find ${image_pdir}/lib/modules/ -name *.ko -printf '%P\n'); do
- module=lib/modules/${module}
- mkdir -p $(dirname ${pdir}/usr/lib/debug/${module})
- # only keep debug symbols in the debug file
- ${OBJCOPY} --only-keep-debug ${image_pdir}/${module} ${pdir}/usr/lib/debug/${module}
- # strip original module from debug symbols
- ${OBJCOPY} --strip-debug ${image_pdir}/${module}
- # then add a link to those
- ${OBJCOPY} --add-gnu-debuglink=${pdir}/usr/lib/debug/${module} ${image_pdir}/${module}
- done
+ # Parse modules.order directly because 'make modules_install' may sign
+ # and compress modules, and then run an unneeded depmod.
+ while read -r mod; do
+ mod="${mod%.o}.ko"
+ dbg="${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/kernel/${mod}"
+ buildid=$("${READELF}" -n "${mod}" | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p')
+ link="${pdir}/usr/lib/debug/.build-id/${buildid}.debug"
- # re-sign stripped modules
- if is_enabled CONFIG_MODULE_SIG_ALL; then
- ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${image_pdir}" modules_sign
- fi
+ mkdir -p "${dbg%/*}" "${link%/*}"
+ "${OBJCOPY}" --only-keep-debug "${mod}" "${dbg}"
+ ln -sf --relative "${dbg}" "${link}"
+ done < modules.order
# Build debug package
# Different tools want the image in different locations
@@ -156,8 +137,8 @@ install_linux_image_dbg () {
}
install_kernel_headers () {
- pdir=$1
- version=$2
+ pdir=debian/$1
+ version=${1#linux-headers-}
rm -rf $pdir
@@ -168,18 +149,16 @@ install_kernel_headers () {
}
install_libc_headers () {
- pdir=$1
+ pdir=debian/$1
rm -rf $pdir
- $MAKE -f $srctree/Makefile headers
$MAKE -f $srctree/Makefile headers_install INSTALL_HDR_PATH=$pdir/usr
# move asm headers to /usr/include/<libc-machine>/asm to match the structure
# used by Debian-based distros (to support multi-arch)
- host_arch=$(dpkg-architecture -a$DEB_HOST_ARCH -qDEB_HOST_MULTIARCH)
- mkdir $pdir/usr/include/$host_arch
- mv $pdir/usr/include/asm $pdir/usr/include/$host_arch/
+ mkdir "$pdir/usr/include/${DEB_HOST_MULTIARCH}"
+ mv "$pdir/usr/include/asm" "$pdir/usr/include/${DEB_HOST_MULTIARCH}"
}
rm -f debian/files
@@ -190,30 +169,13 @@ for package in ${packages_enabled}
do
case ${package} in
*-dbg)
- # This must be done after linux-image, that is, we expect the
- # debug package appears after linux-image in debian/control.
- install_linux_image_dbg debian/linux-image-dbg debian/linux-image;;
- linux-image-*|user-mode-linux-*)
- install_linux_image debian/linux-image ${package};;
- linux-libc-dev)
- install_libc_headers debian/linux-libc-dev;;
- linux-headers-*)
- install_kernel_headers debian/linux-headers ${package#linux-headers-};;
- esac
-done
-
-for package in ${packages_enabled}
-do
- case ${package} in
- *-dbg)
- create_package ${package} debian/linux-image-dbg;;
+ install_linux_image_dbg "${package}";;
linux-image-*|user-mode-linux-*)
- create_package ${package} debian/linux-image;;
+ install_linux_image "${package}";;
linux-libc-dev)
- create_package ${package} debian/linux-libc-dev;;
+ install_libc_headers "${package}";;
linux-headers-*)
- create_package ${package} debian/linux-headers;;
+ install_kernel_headers "${package}";;
esac
+ create_package "${package}"
done
-
-exit 0
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index 65b4ea502962..72c91a1b832f 100755
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -23,7 +23,6 @@ tmpdir=$1
#
rm -rf -- "${tmpdir}"
mkdir -p -- "${tmpdir}/boot"
-dirs=boot
#
@@ -38,12 +37,9 @@ fi
#
-# Try to install modules
+# Install modules
#
-if grep -q '^CONFIG_MODULES=y' include/config/auto.conf; then
- make ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_MOD_PATH="${tmpdir}" modules_install
- dirs="$dirs lib"
-fi
+make ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_MOD_PATH="${tmpdir}" modules_install
#
diff --git a/scripts/package/deb-build-option b/scripts/package/deb-build-option
deleted file mode 100755
index 7950eff01781..000000000000
--- a/scripts/package/deb-build-option
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-only
-
-# Set up CROSS_COMPILE if not defined yet
-if [ "${CROSS_COMPILE+set}" != "set" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then
- echo CROSS_COMPILE=${DEB_HOST_GNU_TYPE}-
-fi
-
-version=$(dpkg-parsechangelog -S Version)
-debian_revision="${version##*-}"
-
-if [ "${version}" != "${debian_revision}" ]; then
- echo KBUILD_BUILD_VERSION=${debian_revision}
-fi
diff --git a/scripts/package/debian/copyright b/scripts/package/debian/copyright
new file mode 100644
index 000000000000..4f1f06221f09
--- /dev/null
+++ b/scripts/package/debian/copyright
@@ -0,0 +1,16 @@
+This is a packaged upstream version of the Linux kernel.
+
+The sources may be found at most Linux archive sites, including:
+https://www.kernel.org/pub/linux/kernel
+
+Copyright: 1991 - 2023 Linus Torvalds and others.
+
+The git repository for mainline kernel development is at:
+git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 dated June, 1991.
+
+On Debian GNU/Linux systems, the complete text of the GNU General Public
+License version 2 can be found in `/usr/share/common-licenses/GPL-2'.
diff --git a/scripts/package/debian/rules b/scripts/package/debian/rules
index 3dafa9496c63..098307780062 100755
--- a/scripts/package/debian/rules
+++ b/scripts/package/debian/rules
@@ -1,33 +1,46 @@
#!/usr/bin/make -f
# SPDX-License-Identifier: GPL-2.0-only
-include debian/rules.vars
+# in case debian/rules is executed directly
+export DEB_RULES_REQUIRES_ROOT := no
-srctree ?= .
+include debian/rules.vars
ifneq (,$(filter-out parallel=1,$(filter parallel=%,$(DEB_BUILD_OPTIONS))))
NUMJOBS = $(patsubst parallel=%,%,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
MAKEFLAGS += -j$(NUMJOBS)
endif
+revision = $(lastword $(subst -, ,$(shell dpkg-parsechangelog -S Version)))
+CROSS_COMPILE ?= $(filter-out $(DEB_BUILD_GNU_TYPE)-, $(DEB_HOST_GNU_TYPE)-)
+make-opts = ARCH=$(ARCH) KERNELRELEASE=$(KERNELRELEASE) KBUILD_BUILD_VERSION=$(revision) $(addprefix CROSS_COMPILE=,$(CROSS_COMPILE))
+
.PHONY: binary binary-indep binary-arch
binary: binary-arch binary-indep
binary-indep: build-indep
binary-arch: build-arch
- $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) \
- KERNELRELEASE=$(KERNELRELEASE) \
- run-command KBUILD_RUN_COMMAND=+$(srctree)/scripts/package/builddeb
+ $(MAKE) $(make-opts) \
+ run-command KBUILD_RUN_COMMAND='+$$(srctree)/scripts/package/builddeb'
.PHONY: build build-indep build-arch
build: build-arch build-indep
build-indep:
build-arch:
- $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) \
- KERNELRELEASE=$(KERNELRELEASE) \
- $(shell $(srctree)/scripts/package/deb-build-option) \
- olddefconfig all
+ $(MAKE) $(make-opts) olddefconfig
+ $(MAKE) $(make-opts) $(if $(filter um,$(ARCH)),,headers) all
.PHONY: clean
clean:
- rm -rf debian/files debian/linux-*
- $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) clean
+ rm -rf debian/files debian/linux-* debian/deb-env.vars*
+ $(MAKE) ARCH=$(ARCH) clean
+
+# If DEB_HOST_ARCH is empty, it is likely that debian/rules was executed
+# directly. Run 'dpkg-architecture --print-set --print-format=make' to
+# generate a makefile construct that exports all DEB_* variables.
+ifndef DEB_HOST_ARCH
+include debian/deb-env.vars
+
+debian/deb-env.vars:
+ dpkg-architecture -a$$(cat debian/arch) --print-set --print-format=make > $@.tmp
+ mv $@.tmp $@
+endif
diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build
index 8a7051fad087..76e0765dfcd6 100755
--- a/scripts/package/install-extmod-build
+++ b/scripts/package/install-extmod-build
@@ -20,7 +20,7 @@ mkdir -p "${destdir}"
find "arch/${SRCARCH}" -maxdepth 1 -name 'Makefile*'
find include scripts -type f -o -type l
find "arch/${SRCARCH}" -name Kbuild.platforms -o -name Platform
- find "arch/${SRCARCH}" -name include -o -name scripts -type d
+ find "arch/${SRCARCH}" -name include -type d
) | tar -c -f - -C "${srctree}" -T - | tar -xf - -C "${destdir}"
{
diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
index 3eee0143e0c5..f58726671fb3 100644
--- a/scripts/package/kernel.spec
+++ b/scripts/package/kernel.spec
@@ -55,18 +55,12 @@ patch -p1 < %{SOURCE2}
%{make} %{makeflags} KERNELRELEASE=%{KERNELRELEASE} KBUILD_BUILD_VERSION=%{release}
%install
-mkdir -p %{buildroot}/boot
-%ifarch ia64
-mkdir -p %{buildroot}/boot/efi
-cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/efi/vmlinuz-%{KERNELRELEASE}
-ln -s efi/vmlinuz-%{KERNELRELEASE} %{buildroot}/boot/
-%else
-cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/vmlinuz-%{KERNELRELEASE}
-%endif
+mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE}
+cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz
%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} modules_install
%{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
-cp System.map %{buildroot}/boot/System.map-%{KERNELRELEASE}
-cp .config %{buildroot}/boot/config-%{KERNELRELEASE}
+cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE}
+cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config
ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/build
%if %{with_devel}
%{make} %{makeflags} run-command KBUILD_RUN_COMMAND='${srctree}/scripts/package/install-extmod-build %{buildroot}/usr/src/kernels/%{KERNELRELEASE}'
@@ -76,13 +70,14 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA
rm -rf %{buildroot}
%post
-if [ -x /sbin/installkernel -a -r /boot/vmlinuz-%{KERNELRELEASE} -a -r /boot/System.map-%{KERNELRELEASE} ]; then
-cp /boot/vmlinuz-%{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm
-cp /boot/System.map-%{KERNELRELEASE} /boot/.System.map-%{KERNELRELEASE}-rpm
-rm -f /boot/vmlinuz-%{KERNELRELEASE} /boot/System.map-%{KERNELRELEASE}
-/sbin/installkernel %{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm
-rm -f /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm
+if [ -x /usr/bin/kernel-install ]; then
+ /usr/bin/kernel-install add %{KERNELRELEASE} /lib/modules/%{KERNELRELEASE}/vmlinuz
fi
+for file in vmlinuz System.map config; do
+ if ! cmp --silent "/lib/modules/%{KERNELRELEASE}/${file}" "/boot/${file}-%{KERNELRELEASE}"; then
+ cp "/lib/modules/%{KERNELRELEASE}/${file}" "/boot/${file}-%{KERNELRELEASE}"
+ fi
+done
%preun
if [ -x /sbin/new-kernel-pkg ]; then
@@ -100,7 +95,6 @@ fi
%defattr (-, root, root)
/lib/modules/%{KERNELRELEASE}
%exclude /lib/modules/%{KERNELRELEASE}/build
-/boot/*
%files headers
%defattr (-, root, root)
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 5044224cf671..070149c985fe 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -26,7 +26,7 @@ set_debarch() {
# Attempt to find the correct Debian architecture
case "$UTS_MACHINE" in
- i386|ia64|alpha|m68k|riscv*)
+ i386|alpha|m68k|riscv*)
debarch="$UTS_MACHINE" ;;
x86_64)
debarch=amd64 ;;
@@ -176,8 +176,6 @@ else
fi
echo $debarch > debian/arch
-extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
-extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
# Generate a simple changelog template
cat <<EOF > debian/changelog
@@ -188,26 +186,6 @@ $sourcename ($packageversion) $distribution; urgency=low
-- $maintainer $(date -R)
EOF
-# Generate copyright file
-cat <<EOF > debian/copyright
-This is a packaged upstream version of the Linux kernel.
-
-The sources may be found at most Linux archive sites, including:
-https://www.kernel.org/pub/linux/kernel
-
-Copyright: 1991 - 2018 Linus Torvalds and others.
-
-The git repository for mainline kernel development is at:
-git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 dated June, 1991.
-
-On Debian GNU/Linux systems, the complete text of the GNU General Public
-License version 2 can be found in \`/usr/share/common-licenses/GPL-2'.
-EOF
-
# Generate a control file
cat <<EOF > debian/control
Source: $sourcename
@@ -215,7 +193,8 @@ Section: kernel
Priority: optional
Maintainer: $maintainer
Rules-Requires-Root: no
-Build-Depends: bc, debhelper, rsync, kmod, cpio, bison, flex $extra_build_depends
+Build-Depends: debhelper-compat (= 12)
+Build-Depends-Arch: bc, bison, cpio, flex, kmod, libelf-dev:native, libssl-dev:native, rsync
Homepage: https://www.kernel.org/
Package: $packagename-$version
@@ -268,6 +247,7 @@ ARCH := ${ARCH}
KERNELRELEASE := ${KERNELRELEASE}
EOF
+cp "${srctree}/scripts/package/debian/copyright" debian/
cp "${srctree}/scripts/package/debian/rules" debian/
exit 0
diff --git a/scripts/package/snapcraft.template b/scripts/package/snapcraft.template
index 626d278e4a5a..85d5e07d1b40 100644
--- a/scripts/package/snapcraft.template
+++ b/scripts/package/snapcraft.template
@@ -10,5 +10,5 @@ parts:
kernel:
plugin: kernel
source: SRCTREE
- source-type: tar
+ source-type: local
kernel-with-firmware: false
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 40ae6b2c7a6d..3e4f54799cc0 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -590,7 +590,6 @@ static int do_file(char const *const fname)
ideal_nop = ideal_nop4_arm64;
is_fake_mcount64 = arm64_is_fake_mcount;
break;
- case EM_IA_64: reltype = R_IA64_IMM64; break;
case EM_MIPS: /* reltype: e_class */ break;
case EM_LOONGARCH: /* reltype: e_class */ break;
case EM_PPC: reltype = R_PPC_ADDR32; break;
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 6a4645a57976..f84df9e383fd 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -275,13 +275,6 @@ if ($arch eq "x86_64") {
$section_type = '%progbits';
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_AARCH64_CALL26\\s+_mcount\$";
$type = ".quad";
-} elsif ($arch eq "ia64") {
- $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
- $type = "data8";
-
- if ($is_module eq "0") {
- $cc .= " -mconstant-gp";
- }
} elsif ($arch eq "sparc64") {
# In the objdump output there are giblets like:
# 0000000000000000 <igmp_net_exit-0x18>:
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
index 76e9cbcfbeab..d06baf626abe 100755
--- a/scripts/xz_wrap.sh
+++ b/scripts/xz_wrap.sh
@@ -15,7 +15,6 @@ LZMA2OPTS=
case $SRCARCH in
x86) BCJ=--x86 ;;
powerpc) BCJ=--powerpc ;;
- ia64) BCJ=--ia64; LZMA2OPTS=pb=4 ;;
arm) BCJ=--arm ;;
sparc) BCJ=--sparc ;;
esac
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index e0d1dd0a192a..64cc3044a42c 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -57,10 +57,10 @@ config SECURITY_APPARMOR_INTROSPECT_POLICY
cpu is paramount.
config SECURITY_APPARMOR_HASH
- bool "Enable introspection of sha1 hashes for loaded profiles"
+ bool "Enable introspection of sha256 hashes for loaded profiles"
depends on SECURITY_APPARMOR_INTROSPECT_POLICY
select CRYPTO
- select CRYPTO_SHA1
+ select CRYPTO_SHA256
default y
help
This option selects whether introspection of loaded policy
@@ -74,10 +74,10 @@ config SECURITY_APPARMOR_HASH_DEFAULT
depends on SECURITY_APPARMOR_HASH
default y
help
- This option selects whether sha1 hashing of loaded policy
- is enabled by default. The generation of sha1 hashes for
- loaded policy provide system administrators a quick way
- to verify that policy in the kernel matches what is expected,
+ This option selects whether sha256 hashing of loaded policy
+ is enabled by default. The generation of sha256 hashes for
+ loaded policy provides system administrators a quick way to
+ verify that policy in the kernel matches what is expected,
however it can slow down policy load on some devices. In
these cases policy hashing can be disabled by default and
enabled only if needed.
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index f3c77825aa75..bcfea073e3f2 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -1474,7 +1474,7 @@ int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
rawdata->dents[AAFS_LOADDATA_REVISION] = dent;
if (aa_g_hash_policy) {
- dent = aafs_create_file("sha1", S_IFREG | 0444, dir,
+ dent = aafs_create_file("sha256", S_IFREG | 0444, dir,
rawdata, &seq_rawdata_hash_fops);
if (IS_ERR(dent))
goto fail;
@@ -1643,11 +1643,11 @@ static const char *rawdata_get_link_base(struct dentry *dentry,
return target;
}
-static const char *rawdata_get_link_sha1(struct dentry *dentry,
+static const char *rawdata_get_link_sha256(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- return rawdata_get_link_base(dentry, inode, done, "sha1");
+ return rawdata_get_link_base(dentry, inode, done, "sha256");
}
static const char *rawdata_get_link_abi(struct dentry *dentry,
@@ -1664,8 +1664,8 @@ static const char *rawdata_get_link_data(struct dentry *dentry,
return rawdata_get_link_base(dentry, inode, done, "raw_data");
}
-static const struct inode_operations rawdata_link_sha1_iops = {
- .get_link = rawdata_get_link_sha1,
+static const struct inode_operations rawdata_link_sha256_iops = {
+ .get_link = rawdata_get_link_sha256,
};
static const struct inode_operations rawdata_link_abi_iops = {
@@ -1738,7 +1738,7 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
profile->dents[AAFS_PROF_ATTACH] = dent;
if (profile->hash) {
- dent = create_profile_file(dir, "sha1", profile,
+ dent = create_profile_file(dir, "sha256", profile,
&seq_profile_hash_fops);
if (IS_ERR(dent))
goto fail;
@@ -1748,9 +1748,9 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
if (profile->rawdata) {
if (aa_g_hash_policy) {
- dent = aafs_create("raw_sha1", S_IFLNK | 0444, dir,
+ dent = aafs_create("raw_sha256", S_IFLNK | 0444, dir,
profile->label.proxy, NULL, NULL,
- &rawdata_link_sha1_iops);
+ &rawdata_link_sha256_iops);
if (IS_ERR(dent))
goto fail;
aa_get_proxy(profile->label.proxy);
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index 6724e2ff6da8..aad486b2fca6 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -106,16 +106,16 @@ static int __init init_profile_hash(void)
if (!apparmor_initialized)
return 0;
- tfm = crypto_alloc_shash("sha1", 0, 0);
+ tfm = crypto_alloc_shash("sha256", 0, 0);
if (IS_ERR(tfm)) {
int error = PTR_ERR(tfm);
- AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
+ AA_ERROR("failed to setup profile sha256 hashing: %d\n", error);
return error;
}
apparmor_tfm = tfm;
apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
- aa_info_message("AppArmor sha1 policy hashing enabled");
+ aa_info_message("AppArmor sha256 policy hashing enabled");
return 0;
}
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 89fbeab4b33b..571158ec6188 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1311,7 +1311,7 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
return error;
}
-const char *stack_msg = "change_profile unprivileged unconfined converted to stacking";
+static const char *stack_msg = "change_profile unprivileged unconfined converted to stacking";
/**
* aa_change_profile - perform a one-way profile transition
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 4c198d273f09..cd569fbbfe36 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -41,6 +41,7 @@ void aa_free_str_table(struct aa_str_table *t)
kfree_sensitive(t->table[i]);
kfree_sensitive(t->table);
t->table = NULL;
+ t->size = 0;
}
}
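Zeroing t->size alongside t->table keeps the struct self-describing, so any later traversal of a freed table sees zero entries rather than a stale count. A sketch under an assumed struct shape:

    #include <stdlib.h>

    struct str_table {	/* assumed shape of the kernel's aa_str_table */
    	char **table;
    	int size;
    };

    static void str_table_free(struct str_table *t)
    {
    	for (int i = 0; i < t->size; i++)
    		free(t->table[i]);
    	free(t->table);
    	t->table = NULL;
    	t->size = 0;	/* never leave size describing freed storage */
    }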
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index e490a7000408..9a3dcaafb5b1 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -469,8 +469,10 @@ static int apparmor_file_open(struct file *file)
* Cache permissions granted by the previous exec check, with
* implicit read and executable mmap which are required to
* actually execute the image.
+ *
+ * Illogically, FMODE_EXEC is in f_flags, not f_mode.
*/
- if (current->in_execve) {
+ if (file->f_flags & __FMODE_EXEC) {
fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
return 0;
}
@@ -782,7 +784,7 @@ static int apparmor_getselfattr(unsigned int attr, struct lsm_ctx __user *lx,
int error = -ENOENT;
struct aa_task_ctx *ctx = task_ctx(current);
struct aa_label *label = NULL;
- char *value;
+ char *value = NULL;
switch (attr) {
case LSM_ATTR_CURRENT:
@@ -1023,7 +1025,6 @@ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo
cl = aa_get_newest_cred_label(cred);
error = aa_may_signal(cred, cl, tc, tl, sig);
aa_put_label(cl);
- return error;
} else {
cl = __begin_current_label_crit_section();
error = aa_may_signal(current_cred(), cl, tc, tl, sig);
@@ -1056,9 +1057,6 @@ static int apparmor_userns_create(const struct cred *cred)
return error;
}
-/**
- * apparmor_sk_alloc_security - allocate and attach the sk_security field
- */
static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
{
struct aa_sk_ctx *ctx;
@@ -1072,9 +1070,6 @@ static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
return 0;
}
-/**
- * apparmor_sk_free_security - free the sk_security field
- */
static void apparmor_sk_free_security(struct sock *sk)
{
struct aa_sk_ctx *ctx = aa_sock(sk);
@@ -1087,6 +1082,8 @@ static void apparmor_sk_free_security(struct sock *sk)
/**
* apparmor_sk_clone_security - clone the sk_security field
+ * @sk: sock to have security cloned
+ * @newsk: sock receiving the cloned security
*/
static void apparmor_sk_clone_security(const struct sock *sk,
struct sock *newsk)
@@ -1103,9 +1100,6 @@ static void apparmor_sk_clone_security(const struct sock *sk,
new->peer = aa_get_label(ctx->peer);
}
-/**
- * apparmor_socket_create - check perms before creating a new socket
- */
static int apparmor_socket_create(int family, int type, int protocol, int kern)
{
struct aa_label *label;
@@ -1127,10 +1121,14 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
/**
* apparmor_socket_post_create - setup the per-socket security struct
+ * @sock: socket that is being setup
+ * @family: family of socket being created
+ * @type: type of the socket
+ * @protocol: protocol of the socket
+ * @kern: socket is a special kernel socket
*
* Note:
- * - kernel sockets currently labeled unconfined but we may want to
- * move to a special kernel label
+ * - kernel sockets are now labeled kernel_t; they used to be unconfined
* - socket may not have sk here if created with sock_create_lite or
* sock_alloc. These should be accept cases which will be handled in
* sock_graft.
@@ -1156,9 +1154,6 @@ static int apparmor_socket_post_create(struct socket *sock, int family,
return 0;
}
-/**
- * apparmor_socket_bind - check perms before bind addr to socket
- */
static int apparmor_socket_bind(struct socket *sock,
struct sockaddr *address, int addrlen)
{
@@ -1172,9 +1167,6 @@ static int apparmor_socket_bind(struct socket *sock,
aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk));
}
-/**
- * apparmor_socket_connect - check perms before connecting @sock to @address
- */
static int apparmor_socket_connect(struct socket *sock,
struct sockaddr *address, int addrlen)
{
@@ -1188,9 +1180,6 @@ static int apparmor_socket_connect(struct socket *sock,
aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk));
}
-/**
- * apparmor_socket_listen - check perms before allowing listen
- */
static int apparmor_socket_listen(struct socket *sock, int backlog)
{
AA_BUG(!sock);
@@ -1202,9 +1191,7 @@ static int apparmor_socket_listen(struct socket *sock, int backlog)
aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk));
}
-/**
- * apparmor_socket_accept - check perms before accepting a new connection.
- *
+/*
* Note: while @newsock is created and has some information, the accept
* has not been done.
*/
@@ -1233,18 +1220,12 @@ static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
aa_sk_perm(op, request, sock->sk));
}
-/**
- * apparmor_socket_sendmsg - check perms before sending msg to another socket
- */
static int apparmor_socket_sendmsg(struct socket *sock,
struct msghdr *msg, int size)
{
return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
}
-/**
- * apparmor_socket_recvmsg - check perms before receiving a message
- */
static int apparmor_socket_recvmsg(struct socket *sock,
struct msghdr *msg, int size, int flags)
{
@@ -1263,17 +1244,11 @@ static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
aa_sk_perm(op, request, sock->sk));
}
-/**
- * apparmor_socket_getsockname - check perms before getting the local address
- */
static int apparmor_socket_getsockname(struct socket *sock)
{
return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
}
-/**
- * apparmor_socket_getpeername - check perms before getting remote address
- */
static int apparmor_socket_getpeername(struct socket *sock)
{
return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
@@ -1292,9 +1267,6 @@ static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
aa_sk_perm(op, request, sock->sk));
}
-/**
- * apparmor_socket_getsockopt - check perms before getting socket options
- */
static int apparmor_socket_getsockopt(struct socket *sock, int level,
int optname)
{
@@ -1302,9 +1274,6 @@ static int apparmor_socket_getsockopt(struct socket *sock, int level,
level, optname);
}
-/**
- * apparmor_socket_setsockopt - check perms before setting socket options
- */
static int apparmor_socket_setsockopt(struct socket *sock, int level,
int optname)
{
@@ -1312,9 +1281,6 @@ static int apparmor_socket_setsockopt(struct socket *sock, int level,
level, optname);
}
-/**
- * apparmor_socket_shutdown - check perms before shutting down @sock conn
- */
static int apparmor_socket_shutdown(struct socket *sock, int how)
{
return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
@@ -1323,6 +1289,8 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
#ifdef CONFIG_NETWORK_SECMARK
/**
* apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
+ * @sk: sk to associate @skb with
+ * @skb: skb to check for perms
*
* Note: cannot sleep; may be called with locks held
*
@@ -1354,6 +1322,11 @@ static struct aa_label *sk_peer_label(struct sock *sk)
/**
* apparmor_socket_getpeersec_stream - get security context of peer
+ * @sock: socket that we are trying to get the peer context of
+ * @optval: output - buffer to copy peer name to
+ * @optlen: output - size of copied name in @optval
+ * @len: size of @optval buffer
+ * Returns: 0 on success, -errno on failure
*
* Note: for TCP, only valid when using IPsec or CIPSO on the LAN
*/
@@ -2182,7 +2155,7 @@ __initcall(apparmor_nf_ip_init);
static char nulldfa_src[] = {
#include "nulldfa.in"
};
-struct aa_dfa *nulldfa;
+static struct aa_dfa *nulldfa;
static char stacksplitdfa_src[] = {
#include "stacksplitdfa.in"
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index ed4c9803c8fa..957654d253dd 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -99,13 +99,14 @@ const char *const aa_profile_mode_names[] = {
};
-static void aa_free_pdb(struct aa_policydb *policy)
+static void aa_free_pdb(struct aa_policydb *pdb)
{
- if (policy) {
- aa_put_dfa(policy->dfa);
- if (policy->perms)
- kvfree(policy->perms);
- aa_free_str_table(&policy->trans);
+ if (pdb) {
+ aa_put_dfa(pdb->dfa);
+ if (pdb->perms)
+ kvfree(pdb->perms);
+ aa_free_str_table(&pdb->trans);
+ kfree(pdb);
}
}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 47ec097d6741..5e578ef0ddff 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -478,6 +478,8 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
if (!table)
goto fail;
+ strs->table = table;
+ strs->size = size;
for (i = 0; i < size; i++) {
char *str;
int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
@@ -520,14 +522,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
-
- strs->table = table;
- strs->size = size;
}
return true;
fail:
- kfree_sensitive(table);
+ aa_free_str_table(strs);
e->pos = saved_pos;
return false;
}
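Publishing table and size into *strs before the fill loop is what lets the single failure path call aa_free_str_table() on a half-filled table. The general shape, with hypothetical helpers (read_one_string, plus the str_table_free() sketch shown earlier):

    #include <stdlib.h>

    struct str_table { char **table; int size; };	/* assumed shape */

    char *read_one_string(void);			/* hypothetical */
    void str_table_free(struct str_table *t);		/* per the sketch above */

    static int unpack_strings(struct str_table *strs, int size)
    {
    	strs->table = calloc(size, sizeof(char *));
    	if (!strs->table)
    		return -1;
    	strs->size = size;	/* publish ownership before filling */

    	for (int i = 0; i < size; i++) {
    		strs->table[i] = read_one_string();
    		if (!strs->table[i])
    			goto fail;
    	}
    	return 0;
    fail:
    	str_table_free(strs);	/* one path frees however much was filled */
    	return -1;
    }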
@@ -833,6 +832,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
if (tmpns) {
+ if (!tmpname) {
+ info = "empty profile name";
+ goto fail;
+ }
*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
if (!*ns_name) {
info = "out of memory";
@@ -1022,8 +1025,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
}
} else if (rules->policy->dfa &&
rules->policy->start[AA_CLASS_FILE]) {
+ aa_put_pdb(rules->file);
rules->file = aa_get_pdb(rules->policy);
} else {
+ aa_put_pdb(rules->file);
rules->file = aa_get_pdb(nullpdb);
}
error = -EPROTO;
diff --git a/security/apparmor/task.c b/security/apparmor/task.c
index f29a2e80e6bf..c87fb9f4ac18 100644
--- a/security/apparmor/task.c
+++ b/security/apparmor/task.c
@@ -278,7 +278,9 @@ static int profile_tracer_perm(const struct cred *cred,
/**
* aa_may_ptrace - test if tracer task can trace the tracee
+ * @tracer_cred: cred of task doing the tracing (NOT NULL)
* @tracer: label of the task doing the tracing (NOT NULL)
+ * @tracee_cred: cred of task to be traced
* @tracee: task label to be traced
* @request: permission request
*
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index df387de29bfa..45c3e5dda355 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -179,7 +179,8 @@ static int __init integrity_add_key(const unsigned int id, const void *data,
KEY_ALLOC_NOT_IN_QUOTA);
if (IS_ERR(key)) {
rc = PTR_ERR(key);
- pr_err("Problem loading X.509 certificate %d\n", rc);
+ if (id != INTEGRITY_KEYRING_MACHINE)
+ pr_err("Problem loading X.509 certificate %d\n", rc);
} else {
pr_notice("Loaded X.509 cert '%s'\n",
key_ref_to_ptr(key)->description);
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 76f55dd13cb8..8af2136069d2 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -237,10 +237,6 @@ static int datablob_parse(char *datablob, const char **format,
break;
}
*decrypted_data = strsep(&datablob, " \t");
- if (!*decrypted_data) {
- pr_info("encrypted_key: decrypted_data is missing\n");
- break;
- }
ret = 0;
break;
case Opt_load:
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index fc520a06f9af..0171f7eb6ee1 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -737,8 +737,8 @@ static int current_check_refer_path(struct dentry *const old_dentry,
bool allow_parent1, allow_parent2;
access_mask_t access_request_parent1, access_request_parent2;
struct path mnt_dir;
- layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
- layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];
+ layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
+ layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
if (!dom)
return 0;
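The added `= {}` matters because the refer-path logic can consult mask slots the collection walk never wrote; empty-brace initialization zeroes the whole array, turning such reads into "no access granted". A small standalone check (array size assumed for the demo):

    #include <assert.h>

    #define NUM_ACCESS_FS 16		/* size assumed for the demo */
    typedef unsigned short layer_mask_t;

    int main(void)
    {
    	/* empty-brace init (GNU/C23, as in the kernel) zeroes every slot */
    	layer_mask_t masks[NUM_ACCESS_FS] = {};

    	for (int i = 0; i < NUM_ACCESS_FS; i++)
    		assert(masks[i] == 0);	/* reads are now deterministic */
    	return 0;
    }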
diff --git a/security/security.c b/security/security.c
index 0144a98d3712..aef69632d0a9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -29,6 +29,7 @@
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/msg.h>
+#include <linux/overflow.h>
#include <net/flow.h>
/* How many LSMs were built into the kernel? */
@@ -4015,6 +4016,7 @@ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
struct security_hook_list *hp;
struct lsm_ctx *lctx;
int rc = LSM_RET_DEFAULT(setselfattr);
+ u64 required_len;
if (flags)
return -EINVAL;
@@ -4027,8 +4029,9 @@ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
if (IS_ERR(lctx))
return PTR_ERR(lctx);
- if (size < lctx->len || size < lctx->ctx_len + sizeof(*lctx) ||
- lctx->len < lctx->ctx_len + sizeof(*lctx)) {
+ if (size < lctx->len ||
+ check_add_overflow(sizeof(*lctx), lctx->ctx_len, &required_len) ||
+ lctx->len < required_len) {
rc = -EINVAL;
goto free_out;
}
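check_add_overflow() computes the sum into its third argument and returns true on wraparound, so the rewritten test rejects a ctx_len large enough to wrap sizeof(*lctx) + ctx_len — exactly the case the old open-coded comparison could miss. A user-space analogue built on the same compiler builtin:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t required_len;
    	uint64_t hdr = 32;			/* stand-in for sizeof(*lctx) */
    	uint64_t ctx_len = UINT64_MAX - 8;	/* attacker-ish value */

    	/* same semantics as the kernel helper: true means overflow */
    	if (__builtin_add_overflow(hdr, ctx_len, &required_len))
    		puts("rejected: length would wrap");
    	else
    		printf("required_len = %llu\n",
    		       (unsigned long long)required_len);
    	return 0;
    }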
@@ -4255,7 +4258,19 @@ EXPORT_SYMBOL(security_inode_setsecctx);
*/
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
- return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
+ struct security_hook_list *hp;
+ int rc;
+
+ /*
+ * Only one module will provide a security context.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.inode_getsecctx, list) {
+ rc = hp->hook.inode_getsecctx(inode, ctx, ctxlen);
+ if (rc != LSM_RET_DEFAULT(inode_getsecctx))
+ return rc;
+ }
+
+ return LSM_RET_DEFAULT(inode_getsecctx);
}
EXPORT_SYMBOL(security_inode_getsecctx);
@@ -4612,8 +4627,20 @@ EXPORT_SYMBOL(security_sock_rcv_skb);
int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
sockptr_t optlen, unsigned int len)
{
- return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
- optval, optlen, len);
+ struct security_hook_list *hp;
+ int rc;
+
+ /*
+ * Only one module will provide a security context.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_stream,
+ list) {
+ rc = hp->hook.socket_getpeersec_stream(sock, optval, optlen,
+ len);
+ if (rc != LSM_RET_DEFAULT(socket_getpeersec_stream))
+ return rc;
+ }
+ return LSM_RET_DEFAULT(socket_getpeersec_stream);
}
/**
@@ -4633,8 +4660,19 @@ int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
int security_socket_getpeersec_dgram(struct socket *sock,
struct sk_buff *skb, u32 *secid)
{
- return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
- skb, secid);
+ struct security_hook_list *hp;
+ int rc;
+
+ /*
+ * Only one module will provide a security context.
+ */
+ hlist_for_each_entry(hp, &security_hook_heads.socket_getpeersec_dgram,
+ list) {
+ rc = hp->hook.socket_getpeersec_dgram(sock, skb, secid);
+ if (rc != LSM_RET_DEFAULT(socket_getpeersec_dgram))
+ return rc;
+ }
+ return LSM_RET_DEFAULT(socket_getpeersec_dgram);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
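The three converted hooks share one pattern: iterate the registered hooks, return the first result that differs from the hook's default, and fall back to that default (-EOPNOTSUPP or -ENOPROTOOPT) when no LSM answers. Stripped to a skeleton with simplified types (not the kernel's hlist or macros):

    #define RET_DEFAULT (-95)	/* stand-in for e.g. -EOPNOTSUPP */

    struct hook {
    	int (*fn)(void *arg);
    	struct hook *next;
    };

    /* first result differing from the default wins; else the default */
    static int dispatch(const struct hook *head, void *arg)
    {
    	for (const struct hook *hp = head; hp; hp = hp->next) {
    		int rc = hp->fn(arg);

    		if (rc != RET_DEFAULT)
    			return rc;	/* one module provides the answer */
    	}
    	return RET_DEFAULT;
    }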
@@ -5410,29 +5448,87 @@ int security_bpf_prog(struct bpf_prog *prog)
}
/**
- * security_bpf_map_alloc() - Allocate a bpf map LSM blob
- * @map: bpf map
+ * security_bpf_map_create() - Check if BPF map creation is allowed
+ * @map: BPF map object
+ * @attr: BPF syscall attributes used to create BPF map
+ * @token: BPF token used to grant user access
+ *
+ * Do a check when the kernel creates a new BPF map. This is also the
+ * point where the LSM blob is allocated for LSMs that need one.
*
- * Initialize the security field inside bpf map.
+ * Return: Returns 0 on success, error on failure.
+ */
+int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
+{
+ return call_int_hook(bpf_map_create, 0, map, attr, token);
+}
+
+/**
+ * security_bpf_prog_load() - Check if loading of BPF program is allowed
+ * @prog: BPF program object
+ * @attr: BPF syscall attributes used to create BPF program
+ * @token: BPF token used to grant user access to BPF subsystem
+ *
+ * Perform an access control check when the kernel loads a BPF program and
+ * allocates associated BPF program object. This hook is also responsible for
+ * allocating any required LSM state for the BPF program.
+ *
+ * Return: Returns 0 on success, error on failure.
+ */
+int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
+{
+ return call_int_hook(bpf_prog_load, 0, prog, attr, token);
+}
+
+/**
+ * security_bpf_token_create() - Check if creating of BPF token is allowed
+ * @token: BPF token object
+ * @attr: BPF syscall attributes used to create BPF token
+ * @path: path pointing to BPF FS mount point from which BPF token is created
+ *
+ * Do a check when the kernel instantiates a new BPF token object from a BPF FS
+ * instance. This is also the point where an LSM blob can be allocated for LSMs.
*
* Return: Returns 0 on success, error on failure.
*/
-int security_bpf_map_alloc(struct bpf_map *map)
+int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
{
- return call_int_hook(bpf_map_alloc_security, 0, map);
+ return call_int_hook(bpf_token_create, 0, token, attr, path);
}
/**
- * security_bpf_prog_alloc() - Allocate a bpf program LSM blob
- * @aux: bpf program aux info struct
+ * security_bpf_token_cmd() - Check if BPF token is allowed to delegate
+ * requested BPF syscall command
+ * @token: BPF token object
+ * @cmd: BPF syscall command requested to be delegated by BPF token
*
- * Initialize the security field inside bpf program.
+ * Do a check when the kernel decides whether provided BPF token should allow
+ * delegation of requested BPF syscall command.
*
* Return: Returns 0 on success, error on failure.
*/
-int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
+int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
{
- return call_int_hook(bpf_prog_alloc_security, 0, aux);
+ return call_int_hook(bpf_token_cmd, 0, token, cmd);
+}
+
+/**
+ * security_bpf_token_capable() - Check if BPF token is allowed to delegate
+ * requested BPF-related capability
+ * @token: BPF token object
+ * @cap: capabilities requested to be delegated by BPF token
+ *
+ * Do a check when the kernel decides whether provided BPF token should allow
+ * delegation of requested BPF-related capabilities.
+ *
+ * Return: Returns 0 on success, error on failure.
+ */
+int security_bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return call_int_hook(bpf_token_capable, 0, token, cap);
}
/**
@@ -5443,18 +5539,29 @@ int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
*/
void security_bpf_map_free(struct bpf_map *map)
{
- call_void_hook(bpf_map_free_security, map);
+ call_void_hook(bpf_map_free, map);
+}
+
+/**
+ * security_bpf_prog_free() - Free a BPF program's LSM blob
+ * @prog: BPF program struct
+ *
+ * Clean up the security information stored inside BPF program.
+ */
+void security_bpf_prog_free(struct bpf_prog *prog)
+{
+ call_void_hook(bpf_prog_free, prog);
}
/**
- * security_bpf_prog_free() - Free a bpf program's LSM blob
- * @aux: bpf program aux info struct
+ * security_bpf_token_free() - Free a BPF token's LSM blob
+ * @token: BPF token struct
*
- * Clean up the security information stored inside bpf prog.
+ * Clean up the security information stored inside BPF token.
*/
-void security_bpf_prog_free(struct bpf_prog_aux *aux)
+void security_bpf_token_free(struct bpf_token *token)
{
- call_void_hook(bpf_prog_free_security, aux);
+ call_void_hook(bpf_token_free, token);
}
#endif /* CONFIG_BPF_SYSCALL */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index a6bf90ace84c..7da35f51a0ef 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6559,7 +6559,7 @@ static int selinux_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
size_t *size, u32 flags)
{
int rc;
- char *val;
+ char *val = NULL;
int val_len;
val_len = selinux_lsm_getattr(attr, current, &val);
@@ -6920,7 +6920,8 @@ static int selinux_bpf_prog(struct bpf_prog *prog)
BPF__PROG_RUN, NULL);
}
-static int selinux_bpf_map_alloc(struct bpf_map *map)
+static int selinux_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token)
{
struct bpf_security_struct *bpfsec;
@@ -6942,7 +6943,8 @@ static void selinux_bpf_map_free(struct bpf_map *map)
kfree(bpfsec);
}
-static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
+static int selinux_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token)
{
struct bpf_security_struct *bpfsec;
@@ -6951,16 +6953,39 @@ static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux)
return -ENOMEM;
bpfsec->sid = current_sid();
- aux->security = bpfsec;
+ prog->aux->security = bpfsec;
return 0;
}
-static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
+static void selinux_bpf_prog_free(struct bpf_prog *prog)
{
- struct bpf_security_struct *bpfsec = aux->security;
+ struct bpf_security_struct *bpfsec = prog->aux->security;
- aux->security = NULL;
+ prog->aux->security = NULL;
+ kfree(bpfsec);
+}
+
+static int selinux_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ struct path *path)
+{
+ struct bpf_security_struct *bpfsec;
+
+ bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL);
+ if (!bpfsec)
+ return -ENOMEM;
+
+ bpfsec->sid = current_sid();
+ token->security = bpfsec;
+
+ return 0;
+}
+
+static void selinux_bpf_token_free(struct bpf_token *token)
+{
+ struct bpf_security_struct *bpfsec = token->security;
+
+ token->security = NULL;
kfree(bpfsec);
}
#endif
@@ -7324,8 +7349,9 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(bpf, selinux_bpf),
LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
- LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
- LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
+ LSM_HOOK_INIT(bpf_map_free, selinux_bpf_map_free),
+ LSM_HOOK_INIT(bpf_prog_free, selinux_bpf_prog_free),
+ LSM_HOOK_INIT(bpf_token_free, selinux_bpf_token_free),
#endif
#ifdef CONFIG_PERF_EVENTS
@@ -7382,8 +7408,9 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
#endif
#ifdef CONFIG_BPF_SYSCALL
- LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
- LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
+ LSM_HOOK_INIT(bpf_map_create, selinux_bpf_map_create),
+ LSM_HOOK_INIT(bpf_prog_load, selinux_bpf_prog_load),
+ LSM_HOOK_INIT(bpf_token_create, selinux_bpf_token_create),
#endif
#ifdef CONFIG_PERF_EVENTS
LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 57ee70ae50f2..ea3140d510ec 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2649,13 +2649,14 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
{
int error = buffer_len;
size_t avail_len = buffer_len;
- char *cp0 = head->write_buf;
+ char *cp0;
int idx;
if (!head->write)
return -EINVAL;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
+ cp0 = head->write_buf;
head->read_user_buf_avail = 0;
idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 3c3af149bf1c..04a92c3d65d4 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -328,7 +328,8 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
static int tomoyo_file_open(struct file *f)
{
/* Don't check read permission here if called from execve(). */
- if (current->in_execve)
+ /* Illogically, FMODE_EXEC is in f_flags, not f_mode. */
+ if (f->f_flags & __FMODE_EXEC)
return 0;
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path,
f->f_flags);
diff --git a/sound/core/Makefile b/sound/core/Makefile
index a6b444ee2832..f6526b337137 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -32,7 +32,6 @@ snd-ump-objs := ump.o
snd-ump-$(CONFIG_SND_UMP_LEGACY_RAWMIDI) += ump_convert.o
snd-timer-objs := timer.o
snd-hrtimer-objs := hrtimer.o
-snd-rtctimer-objs := rtctimer.o
snd-hwdep-objs := hwdep.o
snd-seq-device-objs := seq_device.o
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index a09f0154e6a7..d0788126cbab 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -211,6 +211,10 @@ static const char * const snd_pcm_format_names[] = {
FORMAT(DSD_U32_LE),
FORMAT(DSD_U16_BE),
FORMAT(DSD_U32_BE),
+ FORMAT(S20_LE),
+ FORMAT(S20_BE),
+ FORMAT(U20_LE),
+ FORMAT(U20_BE),
};
/**
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f5ff00f99788..21baf6bf7e25 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -486,6 +486,11 @@ static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
if (snd_interval_single(i))
params->msbits = snd_interval_value(i);
+ m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ if (snd_mask_single(m)) {
+ snd_pcm_format_t format = (__force snd_pcm_format_t)snd_mask_min(m);
+ params->msbits = snd_pcm_format_width(format);
+ }
}
if (params->msbits) {
diff --git a/sound/core/ump.c b/sound/core/ump.c
index 3bef1944e955..fe7911498cc4 100644
--- a/sound/core/ump.c
+++ b/sound/core/ump.c
@@ -985,7 +985,7 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
struct snd_ump_endpoint *ump = substream->rmidi->private_data;
int dir = substream->stream;
int group = ump->legacy_mapping[substream->number];
- int err;
+ int err = 0;
mutex_lock(&ump->open_mutex);
if (ump->legacy_substreams[dir][group]) {
@@ -1009,7 +1009,7 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
spin_unlock_irq(&ump->legacy_locks[dir]);
unlock:
mutex_unlock(&ump->open_mutex);
- return 0;
+ return err;
}
static int snd_ump_legacy_close(struct snd_rawmidi_substream *substream)
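The ump fix restores the usual goto-unlock contract: an error assigned before `goto unlock` was previously discarded by a hard-coded `return 0`. A hypothetical illustration of the idiom (all names invented):

    #include <errno.h>

    /* all names invented; stand-ins for the mutex-guarded open path */
    struct thing { int busy; };
    static void lock_thing(struct thing *t)   { (void)t; }
    static void unlock_thing(struct thing *t) { (void)t; }

    static int open_thing(struct thing *t)
    {
    	int err = 0;		/* success unless a branch says otherwise */

    	lock_thing(t);
    	if (t->busy) {
    		err = -EBUSY;
    		goto unlock;	/* err must survive past the unlock */
    	}
    	/* ... normal setup ... */
    unlock:
    	unlock_thing(t);
    	return err;		/* the bug was a hard-coded 'return 0' here */
    }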
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index e87dc67f33c6..1c65e0a3b13c 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -322,6 +322,17 @@ static int loopback_snd_timer_close_cable(struct loopback_pcm *dpcm)
return 0;
}
+static bool is_access_interleaved(snd_pcm_access_t access)
+{
+ switch (access) {
+ case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
+ case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int loopback_check_format(struct loopback_cable *cable, int stream)
{
struct snd_pcm_runtime *runtime, *cruntime;
@@ -341,7 +352,8 @@ static int loopback_check_format(struct loopback_cable *cable, int stream)
check = runtime->format != cruntime->format ||
runtime->rate != cruntime->rate ||
runtime->channels != cruntime->channels ||
- runtime->access != cruntime->access;
+ is_access_interleaved(runtime->access) !=
+ is_access_interleaved(cruntime->access);
if (!check)
return 0;
if (stream == SNDRV_PCM_STREAM_CAPTURE) {
@@ -369,7 +381,8 @@ static int loopback_check_format(struct loopback_cable *cable, int stream)
&setup->channels_id);
setup->channels = runtime->channels;
}
- if (setup->access != runtime->access) {
+ if (is_access_interleaved(setup->access) !=
+ is_access_interleaved(runtime->access)) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->access_id);
setup->access = runtime->access;
@@ -584,8 +597,7 @@ static void copy_play_buf(struct loopback_pcm *play,
size = play->pcm_buffer_size - src_off;
if (dst_off + size > capt->pcm_buffer_size)
size = capt->pcm_buffer_size - dst_off;
- if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ||
- runtime->access == SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED)
+ if (!is_access_interleaved(runtime->access))
copy_play_buf_part_n(play, capt, size, src_off, dst_off);
else
memcpy(dst + dst_off, src + src_off, size);
@@ -1544,8 +1556,7 @@ static int loopback_access_get(struct snd_kcontrol *kcontrol,
mutex_lock(&loopback->cable_lock);
access = loopback->setup[kcontrol->id.subdevice][kcontrol->id.device].access;
- ucontrol->value.enumerated.item[0] = access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ||
- access == SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
+ ucontrol->value.enumerated.item[0] = !is_access_interleaved(access);
mutex_unlock(&loopback->cable_lock);
return 0;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index a13c0b408aad..7be17bca257f 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -951,7 +951,7 @@ static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *des
// to the reason.
unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
- lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
+ lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
}
if (lost) {
dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 21a90b3c4cc7..8e0ff70fb610 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -156,7 +156,7 @@ config SND_HDA_SCODEC_CS35L56_I2C
depends on I2C
depends on ACPI || COMPILE_TEST
depends on SND_SOC
- select CS_DSP
+ select FW_CS_DSP
select SND_HDA_GENERIC
select SND_SOC_CS35L56_SHARED
select SND_HDA_SCODEC_CS35L56
@@ -171,7 +171,7 @@ config SND_HDA_SCODEC_CS35L56_SPI
depends on SPI_MASTER
depends on ACPI || COMPILE_TEST
depends on SND_SOC
- select CS_DSP
+ select FW_CS_DSP
select SND_HDA_GENERIC
select SND_SOC_CS35L56_SHARED
select SND_HDA_SCODEC_CS35L56
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index 35277ce890a4..87dcb367e239 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -76,6 +76,8 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "10431533", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431573", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
{ "10431663", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 1000, 4500, 24 },
+ { "10431683", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ { "104316A3", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
{ "104316D3", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
{ "104316F3", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
{ "104317F3", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
@@ -89,10 +91,12 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "10431D1F", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431DA2", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
{ "10431E02", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "10431E12", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "10431EE2", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
{ "10431F12", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431F1F", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 0, 0, 0 },
{ "10431F62", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "17AA386F", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
{ "17AA38B4", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "17AA38B5", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "17AA38B6", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
@@ -410,6 +414,8 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CSC3551", "10431533", generic_dsd_config },
{ "CSC3551", "10431573", generic_dsd_config },
{ "CSC3551", "10431663", generic_dsd_config },
+ { "CSC3551", "10431683", generic_dsd_config },
+ { "CSC3551", "104316A3", generic_dsd_config },
{ "CSC3551", "104316D3", generic_dsd_config },
{ "CSC3551", "104316F3", generic_dsd_config },
{ "CSC3551", "104317F3", generic_dsd_config },
@@ -423,10 +429,12 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CSC3551", "10431D1F", generic_dsd_config },
{ "CSC3551", "10431DA2", generic_dsd_config },
{ "CSC3551", "10431E02", generic_dsd_config },
+ { "CSC3551", "10431E12", generic_dsd_config },
{ "CSC3551", "10431EE2", generic_dsd_config },
{ "CSC3551", "10431F12", generic_dsd_config },
{ "CSC3551", "10431F1F", generic_dsd_config },
{ "CSC3551", "10431F62", generic_dsd_config },
+ { "CSC3551", "17AA386F", generic_dsd_config },
{ "CSC3551", "17AA38B4", generic_dsd_config },
{ "CSC3551", "17AA38B5", generic_dsd_config },
{ "CSC3551", "17AA38B6", generic_dsd_config },
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index b61e1de8c4bf..75a14ba54fcd 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -30,14 +30,23 @@
* ASP1_RX_WL = 24 bits per sample
* ASP1_TX_WL = 24 bits per sample
* ASP1_RXn_EN 1..3 and ASP1_TXn_EN 1..4 disabled
+ *
+ * Override any Windows-specific mixer settings applied by the firmware.
*/
static const struct reg_sequence cs35l56_hda_dai_config[] = {
{ CS35L56_ASP1_CONTROL1, 0x00000021 },
{ CS35L56_ASP1_CONTROL2, 0x20200200 },
{ CS35L56_ASP1_CONTROL3, 0x00000003 },
+ { CS35L56_ASP1_FRAME_CONTROL1, 0x03020100 },
+ { CS35L56_ASP1_FRAME_CONTROL5, 0x00020100 },
{ CS35L56_ASP1_DATA_CONTROL5, 0x00000018 },
{ CS35L56_ASP1_DATA_CONTROL1, 0x00000018 },
{ CS35L56_ASP1_ENABLES1, 0x00000000 },
+ { CS35L56_ASP1TX1_INPUT, 0x00000018 },
+ { CS35L56_ASP1TX2_INPUT, 0x00000019 },
+ { CS35L56_ASP1TX3_INPUT, 0x00000020 },
+ { CS35L56_ASP1TX4_INPUT, 0x00000028 },
+
};
static void cs35l56_hda_play(struct cs35l56_hda *cs35l56)
@@ -133,6 +142,10 @@ static int cs35l56_hda_runtime_resume(struct device *dev)
}
}
+ ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+ if (ret)
+ goto err;
+
return 0;
err:
@@ -384,7 +397,7 @@ static const struct cs_dsp_client_ops cs35l56_hda_client_ops = {
static int cs35l56_hda_request_firmware_file(struct cs35l56_hda *cs35l56,
const struct firmware **firmware, char **filename,
- const char *dir, const char *system_name,
+ const char *base_name, const char *system_name,
const char *amp_name,
const char *filetype)
{
@@ -392,17 +405,13 @@ static int cs35l56_hda_request_firmware_file(struct cs35l56_hda *cs35l56,
int ret = 0;
if (system_name && amp_name)
- *filename = kasprintf(GFP_KERNEL, "%scs35l56%s-%02x-dsp1-misc-%s-%s.%s", dir,
- cs35l56->base.secured ? "s" : "", cs35l56->base.rev,
+ *filename = kasprintf(GFP_KERNEL, "%s-%s-%s.%s", base_name,
system_name, amp_name, filetype);
else if (system_name)
- *filename = kasprintf(GFP_KERNEL, "%scs35l56%s-%02x-dsp1-misc-%s.%s", dir,
- cs35l56->base.secured ? "s" : "", cs35l56->base.rev,
+ *filename = kasprintf(GFP_KERNEL, "%s-%s.%s", base_name,
system_name, filetype);
else
- *filename = kasprintf(GFP_KERNEL, "%scs35l56%s-%02x-dsp1-misc.%s", dir,
- cs35l56->base.secured ? "s" : "", cs35l56->base.rev,
- filetype);
+ *filename = kasprintf(GFP_KERNEL, "%s.%s", base_name, filetype);
if (!*filename)
return -ENOMEM;
@@ -435,8 +444,8 @@ static int cs35l56_hda_request_firmware_file(struct cs35l56_hda *cs35l56,
return 0;
}
-static const char cirrus_dir[] = "cirrus/";
static void cs35l56_hda_request_firmware_files(struct cs35l56_hda *cs35l56,
+ unsigned int preloaded_fw_ver,
const struct firmware **wmfw_firmware,
char **wmfw_filename,
const struct firmware **coeff_firmware,
@@ -444,55 +453,73 @@ static void cs35l56_hda_request_firmware_files(struct cs35l56_hda *cs35l56,
{
const char *system_name = cs35l56->system_name;
const char *amp_name = cs35l56->amp_name;
+ char base_name[37];
int ret;
+ if (preloaded_fw_ver) {
+ snprintf(base_name, sizeof(base_name),
+ "cirrus/cs35l56-%02x%s-%06x-dsp1-misc",
+ cs35l56->base.rev,
+ cs35l56->base.secured ? "-s" : "",
+ preloaded_fw_ver & 0xffffff);
+ } else {
+ snprintf(base_name, sizeof(base_name),
+ "cirrus/cs35l56-%02x%s-dsp1-misc",
+ cs35l56->base.rev,
+ cs35l56->base.secured ? "-s" : "");
+ }
+
if (system_name && amp_name) {
if (!cs35l56_hda_request_firmware_file(cs35l56, wmfw_firmware, wmfw_filename,
- cirrus_dir, system_name, amp_name, "wmfw")) {
+ base_name, system_name, amp_name, "wmfw")) {
cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
- cirrus_dir, system_name, amp_name, "bin");
+ base_name, system_name, amp_name, "bin");
return;
}
}
if (system_name) {
if (!cs35l56_hda_request_firmware_file(cs35l56, wmfw_firmware, wmfw_filename,
- cirrus_dir, system_name, NULL, "wmfw")) {
+ base_name, system_name, NULL, "wmfw")) {
if (amp_name)
cs35l56_hda_request_firmware_file(cs35l56,
coeff_firmware, coeff_filename,
- cirrus_dir, system_name,
+ base_name, system_name,
amp_name, "bin");
if (!*coeff_firmware)
cs35l56_hda_request_firmware_file(cs35l56,
coeff_firmware, coeff_filename,
- cirrus_dir, system_name,
+ base_name, system_name,
NULL, "bin");
return;
}
+
+ /*
+ * Check for system-specific bin files without wmfw before
+ * falling back to generic firmware
+ */
+ if (amp_name)
+ cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
+ base_name, system_name, amp_name, "bin");
+ if (!*coeff_firmware)
+ cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
+ base_name, system_name, NULL, "bin");
+
+ if (*coeff_firmware)
+ return;
}
ret = cs35l56_hda_request_firmware_file(cs35l56, wmfw_firmware, wmfw_filename,
- cirrus_dir, NULL, NULL, "wmfw");
+ base_name, NULL, NULL, "wmfw");
if (!ret) {
cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
- cirrus_dir, NULL, NULL, "bin");
+ base_name, NULL, NULL, "bin");
return;
}
- /* When a firmware file is not found must still search for the coeff files */
- if (system_name) {
- if (amp_name)
- cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
- cirrus_dir, system_name, amp_name, "bin");
- if (!*coeff_firmware)
- cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
- cirrus_dir, system_name, NULL, "bin");
- }
-
if (!*coeff_firmware)
cs35l56_hda_request_firmware_file(cs35l56, coeff_firmware, coeff_filename,
- cirrus_dir, NULL, NULL, "bin");
+ base_name, NULL, NULL, "bin");
}
static void cs35l56_hda_release_firmware_files(const struct firmware *wmfw_firmware,
@@ -526,7 +553,8 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
const struct firmware *wmfw_firmware = NULL;
char *coeff_filename = NULL;
char *wmfw_filename = NULL;
- unsigned int firmware_missing;
+ unsigned int preloaded_fw_ver;
+ bool firmware_missing;
int ret = 0;
/* Prepare for a new DSP power-up */
@@ -537,24 +565,21 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
pm_runtime_get_sync(cs35l56->base.dev);
- ret = regmap_read(cs35l56->base.regmap, CS35L56_PROTECTION_STATUS, &firmware_missing);
- if (ret) {
- dev_err(cs35l56->base.dev, "Failed to read PROTECTION_STATUS: %d\n", ret);
+ /*
+ * The firmware can only be upgraded if it is currently running
+ * from the built-in ROM. If not, the wmfw/bin must be for the
+ * version of firmware that is running on the chip.
+ */
+ ret = cs35l56_read_prot_status(&cs35l56->base, &firmware_missing, &preloaded_fw_ver);
+ if (ret)
goto err_pm_put;
- }
- firmware_missing &= CS35L56_FIRMWARE_MISSING;
+ if (firmware_missing)
+ preloaded_fw_ver = 0;
- /*
- * Firmware can only be downloaded if the CS35L56 is secured or is
- * running from the built-in ROM. If it is secured the BIOS will have
- * downloaded firmware, and the wmfw/bin files will only contain
- * tunings that are safe to download with the firmware running.
- */
- if (cs35l56->base.secured || firmware_missing) {
- cs35l56_hda_request_firmware_files(cs35l56, &wmfw_firmware, &wmfw_filename,
- &coeff_firmware, &coeff_filename);
- }
+ cs35l56_hda_request_firmware_files(cs35l56, preloaded_fw_ver,
+ &wmfw_firmware, &wmfw_filename,
+ &coeff_firmware, &coeff_filename);
/*
* If the BIOS didn't patch the firmware a bin file is mandatory to
@@ -569,12 +594,12 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
mutex_lock(&cs35l56->base.irq_lock);
/*
- * When the device is running in secure mode the firmware files can
- * only contain insecure tunings and therefore we do not need to
- * shutdown the firmware to apply them and can use the lower cost
- * reinit sequence instead.
+ * If the firmware hasn't been patched it must be shut down before
+ * doing a full patch, and reset afterwards. If it is already
+ * running a patched version, the firmware files only contain
+ * tunings and we can use the lower-cost reinit sequence instead.
*/
- if (!cs35l56->base.secured && (wmfw_firmware || coeff_firmware)) {
+ if (firmware_missing && (wmfw_firmware || coeff_firmware)) {
ret = cs35l56_firmware_shutdown(&cs35l56->base);
if (ret)
goto err;
@@ -593,7 +618,7 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
if (coeff_filename)
dev_dbg(cs35l56->base.dev, "Loaded Coefficients: %s\n", coeff_filename);
- if (cs35l56->base.secured) {
+ if (!firmware_missing) {
ret = cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_AUDIO_REINIT);
if (ret)
goto err_powered_up;
@@ -976,6 +1001,9 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int id)
regmap_multi_reg_write(cs35l56->base.regmap, cs35l56_hda_dai_config,
ARRAY_SIZE(cs35l56_hda_dai_config));
+ ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+ if (ret)
+ goto err;
/*
* By default only enable one ASP1TXn, where n=amplifier index,
@@ -1035,16 +1063,6 @@ const struct dev_pm_ops cs35l56_hda_pm_ops = {
};
EXPORT_SYMBOL_NS_GPL(cs35l56_hda_pm_ops, SND_HDA_SCODEC_CS35L56);
-#if IS_ENABLED(CONFIG_SND_HDA_SCODEC_CS35L56_KUNIT_TEST)
-/* Hooks to export static function to KUnit test */
-
-int cs35l56_hda_test_hook_get_speaker_id(struct device *dev, int amp_index, int num_amps)
-{
- return cs35l56_hda_get_speaker_id(dev, amp_index, num_amps);
-}
-EXPORT_SYMBOL_NS_GPL(cs35l56_hda_test_hook_get_speaker_id, SND_HDA_SCODEC_CS35L56);
-#endif
-
MODULE_DESCRIPTION("CS35L56 HDA Driver");
MODULE_IMPORT_NS(SND_HDA_CIRRUS_SCODEC);
MODULE_IMPORT_NS(SND_HDA_CS_DSP_CONTROLS);
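
The filename rework above folds the "cirrus/cs35l56...-dsp1-misc" prefix into a single base_name that, once the chip is running patched firmware, also pins the requested files to the reported firmware version. Hypothetical examples of the resulting request names, following the snprintf formats above (rev B0, secured part, firmware 0x123456, system name 10431683, amp name spk1 -- all values illustrative):

/*
 * Patched, secured part with a system- and amp-specific match:
 *   cirrus/cs35l56-b0-s-123456-dsp1-misc-10431683-spk1.wmfw
 *   cirrus/cs35l56-b0-s-123456-dsp1-misc-10431683-spk1.bin
 *
 * ROM-booted (firmware missing), unsecured part, generic fallback:
 *   cirrus/cs35l56-b0-dsp1-misc.wmfw
 *   cirrus/cs35l56-b0-dsp1-misc.bin
 */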
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index bf685d01259d..de2a3d08c73c 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3946,7 +3946,6 @@ static int create_mute_led_cdev(struct hda_codec *codec,
cdev->max_brightness = 1;
cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute";
cdev->brightness_set_blocking = callback;
- cdev->brightness = ledtrig_audio_get(idx);
cdev->flags = LED_CORE_SUSPENDRESUME;
err = led_classdev_register(&codec->core.dev, cdev);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2276adc84478..1b550c42db09 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1729,9 +1729,11 @@ static int default_bdl_pos_adj(struct azx *chip)
/* some exceptions: Atoms seem problematic with value 1 */
if (chip->pci->vendor == PCI_VENDOR_ID_INTEL) {
switch (chip->pci->device) {
- case 0x0f04: /* Baytrail */
- case 0x2284: /* Braswell */
+ case PCI_DEVICE_ID_INTEL_HDA_BYT:
+ case PCI_DEVICE_ID_INTEL_HDA_BSW:
return 32;
+ case PCI_DEVICE_ID_INTEL_HDA_APL:
+ return 64;
}
}
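
bdl_pos_adj is, roughly, the number of frames of slack the driver builds into the buffer descriptor list so that period interrupts do not fire before the controller's DMA position is trustworthy; the hunk gives Apollo Lake twice the guard of Baytrail/Braswell. A back-of-envelope conversion of that guard to time, assuming a 48 kHz stream (illustrative only):

static unsigned int guard_us(unsigned int frames, unsigned int rate)
{
	/* guard_us(32, 48000) == 666, guard_us(64, 48000) == 1333 */
	return frames * 1000000u / rate;
}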
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index e8819e8a9876..e8209178d87b 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -344,6 +344,7 @@ enum {
CXT_FIXUP_HP_ZBOOK_MUTE_LED,
CXT_FIXUP_HEADSET_MIC,
CXT_FIXUP_HP_MIC_NO_PRESENCE,
+ CXT_PINCFG_SWS_JS201D,
};
/* for hda_fixup_thinkpad_acpi() */
@@ -841,6 +842,17 @@ static const struct hda_pintbl cxt_pincfg_lemote[] = {
{}
};
+/* SuoWoSi/South-holding JS201D with sn6140 */
+static const struct hda_pintbl cxt_pincfg_sws_js201d[] = {
+ { 0x16, 0x03211040 }, /* hp out */
+ { 0x17, 0x91170110 }, /* SPK/Class_D */
+ { 0x18, 0x95a70130 }, /* Internal mic */
+ { 0x19, 0x03a11020 }, /* Headset Mic */
+ { 0x1a, 0x40f001f0 }, /* Not used */
+ { 0x21, 0x40f001f0 }, /* Not used */
+ {}
+};
+
static const struct hda_fixup cxt_fixups[] = {
[CXT_PINCFG_LENOVO_X200] = {
.type = HDA_FIXUP_PINS,
@@ -996,6 +1008,10 @@ static const struct hda_fixup cxt_fixups[] = {
.chained = true,
.chain_id = CXT_FIXUP_HEADSET_MIC,
},
+ [CXT_PINCFG_SWS_JS201D] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = cxt_pincfg_sws_js201d,
+ },
};
static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -1069,6 +1085,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
@@ -1109,6 +1126,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
{ .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" },
{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
+ { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
{}
};
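
The pin defaults added above for the JS201D are standard HDA configuration-default dwords. A decode sketch following the field layout in the HDA specification (the kernel's AC_DEFCFG_* macros encode the same layout); 0x03211040, for example, decodes as: jack, external left, HP Out, 1/8" jack, black, association 4, sequence 0:

static void decode_pincfg(unsigned int cfg)
{
	unsigned int port  = (cfg >> 30) & 0x3;		/* 0=jack 1=none 2=fixed 3=both */
	unsigned int loc   = (cfg >> 24) & 0x3f;	/* gross + geometric location */
	unsigned int dev   = (cfg >> 20) & 0xf;		/* 0=Line Out 2=HP Out 0xa=Mic In */
	unsigned int conn  = (cfg >> 16) & 0xf;		/* 1=1/8" stereo/mono jack */
	unsigned int color = (cfg >> 12) & 0xf;		/* 1=black */
	unsigned int assoc = (cfg >> 4) & 0xf;
	unsigned int seq   = cfg & 0xf;

	pr_info("port=%u loc=0x%02x dev=%u conn=%u color=%u assoc=%u seq=%u\n",
		port, loc, dev, conn, color, assoc, seq);
}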
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index 627899959ffe..e41316e2e983 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -1371,6 +1371,7 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
spec->scodecs[CS8409_CODEC1] = &dolphin_cs42l42_1;
spec->scodecs[CS8409_CODEC1]->codec = codec;
spec->num_scodecs = 2;
+ spec->gen.suppress_vmaster = 1;
codec->patch_ops = cs8409_dolphin_patch_ops;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 200779296a1b..495d63101186 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2301,6 +2301,7 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
for (idx = 0; idx < pcm_num; idx++) {
+ struct hdmi_spec_per_cvt *per_cvt;
struct hda_pcm *info;
struct hda_pcm_stream *pstr;
@@ -2316,6 +2317,11 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
pstr->substreams = 1;
pstr->ops = generic_ops;
+
+ per_cvt = get_cvt(spec, 0);
+ pstr->channels_min = per_cvt->channels_min;
+ pstr->channels_max = per_cvt->channels_max;
+
/* pcm number is less than pcm_rec array size */
if (spec->pcm_used >= ARRAY_SIZE(spec->pcm_rec))
break;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b68c94757051..62701197a019 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -439,6 +439,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
fallthrough;
case 0x10ec0215:
+ case 0x10ec0285:
+ case 0x10ec0289:
+ alc_update_coef_idx(codec, 0x36, 1<<13, 0);
+ fallthrough;
case 0x10ec0230:
case 0x10ec0233:
case 0x10ec0235:
@@ -452,9 +456,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0283:
case 0x10ec0286:
case 0x10ec0288:
- case 0x10ec0285:
case 0x10ec0298:
- case 0x10ec0289:
case 0x10ec0300:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
@@ -3682,6 +3684,7 @@ static void alc285_hp_init(struct hda_codec *codec)
int i, val;
int coef38, coef0d, coef36;
+ alc_write_coefex_idx(codec, 0x58, 0x00, 0x1888); /* write default value */
alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */
coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */
coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */
@@ -7442,6 +7445,7 @@ enum {
ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
ALC298_FIXUP_LENOVO_C940_DUET7,
+ ALC287_FIXUP_LENOVO_14IRP8_DUETITL,
ALC287_FIXUP_13S_GEN2_SPEAKERS,
ALC256_FIXUP_SET_COEF_DEFAULTS,
ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
@@ -7493,6 +7497,26 @@ static void alc298_fixup_lenovo_c940_duet7(struct hda_codec *codec,
__snd_hda_apply_fixup(codec, id, action, 0);
}
+/* A special fixup for Lenovo Slim/Yoga Pro 9 14IRP8 and Yoga DuetITL 2021:
+ * the 14IRP8 PCI SSID will mistakenly be matched with the DuetITL codec SSID,
+ * so we need to apply a different fixup in this case. The only DuetITL codec
+ * SSID reported so far is 17aa:3802, while the 14IRP8 has 17aa:38be
+ * and 17aa:38bf. If it weren't for the PCI SSID, the 14IRP8 models would
+ * have been matched correctly by their codec SSIDs.
+ */
+static void alc287_fixup_lenovo_14irp8_duetitl(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ int id;
+
+ if (codec->core.subsystem_id == 0x17aa3802)
+ id = ALC287_FIXUP_YOGA7_14ITL_SPEAKERS; /* DuetITL */
+ else
+ id = ALC287_FIXUP_TAS2781_I2C; /* 14IRP8 */
+ __snd_hda_apply_fixup(codec, id, action, 0);
+}
+
static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_GPIO2] = {
.type = HDA_FIXUP_FUNC,
@@ -9377,6 +9401,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc298_fixup_lenovo_c940_duet7,
},
+ [ALC287_FIXUP_LENOVO_14IRP8_DUETITL] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_lenovo_14irp8_duetitl,
+ },
[ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -9577,13 +9605,13 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs35l41_fixup_i2c_two,
.chained = true,
- .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
},
[ALC287_FIXUP_TAS2781_I2C] = {
.type = HDA_FIXUP_FUNC,
.v.func = tas2781_fixup_i2c,
.chained = true,
- .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ .chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK,
},
[ALC287_FIXUP_YOGA7_14ARB7_I2C] = {
.type = HDA_FIXUP_FUNC,
@@ -9604,6 +9632,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC287_FIXUP_THINKPAD_I2S_SPK] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc287_fixup_bind_dacs,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
},
[ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
.type = HDA_FIXUP_FUNC,
@@ -9653,6 +9683,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1025, 0x126a, "Acer Swift SF114-32", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
@@ -9732,12 +9763,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
SND_PCI_QUIRK(0x1028, 0x0beb, "Dell XPS 15 9530 (2023)", ALC289_FIXUP_DELL_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0c0b, "Dell Oasis 14 RPL-P", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0c0d, "Dell Oasis", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x0c0e, "Dell Oasis 16", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c28, "Dell Inspiron 16 Plus 7630", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
SND_PCI_QUIRK(0x1028, 0x0c4d, "Dell", ALC287_FIXUP_CS35L41_I2C_4),
SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
@@ -9852,6 +9887,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
@@ -9861,6 +9897,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
@@ -9892,6 +9929,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8973, "HP EliteBook 860 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8974, "HP EliteBook 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8975, "HP EliteBook x360 840 Aero G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x897d, "HP mt440 Mobile Thin Client U74", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
@@ -9917,16 +9955,20 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ab9, "HP EliteBook 840 G8 (MB 8AB8)", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b0f, "HP Elite mt645 G7 Mobile Thin Client U81", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x8b3f, "HP mt440 Mobile Thin Client U91", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b44, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b45, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b46, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
@@ -9955,6 +9997,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
@@ -9992,6 +10037,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZI/ZJ/ZQ/ZU/ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16d3, "ASUS UX5304VA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
@@ -10035,14 +10081,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
- SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
- SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10231,8 +10275,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+ SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
- SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8 / DuetITL 2021", ALC287_FIXUP_LENOVO_14IRP8_DUETITL),
SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
@@ -10248,6 +10293,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x386f, "Legion 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
SND_PCI_QUIRK(0x17aa, 0x387d, "Yoga S780-16 pro Quad AAC", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x387e, "Yoga S780-16 pro Quad YC", ALC287_FIXUP_TAS2781_I2C),
@@ -10319,6 +10365,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
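
The quirk tables above match on the host's PCI SSID, while the new alc287_fixup_lenovo_14irp8_duetitl disambiguates on the codec's own 32-bit subsystem ID. The value it compares against 0x17aa3802 packs the subsystem vendor into the high half and the device into the low half:

static inline u32 make_codec_ssid(u16 vendor, u16 device)
{
	/* make_codec_ssid(0x17aa, 0x3802) == 0x17aa3802 */
	return ((u32)vendor << 16) | device;
}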
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 2dd809de62e5..1bfb00102a77 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -710,7 +710,7 @@ static int tas2781_hda_bind(struct device *dev, struct device *master,
strscpy(comps->name, dev_name(dev), sizeof(comps->name));
- ret = tascodec_init(tas_hda->priv, codec, tasdev_fw_ready);
+ ret = tascodec_init(tas_hda->priv, codec, THIS_MODULE, tasdev_fw_ready);
if (!ret)
comps->playback_hook = tas2781_hda_playback_hook;
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 46705ec77b48..eb3aca16359c 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -718,7 +718,7 @@ static int ac97_fp_rec_volume_put(struct snd_kcontrol *ctl,
oldreg = oxygen_read_ac97(chip, 1, AC97_REC_GAIN);
newreg = oldreg & ~0x0707;
newreg = newreg | (value->value.integer.value[0] & 7);
- newreg = newreg | ((value->value.integer.value[0] & 7) << 8);
+ newreg = newreg | ((value->value.integer.value[1] & 7) << 8);
change = newreg != oldreg;
if (change)
oxygen_write_ac97(chip, 1, AC97_REC_GAIN, newreg);
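
The fix above restores independent stereo handling for a two-channel integer control: the right-channel field was being filled with a copy of the left-channel value. The packing the corrected code implements is two 3-bit gain fields, left in bits 2:0 and right in bits 10:8 of AC97_REC_GAIN:

static u16 pack_rec_gain(u16 oldreg, unsigned int left, unsigned int right)
{
	u16 reg = oldreg & ~0x0707;

	return reg | (left & 7) | ((right & 7) << 8);
}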
diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c
index c90ec3419247..504d1b8c4cbb 100644
--- a/sound/soc/amd/acp/acp-mach-common.c
+++ b/sound/soc/amd/acp/acp-mach-common.c
@@ -505,6 +505,13 @@ static int acp_card_rt5682s_hw_params(struct snd_pcm_substream *substream,
clk_set_rate(drvdata->wclk, srate);
clk_set_rate(drvdata->bclk, srate * ch * format);
+ if (!drvdata->soc_mclk) {
+ ret = acp_clk_enable(drvdata, srate, ch * format);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "Failed to enable HS clk: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
@@ -1464,8 +1471,13 @@ int acp_sofdsp_dai_links_create(struct snd_soc_card *card)
if (drv_data->amp_cpu_id == I2S_SP) {
links[i].name = "acp-amp-codec";
links[i].id = AMP_BE_ID;
- links[i].cpus = sof_sp_virtual;
- links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
+ if (drv_data->platform == RENOIR) {
+ links[i].cpus = sof_sp;
+ links[i].num_cpus = ARRAY_SIZE(sof_sp);
+ } else {
+ links[i].cpus = sof_sp_virtual;
+ links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
+ }
links[i].platforms = sof_component;
links[i].num_platforms = ARRAY_SIZE(sof_component);
links[i].dpcm_playback = 1;
diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
index 2a9fd3275e42..20b94814a046 100644
--- a/sound/soc/amd/acp/acp-sof-mach.c
+++ b/sound/soc/amd/acp/acp-sof-mach.c
@@ -48,6 +48,7 @@ static struct acp_card_drvdata sof_rt5682s_rt1019_data = {
.hs_codec_id = RT5682S,
.amp_codec_id = RT1019,
.dmic_codec_id = DMIC,
+ .platform = RENOIR,
.tdm_mode = false,
};
@@ -58,6 +59,7 @@ static struct acp_card_drvdata sof_rt5682s_max_data = {
.hs_codec_id = RT5682S,
.amp_codec_id = MAX98360A,
.dmic_codec_id = DMIC,
+ .platform = RENOIR,
.tdm_mode = false,
};
@@ -68,6 +70,7 @@ static struct acp_card_drvdata sof_nau8825_data = {
.hs_codec_id = NAU8825,
.amp_codec_id = MAX98360A,
.dmic_codec_id = DMIC,
+ .platform = REMBRANDT,
.soc_mclk = true,
.tdm_mode = false,
};
@@ -79,6 +82,7 @@ static struct acp_card_drvdata sof_rt5682s_hs_rt1019_data = {
.hs_codec_id = RT5682S,
.amp_codec_id = RT1019,
.dmic_codec_id = DMIC,
+ .platform = REMBRANDT,
.soc_mclk = true,
.tdm_mode = false,
};
diff --git a/sound/soc/amd/acp/acp3x-es83xx/acp3x-es83xx.c b/sound/soc/amd/acp/acp3x-es83xx/acp3x-es83xx.c
index f85b85ea4be9..2b0aa270a3e9 100644
--- a/sound/soc/amd/acp/acp3x-es83xx/acp3x-es83xx.c
+++ b/sound/soc/amd/acp/acp3x-es83xx/acp3x-es83xx.c
@@ -358,6 +358,14 @@ static const struct dmi_system_id acp3x_es83xx_dmi_table[] = {
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "HUAWEI"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HVY-WXX9"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "M1010"),
+ },
+ .driver_data = (void *)(ES83XX_ENABLE_DMIC),
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "HUAWEI"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HVY-WXX9"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "M1020"),
},
.driver_data = (void *)(ES83XX_ENABLE_DMIC),
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index d83cb6e4c62a..abb9589b8477 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -203,6 +203,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J2"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J0"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21J5"),
}
},
@@ -238,6 +252,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UU"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
}
},
@@ -251,6 +272,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83AS"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
}
@@ -300,6 +328,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VF"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
}
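
The entries added above extend a dmi_system_id quirk table. A sketch of how a machine driver typically consults such a table at probe time (illustrative; dmi_first_match() is the in-kernel DMI API):

#include <linux/dmi.h>

static struct snd_soc_card *yc_lookup_card(void)
{
	const struct dmi_system_id *id = dmi_first_match(yc_acp_quirk_table);

	/* &acp6x_card on the matching Lenovo/MSI/Alienware models */
	return id ? id->driver_data : NULL;
}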
diff --git a/sound/soc/amd/yc/pci-acp6x.c b/sound/soc/amd/yc/pci-acp6x.c
index 7af6a349b1d4..694b8e313902 100644
--- a/sound/soc/amd/yc/pci-acp6x.c
+++ b/sound/soc/amd/yc/pci-acp6x.c
@@ -162,6 +162,7 @@ static int snd_acp6x_probe(struct pci_dev *pci,
/* Yellow Carp device check */
switch (pci->revision) {
case 0x60:
+ case 0x63:
case 0x6f:
break;
default:
diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
index 44c221745c3b..2392c6effed8 100644
--- a/sound/soc/codecs/cs35l45.c
+++ b/sound/soc/codecs/cs35l45.c
@@ -184,7 +184,7 @@ static int cs35l45_activate_ctl(struct snd_soc_component *component,
else
snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s", ctl_name);
- kcontrol = snd_soc_card_get_kcontrol(component->card, name);
+ kcontrol = snd_soc_card_get_kcontrol_locked(component->card, name);
if (!kcontrol) {
dev_err(component->dev, "Can't find kcontrol %s\n", name);
return -EINVAL;
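
The switch to the _locked lookup above reads as a locking fix: judging by the API naming, snd_soc_card_get_kcontrol() acquires the card's controls lock itself, while the _locked variant expects the caller to hold it already, which is the case when cs35l45_activate_ctl() runs from a control handler. A condensed illustration of that assumed rule:

static struct snd_kcontrol *find_ctl(struct snd_soc_card *card,
				     const char *name, bool lock_held)
{
	/* assumed semantics: the plain variant takes the lock internally */
	if (lock_held)
		return snd_soc_card_get_kcontrol_locked(card, name);

	return snd_soc_card_get_kcontrol(card, name);
}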
diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
index 953ba066bab1..cb4e83126b08 100644
--- a/sound/soc/codecs/cs35l56-shared.c
+++ b/sound/soc/codecs/cs35l56-shared.c
@@ -5,6 +5,7 @@
// Copyright (C) 2023 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
+#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
@@ -12,6 +13,15 @@
#include "cs35l56.h"
static const struct reg_sequence cs35l56_patch[] = {
+ /*
+ * Firmware can change these to non-defaults to satisfy SDCA.
+ * Ensure that they are at known defaults.
+ */
+ { CS35L56_SWIRE_DP3_CH1_INPUT, 0x00000018 },
+ { CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 },
+ { CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 },
+ { CS35L56_SWIRE_DP3_CH4_INPUT, 0x00000028 },
+
/* These are not reset by a soft-reset, so patch to defaults. */
{ CS35L56_MAIN_RENDER_USER_MUTE, 0x00000000 },
{ CS35L56_MAIN_RENDER_USER_VOLUME, 0x00000000 },
@@ -34,15 +44,13 @@ static const struct reg_default cs35l56_reg_defaults[] = {
{ CS35L56_ASP1_FRAME_CONTROL5, 0x00020100 },
{ CS35L56_ASP1_DATA_CONTROL1, 0x00000018 },
{ CS35L56_ASP1_DATA_CONTROL5, 0x00000018 },
- { CS35L56_ASP1TX1_INPUT, 0x00000018 },
- { CS35L56_ASP1TX2_INPUT, 0x00000019 },
- { CS35L56_ASP1TX3_INPUT, 0x00000020 },
- { CS35L56_ASP1TX4_INPUT, 0x00000028 },
+
+ /* no defaults for ASP1TX mixer */
+
{ CS35L56_SWIRE_DP3_CH1_INPUT, 0x00000018 },
{ CS35L56_SWIRE_DP3_CH2_INPUT, 0x00000019 },
{ CS35L56_SWIRE_DP3_CH3_INPUT, 0x00000029 },
{ CS35L56_SWIRE_DP3_CH4_INPUT, 0x00000028 },
- { CS35L56_IRQ1_CFG, 0x00000000 },
{ CS35L56_IRQ1_MASK_1, 0x83ffffff },
{ CS35L56_IRQ1_MASK_2, 0xffff7fff },
{ CS35L56_IRQ1_MASK_4, 0xe0ffffff },
@@ -195,6 +203,47 @@ static bool cs35l56_volatile_reg(struct device *dev, unsigned int reg)
}
}
+/*
+ * The firmware boot sequence can overwrite the ASP1 config registers so that
+ * they don't match regmap's view of their values. Rewrite the values from the
+ * regmap cache into the hardware registers.
+ */
+int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base)
+{
+ struct reg_sequence asp1_regs[] = {
+ { .reg = CS35L56_ASP1_ENABLES1 },
+ { .reg = CS35L56_ASP1_CONTROL1 },
+ { .reg = CS35L56_ASP1_CONTROL2 },
+ { .reg = CS35L56_ASP1_CONTROL3 },
+ { .reg = CS35L56_ASP1_FRAME_CONTROL1 },
+ { .reg = CS35L56_ASP1_FRAME_CONTROL5 },
+ { .reg = CS35L56_ASP1_DATA_CONTROL1 },
+ { .reg = CS35L56_ASP1_DATA_CONTROL5 },
+ };
+ int i, ret;
+
+ /* Read values from regmap cache into a write sequence */
+ for (i = 0; i < ARRAY_SIZE(asp1_regs); ++i) {
+ ret = regmap_read(cs35l56_base->regmap, asp1_regs[i].reg, &asp1_regs[i].def);
+ if (ret)
+ goto err;
+ }
+
+ /* Write the values cache-bypassed so that they will be written to silicon */
+ ret = regmap_multi_reg_write_bypassed(cs35l56_base->regmap, asp1_regs,
+ ARRAY_SIZE(asp1_regs));
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(cs35l56_base->dev, "Failed to sync ASP1 registers: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_force_sync_asp1_registers_from_cache, SND_SOC_CS35L56_SHARED);
+
int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command)
{
unsigned int val;
@@ -286,6 +335,7 @@ void cs35l56_wait_min_reset_pulse(void)
EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, SND_SOC_CS35L56_SHARED);
static const struct reg_sequence cs35l56_system_reset_seq[] = {
+ REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0),
REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET),
};
@@ -400,17 +450,6 @@ int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base)
unsigned int val;
int ret;
- /* Nothing to re-patch if we haven't done any patching yet. */
- if (!cs35l56_base->fw_patched)
- return false;
-
- /*
- * If we have control of RESET we will have asserted it so the firmware
- * will need re-patching.
- */
- if (cs35l56_base->reset_gpio)
- return true;
-
/*
* In secure mode FIRMWARE_MISSING is cleared by the BIOS loader so
* can't be used here to test for memory retention.
@@ -590,10 +629,35 @@ void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_ds
}
EXPORT_SYMBOL_NS_GPL(cs35l56_init_cs_dsp, SND_SOC_CS35L56_SHARED);
+int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
+ bool *fw_missing, unsigned int *fw_version)
+{
+ unsigned int prot_status;
+ int ret;
+
+ ret = regmap_read(cs35l56_base->regmap, CS35L56_PROTECTION_STATUS, &prot_status);
+ if (ret) {
+ dev_err(cs35l56_base->dev, "Get PROTECTION_STATUS failed: %d\n", ret);
+ return ret;
+ }
+
+ *fw_missing = !!(prot_status & CS35L56_FIRMWARE_MISSING);
+
+ ret = regmap_read(cs35l56_base->regmap, CS35L56_DSP1_FW_VER, fw_version);
+ if (ret) {
+ dev_err(cs35l56_base->dev, "Get FW VER failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_read_prot_status, SND_SOC_CS35L56_SHARED);
+
int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
{
int ret;
- unsigned int devid, revid, otpid, secured;
+ unsigned int devid, revid, otpid, secured, fw_ver;
+ bool fw_missing;
/*
* When the system is not using a reset_gpio ensure the device is
@@ -652,8 +716,13 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
return ret;
}
- dev_info(cs35l56_base->dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d\n",
- cs35l56_base->secured ? "s" : "", cs35l56_base->rev, otpid);
+ ret = cs35l56_read_prot_status(cs35l56_base, &fw_missing, &fw_ver);
+ if (ret)
+ return ret;
+
+ dev_info(cs35l56_base->dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d fw:%d.%d.%d (patched=%u)\n",
+ cs35l56_base->secured ? "s" : "", cs35l56_base->rev, otpid,
+ fw_ver >> 16, (fw_ver >> 8) & 0xff, fw_ver & 0xff, !fw_missing);
/* Wake source and *_BLOCKED interrupts default to unmasked, so mask them */
regmap_write(cs35l56_base->regmap, CS35L56_IRQ1_MASK_20, 0xffffffff);
@@ -668,6 +737,41 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
}
EXPORT_SYMBOL_NS_GPL(cs35l56_hw_init, SND_SOC_CS35L56_SHARED);
+int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base)
+{
+ struct gpio_descs *descs;
+ int speaker_id;
+ int i, ret;
+
+ /* Read the speaker type qualifier from the motherboard GPIOs */
+ descs = gpiod_get_array_optional(cs35l56_base->dev, "spk-id", GPIOD_IN);
+ if (!descs) {
+ return -ENOENT;
+ } else if (IS_ERR(descs)) {
+ ret = PTR_ERR(descs);
+ return dev_err_probe(cs35l56_base->dev, ret, "Failed to get spk-id-gpios\n");
+ }
+
+ speaker_id = 0;
+ for (i = 0; i < descs->ndescs; i++) {
+ ret = gpiod_get_value_cansleep(descs->desc[i]);
+ if (ret < 0) {
+ dev_err_probe(cs35l56_base->dev, ret, "Failed to read spk-id[%d]\n", i);
+ goto err;
+ }
+
+ speaker_id |= (ret << i);
+ }
+
+ dev_dbg(cs35l56_base->dev, "Speaker ID = %d\n", speaker_id);
+ ret = speaker_id;
+err:
+ gpiod_put_array(descs);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_get_speaker_id, SND_SOC_CS35L56_SHARED);
+
static const u32 cs35l56_bclk_valid_for_pll_freq_table[] = {
[0x0C] = 128000,
[0x0F] = 256000,
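
cs35l56_get_speaker_id() above assembles a little-endian bitmap from the spk-id GPIO array, and the result later becomes a qualifier in the firmware system name (see the "-spkid%d" kasprintf further down). A worked example under hypothetical wiring, two GPIOs with GPIO1 high and GPIO0 low:

/*
 *   speaker_id = (0 << 0) | (1 << 1) = 2
 *
 * so a system name of "10431683" would become "10431683-spkid2",
 * selecting speaker-variant-specific wmfw/bin files.
 */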
diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
index 45b4de3eff94..6dd0319bc843 100644
--- a/sound/soc/codecs/cs35l56.c
+++ b/sound/soc/codecs/cs35l56.c
@@ -5,6 +5,7 @@
// Copyright (C) 2023 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
+#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -15,6 +16,7 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -59,6 +61,131 @@ static int cs35l56_dspwait_put_volsw(struct snd_kcontrol *kcontrol,
return snd_soc_put_volsw(kcontrol, ucontrol);
}
+static const unsigned short cs35l56_asp1_mixer_regs[] = {
+ CS35L56_ASP1TX1_INPUT, CS35L56_ASP1TX2_INPUT,
+ CS35L56_ASP1TX3_INPUT, CS35L56_ASP1TX4_INPUT,
+};
+
+static const char * const cs35l56_asp1_mux_control_names[] = {
+ "ASP1 TX1 Source", "ASP1 TX2 Source", "ASP1 TX3 Source", "ASP1 TX4 Source"
+};
+
+static int cs35l56_sync_asp1_mixer_widgets_with_firmware(struct cs35l56_private *cs35l56)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cs35l56->component);
+ const char *prefix = cs35l56->component->name_prefix;
+ char full_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+ const char *name;
+ struct snd_kcontrol *kcontrol;
+ struct soc_enum *e;
+ unsigned int val[4];
+ int i, item, ret;
+
+ if (cs35l56->asp1_mixer_widgets_initialized)
+ return 0;
+
+ /*
+ * Resume so we can read the registers from silicon if the regmap
+ * cache has not yet been populated.
+ */
+ ret = pm_runtime_resume_and_get(cs35l56->base.dev);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for firmware download and reboot */
+ cs35l56_wait_dsp_ready(cs35l56);
+
+ ret = regmap_bulk_read(cs35l56->base.regmap, CS35L56_ASP1TX1_INPUT,
+ val, ARRAY_SIZE(val));
+
+ pm_runtime_mark_last_busy(cs35l56->base.dev);
+ pm_runtime_put_autosuspend(cs35l56->base.dev);
+
+ if (ret) {
+ dev_err(cs35l56->base.dev, "Failed to read ASP1 mixer regs: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cs35l56_asp1_mux_control_names); ++i) {
+ name = cs35l56_asp1_mux_control_names[i];
+
+ if (prefix) {
+ snprintf(full_name, sizeof(full_name), "%s %s", prefix, name);
+ name = full_name;
+ }
+
+ kcontrol = snd_soc_card_get_kcontrol_locked(dapm->card, name);
+ if (!kcontrol) {
+ dev_warn(cs35l56->base.dev, "Could not find control %s\n", name);
+ continue;
+ }
+
+ e = (struct soc_enum *)kcontrol->private_value;
+ item = snd_soc_enum_val_to_item(e, val[i] & CS35L56_ASP_TXn_SRC_MASK);
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL);
+ }
+
+ cs35l56->asp1_mixer_widgets_initialized = true;
+
+ return 0;
+}
+
+static int cs35l56_dspwait_asp1tx_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol);
+ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int index = e->shift_l;
+ unsigned int addr, val;
+ int ret;
+
+ ret = cs35l56_sync_asp1_mixer_widgets_with_firmware(cs35l56);
+ if (ret)
+ return ret;
+
+ addr = cs35l56_asp1_mixer_regs[index];
+ ret = regmap_read(cs35l56->base.regmap, addr, &val);
+ if (ret)
+ return ret;
+
+ val &= CS35L56_ASP_TXn_SRC_MASK;
+ ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
+
+ return 0;
+}
+
+static int cs35l56_dspwait_asp1tx_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol);
+ struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+ struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ int item = ucontrol->value.enumerated.item[0];
+ int index = e->shift_l;
+ unsigned int addr, val;
+ bool changed;
+ int ret;
+
+ ret = cs35l56_sync_asp1_mixer_widgets_with_firmware(cs35l56);
+ if (ret)
+ return ret;
+
+ addr = cs35l56_asp1_mixer_regs[index];
+ val = snd_soc_enum_item_to_val(e, item);
+
+ ret = regmap_update_bits_check(cs35l56->base.regmap, addr,
+ CS35L56_ASP_TXn_SRC_MASK, val, &changed);
+ if (ret)
+ return ret;
+
+ if (changed)
+ snd_soc_dapm_mux_update_power(dapm, kcontrol, item, e, NULL);
+
+ return changed;
+}
+
static DECLARE_TLV_DB_SCALE(vol_tlv, -10000, 25, 0);
static const struct snd_kcontrol_new cs35l56_controls[] = {
@@ -77,40 +204,44 @@ static const struct snd_kcontrol_new cs35l56_controls[] = {
};
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx1_enum,
- CS35L56_ASP1TX1_INPUT,
- 0, CS35L56_ASP_TXn_SRC_MASK,
+ SND_SOC_NOPM,
+ 0, 0,
cs35l56_tx_input_texts,
cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx1_mux =
- SOC_DAPM_ENUM("ASP1TX1 SRC", cs35l56_asp1tx1_enum);
+ SOC_DAPM_ENUM_EXT("ASP1TX1 SRC", cs35l56_asp1tx1_enum,
+ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx2_enum,
- CS35L56_ASP1TX2_INPUT,
- 0, CS35L56_ASP_TXn_SRC_MASK,
+ SND_SOC_NOPM,
+ 1, 0,
cs35l56_tx_input_texts,
cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx2_mux =
- SOC_DAPM_ENUM("ASP1TX2 SRC", cs35l56_asp1tx2_enum);
+ SOC_DAPM_ENUM_EXT("ASP1TX2 SRC", cs35l56_asp1tx2_enum,
+ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx3_enum,
- CS35L56_ASP1TX3_INPUT,
- 0, CS35L56_ASP_TXn_SRC_MASK,
+ SND_SOC_NOPM,
+ 2, 0,
cs35l56_tx_input_texts,
cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx3_mux =
- SOC_DAPM_ENUM("ASP1TX3 SRC", cs35l56_asp1tx3_enum);
+ SOC_DAPM_ENUM_EXT("ASP1TX3 SRC", cs35l56_asp1tx3_enum,
+ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_asp1tx4_enum,
- CS35L56_ASP1TX4_INPUT,
- 0, CS35L56_ASP_TXn_SRC_MASK,
+ SND_SOC_NOPM,
+ 3, 0,
cs35l56_tx_input_texts,
cs35l56_tx_input_values);
static const struct snd_kcontrol_new asp1_tx4_mux =
- SOC_DAPM_ENUM("ASP1TX4 SRC", cs35l56_asp1tx4_enum);
+ SOC_DAPM_ENUM_EXT("ASP1TX4 SRC", cs35l56_asp1tx4_enum,
+ cs35l56_dspwait_asp1tx_get, cs35l56_dspwait_asp1tx_put);
static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx1_enum,
CS35L56_SWIRE_DP3_CH1_INPUT,
@@ -148,6 +279,21 @@ static SOC_VALUE_ENUM_SINGLE_DECL(cs35l56_sdw1tx4_enum,
static const struct snd_kcontrol_new sdw1_tx4_mux =
SOC_DAPM_ENUM("SDW1TX4 SRC", cs35l56_sdw1tx4_enum);
+static int cs35l56_asp1_cfg_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Override register values set by firmware boot */
+ return cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+ default:
+ return 0;
+ }
+}
+
static int cs35l56_play_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -184,6 +330,9 @@ static const struct snd_soc_dapm_widget cs35l56_dapm_widgets[] = {
SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_B", 0, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("VDD_AMP", 0, 0),
+ SND_SOC_DAPM_SUPPLY("ASP1 CFG", SND_SOC_NOPM, 0, 0, cs35l56_asp1_cfg_event,
+ SND_SOC_DAPM_PRE_PMU),
+
SND_SOC_DAPM_SUPPLY("PLAY", SND_SOC_NOPM, 0, 0, cs35l56_play_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
@@ -251,6 +400,9 @@ static const struct snd_soc_dapm_route cs35l56_audio_map[] = {
{ "AMP", NULL, "VDD_B" },
{ "AMP", NULL, "VDD_AMP" },
+ { "ASP1 Playback", NULL, "ASP1 CFG" },
+ { "ASP1 Capture", NULL, "ASP1 CFG" },
+
{ "ASP1 Playback", NULL, "PLAY" },
{ "SDW1 Playback", NULL, "PLAY" },
@@ -650,7 +802,7 @@ static struct snd_soc_dai_driver cs35l56_dai[] = {
}
};
-static void cs35l56_secure_patch(struct cs35l56_private *cs35l56)
+static void cs35l56_reinit_patch(struct cs35l56_private *cs35l56)
{
int ret;
@@ -662,19 +814,10 @@ static void cs35l56_secure_patch(struct cs35l56_private *cs35l56)
cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_AUDIO_REINIT);
}
-static void cs35l56_patch(struct cs35l56_private *cs35l56)
+static void cs35l56_patch(struct cs35l56_private *cs35l56, bool firmware_missing)
{
- unsigned int firmware_missing;
int ret;
- ret = regmap_read(cs35l56->base.regmap, CS35L56_PROTECTION_STATUS, &firmware_missing);
- if (ret) {
- dev_err(cs35l56->base.dev, "Failed to read PROTECTION_STATUS: %d\n", ret);
- return;
- }
-
- firmware_missing &= CS35L56_FIRMWARE_MISSING;
-
/*
* Disable SoundWire interrupts to prevent race with IRQ work.
* Setting sdw_irq_no_unmask prevents the handler re-enabling
@@ -747,23 +890,51 @@ static void cs35l56_dsp_work(struct work_struct *work)
struct cs35l56_private *cs35l56 = container_of(work,
struct cs35l56_private,
dsp_work);
+ unsigned int firmware_version;
+ bool firmware_missing;
+ int ret;
if (!cs35l56->base.init_done)
return;
pm_runtime_get_sync(cs35l56->base.dev);
+ ret = cs35l56_read_prot_status(&cs35l56->base, &firmware_missing, &firmware_version);
+ if (ret)
+ goto err;
+
+ /* Populate fw file qualifier with the revision and security state */
+ kfree(cs35l56->dsp.fwf_name);
+ if (firmware_missing) {
+ cs35l56->dsp.fwf_name = kasprintf(GFP_KERNEL, "%02x-dsp1", cs35l56->base.rev);
+ } else {
+ /* Firmware files must match the running firmware version */
+ cs35l56->dsp.fwf_name = kasprintf(GFP_KERNEL,
+ "%02x%s-%06x-dsp1",
+ cs35l56->base.rev,
+ cs35l56->base.secured ? "-s" : "",
+ firmware_version);
+ }
+
+ if (!cs35l56->dsp.fwf_name)
+ goto err;
+
+ dev_dbg(cs35l56->base.dev, "DSP fwf name: '%s' system name: '%s'\n",
+ cs35l56->dsp.fwf_name, cs35l56->dsp.system_name);
+
/*
- * When the device is running in secure mode the firmware files can
- * only contain insecure tunings and therefore we do not need to
- * shutdown the firmware to apply them and can use the lower cost
- * reinit sequence instead.
+ * The firmware cannot be patched if it is already running from
+ * patch RAM. In this case the firmware files are versioned to
+ * match the running firmware version and will only contain
+ * tunings. We do not need to shut down the firmware to apply
+ * tunings, so we can use the lower-cost reinit sequence instead.
*/
- if (cs35l56->base.secured)
- cs35l56_secure_patch(cs35l56);
+ if (!firmware_missing)
+ cs35l56_reinit_patch(cs35l56);
else
- cs35l56_patch(cs35l56);
+ cs35l56_patch(cs35l56, firmware_missing);
+err:
pm_runtime_mark_last_busy(cs35l56->base.dev);
pm_runtime_put_autosuspend(cs35l56->base.dev);
}
@@ -778,10 +949,19 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
if (!cs35l56->dsp.system_name &&
(snd_soc_card_get_pci_ssid(component->card, &vendor, &device) == 0)) {
- cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
- GFP_KERNEL,
- "%04x%04x",
- vendor, device);
+ /* Append a speaker qualifier if there is a speaker ID */
+ if (cs35l56->speaker_id >= 0) {
+ cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
+ GFP_KERNEL,
+ "%04x%04x-spkid%d",
+ vendor, device,
+ cs35l56->speaker_id);
+ } else {
+ cs35l56->dsp.system_name = devm_kasprintf(cs35l56->base.dev,
+ GFP_KERNEL,
+ "%04x%04x",
+ vendor, device);
+ }
if (!cs35l56->dsp.system_name)
return -ENOMEM;
}
@@ -799,6 +979,13 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
debugfs_create_bool("can_hibernate", 0444, debugfs_root, &cs35l56->base.can_hibernate);
debugfs_create_bool("fw_patched", 0444, debugfs_root, &cs35l56->base.fw_patched);
+ /*
+ * The widgets for the ASP1TX mixer can't be initialized
+ * until the firmware has been downloaded and rebooted.
+ */
+ regcache_drop_region(cs35l56->base.regmap, CS35L56_ASP1TX1_INPUT, CS35L56_ASP1TX4_INPUT);
+ cs35l56->asp1_mixer_widgets_initialized = false;
+
queue_work(cs35l56->dsp_wq, &cs35l56->dsp_work);
return 0;
@@ -809,6 +996,16 @@ static void cs35l56_component_remove(struct snd_soc_component *component)
struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
cancel_work_sync(&cs35l56->dsp_work);
+
+ if (cs35l56->dsp.cs_dsp.booted)
+ wm_adsp_power_down(&cs35l56->dsp);
+
+ wm_adsp2_component_remove(&cs35l56->dsp, component);
+
+ kfree(cs35l56->dsp.fwf_name);
+ cs35l56->dsp.fwf_name = NULL;
+
+ cs35l56->component = NULL;
}
static int cs35l56_set_bias_level(struct snd_soc_component *component,
@@ -1050,7 +1247,13 @@ static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56)
if (ret < 0)
return 0;
- cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
+ /* Append a speaker qualifier if there is a speaker ID */
+ if (cs35l56->speaker_id >= 0)
+ cs35l56->dsp.system_name = devm_kasprintf(dev, GFP_KERNEL, "%s-spkid%d",
+ prop, cs35l56->speaker_id);
+ else
+ cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
+
if (cs35l56->dsp.system_name == NULL)
return -ENOMEM;
@@ -1059,12 +1262,101 @@ static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56)
return 0;
}
+/*
+ * Some SoundWire laptops have a spk-id-gpios property, but it points
+ * to the wrong ACPI Device node so it can't be used to get the GPIO.
+ * Try to find the SDCA node containing the GpioIo resource and add a
+ * GPIO mapping to it.
+ */
+static const struct acpi_gpio_params cs35l56_af01_first_gpio = { 0, 0, false };
+static const struct acpi_gpio_mapping cs35l56_af01_spkid_gpios_mapping[] = {
+ { "spk-id-gpios", &cs35l56_af01_first_gpio, 1 },
+ { }
+};
+
+static void cs35l56_acpi_dev_release_driver_gpios(void *adev)
+{
+ acpi_dev_remove_driver_gpios(adev);
+}
+
+static int cs35l56_try_get_broken_sdca_spkid_gpio(struct cs35l56_private *cs35l56)
+{
+ struct fwnode_handle *af01_fwnode;
+ const union acpi_object *obj;
+ struct gpio_desc *desc;
+ int ret;
+
+ /* Find the SDCA node containing the GpioIo */
+ af01_fwnode = device_get_named_child_node(cs35l56->base.dev, "AF01");
+ if (!af01_fwnode) {
+ dev_dbg(cs35l56->base.dev, "No AF01 node\n");
+ return -ENOENT;
+ }
+
+ ret = acpi_dev_get_property(ACPI_COMPANION(cs35l56->base.dev),
+ "spk-id-gpios", ACPI_TYPE_PACKAGE, &obj);
+ if (ret) {
+ dev_dbg(cs35l56->base.dev, "Could not get spk-id-gpios package: %d\n", ret);
+ return -ENOENT;
+ }
+
+ /* The broken property we can handle is a 4-element package (one GPIO) */
+ if (obj->package.count != 4) {
+ dev_warn(cs35l56->base.dev, "Unexpected spk-id element count %d\n",
+ obj->package.count);
+ return -ENOENT;
+ }
+
+ /* Add a GPIO mapping if it doesn't already have one */
+ if (!fwnode_property_present(af01_fwnode, "spk-id-gpios")) {
+ struct acpi_device *adev = to_acpi_device_node(af01_fwnode);
+
+ /*
+ * Can't use devm_acpi_dev_add_driver_gpios() because the
+ * mapping isn't being added to the node pointed to by
+ * ACPI_COMPANION().
+ */
+ ret = acpi_dev_add_driver_gpios(adev, cs35l56_af01_spkid_gpios_mapping);
+ if (ret) {
+ return dev_err_probe(cs35l56->base.dev, ret,
+ "Failed to add gpio mapping to AF01\n");
+ }
+
+ ret = devm_add_action_or_reset(cs35l56->base.dev,
+ cs35l56_acpi_dev_release_driver_gpios,
+ adev);
+ if (ret)
+ return ret;
+
+ dev_dbg(cs35l56->base.dev, "Added spk-id-gpios mapping to AF01\n");
+ }
+
+ desc = fwnode_gpiod_get_index(af01_fwnode, "spk-id", 0, GPIOD_IN, NULL);
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
+ return dev_err_probe(cs35l56->base.dev, ret, "Get GPIO from AF01 failed\n");
+ }
+
+ ret = gpiod_get_value_cansleep(desc);
+ gpiod_put(desc);
+
+ if (ret < 0) {
+ dev_err_probe(cs35l56->base.dev, ret, "Error reading spk-id GPIO\n");
+ return ret;
+ }
+
+ dev_info(cs35l56->base.dev, "Got spk-id from AF01\n");
+
+ return ret;
+}
+
int cs35l56_common_probe(struct cs35l56_private *cs35l56)
{
int ret;
init_completion(&cs35l56->init_completion);
mutex_init(&cs35l56->base.irq_lock);
+ cs35l56->speaker_id = -ENOENT;
dev_set_drvdata(cs35l56->base.dev, cs35l56);
@@ -1101,6 +1393,15 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 1);
}
+ ret = cs35l56_get_speaker_id(&cs35l56->base);
+ if (ACPI_COMPANION(cs35l56->base.dev) && cs35l56->sdw_peripheral && (ret == -ENOENT))
+ ret = cs35l56_try_get_broken_sdca_spkid_gpio(cs35l56);
+
+ if ((ret < 0) && (ret != -ENOENT))
+ goto err;
+
+ cs35l56->speaker_id = ret;
+
ret = cs35l56_get_firmware_uid(cs35l56);
if (ret != 0)
goto err;
@@ -1152,11 +1453,9 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
if (ret < 0)
return ret;
- /* Populate the DSP information with the revision and security state */
- cs35l56->dsp.part = devm_kasprintf(cs35l56->base.dev, GFP_KERNEL, "cs35l56%s-%02x",
- cs35l56->base.secured ? "s" : "", cs35l56->base.rev);
- if (!cs35l56->dsp.part)
- return -ENOMEM;
+ ret = cs35l56_set_patch(&cs35l56->base);
+ if (ret)
+ return ret;
if (!cs35l56->base.reset_gpio) {
dev_dbg(cs35l56->base.dev, "No reset gpio: using soft reset\n");
@@ -1190,10 +1489,6 @@ post_soft_reset:
if (ret)
return ret;
- ret = cs35l56_set_patch(&cs35l56->base);
- if (ret)
- return ret;
-
/* Registers could be dirty after soft reset or SoundWire enumeration */
regcache_sync(cs35l56->base.regmap);
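Taken together, the cs35l56 hunks above derive the firmware lookup name from the silicon revision, an optional secure-mode marker, the running firmware version, the PCI SSID and an optional speaker ID. Below is a minimal user-space sketch of how those qualifiers combine, assuming the wm_adsp naming pattern <part>-<fwf_name>-<fw type>-<system_name>.bin with the default "misc" firmware type; the SSID and speaker values are hypothetical.

#include <stdio.h>

int main(void)
{
	char fwf_name[32], system_name[32], filename[128];
	unsigned int rev = 0xb0, fw_version = 0x123456;	/* hypothetical */
	int secured = 0, speaker_id = 1;		/* hypothetical */

	/* mirrors the kasprintf() calls in the hunks above */
	snprintf(fwf_name, sizeof(fwf_name), "%02x%s-%06x-dsp1",
		 rev, secured ? "-s" : "", fw_version);
	snprintf(system_name, sizeof(system_name), "%04x%04x-spkid%d",
		 0x1028, 0x0c23, speaker_id);	/* hypothetical PCI SSID */

	snprintf(filename, sizeof(filename), "cirrus/cs35l56-%s-misc-%s.bin",
		 fwf_name, system_name);
	/* prints: cirrus/cs35l56-b0-123456-dsp1-misc-10280c23-spkid1.bin */
	puts(filename);
	return 0;
}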
diff --git a/sound/soc/codecs/cs35l56.h b/sound/soc/codecs/cs35l56.h
index 8159c3e217d9..b000e7365e40 100644
--- a/sound/soc/codecs/cs35l56.h
+++ b/sound/soc/codecs/cs35l56.h
@@ -44,12 +44,14 @@ struct cs35l56_private {
bool sdw_attached;
struct completion init_completion;
+ int speaker_id;
u32 rx_mask;
u32 tx_mask;
u8 asp_slot_width;
u8 asp_slot_count;
bool tdm_mode;
bool sysclk_set;
+ bool asp1_mixer_widgets_initialized;
u8 old_sdw_clock_scale;
};
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 6a64681767de..a97ccb512deb 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -2257,7 +2257,10 @@ static int cs42l43_codec_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(priv->dev);
pm_runtime_set_active(priv->dev);
pm_runtime_get_noresume(priv->dev);
- devm_pm_runtime_enable(priv->dev);
+
+ ret = devm_pm_runtime_enable(priv->dev);
+ if (ret)
+ goto err_pm;
for (i = 0; i < ARRAY_SIZE(cs42l43_irqs); i++) {
ret = cs42l43_request_irq(priv, dom, cs42l43_irqs[i].name,
@@ -2333,8 +2336,47 @@ static int cs42l43_codec_runtime_resume(struct device *dev)
return 0;
}
-static DEFINE_RUNTIME_DEV_PM_OPS(cs42l43_codec_pm_ops, NULL,
- cs42l43_codec_runtime_resume, NULL);
+static int cs42l43_codec_suspend(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+
+ disable_irq(cs42l43->irq);
+
+ return 0;
+}
+
+static int cs42l43_codec_suspend_noirq(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+
+ enable_irq(cs42l43->irq);
+
+ return 0;
+}
+
+static int cs42l43_codec_resume(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+
+ enable_irq(cs42l43->irq);
+
+ return 0;
+}
+
+static int cs42l43_codec_resume_noirq(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+
+ disable_irq(cs42l43->irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cs42l43_codec_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(cs42l43_codec_suspend, cs42l43_codec_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(cs42l43_codec_suspend_noirq, cs42l43_codec_resume_noirq)
+ RUNTIME_PM_OPS(NULL, cs42l43_codec_runtime_resume, NULL)
+};
static const struct platform_device_id cs42l43_codec_id_table[] = {
{ "cs42l43-codec", },
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index fa890f6205e2..cbcd02ec6ba4 100755..100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -45,6 +45,82 @@ struct es8326_priv {
int jack_remove_retry;
};
+static int es8326_crosstalk1_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+ unsigned int crosstalk_h, crosstalk_l;
+ unsigned int crosstalk;
+
+ regmap_read(es8326->regmap, ES8326_DAC_RAMPRATE, &crosstalk_h);
+ regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+ crosstalk_h &= 0x20;
+ crosstalk_l &= 0xf0;
+ crosstalk = crosstalk_h >> 1 | crosstalk_l >> 4;
+ ucontrol->value.integer.value[0] = crosstalk;
+
+ return 0;
+}
+
+static int es8326_crosstalk1_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+ unsigned int crosstalk_h, crosstalk_l;
+ unsigned int crosstalk;
+
+ crosstalk = ucontrol->value.integer.value[0];
+ regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+ crosstalk_h = (crosstalk & 0x10) << 1;
+ crosstalk_l &= 0x0f;
+ crosstalk_l |= (crosstalk & 0x0f) << 4;
+ regmap_update_bits(es8326->regmap, ES8326_DAC_RAMPRATE,
+ 0x20, crosstalk_h);
+ regmap_write(es8326->regmap, ES8326_DAC_CROSSTALK, crosstalk_l);
+
+ return 0;
+}
+
+static int es8326_crosstalk2_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+ unsigned int crosstalk_h, crosstalk_l;
+ unsigned int crosstalk;
+
+ regmap_read(es8326->regmap, ES8326_DAC_RAMPRATE, &crosstalk_h);
+ regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+ crosstalk_h &= 0x10;
+ crosstalk_l &= 0x0f;
+ crosstalk = crosstalk_h | crosstalk_l;
+ ucontrol->value.integer.value[0] = crosstalk;
+
+ return 0;
+}
+
+static int es8326_crosstalk2_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct es8326_priv *es8326 = snd_soc_component_get_drvdata(component);
+ unsigned int crosstalk_h, crosstalk_l;
+ unsigned int crosstalk;
+
+ crosstalk = ucontrol->value.integer.value[0];
+ regmap_read(es8326->regmap, ES8326_DAC_CROSSTALK, &crosstalk_l);
+ crosstalk_h = crosstalk & 0x10;
+ crosstalk_l &= 0xf0;
+ crosstalk_l |= crosstalk & 0x0f;
+ regmap_update_bits(es8326->regmap, ES8326_DAC_RAMPRATE,
+ 0x10, crosstalk_h);
+ regmap_write(es8326->regmap, ES8326_DAC_CROSSTALK, crosstalk_l);
+
+ return 0;
+}
+
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(dac_vol_tlv, -9550, 50, 0);
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9550, 50, 0);
static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_analog_pga_tlv, 0, 300, 0);
@@ -102,6 +178,10 @@ static const struct snd_kcontrol_new es8326_snd_controls[] = {
SOC_SINGLE_TLV("ALC Capture Target Level", ES8326_ALC_LEVEL,
0, 0x0f, 0, drc_target_tlv),
+ SOC_SINGLE_EXT("CROSSTALK1", SND_SOC_NOPM, 0, 31, 0,
+ es8326_crosstalk1_get, es8326_crosstalk1_set),
+ SOC_SINGLE_EXT("CROSSTALK2", SND_SOC_NOPM, 0, 31, 0,
+ es8326_crosstalk2_get, es8326_crosstalk2_set),
};
static const struct snd_soc_dapm_widget es8326_dapm_widgets[] = {
@@ -117,12 +197,6 @@ static const struct snd_soc_dapm_widget es8326_dapm_widgets[] = {
SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0, SND_SOC_NOPM, 0, 0),
- /* ADC Digital Mute */
- SND_SOC_DAPM_PGA("ADC L1", ES8326_ADC_MUTE, 0, 1, NULL, 0),
- SND_SOC_DAPM_PGA("ADC R1", ES8326_ADC_MUTE, 1, 1, NULL, 0),
- SND_SOC_DAPM_PGA("ADC L2", ES8326_ADC_MUTE, 2, 1, NULL, 0),
- SND_SOC_DAPM_PGA("ADC R2", ES8326_ADC_MUTE, 3, 1, NULL, 0),
-
/* Analog Power Supply */
SND_SOC_DAPM_DAC("Right DAC", NULL, ES8326_ANA_PDN, 0, 1),
SND_SOC_DAPM_DAC("Left DAC", NULL, ES8326_ANA_PDN, 1, 1),
@@ -142,15 +216,10 @@ static const struct snd_soc_dapm_widget es8326_dapm_widgets[] = {
};
static const struct snd_soc_dapm_route es8326_dapm_routes[] = {
- {"ADC L1", NULL, "MIC1"},
- {"ADC R1", NULL, "MIC2"},
- {"ADC L2", NULL, "MIC3"},
- {"ADC R2", NULL, "MIC4"},
-
- {"ADC L", NULL, "ADC L1"},
- {"ADC R", NULL, "ADC R1"},
- {"ADC L", NULL, "ADC L2"},
- {"ADC R", NULL, "ADC R2"},
+ {"ADC L", NULL, "MIC1"},
+ {"ADC R", NULL, "MIC2"},
+ {"ADC L", NULL, "MIC3"},
+ {"ADC R", NULL, "MIC4"},
{"I2S OUT", NULL, "ADC L"},
{"I2S OUT", NULL, "ADC R"},
@@ -440,10 +509,16 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
unsigned int offset_l, offset_r;
if (mute) {
- regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_OFF);
- regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
- ES8326_MUTE_MASK, ES8326_MUTE);
- regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xf0);
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_OFF);
+ regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
+ ES8326_MUTE_MASK, ES8326_MUTE);
+ regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF,
+ 0x30, 0x00);
+ } else {
+ regmap_update_bits(es8326->regmap, ES8326_ADC_MUTE,
+ 0x0F, 0x0F);
+ }
} else {
if (!es8326->calibrated) {
regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_FORCE_CAL);
@@ -456,11 +531,22 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
regmap_write(es8326->regmap, ES8326_HPR_OFFSET_INI, offset_r);
es8326->calibrated = true;
}
- regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa1);
- regmap_write(es8326->regmap, ES8326_HP_VOL, 0x91);
- regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_ON);
- regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
- ES8326_MUTE_MASK, ~(ES8326_MUTE));
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x01);
+ usleep_range(1000, 5000);
+ regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x00);
+ usleep_range(1000, 5000);
+ regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x30, 0x20);
+ regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x30, 0x30);
+ regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa1);
+ regmap_write(es8326->regmap, ES8326_HP_CAL, ES8326_HP_ON);
+ regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE,
+ ES8326_MUTE_MASK, ~(ES8326_MUTE));
+ } else {
+ msleep(300);
+ regmap_update_bits(es8326->regmap, ES8326_ADC_MUTE,
+ 0x0F, 0x00);
+ }
}
return 0;
}
@@ -477,23 +563,20 @@ static int es8326_set_bias_level(struct snd_soc_component *codec,
if (ret)
return ret;
- regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x00);
+ regmap_update_bits(es8326->regmap, ES8326_RESET, 0x02, 0x02);
+ usleep_range(5000, 10000);
regmap_write(es8326->regmap, ES8326_INTOUT_IO, es8326->interrupt_clk);
regmap_write(es8326->regmap, ES8326_SDINOUT1_IO,
(ES8326_IO_DMIC_CLK << ES8326_SDINOUT1_SHIFT));
- regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x0E);
regmap_write(es8326->regmap, ES8326_PGA_PDN, 0x40);
regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x00);
regmap_update_bits(es8326->regmap, ES8326_CLK_CTL, 0x20, 0x20);
-
- regmap_update_bits(es8326->regmap, ES8326_RESET,
- ES8326_CSM_ON, ES8326_CSM_ON);
+ regmap_update_bits(es8326->regmap, ES8326_RESET, 0x02, 0x00);
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x3b);
- regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x00);
regmap_update_bits(es8326->regmap, ES8326_CLK_CTL, 0x20, 0x00);
regmap_write(es8326->regmap, ES8326_SDINOUT1_IO, ES8326_IO_INPUT);
break;
@@ -513,7 +596,7 @@ static const struct snd_soc_dai_ops es8326_ops = {
.set_fmt = es8326_set_dai_fmt,
.set_sysclk = es8326_set_dai_sysclk,
.mute_stream = es8326_mute,
- .no_capture_mute = 1,
+ .no_capture_mute = 0,
};
static struct snd_soc_dai_driver es8326_dai = {
@@ -672,6 +755,8 @@ static void es8326_jack_detect_handler(struct work_struct *work)
es8326->hp = 0;
}
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
+ regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x0a);
+ regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x03);
/*
* Inverted HPJACK_POL bit to trigger one IRQ to double check HP Removal event
*/
@@ -695,8 +780,11 @@ static void es8326_jack_detect_handler(struct work_struct *work)
* Don't report jack status.
*/
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01);
+ es8326_enable_micbias(es8326->component);
usleep_range(50000, 70000);
regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
+ regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x1f);
+ regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x08);
queue_delayed_work(system_wq, &es8326->jack_detect_work,
msecs_to_jiffies(400));
es8326->hp = 1;
@@ -736,13 +824,10 @@ exit:
static irqreturn_t es8326_irq(int irq, void *dev_id)
{
struct es8326_priv *es8326 = dev_id;
- struct snd_soc_component *comp = es8326->component;
if (!es8326->jack)
goto out;
- es8326_enable_micbias(comp);
-
if (es8326->jack->status & SND_JACK_HEADSET)
queue_delayed_work(system_wq, &es8326->jack_detect_work,
msecs_to_jiffies(10));
@@ -766,14 +851,14 @@ static int es8326_calibrate(struct snd_soc_component *component)
if ((es8326->version == ES8326_VERSION_B) && (es8326->calibrated == false)) {
dev_dbg(component->dev, "ES8326_VERSION_B, calibrating\n");
regmap_write(es8326->regmap, ES8326_CLK_INV, 0xc0);
- regmap_write(es8326->regmap, ES8326_CLK_DIV1, 0x01);
+ regmap_write(es8326->regmap, ES8326_CLK_DIV1, 0x03);
regmap_write(es8326->regmap, ES8326_CLK_DLL, 0x30);
regmap_write(es8326->regmap, ES8326_CLK_MUX, 0xed);
regmap_write(es8326->regmap, ES8326_CLK_DAC_SEL, 0x08);
regmap_write(es8326->regmap, ES8326_CLK_TRI, 0xc1);
regmap_write(es8326->regmap, ES8326_DAC_MUTE, 0x03);
regmap_write(es8326->regmap, ES8326_ANA_VSEL, 0x7f);
- regmap_write(es8326->regmap, ES8326_VMIDLOW, 0x03);
+ regmap_write(es8326->regmap, ES8326_VMIDLOW, 0x23);
regmap_write(es8326->regmap, ES8326_DAC2HPMIX, 0x88);
usleep_range(15000, 20000);
regmap_write(es8326->regmap, ES8326_HP_OFFSET_CAL, 0x8c);
@@ -814,13 +899,13 @@ static int es8326_resume(struct snd_soc_component *component)
/* reset internal clock state */
regmap_write(es8326->regmap, ES8326_RESET, 0x1f);
regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x0E);
+ regmap_write(es8326->regmap, ES8326_ANA_LP, 0xf0);
usleep_range(10000, 15000);
regmap_write(es8326->regmap, ES8326_HPJACK_TIMER, 0xe9);
- regmap_write(es8326->regmap, ES8326_ANA_MICBIAS, 0x4b);
+ regmap_write(es8326->regmap, ES8326_ANA_MICBIAS, 0xcb);
/* set headphone default type and detect pin */
regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x83);
regmap_write(es8326->regmap, ES8326_CLK_RESAMPLE, 0x05);
- regmap_write(es8326->regmap, ES8326_HP_MISC, 0x30);
/* set internal oscillator as clock source of headphone cp */
regmap_write(es8326->regmap, ES8326_CLK_DIV_CPC, 0x89);
@@ -828,14 +913,15 @@ static int es8326_resume(struct snd_soc_component *component)
/* clock manager reset release */
regmap_write(es8326->regmap, ES8326_RESET, 0x17);
/* set headphone detection as half scan mode */
- regmap_write(es8326->regmap, ES8326_HP_MISC, 0x30);
+ regmap_write(es8326->regmap, ES8326_HP_MISC, 0x3d);
regmap_write(es8326->regmap, ES8326_PULLUP_CTL, 0x00);
/* enable headphone driver */
+ regmap_write(es8326->regmap, ES8326_HP_VOL, 0xc4);
regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa7);
usleep_range(2000, 5000);
- regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0xa3);
- regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0xb3);
+ regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0x23);
+ regmap_write(es8326->regmap, ES8326_HP_DRIVER_REF, 0x33);
regmap_write(es8326->regmap, ES8326_HP_DRIVER, 0xa1);
regmap_write(es8326->regmap, ES8326_CLK_INV, 0x00);
@@ -844,6 +930,8 @@ static int es8326_resume(struct snd_soc_component *component)
regmap_write(es8326->regmap, ES8326_CLK_CAL_TIME, 0x00);
/* calibrate for B version */
es8326_calibrate(component);
+ regmap_write(es8326->regmap, ES8326_DAC_CROSSTALK, 0xaa);
+ regmap_write(es8326->regmap, ES8326_DAC_RAMPRATE, 0x00);
/* turn off headphone out */
regmap_write(es8326->regmap, ES8326_HP_CAL, 0x00);
/* set ADC and DAC in low power mode */
@@ -856,6 +944,14 @@ static int es8326_resume(struct snd_soc_component *component)
regmap_write(es8326->regmap, ES8326_DAC_DSM, 0x08);
regmap_write(es8326->regmap, ES8326_DAC_VPPSCALE, 0x15);
+ regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x80 |
+ ((es8326->version == ES8326_VERSION_B) ?
+ (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol) :
+ (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol | 0x04)));
+ usleep_range(5000, 10000);
+ es8326_enable_micbias(es8326->component);
+ usleep_range(50000, 70000);
+ regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00);
regmap_write(es8326->regmap, ES8326_INT_SOURCE,
(ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON));
regmap_write(es8326->regmap, ES8326_INTOUT_IO,
@@ -864,7 +960,7 @@ static int es8326_resume(struct snd_soc_component *component)
(ES8326_IO_DMIC_CLK << ES8326_SDINOUT1_SHIFT));
regmap_write(es8326->regmap, ES8326_SDINOUT23_IO, ES8326_IO_INPUT);
- regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x3b);
+ regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x00);
regmap_write(es8326->regmap, ES8326_RESET, ES8326_CSM_ON);
regmap_update_bits(es8326->regmap, ES8326_PGAGAIN, ES8326_MIC_SEL_MASK,
ES8326_MIC1_SEL);
@@ -872,11 +968,7 @@ static int es8326_resume(struct snd_soc_component *component)
regmap_update_bits(es8326->regmap, ES8326_DAC_MUTE, ES8326_MUTE_MASK,
ES8326_MUTE);
- regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x80 |
- ((es8326->version == ES8326_VERSION_B) ?
- (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol) :
- (ES8326_HP_DET_SRC_PIN9 | es8326->jack_pol | 0x04)));
- regmap_write(es8326->regmap, ES8326_HP_VOL, 0x11);
+ regmap_write(es8326->regmap, ES8326_ADC_MUTE, 0x0f);
es8326->jack_remove_retry = 0;
es8326->hp = 0;
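The es8326 crosstalk handlers above split one 5-bit control value across two registers: bit 4 lands in a DAC_RAMPRATE bit and bits 3:0 in one nibble of DAC_CROSSTALK. A stand-alone sketch of the CROSSTALK1 packing arithmetic, with the register layout as implied by the masks in the hunks:

#include <stdio.h>

static unsigned int xtalk1_pack_h(unsigned int v) { return (v & 0x10) << 1; }
static unsigned int xtalk1_pack_l(unsigned int v) { return (v & 0x0f) << 4; }

static unsigned int xtalk1_unpack(unsigned int ramprate, unsigned int xtalk)
{
	/* RAMPRATE bit 5 -> control bit 4; CROSSTALK bits 7:4 -> bits 3:0 */
	return ((ramprate & 0x20) >> 1) | ((xtalk & 0xf0) >> 4);
}

int main(void)
{
	unsigned int v = 0x1b;	/* any 5-bit control value */

	printf("h=0x%02x l=0x%02x round-trip=0x%02x\n",
	       xtalk1_pack_h(v), xtalk1_pack_l(v),
	       xtalk1_unpack(xtalk1_pack_h(v), xtalk1_pack_l(v)));
	return 0;	/* round-trip prints 0x1b again */
}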
diff --git a/sound/soc/codecs/es8326.h b/sound/soc/codecs/es8326.h
index 90a08351d6ac..4234bbb900c4 100644
--- a/sound/soc/codecs/es8326.h
+++ b/sound/soc/codecs/es8326.h
@@ -72,6 +72,7 @@
#define ES8326_DAC_VOL 0x50
#define ES8326_DRC_RECOVERY 0x53
#define ES8326_DRC_WINSIZE 0x54
+#define ES8326_DAC_CROSSTALK 0x55
#define ES8326_HPJACK_TIMER 0x56
#define ES8326_HPDET_TYPE 0x57
#define ES8326_INT_SOURCE 0x58
@@ -100,7 +101,7 @@
#define ES8326_MUTE (3 << 0)
/* ES8326_CLK_CTL */
-#define ES8326_CLK_ON (0x7f << 0)
+#define ES8326_CLK_ON (0x7e << 0)
#define ES8326_CLK_OFF (0 << 0)
/* ES8326_CLK_INV */
diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
index 7e21cec3c2fb..6ce309980cd1 100644
--- a/sound/soc/codecs/lpass-wsa-macro.c
+++ b/sound/soc/codecs/lpass-wsa-macro.c
@@ -1584,7 +1584,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
u16 gain_reg;
u16 reg;
int val;
- int offset_val = 0;
struct wsa_macro *wsa = snd_soc_component_get_drvdata(component);
if (w->shift == WSA_MACRO_COMP1) {
@@ -1623,10 +1622,8 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
CDC_WSA_RX1_RX_PATH_MIX_SEC0,
CDC_WSA_RX_PGA_HALF_DB_MASK,
CDC_WSA_RX_PGA_HALF_DB_ENABLE);
- offset_val = -2;
}
val = snd_soc_component_read(component, gain_reg);
- val += offset_val;
snd_soc_component_write(component, gain_reg, val);
wsa_macro_config_ear_spkr_gain(component, wsa,
event, gain_reg);
@@ -1654,10 +1651,6 @@ static int wsa_macro_enable_interpolator(struct snd_soc_dapm_widget *w,
CDC_WSA_RX1_RX_PATH_MIX_SEC0,
CDC_WSA_RX_PGA_HALF_DB_MASK,
CDC_WSA_RX_PGA_HALF_DB_DISABLE);
- offset_val = 2;
- val = snd_soc_component_read(component, gain_reg);
- val += offset_val;
- snd_soc_component_write(component, gain_reg, val);
}
wsa_macro_config_ear_spkr_gain(component, wsa,
event, gain_reg);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 5150d6ee3748..20191a4473c2 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3317,6 +3317,7 @@ static void rt5645_jack_detect_work(struct work_struct *work)
report, SND_JACK_HEADPHONE);
snd_soc_jack_report(rt5645->mic_jack,
report, SND_JACK_MICROPHONE);
+ mutex_unlock(&rt5645->jd_mutex);
return;
case 4:
val = snd_soc_component_read(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020;
@@ -3692,6 +3693,11 @@ static const struct rt5645_platform_data jd_mode3_monospk_platform_data = {
.mono_speaker = true,
};
+static const struct rt5645_platform_data jd_mode3_inv_data = {
+ .jd_mode = 3,
+ .inv_jd1_1 = true,
+};
+
static const struct rt5645_platform_data jd_mode3_platform_data = {
.jd_mode = 3,
};
@@ -3837,6 +3843,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
DMI_EXACT_MATCH(DMI_BOARD_VERSION, "Default string"),
+ /*
+ * The strings above are too generic. LattePanda BIOS versions for
+ * all 4 hw revisions are:
+ * DF-BI-7-S70CR100-*
+ * DF-BI-7-S70CR110-*
+ * DF-BI-7-S70CR200-*
+ * LP-BS-7-S70CR700-*
+ * Do a partial match for S70CR to avoid false positive matches.
+ */
+ DMI_MATCH(DMI_BIOS_VERSION, "S70CR"),
},
.driver_data = (void *)&lattepanda_board_platform_data,
},
@@ -3871,6 +3887,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&intel_braswell_platform_data,
},
+ {
+ .ident = "Meegopad T08",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ .driver_data = (void *)&jd_mode3_inv_data,
+ },
{ }
};
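The LattePanda entry above relies on DMI_MATCH() being a substring match, unlike DMI_EXACT_MATCH(): "S70CR" matches all four BIOS version strings listed in the comment. A minimal sketch of the same idiom with dmi_first_match(); the table contents are illustrative only:

#include <linux/dmi.h>

static const struct dmi_system_id example_dmi_table[] = {
	{
		.ident = "Example board",
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
			/* substring match: hits S70CR100, S70CR110, ... */
			DMI_MATCH(DMI_BIOS_VERSION, "S70CR"),
		},
		.driver_data = (void *)"example-pdata",
	},
	{ }
};

static const char *example_get_pdata(void)
{
	const struct dmi_system_id *id = dmi_first_match(example_dmi_table);

	return id ? id->driver_data : NULL;
}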
diff --git a/sound/soc/codecs/rtq9128.c b/sound/soc/codecs/rtq9128.c
index c22b047115cc..aa3eadecd974 100644
--- a/sound/soc/codecs/rtq9128.c
+++ b/sound/soc/codecs/rtq9128.c
@@ -59,6 +59,7 @@
struct rtq9128_data {
struct gpio_desc *enable;
+ unsigned int daifmt;
int tdm_slots;
int tdm_slot_width;
bool tdm_input_data2_select;
@@ -391,7 +392,11 @@ static int rtq9128_component_probe(struct snd_soc_component *comp)
unsigned int val;
int i, ret;
- pm_runtime_resume_and_get(comp->dev);
+ ret = pm_runtime_resume_and_get(comp->dev);
+ if (ret < 0) {
+ dev_err(comp->dev, "Failed to resume device (%d)\n", ret);
+ return ret;
+ }
val = snd_soc_component_read(comp, RTQ9128_REG_EFUSE_DATA);
@@ -437,10 +442,7 @@ static const struct snd_soc_component_driver rtq9128_comp_driver = {
static int rtq9128_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct rtq9128_data *data = snd_soc_dai_get_drvdata(dai);
- struct snd_soc_component *comp = dai->component;
struct device *dev = dai->dev;
- unsigned int audfmt, fmtval;
- int ret;
dev_dbg(dev, "%s: fmt 0x%8x\n", __func__, fmt);
@@ -450,35 +452,10 @@ static int rtq9128_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- fmtval = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
- if (data->tdm_slots && fmtval != SND_SOC_DAIFMT_DSP_A && fmtval != SND_SOC_DAIFMT_DSP_B) {
- dev_err(dev, "TDM is used, format only support DSP_A or DSP_B\n");
- return -EINVAL;
- }
+ /* Store the format now; it is applied to the hardware in hw_params */
+ data->daifmt = fmt;
- switch (fmtval) {
- case SND_SOC_DAIFMT_I2S:
- audfmt = 8;
- break;
- case SND_SOC_DAIFMT_LEFT_J:
- audfmt = 9;
- break;
- case SND_SOC_DAIFMT_RIGHT_J:
- audfmt = 10;
- break;
- case SND_SOC_DAIFMT_DSP_A:
- audfmt = data->tdm_slots ? 12 : 11;
- break;
- case SND_SOC_DAIFMT_DSP_B:
- audfmt = data->tdm_slots ? 4 : 3;
- break;
- default:
- dev_err(dev, "Unsupported format 0x%8x\n", fmt);
- return -EINVAL;
- }
-
- ret = snd_soc_component_write_field(comp, RTQ9128_REG_I2S_OPT, RTQ9128_AUDFMT_MASK, audfmt);
- return ret < 0 ? ret : 0;
+ return 0;
}
static int rtq9128_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
@@ -554,10 +531,38 @@ static int rtq9128_dai_hw_params(struct snd_pcm_substream *stream, struct snd_pc
unsigned int width, slot_width, bitrate, audbit, dolen;
struct snd_soc_component *comp = dai->component;
struct device *dev = dai->dev;
+ unsigned int fmtval, audfmt;
int ret;
dev_dbg(dev, "%s: width %d\n", __func__, params_width(param));
+ fmtval = FIELD_GET(SND_SOC_DAIFMT_FORMAT_MASK, data->daifmt);
+ if (data->tdm_slots && fmtval != SND_SOC_DAIFMT_DSP_A && fmtval != SND_SOC_DAIFMT_DSP_B) {
+ dev_err(dev, "TDM is used, format only support DSP_A or DSP_B\n");
+ return -EINVAL;
+ }
+
+ switch (fmtval) {
+ case SND_SOC_DAIFMT_I2S:
+ audfmt = 8;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ audfmt = 9;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ audfmt = 10;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ audfmt = data->tdm_slots ? 12 : 11;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ audfmt = data->tdm_slots ? 4 : 3;
+ break;
+ default:
+ dev_err(dev, "Unsupported format 0x%8x\n", fmtval);
+ return -EINVAL;
+ }
+
switch (width = params_width(param)) {
case 16:
audbit = 0;
@@ -611,6 +616,10 @@ static int rtq9128_dai_hw_params(struct snd_pcm_substream *stream, struct snd_pc
return -EINVAL;
}
+ ret = snd_soc_component_write_field(comp, RTQ9128_REG_I2S_OPT, RTQ9128_AUDFMT_MASK, audfmt);
+ if (ret < 0)
+ return ret;
+
ret = snd_soc_component_write_field(comp, RTQ9128_REG_I2S_OPT, RTQ9128_AUDBIT_MASK, audbit);
if (ret < 0)
return ret;
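The rtq9128 change above moves the format-register write out of set_fmt(), which can run while the device is still powered down, into hw_params(), which runs on an active stream. A minimal sketch of the cache-then-apply pattern, with hypothetical names:

#include <sound/pcm_params.h>
#include <sound/soc.h>

struct mycodec {
	unsigned int daifmt;
};

static int mycodec_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct mycodec *mc = snd_soc_dai_get_drvdata(dai);

	/* no register I/O here: only cache the requested format */
	mc->daifmt = fmt;
	return 0;
}

static int mycodec_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *dai)
{
	struct mycodec *mc = snd_soc_dai_get_drvdata(dai);

	switch (mc->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_DSP_A:
		/* device is powered now: program the format register here */
		return 0;
	default:
		return -EINVAL;
	}
}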
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
index 962c2cdfa017..54561ae598b8 100644
--- a/sound/soc/codecs/tas2562.c
+++ b/sound/soc/codecs/tas2562.c
@@ -59,7 +59,6 @@ struct tas2562_data {
enum tas256x_model {
TAS2562,
- TAS2563,
TAS2564,
TAS2110,
};
@@ -721,7 +720,6 @@ static int tas2562_parse_dt(struct tas2562_data *tas2562)
static const struct i2c_device_id tas2562_id[] = {
{ "tas2562", TAS2562 },
- { "tas2563", TAS2563 },
{ "tas2564", TAS2564 },
{ "tas2110", TAS2110 },
{ }
@@ -770,7 +768,6 @@ static int tas2562_probe(struct i2c_client *client)
#ifdef CONFIG_OF
static const struct of_device_id tas2562_of_match[] = {
{ .compatible = "ti,tas2562", },
- { .compatible = "ti,tas2563", },
{ .compatible = "ti,tas2564", },
{ .compatible = "ti,tas2110", },
{ },
diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c
index b7e56ceb1acf..5d0e5348b361 100644
--- a/sound/soc/codecs/tas2781-comlib.c
+++ b/sound/soc/codecs/tas2781-comlib.c
@@ -267,6 +267,7 @@ void tas2781_reset(struct tasdevice_priv *tas_dev)
EXPORT_SYMBOL_GPL(tas2781_reset);
int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
+ struct module *module,
void (*cont)(const struct firmware *fw, void *context))
{
int ret = 0;
@@ -280,7 +281,7 @@ int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
tas_priv->dev_name, tas_priv->ndev);
crc8_populate_msb(tas_priv->crc8_lkp_tbl, TASDEVICE_CRC8_POLYNOMIAL);
tas_priv->codec = codec;
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ ret = request_firmware_nowait(module, FW_ACTION_UEVENT,
tas_priv->rca_binaryname, tas_priv->dev, GFP_KERNEL, tas_priv,
cont);
if (ret)
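tascodec_init() lives in a shared library module, so using THIS_MODULE inside it would pin the library rather than the driver actually waiting for the firmware: request_firmware_nowait() holds a reference on the module it is given for the lifetime of the async load. A sketch of the ownership-forwarding idiom, with hypothetical names:

#include <linux/firmware.h>
#include <linux/module.h>

/* shared helper: the end driver passes its own THIS_MODULE as @owner */
int mylib_load_fw(struct device *dev, struct module *owner, const char *name,
		  void *ctx, void (*done)(const struct firmware *fw, void *ctx))
{
	return request_firmware_nowait(owner, FW_ACTION_UEVENT, name, dev,
				       GFP_KERNEL, ctx, done);
}

/* in the consuming driver:
 *	mylib_load_fw(dev, THIS_MODULE, "vendor/fw.bin", priv, fw_ready_cb);
 */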
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index 917b1c15f71d..b5abff230e43 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
//
-// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
//
// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
// https://www.ti.com
//
-// The TAS2781 driver implements a flexible and configurable
+// The TAS2563/TAS2781 driver implements a flexible and configurable
// algo coefficient setting for one, two, or even multiple
-// TAS2781 chips.
+// TAS2563/TAS2781 chips.
//
// Author: Shenghao Ding <shenghao-ding@ti.com>
// Author: Kevin Lu <kevin-lu@ti.com>
@@ -32,6 +32,7 @@
#include <sound/tas2781-tlv.h>
static const struct i2c_device_id tasdevice_id[] = {
+ { "tas2563", TAS2563 },
{ "tas2781", TAS2781 },
{}
};
@@ -39,6 +40,7 @@ MODULE_DEVICE_TABLE(i2c, tasdevice_id);
#ifdef CONFIG_OF
static const struct of_device_id tasdevice_of_match[] = {
+ { .compatible = "ti,tas2563" },
{ .compatible = "ti,tas2781" },
{},
};
@@ -564,7 +566,7 @@ static int tasdevice_codec_probe(struct snd_soc_component *codec)
{
struct tasdevice_priv *tas_priv = snd_soc_component_get_drvdata(codec);
- return tascodec_init(tas_priv, codec, tasdevice_fw_ready);
+ return tascodec_init(tas_priv, codec, THIS_MODULE, tasdevice_fw_ready);
}
static void tasdevice_deinit(void *context)
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 43c648efd0d9..deb15b95992d 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -3033,7 +3033,6 @@ static int wcd9335_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
{
struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
u16 gain_reg;
- int offset_val = 0;
int val = 0;
switch (w->reg) {
@@ -3073,7 +3072,6 @@ static int wcd9335_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
val = snd_soc_component_read(comp, gain_reg);
- val += offset_val;
snd_soc_component_write(comp, gain_reg, val);
break;
case SND_SOC_DAPM_POST_PMD:
@@ -3294,7 +3292,6 @@ static int wcd9335_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
u16 gain_reg;
u16 reg;
int val;
- int offset_val = 0;
if (!(snd_soc_dapm_widget_name_cmp(w, "RX INT0 INTERP"))) {
reg = WCD9335_CDC_RX0_RX_PATH_CTL;
@@ -3337,7 +3334,6 @@ static int wcd9335_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMU:
wcd9335_config_compander(comp, w->shift, event);
val = snd_soc_component_read(comp, gain_reg);
- val += offset_val;
snd_soc_component_write(comp, gain_reg, val);
break;
case SND_SOC_DAPM_POST_PMD:
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 1b6e376f3833..6813268e6a19 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -13,7 +13,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/slimbus.h>
#include <sound/pcm_params.h>
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index faf8d3f9b3c5..6021aa5a5689 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -210,7 +210,7 @@ struct wcd938x_priv {
};
static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(ear_pa_gain, 600, -1800);
-static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, -3000);
+static const DECLARE_TLV_DB_SCALE(line_gain, -3000, 150, 0);
static const SNDRV_CTL_TLVD_DECLARE_DB_MINMAX(analog_gain, 0, 3000);
struct wcd938x_mbhc_zdet_param {
@@ -3587,10 +3587,8 @@ static int wcd938x_probe(struct platform_device *pdev)
mutex_init(&wcd938x->micb_lock);
ret = wcd938x_populate_dt_data(wcd938x, dev);
- if (ret) {
- dev_err(dev, "%s: Fail to obtain platform data\n", __func__);
- return -EINVAL;
- }
+ if (ret)
+ return ret;
ret = wcd938x_add_slave_components(wcd938x, dev, &match);
if (ret)
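The line_gain fix above hinges on the last argument of DECLARE_TLV_DB_SCALE() being a mute flag, not a repeat of the minimum: non-zero means the lowest step is reported to user space as mute. A short sketch of both forms, with hypothetical control names:

#include <sound/tlv.h>

/* -30.00 dB minimum, 1.50 dB per step, lowest step is a real level */
static const DECLARE_TLV_DB_SCALE(example_gain, -3000, 150, 0);

/* same scale, but step 0 is reported to user space as mute */
static const DECLARE_TLV_DB_SCALE(example_gain_muted, -3000, 150, 1);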
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index c01e31175015..36ea0dcdc7ab 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -739,19 +739,25 @@ static int wm_adsp_request_firmware_file(struct wm_adsp *dsp,
const char *filetype)
{
struct cs_dsp *cs_dsp = &dsp->cs_dsp;
+ const char *fwf;
char *s, c;
int ret = 0;
+ if (dsp->fwf_name)
+ fwf = dsp->fwf_name;
+ else
+ fwf = dsp->cs_dsp.name;
+
if (system_name && asoc_component_prefix)
*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-%s.%s", dir, dsp->part,
- dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
+ fwf, wm_adsp_fw[dsp->fw].file, system_name,
asoc_component_prefix, filetype);
else if (system_name)
*filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s.%s", dir, dsp->part,
- dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
+ fwf, wm_adsp_fw[dsp->fw].file, system_name,
filetype);
else
- *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, dsp->fwf_name,
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, fwf,
wm_adsp_fw[dsp->fw].file, filetype);
if (*filename == NULL)
@@ -823,6 +829,23 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
}
}
+ /* Check system-specific bin without wmfw before falling back to generic */
+ if (dsp->wmfw_optional && system_name) {
+ if (asoc_component_prefix)
+ wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
+ cirrus_dir, system_name,
+ asoc_component_prefix, "bin");
+
+ if (!*coeff_firmware)
+ wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
+ cirrus_dir, system_name,
+ NULL, "bin");
+
+ if (*coeff_firmware)
+ return 0;
+ }
+
+ /* Check legacy location */
if (!wm_adsp_request_firmware_file(dsp, wmfw_firmware, wmfw_filename,
"", NULL, NULL, "wmfw")) {
wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
@@ -830,62 +853,28 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
return 0;
}
+ /* Fall back to generic wmfw and optional matching bin */
ret = wm_adsp_request_firmware_file(dsp, wmfw_firmware, wmfw_filename,
cirrus_dir, NULL, NULL, "wmfw");
- if (!ret) {
+ if (!ret || dsp->wmfw_optional) {
wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
cirrus_dir, NULL, NULL, "bin");
return 0;
}
- if (dsp->wmfw_optional) {
- if (system_name) {
- if (asoc_component_prefix)
- wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
- cirrus_dir, system_name,
- asoc_component_prefix, "bin");
-
- if (!*coeff_firmware)
- wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
- cirrus_dir, system_name,
- NULL, "bin");
- }
-
- if (!*coeff_firmware)
- wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
- "", NULL, NULL, "bin");
-
- if (!*coeff_firmware)
- wm_adsp_request_firmware_file(dsp, coeff_firmware, coeff_filename,
- cirrus_dir, NULL, NULL, "bin");
-
- return 0;
- }
-
adsp_err(dsp, "Failed to request firmware <%s>%s-%s-%s<-%s<%s>>.wmfw\n",
- cirrus_dir, dsp->part, dsp->fwf_name, wm_adsp_fw[dsp->fw].file,
- system_name, asoc_component_prefix);
+ cirrus_dir, dsp->part,
+ dsp->fwf_name ? dsp->fwf_name : dsp->cs_dsp.name,
+ wm_adsp_fw[dsp->fw].file, system_name, asoc_component_prefix);
return -ENOENT;
}
static int wm_adsp_common_init(struct wm_adsp *dsp)
{
- char *p;
-
INIT_LIST_HEAD(&dsp->compr_list);
INIT_LIST_HEAD(&dsp->buffer_list);
- if (!dsp->fwf_name) {
- p = devm_kstrdup(dsp->cs_dsp.dev, dsp->cs_dsp.name, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- dsp->fwf_name = p;
- for (; *p != 0; ++p)
- *p = tolower(*p);
- }
-
return 0;
}
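With the reordering above (and the system-specific wmfw attempts in hunks not shown here), a wmfw-optional DSP now resolves its firmware files roughly in the order printed by this stand-alone sketch; all qualifier values are hypothetical:

#include <stdio.h>

int main(void)
{
	const char *part = "cs35l56", *fwf = "b0-123456-dsp1", *fw = "misc";
	const char *sys = "10280c23-spkid1", *pfx = "AMP1";

	printf("cirrus/%s-%s-%s-%s-%s.wmfw\n", part, fwf, fw, sys, pfx);
	printf("cirrus/%s-%s-%s-%s.wmfw\n", part, fwf, fw, sys);
	printf("cirrus/%s-%s-%s-%s-%s.bin  (bin-only, wmfw optional)\n",
	       part, fwf, fw, sys, pfx);
	printf("cirrus/%s-%s-%s-%s.bin  (bin-only, wmfw optional)\n",
	       part, fwf, fw, sys);
	printf("%s-%s-%s.wmfw  (legacy location)\n", part, fwf, fw);
	printf("cirrus/%s-%s-%s.wmfw  (generic fallback, plus matching .bin)\n",
	       part, fwf, fw);
	return 0;
}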
diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
index cb83c569e18d..a2e86ef7d18f 100644
--- a/sound/soc/codecs/wsa883x.c
+++ b/sound/soc/codecs/wsa883x.c
@@ -1098,7 +1098,11 @@ static int wsa_dev_mode_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static const DECLARE_TLV_DB_SCALE(pa_gain, -300, 150, -300);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(pa_gain,
+ 0, 14, TLV_DB_SCALE_ITEM(-300, 0, 0),
+ 15, 29, TLV_DB_SCALE_ITEM(-300, 150, 0),
+ 30, 31, TLV_DB_SCALE_ITEM(1800, 0, 0),
+);
static int wsa883x_get_swr_port(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
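The pa_gain change above replaces a single linear scale with a piecewise map, since the amplifier's register steps are flat at both ends of the range. A stand-alone helper showing the dB value each step resolves to under those ranges; the values are copied from the hunk, but the helper itself is illustrative, not kernel API:

#include <stdio.h>

static int example_pa_gain_db_x100(unsigned int step)
{
	if (step <= 14)
		return -300;				/* flat -3 dB floor */
	if (step <= 29)
		return -300 + (int)(step - 15) * 150;	/* 1.5 dB per step */
	return 1800;					/* flat +18 dB top */
}

int main(void)
{
	printf("step 14 -> %d, step 22 -> %d, step 31 -> %d (dB x100)\n",
	       example_pa_gain_db_x100(14), example_pa_gain_db_x100(22),
	       example_pa_gain_db_x100(31));
	return 0;
}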
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index f0fb33d719c2..c46f64557a7f 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -174,7 +174,9 @@ static int fsl_xcvr_activate_ctl(struct snd_soc_dai *dai, const char *name,
struct snd_kcontrol *kctl;
bool enabled;
- kctl = snd_soc_card_get_kcontrol(card, name);
+ lockdep_assert_held(&card->snd_card->controls_rwsem);
+
+ kctl = snd_soc_card_get_kcontrol_locked(card, name);
if (kctl == NULL)
return -ENOENT;
@@ -576,10 +578,14 @@ static int fsl_xcvr_startup(struct snd_pcm_substream *substream,
xcvr->streams |= BIT(substream->stream);
if (!xcvr->soc_data->spdif_only) {
+ struct snd_soc_card *card = dai->component->card;
+
/* Disable XCVR controls if there is a stream started */
+ down_read(&card->snd_card->controls_rwsem);
fsl_xcvr_activate_ctl(dai, fsl_xcvr_mode_kctl.name, false);
fsl_xcvr_activate_ctl(dai, fsl_xcvr_arc_mode_kctl.name, false);
fsl_xcvr_activate_ctl(dai, fsl_xcvr_earc_capds_kctl.name, false);
+ up_read(&card->snd_card->controls_rwsem);
}
return 0;
@@ -598,11 +604,15 @@ static void fsl_xcvr_shutdown(struct snd_pcm_substream *substream,
/* Enable XCVR controls if no stream is started */
if (!xcvr->streams) {
if (!xcvr->soc_data->spdif_only) {
+ struct snd_soc_card *card = dai->component->card;
+
+ down_read(&card->snd_card->controls_rwsem);
fsl_xcvr_activate_ctl(dai, fsl_xcvr_mode_kctl.name, true);
fsl_xcvr_activate_ctl(dai, fsl_xcvr_arc_mode_kctl.name,
(xcvr->mode == FSL_XCVR_MODE_ARC));
fsl_xcvr_activate_ctl(dai, fsl_xcvr_earc_capds_kctl.name,
(xcvr->mode == FSL_XCVR_MODE_EARC));
+ up_read(&card->snd_card->controls_rwsem);
}
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_IER0,
FSL_XCVR_IRQ_EARC_ALL, 0);
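fsl_xcvr above is the first caller converted to the _locked lookup (the variant itself is added in the soc-card.c hunks further down): the caller takes controls_rwsem once around a batch of lookups, and the helper documents its contract with lockdep_assert_held(). A generic sketch of that idiom, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(mylock);
static int myvalue;

static int mylist_lookup(void)
{
	lockdep_assert_held(&mylock);	/* read or write hold both satisfy it */
	return myvalue;
}

static int mylist_lookup_unlocked(void)
{
	int v;

	down_read(&mylock);
	v = mylist_lookup();
	up_read(&mylock);
	return v;
}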
diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c
index 9c94677f681a..62606e20be9a 100644
--- a/sound/soc/generic/audio-graph-card2.c
+++ b/sound/soc/generic/audio-graph-card2.c
@@ -556,7 +556,7 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
struct device_node *mcodec_port;
int codec_idx;
- if (*nm_idx >= nm_max)
+ if (*nm_idx > nm_max)
break;
mcpu_ep_n = of_get_next_child(mcpu_port, mcpu_ep_n);
diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
index 59c3793f65df..db78eb2f0108 100644
--- a/sound/soc/intel/avs/core.c
+++ b/sound/soc/intel/avs/core.c
@@ -477,6 +477,9 @@ static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
return 0;
err_i915_init:
+ pci_free_irq(pci, 0, adev);
+ pci_free_irq(pci, 0, bus);
+ pci_free_irq_vectors(pci);
pci_clear_master(pci);
pci_set_drvdata(pci, NULL);
err_acquire_irq:
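The added lines release both IRQs and the vectors on the i915-init failure path, restoring the rule that an error path unwinds exactly what probe acquired, in reverse order. A stand-alone sketch of that goto-unwind shape, with stubbed steps:

#include <stdio.h>

static int step_a(void) { puts("A acquired"); return 0; }	/* e.g. IRQs */
static int step_b(void) { puts("B failed"); return -1; }	/* e.g. i915 */
static void undo_a(void) { puts("A released"); }

static int example_probe(void)
{
	int ret = step_a();

	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_undo_a;	/* unwind mirrors acquisition order */

	return 0;

err_undo_a:
	undo_a();
	return ret;
}

int main(void) { return example_probe() ? 1 : 0; }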
diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
index 778236d3fd28..48b3c67c9103 100644
--- a/sound/soc/intel/avs/topology.c
+++ b/sound/soc/intel/avs/topology.c
@@ -857,7 +857,7 @@ assign_copier_gtw_instance(struct snd_soc_component *comp, struct avs_tplg_modcf
}
/* If the topology sets a value, don't overwrite it */
- if (cfg->copier.vindex.i2s.instance)
+ if (cfg->copier.vindex.val)
return;
mach = dev_get_platdata(comp->card->dev);
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 816fad8c1ff0..540f7a29310a 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -797,6 +797,9 @@ static int broxton_audio_probe(struct platform_device *pdev)
broxton_audio_card.name = "glkda7219max";
/* Fixup the SSP entries for geminilake */
for (i = 0; i < ARRAY_SIZE(broxton_dais); i++) {
+ if (!broxton_dais[i].codecs->dai_name)
+ continue;
+
/* MAXIM_CODEC is connected to SSP1. */
if (!strcmp(broxton_dais[i].codecs->dai_name,
BXT_MAXIM_CODEC_DAI)) {
@@ -822,6 +825,9 @@ static int broxton_audio_probe(struct platform_device *pdev)
broxton_audio_card.name = "cmlda7219max";
for (i = 0; i < ARRAY_SIZE(broxton_dais); i++) {
+ if (!broxton_dais[i].codecs->dai_name)
+ continue;
+
/* MAXIM_CODEC is connected to SSP1. */
if (!strcmp(broxton_dais[i].codecs->dai_name,
BXT_MAXIM_CODEC_DAI)) {
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 4631106f2a28..c0eb65c14aa9 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -604,7 +604,8 @@ static int broxton_audio_probe(struct platform_device *pdev)
int i;
for (i = 0; i < ARRAY_SIZE(broxton_rt298_dais); i++) {
- if (!strncmp(card->dai_link[i].codecs->name, "i2c-INT343A:00",
+ if (card->dai_link[i].codecs->name &&
+ !strncmp(card->dai_link[i].codecs->name, "i2c-INT343A:00",
I2C_NAME_SIZE)) {
if (!strncmp(card->name, "broxton-rt298",
PLATFORM_NAME_SIZE)) {
diff --git a/sound/soc/intel/boards/bytcht_cx2072x.c b/sound/soc/intel/boards/bytcht_cx2072x.c
index 10a84a2c1036..c014d85a08b2 100644
--- a/sound/soc/intel/boards/bytcht_cx2072x.c
+++ b/sound/soc/intel/boards/bytcht_cx2072x.c
@@ -241,7 +241,8 @@ static int snd_byt_cht_cx2072x_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_cht_cx2072x_dais); i++) {
- if (!strcmp(byt_cht_cx2072x_dais[i].codecs->name,
+ if (byt_cht_cx2072x_dais[i].codecs->name &&
+ !strcmp(byt_cht_cx2072x_dais[i].codecs->name,
"i2c-14F10720:00")) {
dai_index = i;
break;
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index 7e5eea690023..f4ac3ddd148b 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -245,7 +245,8 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(dailink); i++) {
- if (!strcmp(dailink[i].codecs->name, "i2c-DLGS7213:00")) {
+ if (dailink[i].codecs->name &&
+ !strcmp(dailink[i].codecs->name, "i2c-DLGS7213:00")) {
dai_index = i;
break;
}
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 1564a88a885e..2fcec2e02bb5 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -546,7 +546,8 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
- if (!strcmp(byt_cht_es8316_dais[i].codecs->name,
+ if (byt_cht_es8316_dais[i].codecs->name &&
+ !strcmp(byt_cht_es8316_dais[i].codecs->name,
"i2c-ESSX8316:00")) {
dai_index = i;
break;
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 42466b4b1ca4..03be5e26ec4a 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1652,7 +1652,8 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_rt5640_dais); i++) {
- if (!strcmp(byt_rt5640_dais[i].codecs->name,
+ if (byt_rt5640_dais[i].codecs->name &&
+ !strcmp(byt_rt5640_dais[i].codecs->name,
"i2c-10EC5640:00")) {
dai_index = i;
break;
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index f9fe8414f454..80c841b000a3 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -910,7 +910,8 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_rt5651_dais); i++) {
- if (!strcmp(byt_rt5651_dais[i].codecs->name,
+ if (byt_rt5651_dais[i].codecs->name &&
+ !strcmp(byt_rt5651_dais[i].codecs->name,
"i2c-10EC5651:00")) {
dai_index = i;
break;
diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c
index 6978ebde6693..cccb5e90c0fe 100644
--- a/sound/soc/intel/boards/bytcr_wm5102.c
+++ b/sound/soc/intel/boards/bytcr_wm5102.c
@@ -605,7 +605,8 @@ static int snd_byt_wm5102_mc_probe(struct platform_device *pdev)
/* find index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_wm5102_dais); i++) {
- if (!strcmp(byt_wm5102_dais[i].codecs->name,
+ if (byt_wm5102_dais[i].codecs->name &&
+ !strcmp(byt_wm5102_dais[i].codecs->name,
"wm5102-codec")) {
dai_index = i;
break;
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index c952a96cde7e..eb41b7115d01 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -40,7 +40,6 @@ struct cht_acpi_card {
struct cht_mc_private {
struct snd_soc_jack jack;
struct cht_acpi_card *acpi_card;
- char codec_name[SND_ACPI_I2C_ID_LEN];
struct clk *mclk;
};
@@ -567,14 +566,14 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
card->dev = &pdev->dev;
- sprintf(drv->codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
/* set correct codec name */
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++)
- if (!strcmp(card->dai_link[i].codecs->name,
+ if (cht_dailink[i].codecs->name &&
+ !strcmp(cht_dailink[i].codecs->name,
"i2c-10EC5645:00")) {
- card->dai_link[i].codecs->name = drv->codec_name;
dai_index = i;
+ break;
}
/* fixup codec name based on HID */
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 8cf0b33cc02e..be2d1a8dbca8 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -466,7 +466,8 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* find index of codec dai */
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
- if (!strcmp(cht_dailink[i].codecs->name, RT5672_I2C_DEFAULT)) {
+ if (cht_dailink[i].codecs->name &&
+ !strcmp(cht_dailink[i].codecs->name, RT5672_I2C_DEFAULT)) {
dai_index = i;
break;
}
diff --git a/sound/soc/mediatek/common/mtk-dsp-sof-common.c b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
index f3894010f656..7ec8965a70c0 100644
--- a/sound/soc/mediatek/common/mtk-dsp-sof-common.c
+++ b/sound/soc/mediatek/common/mtk-dsp-sof-common.c
@@ -24,7 +24,7 @@ int mtk_sof_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai_link *sof_dai_link = NULL;
const struct sof_conn_stream *conn = &sof_priv->conn_streams[i];
- if (strcmp(rtd->dai_link->name, conn->normal_link))
+ if (conn->normal_link && strcmp(rtd->dai_link->name, conn->normal_link))
continue;
for_each_card_rtds(card, runtime) {
diff --git a/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c b/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
index 5bd6addd1450..bfcb2c486c39 100644
--- a/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
+++ b/sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
@@ -1208,7 +1208,8 @@ static int mt8192_mt6359_dev_probe(struct platform_device *pdev)
dai_link->ignore = 0;
}
- if (strcmp(dai_link->codecs[0].dai_name, RT1015_CODEC_DAI) == 0)
+ if (dai_link->num_codecs && dai_link->codecs[0].dai_name &&
+ strcmp(dai_link->codecs[0].dai_name, RT1015_CODEC_DAI) == 0)
dai_link->ops = &mt8192_rt1015_i2s_ops;
if (!dai_link->platforms->name)
diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
index 1e33863c85ca..620d7ade1992 100644
--- a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
+++ b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
@@ -1795,10 +1795,6 @@ static const struct snd_kcontrol_new mt8195_memif_controls[] = {
MT8195_AFE_IRQ_28),
};
-static const struct snd_soc_component_driver mt8195_afe_pcm_dai_component = {
- .name = "mt8195-afe-pcm-dai",
-};
-
static const struct mtk_base_memif_data memif_data[MT8195_AFE_MEMIF_NUM] = {
[MT8195_AFE_MEMIF_DL2] = {
.name = "DL2",
@@ -3037,7 +3033,6 @@ static int mt8195_afe_pcm_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct reset_control *rstc;
int i, irq_id, ret;
- struct snd_soc_component *component;
ret = of_reserved_mem_device_init(dev);
if (ret)
@@ -3170,36 +3165,12 @@ static int mt8195_afe_pcm_dev_probe(struct platform_device *pdev)
/* register component */
ret = devm_snd_soc_register_component(dev, &mt8195_afe_component,
- NULL, 0);
+ afe->dai_drivers, afe->num_dai_drivers);
if (ret) {
dev_warn(dev, "err_platform\n");
goto err_pm_put;
}
- component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
- if (!component) {
- ret = -ENOMEM;
- goto err_pm_put;
- }
-
- ret = snd_soc_component_initialize(component,
- &mt8195_afe_pcm_dai_component,
- dev);
- if (ret)
- goto err_pm_put;
-
-#ifdef CONFIG_DEBUG_FS
- component->debugfs_prefix = "pcm";
-#endif
-
- ret = snd_soc_add_component(component,
- afe->dai_drivers,
- afe->num_dai_drivers);
- if (ret) {
- dev_warn(dev, "err_dai_component\n");
- goto err_pm_put;
- }
-
ret = regmap_multi_reg_write(afe->regmap, mt8195_afe_reg_defaults,
ARRAY_SIZE(mt8195_afe_reg_defaults));
if (ret)
@@ -3224,8 +3195,6 @@ err_pm_put:
static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev)
{
- snd_soc_unregister_component(&pdev->dev);
-
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mt8195_afe_runtime_suspend(&pdev->dev);
diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359.c b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
index 4feb9fb76967..53fd8a897b9d 100644
--- a/sound/soc/mediatek/mt8195/mt8195-mt6359.c
+++ b/sound/soc/mediatek/mt8195/mt8195-mt6359.c
@@ -934,12 +934,11 @@ SND_SOC_DAILINK_DEFS(ETDM1_IN_BE,
SND_SOC_DAILINK_DEFS(ETDM2_IN_BE,
DAILINK_COMP_ARRAY(COMP_CPU("ETDM2_IN")),
- DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
SND_SOC_DAILINK_DEFS(ETDM1_OUT_BE,
DAILINK_COMP_ARRAY(COMP_CPU("ETDM1_OUT")),
- DAILINK_COMP_ARRAY(COMP_DUMMY()),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
SND_SOC_DAILINK_DEFS(ETDM2_OUT_BE,
@@ -1237,8 +1236,6 @@ static struct snd_soc_dai_link mt8195_mt6359_dai_links[] = {
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.dpcm_capture = 1,
- .init = mt8195_rt5682_init,
- .ops = &mt8195_rt5682_etdm_ops,
.be_hw_params_fixup = mt8195_etdm_hw_params_fixup,
SND_SOC_DAILINK_REG(ETDM2_IN_BE),
},
@@ -1249,7 +1246,6 @@ static struct snd_soc_dai_link mt8195_mt6359_dai_links[] = {
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.dpcm_playback = 1,
- .ops = &mt8195_rt5682_etdm_ops,
.be_hw_params_fixup = mt8195_etdm_hw_params_fixup,
SND_SOC_DAILINK_REG(ETDM1_OUT_BE),
},
@@ -1381,7 +1377,7 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
struct snd_soc_dai_link *dai_link;
struct mtk_soc_card_data *soc_card_data;
struct mt8195_mt6359_priv *mach_priv;
- struct device_node *platform_node, *adsp_node, *dp_node, *hdmi_node;
+ struct device_node *platform_node, *adsp_node, *codec_node, *dp_node, *hdmi_node;
struct mt8195_card_data *card_data;
int is5682s = 0;
int init6359 = 0;
@@ -1401,8 +1397,12 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
if (!card->name)
card->name = card_data->name;
- if (strstr(card->name, "_5682s"))
+ if (strstr(card->name, "_5682s")) {
+ codec_node = of_find_compatible_node(NULL, NULL, "realtek,rt5682s");
is5682s = 1;
+ } else {
+ codec_node = of_find_compatible_node(NULL, NULL, "realtek,rt5682i");
+ }
+
soc_card_data = devm_kzalloc(&pdev->dev, sizeof(*card_data), GFP_KERNEL);
if (!soc_card_data)
return -ENOMEM;
@@ -1488,12 +1488,27 @@ static int mt8195_mt6359_dev_probe(struct platform_device *pdev)
dai_link->codecs->dai_name = "i2s-hifi";
dai_link->init = mt8195_hdmi_codec_init;
}
- } else if (strcmp(dai_link->name, "ETDM1_OUT_BE") == 0 ||
- strcmp(dai_link->name, "ETDM2_IN_BE") == 0) {
- dai_link->codecs->name =
- is5682s ? RT5682S_DEV0_NAME : RT5682_DEV0_NAME;
- dai_link->codecs->dai_name =
- is5682s ? RT5682S_CODEC_DAI : RT5682_CODEC_DAI;
+ } else if (strcmp(dai_link->name, "ETDM1_OUT_BE") == 0) {
+ if (!codec_node) {
+ dev_err(&pdev->dev, "Codec not found!\n");
+ } else {
+ dai_link->codecs->of_node = codec_node;
+ dai_link->codecs->name = NULL;
+ dai_link->codecs->dai_name =
+ is5682s ? RT5682S_CODEC_DAI : RT5682_CODEC_DAI;
+ dai_link->init = mt8195_rt5682_init;
+ dai_link->ops = &mt8195_rt5682_etdm_ops;
+ }
+ } else if (strcmp(dai_link->name, "ETDM2_IN_BE") == 0) {
+ if (!codec_node) {
+ dev_err(&pdev->dev, "Codec not found!\n");
+ } else {
+ dai_link->codecs->of_node = codec_node;
+ dai_link->codecs->name = NULL;
+ dai_link->codecs->dai_name =
+ is5682s ? RT5682S_CODEC_DAI : RT5682_CODEC_DAI;
+ dai_link->ops = &mt8195_rt5682_etdm_ops;
+ }
} else if (strcmp(dai_link->name, "DL_SRC_BE") == 0 ||
strcmp(dai_link->name, "UL_SRC1_BE") == 0 ||
strcmp(dai_link->name, "UL_SRC2_BE") == 0) {
diff --git a/sound/soc/qcom/lpass-cdc-dma.c b/sound/soc/qcom/lpass-cdc-dma.c
index 48b03e60e3a3..8106c586f68a 100644
--- a/sound/soc/qcom/lpass-cdc-dma.c
+++ b/sound/soc/qcom/lpass-cdc-dma.c
@@ -259,7 +259,7 @@ static int lpass_cdc_dma_daiops_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *soc_runtime = snd_soc_substream_to_rtd(substream);
- struct lpaif_dmactl *dmactl;
+ struct lpaif_dmactl *dmactl = NULL;
int ret = 0, id;
switch (cmd) {
diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
index 052e40cb38fe..00bbd291be5c 100644
--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
@@ -123,7 +123,7 @@ static struct snd_pcm_hardware q6apm_dai_hardware_playback = {
.fifo_size = 0,
};
-static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv)
+static void event_handler(uint32_t opcode, uint32_t token, void *payload, void *priv)
{
struct q6apm_dai_rtd *prtd = priv;
struct snd_pcm_substream *substream = prtd->substream;
@@ -157,7 +157,7 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, vo
}
static void event_handler_compr(uint32_t opcode, uint32_t token,
- uint32_t *payload, void *priv)
+ void *payload, void *priv)
{
struct q6apm_dai_rtd *prtd = priv;
struct snd_compr_stream *substream = prtd->cstream;
@@ -352,7 +352,7 @@ static int q6apm_dai_open(struct snd_soc_component *component,
spin_lock_init(&prtd->lock);
prtd->substream = substream;
- prtd->graph = q6apm_graph_open(dev, (q6apm_cb)event_handler, prtd, graph_id);
+ prtd->graph = q6apm_graph_open(dev, event_handler, prtd, graph_id);
if (IS_ERR(prtd->graph)) {
dev_err(dev, "%s: Could not allocate memory\n", __func__);
ret = PTR_ERR(prtd->graph);
@@ -496,7 +496,7 @@ static int q6apm_dai_compr_open(struct snd_soc_component *component,
return -ENOMEM;
prtd->cstream = stream;
- prtd->graph = q6apm_graph_open(dev, (q6apm_cb)event_handler_compr, prtd, graph_id);
+ prtd->graph = q6apm_graph_open(dev, event_handler_compr, prtd, graph_id);
if (IS_ERR(prtd->graph)) {
ret = PTR_ERR(prtd->graph);
kfree(prtd);
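Changing the q6apm handler prototypes to take void *payload lets the (q6apm_cb) casts be dropped: calling a function through a mismatched function-pointer type is undefined behaviour, so it is the data pointer, not the function pointer, that should be cast. A small stand-alone sketch:

/* hypothetical callback type, mirroring q6apm_cb's void *payload argument */
typedef void (*example_cb)(unsigned int opcode, void *payload, void *priv);

static void example_handler(unsigned int opcode, void *payload, void *priv)
{
	unsigned int *words = payload;	/* cast the data, not the function */

	(void)opcode;
	(void)words;
	(void)priv;
}

static example_cb example_get_cb(void)
{
	return example_handler;		/* matches: no function-pointer cast */
}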
diff --git a/sound/soc/qcom/sc8280xp.c b/sound/soc/qcom/sc8280xp.c
index ed4bb551bfbb..b7fd503a1666 100644
--- a/sound/soc/qcom/sc8280xp.c
+++ b/sound/soc/qcom/sc8280xp.c
@@ -32,12 +32,14 @@ static int sc8280xp_snd_init(struct snd_soc_pcm_runtime *rtd)
case WSA_CODEC_DMA_RX_0:
case WSA_CODEC_DMA_RX_1:
/*
- * set limit of 0dB on Digital Volume for Speakers,
- * this can prevent damage of speakers to some extent without
- * active speaker protection
+ * Set limit of -3 dB on Digital Volume and 0 dB on PA Volume
+ * to reduce the risk of speaker damage until we have active
+ * speaker protection in place.
*/
- snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 84);
- snd_soc_limit_volume(card, "WSA_RX1 Digital Volume", 84);
+ snd_soc_limit_volume(card, "WSA_RX0 Digital Volume", 81);
+ snd_soc_limit_volume(card, "WSA_RX1 Digital Volume", 81);
+ snd_soc_limit_volume(card, "SpkrLeft PA Volume", 17);
+ snd_soc_limit_volume(card, "SpkrRight PA Volume", 17);
break;
default:
break;
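
The limit arguments above are raw control indices, not decibels: with the linear TLV scale these WSA controls use, a dB limit maps to an index as in this sketch (min_db and step_db are illustrative assumptions, not values read from the driver; with min_db = -84 and step_db = 1, index 84 is 0 dB and index 81 is -3 dB, matching the patch):

    /* Hedged sketch: convert a dB limit to a raw ALSA control index,
     * assuming a linear scale where index 0 == min_db.
     */
    static inline int db_limit_to_index(int limit_db, int min_db, int step_db)
    {
            return (limit_db - min_db) / step_db;
    }
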
diff --git a/sound/soc/soc-card.c b/sound/soc/soc-card.c
index 285ab4c9c716..8a2f163da6bc 100644
--- a/sound/soc/soc-card.c
+++ b/sound/soc/soc-card.c
@@ -5,6 +5,9 @@
// Copyright (C) 2019 Renesas Electronics Corp.
// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
//
+
+#include <linux/lockdep.h>
+#include <linux/rwsem.h>
#include <sound/soc.h>
#include <sound/jack.h>
@@ -26,12 +29,15 @@ static inline int _soc_card_ret(struct snd_soc_card *card,
return ret;
}
-struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
- const char *name)
+struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
+ const char *name)
{
struct snd_card *card = soc_card->snd_card;
struct snd_kcontrol *kctl;
+	/* controls_rwsem must be held, for read or write */
+ lockdep_assert_held(&card->controls_rwsem);
+
if (unlikely(!name))
return NULL;
@@ -40,6 +46,20 @@ struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
return kctl;
return NULL;
}
+EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol_locked);
+
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ const char *name)
+{
+ struct snd_card *card = soc_card->snd_card;
+ struct snd_kcontrol *kctl;
+
+ down_read(&card->controls_rwsem);
+ kctl = snd_soc_card_get_kcontrol_locked(soc_card, name);
+ up_read(&card->controls_rwsem);
+
+ return kctl;
+}
EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);
static int jack_new(struct snd_soc_card *card, const char *id, int type,
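
The split above is the usual _locked/unlocked kernel API pattern: callers that already hold card->snd_card->controls_rwsem call the _locked variant, and everyone else calls the wrapper that takes the rwsem itself. A hedged usage sketch (the control name is hypothetical):

    struct snd_kcontrol *kctl;

    /* Caller that does not hold controls_rwsem: the wrapper locks. */
    kctl = snd_soc_card_get_kcontrol(card, "Headphone Volume");

    /* Caller already under down_read(&card->snd_card->controls_rwsem):
     * the lockdep_assert_held() in the _locked variant enforces the
     * contract at runtime when lockdep is enabled.
     */
    kctl = snd_soc_card_get_kcontrol_locked(card, "Headphone Volume");
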
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f8524b5bfb33..516350533e73 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1037,7 +1037,7 @@ component_dai_empty:
return -EINVAL;
}
-#define MAX_DEFAULT_CH_MAP_SIZE 7
+#define MAX_DEFAULT_CH_MAP_SIZE 8
static struct snd_soc_dai_link_ch_map default_ch_map_sync[MAX_DEFAULT_CH_MAP_SIZE] = {
{ .cpu = 0, .codec = 0 },
{ .cpu = 1, .codec = 1 },
@@ -1046,6 +1046,7 @@ static struct snd_soc_dai_link_ch_map default_ch_map_sync[MAX_DEFAULT_CH_MAP_SIZ
{ .cpu = 4, .codec = 4 },
{ .cpu = 5, .codec = 5 },
{ .cpu = 6, .codec = 6 },
+ { .cpu = 7, .codec = 7 },
};
static struct snd_soc_dai_link_ch_map default_ch_map_1cpu[MAX_DEFAULT_CH_MAP_SIZE] = {
{ .cpu = 0, .codec = 0 },
@@ -1055,6 +1056,7 @@ static struct snd_soc_dai_link_ch_map default_ch_map_1cpu[MAX_DEFAULT_CH_MAP_SIZ
{ .cpu = 0, .codec = 4 },
{ .cpu = 0, .codec = 5 },
{ .cpu = 0, .codec = 6 },
+ { .cpu = 0, .codec = 7 },
};
static struct snd_soc_dai_link_ch_map default_ch_map_1codec[MAX_DEFAULT_CH_MAP_SIZE] = {
{ .cpu = 0, .codec = 0 },
@@ -1064,6 +1066,7 @@ static struct snd_soc_dai_link_ch_map default_ch_map_1codec[MAX_DEFAULT_CH_MAP_S
{ .cpu = 4, .codec = 0 },
{ .cpu = 5, .codec = 0 },
{ .cpu = 6, .codec = 0 },
+ { .cpu = 7, .codec = 0 },
};
static int snd_soc_compensate_channel_connection_map(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
diff --git a/sound/soc/sof/amd/acp-ipc.c b/sound/soc/sof/amd/acp-ipc.c
index 2743f07a5e08..b44b1b1adb6e 100644
--- a/sound/soc/sof/amd/acp-ipc.c
+++ b/sound/soc/sof/amd/acp-ipc.c
@@ -188,11 +188,13 @@ irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context)
dsp_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack_write);
if (dsp_ack) {
+ spin_lock_irq(&sdev->ipc_lock);
/* handle immediate reply from DSP core */
acp_dsp_ipc_get_reply(sdev);
snd_sof_ipc_reply(sdev, 0);
/* set the done bit */
acp_dsp_ipc_dsp_done(sdev);
+ spin_unlock_irq(&sdev->ipc_lock);
ipc_irq = true;
}
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index 32a741fcb84f..07632ae6ccf5 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -355,21 +355,20 @@ static irqreturn_t acp_irq_thread(int irq, void *context)
unsigned int count = ACP_HW_SEM_RETRY_COUNT;
spin_lock_irq(&sdev->ipc_lock);
- while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
- /* Wait until acquired HW Semaphore lock or timeout */
- count--;
- if (!count) {
- dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
- spin_unlock_irq(&sdev->ipc_lock);
- return IRQ_NONE;
- }
+	/* Poll until the HW semaphore lock is acquired or the retry count runs out */
+ while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset) && --count)
+ ;
+ spin_unlock_irq(&sdev->ipc_lock);
+
+ if (!count) {
+ dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
+ return IRQ_NONE;
}
sof_ops(sdev)->irq_thread(irq, sdev);
/* Unlock or Release HW Semaphore */
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
- spin_unlock_irq(&sdev->ipc_lock);
return IRQ_HANDLED;
};
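
The rewritten loop is the bounded-poll idiom: decrement the retry counter in the loop condition, drop the spinlock, then use the leftover counter to tell success from timeout. A minimal standalone sketch (read_hw_sem() is a hypothetical stand-in for the snd_sof_dsp_read() call):

    static unsigned int read_hw_sem(void);  /* hypothetical MMIO read */

    /* Returns true if the semaphore was acquired within 'count' polls. */
    static bool poll_hw_semaphore(unsigned int count)
    {
            while (read_hw_sem() && --count)
                    ;
            return count != 0;
    }
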
diff --git a/sound/soc/sof/intel/pci-lnl.c b/sound/soc/sof/intel/pci-lnl.c
index 78a57eb9cbc3..b26ffe767fab 100644
--- a/sound/soc/sof/intel/pci-lnl.c
+++ b/sound/soc/sof/intel/pci-lnl.c
@@ -36,7 +36,7 @@ static const struct sof_dev_desc lnl_desc = {
[SOF_IPC_TYPE_4] = "intel/sof-ipc4/lnl",
},
.default_tplg_path = {
- [SOF_IPC_TYPE_4] = "intel/sof-ace-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_4] = "sof-lnl.ri",
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index 0660d4b2ac96..a361ee9d1107 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -33,18 +33,18 @@ static const struct sof_dev_desc tgl_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/tgl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/tgl",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/tgl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/tgl",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-tgl.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-tgl.ri",
},
.nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
.ops = &sof_tgl_ops,
@@ -66,18 +66,18 @@ static const struct sof_dev_desc tglh_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/tgl-h",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/tgl-h",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/tgl-h",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/tgl-h",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-tgl-h.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-tgl-h.ri",
},
.nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
.ops = &sof_tgl_ops,
@@ -98,18 +98,18 @@ static const struct sof_dev_desc ehl_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/ehl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/ehl",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/ehl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/ehl",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-ehl.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-ehl.ri",
},
.nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
.ops = &sof_tgl_ops,
@@ -131,18 +131,18 @@ static const struct sof_dev_desc adls_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/adl-s",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/adl-s",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/adl-s",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/adl-s",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-adl-s.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-adl-s.ri",
},
.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
.ops = &sof_tgl_ops,
@@ -164,18 +164,18 @@ static const struct sof_dev_desc adl_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/adl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/adl",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/adl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/adl",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-adl.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-adl.ri",
},
.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
.ops = &sof_tgl_ops,
@@ -197,18 +197,18 @@ static const struct sof_dev_desc adl_n_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/adl-n",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/adl-n",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/adl-n",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/adl-n",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-adl-n.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-adl-n.ri",
},
.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
.ops = &sof_tgl_ops,
@@ -230,18 +230,18 @@ static const struct sof_dev_desc rpls_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/rpl-s",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/rpl-s",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/rpl-s",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/rpl-s",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-rpl-s.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-rpl-s.ri",
},
.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
.ops = &sof_tgl_ops,
@@ -263,18 +263,18 @@ static const struct sof_dev_desc rpl_desc = {
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC_TYPE_3] = "intel/sof",
- [SOF_IPC_TYPE_4] = "intel/avs/rpl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4/rpl",
},
.default_lib_path = {
- [SOF_IPC_TYPE_4] = "intel/avs-lib/rpl",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/rpl",
},
.default_tplg_path = {
[SOF_IPC_TYPE_3] = "intel/sof-tplg",
- [SOF_IPC_TYPE_4] = "intel/avs-tplg",
+ [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
},
.default_fw_filename = {
[SOF_IPC_TYPE_3] = "sof-rpl.ri",
- [SOF_IPC_TYPE_4] = "dsp_basefw.bin",
+ [SOF_IPC_TYPE_4] = "sof-rpl.ri",
},
.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
.ops = &sof_tgl_ops,
diff --git a/sound/soc/sof/ipc3-dtrace.c b/sound/soc/sof/ipc3-dtrace.c
index 93b189c2d2ee..0dca139322f3 100644
--- a/sound/soc/sof/ipc3-dtrace.c
+++ b/sound/soc/sof/ipc3-dtrace.c
@@ -137,7 +137,6 @@ static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
dev_err(sdev->dev,
"Parsing filter entry '%s' failed with %d\n",
entry, entry_len);
- kfree(*out);
return -EINVAL;
}
}
@@ -209,13 +208,13 @@ static ssize_t dfsentry_trace_filter_write(struct file *file, const char __user
ret = ipc3_trace_update_filter(sdev, num_elems, elems);
if (ret < 0) {
dev_err(sdev->dev, "Filter update failed: %d\n", ret);
- kfree(elems);
goto error;
}
}
ret = count;
error:
kfree(string);
+ kfree(elems);
return ret;
}
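
With kfree(elems) moved to the common exit, the two removed error-path kfree() calls would each have been a double free; the fix concentrates ownership at a single release point. A hedged sketch of the pattern, with hypothetical do_parse()/do_apply() helpers:

    static int parse_and_apply(const char *input)
    {
            char *buf;
            int *elems = NULL;
            int ret;

            buf = kstrdup(input, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            ret = do_parse(buf, &elems);
            if (ret < 0)
                    goto out;       /* no kfree(elems) here */

            ret = do_apply(elems);
    out:
            kfree(buf);
            kfree(elems);           /* exactly once; kfree(NULL) is a no-op */
            return ret;
    }
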
diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c
index a8832a1c1a24..d47698f4be2d 100644
--- a/sound/soc/sof/ipc3-topology.c
+++ b/sound/soc/sof/ipc3-topology.c
@@ -2360,27 +2360,16 @@ static int sof_tear_down_left_over_pipelines(struct snd_sof_dev *sdev)
return 0;
}
-/*
- * For older firmware, this function doesn't free widgets for static pipelines during suspend.
- * It only resets use_count for all widgets.
- */
-static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verify)
+static int sof_ipc3_free_widgets_in_list(struct snd_sof_dev *sdev, bool include_scheduler,
+ bool *dyn_widgets, bool verify)
{
struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
struct snd_sof_widget *swidget;
- struct snd_sof_route *sroute;
- bool dyn_widgets = false;
int ret;
- /*
- * This function is called during suspend and for one-time topology verification during
- * first boot. In both cases, there is no need to protect swidget->use_count and
- * sroute->setup because during suspend all running streams are suspended and during
- * topology loading the sound card unavailable to open PCMs.
- */
list_for_each_entry(swidget, &sdev->widget_list, list) {
if (swidget->dynamic_pipeline_widget) {
- dyn_widgets = true;
+ *dyn_widgets = true;
continue;
}
@@ -2395,11 +2384,49 @@ static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verif
continue;
}
+ if (include_scheduler && swidget->id != snd_soc_dapm_scheduler)
+ continue;
+
+ if (!include_scheduler && swidget->id == snd_soc_dapm_scheduler)
+ continue;
+
ret = sof_widget_free(sdev, swidget);
if (ret < 0)
return ret;
}
+ return 0;
+}
+
+/*
+ * For older firmware, this function doesn't free widgets for static pipelines during suspend.
+ * It only resets use_count for all widgets.
+ */
+static int sof_ipc3_tear_down_all_pipelines(struct snd_sof_dev *sdev, bool verify)
+{
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
+ struct snd_sof_widget *swidget;
+ struct snd_sof_route *sroute;
+ bool dyn_widgets = false;
+ int ret;
+
+ /*
+ * This function is called during suspend and for one-time topology verification during
+ * first boot. In both cases, there is no need to protect swidget->use_count and
+ * sroute->setup because during suspend all running streams are suspended and during
+	 * topology loading the sound card is unavailable to open PCMs. Do not free the scheduler
+ * widgets yet so that the secondary cores do not get powered down before all the widgets
+ * associated with the scheduler are freed.
+ */
+ ret = sof_ipc3_free_widgets_in_list(sdev, false, &dyn_widgets, verify);
+ if (ret < 0)
+ return ret;
+
+ /* free all the scheduler widgets now */
+ ret = sof_ipc3_free_widgets_in_list(sdev, true, &dyn_widgets, verify);
+ if (ret < 0)
+ return ret;
+
/*
* Tear down all pipelines associated with PCMs that did not get suspended
* and unset the prepare flag so that they can be set up again during resume.
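
The helper's two continue checks select exactly one class of widget per pass, which collapses to a single equality test; shown here only to make the pass logic explicit (an equivalent rewrite, not the patch's code):

    bool is_sched = (swidget->id == snd_soc_dapm_scheduler);

    /* Pass 1 (include_scheduler == false) frees non-scheduler widgets;
     * pass 2 (include_scheduler == true) frees scheduler widgets only,
     * so secondary cores stay powered until their users are gone.
     */
    if (include_scheduler != is_sched)
            continue;
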
diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c
index fb40378ad084..c03dd513fbff 100644
--- a/sound/soc/sof/ipc3.c
+++ b/sound/soc/sof/ipc3.c
@@ -1067,7 +1067,7 @@ static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev)
return;
}
- if (hdr.size < sizeof(hdr)) {
+ if (hdr.size < sizeof(hdr) || hdr.size > SOF_IPC_MSG_MAX_SIZE) {
dev_err(sdev->dev, "The received message size is invalid\n");
return;
}
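
The added check completes the classic two-sided validation of an externally supplied length: at least as large as the header it claims to contain, and no larger than the receive buffer. A generic sketch:

    /* Validate an untrusted message size against both bounds. */
    static bool msg_size_valid(size_t size, size_t hdr_size, size_t max_size)
    {
            return size >= hdr_size && size <= max_size;
    }

    /* Mirrors the check above:
     *   msg_size_valid(hdr.size, sizeof(hdr), SOF_IPC_MSG_MAX_SIZE)
     */
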
diff --git a/sound/soc/sof/ipc4-loader.c b/sound/soc/sof/ipc4-loader.c
index 3539b0a66e1b..c79479afa8d0 100644
--- a/sound/soc/sof/ipc4-loader.c
+++ b/sound/soc/sof/ipc4-loader.c
@@ -482,13 +482,10 @@ void sof_ipc4_update_cpc_from_manifest(struct snd_sof_dev *sdev,
msg = "No CPC match in the firmware file's manifest";
no_cpc:
- dev_warn(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
- fw_module->man4_module_entry.name,
- &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
- basecfg->obs);
- dev_warn_once(sdev->dev, "Please try to update the firmware.\n");
- dev_warn_once(sdev->dev, "If the issue persists, file a bug at\n");
- dev_warn_once(sdev->dev, "https://github.com/thesofproject/sof/issues/\n");
+ dev_dbg(sdev->dev, "%s (UUID: %pUL): %s (ibs/obs: %u/%u)\n",
+ fw_module->man4_module_entry.name,
+ &fw_module->man4_module_entry.uuid, msg, basecfg->ibs,
+ basecfg->obs);
}
const struct sof_ipc_fw_loader_ops ipc4_loader_ops = {
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index 39039a647cca..07eb5c6d4adf 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -413,7 +413,18 @@ skip_pause_transition:
ret = sof_ipc4_set_multi_pipeline_state(sdev, state, trigger_list);
if (ret < 0) {
dev_err(sdev->dev, "failed to set final state %d for all pipelines\n", state);
- goto free;
+ /*
+		 * Workaround: if the firmware crashed while the pipelines were
+		 * being set to the reset state, ignore the error code and
+		 * reset it to 0.
+		 * Since the firmware has crashed, no more IPC messages will be
+		 * sent and errors will be printed, but the widget state will
+		 * be correct for the next boot.
+ */
+ if (sdev->fw_state != SOF_FW_CRASHED || state != SOF_IPC4_PIPE_RESET)
+ goto free;
+
+ ret = 0;
}
/* update RUNNING/RESET state for all pipelines that were just triggered */
@@ -768,10 +779,8 @@ static void sof_ipc4_build_time_info(struct snd_sof_dev *sdev, struct snd_sof_pc
info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_evad_reading_slot) +
sdev->fw_info_box.offset;
sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
- if (llp_slot.node_id != dai_copier->data.gtw_cfg.node_id) {
- dev_info(sdev->dev, "no llp found, fall back to default HDA path");
+ if (llp_slot.node_id != dai_copier->data.gtw_cfg.node_id)
info->llp_offset = 0;
- }
}
static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
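
Read in its positive form, the guard clears the error only in one narrow case; an equivalent rewrite of the condition (for clarity only):

    bool swallow_error = sdev->fw_state == SOF_FW_CRASHED &&
                         state == SOF_IPC4_PIPE_RESET;

    if (!swallow_error)
            goto free;
    ret = 0;        /* crashed FW + reset request: ignore the failure */
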
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 702386823d17..f41c30955857 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -577,6 +577,11 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
.compatible = "allwinner,sun50i-h6-spdif",
.data = &sun50i_h6_spdif_quirks,
},
+ {
+ .compatible = "allwinner,sun50i-h616-spdif",
+ /* Essentially the same as the H6, but without RX */
+ .data = &sun50i_h6_spdif_quirks,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 33db334e6556..60fcb872a80b 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -261,6 +261,8 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
int ret, i, cur, err, pins, clock_id;
const u8 *sources;
int proto = fmt->protocol;
+ bool readable, writeable;
+ u32 bmControls;
entity_id &= 0xff;
@@ -292,11 +294,27 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
sources = GET_VAL(selector, proto, baCSourceID);
cur = 0;
+ if (proto == UAC_VERSION_3)
+ bmControls = le32_to_cpu(*(__le32 *)(&selector->v3.baCSourceID[0] + pins));
+ else
+ bmControls = *(__u8 *)(&selector->v2.baCSourceID[0] + pins);
+
+ readable = uac_v2v3_control_is_readable(bmControls,
+ UAC2_CX_CLOCK_SELECTOR);
+ writeable = uac_v2v3_control_is_writeable(bmControls,
+ UAC2_CX_CLOCK_SELECTOR);
+
if (pins == 1) {
ret = 1;
goto find_source;
}
+	/* for now, just warn about the buggy device */
+ if (!readable)
+ usb_audio_warn(chip,
+ "%s(): clock selector control is not readable, id %d\n",
+ __func__, clock_id);
+
/* the entity ID we are looking at is a selector.
* find out what it currently selects */
ret = uac_clock_selector_get_val(chip, clock_id);
@@ -325,17 +343,29 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
visited, validate);
if (ret > 0) {
/* Skip setting clock selector again for some devices */
- if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR)
+ if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR ||
+ !writeable)
return ret;
err = uac_clock_selector_set_val(chip, entity_id, cur);
- if (err < 0)
+ if (err < 0) {
+ if (pins == 1) {
+ usb_audio_dbg(chip,
+ "%s(): selector returned an error, "
+ "assuming a firmware bug, id %d, ret %d\n",
+ __func__, clock_id, err);
+ return ret;
+ }
return err;
+ }
}
if (!validate || ret > 0 || !chip->autoclock)
return ret;
find_others:
+ if (!writeable)
+ return -ENXIO;
+
/* The current clock source is invalid, try others. */
for (i = 1; i <= pins; i++) {
if (i == cur)
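
The new readable/writeable probing relies on the UAC2/UAC3 layout of bmControls: each control is a 2-bit field where 0b00 means not present, 0b01 read-only, and 0b11 host-programmable. A standalone sketch of the semantics (hedged; the in-kernel uac_v2v3_control_is_*() helpers may differ in detail):

    #include <stdbool.h>
    #include <stdint.h>

    static bool control_is_readable(uint32_t bm_controls, uint8_t control)
    {
            return (bm_controls >> ((control - 1) * 2)) & 0x1;
    }

    static bool control_is_writeable(uint32_t bm_controls, uint8_t control)
    {
            return ((bm_controls >> ((control - 1) * 2)) & 0x3) == 0x3;
    }
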
diff --git a/sound/usb/format.c b/sound/usb/format.c
index ab5fed9f55b6..3b45d0ee7693 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -470,9 +470,11 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
int clock)
{
struct usb_device *dev = chip->dev;
+ struct usb_host_interface *alts;
unsigned int *table;
unsigned int nr_rates;
int i, err;
+ u32 bmControls;
/* performing the rate verification may lead to unexpected USB bus
* behavior afterwards by some unknown reason. Do this only for the
@@ -481,6 +483,24 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES))
return 0; /* don't perform the validation as default */
+ alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting);
+ if (!alts)
+ return 0;
+
+ if (fp->protocol == UAC_VERSION_3) {
+ struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc(
+ alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+ bmControls = le32_to_cpu(as->bmControls);
+ } else {
+ struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(
+ alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
+ bmControls = as->bmControls;
+ }
+
+ if (!uac_v2v3_control_is_readable(bmControls,
+ UAC2_AS_VAL_ALT_SETTINGS))
+ return 0;
+
table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 6b0993258e03..c1f2e5a03de9 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1742,50 +1742,44 @@ static void snd_usbmidi_get_port_info(struct snd_rawmidi *rmidi, int number,
}
}
-static struct usb_midi_in_jack_descriptor *find_usb_in_jack_descriptor(
- struct usb_host_interface *hostif, uint8_t jack_id)
+/* return the iJack string index for the corresponding jack ID */
+static int find_usb_ijack(struct usb_host_interface *hostif, uint8_t jack_id)
{
unsigned char *extra = hostif->extra;
int extralen = hostif->extralen;
+ struct usb_descriptor_header *h;
+ struct usb_midi_out_jack_descriptor *outjd;
+ struct usb_midi_in_jack_descriptor *injd;
+ size_t sz;
while (extralen > 4) {
- struct usb_midi_in_jack_descriptor *injd =
- (struct usb_midi_in_jack_descriptor *)extra;
+ h = (struct usb_descriptor_header *)extra;
+ if (h->bDescriptorType != USB_DT_CS_INTERFACE)
+ goto next;
+ outjd = (struct usb_midi_out_jack_descriptor *)h;
+ if (h->bLength >= sizeof(*outjd) &&
+ outjd->bDescriptorSubtype == UAC_MIDI_OUT_JACK &&
+ outjd->bJackID == jack_id) {
+ sz = USB_DT_MIDI_OUT_SIZE(outjd->bNrInputPins);
+ if (outjd->bLength < sz)
+ goto next;
+ return *(extra + sz - 1);
+ }
+
+ injd = (struct usb_midi_in_jack_descriptor *)h;
if (injd->bLength >= sizeof(*injd) &&
- injd->bDescriptorType == USB_DT_CS_INTERFACE &&
injd->bDescriptorSubtype == UAC_MIDI_IN_JACK &&
- injd->bJackID == jack_id)
- return injd;
- if (!extra[0])
- break;
- extralen -= extra[0];
- extra += extra[0];
- }
- return NULL;
-}
-
-static struct usb_midi_out_jack_descriptor *find_usb_out_jack_descriptor(
- struct usb_host_interface *hostif, uint8_t jack_id)
-{
- unsigned char *extra = hostif->extra;
- int extralen = hostif->extralen;
+ injd->bJackID == jack_id)
+ return injd->iJack;
- while (extralen > 4) {
- struct usb_midi_out_jack_descriptor *outjd =
- (struct usb_midi_out_jack_descriptor *)extra;
-
- if (outjd->bLength >= sizeof(*outjd) &&
- outjd->bDescriptorType == USB_DT_CS_INTERFACE &&
- outjd->bDescriptorSubtype == UAC_MIDI_OUT_JACK &&
- outjd->bJackID == jack_id)
- return outjd;
+next:
if (!extra[0])
break;
extralen -= extra[0];
extra += extra[0];
}
- return NULL;
+ return 0;
}
static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
@@ -1796,13 +1790,10 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
const char *name_format;
struct usb_interface *intf;
struct usb_host_interface *hostif;
- struct usb_midi_in_jack_descriptor *injd;
- struct usb_midi_out_jack_descriptor *outjd;
uint8_t jack_name_buf[32];
uint8_t *default_jack_name = "MIDI";
uint8_t *jack_name = default_jack_name;
uint8_t iJack;
- size_t sz;
int res;
struct snd_rawmidi_substream *substream =
@@ -1816,21 +1807,7 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi,
intf = umidi->iface;
if (intf && jack_id >= 0) {
hostif = intf->cur_altsetting;
- iJack = 0;
- if (stream != SNDRV_RAWMIDI_STREAM_OUTPUT) {
- /* in jacks connect to outs */
- outjd = find_usb_out_jack_descriptor(hostif, jack_id);
- if (outjd) {
- sz = USB_DT_MIDI_OUT_SIZE(outjd->bNrInputPins);
- if (outjd->bLength >= sz)
- iJack = *(((uint8_t *) outjd) + sz - sizeof(uint8_t));
- }
- } else {
- /* and out jacks connect to ins */
- injd = find_usb_in_jack_descriptor(hostif, jack_id);
- if (injd)
- iJack = injd->iJack;
- }
+ iJack = find_usb_ijack(hostif, jack_id);
if (iJack != 0) {
res = usb_string(umidi->dev, iJack, jack_name_buf,
ARRAY_SIZE(jack_name_buf));
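
find_usb_ijack() above is a standard walk over the interface's class-specific descriptor blob: every descriptor leads with bLength and bDescriptorType, the cursor advances by bLength, and a zero length aborts the walk to avoid spinning forever on a malformed device. A generic skeleton of the loop:

    /* Walk a USB extra-descriptor blob; inspect each (type, body) pair. */
    static void walk_descriptors(unsigned char *extra, int extralen)
    {
            while (extralen > 2) {  /* need at least bLength + bDescriptorType */
                    unsigned char blen = extra[0];

                    if (!blen)      /* malformed: would loop forever */
                            break;
                    /* ... check extra[1] (the type) and the body here ... */
                    extralen -= blen;
                    extra += blen;
            }
    }
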
diff --git a/sound/usb/midi2.c b/sound/usb/midi2.c
index 1ec177fe284e..820d3e4b672a 100644
--- a/sound/usb/midi2.c
+++ b/sound/usb/midi2.c
@@ -1085,7 +1085,7 @@ int snd_usb_midi_v2_create(struct snd_usb_audio *chip,
}
if ((quirk && quirk->type != QUIRK_MIDI_STANDARD_INTERFACE) ||
iface->num_altsetting < 2) {
- usb_audio_info(chip, "Quirk or no altest; falling back to MIDI 1.0\n");
+ usb_audio_info(chip, "Quirk or no altset; falling back to MIDI 1.0\n");
goto fallback_to_midi1;
}
hostif = &iface->altsetting[1];
diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
index 1de3ddc50eb6..6de605a601e5 100644
--- a/sound/usb/mixer_scarlett2.c
+++ b/sound/usb/mixer_scarlett2.c
@@ -5361,9 +5361,9 @@ static int scarlett2_add_line_out_ctls(struct usb_mixer_interface *mixer)
if (private->vol_sw_hw_switch[index])
scarlett2_vol_ctl_set_writable(mixer, i, 0);
- snprintf(s, sizeof(s),
- "Line Out %02d Volume Control Playback Enum",
- i + 1);
+ scnprintf(s, sizeof(s),
+ "Line Out %02d Volume Control Playback Enum",
+ i + 1);
err = scarlett2_add_new_ctl(mixer,
&scarlett2_sw_hw_enum_ctl,
i, 1, s,
@@ -5406,8 +5406,8 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
/* Add input level (line/inst) controls */
for (i = 0; i < info->level_input_count; i++) {
- snprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
- "Level", "Enum");
+ scnprintf(s, sizeof(s), fmt, i + 1 + info->level_input_first,
+ "Level", "Enum");
err = scarlett2_add_new_ctl(mixer, &scarlett2_level_enum_ctl,
i, 1, s, &private->level_ctls[i]);
if (err < 0)
@@ -5416,7 +5416,7 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
/* Add input pad controls */
for (i = 0; i < info->pad_input_count; i++) {
- snprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
+ scnprintf(s, sizeof(s), fmt, i + 1, "Pad", "Switch");
err = scarlett2_add_new_ctl(mixer, &scarlett2_pad_ctl,
i, 1, s, &private->pad_ctls[i]);
if (err < 0)
@@ -5425,8 +5425,8 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
/* Add input air controls */
for (i = 0; i < info->air_input_count; i++) {
- snprintf(s, sizeof(s), fmt, i + 1 + info->air_input_first,
- "Air", info->air_option ? "Enum" : "Switch");
+ scnprintf(s, sizeof(s), fmt, i + 1 + info->air_input_first,
+ "Air", info->air_option ? "Enum" : "Switch");
err = scarlett2_add_new_ctl(
mixer, &scarlett2_air_ctl[info->air_option],
i, 1, s, &private->air_ctls[i]);
@@ -5481,9 +5481,9 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
for (i = 0; i < info->gain_input_count; i++) {
if (i % 2) {
- snprintf(s, sizeof(s),
- "Line In %d-%d Link Capture Switch",
- i, i + 1);
+ scnprintf(s, sizeof(s),
+ "Line In %d-%d Link Capture Switch",
+ i, i + 1);
err = scarlett2_add_new_ctl(
mixer, &scarlett2_input_link_ctl,
i / 2, 1, s,
@@ -5492,30 +5492,30 @@ static int scarlett2_add_line_in_ctls(struct usb_mixer_interface *mixer)
return err;
}
- snprintf(s, sizeof(s), fmt, i + 1,
- "Gain", "Volume");
+ scnprintf(s, sizeof(s), fmt, i + 1,
+ "Gain", "Volume");
err = scarlett2_add_new_ctl(
mixer, &scarlett2_input_gain_ctl,
i, 1, s, &private->input_gain_ctls[i]);
if (err < 0)
return err;
- snprintf(s, sizeof(s), fmt, i + 1,
- "Autogain", "Switch");
+ scnprintf(s, sizeof(s), fmt, i + 1,
+ "Autogain", "Switch");
err = scarlett2_add_new_ctl(
mixer, &scarlett2_autogain_switch_ctl,
i, 1, s, &private->autogain_ctls[i]);
if (err < 0)
return err;
- snprintf(s, sizeof(s), fmt, i + 1,
- "Autogain Status", "Enum");
+ scnprintf(s, sizeof(s), fmt, i + 1,
+ "Autogain Status", "Enum");
err = scarlett2_add_new_ctl(
mixer, &scarlett2_autogain_status_ctl,
i, 1, s, &private->autogain_status_ctls[i]);
- snprintf(s, sizeof(s), fmt, i + 1,
- "Safe", "Switch");
+ scnprintf(s, sizeof(s), fmt, i + 1,
+ "Safe", "Switch");
err = scarlett2_add_new_ctl(
mixer, &scarlett2_safe_ctl,
i, 1, s, &private->safe_ctls[i]);
@@ -5902,8 +5902,8 @@ static int scarlett2_add_direct_monitor_ctls(struct usb_mixer_interface *mixer)
for (k = 0; k < private->num_mix_in; k++, index++) {
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- snprintf(name, sizeof(name), format,
- mix_type, 'A' + j, k + 1);
+ scnprintf(name, sizeof(name), format,
+ mix_type, 'A' + j, k + 1);
err = scarlett2_add_new_ctl(
mixer, &scarlett2_monitor_mix_ctl,
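
The snprintf() to scnprintf() conversions matter when the return value is consumed: snprintf() returns the length the output would have had, which can exceed the buffer, while scnprintf() returns what was actually written. These call sites ignore the return value, so the change is defensive; a sketch of the difference (kernel semantics):

    char buf[8];
    int n;

    n = snprintf(buf, sizeof(buf), "%s", "a very long name");
    /* n == 16: the would-be length. Using n to advance a cursor
     * would run past the end of buf.
     */

    n = scnprintf(buf, sizeof(buf), "%s", "a very long name");
    /* n == 7: bytes actually written, excluding the trailing NUL. */
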
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 07cc6a201579..09712e61c606 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2031,10 +2031,14 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */
QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x04d8, 0xfeea, /* Benchmark DAC1 Pre */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x04e8, 0xa051, /* Samsung USBC Headset (AKG) */
QUIRK_FLAG_SKIP_CLOCK_SELECTOR | QUIRK_FLAG_CTL_MSG_DELAY_5M),
+	DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 USB camera */
+ QUIRK_FLAG_IFACE_SKIP_CLOSE),
DEVICE_FLG(0x054c, 0x0b8c, /* Sony WALKMAN NW-A45 DAC */
QUIRK_FLAG_SET_IFACE_FIRST),
DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */
@@ -2073,14 +2077,22 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_GENERIC_IMPLICIT_FB),
DEVICE_FLG(0x0763, 0x2031, /* M-Audio Fast Track C600 */
QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x07fd, 0x000b, /* MOTU M Series 2nd hardware revision */
+ QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x08bb, 0x2702, /* LineX FM Transmitter */
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x0951, 0x16ad, /* Kingston HyperX */
QUIRK_FLAG_CTL_MSG_DELAY_1M),
DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */
QUIRK_FLAG_CTL_MSG_DELAY_1M),
+ DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
+ QUIRK_FLAG_FIXED_RATE),
+ DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
+ QUIRK_FLAG_FIXED_RATE),
DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */
QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */
@@ -2113,6 +2125,10 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY),
DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+ QUIRK_FLAG_GET_SAMPLE_RATE),
DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */
@@ -2155,6 +2171,12 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
+ DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
+ QUIRK_FLAG_GENERIC_IMPLICIT_FB),
DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */
@@ -2163,22 +2185,6 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */
QUIRK_FLAG_ALIGN_TRANSFER),
- DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */
- QUIRK_FLAG_GET_SAMPLE_RATE),
- DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */
- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
- DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */
- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
- DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */
- QUIRK_FLAG_GENERIC_IMPLICIT_FB),
- DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */
- QUIRK_FLAG_IFACE_SKIP_CLOSE),
- DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */
- QUIRK_FLAG_FIXED_RATE),
- DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
- QUIRK_FLAG_FIXED_RATE),
- DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
- QUIRK_FLAG_GET_SAMPLE_RATE),
/* Vendor matches */
VENDOR_FLG(0x045e, /* MS Lifecam */
diff --git a/sound/virtio/virtio_card.c b/sound/virtio/virtio_card.c
index e2847c040f75..b158c3cb8e5f 100644
--- a/sound/virtio/virtio_card.c
+++ b/sound/virtio/virtio_card.c
@@ -91,8 +91,6 @@ static void virtsnd_event_notify_cb(struct virtqueue *vqueue)
virtsnd_event_dispatch(snd, event);
virtsnd_event_send(vqueue, event, true, GFP_ATOMIC);
}
- if (unlikely(virtqueue_is_broken(vqueue)))
- break;
} while (!virtqueue_enable_cb(vqueue));
spin_unlock_irqrestore(&queue->lock, flags);
}
diff --git a/sound/virtio/virtio_ctl_msg.c b/sound/virtio/virtio_ctl_msg.c
index 18dc5aca2e0c..9dabea01277f 100644
--- a/sound/virtio/virtio_ctl_msg.c
+++ b/sound/virtio/virtio_ctl_msg.c
@@ -303,8 +303,6 @@ void virtsnd_ctl_notify_cb(struct virtqueue *vqueue)
virtqueue_disable_cb(vqueue);
while ((msg = virtqueue_get_buf(vqueue, &length)))
virtsnd_ctl_msg_complete(msg);
- if (unlikely(virtqueue_is_broken(vqueue)))
- break;
} while (!virtqueue_enable_cb(vqueue));
spin_unlock_irqrestore(&queue->lock, flags);
}
diff --git a/sound/virtio/virtio_pcm_msg.c b/sound/virtio/virtio_pcm_msg.c
index 542446c4c7ba..8c32efaf4c52 100644
--- a/sound/virtio/virtio_pcm_msg.c
+++ b/sound/virtio/virtio_pcm_msg.c
@@ -358,8 +358,6 @@ static inline void virtsnd_pcm_notify_cb(struct virtio_snd_queue *queue)
virtqueue_disable_cb(queue->vqueue);
while ((msg = virtqueue_get_buf(queue->vqueue, &written_bytes)))
virtsnd_pcm_msg_complete(msg, written_bytes);
- if (unlikely(virtqueue_is_broken(queue->vqueue)))
- break;
} while (!virtqueue_enable_cb(queue->vqueue));
spin_unlock_irqrestore(&queue->lock, flags);
}
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index f4542d2718f4..29cb275a219d 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -198,6 +198,7 @@
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
@@ -308,10 +309,14 @@
#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
#define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
-
#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
+#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
+#define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */
+#define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */
+#define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */
+#define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -495,6 +500,7 @@
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */
/* BUG word 2 */
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 1d51e1850ed0..f1bd7b91b3c6 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -237,6 +237,11 @@
#define LBR_INFO_CYCLES 0xffff
#define LBR_INFO_BR_TYPE_OFFSET 56
#define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET)
+#define LBR_INFO_BR_CNTR_OFFSET 32
+#define LBR_INFO_BR_CNTR_NUM 4
+#define LBR_INFO_BR_CNTR_BITS 2
+#define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0)
+#define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0)
#define MSR_ARCH_LBR_CTL 0x000014ce
#define ARCH_LBR_CTL_LBREN BIT(0)
@@ -536,6 +541,9 @@
#define MSR_RELOAD_PMC0 0x000014c1
#define MSR_RELOAD_FIXED_CTR0 0x00001309
+/* KeyID partitioning between MKTME and TDX */
+#define MSR_IA32_MKTME_KEYID_PARTITIONING 0x00000087
+
/*
* AMD64 MSRs. Not complete. See the architecture manual for a more
* complete list.
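
The new LBR_INFO_BR_CNTR_* constants describe four 2-bit branch counters packed from bit 32 of an LBR_INFO value; spelling out the GENMASK_ULL() arithmetic:

    /* GENMASK_ULL(h, l) sets bits l..h inclusive, so:
     *   LBR_INFO_BR_CNTR_MASK      == GENMASK_ULL(1, 0) == 0x3
     *   LBR_INFO_BR_CNTR_FULL_MASK == GENMASK_ULL(7, 0) == 0xff
     * Extracting counter i (0..3), a hedged sketch using the macros above:
     */
    static inline unsigned int lbr_br_cntr(unsigned long long info, int i)
    {
            return (info >> (LBR_INFO_BR_CNTR_OFFSET +
                             i * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK;
    }
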
diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h
index 11ff975242ca..e2ff22b379a4 100644
--- a/tools/arch/x86/include/asm/rmwcc.h
+++ b/tools/arch/x86/include/asm/rmwcc.h
@@ -4,7 +4,7 @@
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
- asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
+ asm goto (fullop "; j" cc " %l[cc_label]" \
: : "m" (var), ## __VA_ARGS__ \
: "memory" : cc_label); \
return 0; \
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 1a6a1f987949..a448d0964fc0 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -562,4 +562,7 @@ struct kvm_pmu_event_filter {
/* x86-specific KVM_EXIT_HYPERCALL flags. */
#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
+#define KVM_X86_DEFAULT_VM 0
+#define KVM_X86_SW_PROTECTED_VM 1
+
#endif /* _ASM_X86_KVM_H */
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index d055b82d22cc..59cf6f9065aa 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#include <asm/export.h>
.section .noinstr.text, "ax"
@@ -39,7 +39,7 @@ SYM_TYPED_FUNC_START(__memcpy)
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
-SYM_FUNC_ALIAS(memcpy, __memcpy)
+SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_START_LOCAL(memcpy_orig)
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
index 7c59a704c458..0199d56cb479 100644
--- a/tools/arch/x86/lib/memset_64.S
+++ b/tools/arch/x86/lib/memset_64.S
@@ -1,10 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#include <asm/export.h>
.section .noinstr.text, "ax"
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
-SYM_FUNC_ALIAS(memset, __memset)
+SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
EXPORT_SYMBOL(memset)
SYM_FUNC_START_LOCAL(memset_orig)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index 5006e724d1bc..5e60825818dd 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -257,18 +257,48 @@ EXAMPLES
return 0;
}
-This is example BPF application with two BPF programs and a mix of BPF maps
-and global variables. Source code is split across two source code files.
+**$ cat example3.bpf.c**
+
+::
+
+ #include <linux/ptrace.h>
+ #include <linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
+ /* This header file is provided by the bpf_testmod module. */
+ #include "bpf_testmod.h"
+
+ int test_2_result = 0;
+
+    /* bpf_testmod.ko calls this function, passing a "4"
+ * and testmod_map->data.
+ */
+ SEC("struct_ops/test_2")
+ void BPF_PROG(test_2, int a, int b)
+ {
+ test_2_result = a + b;
+ }
+
+ SEC(".struct_ops")
+ struct bpf_testmod_ops testmod_map = {
+ .test_2 = (void *)test_2,
+ .data = 0x1,
+ };
+
+This is an example BPF application with three BPF programs and a mix of
+BPF maps and global variables. The source code is split across three
+source files.
**$ clang --target=bpf -g example1.bpf.c -o example1.bpf.o**
**$ clang --target=bpf -g example2.bpf.c -o example2.bpf.o**
-**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o**
+**$ clang --target=bpf -g example3.bpf.c -o example3.bpf.o**
+
+**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o example3.bpf.o**
-This set of commands compiles *example1.bpf.c* and *example2.bpf.c*
-individually and then statically links respective object files into the final
-BPF ELF object file *example.bpf.o*.
+This set of commands compiles *example1.bpf.c*, *example2.bpf.c* and
+*example3.bpf.c* individually and then statically links the respective
+object files into the final BPF ELF object file *example.bpf.o*.
**$ bpftool gen skeleton example.bpf.o name example | tee example.skel.h**
@@ -291,7 +321,15 @@ BPF ELF object file *example.bpf.o*.
struct bpf_map *data;
struct bpf_map *bss;
struct bpf_map *my_map;
+ struct bpf_map *testmod_map;
} maps;
+ struct {
+ struct example__testmod_map__bpf_testmod_ops {
+ const struct bpf_program *test_1;
+ const struct bpf_program *test_2;
+ int data;
+ } *testmod_map;
+ } struct_ops;
struct {
struct bpf_program *handle_sys_enter;
struct bpf_program *handle_sys_exit;
@@ -304,6 +342,7 @@ BPF ELF object file *example.bpf.o*.
struct {
int x;
} data;
+ int test_2_result;
} *bss;
struct example__data {
_Bool global_flag;
@@ -342,10 +381,16 @@ BPF ELF object file *example.bpf.o*.
skel->rodata->param1 = 128;
+	/* Change the value through the shadow-type pointer */
+ skel->struct_ops.testmod_map->data = 13;
+
err = example__load(skel);
if (err)
goto cleanup;
+ /* The result of the function test_2() */
+ printf("test_2_result: %d\n", skel->bss->test_2_result);
+
err = example__attach(skel);
if (err)
goto cleanup;
@@ -372,6 +417,7 @@ BPF ELF object file *example.bpf.o*.
::
+ test_2_result: 17
my_map name: my_map
sys_enter prog FD: 8
my_static_var: 7
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index ee3ce2b8000d..1f579eacd9d4 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -7,6 +7,7 @@
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
@@ -54,11 +55,27 @@ static bool str_has_suffix(const char *str, const char *suffix)
return true;
}
+static const struct btf_type *
+resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
+{
+ const struct btf_type *t;
+
+ t = skip_mods_and_typedefs(btf, id, NULL);
+ if (!btf_is_ptr(t))
+ return NULL;
+
+ t = skip_mods_and_typedefs(btf, t->type, res_id);
+
+ return btf_is_func_proto(t) ? t : NULL;
+}
+
static void get_obj_name(char *name, const char *file)
{
- /* Using basename() GNU version which doesn't modify arg. */
- strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
- name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ char file_copy[PATH_MAX];
+
+	/* Use the POSIX basename(); it may modify its argument, hence the copy. */
+ strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
+ strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0';
if (str_has_suffix(name, ".o"))
name[strlen(name) - 2] = '\0';
sanitize_identifier(name);
@@ -906,6 +923,207 @@ codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_li
}
}
+static int walk_st_ops_shadow_vars(struct btf *btf, const char *ident,
+ const struct btf_type *map_type, __u32 map_type_id)
+{
+ LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .indent_level = 3);
+ const struct btf_type *member_type;
+ __u32 offset, next_offset = 0;
+ const struct btf_member *m;
+ struct btf_dump *d = NULL;
+ const char *member_name;
+ __u32 member_type_id;
+ int i, err = 0, n;
+ int size;
+
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
+
+ n = btf_vlen(map_type);
+ for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
+ member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
+ member_name = btf__name_by_offset(btf, m->name_off);
+
+ offset = m->offset / 8;
+ if (next_offset < offset)
+ printf("\t\t\tchar __padding_%d[%d];\n", i, offset - next_offset);
+
+ switch (btf_kind(member_type)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* scalar type */
+ printf("\t\t\t");
+ opts.field_name = member_name;
+ err = btf_dump__emit_type_decl(d, member_type_id, &opts);
+ if (err) {
+ p_err("Failed to emit type declaration for %s: %d", member_name, err);
+ goto out;
+ }
+ printf(";\n");
+
+ size = btf__resolve_size(btf, member_type_id);
+ if (size < 0) {
+ p_err("Failed to resolve size of %s: %d\n", member_name, size);
+ err = size;
+ goto out;
+ }
+
+ next_offset = offset + size;
+ break;
+
+ case BTF_KIND_PTR:
+ if (resolve_func_ptr(btf, m->type, NULL)) {
+ /* Function pointer */
+ printf("\t\t\tstruct bpf_program *%s;\n", member_name);
+
+ next_offset = offset + sizeof(void *);
+ break;
+ }
+			/* Pointer types other than function
+			 * pointers are unsupported.
+			 */
+ fallthrough;
+
+ default:
+ /* Unsupported types
+ *
+ * Types other than scalar types and function
+ * pointers are currently not supported in order to
+ * prevent conflicts in the generated code caused
+ * by multiple definitions. For instance, if the
+ * struct type FOO is used in a struct_ops map,
+ * bpftool has to generate definitions for FOO,
+ * which may result in conflicts if FOO is defined
+ * in different skeleton files.
+ */
+ size = btf__resolve_size(btf, member_type_id);
+ if (size < 0) {
+ p_err("Failed to resolve size of %s: %d\n", member_name, size);
+ err = size;
+ goto out;
+ }
+ printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);
+
+ next_offset = offset + size;
+ break;
+ }
+ }
+
+ /* Cannot fail since it must be a struct type */
+ size = btf__resolve_size(btf, map_type_id);
+ if (next_offset < (__u32)size)
+ printf("\t\t\tchar __padding_end[%d];\n", size - next_offset);
+
+out:
+ btf_dump__free(d);
+
+ return err;
+}
+
+/* Generate the pointer to the shadow type for a struct_ops map.
+ *
+ * The members of a struct_ops map can be exported through a pointer to a
+ * shadow type, and the user can access them through that pointer.
+ *
+ * A shadow type does not include every member; only scalar members and
+ * function pointers are exported. Function pointers are translated to
+ * pointers to struct bpf_program, and scalar types keep their original
+ * type with any modifiers stripped.
+ *
+ * Unsupported types are translated to a char array occupying the same
+ * space as the original field and renamed __unsupported_*. The user
+ * should treat these fields as opaque data.
+ */
+static int gen_st_ops_shadow_type(const char *obj_name, struct btf *btf, const char *ident,
+ const struct bpf_map *map)
+{
+ const struct btf_type *map_type;
+ const char *type_name;
+ __u32 map_type_id;
+ int err;
+
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id == 0)
+ return -EINVAL;
+ map_type = btf__type_by_id(btf, map_type_id);
+ if (!map_type)
+ return -EINVAL;
+
+ type_name = btf__name_by_offset(btf, map_type->name_off);
+
+ printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name);
+
+ err = walk_st_ops_shadow_vars(btf, ident, map_type, map_type_id);
+ if (err)
+ return err;
+
+ printf("\t\t} *%s;\n", ident);
+
+ return 0;
+}
+
+static int gen_st_ops_shadow(const char *obj_name, struct btf *btf, struct bpf_object *obj)
+{
+ int err, st_ops_cnt = 0;
+ struct bpf_map *map;
+ char ident[256];
+
+ if (!btf)
+ return 0;
+
+ /* Generate the pointers to shadow types of
+ * struct_ops maps.
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ if (st_ops_cnt == 0) /* first struct_ops map */
+ printf("\tstruct {\n");
+ st_ops_cnt++;
+
+ err = gen_st_ops_shadow_type(obj_name, btf, ident, map);
+ if (err)
+ return err;
+ }
+
+ if (st_ops_cnt)
+ printf("\t} struct_ops;\n");
+
+ return 0;
+}
+
+/* Generate the code to initialize the pointers of shadow types. */
+static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ char ident[256];
+
+ if (!btf)
+ return;
+
+	/* Initialize the pointers to the shadow types of
+ * struct_ops maps.
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ codegen("\
+ \n\
+ obj->struct_ops.%1$s = bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
+ \n\
+ ", ident);
+ }
+}
+
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
@@ -1049,6 +1267,11 @@ static int do_skeleton(int argc, char **argv)
printf("\t} maps;\n");
}
+ btf = bpf_object__btf(obj);
+ err = gen_st_ops_shadow(obj_name, btf, obj);
+ if (err)
+ goto out;
+
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
@@ -1072,7 +1295,6 @@ static int do_skeleton(int argc, char **argv)
printf("\t} links;\n");
}
- btf = bpf_object__btf(obj);
if (btf) {
err = codegen_datasecs(obj, obj_name);
if (err)
@@ -1130,6 +1352,12 @@ static int do_skeleton(int argc, char **argv)
if (err) \n\
goto err_out; \n\
\n\
+ ", obj_name);
+
+ gen_st_ops_shadow_init(btf, obj);
+
+ codegen("\
+ \n\
return obj; \n\
err_out: \n\
%1$s__destroy(obj); \n\
@@ -1439,6 +1667,10 @@ static int do_subskeleton(int argc, char **argv)
printf("\t} maps;\n");
}
+ err = gen_st_ops_shadow(obj_name, btf, obj);
+ if (err)
+ goto out;
+
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
@@ -1550,6 +1782,12 @@ static int do_subskeleton(int argc, char **argv)
if (err) \n\
goto err; \n\
\n\
+ ");
+
+ gen_st_ops_shadow_init(btf, obj);
+
+ codegen("\
+ \n\
return obj; \n\
err: \n\
%1$s__destroy(obj); \n\
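
Taken together, the struct_ops codegen above emits one nested struct per map whose layout mirrors the BTF value type: scalars keep their type, function pointers become struct bpf_program *, and everything else becomes opaque padding. For the testmod_map from the rst example earlier in this patch, the generated text looks like:

    struct {
            struct example__testmod_map__bpf_testmod_ops {
                    const struct bpf_program *test_1;
                    const struct bpf_program *test_2;
                    int data;
            } *testmod_map;
    } struct_ops;
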
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index cb46667a6b2e..afde9d0c2ea1 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -249,18 +249,44 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info)
return err;
}
-static int cmp_u64(const void *A, const void *B)
+struct addr_cookie {
+ __u64 addr;
+ __u64 cookie;
+};
+
+static int cmp_addr_cookie(const void *A, const void *B)
{
- const __u64 *a = A, *b = B;
+ const struct addr_cookie *a = A, *b = B;
- return *a - *b;
+ if (a->addr == b->addr)
+ return 0;
+ return a->addr < b->addr ? -1 : 1;
+}
+
+static struct addr_cookie *
+get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
+{
+ struct addr_cookie *data;
+ __u32 i;
+
+ data = calloc(count, sizeof(data[0]));
+ if (!data) {
+ p_err("mem alloc failed");
+ return NULL;
+ }
+ for (i = 0; i < count; i++) {
+ data[i].addr = addrs[i];
+ data[i].cookie = cookies[i];
+ }
+ qsort(data, count, sizeof(data[0]), cmp_addr_cookie);
+ return data;
}
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
{
+ struct addr_cookie *data;
__u32 i, j = 0;
- __u64 *addrs;
jsonw_bool_field(json_wtr, "retprobe",
info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
@@ -268,14 +294,20 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
jsonw_name(json_wtr, "funcs");
jsonw_start_array(json_wtr);
- addrs = u64_to_ptr(info->kprobe_multi.addrs);
- qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);
+ data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
+ u64_to_ptr(info->kprobe_multi.cookies),
+ info->kprobe_multi.count);
+ if (!data)
+ return;
/* Load it once for all. */
if (!dd.sym_count)
kernel_syms_load(&dd);
+ if (!dd.sym_count)
+ goto error;
+
for (i = 0; i < dd.sym_count; i++) {
- if (dd.sym_mapping[i].address != addrs[j])
+ if (dd.sym_mapping[i].address != data[j].addr)
continue;
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
@@ -287,11 +319,14 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
} else {
jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
}
+ jsonw_uint_field(json_wtr, "cookie", data[j].cookie);
jsonw_end_object(json_wtr);
if (j++ == info->kprobe_multi.count)
break;
}
jsonw_end_array(json_wtr);
+error:
+ free(data);
}
static __u64 *u64_to_arr(__u64 val)
@@ -334,6 +369,7 @@ show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
u64_to_ptr(info->perf_event.kprobe.func_name));
jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
+ jsonw_uint_field(wtr, "cookie", info->perf_event.kprobe.cookie);
}
static void
@@ -343,6 +379,7 @@ show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_string_field(wtr, "file",
u64_to_ptr(info->perf_event.uprobe.file_name));
jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
+ jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie);
}
static void
@@ -350,6 +387,7 @@ show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
{
jsonw_string_field(wtr, "tracepoint",
u64_to_ptr(info->perf_event.tracepoint.tp_name));
+ jsonw_uint_field(wtr, "cookie", info->perf_event.tracepoint.cookie);
}
static char *perf_config_hw_cache_str(__u64 config)
@@ -426,6 +464,8 @@ show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
else
jsonw_uint_field(wtr, "event_config", config);
+ jsonw_uint_field(wtr, "cookie", info->perf_event.event.cookie);
+
if (type == PERF_TYPE_HW_CACHE && perf_config)
free((void *)perf_config);
}
@@ -670,8 +710,8 @@ void netfilter_dump_plain(const struct bpf_link_info *info)
static void show_kprobe_multi_plain(struct bpf_link_info *info)
{
+ struct addr_cookie *data;
__u32 i, j = 0;
- __u64 *addrs;
if (!info->kprobe_multi.count)
return;
@@ -683,21 +723,24 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
printf("func_cnt %u ", info->kprobe_multi.count);
if (info->kprobe_multi.missed)
printf("missed %llu ", info->kprobe_multi.missed);
- addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
- qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
+ data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
+ u64_to_ptr(info->kprobe_multi.cookies),
+ info->kprobe_multi.count);
+ if (!data)
+ return;
/* Load it once for all. */
if (!dd.sym_count)
kernel_syms_load(&dd);
if (!dd.sym_count)
- return;
+ goto error;
- printf("\n\t%-16s %s", "addr", "func [module]");
+ printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
for (i = 0; i < dd.sym_count; i++) {
- if (dd.sym_mapping[i].address != addrs[j])
+ if (dd.sym_mapping[i].address != data[j].addr)
continue;
- printf("\n\t%016lx %s",
- dd.sym_mapping[i].address, dd.sym_mapping[i].name);
+ printf("\n\t%016lx %-16llx %s",
+ dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name);
if (dd.sym_mapping[i].module[0] != '\0')
printf(" [%s] ", dd.sym_mapping[i].module);
else
@@ -706,6 +749,8 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
if (j++ == info->kprobe_multi.count)
break;
}
+error:
+ free(data);
}
static void show_uprobe_multi_plain(struct bpf_link_info *info)
@@ -754,6 +799,8 @@ static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
printf("+%#x", info->perf_event.kprobe.offset);
if (info->perf_event.kprobe.missed)
printf(" missed %llu", info->perf_event.kprobe.missed);
+ if (info->perf_event.kprobe.cookie)
+ printf(" cookie %llu", info->perf_event.kprobe.cookie);
printf(" ");
}
@@ -770,6 +817,8 @@ static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
else
printf("\n\tuprobe ");
printf("%s+%#x ", buf, info->perf_event.uprobe.offset);
+ if (info->perf_event.uprobe.cookie)
+ printf("cookie %llu ", info->perf_event.uprobe.cookie);
}
static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
@@ -781,6 +830,8 @@ static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
return;
printf("\n\ttracepoint %s ", buf);
+ if (info->perf_event.tracepoint.cookie)
+ printf("cookie %llu ", info->perf_event.tracepoint.cookie);
}
static void show_perf_event_event_plain(struct bpf_link_info *info)
@@ -802,6 +853,9 @@ static void show_perf_event_event_plain(struct bpf_link_info *info)
else
printf("%llu ", config);
+ if (info->perf_event.event.cookie)
+ printf("cookie %llu ", info->perf_event.event.cookie);
+
if (type == PERF_TYPE_HW_CACHE && perf_config)
free((void *)perf_config);
}
@@ -952,6 +1006,14 @@ again:
return -ENOMEM;
}
info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ cookies = calloc(count, sizeof(__u64));
+ if (!cookies) {
+ p_err("mem alloc failed");
+ free(addrs);
+ close(fd);
+ return -ENOMEM;
+ }
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
goto again;
}
}
@@ -977,7 +1039,7 @@ again:
cookies = calloc(count, sizeof(__u64));
if (!cookies) {
p_err("mem alloc failed");
- free(cookies);
+ free(ref_ctr_offsets);
free(offsets);
close(fd);
return -ENOMEM;
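
The link.c changes above replace the bare sorted address array with addr/cookie pairs that are sorted together, so each cookie stays attached to its address when merged against the sorted kallsyms mapping. For illustration, a hedged reconstruction of what a get_addr_cookie_array() helper like the one this patch introduces could look like (the real helper lives in an earlier hunk not shown here; this sketch is illustrative only):

#include <stdlib.h>
#include <linux/types.h>

struct addr_cookie {
	__u64 addr;
	__u64 cookie;
};

/* Sort by address only; qsort() moves each 16-byte pair as a unit. */
static int cmp_addr_cookie(const void *A, const void *B)
{
	const struct addr_cookie *a = A, *b = B;

	if (a->addr == b->addr)
		return 0;
	return a->addr < b->addr ? -1 : 1;
}

static struct addr_cookie *
get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
{
	struct addr_cookie *data;
	__u32 i;

	data = calloc(count, sizeof(data[0]));
	if (!data)
		return NULL;
	for (i = 0; i < count; i++) {
		data[i].addr = addrs[i];
		data[i].cookie = cookies[i];
	}
	qsort(data, count, sizeof(data[0]), cmp_addr_cookie);
	return data;
}
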
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index feb8e305804f..9cb42a3366c0 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -2298,7 +2298,7 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
int map_fd;
profile_perf_events = calloc(
- sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
+ obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
if (!profile_perf_events) {
p_err("failed to allocate memory for perf_event array: %s",
strerror(errno));
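
The prog.c hunk swaps calloc()'s arguments into their documented (nmemb, size) order. The old call allocated the same number of bytes, but GCC 14's -Wcalloc-transposed-args warns when a sizeof expression is passed first. A minimal illustration of the convention:

#include <stdlib.h>

int main(void)
{
	unsigned int num_cpu = 8, num_metric = 4;

	/* Element count first, element size second. The transposed
	 * form calloc(sizeof(int), n) allocates the same total but
	 * trips -Wcalloc-transposed-args on GCC 14. */
	int *events = calloc(num_cpu * num_metric, sizeof(int));

	free(events);
	return 0;
}
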
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 27a23196d58e..d9520cb826b3 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -70,6 +70,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
+#include <linux/btf_ids.h>
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include <linux/err.h>
@@ -78,7 +79,7 @@
#include <subcmd/parse-options.h>
#define BTF_IDS_SECTION ".BTF_ids"
-#define BTF_ID "__BTF_ID__"
+#define BTF_ID_PREFIX "__BTF_ID__"
#define BTF_STRUCT "struct"
#define BTF_UNION "union"
@@ -89,6 +90,14 @@
#define ADDR_CNT 100
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define ELFDATANATIVE ELFDATA2LSB
+#elif __BYTE_ORDER == __BIG_ENDIAN
+# define ELFDATANATIVE ELFDATA2MSB
+#else
+# error "Unknown machine endianness!"
+#endif
+
struct btf_id {
struct rb_node rb_node;
char *name;
@@ -116,6 +125,7 @@ struct object {
int idlist_shndx;
size_t strtabidx;
unsigned long idlist_addr;
+ int encoding;
} efile;
struct rb_root sets;
@@ -161,7 +171,7 @@ static int eprintf(int level, int var, const char *fmt, ...)
static bool is_btf_id(const char *name)
{
- return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
+ return name && !strncmp(name, BTF_ID_PREFIX, sizeof(BTF_ID_PREFIX) - 1);
}
static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
@@ -319,6 +329,7 @@ static int elf_collect(struct object *obj)
{
Elf_Scn *scn = NULL;
size_t shdrstrndx;
+ GElf_Ehdr ehdr;
int idx = 0;
Elf *elf;
int fd;
@@ -350,6 +361,13 @@ static int elf_collect(struct object *obj)
return -1;
}
+ if (gelf_getehdr(obj->efile.elf, &ehdr) == NULL) {
+ pr_err("FAILED cannot get ELF header: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+ obj->efile.encoding = ehdr.e_ident[EI_DATA];
+
/*
* Scan all the ELF sections and save the data
* from the .BTF_ids section and the symbols.
@@ -441,7 +459,7 @@ static int symbols_collect(struct object *obj)
* __BTF_ID__TYPE__vfs_truncate__0
* prefix = ^
*/
- prefix = name + sizeof(BTF_ID) - 1;
+ prefix = name + sizeof(BTF_ID_PREFIX) - 1;
/* struct */
if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
@@ -649,19 +667,18 @@ static int cmp_id(const void *pa, const void *pb)
static int sets_patch(struct object *obj)
{
Elf_Data *data = obj->efile.idlist;
- int *ptr = data->d_buf;
struct rb_node *next;
next = rb_first(&obj->sets);
while (next) {
- unsigned long addr, idx;
+ struct btf_id_set8 *set8;
+ struct btf_id_set *set;
+ unsigned long addr, off;
struct btf_id *id;
- int *base;
- int cnt;
id = rb_entry(next, struct btf_id, rb_node);
addr = id->addr[0];
- idx = addr - obj->efile.idlist_addr;
+ off = addr - obj->efile.idlist_addr;
/* sets are unique */
if (id->addr_cnt != 1) {
@@ -670,14 +687,39 @@ static int sets_patch(struct object *obj)
return -1;
}
- idx = idx / sizeof(int);
- base = &ptr[idx] + (id->is_set8 ? 2 : 1);
- cnt = ptr[idx];
+ if (id->is_set) {
+ set = data->d_buf + off;
+ qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
+ } else {
+ set8 = data->d_buf + off;
+ /*
+ * Make sure id is at the beginning of the pairs
+ * struct, otherwise the below qsort would not work.
+ */
+ BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id);
+ qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
- pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
- (idx + 1) * sizeof(int), cnt, id->name);
+ /*
+ * When ELF endianness does not match endianness of the
+ * host, libelf will do the translation when updating
+ * the ELF. This, however, corrupts SET8 flags which are
+ * already in the target endianness. So, let's bswap
+ * them to the host endianness and libelf will then
+ * correctly translate everything.
+ */
+ if (obj->efile.encoding != ELFDATANATIVE) {
+ int i;
+
+ set8->flags = bswap_32(set8->flags);
+ for (i = 0; i < set8->cnt; i++) {
+ set8->pairs[i].flags =
+ bswap_32(set8->pairs[i].flags);
+ }
+ }
+ }
- qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
+ pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
+ off, id->is_set ? set->cnt : set8->cnt, id->name);
next = rb_next(next);
}
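
The endianness handling above matters for cross-endian builds: the .BTF_ids payload is written in target byte order, but libelf translates the whole section from host order when the ELF is updated, so set8 flags that are already in target order must be pre-swapped to host order. A hedged standalone sketch of the same idea:

#include <byteswap.h>
#include <elf.h>
#include <endian.h>
#include <stdint.h>

/* Sketch: bring target-endian flag words into host order so that
 * libelf's translation on elf_update() lands them back in target
 * order. 'encoding' is e_ident[EI_DATA] of the ELF being patched. */
static void fixup_set8_flags(uint32_t *flags, unsigned int cnt, int encoding)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	int native = ELFDATA2LSB;
#else
	int native = ELFDATA2MSB;
#endif
	unsigned int i;

	if (encoding == native)
		return;		/* same endianness, nothing to do */
	for (i = 0; i < cnt; i++)
		flags[i] = bswap_32(flags[i]);
}
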
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 934e2777a2db..64df118376df 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -32,6 +32,7 @@ FEATURE_TESTS_BASIC := \
backtrace \
dwarf \
dwarf_getlocations \
+ dwarf_getcfi \
eventfd \
fortify-source \
get_current_dir_name \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index dad79ede4e0a..37722e509eb9 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -7,6 +7,7 @@ FILES= \
test-bionic.bin \
test-dwarf.bin \
test-dwarf_getlocations.bin \
+ test-dwarf_getcfi.bin \
test-eventfd.bin \
test-fortify-source.bin \
test-get_current_dir_name.bin \
@@ -154,6 +155,9 @@ $(OUTPUT)test-dwarf.bin:
$(OUTPUT)test-dwarf_getlocations.bin:
$(BUILD) $(DWARFLIBS)
+$(OUTPUT)test-dwarf_getcfi.bin:
+ $(BUILD) $(DWARFLIBS)
+
$(OUTPUT)test-libelf-getphdrnum.bin:
$(BUILD) -lelf
diff --git a/tools/build/feature/test-dwarf_getcfi.c b/tools/build/feature/test-dwarf_getcfi.c
new file mode 100644
index 000000000000..50e7d7cb7bdf
--- /dev/null
+++ b/tools/build/feature/test-dwarf_getcfi.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <elfutils/libdw.h>
+
+int main(void)
+{
+ Dwarf *dwarf = NULL;
+ return dwarf_getcfi(dwarf) == NULL;
+}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index eb6303ff446e..4cfcef9da3e4 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -4,9 +4,9 @@
/*
* Check OpenCSD library version is sufficient to provide required features
*/
-#define OCSD_MIN_VER ((1 << 16) | (1 << 8) | (1))
+#define OCSD_MIN_VER ((1 << 16) | (2 << 8) | (1))
#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 1.1.1 is required"
+#error "OpenCSD >= 1.2.1 is required"
#endif
int main(void)
diff --git a/tools/include/asm-generic/unaligned.h b/tools/include/asm-generic/unaligned.h
index 2fd551915c20..cdd2fd078027 100644
--- a/tools/include/asm-generic/unaligned.h
+++ b/tools/include/asm-generic/unaligned.h
@@ -105,9 +105,9 @@ static inline u32 get_unaligned_le24(const void *p)
static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be24(const u32 val, void *p)
@@ -117,9 +117,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
+ *p++ = val & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = (val >> 16) & 0xff;
}
static inline void put_unaligned_le24(const u32 val, void *p)
@@ -129,12 +129,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
static inline void __put_unaligned_be48(const u64 val, u8 *p)
{
- *p++ = val >> 40;
- *p++ = val >> 32;
- *p++ = val >> 24;
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 40) & 0xff;
+ *p++ = (val >> 32) & 0xff;
+ *p++ = (val >> 24) & 0xff;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be48(const u64 val, void *p)
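
The new & 0xff masks in unaligned.h change no behavior (the u8 stores truncate anyway); they exist to quiet implicit-truncation warnings of the -Wconversion family. A quick self-check of the little-endian 24-bit store, written under that assumption:

#include <assert.h>
#include <stdint.h>

static void put_le24(uint32_t val, uint8_t *p)
{
	p[0] = val & 0xff;		/* least significant byte first */
	p[1] = (val >> 8) & 0xff;
	p[2] = (val >> 16) & 0xff;
}

int main(void)
{
	uint8_t buf[3];

	put_le24(0x123456, buf);
	assert(buf[0] == 0x56 && buf[1] == 0x34 && buf[2] == 0x12);
	return 0;
}
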
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 2f882d5cb30f..72535f00572f 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -8,6 +8,15 @@ struct btf_id_set {
u32 ids[];
};
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
#ifdef CONFIG_DEBUG_INFO_BTF
#include <linux/compiler.h> /* for __PASTE */
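
struct btf_id_set8 above stores (id, flags) pairs, which is why resolve_btfids sorts whole pairs while comparing only the id member. A small hedged illustration of that pattern:

#include <stdint.h>
#include <stdlib.h>

struct pair { uint32_t id; uint32_t flags; };

/* Compare by id only; qsort() swaps whole 8-byte pairs, keeping each
 * flags word attached to its id. */
static int cmp_pair_id(const void *a, const void *b)
{
	const struct pair *pa = a, *pb = b;

	return pa->id < pb->id ? -1 : pa->id > pb->id ? 1 : 0;
}

int main(void)
{
	struct pair pairs[] = { {30, 0x2}, {10, 0x1}, {20, 0x4} };

	qsort(pairs, 3, sizeof(pairs[0]), cmp_pair_id);
	return pairs[0].id == 10 && pairs[0].flags == 0x1 ? 0 : 1;
}
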
diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h
index 1bdd834bdd57..d09f9dc172a4 100644
--- a/tools/include/linux/compiler_types.h
+++ b/tools/include/linux/compiler_types.h
@@ -36,8 +36,8 @@
#include <linux/compiler-gcc.h>
#endif
-#ifndef asm_volatile_goto
-#define asm_volatile_goto(x...) asm goto(x)
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm goto(x)
#endif
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 756b013fb832..75f00965ab15 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait)
#define __NR_futex_requeue 456
__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
+
#undef __NR_syscalls
-#define __NR_syscalls 457
+#define __NR_syscalls 462
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index de723566c5ae..16122819edfe 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -713,7 +713,8 @@ struct drm_gem_open {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@@ -773,6 +774,13 @@ struct drm_gem_open {
* :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set, the DRM core will hide the cursor plane
+ * on those virtualized drivers, because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec - fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
/**
@@ -1218,6 +1268,26 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
+
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
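
Since drm_syncobj_wait only grows at the tail, old binaries keep working; new clients opt in by setting the WAIT_DEADLINE flag and filling deadline_nsec. A hedged sketch (assumes uapi headers new enough to carry the field, an open DRM fd, and a valid syncobj handle; error handling elided):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <drm/drm.h>

/* Sketch: wait on one syncobj with a fence deadline hint roughly one
 * 60 Hz frame in the future. Both timeout_nsec and deadline_nsec are
 * absolute CLOCK_MONOTONIC values. */
static int wait_with_deadline(int fd, uint32_t handle)
{
	struct drm_syncobj_wait wait;
	struct timespec ts;
	uint64_t now;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	now = ts.tv_sec * 1000000000ull + ts.tv_nsec;

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)&handle;
	wait.count_handles = 1;
	wait.timeout_nsec = now + 100000000ull;		/* 100 ms */
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
	wait.deadline_nsec = now + 16600000ull;		/* ~16.6 ms */
	return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}
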
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 218edb0a96f8..fd4f9574d177 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -693,7 +693,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_FENCE 44
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
* EXEC_OBJECT_CAPTURE.
*/
#define I915_PARAM_HAS_EXEC_CAPTURE 45
@@ -1606,7 +1606,7 @@ struct drm_i915_gem_busy {
* is accurate.
*
* The returned dword is split into two fields to indicate both
- * the engine classess on which the object is being read, and the
+ * the engine classes on which the object is being read, and the
* engine class on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
@@ -1815,7 +1815,7 @@ struct drm_i915_gem_madvise {
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
- * or wont be and could be discarded under memory pressure.
+ * or won't be and could be discarded under memory pressure.
*/
__u32 madv;
@@ -3246,7 +3246,7 @@ struct drm_i915_query_topology_info {
* // enough to hold our array of engines. The kernel will fill out the
* // item.length for us, which is the number of bytes we need.
* //
- * // Alternatively a large buffer can be allocated straight away enabling
+ * // Alternatively a large buffer can be allocated straightaway enabling
* // querying in one pass, in which case item.length should contain the
* // length of the provided buffer.
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3256,7 +3256,7 @@ struct drm_i915_query_topology_info {
* // Now that we allocated the required number of bytes, we call the ioctl
* // again, this time with the data_ptr pointing to our newly allocated
* // blob, which the kernel can then populate with info on all engines.
- * item.data_ptr = (uintptr_t)&info,
+ * item.data_ptr = (uintptr_t)&info;
*
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
* if (err) ...
@@ -3286,7 +3286,7 @@ struct drm_i915_query_topology_info {
/**
* struct drm_i915_engine_info
*
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
/** @engine: Engine class and instance. */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7f24d898efbb..a241f407c234 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -77,12 +77,29 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
-/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
+/* Deprecated: use struct bpf_lpm_trie_key_u8 (when the "data" member is needed for
+ * byte access) or struct bpf_lpm_trie_key_hdr (when using an alternative type for
+ * the trailing flexible array member) instead.
+ */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
__u8 data[0]; /* Arbitrary size */
};
+/* Header for bpf_lpm_trie_key structs */
+struct bpf_lpm_trie_key_hdr {
+ __u32 prefixlen;
+};
+
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry, with trailing byte array. */
+struct bpf_lpm_trie_key_u8 {
+ union {
+ struct bpf_lpm_trie_key_hdr hdr;
+ __u32 prefixlen;
+ };
+ __u8 data[]; /* Arbitrary size */
+};
+
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
@@ -617,7 +634,11 @@ union bpf_iter_link_info {
* to NULL to begin the batched operation. After each subsequent
* **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
* *out_batch* as the *in_batch* for the next operation to
- * continue iteration from the current point.
+ * continue iteration from the current point. Both *in_batch* and
+ * *out_batch* must point to memory large enough to hold a key,
+ * except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which batch parameters
+ * must be at least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point
* to memory large enough to hold *count* items based on the key
@@ -847,6 +868,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
+ * BPF_TOKEN_CREATE
+ * Description
+ * Create BPF token with embedded information about what
+ * BPF-related functionality it allows:
+ * - a set of allowed bpf() syscall commands;
+ * - a set of allowed BPF map types to be created with
+ * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
+ * - a set of allowed BPF program types and BPF program attach
+ * types to be loaded with BPF_PROG_LOAD command, if
+ * BPF_PROG_LOAD itself is allowed.
+ *
+ * A BPF token is created (derived) from an instance of BPF FS,
+ * assuming it has the necessary delegation mount options
+ * specified. This BPF token can be passed as an extra parameter
+ * to various bpf() syscall commands to grant BPF subsystem
+ * functionality to unprivileged processes.
+ *
+ * When created, a BPF token is "associated" with the owning
+ * user namespace of the BPF FS instance (super block) that it
+ * was derived from, and subsequent BPF operations performed
+ * with the token perform capability checks (i.e., CAP_BPF,
+ * CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within that user
+ * namespace. Without a BPF token, such capabilities have to be
+ * granted in the init user namespace, which makes the bpf()
+ * syscall largely incompatible with user namespaces.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -901,6 +952,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
+ BPF_TOKEN_CREATE,
+ __MAX_BPF_CMD,
};
enum bpf_map_type {
@@ -951,6 +1004,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
+ __MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -995,6 +1049,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
BPF_PROG_TYPE_NETFILTER,
+ __MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@@ -1330,6 +1385,12 @@ enum {
/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
BPF_F_PATH_FD = (1U << 14),
+
+/* Flag for value_type_btf_obj_fd, the fd is available */
+ BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+/* BPF token FD is passed in a corresponding command's token_fd field */
+ BPF_F_TOKEN_FD = (1U << 16),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1403,6 +1464,15 @@ union bpf_attr {
* to using 5 hash functions).
*/
__u64 map_extra;
+
+ __s32 value_type_btf_obj_fd; /* fd pointing to a BTF
+ * type data for
+ * btf_vmlinux_value_type_id.
+ */
+ /* BPF token FD to use with BPF_MAP_CREATE operation.
+ * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -1472,6 +1542,10 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 log_true_size;
+ /* BPF token FD to use with BPF_PROG_LOAD operation.
+ * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -1584,6 +1658,11 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 btf_log_true_size;
+ __u32 btf_flags;
+ /* BPF token FD to use with BPF_BTF_LOAD operation.
+ * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 btf_token_fd;
};
struct {
@@ -1714,6 +1793,11 @@ union bpf_attr {
__u32 flags; /* extra flags */
} prog_bind_map;
+ struct { /* struct used by BPF_TOKEN_CREATE command */
+ __u32 flags;
+ __u32 bpffs_fd;
+ } token_create;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -4839,9 +4923,9 @@ union bpf_attr {
* going through the CPU's backlog queue.
*
* The *flags* argument is reserved and must be 0. The helper is
- * currently only supported for tc BPF program types at the ingress
- * hook and for veth device types. The peer device must reside in a
- * different network namespace.
+ * currently only supported for tc BPF program types at the
+ * ingress hook and for veth and netkit target device types. The
+ * peer device must reside in a different network namespace.
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
@@ -6487,7 +6571,7 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
- __u32 :32; /* alignment pad */
+ __u32 btf_vmlinux_id;
__u64 map_extra;
} __attribute__((aligned(8)));
@@ -6563,6 +6647,7 @@ struct bpf_link_info {
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
+ __aligned_u64 cookies;
} kprobe_multi;
struct {
__aligned_u64 path;
@@ -6582,6 +6667,7 @@ struct bpf_link_info {
__aligned_u64 file_name; /* in/out */
__u32 name_len;
__u32 offset; /* offset from file_name */
+ __u64 cookie;
} uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
struct {
__aligned_u64 func_name; /* in/out */
@@ -6589,14 +6675,19 @@ struct bpf_link_info {
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
+ __u64 cookie;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */
__u32 name_len;
+ __u32 :32;
+ __u64 cookie;
} tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
struct {
__u64 config;
__u32 type;
+ __u32 :32;
+ __u64 cookie;
} event; /* BPF_PERF_EVENT_EVENT */
};
} perf_event;
@@ -6904,6 +6995,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
+ BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
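
With cookies now exposed through bpf_link_info, tooling other than bpftool can fetch them through the regular get-info path. A minimal hedged sketch using libbpf's bpf_link_get_info_by_fd(), zero-initializing the struct as the libbpf doc updates later in this patch recommend:

#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

/* Sketch: print the kprobe cookie of a perf-event kprobe link.
 * Assumes 'link_fd' references a BPF_PERF_EVENT_KPROBE link. */
static void print_kprobe_cookie(int link_fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (bpf_link_get_info_by_fd(link_fd, &info, &len))
		return;
	printf("cookie %llu\n",
	       (unsigned long long)info.perf_event.kprobe.cookie);
}
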
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 6c80f96049bd..282e90aeb163 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -116,5 +116,8 @@
#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
compare object identity and may not
be usable to open_by_handle_at(2) */
+#if defined(__KERNEL__)
+#define AT_GETATTR_NOSEC 0x80000000
+#endif
#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index a0aa05a28cf2..f0d71b2a3f1e 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -974,6 +974,7 @@ enum {
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
IFLA_BOND_NS_IP6_TARGET,
+ IFLA_BOND_COUPLED_CONTROL,
__IFLA_BOND_MAX,
};
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 211b86de35ac..c3308536482b 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -16,76 +16,6 @@
#define KVM_API_VERSION 12
-/* *** Deprecated interfaces *** */
-
-#define KVM_TRC_SHIFT 16
-
-#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
-#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
-
-#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
-#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
-#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
-
-#define KVM_TRC_HEAD_SIZE 12
-#define KVM_TRC_CYCLE_SIZE 8
-#define KVM_TRC_EXTRA_MAX 7
-
-#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
-#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
-#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
-#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
-#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
-
-struct kvm_user_trace_setup {
- __u32 buf_size;
- __u32 buf_nr;
-};
-
-#define __KVM_DEPRECATED_MAIN_W_0x06 \
- _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
-
-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
-
-struct kvm_breakpoint {
- __u32 enabled;
- __u32 padding;
- __u64 address;
-};
-
-struct kvm_debug_guest {
- __u32 enabled;
- __u32 pad;
- struct kvm_breakpoint breakpoints[4];
- __u32 singlestep;
-};
-
-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
-
-/* *** End of deprecated interfaces *** */
-
-
/* for KVM_SET_USER_MEMORY_REGION */
struct kvm_userspace_memory_region {
__u32 slot;
@@ -95,6 +25,19 @@ struct kvm_userspace_memory_region {
__u64 userspace_addr; /* start of the userspace allocated memory */
};
+/* for KVM_SET_USER_MEMORY_REGION2 */
+struct kvm_userspace_memory_region2 {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size;
+ __u64 userspace_addr;
+ __u64 guest_memfd_offset;
+ __u32 guest_memfd;
+ __u32 pad1;
+ __u64 pad2[14];
+};
+
/*
* The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
* userspace, other bits are reserved for kvm internal use which are defined
@@ -102,6 +45,7 @@ struct kvm_userspace_memory_region {
*/
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
+#define KVM_MEM_GUEST_MEMFD (1UL << 2)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
@@ -265,6 +209,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_LOONGARCH_IOCSR 38
+#define KVM_EXIT_MEMORY_FAULT 39
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -518,6 +463,13 @@ struct kvm_run {
#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
__u32 flags;
} notify;
+ /* KVM_EXIT_MEMORY_FAULT */
+ struct {
+#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
+ __u64 flags;
+ __u64 gpa;
+ __u64 size;
+ } memory_fault;
/* Fix the size of the union. */
char padding[256];
};
@@ -945,9 +897,6 @@ struct kvm_ppc_resize_hpt {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
-#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
-#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
@@ -1201,6 +1150,11 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+#define KVM_CAP_USER_MEMORY2 231
+#define KVM_CAP_MEMORY_FAULT_INFO 232
+#define KVM_CAP_MEMORY_ATTRIBUTES 233
+#define KVM_CAP_GUEST_MEMFD 234
+#define KVM_CAP_VM_TYPES 235
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1291,6 +1245,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
struct kvm_xen_hvm_config {
__u32 flags;
@@ -1483,6 +1438,8 @@ struct kvm_vfio_spapr_tce {
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
+ struct kvm_userspace_memory_region2)
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
@@ -1507,20 +1464,8 @@ struct kvm_s390_ucas_mapping {
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
- struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
-#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
-#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
-#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
- struct kvm_assigned_pci_dev)
-#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
- struct kvm_assigned_msix_nr)
-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
- struct kvm_assigned_msix_entry)
-#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
@@ -1537,9 +1482,6 @@ struct kvm_s390_ucas_mapping {
* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
-/* Available with KVM_CAP_PCI_2_3 */
-#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
- struct kvm_assigned_pci_dev)
/* Available with KVM_CAP_SIGNAL_MSI */
#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1592,8 +1534,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -2267,4 +2207,24 @@ struct kvm_s390_zpci_op {
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+ __u64 address;
+ __u64 size;
+ __u64 attributes;
+ __u64 flags;
+};
+
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
+
+#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+ __u64 size;
+ __u64 flags;
+ __u64 reserved[6];
+};
+
#endif /* __LINUX_KVM_H */
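
The guest-memfd pieces above compose as: create the fd on the VM with KVM_CREATE_GUEST_MEMFD, then register it via KVM_SET_USER_MEMORY_REGION2 with KVM_MEM_GUEST_MEMFD set. A hedged sketch, eliding the KVM_CAP_GUEST_MEMFD/KVM_CAP_USER_MEMORY2 capability checks that real code should perform:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: back 2 MiB of guest memory at GPA 0 with a guest memfd.
 * 'vm_fd' is a KVM VM fd, 'uaddr' the shared userspace mapping. */
static int map_guest_memfd(int vm_fd, __u64 uaddr)
{
	struct kvm_create_guest_memfd gmem = { .size = 2 << 20 };
	struct kvm_userspace_memory_region2 region;
	int gmem_fd;

	gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	if (gmem_fd < 0)
		return gmem_fd;

	memset(&region, 0, sizeof(region));
	region.slot = 0;
	region.flags = KVM_MEM_GUEST_MEMFD;
	region.guest_phys_addr = 0;
	region.memory_size = 2 << 20;
	region.userspace_addr = uaddr;
	region.guest_memfd = gmem_fd;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}
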
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
index bb242fdcfe6b..ad5478dbad00 100644
--- a/tools/include/uapi/linux/mount.h
+++ b/tools/include/uapi/linux/mount.h
@@ -138,4 +138,74 @@ struct mount_attr {
/* List of all mount_attr versions. */
#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
+
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). The kernel sets the @mask
+ * field according to the fields it supports.
+ *
+ * If string fields are selected, the caller needs to pass a buffer that has
+ * space after the fixed part of the structure. Nul-terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, EOVERFLOW is returned. The size actually used
+ * is returned in @size.
+ */
+struct statmount {
+ __u32 size; /* Total size, including strings */
+ __u32 __spare1;
+ __u64 mask; /* What results were written */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 fs_type; /* [str] Filesystem type */
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused ID, as seen in proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u32 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u32 mnt_point; /* [str] Mountpoint relative to current root */
+ __u64 __spare2[50];
+ char str[]; /* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 mnt_id;
+ __u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT 0xffffffffffffffff /* root mount */
+
#endif /* _UAPI_LINUX_MOUNT_H */
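
Until libc grows wrappers, statmount(2) can be driven through syscall(2) with the numbers added to unistd.h earlier in this patch. A hedged sketch that asks only for the filesystem type string; the syscall signature (req, buf, bufsize, flags) is an assumption based on the structures above, and the unique mount ID would come from, e.g., statx() with the STATX_MNT_ID_UNIQUE flag added below:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/mount.h>

/* Sketch only: fetch fs_type for 'mnt_id'. 457 is __NR_statmount from
 * the unistd.h hunk above; the buffer must leave room for strings. */
static void print_fs_type(__u64 mnt_id)
{
	char buf[4096] __attribute__((aligned(8)));
	struct statmount *sm = (struct statmount *)buf;
	struct mnt_id_req req = {
		.size = MNT_ID_REQ_SIZE_VER0,
		.mnt_id = mnt_id,
		.param = STATMOUNT_FS_TYPE,	/* request mask */
	};

	memset(buf, 0, sizeof(buf));
	if (syscall(457, &req, sm, sizeof(buf), 0) == 0 &&
	    (sm->mask & STATMOUNT_FS_TYPE))
		printf("fs type: %s\n", sm->str + sm->fs_type);
}
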
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 93cb411adf72..bb65ee840cda 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -70,6 +70,10 @@ enum netdev_queue_type {
NETDEV_QUEUE_TYPE_TX,
};
+enum netdev_qstats_scope {
+ NETDEV_QSTATS_SCOPE_QUEUE = 1,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
@@ -133,6 +137,21 @@ enum {
};
enum {
+ NETDEV_A_QSTATS_IFINDEX = 1,
+ NETDEV_A_QSTATS_QUEUE_TYPE,
+ NETDEV_A_QSTATS_QUEUE_ID,
+ NETDEV_A_QSTATS_SCOPE,
+ NETDEV_A_QSTATS_RX_PACKETS = 8,
+ NETDEV_A_QSTATS_RX_BYTES,
+ NETDEV_A_QSTATS_TX_PACKETS,
+ NETDEV_A_QSTATS_TX_BYTES,
+ NETDEV_A_QSTATS_RX_ALLOC_FAIL,
+
+ __NETDEV_A_QSTATS_MAX,
+ NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -144,6 +163,7 @@ enum {
NETDEV_CMD_PAGE_POOL_STATS_GET,
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
+ NETDEV_CMD_QSTATS_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 39c6a250dd1b..3a64499b0f5d 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -235,6 +237,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -982,6 +986,12 @@ enum perf_event_type {
* { u64 nr;
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
* { u64 from, to, flags } lbr[nr];
+ * #
+ * # The format of the counters is decided by the
+ * # "branch_counter_nr" and "branch_counter_width",
+ * # which are defined in the ABI.
+ * #
+ * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
* } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
@@ -1427,6 +1437,9 @@ struct perf_branch_entry {
reserved:31;
};
+/* Size of used info bits in struct perf_branch_entry */
+#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
+
union perf_sample_weight {
__u64 full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
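
Requesting the new branch counters is a single extra bit in branch_sample_type on top of a normal branch-stack setup; whether the PMU honors it is hardware-dependent. A hedged attr fragment:

#include <string.h>
#include <linux/perf_event.h>

/* Sketch: request branch stacks with per-branch event counters.
 * The counter record layout is advertised by the PMU via the
 * branch_counter_nr/branch_counter_width ABI described above. */
static void setup_branch_counters(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr->branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
				   PERF_SAMPLE_BRANCH_COUNTERS;
}
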
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index 7cab2c65d3d7..2f2ee82d5517 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -154,6 +154,7 @@ struct statx {
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 5cb0eeec2c8a..337fde770e45 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -16,6 +16,7 @@
#include <sys/mount.h>
#include "fs.h"
+#include "../io.h"
#include "debug-internal.h"
#define _STR(x) #x
@@ -344,53 +345,24 @@ int filename__read_ull(const char *filename, unsigned long long *value)
return filename__read_ull_base(filename, value, 0);
}
-#define STRERR_BUFSIZE 128 /* For the buffer size of strerror_r */
-
int filename__read_str(const char *filename, char **buf, size_t *sizep)
{
- size_t size = 0, alloc_size = 0;
- void *bf = NULL, *nbf;
- int fd, n, err = 0;
- char sbuf[STRERR_BUFSIZE];
+ struct io io;
+ char bf[128];
+ int err;
- fd = open(filename, O_RDONLY);
- if (fd < 0)
+ io.fd = open(filename, O_RDONLY);
+ if (io.fd < 0)
return -errno;
-
- do {
- if (size == alloc_size) {
- alloc_size += BUFSIZ;
- nbf = realloc(bf, alloc_size);
- if (!nbf) {
- err = -ENOMEM;
- break;
- }
-
- bf = nbf;
- }
-
- n = read(fd, bf + size, alloc_size - size);
- if (n < 0) {
- if (size) {
- pr_warn("read failed %d: %s\n", errno,
- strerror_r(errno, sbuf, sizeof(sbuf)));
- err = 0;
- } else
- err = -errno;
-
- break;
- }
-
- size += n;
- } while (n > 0);
-
- if (!err) {
- *sizep = size;
- *buf = bf;
+ io__init(&io, io.fd, bf, sizeof(bf));
+ *buf = NULL;
+ err = io__getdelim(&io, buf, sizep, /*delim=*/-1);
+ if (err < 0) {
+ free(*buf);
+ *buf = NULL;
} else
- free(bf);
-
- close(fd);
+ err = 0;
+ close(io.fd);
return err;
}
@@ -475,15 +447,22 @@ int sysfs__read_str(const char *entry, char **buf, size_t *sizep)
int sysfs__read_bool(const char *entry, bool *value)
{
- char *buf;
- size_t size;
- int ret;
+ struct io io;
+ char bf[16];
+ int ret = 0;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return -1;
- ret = sysfs__read_str(entry, &buf, &size);
- if (ret < 0)
- return ret;
+ snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
+ io.fd = open(path, O_RDONLY);
+ if (io.fd < 0)
+ return -errno;
- switch (buf[0]) {
+ io__init(&io, io.fd, bf, sizeof(bf));
+ switch (io__get_char(&io)) {
case '1':
case 'y':
case 'Y':
@@ -497,8 +476,7 @@ int sysfs__read_bool(const char *entry, bool *value)
default:
ret = -1;
}
-
- free(buf);
+ close(io.fd);
return ret;
}
diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
index a77b74c5fb65..84adf8102018 100644
--- a/tools/lib/api/io.h
+++ b/tools/lib/api/io.h
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <linux/types.h>
struct io {
/* File descriptor being read. */
@@ -140,8 +141,8 @@ static inline int io__get_dec(struct io *io, __u64 *dec)
}
}
-/* Read up to and including the first newline following the pattern of getline. */
-static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out)
+/* Read up to and including the first delim. */
+static inline ssize_t io__getdelim(struct io *io, char **line_out, size_t *line_len_out, int delim)
{
char buf[128];
int buf_pos = 0;
@@ -151,7 +152,7 @@ static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_l
/* TODO: reuse previously allocated memory. */
free(*line_out);
- while (ch != '\n') {
+ while (ch != delim) {
ch = io__get_char(io);
if (ch < 0)
@@ -184,4 +185,9 @@ err_out:
return -ENOMEM;
}
+static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out)
+{
+ return io__getdelim(io, line_out, line_len_out, /*delim=*/'\n');
+}
+
#endif /* __API_IO__ */
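
After the rework, io__getline() is a thin wrapper over io__getdelim(), and a delimiter of -1 (which never matches any byte) turns the call into a read-until-EOF, which is what the new filename__read_str() relies on. A hedged usage sketch against this header:

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include "io.h"	/* tools/lib/api/io.h */

/* Sketch: read one '\n'-terminated line, then slurp the file's
 * remainder in a single call by passing delim = -1. */
static void read_two_ways(const char *path)
{
	char bf[128];
	char *line = NULL, *rest = NULL;
	size_t line_len = 0, rest_len = 0;
	struct io io;

	io.fd = open(path, O_RDONLY);
	if (io.fd < 0)
		return;
	io__init(&io, io.fd, bf, sizeof(bf));
	io__getline(&io, &line, &line_len);
	io__getdelim(&io, &rest, &rest_len, /*delim=*/-1);
	free(line);
	free(rest);
	close(io.fd);
}
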
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 2d0c282c8588..b6619199a706 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
- usdt.o zip.o elf.o
+ usdt.o zip.o elf.o features.o
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 9dc9625651dc..97ec005c3c47 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -103,7 +103,7 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
*/
-int probe_memcg_account(void)
+int probe_memcg_account(int token_fd)
{
const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
struct bpf_insn insns[] = {
@@ -120,6 +120,9 @@ int probe_memcg_account(void)
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL");
+ attr.prog_token_fd = token_fd;
+ if (token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
if (prog_fd >= 0) {
@@ -146,7 +149,7 @@ int bump_rlimit_memlock(void)
struct rlimit rlim;
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
- if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+ if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
return 0;
memlock_bumped = true;
@@ -169,7 +172,7 @@ int bpf_map_create(enum bpf_map_type map_type,
__u32 max_entries,
const struct bpf_map_create_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+ const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
union bpf_attr attr;
int fd;
@@ -181,7 +184,7 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err(-EINVAL);
attr.map_type = map_type;
- if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
@@ -191,6 +194,7 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);
attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
attr.map_flags = OPTS_GET(opts, map_flags, 0);
@@ -198,6 +202,8 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.numa_node = OPTS_GET(opts, numa_node, 0);
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
+ attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
+
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
}
@@ -232,7 +238,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -261,8 +267,9 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
attr.kern_version = OPTS_GET(opts, kern_version, 0);
+ attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
- if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
attr.license = ptr_to_u64(license);
@@ -1182,7 +1189,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
union bpf_attr attr;
char *log_buf;
size_t log_size;
@@ -1207,6 +1214,10 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts
attr.btf = ptr_to_u64(btf_data);
attr.btf_size = btf_size;
+
+ attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
+ attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);
+
/* log_level == 0 and log_buf != NULL means "try loading without
* log_buf, but retry with log_buf and log_level=1 on error", which is
* consistent across low-level and high-level BTF and program loading
@@ -1287,3 +1298,20 @@ int bpf_prog_bind_map(int prog_fd, int map_fd,
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
return libbpf_err_errno(ret);
}
+
+int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, token_create);
+ union bpf_attr attr;
+ int fd;
+
+ if (!OPTS_VALID(opts, bpf_token_create_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.token_create.bpffs_fd = bpffs_fd;
+ attr.token_create.flags = OPTS_GET(opts, flags, 0);
+
+ fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index d0f53772bdc0..df0db2f0cdb7 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -35,7 +35,7 @@
extern "C" {
#endif
-int libbpf_set_memlock_rlim(size_t memlock_bytes);
+LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes);
struct bpf_map_create_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
@@ -51,8 +51,12 @@ struct bpf_map_create_opts {
__u32 numa_node;
__u32 map_ifindex;
+ __s32 value_type_btf_obj_fd;
+
+ __u32 token_fd;
+ size_t :0;
};
-#define bpf_map_create_opts__last_field map_ifindex
+#define bpf_map_create_opts__last_field token_fd
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
const char *map_name,
@@ -102,9 +106,10 @@ struct bpf_prog_load_opts {
* If kernel doesn't support this feature, log_size is left unchanged.
*/
__u32 log_true_size;
+ __u32 token_fd;
size_t :0;
};
-#define bpf_prog_load_opts__last_field log_true_size
+#define bpf_prog_load_opts__last_field token_fd
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
@@ -130,9 +135,12 @@ struct bpf_btf_load_opts {
* If kernel doesn't support this feature, log_size is left unchanged.
*/
__u32 log_true_size;
+
+ __u32 btf_flags;
+ __u32 token_fd;
size_t :0;
};
-#define bpf_btf_load_opts__last_field log_true_size
+#define bpf_btf_load_opts__last_field token_fd
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
struct bpf_btf_load_opts *opts);
@@ -182,10 +190,14 @@ LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
/**
* @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
*
- * The parameter *in_batch* is the address of the first element in the batch to read.
- * *out_batch* is an output parameter that should be passed as *in_batch* to subsequent
- * calls to **bpf_map_lookup_batch()**. NULL can be passed for *in_batch* to indicate
- * that the batched lookup starts from the beginning of the map.
+ * The parameter *in_batch* is the address of the first element in the batch to
+ * read. *out_batch* is an output parameter that should be passed as *in_batch*
+ * to subsequent calls to **bpf_map_lookup_batch()**. NULL can be passed for
+ * *in_batch* to indicate that the batched lookup starts from the beginning of
+ * the map. Both *in_batch* and *out_batch* must point to memory large enough to
+ * hold a single key, except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which the memory size must be at
+ * least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point to memory large enough to
* hold *count* items based on the key and value size of the map *map_fd*. The *keys*
@@ -218,7 +230,10 @@ LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
*
* @param fd BPF map file descriptor
* @param in_batch address of the first element in batch to read, can pass NULL to
- * get address of the first element in *out_batch*
+ * get address of the first element in *out_batch*. If not NULL, must be large
+ * enough to hold a key. For **BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH,
+ * LRU_PERCPU_HASH}**, the memory size must be at least 4 bytes wide regardless
+ * of key size.
* @param out_batch output parameter that should be passed to next call as *in_batch*
* @param keys pointer to an array of *count* keys
* @param values pointer to an array large enough for *count* values
@@ -492,7 +507,10 @@ LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
* program corresponding to *prog_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param prog_fd BPF program file descriptor
* @param info pointer to **struct bpf_prog_info** that will be populated with
@@ -509,7 +527,10 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
* map corresponding to *map_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param map_fd BPF map file descriptor
* @param info pointer to **struct bpf_map_info** that will be populated with
@@ -522,11 +543,14 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
/**
- * @brief **bpf_btf_get_info_by_fd()** obtains information about the
+ * @brief **bpf_btf_get_info_by_fd()** obtains information about the
* BTF object corresponding to *btf_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param btf_fd BTF object file descriptor
* @param info pointer to **struct bpf_btf_info** that will be populated with
@@ -543,7 +567,10 @@ LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u
* link corresponding to *link_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param link_fd BPF link file descriptor
* @param info pointer to **struct bpf_link_info** that will be populated with
@@ -640,6 +667,30 @@ struct bpf_test_run_opts {
LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
struct bpf_test_run_opts *opts);
+struct bpf_token_create_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags;
+ size_t :0;
+};
+#define bpf_token_create_opts__last_field flags
+
+/**
+ * @brief **bpf_token_create()** creates a new instance of BPF token derived
+ * from specified BPF FS mount point.
+ *
+ * BPF token created with this API can be passed to bpf() syscall for
+ * commands like BPF_PROG_LOAD, BPF_MAP_CREATE, etc.
+ *
+ * @param bpffs_fd FD for BPF FS instance from which to derive a BPF token
+ * instance.
+ * @param opts optional BPF token creation options, can be NULL
+ *
+ * @return BPF token FD (> 0) on success; negative error code otherwise
+ * (errno is also set to the error code)
+ */
+LIBBPF_API int bpf_token_create(int bpffs_fd,
+ struct bpf_token_create_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
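As a usage sketch for the new API (bpffs_fd is assumed to be an already-opened FD of a BPF FS mount with delegation options configured):

	LIBBPF_OPTS(bpf_token_create_opts, opts); /* flags default to 0 */
	int token_fd;

	token_fd = bpf_token_create(bpffs_fd, &opts);
	if (token_fd < 0)
		return token_fd; /* e.g., -ENOENT if no delegation is set up */
	/* token_fd can now be passed through the token_fd field of
	 * bpf_map_create_opts, bpf_prog_load_opts, or bpf_btf_load_opts,
	 * together with the BPF_F_TOKEN_FD flag
	 */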
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 7325a12692a3..1ce738d91685 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -2,6 +2,8 @@
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__
+#include <bpf/bpf_helpers.h>
+
/*
* enum bpf_field_info_kind is passed as a second argument into
* __builtin_preserve_field_info() built-in to get a specific aspect of
@@ -44,7 +46,7 @@ enum bpf_enum_value_kind {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read_kernel( \
- (void *)dst, \
+ (void *)dst, \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
@@ -143,8 +145,29 @@ enum bpf_enum_value_kind {
} \
})
+/* Differentiator between compilers' builtin implementations. This is
+ * necessary because of a parsing difference: GCC optimizes these constructs
+ * into pointers to the builtin-specific type early during parsing, which
+ * makes it impossible to collect the required type information during
+ * builtin expansion.
+ */
+#ifdef __clang__
+#define ___bpf_typeof(type) ((typeof(type) *) 0)
+#else
+#define ___bpf_typeof1(type, NR) ({ \
+ extern typeof(type) *___concat(bpf_type_tmp_, NR); \
+ ___concat(bpf_type_tmp_, NR); \
+})
+#define ___bpf_typeof(type) ___bpf_typeof1(type, __COUNTER__)
+#endif
+
+#ifdef __clang__
#define ___bpf_field_ref1(field) (field)
-#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
+#define ___bpf_field_ref2(type, field) (___bpf_typeof(type)->field)
+#else
+#define ___bpf_field_ref1(field) (&(field))
+#define ___bpf_field_ref2(type, field) (&(___bpf_typeof(type)->field))
+#endif
#define ___bpf_field_ref(args...) \
___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
@@ -194,7 +217,7 @@ enum bpf_enum_value_kind {
* BTF. Always succeeds.
*/
#define bpf_core_type_id_local(type) \
- __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
+ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_LOCAL)
/*
* Convenience macro to get BTF type ID of a target kernel's type that matches
@@ -204,7 +227,7 @@ enum bpf_enum_value_kind {
* - 0, if no matching type was found in a target kernel BTF.
*/
#define bpf_core_type_id_kernel(type) \
- __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
+ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_TARGET)
/*
* Convenience macro to check that provided named type
@@ -214,7 +237,7 @@ enum bpf_enum_value_kind {
* 0, if no matching type is found.
*/
#define bpf_core_type_exists(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_EXISTS)
/*
* Convenience macro to check that provided named type
@@ -224,7 +247,7 @@ enum bpf_enum_value_kind {
* 0, if the type does not match any in the target kernel
*/
#define bpf_core_type_matches(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_MATCHES)
/*
* Convenience macro to get the byte size of a provided named type
@@ -234,7 +257,7 @@ enum bpf_enum_value_kind {
* 0, if no matching type is found.
*/
#define bpf_core_type_size(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_SIZE)
/*
* Convenience macro to check that provided enumerator value is defined in
@@ -244,8 +267,13 @@ enum bpf_enum_value_kind {
* kernel's BTF;
* 0, if no matching enum and/or enum value within that enum is found.
*/
+#ifdef __clang__
#define bpf_core_enum_value_exists(enum_type, enum_value) \
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
+#else
+#define bpf_core_enum_value_exists(enum_type, enum_value) \
+ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_EXISTS)
+#endif
/*
* Convenience macro to get the integer value of an enumerator value in
@@ -255,8 +283,13 @@ enum bpf_enum_value_kind {
* present in target kernel's BTF;
* 0, if no matching enum and/or enum value within that enum is found.
*/
+#ifdef __clang__
#define bpf_core_enum_value(enum_type, enum_value) \
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
+#else
+#define bpf_core_enum_value(enum_type, enum_value) \
+ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_VALUE)
+#endif
/*
* bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
@@ -268,7 +301,7 @@ enum bpf_enum_value_kind {
* a relocation, which records BTF type ID describing root struct/union and an
* accessor string which describes exact embedded field that was used to take
* an address. See detailed description of this relocation format and
- * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
+ * semantics in comments to struct bpf_core_relo in include/uapi/linux/bpf.h.
*
* This relocation allows libbpf to adjust BPF instruction to use correct
* actual field offset, based on target kernel BTF type that matches original
@@ -292,6 +325,17 @@ enum bpf_enum_value_kind {
#define bpf_core_read_user_str(dst, sz, src) \
bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
+extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
+
+/*
+ * Cast provided pointer *ptr* into a pointer to a specified *type* in such
+ * a way that BPF verifier will become aware of associated kernel-side BTF
+ * type. This allows direct access to members of kernel types without the
+ * need to use BPF_CORE_READ() macros.
+ */
+#define bpf_core_cast(ptr, type) \
+ ((typeof(type) *)bpf_rdonly_cast((ptr), bpf_core_type_id_kernel(type)))
+
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
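A hypothetical BPF-side sketch of bpf_core_cast() (assumes vmlinux.h, bpf_helpers.h and bpf_tracing.h; the probed function and accessed field are illustrative):

SEC("kprobe/icmp_rcv")
int probe_icmp(struct pt_regs *ctx)
{
	void *p = (void *)PT_REGS_PARM1(ctx);
	/* make the verifier aware that p is really a 'struct sk_buff *' */
	struct sk_buff *skb = bpf_core_cast(p, struct sk_buff);

	/* read-only direct member access, no BPF_CORE_READ() needed */
	bpf_printk("skb len: %u", skb->len);
	return 0;
}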
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 2324cc42b017..79eaa581be98 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -190,6 +190,8 @@ enum libbpf_tristate {
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
+#define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
+#define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
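A hypothetical sketch of how the new tags might annotate a global subprog (the subprog name and body are assumptions):

/* 'task' must be a trusted kernel pointer, but callers may pass NULL */
__noinline int handle_task(struct task_struct *task __arg_trusted __arg_nullable)
{
	if (!task)
		return 0;
	return task->pid;
}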
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index ee95fd379d4d..a17b4c9c4213 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1079,6 +1079,11 @@ struct btf *btf__new(const void *data, __u32 size)
return libbpf_ptr(btf_new(data, size, NULL));
}
+struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
+{
+ return libbpf_ptr(btf_new(data, size, base_btf));
+}
+
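A usage sketch for btf__new_split() (raw_data/raw_size stand for module BTF bytes obtained elsewhere and are assumptions; libbpf 1.0 error conventions assumed):

	struct btf *vmlinux_btf, *mod_btf;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (!vmlinux_btf)
		return -errno;
	/* parse split (module) BTF on top of the vmlinux base BTF */
	mod_btf = btf__new_split(raw_data, raw_size, vmlinux_btf);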
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
struct btf_ext **btf_ext)
{
@@ -1317,7 +1322,9 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
-int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
+int btf_load_into_kernel(struct btf *btf,
+ char *log_buf, size_t log_sz, __u32 log_level,
+ int token_fd)
{
LIBBPF_OPTS(bpf_btf_load_opts, opts);
__u32 buf_sz = 0, raw_size;
@@ -1367,6 +1374,10 @@ retry_load:
opts.log_level = log_level;
}
+ opts.token_fd = token_fd;
+ if (token_fd)
+ opts.btf_flags |= BPF_F_TOKEN_FD;
+
btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
if (btf->fd < 0) {
/* time to turn on verbose mode and try again */
@@ -1394,7 +1405,7 @@ done:
int btf__load_into_kernel(struct btf *btf)
{
- return btf_load_into_kernel(btf, NULL, 0, 0);
+ return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
int btf__fd(const struct btf *btf)
@@ -3039,12 +3050,16 @@ done:
return btf_ext;
}
-const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
+const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
*size = btf_ext->data_size;
return btf_ext->data;
}
+__attribute__((alias("btf_ext__raw_data")))
+const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
+
struct btf_dedup;
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
@@ -4926,10 +4941,9 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
*/
struct btf *btf__load_vmlinux_btf(void)
{
+ const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
+	/* fallback locations, trying to find vmlinux on disk */
const char *locations[] = {
- /* try canonical vmlinux BTF through sysfs first */
- "/sys/kernel/btf/vmlinux",
- /* fall back to trying to find vmlinux on disk otherwise */
"/boot/vmlinux-%1$s",
"/lib/modules/%1$s/vmlinux-%1$s",
"/lib/modules/%1$s/build/vmlinux",
@@ -4943,8 +4957,23 @@ struct btf *btf__load_vmlinux_btf(void)
struct btf *btf;
int i, err;
- uname(&buf);
+ /* is canonical sysfs location accessible? */
+ if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
+ pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
+ sysfs_btf_path);
+ } else {
+ btf = btf__parse(sysfs_btf_path, NULL);
+ if (!btf) {
+ err = -errno;
+ pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
+ return libbpf_err_ptr(err);
+ }
+		pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
+ return btf;
+ }
+ /* try fallback locations */
+ uname(&buf);
for (i = 0; i < ARRAY_SIZE(locations); i++) {
snprintf(path, PATH_MAX, locations[i], buf.release);
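Given the sysfs-first behavior above, a typical caller sketch (libbpf 1.0 error conventions assumed):

	struct btf *btf;

	btf = btf__load_vmlinux_btf();
	if (!btf) {
		/* e.g., CONFIG_DEBUG_INFO_BTF disabled and no vmlinux image
		 * found in any of the fallback locations
		 */
		fprintf(stderr, "failed to load vmlinux BTF: %d\n", -errno);
		return -errno;
	}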
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index b02faec748a5..c92e02394159 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -11,8 +11,6 @@
#include "libbpf_internal.h"
#include "str_error.h"
-#define STRERR_BUFSIZE 128
-
/* A SHT_GNU_versym section holds 16-bit words. This bit is set if
* the symbol is hidden and can only be seen when referenced using an
* explicit version number. This is a GNU extension.
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
new file mode 100644
index 000000000000..6b0738ad7063
--- /dev/null
+++ b/tools/lib/bpf/features.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <linux/kernel.h>
+#include <linux/filter.h>
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_common.h"
+#include "libbpf_internal.h"
+#include "str_error.h"
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+int probe_fd(int fd)
+{
+ if (fd >= 0)
+ close(fd);
+ return fd >= 0;
+}
+
+static int probe_kern_prog_name(int token_fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.license = ptr_to_u64("GPL");
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
+ attr.prog_token_fd = token_fd;
+ if (token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
+ libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
+
+ /* make sure loading with name works */
+ ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
+ return probe_fd(ret);
+}
+
+static int probe_kern_global_data(int token_fd)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .token_fd = token_fd,
+ .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, map, insn_cnt = ARRAY_SIZE(insns);
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
+ }
+
+ insns[0].imm = map;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
+ close(map);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf(int token_fd)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_func(int token_fd)
+{
+ static const char strs[] = "\0int\0x\0a";
+ /* void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_func_global(int token_fd)
+{
+ static const char strs[] = "\0int\0x\0a";
+ /* static void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_datasec(int token_fd)
+{
+ static const char strs[] = "\0x\0.data";
+ /* static int a; */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC val */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_float(int token_fd)
+{
+ static const char strs[] = "\0float";
+ __u32 types[] = {
+ /* float */
+ BTF_TYPE_FLOAT_ENC(1, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_decl_tag(int token_fd)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* attr */
+ BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_type_tag(int token_fd)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* attr */
+ BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
+ /* ptr */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_array_mmap(int token_fd)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
+ .token_fd = token_fd,
+ );
+ int fd;
+
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_exp_attach_type(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ /* use any valid combination of program type and (optional)
+	 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS)
+ * to see if kernel supports expected_attach_type field for
+ * BPF_PROG_LOAD command
+ */
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_probe_read_kernel(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
+ BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
+ BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_prog_bind_map(int token_fd)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .token_fd = token_fd,
+ .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
+ }
+
+ prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
+ if (prog < 0) {
+ close(map);
+ return 0;
+ }
+
+ ret = bpf_prog_bind_map(prog, map, NULL);
+
+ close(map);
+ close(prog);
+
+ return ret >= 0;
+}
+
+static int probe_module_btf(int token_fd)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+ struct bpf_btf_info info;
+ __u32 len = sizeof(info);
+ char name[16];
+ int fd, err;
+
+ fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
+ if (fd < 0)
+ return 0; /* BTF not supported at all */
+
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
+
+ /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
+ * kernel's module BTF support coincides with support for
+ * name/name_len fields in struct bpf_btf_info.
+ */
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ close(fd);
+ return !err;
+}
+
+static int probe_perf_link(int token_fd)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int prog_fd, link_fd, err;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), &opts);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* use invalid perf_event FD to get EBADF, if link is supported;
+ * otherwise EINVAL should be returned
+ */
+ link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
+static int probe_uprobe_multi_link(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, link_fd, err;
+ unsigned long offset = 0;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), &load_opts);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* Creating uprobe in '/' binary should fail with -EBADF. */
+ link_opts.uprobe_multi.path = "/";
+ link_opts.uprobe_multi.offsets = &offset;
+ link_opts.uprobe_multi.cnt = 1;
+
+ link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
+static int probe_kern_bpf_cookie(int token_fd)
+{
+ struct bpf_insn insns[] = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, insn_cnt = ARRAY_SIZE(insns);
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf_enum64(int token_fd)
+{
+ static const char strs[] = "\0enum64";
+ __u32 types[] = {
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_arg_ctx_tag(int token_fd)
+{
+ static const char strs[] = "\0a\0b\0arg:ctx\0";
+ const __u32 types[] = {
+ /* [1] INT */
+ BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
+ /* [2] PTR -> VOID */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
+ /* [3] FUNC_PROTO `int(void *a)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(1 /* "a" */, 2),
+ /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
+ BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
+ /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(3 /* "b" */, 2),
+ /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
+ BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
+ /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
+ BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
+ };
+ const struct bpf_insn insns[] = {
+ /* main prog */
+ BPF_CALL_REL(+1),
+ BPF_EXIT_INSN(),
+ /* global subprog */
+ BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
+ BPF_EXIT_INSN(),
+ };
+ const struct bpf_func_info_min func_infos[] = {
+ { 0, 4 }, /* main prog -> FUNC 'a' */
+ { 2, 6 }, /* subprog -> FUNC 'b' */
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
+
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
+ if (btf_fd < 0)
+ return 0;
+
+ opts.prog_btf_fd = btf_fd;
+ opts.func_info = &func_infos;
+ opts.func_info_cnt = ARRAY_SIZE(func_infos);
+ opts.func_info_rec_size = sizeof(func_infos[0]);
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
+ "GPL", insns, insn_cnt, &opts);
+ close(btf_fd);
+
+ return probe_fd(prog_fd);
+}
+
+typedef int (*feature_probe_fn)(int /* token_fd */);
+
+static struct kern_feature_cache feature_cache;
+
+static struct kern_feature_desc {
+ const char *desc;
+ feature_probe_fn probe;
+} feature_probes[__FEAT_CNT] = {
+ [FEAT_PROG_NAME] = {
+ "BPF program name", probe_kern_prog_name,
+ },
+ [FEAT_GLOBAL_DATA] = {
+ "global variables", probe_kern_global_data,
+ },
+ [FEAT_BTF] = {
+ "minimal BTF", probe_kern_btf,
+ },
+ [FEAT_BTF_FUNC] = {
+ "BTF functions", probe_kern_btf_func,
+ },
+ [FEAT_BTF_GLOBAL_FUNC] = {
+ "BTF global function", probe_kern_btf_func_global,
+ },
+ [FEAT_BTF_DATASEC] = {
+ "BTF data section and variable", probe_kern_btf_datasec,
+ },
+ [FEAT_ARRAY_MMAP] = {
+ "ARRAY map mmap()", probe_kern_array_mmap,
+ },
+ [FEAT_EXP_ATTACH_TYPE] = {
+ "BPF_PROG_LOAD expected_attach_type attribute",
+ probe_kern_exp_attach_type,
+ },
+ [FEAT_PROBE_READ_KERN] = {
+ "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
+ },
+ [FEAT_PROG_BIND_MAP] = {
+ "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
+ },
+ [FEAT_MODULE_BTF] = {
+ "module BTF support", probe_module_btf,
+ },
+ [FEAT_BTF_FLOAT] = {
+ "BTF_KIND_FLOAT support", probe_kern_btf_float,
+ },
+ [FEAT_PERF_LINK] = {
+ "BPF perf link support", probe_perf_link,
+ },
+ [FEAT_BTF_DECL_TAG] = {
+ "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
+ },
+ [FEAT_BTF_TYPE_TAG] = {
+ "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
+ },
+ [FEAT_MEMCG_ACCOUNT] = {
+ "memcg-based memory accounting", probe_memcg_account,
+ },
+ [FEAT_BPF_COOKIE] = {
+ "BPF cookie support", probe_kern_bpf_cookie,
+ },
+ [FEAT_BTF_ENUM64] = {
+ "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
+ },
+ [FEAT_SYSCALL_WRAPPER] = {
+ "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
+ },
+ [FEAT_UPROBE_MULTI_LINK] = {
+ "BPF multi-uprobe link support", probe_uprobe_multi_link,
+ },
+ [FEAT_ARG_CTX_TAG] = {
+ "kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
+ },
+};
+
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
+{
+ struct kern_feature_desc *feat = &feature_probes[feat_id];
+ int ret;
+
+ /* assume global feature cache, unless custom one is provided */
+ if (!cache)
+ cache = &feature_cache;
+
+ if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
+ ret = feat->probe(cache->token_fd);
+ if (ret > 0) {
+ WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
+ } else if (ret == 0) {
+ WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
+ } else {
+ pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
+ WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
+ }
+ }
+
+ return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
+}
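Internally, the usage pattern for the relocated feature-detection code looks roughly like this sketch (the kern_feature_cache layout from libbpf_internal.h is an assumption):

	struct kern_feature_cache cache = { .token_fd = token_fd };
	bool has_cookie, has_global_data;

	/* NULL selects the global (token-less) feature cache */
	has_cookie = feat_supported(NULL, FEAT_BPF_COOKIE);
	/* per-object cache: the probe is performed with the BPF token and
	 * the result is memoized in 'cache'
	 */
	has_global_data = feat_supported(&cache, FEAT_GLOBAL_DATA);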
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index afd09571c482..6c2979f1b471 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -33,6 +33,7 @@
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
+#include <linux/bpf_perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
@@ -59,6 +60,8 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
+
#define BPF_INSN_SZ (sizeof(struct bpf_insn))
/* vsprintf() in __base_pr() uses nonliteral format string. It may break
@@ -70,6 +73,7 @@
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
+static int map_set_def_max_entries(struct bpf_map *map);
static const char * const attach_type_name[] = {
[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
@@ -527,6 +531,7 @@ struct bpf_map {
struct bpf_map_def def;
__u32 numa_node;
__u32 btf_var_idx;
+ int mod_btf_fd;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 btf_vmlinux_value_type_id;
@@ -693,6 +698,10 @@ struct bpf_object {
struct usdt_manager *usdt_man;
+ struct kern_feature_cache *feat_cache;
+ char *token_path;
+ int token_fd;
+
char path[];
};
@@ -930,22 +939,29 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t,
return NULL;
}
+static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
+ __u16 kind, struct btf **res_btf,
+ struct module_btf **res_mod_btf);
+
#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind);
static int
-find_struct_ops_kern_types(const struct btf *btf, const char *tname,
+find_struct_ops_kern_types(struct bpf_object *obj, const char *tname,
+ struct module_btf **mod_btf,
const struct btf_type **type, __u32 *type_id,
const struct btf_type **vtype, __u32 *vtype_id,
const struct btf_member **data_member)
{
const struct btf_type *kern_type, *kern_vtype;
const struct btf_member *kern_data_member;
+ struct btf *btf;
__s32 kern_vtype_id, kern_type_id;
__u32 i;
- kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
+ &btf, mod_btf);
if (kern_type_id < 0) {
pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
tname);
@@ -998,15 +1014,30 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map)
return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}
+static bool is_valid_st_ops_program(struct bpf_object *obj,
+ const struct bpf_program *prog)
+{
+ int i;
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ if (&obj->programs[i] == prog)
+ return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
+ }
+
+ return false;
+}
+
/* Init the map's fields that depend on kern_btf */
-static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
- const struct btf *btf,
- const struct btf *kern_btf)
+static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
const struct btf_member *member, *kern_member, *kern_data_member;
const struct btf_type *type, *kern_type, *kern_vtype;
__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
+ struct bpf_object *obj = map->obj;
+ const struct btf *btf = obj->btf;
struct bpf_struct_ops *st_ops;
+ const struct btf *kern_btf;
+ struct module_btf *mod_btf;
void *data, *kern_data;
const char *tname;
int err;
@@ -1014,16 +1045,19 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
st_ops = map->st_ops;
type = st_ops->type;
tname = st_ops->tname;
- err = find_struct_ops_kern_types(kern_btf, tname,
+ err = find_struct_ops_kern_types(obj, tname, &mod_btf,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
&kern_data_member);
if (err)
return err;
+ kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
+
pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
+ map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
map->def.value_size = kern_vtype->size;
map->btf_vmlinux_value_type_id = kern_vtype_id;
@@ -1081,9 +1115,16 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
if (btf_is_ptr(mtype)) {
struct bpf_program *prog;
- prog = st_ops->progs[i];
+ /* Update the value from the shadow type */
+ prog = *(void **)mdata;
+ st_ops->progs[i] = prog;
if (!prog)
continue;
+ if (!is_valid_st_ops_program(obj, prog)) {
+ pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
+ map->name, mname);
+ return -ENOTSUP;
+ }
kern_mtype = skip_mods_and_typedefs(kern_btf,
kern_mtype->type,
@@ -1099,6 +1140,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
return -ENOTSUP;
}
+ if (mod_btf)
+ prog->attach_btf_obj_fd = mod_btf->fd;
prog->attach_btf_id = kern_type_id;
prog->expected_attach_type = kern_member_idx;
@@ -1141,8 +1184,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
if (!bpf_map__is_struct_ops(map))
continue;
- err = bpf_map__init_kern_struct_ops(map, obj->btf,
- obj->btf_vmlinux);
+ err = bpf_map__init_kern_struct_ops(map);
if (err)
return err;
}
@@ -1207,6 +1249,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
map->name = strdup(var_name);
if (!map->name)
return -ENOMEM;
+ map->btf_value_type_id = type_id;
map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
map->def.key_size = sizeof(int);
@@ -1503,11 +1546,20 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
return ERR_PTR(-ENOENT);
}
+/* Some versions of Android don't provide memfd_create() in their libc
+ * implementation, so avoid complications and just go straight to Linux
+ * syscall.
+ */
+static int sys_memfd_create(const char *name, unsigned flags)
+{
+ return syscall(__NR_memfd_create, name, flags);
+}
+
static int create_placeholder_fd(void)
{
int fd;
- fd = ensure_good_fd(memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
+ fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
if (fd < 0)
return -errno;
return fd;
@@ -2216,7 +2268,7 @@ static int build_map_pin_path(struct bpf_map *map, const char *path)
int err;
if (!path)
- path = "/sys/fs/bpf";
+ path = BPF_FS_DEFAULT_PATH;
err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
if (err)
@@ -3225,7 +3277,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
} else {
/* currently BPF_BTF_LOAD only supports log_level 1 */
err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
- obj->log_level ? 1 : 0);
+ obj->log_level ? 1 : 0, obj->token_fd);
}
if (sanitize) {
if (!err) {
@@ -4546,6 +4598,58 @@ int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
return 0;
}
+static int bpf_object_prepare_token(struct bpf_object *obj)
+{
+ const char *bpffs_path;
+ int bpffs_fd = -1, token_fd, err;
+ bool mandatory;
+ enum libbpf_print_level level;
+
+	/* token is explicitly disabled */
+	if (obj->token_path && obj->token_path[0] == '\0') {
+		pr_debug("object '%s': token is explicitly disabled, skipping...\n", obj->name);
+ return 0;
+ }
+
+ mandatory = obj->token_path != NULL;
+ level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
+
+ bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
+	bpffs_fd = open(bpffs_path, O_DIRECTORY);
+ if (bpffs_fd < 0) {
+ err = -errno;
+ __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
+ obj->name, err, bpffs_path,
+ mandatory ? "" : ", skipping optional step...");
+ return mandatory ? err : 0;
+ }
+
+	token_fd = bpf_token_create(bpffs_fd, NULL);
+ close(bpffs_fd);
+ if (token_fd < 0) {
+ if (!mandatory && token_fd == -ENOENT) {
+ pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
+ obj->name, bpffs_path);
+ return 0;
+ }
+ __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
+ obj->name, token_fd, bpffs_path,
+ mandatory ? "" : ", skipping optional step...");
+ return mandatory ? token_fd : 0;
+ }
+
+ obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
+ if (!obj->feat_cache) {
+ close(token_fd);
+ return -ENOMEM;
+ }
+
+ obj->token_fd = token_fd;
+ obj->feat_cache->token_fd = token_fd;
+
+ return 0;
+}
+
static int
bpf_object__probe_loading(struct bpf_object *obj)
{
@@ -4555,6 +4659,10 @@ bpf_object__probe_loading(struct bpf_object *obj)
BPF_EXIT_INSN(),
};
int ret, insn_cnt = ARRAY_SIZE(insns);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = obj->token_fd,
+ .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
+ );
if (obj->gen_loader)
return 0;
@@ -4564,9 +4672,9 @@ bpf_object__probe_loading(struct bpf_object *obj)
pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
/* make sure basic loading works */
- ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
if (ret < 0)
- ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
if (ret < 0) {
ret = errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4581,462 +4689,18 @@ bpf_object__probe_loading(struct bpf_object *obj)
return 0;
}
-static int probe_fd(int fd)
-{
- if (fd >= 0)
- close(fd);
- return fd >= 0;
-}
-
-static int probe_kern_prog_name(void)
-{
- const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- union bpf_attr attr;
- int ret;
-
- memset(&attr, 0, attr_sz);
- attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr.license = ptr_to_u64("GPL");
- attr.insns = ptr_to_u64(insns);
- attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
- libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
-
- /* make sure loading with name works */
- ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
- return probe_fd(ret);
-}
-
-static int probe_kern_global_data(void)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- struct bpf_insn insns[] = {
- BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int ret, map, insn_cnt = ARRAY_SIZE(insns);
-
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
- if (map < 0) {
- ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
- return ret;
- }
-
- insns[0].imm = map;
-
- ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
- close(map);
- return probe_fd(ret);
-}
-
-static int probe_kern_btf(void)
-{
- static const char strs[] = "\0int";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_func(void)
-{
- static const char strs[] = "\0int\0x\0a";
- /* void x(int a) {} */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* FUNC_PROTO */ /* [2] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
- BTF_PARAM_ENC(7, 1),
- /* FUNC x */ /* [3] */
- BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_func_global(void)
-{
- static const char strs[] = "\0int\0x\0a";
- /* static void x(int a) {} */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* FUNC_PROTO */ /* [2] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
- BTF_PARAM_ENC(7, 1),
- /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
- BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_datasec(void)
-{
- static const char strs[] = "\0x\0.data";
- /* static int a; */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* VAR x */ /* [2] */
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
- BTF_VAR_STATIC,
- /* DATASEC val */ /* [3] */
- BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
- BTF_VAR_SECINFO_ENC(2, 0, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_float(void)
-{
- static const char strs[] = "\0float";
- __u32 types[] = {
- /* float */
- BTF_TYPE_FLOAT_ENC(1, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_decl_tag(void)
-{
- static const char strs[] = "\0tag";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* VAR x */ /* [2] */
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
- BTF_VAR_STATIC,
- /* attr */
- BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_type_tag(void)
-{
- static const char strs[] = "\0tag";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* attr */
- BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
- /* ptr */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_array_mmap(void)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
- int fd;
-
- fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
- return probe_fd(fd);
-}
-
-static int probe_kern_exp_attach_type(void)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int fd, insn_cnt = ARRAY_SIZE(insns);
-
- /* use any valid combination of program type and (optional)
- * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
- * to see if kernel supports expected_attach_type field for
- * BPF_PROG_LOAD command
- */
- fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
- return probe_fd(fd);
-}
-
-static int probe_kern_probe_read_kernel(void)
-{
- struct bpf_insn insns[] = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
- BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
- BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
- BPF_EXIT_INSN(),
- };
- int fd, insn_cnt = ARRAY_SIZE(insns);
-
- fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
- return probe_fd(fd);
-}
-
-static int probe_prog_bind_map(void)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
-
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
- if (map < 0) {
- ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
- return ret;
- }
-
- prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
- if (prog < 0) {
- close(map);
- return 0;
- }
-
- ret = bpf_prog_bind_map(prog, map, NULL);
-
- close(map);
- close(prog);
-
- return ret >= 0;
-}
-
-static int probe_module_btf(void)
-{
- static const char strs[] = "\0int";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
- };
- struct bpf_btf_info info;
- __u32 len = sizeof(info);
- char name[16];
- int fd, err;
-
- fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
- if (fd < 0)
- return 0; /* BTF not supported at all */
-
- memset(&info, 0, sizeof(info));
- info.name = ptr_to_u64(name);
- info.name_len = sizeof(name);
-
- /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
- * kernel's module BTF support coincides with support for
- * name/name_len fields in struct bpf_btf_info.
- */
- err = bpf_btf_get_info_by_fd(fd, &info, &len);
- close(fd);
- return !err;
-}
-
-static int probe_perf_link(void)
-{
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int prog_fd, link_fd, err;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
- insns, ARRAY_SIZE(insns), NULL);
- if (prog_fd < 0)
- return -errno;
-
- /* use invalid perf_event FD to get EBADF, if link is supported;
- * otherwise EINVAL should be returned
- */
- link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
- err = -errno; /* close() can clobber errno */
-
- if (link_fd >= 0)
- close(link_fd);
- close(prog_fd);
-
- return link_fd < 0 && err == -EBADF;
-}
-
-static int probe_uprobe_multi_link(void)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
- .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
- );
- LIBBPF_OPTS(bpf_link_create_opts, link_opts);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int prog_fd, link_fd, err;
- unsigned long offset = 0;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
- insns, ARRAY_SIZE(insns), &load_opts);
- if (prog_fd < 0)
- return -errno;
-
- /* Creating uprobe in '/' binary should fail with -EBADF. */
- link_opts.uprobe_multi.path = "/";
- link_opts.uprobe_multi.offsets = &offset;
- link_opts.uprobe_multi.cnt = 1;
-
- link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
- err = -errno; /* close() can clobber errno */
-
- if (link_fd >= 0)
- close(link_fd);
- close(prog_fd);
-
- return link_fd < 0 && err == -EBADF;
-}
-
-static int probe_kern_bpf_cookie(void)
-{
- struct bpf_insn insns[] = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
- BPF_EXIT_INSN(),
- };
- int ret, insn_cnt = ARRAY_SIZE(insns);
-
- ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
- return probe_fd(ret);
-}
-
-static int probe_kern_btf_enum64(void)
-{
- static const char strs[] = "\0enum64";
- __u32 types[] = {
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_syscall_wrapper(void);
-
-enum kern_feature_result {
- FEAT_UNKNOWN = 0,
- FEAT_SUPPORTED = 1,
- FEAT_MISSING = 2,
-};
-
-typedef int (*feature_probe_fn)(void);
-
-static struct kern_feature_desc {
- const char *desc;
- feature_probe_fn probe;
- enum kern_feature_result res;
-} feature_probes[__FEAT_CNT] = {
- [FEAT_PROG_NAME] = {
- "BPF program name", probe_kern_prog_name,
- },
- [FEAT_GLOBAL_DATA] = {
- "global variables", probe_kern_global_data,
- },
- [FEAT_BTF] = {
- "minimal BTF", probe_kern_btf,
- },
- [FEAT_BTF_FUNC] = {
- "BTF functions", probe_kern_btf_func,
- },
- [FEAT_BTF_GLOBAL_FUNC] = {
- "BTF global function", probe_kern_btf_func_global,
- },
- [FEAT_BTF_DATASEC] = {
- "BTF data section and variable", probe_kern_btf_datasec,
- },
- [FEAT_ARRAY_MMAP] = {
- "ARRAY map mmap()", probe_kern_array_mmap,
- },
- [FEAT_EXP_ATTACH_TYPE] = {
- "BPF_PROG_LOAD expected_attach_type attribute",
- probe_kern_exp_attach_type,
- },
- [FEAT_PROBE_READ_KERN] = {
- "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
- },
- [FEAT_PROG_BIND_MAP] = {
- "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
- },
- [FEAT_MODULE_BTF] = {
- "module BTF support", probe_module_btf,
- },
- [FEAT_BTF_FLOAT] = {
- "BTF_KIND_FLOAT support", probe_kern_btf_float,
- },
- [FEAT_PERF_LINK] = {
- "BPF perf link support", probe_perf_link,
- },
- [FEAT_BTF_DECL_TAG] = {
- "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
- },
- [FEAT_BTF_TYPE_TAG] = {
- "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
- },
- [FEAT_MEMCG_ACCOUNT] = {
- "memcg-based memory accounting", probe_memcg_account,
- },
- [FEAT_BPF_COOKIE] = {
- "BPF cookie support", probe_kern_bpf_cookie,
- },
- [FEAT_BTF_ENUM64] = {
- "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
- },
- [FEAT_SYSCALL_WRAPPER] = {
- "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
- },
- [FEAT_UPROBE_MULTI_LINK] = {
- "BPF multi-uprobe link support", probe_uprobe_multi_link,
- },
-};
-
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
- struct kern_feature_desc *feat = &feature_probes[feat_id];
- int ret;
-
- if (obj && obj->gen_loader)
+ if (obj->gen_loader)
/* To generate loader program assume the latest kernel
* to avoid doing extra prog_load, map_create syscalls.
*/
return true;
- if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
- ret = feat->probe();
- if (ret > 0) {
- WRITE_ONCE(feat->res, FEAT_SUPPORTED);
- } else if (ret == 0) {
- WRITE_ONCE(feat->res, FEAT_MISSING);
- } else {
- pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
- WRITE_ONCE(feat->res, FEAT_MISSING);
- }
- }
+ if (obj->token_fd)
+ return feat_supported(obj->feat_cache, feat_id);
- return READ_ONCE(feat->res) == FEAT_SUPPORTED;
+ return feat_supported(NULL, feat_id);
}
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
@@ -5160,9 +4824,17 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.map_flags = def->map_flags;
create_attr.numa_node = map->numa_node;
create_attr.map_extra = map->map_extra;
+ create_attr.token_fd = obj->token_fd;
+ if (obj->token_fd)
+ create_attr.map_flags |= BPF_F_TOKEN_FD;
- if (bpf_map__is_struct_ops(map))
+ if (bpf_map__is_struct_ops(map)) {
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
+ if (map->mod_btf_fd >= 0) {
+ create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
+ create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
+ }
+ }
if (obj->btf && btf__fd(obj->btf) >= 0) {
create_attr.btf_fd = btf__fd(obj->btf);
@@ -5172,6 +4844,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (bpf_map_type__is_map_in_map(def->type)) {
if (map->inner_map) {
+ err = map_set_def_max_entries(map->inner_map);
+ if (err)
+ return err;
err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
pr_warn("map '%s': failed to create inner map: %d\n",
@@ -5203,6 +4878,10 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.btf_value_type_id = 0;
map->btf_key_type_id = 0;
map->btf_value_type_id = 0;
+ break;
+ case BPF_MAP_TYPE_STRUCT_OPS:
+ create_attr.btf_value_type_id = 0;
+ break;
default:
break;
}
@@ -6695,6 +6374,14 @@ static struct {
/* all other program types don't have "named" context structs */
};
+/* Forward declarations of the arch-specific underlying types of the
+ * bpf_user_pt_regs_t typedef, for the __builtin_types_compatible_p() checks
+ * below; with this approach we don't need any extra arch-specific #ifdef
+ * guards.
+ */
+struct pt_regs;
+struct user_pt_regs;
+struct user_regs_struct;
+
static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
const char *subprog_name, int arg_idx,
int arg_type_id, const char *ctx_name)
@@ -6735,11 +6422,21 @@ static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_pro
/* special cases */
switch (prog->type) {
case BPF_PROG_TYPE_KPROBE:
- case BPF_PROG_TYPE_PERF_EVENT:
/* `struct pt_regs *` is expected, but we need to fix up */
if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
return true;
break;
+ case BPF_PROG_TYPE_PERF_EVENT:
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
+ btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
+ return true;
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
+ btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
+ return true;
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
+ btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
+ return true;
+ break;
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
/* allow u64* as ctx */
@@ -6818,69 +6515,6 @@ static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_progr
return fn_id;
}
-static int probe_kern_arg_ctx_tag(void)
-{
- /* To minimize merge conflicts with BPF token series that refactors
- * feature detection code a lot, we don't integrate
- * probe_kern_arg_ctx_tag() into kernel_supports() feature-detection
- * framework yet, doing our own caching internally.
- * This will be cleaned up a bit later when bpf/bpf-next trees settle.
- */
- static int cached_result = -1;
- static const char strs[] = "\0a\0b\0arg:ctx\0";
- const __u32 types[] = {
- /* [1] INT */
- BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
- /* [2] PTR -> VOID */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
- /* [3] FUNC_PROTO `int(void *a)` */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
- BTF_PARAM_ENC(1 /* "a" */, 2),
- /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
- BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
- /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
- BTF_PARAM_ENC(3 /* "b" */, 2),
- /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
- BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
- /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
- BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
- };
- const struct bpf_insn insns[] = {
- /* main prog */
- BPF_CALL_REL(+1),
- BPF_EXIT_INSN(),
- /* global subprog */
- BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
- BPF_EXIT_INSN(),
- };
- const struct bpf_func_info_min func_infos[] = {
- { 0, 4 }, /* main prog -> FUNC 'a' */
- { 2, 6 }, /* subprog -> FUNC 'b' */
- };
- LIBBPF_OPTS(bpf_prog_load_opts, opts);
- int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
-
- if (cached_result >= 0)
- return cached_result;
-
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
- if (btf_fd < 0)
- return 0;
-
- opts.prog_btf_fd = btf_fd;
- opts.func_info = &func_infos;
- opts.func_info_cnt = ARRAY_SIZE(func_infos);
- opts.func_info_rec_size = sizeof(func_infos[0]);
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
- "GPL", insns, insn_cnt, &opts);
- close(btf_fd);
-
- cached_result = probe_fd(prog_fd);
- return cached_result;
-}
-
/* Check if main program or global subprog's function prototype has `arg:ctx`
* argument tags, and, if necessary, substitute correct type to match what BPF
* verifier would expect, taking into account specific program type. This
@@ -6905,7 +6539,7 @@ static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_progra
return 0;
/* don't do any fix ups if kernel natively supports __arg_ctx */
- if (probe_kern_arg_ctx_tag() > 0)
+ if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
return 0;
/* some BPF program types just don't have named context structs, so
@@ -7473,6 +7107,10 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.prog_flags = prog->prog_flags;
load_attr.fd_array = obj->fd_array;
+ load_attr.token_fd = obj->token_fd;
+ if (obj->token_fd)
+ load_attr.prog_flags |= BPF_F_TOKEN_FD;
+
/* adjust load_attr if sec_def provides custom preload callback */
if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
@@ -7918,7 +7556,7 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig, *btf_tmp_path;
+ const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
struct bpf_object *obj;
char tmp_name[64];
int err;
@@ -7955,6 +7593,16 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (log_size && !log_buf)
return ERR_PTR(-EINVAL);
+ token_path = OPTS_GET(opts, bpf_token_path, NULL);
+ /* if user didn't specify bpf_token_path explicitly, check if
+ * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
+ * option
+ */
+ if (!token_path)
+ token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
+ if (token_path && strlen(token_path) >= PATH_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
if (IS_ERR(obj))
return obj;
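The environment-variable path can be exercised without changing application code; a hypothetical sketch (the path and object file name are illustrative):

	/* equivalent to passing .bpf_token_path = "/sys/fs/bpf" in opts */
	setenv("LIBBPF_BPF_TOKEN_PATH", "/sys/fs/bpf", 1);
	obj = bpf_object__open_file("prog.bpf.o", NULL);

	/* an empty value disables implicit BPF token creation entirely */
	setenv("LIBBPF_BPF_TOKEN_PATH", "", 1);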
@@ -7963,6 +7611,14 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
obj->log_size = log_size;
obj->log_level = log_level;
+ if (token_path) {
+ obj->token_path = strdup(token_path);
+ if (!obj->token_path) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
if (btf_tmp_path) {
if (strlen(btf_tmp_path) >= PATH_MAX) {
@@ -8473,7 +8129,8 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
if (obj->gen_loader)
bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
- err = bpf_object__probe_loading(obj);
+ err = bpf_object_prepare_token(obj);
+ err = err ? : bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_maps(obj);
@@ -9008,6 +8665,11 @@ void bpf_object__close(struct bpf_object *obj)
}
zfree(&obj->programs);
+ zfree(&obj->feat_cache);
+ zfree(&obj->token_path);
+ if (obj->token_fd > 0)
+ close(obj->token_fd);
+
free(obj);
}
@@ -9668,7 +9330,9 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
return NULL;
}
-/* Collect the reloc from ELF and populate the st_ops->progs[] */
+/* Collect the reloc from ELF, populate the st_ops->progs[], and update
+ * st_ops->data for shadow type.
+ */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
@@ -9782,6 +9446,14 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}
st_ops->progs[member_idx] = prog;
+
+ /* st_ops->data will be exposed to users, being returned by
+ * bpf_map__initial_value() as a pointer to the shadow
+ * type. All function pointers in the original struct type
+ * are converted to pointers to struct bpf_program in the
+ * shadow type.
+ */
+ *((struct bpf_program **)(st_ops->data + moff)) = prog;
}
return 0;
@@ -9966,7 +9638,9 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
*btf_obj_fd = 0;
*btf_type_id = 1;
} else {
- err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ err = find_kernel_btf_id(prog->obj, attach_name,
+ attach_type, btf_obj_fd,
+ btf_type_id);
}
if (err) {
pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
@@ -10238,6 +9912,12 @@ int bpf_map__set_initial_value(struct bpf_map *map,
void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
{
+ if (bpf_map__is_struct_ops(map)) {
+ if (psize)
+ *psize = map->def.value_size;
+ return map->st_ops->data;
+ }
+
if (!map->mmaped)
return NULL;
*psize = map->def.value_size;
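For illustration, a hedged sketch of consuming the struct_ops shadow data via bpf_map__initial_value() (the skeleton and map names are hypothetical):

	size_t sz;
	void *shadow;

	/* for struct_ops maps this now returns st_ops->data, the shadow
	 * struct in which each function pointer member has been replaced
	 * with a struct bpf_program * */
	shadow = bpf_map__initial_value(skel->maps.my_ops, &sz);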
@@ -11028,7 +10708,7 @@ static const char *arch_specific_syscall_pfx(void)
#endif
}
-static int probe_kern_syscall_wrapper(void)
+int probe_kern_syscall_wrapper(int token_fd)
{
char syscall_name[64];
const char *ksys_pfx;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6cd9c501624f..5723cbbfcc41 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -177,10 +177,29 @@ struct bpf_object_open_opts {
* logs through its print callback.
*/
__u32 kernel_log_level;
+ /* Path to BPF FS mount point to derive BPF token from.
+ *
+ * The created BPF token will be used automatically for all bpf()
+ * syscall operations that accept a BPF token (e.g., map creation,
+ * BTF and program loads, etc.) within the instantiated BPF object.
+ *
+ * If bpf_token_path is not specified, libbpf will consult the
+ * LIBBPF_BPF_TOKEN_PATH environment variable. If set, its value is
+ * taken as the bpf_token_path option and forces libbpf to either
+ * create a BPF token from the provided custom BPF FS path, or to
+ * disable implicit BPF token creation if the envvar value is an
+ * empty string. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH if
+ * both are set at the same time.
+ *
+ * Setting the bpf_token_path option to an empty string disables
+ * libbpf's automatic attempt to create a BPF token from the default
+ * BPF FS mount point (/sys/fs/bpf), in case this default behavior
+ * is undesirable.
+ */
+ const char *bpf_token_path;
size_t :0;
};
-#define bpf_object_open_opts__last_field kernel_log_level
+#define bpf_object_open_opts__last_field bpf_token_path
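For illustration, a minimal sketch of opening an object with an explicit token path (the object file name is hypothetical):

	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.bpf_token_path = "/sys/fs/bpf",
	);
	struct bpf_object *obj;

	obj = bpf_object__open_file("prog.bpf.o", &opts);

Passing an empty string instead would opt out of the implicit /sys/fs/bpf token creation described above.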
/**
* @brief **bpf_object__open()** creates a bpf_object by opening
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 91c5aef7dae7..86804fd90dd1 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -245,7 +245,6 @@ LIBBPF_0.3.0 {
btf__parse_raw_split;
btf__parse_split;
btf__new_empty_split;
- btf__new_split;
ring_buffer__epoll_fd;
} LIBBPF_0.2.0;
@@ -326,7 +325,6 @@ LIBBPF_0.7.0 {
bpf_xdp_detach;
bpf_xdp_query;
bpf_xdp_query_id;
- btf_ext__raw_data;
libbpf_probe_bpf_helper;
libbpf_probe_bpf_map_type;
libbpf_probe_bpf_prog_type;
@@ -411,4 +409,8 @@ LIBBPF_1.3.0 {
} LIBBPF_1.2.0;
LIBBPF_1.4.0 {
+ global:
+ bpf_token_create;
+ btf__new_split;
+ btf_ext__raw_data;
} LIBBPF_1.3.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 27e4e320e1a6..ad936ac5e639 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -15,9 +15,24 @@
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
+#include <sys/syscall.h>
#include <libelf.h>
#include "relo_core.h"
+/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
+ * ([0]), and just returns -EINVAL even if file exists and is accessible.
+ * See [1] for issues caused by this.
+ *
+ * So just redefine it to 0 on Android.
+ *
+ * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
+ * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
+ */
+#ifdef __ANDROID__
+#undef AT_EACCESS
+#define AT_EACCESS 0
+#endif
+
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
@@ -357,18 +372,37 @@ enum kern_feature_id {
FEAT_SYSCALL_WRAPPER,
/* BPF multi-uprobe link support */
FEAT_UPROBE_MULTI_LINK,
+ /* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
+ FEAT_ARG_CTX_TAG,
__FEAT_CNT,
};
-int probe_memcg_account(void);
+enum kern_feature_result {
+ FEAT_UNKNOWN = 0,
+ FEAT_SUPPORTED = 1,
+ FEAT_MISSING = 2,
+};
+
+struct kern_feature_cache {
+ enum kern_feature_result res[__FEAT_CNT];
+ int token_fd;
+};
+
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
+
+int probe_kern_syscall_wrapper(int token_fd);
+int probe_memcg_account(int token_fd);
int bump_rlimit_memlock(void);
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len);
-int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);
+ const char *str_sec, size_t str_len,
+ int token_fd);
+int btf_load_into_kernel(struct btf *btf,
+ char *log_buf, size_t log_sz, __u32 log_level,
+ int token_fd);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
@@ -532,6 +566,17 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
+/* Unconditionally dup FD, ensuring the result doesn't fall into the
+ * [0, 2] range. The original FD is not closed or altered in any other
+ * way. If the original FD is invalid (negative), it is returned as-is.
+ */
+static inline int dup_good_fd(int fd)
+{
+ if (fd < 0)
+ return fd;
+ return fcntl(fd, F_DUPFD_CLOEXEC, 3);
+}
+
/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
* Takes ownership of the fd passed in, and closes it if calling
* fcntl(fd, F_DUPFD_CLOEXEC, 3).
@@ -543,7 +588,7 @@ static inline int ensure_good_fd(int fd)
if (fd < 0)
return fd;
if (fd < 3) {
- fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+ fd = dup_good_fd(fd);
saved_errno = errno;
close(old_fd);
errno = saved_errno;
@@ -555,6 +600,15 @@ static inline int ensure_good_fd(int fd)
return fd;
}
+static inline int sys_dup2(int oldfd, int newfd)
+{
+#ifdef __NR_dup2
+ return syscall(__NR_dup2, oldfd, newfd);
+#else
+ return syscall(__NR_dup3, oldfd, newfd, 0);
+#endif
+}
+
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
* Regardless of success, *tmp_fd* is closed.
* Whatever *fixed_fd* pointed to is closed silently.
@@ -563,7 +617,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
{
int err;
- err = dup2(tmp_fd, fixed_fd);
+ err = sys_dup2(tmp_fd, fixed_fd);
err = err < 0 ? -errno : 0;
close(tmp_fd); /* clean up temporary FD */
return err;
@@ -613,4 +667,6 @@ int elf_resolve_syms_offsets(const char *binary_path, int cnt,
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
unsigned long **poffsets, size_t *pcnt);
+int probe_fd(int fd);
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 9c4db90b92b6..ee9b1dbea9eb 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -219,7 +219,8 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
}
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len)
+ const char *str_sec, size_t str_len,
+ int token_fd)
{
struct btf_header hdr = {
.magic = BTF_MAGIC,
@@ -229,6 +230,10 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
.str_off = types_len,
.str_len = str_len,
};
+ LIBBPF_OPTS(bpf_btf_load_opts, opts,
+ .token_fd = token_fd,
+ .btf_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
int btf_fd, btf_len;
__u8 *raw_btf;
@@ -241,7 +246,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
- btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
+ btf_fd = bpf_btf_load(raw_btf, btf_len, &opts);
free(raw_btf);
return btf_fd;
@@ -271,7 +276,7 @@ static int load_local_storage_btf(void)
};
return libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
+ strs, sizeof(strs), 0);
}
static int probe_map_create(enum bpf_map_type map_type)
@@ -326,6 +331,7 @@ static int probe_map_create(enum bpf_map_type map_type)
case BPF_MAP_TYPE_STRUCT_OPS:
/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
opts.btf_vmlinux_value_type_id = 1;
+ opts.value_type_btf_obj_fd = -1;
exp_err = -524; /* -ENOTSUPP */
break;
case BPF_MAP_TYPE_BLOOM_FILTER:
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 16bca56002ab..0d4be829551b 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -2732,7 +2732,7 @@ static int finalize_btf(struct bpf_linker *linker)
/* Emit .BTF.ext section */
if (linker->btf_ext) {
- raw_data = btf_ext__get_raw_data(linker->btf_ext, &raw_sz);
+ raw_data = btf_ext__raw_data(linker->btf_ext, &raw_sz);
if (!raw_data)
return -ENOMEM;
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 090bcf6e3b3d..68a2def17175 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -496,8 +496,8 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
if (err)
return libbpf_err(err);
- opts->feature_flags = md.flags;
- opts->xdp_zc_max_segs = md.xdp_zc_max_segs;
+ OPTS_SET(opts, feature_flags, md.flags);
+ OPTS_SET(opts, xdp_zc_max_segs, md.xdp_zc_max_segs);
skip_feature_flags:
return 0;
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
index a139334d57b6..626d7ffb03d6 100644
--- a/tools/lib/bpf/str_error.h
+++ b/tools/lib/bpf/str_error.h
@@ -2,5 +2,8 @@
#ifndef __LIBBPF_STR_ERROR_H
#define __LIBBPF_STR_ERROR_H
+#define STRERR_BUFSIZE 128
+
char *libbpf_strerror_r(int err, char *dst, int len);
+
#endif /* __LIBBPF_STR_ERROR_H */
diff --git a/tools/lib/perf/Documentation/examples/sampling.c b/tools/lib/perf/Documentation/examples/sampling.c
index 8e1a926a9cfe..bc142f0664b5 100644
--- a/tools/lib/perf/Documentation/examples/sampling.c
+++ b/tools/lib/perf/Documentation/examples/sampling.c
@@ -39,7 +39,7 @@ int main(int argc, char **argv)
libperf_init(libperf_print);
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus) {
fprintf(stderr, "failed to create cpus\n");
return -1;
diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt
index d6ca24f6ef78..2378980fab8a 100644
--- a/tools/lib/perf/Documentation/libperf-sampling.txt
+++ b/tools/lib/perf/Documentation/libperf-sampling.txt
@@ -97,7 +97,7 @@ In this case we will monitor all the available CPUs:
[source,c]
--
- 42 cpus = perf_cpu_map__new(NULL);
+ 42 cpus = perf_cpu_map__new_online_cpus();
43 if (!cpus) {
44 fprintf(stderr, "failed to create cpus\n");
45 return -1;
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index a8f1a237931b..fcfb9499ef9c 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -37,7 +37,7 @@ SYNOPSIS
struct perf_cpu_map;
- struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+ struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
struct perf_cpu_map *perf_cpu_map__read(FILE *file);
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
@@ -46,7 +46,7 @@ SYNOPSIS
void perf_cpu_map__put(struct perf_cpu_map *map);
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
- bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+ bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
int perf_cpu_map__max(struct perf_cpu_map *map);
bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index 2a5a29217374..4adcd7920d03 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -9,6 +9,7 @@
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
+#include "internal.h"
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
@@ -27,7 +28,7 @@ struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
return result;
}
-struct perf_cpu_map *perf_cpu_map__dummy_new(void)
+struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
@@ -66,15 +67,21 @@ void perf_cpu_map__put(struct perf_cpu_map *map)
}
}
-static struct perf_cpu_map *cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
struct perf_cpu_map *cpus;
- int nr_cpus;
+ int nr_cpus, nr_cpus_conf;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (nr_cpus < 0)
return NULL;
+ nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr_cpus != nr_cpus_conf) {
+ pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+ nr_cpus, nr_cpus_conf, nr_cpus);
+ }
+
cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
int i;
@@ -86,9 +93,27 @@ static struct perf_cpu_map *cpu_map__default_new(void)
return cpus;
}
-struct perf_cpu_map *perf_cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
- return cpu_map__default_new();
+ struct perf_cpu_map *cpus = NULL;
+ FILE *onlnf;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (onlnf) {
+ cpus = perf_cpu_map__read(onlnf);
+ fclose(onlnf);
+ }
+ return cpus;
+}
+
+struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
+{
+ struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();
+
+ if (cpus)
+ return cpus;
+
+ return cpu_map__new_sysconf();
}
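For illustration, a minimal usage sketch of the renamed constructor (printf and the surrounding program are assumed):

	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu cpu;
	int idx;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("cpu%d\n", cpu.cpu);
	perf_cpu_map__put(cpus);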
@@ -180,27 +205,11 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else
- cpus = cpu_map__default_new();
out_free_tmp:
free(tmp_cpus);
return cpus;
}
-static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
-{
- struct perf_cpu_map *cpus = NULL;
- FILE *onlnf;
-
- onlnf = fopen("/sys/devices/system/cpu/online", "r");
- if (!onlnf)
- return cpu_map__default_new();
-
- cpus = perf_cpu_map__read(onlnf);
- fclose(onlnf);
- return cpus;
-}
-
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
struct perf_cpu_map *cpus = NULL;
@@ -211,7 +220,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
int max_entries = 0;
if (!cpu_list)
- return cpu_map__read_all_cpu_map();
+ return perf_cpu_map__new_online_cpus();
/*
* must handle the case of empty cpumap to cover
@@ -268,10 +277,12 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else if (*cpu_list != '\0')
- cpus = cpu_map__default_new();
- else
- cpus = perf_cpu_map__dummy_new();
+ else if (*cpu_list != '\0') {
+ pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
+ cpu_list);
+ cpus = perf_cpu_map__new_online_cpus();
+ } else {
+ cpus = perf_cpu_map__new_any_cpu();
+ }
invalid:
free(tmp_cpus);
out:
@@ -300,7 +311,7 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
return cpus ? __perf_cpu_map__nr(cpus) : 1;
}
-bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 3acbbccc1901..058e3ff10f9b 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -39,7 +39,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
if (evsel->system_wide) {
/* System wide: set the cpu map of the evsel to all online CPUs. */
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__new(NULL);
+ evsel->cpus = perf_cpu_map__new_online_cpus();
} else if (evlist->has_user_cpus && evsel->is_pmu_core) {
/*
* User requested CPUs on a core PMU, ensure the requested CPUs
@@ -619,7 +619,7 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
/* One for each CPU */
nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
- if (perf_cpu_map__empty(evlist->all_cpus)) {
+ if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
/* Plus one for each thread */
nr_mmaps += perf_thread_map__nr(evlist->threads);
/* Minus the per-thread CPU (-1) */
@@ -653,7 +653,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- if (perf_cpu_map__empty(cpus))
+ if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
return mmap_per_thread(evlist, ops, mp);
return mmap_per_cpu(evlist, ops, mp);
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 8b51b008a81f..c07160953224 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -120,7 +120,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
static struct perf_cpu_map *empty_cpu_map;
if (empty_cpu_map == NULL) {
- empty_cpu_map = perf_cpu_map__dummy_new();
+ empty_cpu_map = perf_cpu_map__new_any_cpu();
if (empty_cpu_map == NULL)
return -ENOMEM;
}
diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h
index 5a062af8e9d8..5f08cab61ece 100644
--- a/tools/lib/perf/include/internal/mmap.h
+++ b/tools/lib/perf/include/internal/mmap.h
@@ -33,7 +33,8 @@ struct perf_mmap {
bool overwrite;
u64 flush;
libperf_unmap_cb_t unmap_cb;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+ void *event_copy;
+ size_t event_copy_sz;
struct perf_mmap *next;
};
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index e38d859a384d..228c6c629b0c 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -19,10 +19,23 @@ struct perf_cache {
struct perf_cpu_map;
/**
- * perf_cpu_map__dummy_new - a map with a singular "any CPU"/dummy -1 value.
+ * perf_cpu_map__new_any_cpu - a map with a singular "any CPU"/dummy -1 value.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
+/**
+ * perf_cpu_map__new_online_cpus - a map read from
+ * /sys/devices/system/cpu/online if
+ *                                 available. If reading isn't possible, a map
+ *                                 is created from the number of online
+ *                                 processors, assuming the first 'n'
+ *                                 processors are all online.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void);
+/**
+ * perf_cpu_map__new - create a map from the given cpu_list such as "0-7". If no
+ *                     cpu_list argument is provided then the result of
+ *                     perf_cpu_map__new_online_cpus() is returned.
*/
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
@@ -31,12 +44,23 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
struct perf_cpu_map *other);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
+ * is invalid.
+ */
LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+/**
+ * perf_cpu_map__nr - for an empty map returns 1: as perf_cpu_map__cpu returns
+ *                    a CPU of -1 for an invalid index, this makes an empty map
+ *                    look like it contains the "any CPU"/dummy value. Otherwise
+ *                    the result is the number of CPUs in the map, plus one if
+ *                    the "any CPU"/dummy value is present.
+ */
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
/**
- * perf_cpu_map__empty - is map either empty or the "any CPU"/dummy value.
+ * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
*/
-LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs,
@@ -51,6 +75,12 @@ LIBPERF_API bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map);
(idx) < perf_cpu_map__nr(cpus); \
(idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
+#define perf_cpu_map__for_each_cpu_skip_any(_cpu, idx, cpus) \
+ for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx); \
+ (idx) < perf_cpu_map__nr(cpus); \
+ (idx)++, (_cpu) = perf_cpu_map__cpu(cpus, idx)) \
+ if ((_cpu).cpu != -1)
+
#define perf_cpu_map__for_each_idx(idx, cpus) \
for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)
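For illustration, a hedged sketch of the new skip_any iterator, which visits only real CPUs (handle_cpu is hypothetical):

	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu cpu;
	int idx;

	/* unlike perf_cpu_map__for_each_cpu, entries with cpu == -1
	 * ("any CPU"/dummy) are filtered out by the trailing if */
	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus)
		handle_cpu(cpu.cpu);
	perf_cpu_map__put(cpus);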
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 190b56ae923a..10b3f3722642 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -1,15 +1,15 @@
LIBPERF_0.0.1 {
global:
libperf_init;
- perf_cpu_map__dummy_new;
- perf_cpu_map__default_new;
+ perf_cpu_map__new_any_cpu;
+ perf_cpu_map__new_online_cpus;
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
perf_cpu_map__read;
perf_cpu_map__nr;
perf_cpu_map__cpu;
- perf_cpu_map__empty;
+ perf_cpu_map__has_any_cpu_or_is_empty;
perf_cpu_map__max;
perf_cpu_map__has;
perf_thread_map__new_array;
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 2184814b37dd..0c903c2372c9 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -19,6 +19,7 @@
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
bool overwrite, libperf_unmap_cb_t unmap_cb)
{
+ /* Assume fields were zero initialized. */
map->fd = -1;
map->overwrite = overwrite;
map->unmap_cb = unmap_cb;
@@ -51,13 +52,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
void perf_mmap__munmap(struct perf_mmap *map)
{
- if (map && map->base != NULL) {
+ if (!map)
+ return;
+
+ zfree(&map->event_copy);
+ map->event_copy_sz = 0;
+ if (map->base) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
- if (map && map->unmap_cb)
+ if (map->unmap_cb)
map->unmap_cb(map);
}
@@ -223,9 +229,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
*/
if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
unsigned int offset = *startp;
- unsigned int len = min(sizeof(*event), size), cpy;
+ unsigned int len = size, cpy;
void *dst = map->event_copy;
+ if (size > map->event_copy_sz) {
+ dst = realloc(map->event_copy, size);
+ if (!dst)
+ return NULL;
+ map->event_copy = dst;
+ map->event_copy_sz = size;
+ }
+
do {
cpy = min(map->mask + 1 - (offset & map->mask), len);
memcpy(dst, &data[offset & map->mask], cpy);
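For context, a sketch of the wrap-around case this path handles (numbers hypothetical):

	/* with map->mask = 0xffff, an event at offset 0xfff8 of size 16
	 * straddles the ring's end: the loop copies 8 bytes from
	 * [0xfff8..0xffff], then 8 bytes from [0x0..0x7], into event_copy,
	 * which is now grown on demand rather than capped at
	 * PERF_SAMPLE_MAX_SIZE */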
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index 87b0510a556f..c998b1dae863 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -21,7 +21,7 @@ int test_cpumap(int argc, char **argv)
libperf_init(libperf_print);
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
if (!cpus)
return -1;
@@ -29,7 +29,7 @@ int test_cpumap(int argc, char **argv)
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
- cpus = perf_cpu_map__default_new();
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus)
return -1;
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index ed616fc19b4f..10f70cb41ff1 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -46,7 +46,7 @@ static int test_stat_cpu(void)
};
int err, idx;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evlist = perf_evlist__new();
@@ -261,7 +261,7 @@ static int test_mmap_thread(void)
threads = perf_thread_map__new_dummy();
__T("failed to create threads", threads);
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
__T("failed to create cpus", cpus);
perf_thread_map__set_pid(threads, 0, pid);
@@ -350,7 +350,7 @@ static int test_mmap_cpus(void)
attr.config = id;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evlist = perf_evlist__new();
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index a11fc51bfb68..545ec3150546 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -27,7 +27,7 @@ static int test_stat_cpu(void)
};
int err, idx;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evsel = perf_evsel__new(&attr);
diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
index adfbae27dc36..8561b0f01a24 100644
--- a/tools/lib/subcmd/help.c
+++ b/tools/lib/subcmd/help.c
@@ -52,11 +52,21 @@ void uniq(struct cmdnames *cmds)
if (!cmds->cnt)
return;
- for (i = j = 1; i < cmds->cnt; i++)
- if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
- cmds->names[j++] = cmds->names[i];
-
+ for (i = 1; i < cmds->cnt; i++) {
+ if (!strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+ zfree(&cmds->names[i - 1]);
+ }
+ for (i = 0, j = 0; i < cmds->cnt; i++) {
+ if (cmds->names[i]) {
+ if (i == j)
+ j++;
+ else
+ cmds->names[j++] = cmds->names[i];
+ }
+ }
cmds->cnt = j;
+ while (j < i)
+ cmds->names[j++] = NULL;
}
void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
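For context, a sketch of the semantics the rewritten uniq() implements, assuming the names are sorted:

	/* before: cnt = 3, names = { "list", "list", "top" }
	 * after:  cnt = 2, names = { "list", "top", NULL }
	 *
	 * pass one zfree()s each duplicate's predecessor slot, pass two
	 * compacts the survivors, and the tail is NULL-padded so stale
	 * pointers can't be double-freed later */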
diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile
index da1aa10bbcc3..8e9e09d84e26 100644
--- a/tools/net/ynl/Makefile
+++ b/tools/net/ynl/Makefile
@@ -11,11 +11,11 @@ $(SUBDIRS):
$(MAKE) -C $@ ; \
fi
-clean hardclean:
+clean distclean:
@for dir in $(SUBDIRS) ; do \
if [ -f "$$dir/Makefile" ] ; then \
$(MAKE) -C $$dir $@; \
fi \
done
-.PHONY: clean all $(SUBDIRS)
+.PHONY: all clean distclean $(SUBDIRS)
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index 3110f84dd029..07373c5a7afe 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -15,7 +15,12 @@ UAPI_PATH:=../../../../include/uapi/
get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2)
CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
+CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h)
CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
+CFLAGS_mptcp_pm:=$(call get_hdr_inc,_LINUX_MPTCP_PM_H,mptcp_pm.h)
CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h)
CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_NETLINK_H,nfsd_netlink.h)
+CFLAGS_ovs_datapath:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
+CFLAGS_ovs_flow:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
+CFLAGS_ovs_vport:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h)
diff --git a/tools/net/ynl/cli.py b/tools/net/ynl/cli.py
index 2ad9ec0f5545..f131e33ac3ee 100755
--- a/tools/net/ynl/cli.py
+++ b/tools/net/ynl/cli.py
@@ -6,7 +6,16 @@ import json
import pprint
import time
-from lib import YnlFamily, Netlink
+from lib import YnlFamily, Netlink, NlError
+
+
+class YnlEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, bytes):
+ return bytes.hex(obj)
+ if isinstance(obj, set):
+ return list(obj)
+ return json.JSONEncoder.default(self, obj)
def main():
@@ -28,8 +37,17 @@ def main():
parser.add_argument('--append', dest='flags', action='append_const',
const=Netlink.NLM_F_APPEND)
parser.add_argument('--process-unknown', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--output-json', action='store_true')
+ parser.add_argument('--dbg-small-recv', default=0, const=4000,
+ action='store', nargs='?', type=int)
args = parser.parse_args()
+ def output(msg):
+ if args.output_json:
+ print(json.dumps(msg, cls=YnlEncoder))
+ else:
+ pprint.PrettyPrinter().pprint(msg)
+
if args.no_schema:
args.schema = ''
@@ -37,7 +55,10 @@ def main():
if args.json_text:
attrs = json.loads(args.json_text)
- ynl = YnlFamily(args.spec, args.schema, args.process_unknown)
+ ynl = YnlFamily(args.spec, args.schema, args.process_unknown,
+ recv_size=args.dbg_small_recv)
+ if args.dbg_small_recv:
+ ynl.set_recv_dbg(True)
if args.ntf:
ynl.ntf_subscribe(args.ntf)
@@ -45,16 +66,20 @@ def main():
if args.sleep:
time.sleep(args.sleep)
- if args.do:
- reply = ynl.do(args.do, attrs, args.flags)
- pprint.PrettyPrinter().pprint(reply)
- if args.dump:
- reply = ynl.dump(args.dump, attrs)
- pprint.PrettyPrinter().pprint(reply)
+ try:
+ if args.do:
+ reply = ynl.do(args.do, attrs, args.flags)
+ output(reply)
+ if args.dump:
+ reply = ynl.dump(args.dump, attrs)
+ output(reply)
+ except NlError as e:
+ print(e)
+ exit(1)
if args.ntf:
ynl.check_ntf()
- pprint.PrettyPrinter().pprint(ynl.async_msg_queue)
+ output(ynl.async_msg_queue)
if __name__ == "__main__":
diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile
index 84cbabdd02a8..713f5fb9cc2d 100644
--- a/tools/net/ynl/generated/Makefile
+++ b/tools/net/ynl/generated/Makefile
@@ -14,7 +14,10 @@ YNL_GEN_ARG_ethtool:=--user-header linux/ethtool_netlink.h \
TOOL:=../ynl-gen-c.py
-GENS:=ethtool devlink handshake fou netdev nfsd
+GENS_PATHS=$(shell grep -nrI --files-without-match \
+ 'protocol: netlink' \
+ ../../../../Documentation/netlink/specs/)
+GENS=$(patsubst ../../../../Documentation/netlink/specs/%.yaml,%,${GENS_PATHS})
SRCS=$(patsubst %,%-user.c,${GENS})
HDRS=$(patsubst %,%-user.h,${GENS})
OBJS=$(patsubst %,%-user.o,${GENS})
@@ -40,11 +43,11 @@ protos.a: $(OBJS)
clean:
rm -f *.o
-hardclean: clean
+distclean: clean
rm -f *.c *.h *.a
regen:
@../ynl-regen.sh
-.PHONY: all clean hardclean regen
+.PHONY: all clean distclean regen
.DEFAULT_GOAL: all
diff --git a/tools/net/ynl/lib/Makefile b/tools/net/ynl/lib/Makefile
index d2e50fd0a52d..dfff3ecd1cba 100644
--- a/tools/net/ynl/lib/Makefile
+++ b/tools/net/ynl/lib/Makefile
@@ -17,12 +17,13 @@ ynl.a: $(OBJS)
ar rcs $@ $(OBJS)
clean:
rm -f *.o *.d *~
+ rm -rf __pycache__
-hardclean: clean
+distclean: clean
rm -f *.a
%.o: %.c
$(COMPILE.c) -MMD -c -o $@ $<
-.PHONY: all clean
+.PHONY: all clean distclean
.DEFAULT_GOAL=all
diff --git a/tools/net/ynl/lib/__init__.py b/tools/net/ynl/lib/__init__.py
index f7eaa07783e7..9137b83e580a 100644
--- a/tools/net/ynl/lib/__init__.py
+++ b/tools/net/ynl/lib/__init__.py
@@ -2,7 +2,7 @@
from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
SpecFamily, SpecOperation
-from .ynl import YnlFamily, Netlink
+from .ynl import YnlFamily, Netlink, NlError
__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
- "SpecFamily", "SpecOperation", "YnlFamily", "Netlink"]
+ "SpecFamily", "SpecOperation", "YnlFamily", "Netlink", "NlError"]
diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py
index 44f13e383e8a..fbce52395b3b 100644
--- a/tools/net/ynl/lib/nlspec.py
+++ b/tools/net/ynl/lib/nlspec.py
@@ -144,7 +144,7 @@ class SpecEnumSet(SpecElement):
class SpecAttr(SpecElement):
- """ Single Netlink atttribute type
+ """ Single Netlink attribute type
Represents a single attribute type within an attr space.
@@ -248,6 +248,7 @@ class SpecStructMember(SpecElement):
len integer, optional byte length of binary types
display_hint string, hint to help choose format specifier
when displaying the value
+ struct string, name of nested struct type
"""
def __init__(self, family, yaml):
super().__init__(family, yaml)
@@ -256,6 +257,7 @@ class SpecStructMember(SpecElement):
self.enum = yaml.get('enum')
self.len = yaml.get('len')
self.display_hint = yaml.get('display-hint')
+ self.struct = yaml.get('struct')
class SpecStruct(SpecElement):
@@ -306,10 +308,9 @@ class SpecSubMessage(SpecElement):
class SpecSubMessageFormat(SpecElement):
- """ Netlink sub-message definition
+ """ Netlink sub-message format definition
- Represents a set of sub-message formats for polymorphic nlattrs
- that contain type-specific sub messages.
+ Represents a single format for a sub-message.
Attributes:
value attribute value to match against type selector
diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h
index 7491da8e7555..6cf890080dc0 100644
--- a/tools/net/ynl/lib/ynl-priv.h
+++ b/tools/net/ynl/lib/ynl-priv.h
@@ -2,16 +2,16 @@
#ifndef __YNL_C_PRIV_H
#define __YNL_C_PRIV_H 1
+#include <stdbool.h>
#include <stddef.h>
-#include <libmnl/libmnl.h>
#include <linux/types.h>
+struct ynl_parse_arg;
+
/*
* YNL internals / low level stuff
*/
-/* Generic mnl helper code */
-
enum ynl_policy_type {
YNL_PT_REJECT = 1,
YNL_PT_IGNORE,
@@ -27,6 +27,20 @@ enum ynl_policy_type {
YNL_PT_BITFIELD32,
};
+enum ynl_parse_result {
+ YNL_PARSE_CB_ERROR = -1,
+ YNL_PARSE_CB_STOP = 0,
+ YNL_PARSE_CB_OK = 1,
+};
+
+#define YNL_SOCKET_BUFFER_SIZE (1 << 17)
+
+#define YNL_ARRAY_SIZE(array) (sizeof(array) ? \
+ sizeof(array) / sizeof(array[0]) : 0)
+
+typedef int (*ynl_parse_cb_t)(const struct nlmsghdr *nlh,
+ struct ynl_parse_arg *yarg);
+
struct ynl_policy_attr {
enum ynl_policy_type type;
unsigned int len;
@@ -80,8 +94,6 @@ struct ynl_ntf_base_type {
unsigned char data[] __attribute__((aligned(8)));
};
-extern mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE];
-
struct nlmsghdr *
ynl_gemsg_start_req(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
struct nlmsghdr *
@@ -89,30 +101,26 @@ ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version);
int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr);
-int ynl_recv_ack(struct ynl_sock *ys, int ret);
-int ynl_cb_null(const struct nlmsghdr *nlh, void *data);
-
/* YNL specific helpers used by the auto-generated code */
struct ynl_req_state {
struct ynl_parse_arg yarg;
- mnl_cb_t cb;
+ ynl_parse_cb_t cb;
__u32 rsp_cmd;
};
struct ynl_dump_state {
- struct ynl_sock *ys;
- struct ynl_policy_nest *rsp_policy;
+ struct ynl_parse_arg yarg;
void *first;
struct ynl_dump_list_type *last;
size_t alloc_sz;
- mnl_cb_t cb;
+ ynl_parse_cb_t cb;
__u32 rsp_cmd;
};
struct ynl_ntf_info {
struct ynl_policy_nest *policy;
- mnl_cb_t cb;
+ ynl_parse_cb_t cb;
size_t alloc_sz;
void (*free)(struct ynl_ntf_base_type *ntf);
};
@@ -125,20 +133,325 @@ int ynl_exec_dump(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
void ynl_error_unknown_notification(struct ynl_sock *ys, __u8 cmd);
int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg);
-#ifndef MNL_HAS_AUTO_SCALARS
-static inline uint64_t mnl_attr_get_uint(const struct nlattr *attr)
+/* Netlink message handling helpers */
+
+#define YNL_MSG_OVERFLOW 1
+
+static inline struct nlmsghdr *ynl_nlmsg_put_header(void *buf)
+{
+ struct nlmsghdr *nlh = buf;
+
+ memset(nlh, 0, sizeof(*nlh));
+ nlh->nlmsg_len = NLMSG_HDRLEN;
+
+ return nlh;
+}
+
+static inline unsigned int ynl_nlmsg_data_len(const struct nlmsghdr *nlh)
{
- if (mnl_attr_get_payload_len(attr) == 4)
- return mnl_attr_get_u32(attr);
- return mnl_attr_get_u64(attr);
+ return nlh->nlmsg_len - NLMSG_HDRLEN;
+}
+
+static inline void *ynl_nlmsg_data(const struct nlmsghdr *nlh)
+{
+ return (unsigned char *)nlh + NLMSG_HDRLEN;
+}
+
+static inline void *
+ynl_nlmsg_data_offset(const struct nlmsghdr *nlh, unsigned int offset)
+{
+ return (unsigned char *)nlh + NLMSG_HDRLEN + offset;
+}
+
+static inline void *ynl_nlmsg_end_addr(const struct nlmsghdr *nlh)
+{
+ return (char *)nlh + nlh->nlmsg_len;
+}
+
+static inline void *
+ynl_nlmsg_put_extra_header(struct nlmsghdr *nlh, unsigned int size)
+{
+ void *tail = ynl_nlmsg_end_addr(nlh);
+
+ nlh->nlmsg_len += NLMSG_ALIGN(size);
+ return tail;
+}
+
+/* Netlink attribute helpers */
+
+static inline unsigned int ynl_attr_type(const struct nlattr *attr)
+{
+ return attr->nla_type & NLA_TYPE_MASK;
+}
+
+static inline unsigned int ynl_attr_data_len(const struct nlattr *attr)
+{
+ return attr->nla_len - NLA_HDRLEN;
+}
+
+static inline void *ynl_attr_data(const struct nlattr *attr)
+{
+ return (unsigned char *)attr + NLA_HDRLEN;
+}
+
+static inline void *ynl_attr_data_end(const struct nlattr *attr)
+{
+ return ynl_attr_data(attr) + ynl_attr_data_len(attr);
+}
+
+#define ynl_attr_for_each(attr, nlh, fixed_hdr_sz) \
+ for ((attr) = ynl_attr_first(nlh, (nlh)->nlmsg_len, \
+ NLMSG_HDRLEN + fixed_hdr_sz); attr; \
+ (attr) = ynl_attr_next(ynl_nlmsg_end_addr(nlh), attr))
+
+#define ynl_attr_for_each_nested(attr, outer) \
+ for ((attr) = ynl_attr_first(outer, outer->nla_len, \
+ sizeof(struct nlattr)); attr; \
+ (attr) = ynl_attr_next(ynl_attr_data_end(outer), attr))
+
+#define ynl_attr_for_each_payload(start, len, attr) \
+ for ((attr) = ynl_attr_first(start, len, 0); attr; \
+ (attr) = ynl_attr_next(start + len, attr))
+
+static inline struct nlattr *
+ynl_attr_if_good(const void *end, struct nlattr *attr)
+{
+ if (attr + 1 > (const struct nlattr *)end)
+ return NULL;
+ if (ynl_attr_data_end(attr) > end)
+ return NULL;
+ return attr;
+}
+
+static inline struct nlattr *
+ynl_attr_next(const void *end, const struct nlattr *prev)
+{
+ struct nlattr *attr;
+
+ attr = (void *)((char *)prev + NLA_ALIGN(prev->nla_len));
+ return ynl_attr_if_good(end, attr);
+}
+
+static inline struct nlattr *
+ynl_attr_first(const void *start, size_t len, size_t skip)
+{
+ struct nlattr *attr;
+
+ attr = (void *)((char *)start + NLMSG_ALIGN(skip));
+ return ynl_attr_if_good(start + len, attr);
+}
+
+static inline bool
+__ynl_attr_put_overflow(struct nlmsghdr *nlh, size_t size)
+{
+ bool o;
+
+ /* ynl_msg_start() stashed buffer length in nlmsg_pid. */
+ o = nlh->nlmsg_len + NLA_HDRLEN + NLMSG_ALIGN(size) > nlh->nlmsg_pid;
+ if (o)
+ /* YNL_MSG_OVERFLOW is < NLMSG_HDRLEN, all subsequent checks
+ * are guaranteed to fail.
+ */
+ nlh->nlmsg_pid = YNL_MSG_OVERFLOW;
+ return o;
+}
+
+static inline struct nlattr *
+ynl_attr_nest_start(struct nlmsghdr *nlh, unsigned int attr_type)
+{
+ struct nlattr *attr;
+
+ if (__ynl_attr_put_overflow(nlh, 0))
+ return ynl_nlmsg_end_addr(nlh) - NLA_HDRLEN;
+
+ attr = ynl_nlmsg_end_addr(nlh);
+ attr->nla_type = attr_type | NLA_F_NESTED;
+ nlh->nlmsg_len += NLA_HDRLEN;
+
+ return attr;
}
static inline void
-mnl_attr_put_uint(struct nlmsghdr *nlh, uint16_t type, uint64_t data)
+ynl_attr_nest_end(struct nlmsghdr *nlh, struct nlattr *attr)
{
- if ((uint32_t)data == (uint64_t)data)
- return mnl_attr_put_u32(nlh, type, data);
- return mnl_attr_put_u64(nlh, type, data);
+ attr->nla_len = (char *)ynl_nlmsg_end_addr(nlh) - (char *)attr;
+}
+
+static inline void
+ynl_attr_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ const void *value, size_t size)
+{
+ struct nlattr *attr;
+
+ if (__ynl_attr_put_overflow(nlh, size))
+ return;
+
+ attr = ynl_nlmsg_end_addr(nlh);
+ attr->nla_type = attr_type;
+ attr->nla_len = NLA_HDRLEN + size;
+
+ memcpy(ynl_attr_data(attr), value, size);
+
+ nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
+}
+
+static inline void
+ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
+{
+ struct nlattr *attr;
+ size_t len;
+
+ len = strlen(str);
+ if (__ynl_attr_put_overflow(nlh, len))
+ return;
+
+ attr = ynl_nlmsg_end_addr(nlh);
+ attr->nla_type = attr_type;
+
+ strcpy(ynl_attr_data(attr), str);
+ attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len);
+
+ nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
+}
+
+static inline const char *ynl_attr_get_str(const struct nlattr *attr)
+{
+ return (const char *)ynl_attr_data(attr);
+}
+
+static inline __s8 ynl_attr_get_s8(const struct nlattr *attr)
+{
+ return *(__s8 *)ynl_attr_data(attr);
+}
+
+static inline __s16 ynl_attr_get_s16(const struct nlattr *attr)
+{
+ return *(__s16 *)ynl_attr_data(attr);
+}
+
+static inline __s32 ynl_attr_get_s32(const struct nlattr *attr)
+{
+ return *(__s32 *)ynl_attr_data(attr);
+}
+
+static inline __s64 ynl_attr_get_s64(const struct nlattr *attr)
+{
+ __s64 tmp;
+
+ memcpy(&tmp, (unsigned char *)(attr + 1), sizeof(tmp));
+ return tmp;
+}
+
+static inline __u8 ynl_attr_get_u8(const struct nlattr *attr)
+{
+ return *(__u8 *)ynl_attr_data(attr);
+}
+
+static inline __u16 ynl_attr_get_u16(const struct nlattr *attr)
+{
+ return *(__u16 *)ynl_attr_data(attr);
+}
+
+static inline __u32 ynl_attr_get_u32(const struct nlattr *attr)
+{
+ return *(__u32 *)ynl_attr_data(attr);
+}
+
+static inline __u64 ynl_attr_get_u64(const struct nlattr *attr)
+{
+ __u64 tmp;
+
+ memcpy(&tmp, (unsigned char *)(attr + 1), sizeof(tmp));
+ return tmp;
+}
+
+static inline void
+ynl_attr_put_s8(struct nlmsghdr *nlh, unsigned int attr_type, __s8 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_s16(struct nlmsghdr *nlh, unsigned int attr_type, __s16 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_s32(struct nlmsghdr *nlh, unsigned int attr_type, __s32 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_s64(struct nlmsghdr *nlh, unsigned int attr_type, __s64 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u8(struct nlmsghdr *nlh, unsigned int attr_type, __u8 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u16(struct nlmsghdr *nlh, unsigned int attr_type, __u16 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u32(struct nlmsghdr *nlh, unsigned int attr_type, __u32 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline void
+ynl_attr_put_u64(struct nlmsghdr *nlh, unsigned int attr_type, __u64 value)
+{
+ ynl_attr_put(nlh, attr_type, &value, sizeof(value));
+}
+
+static inline __u64 ynl_attr_get_uint(const struct nlattr *attr)
+{
+ switch (ynl_attr_data_len(attr)) {
+ case 4:
+ return ynl_attr_get_u32(attr);
+ case 8:
+ return ynl_attr_get_u64(attr);
+ default:
+ return 0;
+ }
+}
+
+static inline __s64 ynl_attr_get_sint(const struct nlattr *attr)
+{
+ switch (ynl_attr_data_len(attr)) {
+ case 4:
+ return ynl_attr_get_s32(attr);
+ case 8:
+ return ynl_attr_get_s64(attr);
+ default:
+ return 0;
+ }
+}
+
+static inline void
+ynl_attr_put_uint(struct nlmsghdr *nlh, __u16 type, __u64 data)
+{
+ if ((__u32)data == (__u64)data)
+ ynl_attr_put_u32(nlh, type, data);
+ else
+ ynl_attr_put_u64(nlh, type, data);
+}
+
+static inline void
+ynl_attr_put_sint(struct nlmsghdr *nlh, __u16 type, __s64 data)
+{
+ if ((__s32)data == (__s64)data)
+ ynl_attr_put_s32(nlh, type, data);
+ else
+ ynl_attr_put_s64(nlh, type, data);
}
-#endif
#endif
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index c82a7f41b31c..b9e77af5af5f 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -3,10 +3,11 @@
#include <poll.h>
#include <string.h>
#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
#include <linux/types.h>
-
-#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
+#include <sys/socket.h>
#include "ynl.h"
@@ -92,9 +93,9 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
data_len = end - start;
- mnl_attr_for_each_payload(start, data_len) {
+ ynl_attr_for_each_payload(start, data_len, attr) {
astart_off = (char *)attr - (char *)start;
- aend_off = astart_off + mnl_attr_get_payload_len(attr);
+ aend_off = astart_off + ynl_attr_data_len(attr);
if (aend_off <= off)
continue;
@@ -106,7 +107,7 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
off -= astart_off;
- type = mnl_attr_get_type(attr);
+ type = ynl_attr_type(attr);
if (ynl_err_walk_report_one(policy, type, str, str_sz, &n))
return n;
@@ -124,8 +125,8 @@ ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off,
}
off -= sizeof(struct nlattr);
- start = mnl_attr_get_payload(attr);
- end = start + mnl_attr_get_payload_len(attr);
+ start = ynl_attr_data(attr);
+ end = start + ynl_attr_data_len(attr);
return n + ynl_err_walk(ys, start, end, off, policy->table[type].nest,
&str[n], str_sz - n, nest_pol);
@@ -147,14 +148,14 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) {
yerr_msg(ys, "%s", strerror(ys->err.code));
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
}
- mnl_attr_for_each(attr, nlh, hlen) {
+ ynl_attr_for_each(attr, nlh, hlen) {
unsigned int len, type;
- len = mnl_attr_get_payload_len(attr);
- type = mnl_attr_get_type(attr);
+ len = ynl_attr_data_len(attr);
+ type = ynl_attr_type(attr);
if (type > NLMSGERR_ATTR_MAX)
continue;
@@ -166,12 +167,12 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
case NLMSGERR_ATTR_MISS_TYPE:
case NLMSGERR_ATTR_MISS_NEST:
if (len != sizeof(__u32))
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
break;
case NLMSGERR_ATTR_MSG:
- str = mnl_attr_get_payload(attr);
+ str = ynl_attr_get_str(attr);
if (str[len - 1])
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
break;
default:
break;
@@ -185,14 +186,13 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
unsigned int n, off;
void *start, *end;
- ys->err.attr_offs = mnl_attr_get_u32(tb[NLMSGERR_ATTR_OFFS]);
+ ys->err.attr_offs = ynl_attr_get_u32(tb[NLMSGERR_ATTR_OFFS]);
n = snprintf(bad_attr, sizeof(bad_attr), "%sbad attribute: ",
str ? " (" : "");
- start = mnl_nlmsg_get_payload_offset(ys->nlh,
- ys->family->hdr_len);
- end = mnl_nlmsg_get_payload_tail(ys->nlh);
+ start = ynl_nlmsg_data_offset(ys->nlh, ys->family->hdr_len);
+ end = ynl_nlmsg_end_addr(ys->nlh);
off = ys->err.attr_offs;
off -= sizeof(struct nlmsghdr);
@@ -211,18 +211,17 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
void *start, *end;
int n2;
- type = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_TYPE]);
+ type = ynl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_TYPE]);
n = snprintf(miss_attr, sizeof(miss_attr), "%smissing attribute: ",
bad_attr[0] ? ", " : (str ? " (" : ""));
- start = mnl_nlmsg_get_payload_offset(ys->nlh,
- ys->family->hdr_len);
- end = mnl_nlmsg_get_payload_tail(ys->nlh);
+ start = ynl_nlmsg_data_offset(ys->nlh, ys->family->hdr_len);
+ end = ynl_nlmsg_end_addr(ys->nlh);
nest_pol = ys->req_policy;
if (tb[NLMSGERR_ATTR_MISS_NEST]) {
- off = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_NEST]);
+ off = ynl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_NEST]);
off -= sizeof(struct nlmsghdr);
off -= ys->family->hdr_len;
@@ -254,13 +253,13 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh,
else
yerr_msg(ys, "%s", strerror(ys->err.code));
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
}
-static int ynl_cb_error(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_cb_error(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- const struct nlmsgerr *err = mnl_nlmsg_get_payload(nlh);
- struct ynl_parse_arg *yarg = data;
+ const struct nlmsgerr *err = ynl_nlmsg_data(nlh);
unsigned int hlen;
int code;
@@ -270,16 +269,15 @@ static int ynl_cb_error(const struct nlmsghdr *nlh, void *data)
hlen = sizeof(*err);
if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
- hlen += mnl_nlmsg_get_payload_len(&err->msg);
+ hlen += ynl_nlmsg_data_len(&err->msg);
ynl_ext_ack_check(yarg->ys, nlh, hlen);
- return code ? MNL_CB_ERROR : MNL_CB_STOP;
+ return code ? YNL_PARSE_CB_ERROR : YNL_PARSE_CB_STOP;
}
-static int ynl_cb_done(const struct nlmsghdr *nlh, void *data)
+static int ynl_cb_done(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_parse_arg *yarg = data;
int err;
err = *(int *)NLMSG_DATA(nlh);
@@ -289,23 +287,11 @@ static int ynl_cb_done(const struct nlmsghdr *nlh, void *data)
ynl_ext_ack_check(yarg->ys, nlh, sizeof(int));
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
- return MNL_CB_STOP;
-}
-
-static int ynl_cb_noop(const struct nlmsghdr *nlh, void *data)
-{
- return MNL_CB_OK;
+ return YNL_PARSE_CB_STOP;
}
-mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE] = {
- [NLMSG_NOOP] = ynl_cb_noop,
- [NLMSG_ERROR] = ynl_cb_error,
- [NLMSG_DONE] = ynl_cb_done,
- [NLMSG_OVERRUN] = ynl_cb_noop,
-};
-
/* Attribute validation */
int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
@@ -314,9 +300,9 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
unsigned int type, len;
unsigned char *data;
- data = mnl_attr_get_payload(attr);
- len = mnl_attr_get_payload_len(attr);
- type = mnl_attr_get_type(attr);
+ data = ynl_attr_data(attr);
+ len = ynl_attr_data_len(attr);
+ type = ynl_attr_type(attr);
if (type > yarg->rsp_policy->max_attr) {
yerr(yarg->ys, YNL_ERROR_INTERNAL,
"Internal error, validating unknown attribute");
@@ -413,14 +399,38 @@ struct nlmsghdr *ynl_msg_start(struct ynl_sock *ys, __u32 id, __u16 flags)
ynl_err_reset(ys);
- nlh = ys->nlh = mnl_nlmsg_put_header(ys->tx_buf);
+ nlh = ys->nlh = ynl_nlmsg_put_header(ys->tx_buf);
nlh->nlmsg_type = id;
nlh->nlmsg_flags = flags;
nlh->nlmsg_seq = ++ys->seq;
+ /* This is a local YNL hack for length checking: we stash the buffer
+ * length in nlmsg_pid, since messages sent to the kernel always use
+ * PID 0. The message needs to be terminated with ynl_msg_end().
+ */
+ nlh->nlmsg_pid = YNL_SOCKET_BUFFER_SIZE;
+
return nlh;
}
+static int ynl_msg_end(struct ynl_sock *ys, struct nlmsghdr *nlh)
+{
+ /* We stash buffer length in nlmsg_pid. */
+ if (nlh->nlmsg_pid == 0) {
+ yerr(ys, YNL_ERROR_INPUT_INVALID,
+ "Unknown input buffer length");
+ return -EINVAL;
+ }
+ if (nlh->nlmsg_pid == YNL_MSG_OVERFLOW) {
+ yerr(ys, YNL_ERROR_INPUT_TOO_BIG,
+ "Constructred message longer than internal buffer");
+ return -EMSGSIZE;
+ }
+
+ nlh->nlmsg_pid = 0;
+ return 0;
+}
+
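For illustration, a hedged sketch of the resulting message lifecycle (MY_CMD and MY_ATTR are placeholders):

	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, MY_CMD, 1);
	ynl_attr_put_u32(nlh, MY_ATTR, val);	/* may latch the overflow flag */
	err = ynl_msg_end(ys, nlh);		/* -EMSGSIZE if it did */
	if (!err)
		err = send(ys->socket, nlh, nlh->nlmsg_len, 0);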
struct nlmsghdr *
ynl_gemsg_start(struct ynl_sock *ys, __u32 id, __u16 flags,
__u8 cmd, __u8 version)
@@ -435,7 +445,7 @@ ynl_gemsg_start(struct ynl_sock *ys, __u32 id, __u16 flags,
gehdr.cmd = cmd;
gehdr.version = version;
- data = mnl_nlmsg_put_extra_header(nlh, sizeof(gehdr));
+ data = ynl_nlmsg_put_extra_header(nlh, sizeof(gehdr));
memcpy(data, &gehdr, sizeof(gehdr));
return nlh;
@@ -464,31 +474,85 @@ ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version)
cmd, version);
}
-int ynl_recv_ack(struct ynl_sock *ys, int ret)
+static int ynl_cb_null(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- if (!ret) {
- yerr(ys, YNL_ERROR_EXPECT_ACK,
- "Expecting an ACK but nothing received");
- return -1;
+ yerr(yarg->ys, YNL_ERROR_UNEXPECT_MSG,
+ "Received a message when none were expected");
+
+ return YNL_PARSE_CB_ERROR;
+}
+
+static int
+__ynl_sock_read_msgs(struct ynl_parse_arg *yarg, ynl_parse_cb_t cb, int flags)
+{
+ struct ynl_sock *ys = yarg->ys;
+ const struct nlmsghdr *nlh;
+ ssize_t len, rem;
+ int ret;
+
+ len = recv(ys->socket, ys->rx_buf, YNL_SOCKET_BUFFER_SIZE, flags);
+ if (len < 0) {
+ if (flags & MSG_DONTWAIT && errno == EAGAIN)
+ return YNL_PARSE_CB_STOP;
+ return len;
}
- ret = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);
- if (ret < 0) {
- perr(ys, "Socket receive failed");
- return ret;
+ ret = YNL_PARSE_CB_STOP;
+ for (rem = len; rem > 0; NLMSG_NEXT(nlh, rem)) {
+ nlh = (struct nlmsghdr *)&ys->rx_buf[len - rem];
+ if (!NLMSG_OK(nlh, rem)) {
+ yerr(yarg->ys, YNL_ERROR_INV_RESP,
+ "Invalid message or trailing data in the response.");
+ return YNL_PARSE_CB_ERROR;
+ }
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP_INTR) {
+ /* TODO: handle this better */
+ yerr(yarg->ys, YNL_ERROR_DUMP_INTER,
+ "Dump interrupted / inconsistent, please retry.");
+ return YNL_PARSE_CB_ERROR;
+ }
+
+ switch (nlh->nlmsg_type) {
+ case 0:
+ yerr(yarg->ys, YNL_ERROR_INV_RESP,
+ "Invalid message type in the response.");
+ return YNL_PARSE_CB_ERROR;
+ case NLMSG_NOOP:
+ case NLMSG_OVERRUN ... NLMSG_MIN_TYPE - 1:
+ ret = YNL_PARSE_CB_OK;
+ break;
+ case NLMSG_ERROR:
+ ret = ynl_cb_error(nlh, yarg);
+ break;
+ case NLMSG_DONE:
+ ret = ynl_cb_done(nlh, yarg);
+ break;
+ default:
+ ret = cb(nlh, yarg);
+ break;
+ }
}
- return mnl_cb_run(ys->rx_buf, ret, ys->seq, ys->portid,
- ynl_cb_null, ys);
+
+ return ret;
}
-int ynl_cb_null(const struct nlmsghdr *nlh, void *data)
+static int ynl_sock_read_msgs(struct ynl_parse_arg *yarg, ynl_parse_cb_t cb)
{
- struct ynl_parse_arg *yarg = data;
+ return __ynl_sock_read_msgs(yarg, cb, 0);
+}
- yerr(yarg->ys, YNL_ERROR_UNEXPECT_MSG,
- "Received a message when none were expected");
+static int ynl_recv_ack(struct ynl_sock *ys, int ret)
+{
+ struct ynl_parse_arg yarg = { .ys = ys, };
+
+ if (!ret) {
+ yerr(ys, YNL_ERROR_EXPECT_ACK,
+ "Expecting an ACK but nothing received");
+ return -1;
+ }
- return MNL_CB_ERROR;
+ return ynl_sock_read_msgs(&yarg, ynl_cb_null);
}
/* Init/fini and genetlink boiler plate */
@@ -498,7 +562,7 @@ ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
const struct nlattr *entry, *attr;
unsigned int i;
- mnl_attr_for_each_nested(attr, mcasts)
+ ynl_attr_for_each_nested(attr, mcasts)
ys->n_mcast_groups++;
if (!ys->n_mcast_groups)
@@ -507,54 +571,55 @@ ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts)
ys->mcast_groups = calloc(ys->n_mcast_groups,
sizeof(*ys->mcast_groups));
if (!ys->mcast_groups)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
i = 0;
- mnl_attr_for_each_nested(entry, mcasts) {
- mnl_attr_for_each_nested(attr, entry) {
- if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_ID)
- ys->mcast_groups[i].id = mnl_attr_get_u32(attr);
- if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_NAME) {
+ ynl_attr_for_each_nested(entry, mcasts) {
+ ynl_attr_for_each_nested(attr, entry) {
+ if (ynl_attr_type(attr) == CTRL_ATTR_MCAST_GRP_ID)
+ ys->mcast_groups[i].id = ynl_attr_get_u32(attr);
+ if (ynl_attr_type(attr) == CTRL_ATTR_MCAST_GRP_NAME) {
strncpy(ys->mcast_groups[i].name,
- mnl_attr_get_str(attr),
+ ynl_attr_get_str(attr),
GENL_NAMSIZ - 1);
ys->mcast_groups[i].name[GENL_NAMSIZ - 1] = 0;
}
}
+ i++;
}
return 0;
}
-static int ynl_get_family_info_cb(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_get_family_info_cb(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_parse_arg *yarg = data;
struct ynl_sock *ys = yarg->ys;
const struct nlattr *attr;
bool found_id = true;
- mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
- if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GROUPS)
+ ynl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ if (ynl_attr_type(attr) == CTRL_ATTR_MCAST_GROUPS)
if (ynl_get_family_info_mcast(ys, attr))
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
- if (mnl_attr_get_type(attr) != CTRL_ATTR_FAMILY_ID)
+ if (ynl_attr_type(attr) != CTRL_ATTR_FAMILY_ID)
continue;
- if (mnl_attr_get_payload_len(attr) != sizeof(__u16)) {
+ if (ynl_attr_data_len(attr) != sizeof(__u16)) {
yerr(ys, YNL_ERROR_ATTR_INVALID, "Invalid family ID");
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
- ys->family_id = mnl_attr_get_u16(attr);
+ ys->family_id = ynl_attr_get_u16(attr);
found_id = true;
}
if (!found_id) {
yerr(ys, YNL_ERROR_ATTR_MISSING, "Family ID missing");
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
}
static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
@@ -564,66 +629,85 @@ static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
int err;
nlh = ynl_gemsg_start_req(ys, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, 1);
- mnl_attr_put_strz(nlh, CTRL_ATTR_FAMILY_NAME, family_name);
+ ynl_attr_put_str(nlh, CTRL_ATTR_FAMILY_NAME, family_name);
- err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);
+ err = ynl_msg_end(ys, nlh);
+ if (err < 0)
+ return err;
+
+ err = send(ys->socket, nlh, nlh->nlmsg_len, 0);
if (err < 0) {
perr(ys, "failed to request socket family info");
return err;
}
- err = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);
- if (err <= 0) {
- perr(ys, "failed to receive the socket family info");
+ err = ynl_sock_read_msgs(&yarg, ynl_get_family_info_cb);
+ if (err < 0) {
+ free(ys->mcast_groups);
+ perr(ys, "failed to receive the socket family info - no such family?");
return err;
}
- err = mnl_cb_run2(ys->rx_buf, err, ys->seq, ys->portid,
- ynl_get_family_info_cb, &yarg,
- ynl_cb_array, ARRAY_SIZE(ynl_cb_array));
+
+ err = ynl_recv_ack(ys, err);
if (err < 0) {
free(ys->mcast_groups);
- perr(ys, "failed to receive the socket family info - no such family?");
return err;
}
- return ynl_recv_ack(ys, err);
+ return 0;
}
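
ynl_sock_read_family() now builds the CTRL_CMD_GETFAMILY request with the library's own attribute helpers and reads the reply through the common message loop. For reference, a self-contained Python sketch of the same genetlink exchange (constants from linux/genetlink.h; error handling is omitted and the first message received is assumed to be the family reply):

  import socket, struct

  NETLINK_GENERIC = 16
  GENL_ID_CTRL = 0x10
  CTRL_CMD_GETFAMILY = 3
  CTRL_ATTR_FAMILY_ID = 1
  CTRL_ATTR_FAMILY_NAME = 2
  NLM_F_REQUEST = 0x1

  def resolve_family_id(name):
      s = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, NETLINK_GENERIC)
      s.bind((0, 0))
      payload = struct.pack("BBH", CTRL_CMD_GETFAMILY, 1, 0)   # genlmsghdr
      attr = name.encode() + b"\x00"                           # NUL-terminated name
      payload += struct.pack("HH", 4 + len(attr), CTRL_ATTR_FAMILY_NAME) + attr
      payload += bytes((4 - len(attr) % 4) % 4)                # pad to NLA_ALIGNTO
      nlh = struct.pack("IHHII", 16 + len(payload), GENL_ID_CTRL,
                        NLM_F_REQUEST, 1, 0)
      s.send(nlh + payload)
      reply = s.recv(65536)
      nl_len, nl_type = struct.unpack_from("IH", reply)
      off = 16 + 4                        # skip reply nlmsghdr + genlmsghdr
      while off < nl_len:
          nla_len, nla_type = struct.unpack_from("HH", reply, off)
          if nla_type == CTRL_ATTR_FAMILY_ID:
              return struct.unpack_from("H", reply, off + 4)[0]
          off += (nla_len + 3) & ~3       # attributes are 4-byte aligned
      return None

  # resolve_family_id("nlctrl") returns 0x10 (GENL_ID_CTRL) on any kernel
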
struct ynl_sock *
ynl_sock_create(const struct ynl_family *yf, struct ynl_error *yse)
{
+ struct sockaddr_nl addr;
struct ynl_sock *ys;
+ socklen_t addrlen;
int one = 1;
- ys = malloc(sizeof(*ys) + 2 * MNL_SOCKET_BUFFER_SIZE);
+ ys = malloc(sizeof(*ys) + 2 * YNL_SOCKET_BUFFER_SIZE);
if (!ys)
return NULL;
memset(ys, 0, sizeof(*ys));
ys->family = yf;
ys->tx_buf = &ys->raw_buf[0];
- ys->rx_buf = &ys->raw_buf[MNL_SOCKET_BUFFER_SIZE];
+ ys->rx_buf = &ys->raw_buf[YNL_SOCKET_BUFFER_SIZE];
ys->ntf_last_next = &ys->ntf_first;
- ys->sock = mnl_socket_open(NETLINK_GENERIC);
- if (!ys->sock) {
+ ys->socket = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+ if (ys->socket < 0) {
__perr(yse, "failed to create a netlink socket");
goto err_free_sock;
}
- if (mnl_socket_setsockopt(ys->sock, NETLINK_CAP_ACK,
- &one, sizeof(one))) {
+ if (setsockopt(ys->socket, SOL_NETLINK, NETLINK_CAP_ACK,
+ &one, sizeof(one))) {
__perr(yse, "failed to enable netlink ACK");
goto err_close_sock;
}
- if (mnl_socket_setsockopt(ys->sock, NETLINK_EXT_ACK,
- &one, sizeof(one))) {
+ if (setsockopt(ys->socket, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one))) {
__perr(yse, "failed to enable netlink ext ACK");
goto err_close_sock;
}
+ memset(&addr, 0, sizeof(addr));
+ addr.nl_family = AF_NETLINK;
+ if (bind(ys->socket, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+ __perr(yse, "unable to bind to a socket address");
+ goto err_close_sock;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addrlen = sizeof(addr);
+ if (getsockname(ys->socket, (struct sockaddr *)&addr, &addrlen) < 0) {
+ __perr(yse, "unable to read socket address");
+ goto err_close_sock;
+ }
+ ys->portid = addr.nl_pid;
ys->seq = random();
- ys->portid = mnl_socket_get_portid(ys->sock);
+
if (ynl_sock_read_family(ys, yf->name)) {
if (yse)
@@ -634,7 +718,7 @@ ynl_sock_create(const struct ynl_family *yf, struct ynl_error *yse)
return ys;
err_close_sock:
- mnl_socket_close(ys->sock);
+ close(ys->socket);
err_free_sock:
free(ys);
return NULL;
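
With libmnl gone, socket addressing is done by hand: bind() with nl_pid == 0 asks the kernel to assign a unique port ID, and getsockname() reads the assigned value back into ys->portid. The same sequence in Python, where AF_NETLINK sockets expose the (pid, groups) pair directly (constant values taken from linux/socket.h and linux/netlink.h):

  import socket

  NETLINK_GENERIC = 16
  SOL_NETLINK = 270      # from linux/socket.h
  NETLINK_CAP_ACK = 10   # from linux/netlink.h
  NETLINK_EXT_ACK = 11

  s = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, NETLINK_GENERIC)
  s.setsockopt(SOL_NETLINK, NETLINK_CAP_ACK, 1)  # don't echo requests in ACKs
  s.setsockopt(SOL_NETLINK, NETLINK_EXT_ACK, 1)  # opt in to extended ACKs
  s.bind((0, 0))                    # pid 0: kernel picks a unique port ID
  portid, groups = s.getsockname()  # read back what the kernel assigned
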
@@ -644,7 +728,7 @@ void ynl_sock_destroy(struct ynl_sock *ys)
{
struct ynl_ntf_base_type *ntf;
- mnl_socket_close(ys->sock);
+ close(ys->socket);
while ((ntf = ynl_ntf_dequeue(ys)))
ynl_ntf_free(ntf);
free(ys->mcast_groups);
@@ -671,9 +755,9 @@ int ynl_subscribe(struct ynl_sock *ys, const char *grp_name)
return -1;
}
- err = mnl_socket_setsockopt(ys->sock, NETLINK_ADD_MEMBERSHIP,
- &ys->mcast_groups[i].id,
- sizeof(ys->mcast_groups[i].id));
+ err = setsockopt(ys->socket, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+ &ys->mcast_groups[i].id,
+ sizeof(ys->mcast_groups[i].id));
if (err < 0) {
perr(ys, "Subscribing to multicast group failed");
return -1;
@@ -684,7 +768,7 @@ int ynl_subscribe(struct ynl_sock *ys, const char *grp_name)
int ynl_socket_get_fd(struct ynl_sock *ys)
{
- return mnl_socket_get_fd(ys->sock);
+ return ys->socket;
}
struct ynl_ntf_base_type *ynl_ntf_dequeue(struct ynl_sock *ys)
@@ -710,12 +794,12 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
struct genlmsghdr *gehdr;
int ret;
- gehdr = mnl_nlmsg_get_payload(nlh);
+ gehdr = ynl_nlmsg_data(nlh);
if (gehdr->cmd >= ys->family->ntf_info_size)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
info = &ys->family->ntf_info[gehdr->cmd];
if (!info->cb)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
rsp = calloc(1, info->alloc_sz);
rsp->free = info->free;
@@ -723,7 +807,7 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
yarg.rsp_policy = info->policy;
ret = info->cb(nlh, &yarg);
- if (ret <= MNL_CB_STOP)
+ if (ret <= YNL_PARSE_CB_STOP)
goto err_free;
rsp->family = nlh->nlmsg_type;
@@ -732,43 +816,27 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
*ys->ntf_last_next = rsp;
ys->ntf_last_next = &rsp->next;
- return MNL_CB_OK;
+ return YNL_PARSE_CB_OK;
err_free:
info->free(rsp);
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
-static int ynl_ntf_trampoline(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_ntf_trampoline(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- return ynl_ntf_parse((struct ynl_sock *)data, nlh);
+ return ynl_ntf_parse(yarg->ys, nlh);
}
int ynl_ntf_check(struct ynl_sock *ys)
{
- ssize_t len;
+ struct ynl_parse_arg yarg = { .ys = ys, };
int err;
do {
- /* libmnl doesn't let us pass flags to the recv to make
- * it non-blocking so we need to poll() or peek() :|
- */
- struct pollfd pfd = { };
-
- pfd.fd = mnl_socket_get_fd(ys->sock);
- pfd.events = POLLIN;
- err = poll(&pfd, 1, 1);
- if (err < 1)
- return err;
-
- len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
- MNL_SOCKET_BUFFER_SIZE);
- if (len < 0)
- return len;
-
- err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
- ynl_ntf_trampoline, ys,
- ynl_cb_array, NLMSG_MIN_TYPE);
+ err = __ynl_sock_read_msgs(&yarg, ynl_ntf_trampoline,
+ MSG_DONTWAIT);
if (err < 0)
return err;
} while (err > 0);
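
The poll() workaround disappears because a raw socket can pass MSG_DONTWAIT straight to recv(); check_ntf() on the Python side relies on the same flag. A minimal sketch of the pattern:

  import socket

  def drain_notifications(sock, recv_size=131072):
      """Read queued datagrams without blocking; stop when the queue is empty."""
      msgs = []
      while True:
          try:
              msgs.append(sock.recv(recv_size, socket.MSG_DONTWAIT))
          except BlockingIOError:
              return msgs
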
@@ -789,7 +857,7 @@ void ynl_error_unknown_notification(struct ynl_sock *ys, __u8 cmd)
int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg)
{
yerr(yarg->ys, YNL_ERROR_INV_RESP, "Error parsing response: %s", msg);
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
}
static int
@@ -797,27 +865,28 @@ ynl_check_alien(struct ynl_sock *ys, const struct nlmsghdr *nlh, __u32 rsp_cmd)
{
struct genlmsghdr *gehdr;
- if (mnl_nlmsg_get_payload_len(nlh) < sizeof(*gehdr)) {
+ if (ynl_nlmsg_data_len(nlh) < sizeof(*gehdr)) {
yerr(ys, YNL_ERROR_INV_RESP,
"Kernel responded with truncated message");
return -1;
}
- gehdr = mnl_nlmsg_get_payload(nlh);
+ gehdr = ynl_nlmsg_data(nlh);
if (gehdr->cmd != rsp_cmd)
return ynl_ntf_parse(ys, nlh);
return 0;
}
-static int ynl_req_trampoline(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_req_trampoline(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg)
{
- struct ynl_req_state *yrs = data;
+ struct ynl_req_state *yrs = (void *)yarg;
int ret;
ret = ynl_check_alien(yrs->yarg.ys, nlh, yrs->rsp_cmd);
if (ret)
- return ret < 0 ? MNL_CB_ERROR : MNL_CB_OK;
+ return ret < 0 ? YNL_PARSE_CB_ERROR : YNL_PARSE_CB_OK;
return yrs->cb(nlh, &yrs->yarg);
}
@@ -825,43 +894,38 @@ static int ynl_req_trampoline(const struct nlmsghdr *nlh, void *data)
int ynl_exec(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
struct ynl_req_state *yrs)
{
- ssize_t len;
int err;
- err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len);
+ err = ynl_msg_end(ys, req_nlh);
+ if (err < 0)
+ return err;
+
+ err = send(ys->socket, req_nlh, req_nlh->nlmsg_len, 0);
if (err < 0)
return err;
do {
- len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
- MNL_SOCKET_BUFFER_SIZE);
- if (len < 0)
- return len;
-
- err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
- ynl_req_trampoline, yrs,
- ynl_cb_array, NLMSG_MIN_TYPE);
- if (err < 0)
- return err;
+ err = ynl_sock_read_msgs(&yrs->yarg, ynl_req_trampoline);
} while (err > 0);
- return 0;
+ return err;
}
-static int ynl_dump_trampoline(const struct nlmsghdr *nlh, void *data)
+static int
+ynl_dump_trampoline(const struct nlmsghdr *nlh, struct ynl_parse_arg *data)
{
- struct ynl_dump_state *ds = data;
+ struct ynl_dump_state *ds = (void *)data;
struct ynl_dump_list_type *obj;
struct ynl_parse_arg yarg = {};
int ret;
- ret = ynl_check_alien(ds->ys, nlh, ds->rsp_cmd);
+ ret = ynl_check_alien(ds->yarg.ys, nlh, ds->rsp_cmd);
if (ret)
- return ret < 0 ? MNL_CB_ERROR : MNL_CB_OK;
+ return ret < 0 ? YNL_PARSE_CB_ERROR : YNL_PARSE_CB_OK;
obj = calloc(1, ds->alloc_sz);
if (!obj)
- return MNL_CB_ERROR;
+ return YNL_PARSE_CB_ERROR;
if (!ds->first)
ds->first = obj;
@@ -869,8 +933,7 @@ static int ynl_dump_trampoline(const struct nlmsghdr *nlh, void *data)
ds->last->next = obj;
ds->last = obj;
- yarg.ys = ds->ys;
- yarg.rsp_policy = ds->rsp_policy;
+ yarg = ds->yarg;
yarg.data = &obj->data;
return ds->cb(nlh, &yarg);
@@ -888,22 +951,18 @@ static void *ynl_dump_end(struct ynl_dump_state *ds)
int ynl_exec_dump(struct ynl_sock *ys, struct nlmsghdr *req_nlh,
struct ynl_dump_state *yds)
{
- ssize_t len;
int err;
- err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len);
+ err = ynl_msg_end(ys, req_nlh);
if (err < 0)
return err;
- do {
- len = mnl_socket_recvfrom(ys->sock, ys->rx_buf,
- MNL_SOCKET_BUFFER_SIZE);
- if (len < 0)
- goto err_close_list;
+ err = send(ys->socket, req_nlh, req_nlh->nlmsg_len, 0);
+ if (err < 0)
+ return err;
- err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
- ynl_dump_trampoline, yds,
- ynl_cb_array, NLMSG_MIN_TYPE);
+ do {
+ err = ynl_sock_read_msgs(&yds->yarg, ynl_dump_trampoline);
if (err < 0)
goto err_close_list;
} while (err > 0);
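
A dump reply is a stream of data messages terminated by NLMSG_DONE, which is why the loop keeps calling ynl_sock_read_msgs() while it reports more data. A sketch of the same termination logic over a raw receive buffer (NLMSG_DONE is 0x3 in linux/netlink.h):

  import struct

  NLMSG_DONE = 0x3

  def split_dump_buffer(buf):
      """Yield (type, payload) per message; stop at the NLMSG_DONE marker."""
      off = 0
      while off + 16 <= len(buf):
          nl_len, nl_type, _, _, _ = struct.unpack_from("IHHII", buf, off)
          if nl_type == NLMSG_DONE:
              return
          yield nl_type, buf[off + 16 : off + nl_len]
          off += (nl_len + 3) & ~3   # messages are 4-byte aligned
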
diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h
index ce77a6d76ce0..9842e85a8c57 100644
--- a/tools/net/ynl/lib/ynl.h
+++ b/tools/net/ynl/lib/ynl.h
@@ -12,6 +12,7 @@ enum ynl_error_code {
YNL_ERROR_NONE = 0,
__YNL_ERRNO_END = 4096,
YNL_ERROR_INTERNAL,
+ YNL_ERROR_DUMP_INTER,
YNL_ERROR_EXPECT_ACK,
YNL_ERROR_EXPECT_MSG,
YNL_ERROR_UNEXPECT_MSG,
@@ -19,6 +20,8 @@ enum ynl_error_code {
YNL_ERROR_ATTR_INVALID,
YNL_ERROR_UNKNOWN_NTF,
YNL_ERROR_INV_RESP,
+ YNL_ERROR_INPUT_INVALID,
+ YNL_ERROR_INPUT_TOO_BIG,
};
/**
@@ -58,7 +61,7 @@ struct ynl_sock {
/* private: */
const struct ynl_family *family;
- struct mnl_socket *sock;
+ int socket;
__u32 seq;
__u32 portid;
__u16 family_id;
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 1e10512b2117..2d7fdd903d9e 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -7,6 +7,7 @@ import random
import socket
import struct
from struct import Struct
+import sys
import yaml
import ipaddress
import uuid
@@ -84,6 +85,10 @@ class NlError(Exception):
return f"Netlink error: {os.strerror(-self.nl_msg.error)}\n{self.nl_msg}"
+class ConfigError(Exception):
+ pass
+
+
class NlAttr:
ScalarFormat = namedtuple('ScalarFormat', ['native', 'big', 'little'])
type_formats = {
@@ -113,20 +118,6 @@ class NlAttr:
else format.little
return format.native
- @classmethod
- def formatted_string(cls, raw, display_hint):
- if display_hint == 'mac':
- formatted = ':'.join('%02x' % b for b in raw)
- elif display_hint == 'hex':
- formatted = bytes.hex(raw, ' ')
- elif display_hint in [ 'ipv4', 'ipv6' ]:
- formatted = format(ipaddress.ip_address(raw))
- elif display_hint == 'uuid':
- formatted = str(uuid.UUID(bytes=raw))
- else:
- formatted = raw
- return formatted
-
def as_scalar(self, attr_type, byte_order=None):
format = self.get_format(attr_type, byte_order)
return format.unpack(self.raw)[0]
@@ -148,23 +139,6 @@ class NlAttr:
format = self.get_format(type)
return [ x[0] for x in format.iter_unpack(self.raw) ]
- def as_struct(self, members):
- value = dict()
- offset = 0
- for m in members:
- # TODO: handle non-scalar members
- if m.type == 'binary':
- decoded = self.raw[offset : offset + m['len']]
- offset += m['len']
- elif m.type in NlAttr.type_formats:
- format = self.get_format(m.type, m.byte_order)
- [ decoded ] = format.unpack_from(self.raw, offset)
- offset += format.size
- if m.display_hint:
- decoded = self.formatted_string(decoded, m.display_hint)
- value[m.name] = decoded
- return value
-
def __repr__(self):
return f"[type:{self.type} len:{self._len}] {self.raw}"
@@ -244,11 +218,11 @@ class NlMsg:
return self.nl_type
def __repr__(self):
- msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}\n"
+ msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}"
if self.error:
- msg += '\terror: ' + str(self.error)
+ msg += '\n\terror: ' + str(self.error)
if self.extack:
- msg += '\textack: ' + repr(self.extack)
+ msg += '\n\textack: ' + repr(self.extack)
return msg
@@ -370,7 +344,7 @@ class NetlinkProtocol:
fixed_header_size = 0
if ynl:
op = ynl.rsp_by_value[msg.cmd()]
- fixed_header_size = ynl._fixed_header_size(op.fixed_header)
+ fixed_header_size = ynl._struct_size(op.fixed_header)
msg.raw_attrs = NlAttrs(msg.raw, fixed_header_size)
return msg
@@ -379,6 +353,9 @@ class NetlinkProtocol:
raise Exception(f'Multicast group "{mcast_name}" not present in the spec')
return mcast_groups[mcast_name].value
+ def msghdr_size(self):
+ return 16
+
class GenlProtocol(NetlinkProtocol):
def __init__(self, family_name):
@@ -404,6 +381,28 @@ class GenlProtocol(NetlinkProtocol):
raise Exception(f'Multicast group "{mcast_name}" not present in the family')
return self.genl_family['mcast'][mcast_name]
+ def msghdr_size(self):
+ return super().msghdr_size() + 4
+
+
+class SpaceAttrs:
+ SpecValuesPair = namedtuple('SpecValuesPair', ['spec', 'values'])
+
+ def __init__(self, attr_space, attrs, outer = None):
+ outer_scopes = outer.scopes if outer else []
+ inner_scope = self.SpecValuesPair(attr_space, attrs)
+ self.scopes = [inner_scope] + outer_scopes
+
+ def lookup(self, name):
+ for scope in self.scopes:
+ if name in scope.spec:
+ if name in scope.values:
+ return scope.values[name]
+ spec_name = scope.spec.yaml['name']
+ raise Exception(
+ f"No value for '{name}' in attribute space '{spec_name}'")
+ raise Exception(f"Attribute '{name}' not defined in any attribute-set")
+
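
SpaceAttrs gives sub-message selector resolution a lexically scoped view of the attributes decoded so far: the innermost attribute set is consulted first, then each enclosing one. A toy illustration with a stand-in spec object (the MiniSpec class below is purely hypothetical; real specs come from SpecFamily):

  class MiniSpec:
      """Stand-in for an attr-space spec: supports 'in' and has .yaml."""
      def __init__(self, name, attrs):
          self.yaml = {'name': name}
          self._attrs = set(attrs)
      def __contains__(self, name):
          return name in self._attrs

  outer = SpaceAttrs(MiniSpec('outer', {'family'}), {'family': 'ovs'})
  inner = SpaceAttrs(MiniSpec('inner', {'port'}), {'port': 7}, outer)
  assert inner.lookup('port') == 7        # found in the inner scope
  assert inner.lookup('family') == 'ovs'  # falls back to the outer scope
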
#
# YNL implementation details.
@@ -411,7 +410,8 @@ class GenlProtocol(NetlinkProtocol):
class YnlFamily(SpecFamily):
- def __init__(self, def_path, schema=None, process_unknown=False):
+ def __init__(self, def_path, schema=None, process_unknown=False,
+ recv_size=0):
super().__init__(def_path, schema)
self.include_raw = False
@@ -426,6 +426,17 @@ class YnlFamily(SpecFamily):
except KeyError:
raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
+ self._recv_dbg = False
+ # Note that netlink will use conservative (min) message size for
+ # the first dump recv() on the socket, our setting will only matter
+ # from the second recv() on.
+ self._recv_size = recv_size if recv_size else 131072
+ # Netlink will always allocate at least PAGE_SIZE - sizeof(skb_shinfo)
+ # for a message, so smaller receive sizes will lead to truncation.
+ # Note that the min size for other families may be larger than 4k!
+ if self._recv_size < 4000:
+ raise ConfigError()
+
self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, self.nlproto.proto_num)
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
@@ -449,18 +460,61 @@ class YnlFamily(SpecFamily):
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_ADD_MEMBERSHIP,
mcast_id)
- def _add_attr(self, space, name, value):
+ def set_recv_dbg(self, enabled):
+ self._recv_dbg = enabled
+
+ def _recv_dbg_print(self, reply, nl_msgs):
+ if not self._recv_dbg:
+ return
+ print("Recv: read", len(reply), "bytes,",
+ len(nl_msgs.msgs), "messages", file=sys.stderr)
+ for nl_msg in nl_msgs:
+ print(" ", nl_msg, file=sys.stderr)
+
+ def _encode_enum(self, attr_spec, value):
+ enum = self.consts[attr_spec['enum']]
+ if enum.type == 'flags' or attr_spec.get('enum-as-flags', False):
+ scalar = 0
+ if isinstance(value, str):
+ value = [value]
+ for single_value in value:
+ scalar += enum.entries[single_value].user_value(as_flags = True)
+ return scalar
+ else:
+ return enum.entries[value].user_value()
+
+ def _get_scalar(self, attr_spec, value):
+ try:
+ return int(value)
+ except (ValueError, TypeError) as e:
+ if 'enum' not in attr_spec:
+ raise e
+ return self._encode_enum(attr_spec, value)
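
_get_scalar() lets attribute values be spelled as enum names, or lists of flag names, instead of raw integers. A standalone sketch of the flag folding performed by _encode_enum() (the flag values here are hypothetical; real ones come from the spec's enum entries):

  def flags_to_scalar(value, flag_values):
      """Fold a flag name, or a list of flag names, into a single integer."""
      if isinstance(value, str):
          value = [value]
      scalar = 0
      for name in value:
          scalar |= flag_values[name]   # each flag contributes one bit
      return scalar

  assert flags_to_scalar(['up', 'running'], {'up': 1, 'running': 4}) == 5
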
+
+ def _add_attr(self, space, name, value, search_attrs):
try:
attr = self.attr_sets[space][name]
except KeyError:
raise Exception(f"Space '{space}' has no attribute '{name}'")
nl_type = attr.value
+
+ if attr.is_multi and isinstance(value, list):
+ attr_payload = b''
+ for subvalue in value:
+ attr_payload += self._add_attr(space, name, subvalue, search_attrs)
+ return attr_payload
+
if attr["type"] == 'nest':
nl_type |= Netlink.NLA_F_NESTED
attr_payload = b''
+ sub_attrs = SpaceAttrs(self.attr_sets[space], value, search_attrs)
for subname, subvalue in value.items():
- attr_payload += self._add_attr(attr['nested-attributes'], subname, subvalue)
+ attr_payload += self._add_attr(attr['nested-attributes'],
+ subname, subvalue, sub_attrs)
elif attr["type"] == 'flag':
+ if not value:
+ # If value is absent or false then skip attribute creation.
+ return b''
attr_payload = b''
elif attr["type"] == 'string':
attr_payload = str(value).encode('ascii') + b'\x00'
@@ -469,18 +523,36 @@ class YnlFamily(SpecFamily):
attr_payload = value
elif isinstance(value, str):
attr_payload = bytes.fromhex(value)
+ elif isinstance(value, dict) and attr.struct_name:
+ attr_payload = self._encode_struct(attr.struct_name, value)
else:
raise Exception(f'Unknown type for binary attribute, value: {value}')
- elif attr.is_auto_scalar:
- scalar = int(value)
- real_type = attr["type"][0] + ('32' if scalar.bit_length() <= 32 else '64')
- format = NlAttr.get_format(real_type, attr.byte_order)
- attr_payload = format.pack(int(value))
- elif attr['type'] in NlAttr.type_formats:
- format = NlAttr.get_format(attr['type'], attr.byte_order)
- attr_payload = format.pack(int(value))
+ elif attr['type'] in NlAttr.type_formats or attr.is_auto_scalar:
+ scalar = self._get_scalar(attr, value)
+ if attr.is_auto_scalar:
+ attr_type = attr["type"][0] + ('32' if scalar.bit_length() <= 32 else '64')
+ else:
+ attr_type = attr["type"]
+ format = NlAttr.get_format(attr_type, attr.byte_order)
+ attr_payload = format.pack(scalar)
elif attr['type'] in "bitfield32":
- attr_payload = struct.pack("II", int(value["value"]), int(value["selector"]))
+ scalar_value = self._get_scalar(attr, value["value"])
+ scalar_selector = self._get_scalar(attr, value["selector"])
+ attr_payload = struct.pack("II", scalar_value, scalar_selector)
+ elif attr['type'] == 'sub-message':
+ msg_format = self._resolve_selector(attr, search_attrs)
+ attr_payload = b''
+ if msg_format.fixed_header:
+ attr_payload += self._encode_struct(msg_format.fixed_header, value)
+ if msg_format.attr_set:
+ if msg_format.attr_set in self.attr_sets:
+ nl_type |= Netlink.NLA_F_NESTED
+ sub_attrs = SpaceAttrs(msg_format.attr_set, value, search_attrs)
+ for subname, subvalue in value.items():
+ attr_payload += self._add_attr(msg_format.attr_set,
+ subname, subvalue, sub_attrs)
+ else:
+ raise Exception(f"Unknown attribute-set '{msg_format.attr_set}'")
else:
raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
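
Whichever branch runs, _add_attr() ultimately wraps attr_payload in the standard netlink TLV: a 4-byte header carrying the total length and the type, then the payload padded to a 4-byte boundary. Roughly (little-endian host assumed in the final comparison):

  import struct

  def nlattr(nla_type, payload):
      """Pack one struct nlattr: 16-bit len (incl. 4-byte header), 16-bit type."""
      hdr = struct.pack("HH", 4 + len(payload), nla_type)
      pad = bytes((4 - len(payload) % 4) % 4)   # NLA_ALIGNTO padding
      return hdr + payload + pad

  # a u32 attribute of (hypothetical) type 1 carrying the value 10:
  assert nlattr(1, struct.pack("I", 10)) == b"\x08\x00\x01\x00\x0a\x00\x00\x00"
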
@@ -503,17 +575,13 @@ class YnlFamily(SpecFamily):
def _decode_binary(self, attr, attr_spec):
if attr_spec.struct_name:
- members = self.consts[attr_spec.struct_name]
- decoded = attr.as_struct(members)
- for m in members:
- if m.enum:
- decoded[m.name] = self._decode_enum(decoded[m.name], m)
+ decoded = self._decode_struct(attr.raw, attr_spec.struct_name)
elif attr_spec.sub_type:
decoded = attr.as_c_array(attr_spec.sub_type)
else:
decoded = attr.as_bin()
if attr_spec.display_hint:
- decoded = NlAttr.formatted_string(decoded, attr_spec.display_hint)
+ decoded = self._formatted_string(decoded, attr_spec.display_hint)
return decoded
def _decode_array_nest(self, attr, attr_spec):
@@ -527,6 +595,16 @@ class YnlFamily(SpecFamily):
decoded.append({ item.type: subattrs })
return decoded
+ def _decode_nest_type_value(self, attr, attr_spec):
+ decoded = {}
+ value = attr
+ for name in attr_spec['type-value']:
+ value = NlAttr(value.raw, 0)
+ decoded[name] = value.type
+ subattrs = self._decode(NlAttrs(value.raw), attr_spec['nested-attributes'])
+ decoded.update(subattrs)
+ return decoded
+
def _decode_unknown(self, attr):
if attr.is_nest:
return self._decode(NlAttrs(attr.raw), None)
@@ -548,29 +626,27 @@ class YnlFamily(SpecFamily):
else:
rsp[name] = [decoded]
- def _resolve_selector(self, attr_spec, vals):
+ def _resolve_selector(self, attr_spec, search_attrs):
sub_msg = attr_spec.sub_message
if sub_msg not in self.sub_msgs:
raise Exception(f"No sub-message spec named {sub_msg} for {attr_spec.name}")
sub_msg_spec = self.sub_msgs[sub_msg]
selector = attr_spec.selector
- if selector not in vals:
- raise Exception(f"There is no value for {selector} to resolve '{attr_spec.name}'")
- value = vals[selector]
+ value = search_attrs.lookup(selector)
if value not in sub_msg_spec.formats:
raise Exception(f"No message format for '{value}' in sub-message spec '{sub_msg}'")
spec = sub_msg_spec.formats[value]
return spec
- def _decode_sub_msg(self, attr, attr_spec, rsp):
- msg_format = self._resolve_selector(attr_spec, rsp)
+ def _decode_sub_msg(self, attr, attr_spec, search_attrs):
+ msg_format = self._resolve_selector(attr_spec, search_attrs)
decoded = {}
offset = 0
if msg_format.fixed_header:
- decoded.update(self._decode_fixed_header(attr, msg_format.fixed_header));
- offset = self._fixed_header_size(msg_format.fixed_header)
+ decoded.update(self._decode_struct(attr.raw, msg_format.fixed_header))
+ offset = self._struct_size(msg_format.fixed_header)
if msg_format.attr_set:
if msg_format.attr_set in self.attr_sets:
subdict = self._decode(NlAttrs(attr.raw, offset), msg_format.attr_set)
@@ -579,10 +655,12 @@ class YnlFamily(SpecFamily):
raise Exception(f"Unknown attribute-set '{attr_space}' when decoding '{attr_spec.name}'")
return decoded
- def _decode(self, attrs, space):
+ def _decode(self, attrs, space, outer_attrs = None):
+ rsp = dict()
if space:
attr_space = self.attr_sets[space]
- rsp = dict()
+ search_attrs = SpaceAttrs(attr_space, rsp, outer_attrs)
+
for attr in attrs:
try:
attr_spec = attr_space.attrs_by_val[attr.type]
@@ -594,7 +672,7 @@ class YnlFamily(SpecFamily):
continue
if attr_spec["type"] == 'nest':
- subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'])
+ subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'], search_attrs)
decoded = subdict
elif attr_spec["type"] == 'string':
decoded = attr.as_strz()
@@ -617,7 +695,9 @@ class YnlFamily(SpecFamily):
selector = self._decode_enum(selector, attr_spec)
decoded = {"value": value, "selector": selector}
elif attr_spec["type"] == 'sub-message':
- decoded = self._decode_sub_msg(attr, attr_spec, rsp)
+ decoded = self._decode_sub_msg(attr, attr_spec, search_attrs)
+ elif attr_spec["type"] == 'nest-type-value':
+ decoded = self._decode_nest_type_value(attr, attr_spec)
else:
if not self.process_unknown:
raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
@@ -658,20 +738,23 @@ class YnlFamily(SpecFamily):
return
msg = self.nlproto.decode(self, NlMsg(request, 0, op.attr_set))
- offset = 20 + self._fixed_header_size(op.fixed_header)
+ offset = self.nlproto.msghdr_size() + self._struct_size(op.fixed_header)
path = self._decode_extack_path(msg.raw_attrs, op.attr_set, offset,
extack['bad-attr-offs'])
if path:
del extack['bad-attr-offs']
extack['bad-attr'] = path
- def _fixed_header_size(self, name):
+ def _struct_size(self, name):
if name:
- fixed_header_members = self.consts[name].members
+ members = self.consts[name].members
size = 0
- for m in fixed_header_members:
+ for m in members:
if m.type in ['pad', 'binary']:
- size += m.len
+ if m.struct:
+ size += self._struct_size(m.struct)
+ else:
+ size += m.len
else:
format = NlAttr.get_format(m.type, m.byte_order)
size += format.size
@@ -679,26 +762,71 @@ class YnlFamily(SpecFamily):
else:
return 0
- def _decode_fixed_header(self, msg, name):
- fixed_header_members = self.consts[name].members
- fixed_header_attrs = dict()
+ def _decode_struct(self, data, name):
+ members = self.consts[name].members
+ attrs = dict()
offset = 0
- for m in fixed_header_members:
+ for m in members:
value = None
if m.type == 'pad':
offset += m.len
elif m.type == 'binary':
- value = msg.raw[offset : offset + m.len]
- offset += m.len
+ if m.struct:
+ len = self._struct_size(m.struct)
+ value = self._decode_struct(data[offset : offset + len],
+ m.struct)
+ offset += len
+ else:
+ value = data[offset : offset + m.len]
+ offset += m.len
else:
format = NlAttr.get_format(m.type, m.byte_order)
- [ value ] = format.unpack_from(msg.raw, offset)
+ [ value ] = format.unpack_from(data, offset)
offset += format.size
if value is not None:
if m.enum:
value = self._decode_enum(value, m)
- fixed_header_attrs[m.name] = value
- return fixed_header_attrs
+ elif m.display_hint:
+ value = self._formatted_string(value, m.display_hint)
+ attrs[m.name] = value
+ return attrs
+
+ def _encode_struct(self, name, vals):
+ members = self.consts[name].members
+ attr_payload = b''
+ for m in members:
+ value = vals.pop(m.name) if m.name in vals else None
+ if m.type == 'pad':
+ attr_payload += bytearray(m.len)
+ elif m.type == 'binary':
+ if m.struct:
+ if value is None:
+ value = dict()
+ attr_payload += self._encode_struct(m.struct, value)
+ else:
+ if value is None:
+ attr_payload += bytearray(m.len)
+ else:
+ attr_payload += bytes.fromhex(value)
+ else:
+ if value is None:
+ value = 0
+ format = NlAttr.get_format(m.type, m.byte_order)
+ attr_payload += format.pack(value)
+ return attr_payload
+
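
_encode_struct() and _decode_struct() are intended to be symmetric, including for nested struct members. A small illustration of the scalar path using Python's struct module directly (the member layout here is hypothetical):

  import struct

  # hypothetical fixed header: u16 id, 2 pad bytes, u32 flags
  def encode(vals):
      return (struct.pack("H", vals.get("id", 0)) + bytes(2) +
              struct.pack("I", vals.get("flags", 0)))

  def decode(data):
      ident, = struct.unpack_from("H", data, 0)
      flags, = struct.unpack_from("I", data, 4)   # the 2 pad bytes are skipped
      return {"id": ident, "flags": flags}

  assert decode(encode({"id": 3, "flags": 0x10})) == {"id": 3, "flags": 0x10}
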
+ def _formatted_string(self, raw, display_hint):
+ if display_hint == 'mac':
+ formatted = ':'.join('%02x' % b for b in raw)
+ elif display_hint == 'hex':
+ formatted = bytes.hex(raw, ' ')
+ elif display_hint in [ 'ipv4', 'ipv6' ]:
+ formatted = format(ipaddress.ip_address(raw))
+ elif display_hint == 'uuid':
+ formatted = str(uuid.UUID(bytes=raw))
+ else:
+ formatted = raw
+ return formatted
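
_formatted_string() moved from NlAttr so that struct members, not just attributes, can carry display hints. Its behavior on raw byte strings, for reference (bytes.hex() with a separator needs Python 3.8+):

  import ipaddress, uuid

  assert format(ipaddress.ip_address(bytes([192, 0, 2, 1]))) == "192.0.2.1"  # ipv4
  assert ":".join("%02x" % b for b in b"\x00\x11\x22\x33\x44\x55") \
      == "00:11:22:33:44:55"                                                 # mac
  assert b"\xde\xad".hex(" ") == "de ad"                                     # hex
  assert str(uuid.UUID(bytes=bytes(16))) \
      == "00000000-0000-0000-0000-000000000000"                              # uuid
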
def handle_ntf(self, decoded):
msg = dict()
@@ -707,7 +835,7 @@ class YnlFamily(SpecFamily):
op = self.rsp_by_value[decoded.cmd()]
attrs = self._decode(decoded.raw_attrs, op.attr_set.name)
if op.fixed_header:
- attrs.update(self._decode_fixed_header(decoded, op.fixed_header))
+ attrs.update(self._decode_struct(decoded.raw, op.fixed_header))
msg['name'] = op['name']
msg['msg'] = attrs
@@ -716,11 +844,12 @@ class YnlFamily(SpecFamily):
def check_ntf(self):
while True:
try:
- reply = self.sock.recv(128 * 1024, socket.MSG_DONTWAIT)
+ reply = self.sock.recv(self._recv_size, socket.MSG_DONTWAIT)
except BlockingIOError:
return
nms = NlMsgs(reply)
+ self._recv_dbg_print(reply, nms)
for nl_msg in nms:
if nl_msg.error:
print("Netlink error in ntf!?", os.strerror(-nl_msg.error))
@@ -759,20 +888,11 @@ class YnlFamily(SpecFamily):
req_seq = random.randint(1024, 65535)
msg = self.nlproto.message(nl_flags, op.req_value, 1, req_seq)
- fixed_header_members = []
if op.fixed_header:
- fixed_header_members = self.consts[op.fixed_header].members
- for m in fixed_header_members:
- value = vals.pop(m.name) if m.name in vals else 0
- if m.type == 'pad':
- msg += bytearray(m.len)
- elif m.type == 'binary':
- msg += bytes.fromhex(value)
- else:
- format = NlAttr.get_format(m.type, m.byte_order)
- msg += format.pack(value)
+ msg += self._encode_struct(op.fixed_header, vals)
+ search_attrs = SpaceAttrs(op.attr_set, vals)
for name, value in vals.items():
- msg += self._add_attr(op.attr_set.name, name, value)
+ msg += self._add_attr(op.attr_set.name, name, value, search_attrs)
msg = _genl_msg_finalize(msg)
self.sock.send(msg, 0)
@@ -780,8 +900,9 @@ class YnlFamily(SpecFamily):
done = False
rsp = []
while not done:
- reply = self.sock.recv(128 * 1024)
+ reply = self.sock.recv(self._recv_size)
nms = NlMsgs(reply, attr_space=op.attr_set)
+ self._recv_dbg_print(reply, nms)
for nl_msg in nms:
if nl_msg.extack:
self._decode_extack(msg, op, nl_msg.extack)
@@ -808,7 +929,7 @@ class YnlFamily(SpecFamily):
rsp_msg = self._decode(decoded.raw_attrs, op.attr_set.name)
if op.fixed_header:
- rsp_msg.update(self._decode_fixed_header(decoded, op.fixed_header))
+ rsp_msg.update(self._decode_struct(decoded.raw, op.fixed_header))
rsp.append(rsp_msg)
if not rsp:
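
Taken together, the new knobs can be exercised roughly like this (a hypothetical sketch: the spec path and operation name depend on the family being used, and the import assumes the script runs from tools/net/ynl where the library is importable):

  from lib import YnlFamily   # run from tools/net/ynl

  family = YnlFamily("Documentation/netlink/specs/netdev.yaml",
                     recv_size=256 * 1024)  # values below ~4k raise ConfigError
  family.set_recv_dbg(True)                 # log raw recv() traffic to stderr
  for dev in family.dump("dev-get", {}):    # generic dump entry point
      print(dev)
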
diff --git a/tools/net/ynl/samples/.gitignore b/tools/net/ynl/samples/.gitignore
index 49637b26c482..dda6686257a7 100644
--- a/tools/net/ynl/samples/.gitignore
+++ b/tools/net/ynl/samples/.gitignore
@@ -1,4 +1,5 @@
ethtool
devlink
netdev
+ovs
page-pool
\ No newline at end of file
diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile
index 28bdb1557a54..e194a7565861 100644
--- a/tools/net/ynl/samples/Makefile
+++ b/tools/net/ynl/samples/Makefile
@@ -9,7 +9,7 @@ ifeq ("$(DEBUG)","1")
CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan
endif
-LDLIBS=-lmnl ../lib/ynl.a ../generated/protos.a
+LDLIBS=../lib/ynl.a ../generated/protos.a
SRCS=$(wildcard *.c)
BINS=$(patsubst %.c,%,${SRCS})
@@ -28,8 +28,8 @@ $(BINS): ../lib/ynl.a ../generated/protos.a $(SRCS)
clean:
rm -f *.o *.d *~
-hardclean: clean
+distclean: clean
rm -f $(BINS)
-.PHONY: all clean
+.PHONY: all clean distclean
.DEFAULT_GOAL=all
diff --git a/tools/net/ynl/samples/ovs.c b/tools/net/ynl/samples/ovs.c
new file mode 100644
index 000000000000..3e975c003d77
--- /dev/null
+++ b/tools/net/ynl/samples/ovs.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+
+#include <ynl.h>
+
+#include "ovs_datapath-user.h"
+
+int main(int argc, char **argv)
+{
+ struct ynl_sock *ys;
+ int err;
+
+ ys = ynl_sock_create(&ynl_ovs_datapath_family, NULL);
+ if (!ys)
+ return 1;
+
+ if (argc > 1) {
+ struct ovs_datapath_new_req *req;
+
+ req = ovs_datapath_new_req_alloc();
+ if (!req)
+ goto err_close;
+
+ ovs_datapath_new_req_set_upcall_pid(req, 1);
+ ovs_datapath_new_req_set_name(req, argv[1]);
+
+ err = ovs_datapath_new(ys, req);
+ ovs_datapath_new_req_free(req);
+ if (err)
+ goto err_close;
+ } else {
+ struct ovs_datapath_get_req_dump *req;
+ struct ovs_datapath_get_list *dps;
+
+ printf("Dump:\n");
+ req = ovs_datapath_get_req_dump_alloc();
+
+ dps = ovs_datapath_get_dump(ys, req);
+ ovs_datapath_get_req_dump_free(req);
+ if (!dps)
+ goto err_close;
+
+ ynl_dump_foreach(dps, dp) {
+ printf(" %s(%d): pid:%u cache:%u\n",
+ dp->name, dp->_hdr.dp_ifindex,
+ dp->upcall_pid, dp->masks_cache_size);
+ }
+ ovs_datapath_get_list_free(dps);
+ }
+
+ ynl_sock_destroy(ys);
+
+ return 0;
+
+err_close:
+ fprintf(stderr, "YNL (%d): %s\n", ys->err.code, ys->err.msg);
+ ynl_sock_destroy(ys);
+ return 2;
+}
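
For comparison, the same datapath dump can be reproduced from the Python library; a hedged sketch (the spec path and attribute names are assumptions based on the ovs_datapath spec):

  from lib import YnlFamily   # run from tools/net/ynl

  ovs = YnlFamily("Documentation/netlink/specs/ovs_datapath.yaml")
  for dp in ovs.dump("get", {"dp-ifindex": 0}):   # 0: dump all datapaths
      print(dp.get("name"), dp.get("upcall-pid"))
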
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index 7fc1aa788f6f..67bfaff05154 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -40,14 +40,6 @@ class BaseNlLib:
def get_family_id(self):
return 'ys->family_id'
- def parse_cb_run(self, cb, data, is_dump=False, indent=1):
- ind = '\n\t\t' + '\t' * indent + ' '
- if is_dump:
- return f"mnl_cb_run2(ys->rx_buf, len, 0, 0, {cb}, {data},{ind}ynl_cb_array, NLMSG_MIN_TYPE)"
- else:
- return f"mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,{ind}{cb}, {data},{ind}" + \
- "ynl_cb_array, NLMSG_MIN_TYPE)"
-
class Type(SpecAttr):
def __init__(self, family, attr_set, attr, value):
@@ -168,15 +160,6 @@ class Type(SpecAttr):
spec = self._attr_policy(policy)
cw.p(f"\t[{self.enum_name}] = {spec},")
- def _mnl_type(self):
- # mnl does not have helpers for signed integer types
- # turn signed type into unsigned
- # this only makes sense for scalar types
- t = self.type
- if t[0] == 's':
- t = 'u' + t[1:]
- return t
-
def _attr_typol(self):
raise Exception(f"Type policy not implemented for class type {self.type}")
@@ -192,7 +175,7 @@ class Type(SpecAttr):
ri.cw.p(f"{line};")
def _attr_put_simple(self, ri, var, put_type):
- line = f"mnl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
+ line = f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
self._attr_put_line(ri, var, line)
def attr_put(self, ri, var):
@@ -217,7 +200,7 @@ class Type(SpecAttr):
if not self.is_multi_val():
ri.cw.p("if (ynl_attr_validate(yarg, attr))")
- ri.cw.p("return MNL_CB_ERROR;")
+ ri.cw.p("return YNL_PARSE_CB_ERROR;")
if self.presence_type() == 'bit':
ri.cw.p(f"{var}->_present.{self.c_name} = 1;")
@@ -264,7 +247,7 @@ class TypeUnused(Type):
return []
def _attr_get(self, ri, var):
- return ['return MNL_CB_ERROR;'], None, None
+ return ['return YNL_PARSE_CB_ERROR;'], None, None
def _attr_typol(self):
return '.type = YNL_PT_REJECT, '
@@ -357,9 +340,6 @@ class TypeScalar(Type):
else:
self.type_name = '__' + self.type
- def mnl_type(self):
- return self._mnl_type()
-
def _attr_policy(self, policy):
if 'flags-mask' in self.checks or self.is_bitfield:
if self.is_bitfield:
@@ -387,10 +367,10 @@ class TypeScalar(Type):
return [f'{self.type_name} {self.c_name}{self.byte_order_comment}']
def attr_put(self, ri, var):
- self._attr_put_simple(ri, var, self.mnl_type())
+ self._attr_put_simple(ri, var, self.type)
def _attr_get(self, ri, var):
- return f"{var}->{self.c_name} = mnl_attr_get_{self.mnl_type()}(attr);", None, None
+ return f"{var}->{self.c_name} = ynl_attr_get_{self.type}(attr);", None, None
def _setter_lines(self, ri, member, presence):
return [f"{member} = {self.c_name};"]
@@ -404,7 +384,7 @@ class TypeFlag(Type):
return '.type = YNL_PT_FLAG, '
def attr_put(self, ri, var):
- self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, 0, NULL)")
+ self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, NULL, 0)")
def _attr_get(self, ri, var):
return [], None, None
@@ -446,15 +426,15 @@ class TypeString(Type):
cw.p(f"\t[{self.enum_name}] = {spec},")
def attr_put(self, ri, var):
- self._attr_put_simple(ri, var, 'strz')
+ self._attr_put_simple(ri, var, 'str')
def _attr_get(self, ri, var):
len_mem = var + '->_present.' + self.c_name + '_len'
return [f"{len_mem} = len;",
f"{var}->{self.c_name} = malloc(len + 1);",
- f"memcpy({var}->{self.c_name}, mnl_attr_get_str(attr), len);",
+ f"memcpy({var}->{self.c_name}, ynl_attr_get_str(attr), len);",
f"{var}->{self.c_name}[len] = 0;"], \
- ['len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));'], \
+ ['len = strnlen(ynl_attr_get_str(attr), ynl_attr_data_len(attr));'], \
['unsigned int len;']
def _setter_lines(self, ri, member, presence):
@@ -493,15 +473,15 @@ class TypeBinary(Type):
return mem
def attr_put(self, ri, var):
- self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, " +
- f"{var}->_present.{self.c_name}_len, {var}->{self.c_name})")
+ self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, " +
+ f"{var}->{self.c_name}, {var}->_present.{self.c_name}_len)")
def _attr_get(self, ri, var):
len_mem = var + '->_present.' + self.c_name + '_len'
return [f"{len_mem} = len;",
f"{var}->{self.c_name} = malloc(len);",
- f"memcpy({var}->{self.c_name}, mnl_attr_get_payload(attr), len);"], \
- ['len = mnl_attr_get_payload_len(attr);'], \
+ f"memcpy({var}->{self.c_name}, ynl_attr_data(attr), len);"], \
+ ['len = ynl_attr_data_len(attr);'], \
['unsigned int len;']
def _setter_lines(self, ri, member, presence):
@@ -526,11 +506,11 @@ class TypeBitfield32(Type):
return f"NLA_POLICY_BITFIELD32({mask})"
def attr_put(self, ri, var):
- line = f"mnl_attr_put(nlh, {self.enum_name}, sizeof(struct nla_bitfield32), &{var}->{self.c_name})"
+ line = f"ynl_attr_put(nlh, {self.enum_name}, &{var}->{self.c_name}, sizeof(struct nla_bitfield32))"
self._attr_put_line(ri, var, line)
def _attr_get(self, ri, var):
- return f"memcpy(&{var}->{self.c_name}, mnl_attr_get_payload(attr), sizeof(struct nla_bitfield32));", None, None
+ return f"memcpy(&{var}->{self.c_name}, ynl_attr_data(attr), sizeof(struct nla_bitfield32));", None, None
def _setter_lines(self, ri, member, presence):
return [f"memcpy(&{member}, {self.c_name}, sizeof(struct nla_bitfield32));"]
@@ -563,7 +543,7 @@ class TypeNest(Type):
def _attr_get(self, ri, var):
get_lines = [f"if ({self.nested_render_name}_parse(&parg, attr))",
- "return MNL_CB_ERROR;"]
+ "return YNL_PARSE_CB_ERROR;"]
init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
f"parg.data = &{var}->{self.c_name};"]
return get_lines, init_lines, None
@@ -589,9 +569,6 @@ class TypeMultiAttr(Type):
def presence_type(self):
return 'count'
- def mnl_type(self):
- return self._mnl_type()
-
def _complex_member_type(self, ri):
if 'type' not in self.attr or self.attr['type'] == 'nest':
return self.nested_struct_type
@@ -625,9 +602,9 @@ class TypeMultiAttr(Type):
def attr_put(self, ri, var):
if self.attr['type'] in scalars:
- put_type = self.mnl_type()
+ put_type = self.type
ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
- ri.cw.p(f"mnl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
+ ri.cw.p(f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
elif 'type' not in self.attr or self.attr['type'] == 'nest':
ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
@@ -665,7 +642,7 @@ class TypeArrayNest(Type):
def _attr_get(self, ri, var):
local_vars = ['const struct nlattr *attr2;']
get_lines = [f'attr_{self.c_name} = attr;',
- 'mnl_attr_for_each_nested(attr2, attr)',
+ 'ynl_attr_for_each_nested(attr2, attr)',
f'\t{var}->n_{self.c_name}++;']
return get_lines, None, local_vars
@@ -690,8 +667,8 @@ class TypeNestTypeValue(Type):
local_vars += [f'__u32 {", ".join(tv_names)};']
for level in self.attr["type-value"]:
level = c_lower(level)
- get_lines += [f'attr_{level} = mnl_attr_get_payload({prev});']
- get_lines += [f'{level} = mnl_attr_get_type(attr_{level});']
+ get_lines += [f'attr_{level} = ynl_attr_data({prev});']
+ get_lines += [f'{level} = ynl_attr_type(attr_{level});']
prev = 'attr_' + level
tv_args = f", {', '.join(tv_names)}"
@@ -1550,7 +1527,7 @@ def _put_enum_to_str_helper(cw, render_name, map_name, arg_name, enum=None):
cw.block_start()
if enum and enum.type == 'flags':
cw.p(f'{arg_name} = ffs({arg_name}) - 1;')
- cw.p(f'if ({arg_name} < 0 || {arg_name} >= (int)MNL_ARRAY_SIZE({map_name}))')
+ cw.p(f'if ({arg_name} < 0 || {arg_name} >= (int)YNL_ARRAY_SIZE({map_name}))')
cw.p('return NULL;')
cw.p(f'return {map_name}[{arg_name}];')
cw.block_end()
@@ -1612,12 +1589,12 @@ def put_req_nested(ri, struct):
ri.cw.block_start()
ri.cw.write_func_lvar('struct nlattr *nest;')
- ri.cw.p("nest = mnl_attr_nest_start(nlh, attr_type);")
+ ri.cw.p("nest = ynl_attr_nest_start(nlh, attr_type);")
for _, arg in struct.member_list():
arg.attr_put(ri, "obj")
- ri.cw.p("mnl_attr_nest_end(nlh, nest);")
+ ri.cw.p("ynl_attr_nest_end(nlh, nest);")
ri.cw.nl()
ri.cw.p('return 0;')
@@ -1627,11 +1604,11 @@ def put_req_nested(ri, struct):
def _multi_parse(ri, struct, init_lines, local_vars):
if struct.nested:
- iter_line = "mnl_attr_for_each_nested(attr, nested)"
+ iter_line = "ynl_attr_for_each_nested(attr, nested)"
else:
if ri.fixed_hdr:
local_vars += ['void *hdr;']
- iter_line = "mnl_attr_for_each(attr, nlh, yarg->ys->family->hdr_len)"
+ iter_line = "ynl_attr_for_each(attr, nlh, yarg->ys->family->hdr_len)"
array_nests = set()
multi_attrs = set()
@@ -1665,7 +1642,7 @@ def _multi_parse(ri, struct, init_lines, local_vars):
ri.cw.p(f'dst->{arg} = {arg};')
if ri.fixed_hdr:
- ri.cw.p('hdr = mnl_nlmsg_get_payload_offset(nlh, sizeof(struct genlmsghdr));')
+ ri.cw.p('hdr = ynl_nlmsg_data_offset(nlh, sizeof(struct genlmsghdr));')
ri.cw.p(f"memcpy(&dst->_hdr, hdr, sizeof({ri.fixed_hdr}));")
for anest in sorted(all_multi):
aspec = struct[anest]
@@ -1674,7 +1651,7 @@ def _multi_parse(ri, struct, init_lines, local_vars):
ri.cw.nl()
ri.cw.block_start(line=iter_line)
- ri.cw.p('unsigned int type = mnl_attr_get_type(attr);')
+ ri.cw.p('unsigned int type = ynl_attr_type(attr);')
ri.cw.nl()
first = True
@@ -1690,14 +1667,14 @@ def _multi_parse(ri, struct, init_lines, local_vars):
aspec = struct[anest]
ri.cw.block_start(line=f"if (n_{aspec.c_name})")
- ri.cw.p(f"dst->{aspec.c_name} = calloc({aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
ri.cw.p('i = 0;')
ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
- ri.cw.block_start(line=f"mnl_attr_for_each_nested(attr, attr_{aspec.c_name})")
+ ri.cw.block_start(line=f"ynl_attr_for_each_nested(attr, attr_{aspec.c_name})")
ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
- ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, mnl_attr_get_type(attr)))")
- ri.cw.p('return MNL_CB_ERROR;')
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, ynl_attr_type(attr)))")
+ ri.cw.p('return YNL_PARSE_CB_ERROR;')
ri.cw.p('i++;')
ri.cw.block_end()
ri.cw.block_end()
@@ -1712,13 +1689,13 @@ def _multi_parse(ri, struct, init_lines, local_vars):
if 'nested-attributes' in aspec:
ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
ri.cw.block_start(line=iter_line)
- ri.cw.block_start(line=f"if (mnl_attr_get_type(attr) == {aspec.enum_name})")
+ ri.cw.block_start(line=f"if (ynl_attr_type(attr) == {aspec.enum_name})")
if 'nested-attributes' in aspec:
ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr))")
- ri.cw.p('return MNL_CB_ERROR;')
+ ri.cw.p('return YNL_PARSE_CB_ERROR;')
elif aspec.type in scalars:
- ri.cw.p(f"dst->{aspec.c_name}[i] = mnl_attr_get_{aspec.mnl_type()}(attr);")
+ ri.cw.p(f"dst->{aspec.c_name}[i] = ynl_attr_get_{aspec.type}(attr);")
else:
raise Exception('Nest parsing type not supported yet')
ri.cw.p('i++;')
@@ -1730,7 +1707,7 @@ def _multi_parse(ri, struct, init_lines, local_vars):
if struct.nested:
ri.cw.p('return 0;')
else:
- ri.cw.p('return MNL_CB_OK;')
+ ri.cw.p('return YNL_PARSE_CB_OK;')
ri.cw.block_end()
ri.cw.nl()
@@ -1760,10 +1737,9 @@ def parse_rsp_msg(ri, deref=False):
return
func_args = ['const struct nlmsghdr *nlh',
- 'void *data']
+ 'struct ynl_parse_arg *yarg']
local_vars = [f'{type_name(ri, "reply", deref=deref)} *dst;',
- 'struct ynl_parse_arg *yarg = data;',
'const struct nlattr *attr;']
init_lines = ['dst = yarg->data;']
@@ -1774,7 +1750,7 @@ def parse_rsp_msg(ri, deref=False):
else:
# Empty reply
ri.cw.block_start()
- ri.cw.p('return MNL_CB_OK;')
+ ri.cw.p('return YNL_PARSE_CB_OK;')
ri.cw.block_end()
ri.cw.nl()
@@ -1809,7 +1785,7 @@ def print_req(ri):
if ri.fixed_hdr:
ri.cw.p("hdr_len = sizeof(req->_hdr);")
- ri.cw.p("hdr = mnl_nlmsg_put_extra_header(nlh, hdr_len);")
+ ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
ri.cw.nl()
@@ -1859,20 +1835,21 @@ def print_dump(ri):
ri.cw.write_func_lvar(local_vars)
- ri.cw.p('yds.ys = ys;')
+ ri.cw.p('yds.yarg.ys = ys;')
+ ri.cw.p(f"yds.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.p("yds.yarg.data = NULL;")
ri.cw.p(f"yds.alloc_sz = sizeof({type_name(ri, rdir(direction))});")
ri.cw.p(f"yds.cb = {op_prefix(ri, 'reply', deref=True)}_parse;")
if ri.op.value is not None:
ri.cw.p(f'yds.rsp_cmd = {ri.op.enum_name};')
else:
ri.cw.p(f'yds.rsp_cmd = {ri.op.rsp_value};')
- ri.cw.p(f"yds.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
ri.cw.nl()
ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
if ri.fixed_hdr:
ri.cw.p("hdr_len = sizeof(req->_hdr);")
- ri.cw.p("hdr = mnl_nlmsg_put_extra_header(nlh, hdr_len);")
+ ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
ri.cw.nl()
@@ -2584,7 +2561,7 @@ def render_user_family(family, cw, prototype):
cw.p('.hdr_len\t= sizeof(struct genlmsghdr),')
if family.ntfs:
cw.p(f".ntf_info\t= {family['name']}_ntf_info,")
- cw.p(f".ntf_info_size\t= MNL_ARRAY_SIZE({family['name']}_ntf_info),")
+ cw.p(f".ntf_info_size\t= YNL_ARRAY_SIZE({family['name']}_ntf_info),")
cw.block_end(line=';')
@@ -2700,7 +2677,6 @@ def main():
if args.mode == "user":
if not args.header:
- cw.p("#include <libmnl/libmnl.h>")
cw.p("#include <linux/genetlink.h>")
cw.nl()
for one in args.user_header:
diff --git a/tools/net/ynl/ynl-gen-rst.py b/tools/net/ynl/ynl-gen-rst.py
index 262d88f88696..927407b3efb3 100755
--- a/tools/net/ynl/ynl-gen-rst.py
+++ b/tools/net/ynl/ynl-gen-rst.py
@@ -189,12 +189,19 @@ def parse_operations(operations: List[Dict[str, Any]]) -> str:
def parse_entries(entries: List[Dict[str, Any]], level: int) -> str:
"""Parse a list of entries"""
+ ignored = ["pad"]
lines = []
for entry in entries:
if isinstance(entry, dict):
# entries could be a list or a dictionary
+ field_name = entry.get("name", "")
+ if field_name in ignored:
+ continue
+ type_ = entry.get("type")
+ if type_:
+ field_name += f" ({inline(type_)})"
lines.append(
- rst_fields(entry.get("name", ""), sanitize(entry.get("doc", "")), level)
+ rst_fields(field_name, sanitize(entry.get("doc", "")), level)
)
elif isinstance(entry, list):
lines.append(rst_list_inline(entry, level))
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index f533e76fb480..f5b81d439387 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -39,6 +39,9 @@ trace/beauty/generated/
pmu-events/pmu-events.c
pmu-events/jevents
pmu-events/metric_test.log
+tests/shell/*.shellcheck_log
+tests/shell/coresight/*.shellcheck_log
+tests/shell/lib/*.shellcheck_log
feature/
libapi/
libbpf/
@@ -49,3 +52,4 @@ libtraceevent/
libtraceevent_plugins/
fixdep
Documentation/doc.dep
+python_ext_build/
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index a97f95825b14..19cc179be9a7 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -25,6 +25,7 @@
q quicker (less detailed) decoding
A approximate IPC
Z prefer to ignore timestamps (so-called "timeless" decoding)
+ T use the timestamp trace as kernel time
The default is all events i.e. the same as --itrace=iybxwpe,
except for perf script where it is --itrace=ce
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index fe168e8165c8..b95524bea021 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -155,6 +155,17 @@ include::itrace.txt[]
stdio or stdio2 (Default: 0). Note that this is about selection of
functions to display, not about lines within the function.
+--data-type[=TYPE_NAME]::
+ Display data type annotation instead of code. It infers data type of
+ samples (if they are memory accessing instructions) using DWARF debug
+ information. It can take an optional argument of data type name. In
+ that case it'd show annotation for the type only, otherwise it'd show
+ all data types it finds.
+
+--type-stat::
+ Show stats for the data type annotation.
+
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 0b4e79dbd3f6..379f9d7a8ab1 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -251,7 +251,8 @@ annotate.*::
addr2line binary to use for file names and line numbers.
annotate.objdump::
- objdump binary to use for disassembly and annotations.
+ objdump binary to use for disassembly and annotations,
+ including in the 'perf test' command.
annotate.disassembler_style::
Use this to change the default disassembler style to some other value
@@ -722,7 +723,6 @@ session-<NAME>.*::
Defines new record session for daemon. The value is record's
command line without the 'record' keyword.
-
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index d5f78e125efe..3b12595193c9 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -47,6 +47,10 @@ Print PMU events and metrics limited to the specific PMU name.
--json::
Output in JSON format.
+-o::
+--output=::
+ Output file name. By default output is written to stdout.
+
[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
@@ -81,11 +85,13 @@ For Intel systems precise event sampling is implemented with PEBS
which supports up to precise-level 2, and precise level 3 for
some special cases
-On AMD systems it is implemented using IBS (up to precise-level 2).
-The precise modifier works with event types 0x76 (cpu-cycles, CPU
-clocks not halted) and 0xC1 (micro-ops retired). Both events map to
-IBS execution sampling (IBS op) with the IBS Op Counter Control bit
-(IbsOpCntCtl) set respectively (see the
+On AMD systems it is implemented using IBS OP (up to precise-level 2).
+Unlike Intel PEBS which provides levels of precision, the AMD core PMU is
+inherently non-precise and IBS is inherently precise (i.e. ibs_op//,
+ibs_op//p, ibs_op//pp and ibs_op//ppp are all the same). The precise modifier
+works with event types 0x76 (cpu-cycles, CPU clocks not halted) and 0xC1
+(micro-ops retired). Both events map to IBS execution sampling (IBS op)
+with the IBS Op Counter Control bit (IbsOpCntCtl) set respectively (see the
Core Complex (CCX) -> Processor x86 Core -> Instruction Based Sampling (IBS)
section of the [AMD Processor Programming Reference (PPR)] relevant to the
family, model and stepping of the processor being used).
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 503abcba1438..f5938d616d75 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -119,7 +119,7 @@ INFO OPTIONS
CONTENTION OPTIONS
---------------
+------------------
-k::
--key=<value>::
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 1889f66addf2..6015fdd08fb6 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -445,6 +445,10 @@ following filters are defined:
4th-Gen Xeon+ server), the save branch type is unconditionally enabled
when the taken branch stack sampling is enabled.
- priv: save privilege state during sampling in case binary is not available later
+ - counter: save occurrences of the event since the last branch entry. Currently, the
+ feature is only supported on newer CPUs, e.g., Intel Sierra Forest and
+ later platforms. An error is expected if it is used on an unsupported
+ kernel or CPU.
+
The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index af068b4f1e5a..38f59ac064f7 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -118,6 +118,9 @@ OPTIONS
- retire_lat: On X86, this reports pipeline stall of this instruction compared
to the previous instruction in cycles. And currently supported only on X86
- simd: Flags describing a SIMD operation. "e" for empty Arm SVE predicate. "p" for partial Arm SVE predicate
+ - type: Data type of sample memory access.
+ - typeoff: Offset in the data type of sample memory access.
+ - symoff: Offset in the symbol.
By default, comm, dso and symbol keys are used.
(i.e. --sort comm,dso,symbol)
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 8f789fa1242e..5af2e432b54f 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -422,7 +422,34 @@ See perf list output for the possible metrics and metricgroups.
-A::
--no-aggr::
-Do not aggregate counts across all monitored CPUs.
+--no-merge::
+Do not aggregate/merge counts across monitored CPUs or PMUs.
+
+When multiple events are created from a single event specification,
+stat will, by default, aggregate the event counts and show the result
+in a single row. This option disables that behavior and shows the
+individual events and counts.
+
+Multiple events are created from a single event specification when:
+
+1. PID monitoring isn't requested and the system has more than one
+ CPU. For example, a system with 8 SMT threads will have one event
+ opened on each thread and aggregation is performed across them.
+
+2. Prefix or glob wildcard matching is used for the PMU name. For
+ example, multiple memory controller PMUs may exist typically with a
+ suffix of _0, _1, etc. By default the event counts will all be
+ combined if the PMU is specified without the suffix such as
+ uncore_imc rather than uncore_imc_0.
+
+3. Aliases, which are listed immediately after the Kernel PMU events
+ by perf list, are used.
+
+--hybrid-merge::
+Merge core event counts from all core PMUs. In hybrid or big.LITTLE
+systems by default each core PMU will report its count
+separately. This option forces core PMU counts to be combined to give
+a behavior closer to having a single CPU type in the system.
--topdown::
Print top-down metrics supported by the CPU. This allows to determine
@@ -475,29 +502,6 @@ highlight 'tma_frontend_bound'. This metric may be drilled into with
Error out if the input is higher than the supported max level.
---no-merge::
-Do not merge results from same PMUs.
-
-When multiple events are created from a single event specification,
-stat will, by default, aggregate the event counts and show the result
-in a single row. This option disables that behavior and shows
-the individual events and counts.
-
-Multiple events are created from a single event specification when:
-1. Prefix or glob matching is used for the PMU name.
-2. Aliases, which are listed immediately after the Kernel PMU events
- by perf list, are used.
-
---hybrid-merge::
-Merge the hybrid event counts from all PMUs.
-
-For hybrid events, by default, the stat aggregates and reports the event
-counts per PMU. But sometimes, it's also useful to aggregate event counts
-from all PMUs. This option enables that behavior and reports the counts
-without PMUs.
-
-For non-hybrid events, it should be no effect.
-
--smi-cost::
Measure SMI cost if msr/aperf/ and msr/smi/ events are supported.
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index ba3df49c169d..a7cf7bc2f968 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -64,6 +64,9 @@ OPTIONS
perf-event-open - Print perf_event_open() arguments and
return value
+--debug-file::
+ Write debug output to a specified file.
+
DESCRIPTION
-----------
Performance counters for Linux are a new kernel-based subsystem
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b3e6ed10f40c..aa55850fbc21 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -476,6 +476,11 @@ else
else
CFLAGS += -DHAVE_DWARF_GETLOCATIONS_SUPPORT
endif # dwarf_getlocations
+ ifneq ($(feature-dwarf_getcfi), 1)
+ msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.142);
+ else
+ CFLAGS += -DHAVE_DWARF_CFI_SUPPORT
+ endif # dwarf_getcfi
endif # Dwarf support
endif # libelf support
endif # NO_LIBELF
@@ -680,15 +685,15 @@ ifndef BUILD_BPF_SKEL
endif
ifeq ($(BUILD_BPF_SKEL),1)
- ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),)
- dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required)
- BUILD_BPF_SKEL := 0
- else ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),)
+ ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),)
dummy := $(warning Warning: Disabled BPF skeletons as libelf is required by bpftool)
BUILD_BPF_SKEL := 0
else ifeq ($(filter -DHAVE_ZLIB_SUPPORT, $(CFLAGS)),)
dummy := $(warning Warning: Disabled BPF skeletons as zlib is required by bpftool)
BUILD_BPF_SKEL := 0
+ else ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),)
+ dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required)
+ BUILD_BPF_SKEL := 0
else ifeq ($(call get-executable,$(CLANG)),)
dummy := $(warning Warning: Disabled BPF skeletons as clang ($(CLANG)) is missing)
BUILD_BPF_SKEL := 0
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 058c9aecf608..f8774a9b1377 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -134,6 +134,8 @@ include ../scripts/utilities.mak
# x86 instruction decoder - new instructions test
#
# Define GEN_VMLINUX_H to generate vmlinux.h from the BTF.
+#
+# Define NO_SHELLCHECK if you do not want to run shellcheck during build
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -227,8 +229,25 @@ else
force_fixdep := $(config)
endif
+# Runs shellcheck on perf test shell scripts
+ifeq ($(NO_SHELLCHECK),1)
+ SHELLCHECK :=
+else
+ SHELLCHECK := $(shell which shellcheck 2> /dev/null)
+endif
+
+# shellcheck is used in tools/perf/tests/Build with the options -a/--check-sourced
+# (introduced in v0.4.7) and -S/--severity (introduced in v0.6.0), so the
+# minimum supported shellcheck version is v0.6.0.
+ifneq ($(SHELLCHECK),)
+ ifeq ($(shell expr $(shell $(SHELLCHECK) --version | grep version: | \
+ sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \< 060), 1)
+ SHELLCHECK :=
+ endif
+endif
+
export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
-export HOSTCC HOSTLD HOSTAR HOSTCFLAGS
+export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK
include $(srctree)/tools/build/Makefile.include
@@ -1152,7 +1171,7 @@ bpf-skel-clean:
clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
- $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+ $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete -o -name '*.shellcheck_log' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)$(LIBJVMTI).so
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2cf873d71dff..77e6663c1703 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -199,7 +199,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
{
int i, err = -EINVAL;
struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
- struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
/* Set option of each CPU we have */
for (i = 0; i < cpu__max_cpu().cpu; i++) {
@@ -211,7 +211,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
* program can run on any CPUs in this case, thus don't skip
* validation.
*/
- if (!perf_cpu_map__empty(event_cpus) &&
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) &&
!perf_cpu_map__has(event_cpus, cpu))
continue;
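A minimal sketch of the renamed libperf calls used in the hunks above, assuming the declarations in <perf/cpumap.h>; the helper name list_online_cpus is illustrative:

    #include <perf/cpumap.h>
    #include <stdio.h>

    /* Sketch: build the online-CPU map with the renamed constructor and walk it. */
    static void list_online_cpus(void)
    {
            struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();
            struct perf_cpu cpu;
            int idx;

            if (!online)
                    return;
            /* The renamed predicate spells out what "empty" meant:
             * no CPUs at all, or only the "any CPU" dummy entry. */
            if (!perf_cpu_map__has_any_cpu_or_is_empty(online))
                    perf_cpu_map__for_each_cpu(cpu, idx, online)
                            printf("cpu %d\n", cpu.cpu);
            perf_cpu_map__put(online);
    }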
@@ -435,7 +435,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
* Also the case of per-cpu mmaps, need the contextID in order to be notified
* when a context switch happened.
*/
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
"timestamp", 1);
evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
@@ -461,7 +461,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
evsel->core.attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus))
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(evsel, TIME);
err = cs_etm_validate_config(itr, cs_etm_evsel);
@@ -536,10 +536,10 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
int i;
int etmv3 = 0, etmv4 = 0, ete = 0;
struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
- struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
/* cpu map is not empty, we have specific CPUs to work with */
- if (!perf_cpu_map__empty(event_cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
for (i = 0; i < cpu__max_cpu().cpu; i++) {
struct perf_cpu cpu = { .cpu = i, };
@@ -802,7 +802,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
- struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -814,7 +814,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
return -EINVAL;
/* If the cpu_map is empty all online CPUs are involved */
- if (perf_cpu_map__empty(event_cpus)) {
+ if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
cpu_map = online_cpus;
} else {
/* Make sure all specified CPUs are online */
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index e3acc739bd00..51ccbfd3d246 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
* In the case of per-cpu mmaps, sample CPU for AUX event;
* also enable the timestamp tracing for samples correlation.
*/
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(arm_spe_evsel, CPU);
evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
"ts_enable", 1);
@@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
tracking_evsel->core.attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME);
evsel__set_sample_bit(tracking_evsel, CPU);
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
index a2eef9ec5491..97037499152e 100644
--- a/tools/perf/arch/arm64/util/header.c
+++ b/tools/perf/arch/arm64/util/header.c
@@ -57,7 +57,7 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
int get_cpuid(char *buf, size_t sz)
{
- struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
int ret;
if (!cpus)
diff --git a/tools/perf/arch/loongarch/annotate/instructions.c b/tools/perf/arch/loongarch/annotate/instructions.c
index 98e19c5366ac..21cc7e4149f7 100644
--- a/tools/perf/arch/loongarch/annotate/instructions.c
+++ b/tools/perf/arch/loongarch/annotate/instructions.c
@@ -61,10 +61,10 @@ static int loongarch_jump__parse(struct arch *arch, struct ins_operands *ops, st
const char *c = strchr(ops->raw, '#');
u64 start, end;
- ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
- ops->raw_func_start = strchr(ops->raw, '<');
+ ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+ ops->jump.raw_func_start = strchr(ops->raw, '<');
- if (ops->raw_func_start && c > ops->raw_func_start)
+ if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
c = NULL;
if (c++ != NULL)
diff --git a/tools/perf/arch/x86/tests/hybrid.c b/tools/perf/arch/x86/tests/hybrid.c
index eb152770f148..40f5d17fedab 100644
--- a/tools/perf/arch/x86/tests/hybrid.c
+++ b/tools/perf/arch/x86/tests/hybrid.c
@@ -47,7 +47,7 @@ static int test__hybrid_hw_group_event(struct evlist *evlist)
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
return TEST_OK;
}
@@ -102,7 +102,7 @@ static int test__hybrid_group_modifier1(struct evlist *evlist)
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -163,6 +163,24 @@ static int test__checkevent_pmu(struct evlist *evlist)
return TEST_OK;
}
+static int test__hybrid_hw_group_event_2(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
+ TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == 0x3c);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ return TEST_OK;
+}
+
struct evlist_test {
const char *name;
bool (*valid)(void);
@@ -171,27 +189,27 @@ struct evlist_test {
static const struct evlist_test test__hybrid_events[] = {
{
- .name = "cpu_core/cpu-cycles/",
+ .name = "cpu_core/cycles/",
.check = test__hybrid_hw_event_with_pmu,
/* 0 */
},
{
- .name = "{cpu_core/cpu-cycles/,cpu_core/instructions/}",
+ .name = "{cpu_core/cycles/,cpu_core/branches/}",
.check = test__hybrid_hw_group_event,
/* 1 */
},
{
- .name = "{cpu-clock,cpu_core/cpu-cycles/}",
+ .name = "{cpu-clock,cpu_core/cycles/}",
.check = test__hybrid_sw_hw_group_event,
/* 2 */
},
{
- .name = "{cpu_core/cpu-cycles/,cpu-clock}",
+ .name = "{cpu_core/cycles/,cpu-clock}",
.check = test__hybrid_hw_sw_group_event,
/* 3 */
},
{
- .name = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}",
+ .name = "{cpu_core/cycles/k,cpu_core/branches/u}",
.check = test__hybrid_group_modifier1,
/* 4 */
},
@@ -215,6 +233,11 @@ static const struct evlist_test test__hybrid_events[] = {
.check = test__hybrid_cache_event,
/* 8 */
},
+ {
+ .name = "{cpu_core/cycles/,cpu_core/cpu-cycles/}",
+ .check = test__hybrid_hw_group_event_2,
+ /* 9 */
+ },
};
static int test_event(const struct evlist_test *e)
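The new case pairs a legacy-style alias with a sysfs name on the same PMU. A short sketch of feeding such a string through the parser, assuming the parse_events() convenience wrapper from util/parse-events.h:

    #include <errno.h>
    #include "util/evlist.h"
    #include "util/parse-events.h"

    /* Sketch: "cycles" resolves to PERF_TYPE_HARDWARE with the PMU encoded,
     * while "cpu-cycles" inside the PMU term stays a raw 0x3c config, as the
     * test above asserts. */
    static int parse_hybrid_group(void)
    {
            struct evlist *evlist = evlist__new();
            int err;

            if (!evlist)
                    return -ENOMEM;
            err = parse_events(evlist, "{cpu_core/cycles/,cpu_core/cpu-cycles/}", NULL);
            evlist__delete(evlist);
            return err;
    }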
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index 530934805710..399c4a0a29d8 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -113,3 +113,41 @@ int regs_query_register_offset(const char *name)
return roff->offset;
return -EINVAL;
}
+
+struct dwarf_regs_idx {
+ const char *name;
+ int idx;
+};
+
+static const struct dwarf_regs_idx x86_regidx_table[] = {
+ { "rax", 0 }, { "eax", 0 }, { "ax", 0 }, { "al", 0 },
+ { "rdx", 1 }, { "edx", 1 }, { "dx", 1 }, { "dl", 1 },
+ { "rcx", 2 }, { "ecx", 2 }, { "cx", 2 }, { "cl", 2 },
+ { "rbx", 3 }, { "edx", 3 }, { "bx", 3 }, { "bl", 3 },
+ { "rsi", 4 }, { "esi", 4 }, { "si", 4 }, { "sil", 4 },
+ { "rdi", 5 }, { "edi", 5 }, { "di", 5 }, { "dil", 5 },
+ { "rbp", 6 }, { "ebp", 6 }, { "bp", 6 }, { "bpl", 6 },
+ { "rsp", 7 }, { "esp", 7 }, { "sp", 7 }, { "spl", 7 },
+ { "r8", 8 }, { "r8d", 8 }, { "r8w", 8 }, { "r8b", 8 },
+ { "r9", 9 }, { "r9d", 9 }, { "r9w", 9 }, { "r9b", 9 },
+ { "r10", 10 }, { "r10d", 10 }, { "r10w", 10 }, { "r10b", 10 },
+ { "r11", 11 }, { "r11d", 11 }, { "r11w", 11 }, { "r11b", 11 },
+ { "r12", 12 }, { "r12d", 12 }, { "r12w", 12 }, { "r12b", 12 },
+ { "r13", 13 }, { "r13d", 13 }, { "r13w", 13 }, { "r13b", 13 },
+ { "r14", 14 }, { "r14d", 14 }, { "r14w", 14 }, { "r14b", 14 },
+ { "r15", 15 }, { "r15d", 15 }, { "r15w", 15 }, { "r15b", 15 },
+ { "rip", DWARF_REG_PC },
+};
+
+int get_arch_regnum(const char *name)
+{
+ unsigned int i;
+
+ if (*name != '%')
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(x86_regidx_table); i++)
+ if (!strcmp(x86_regidx_table[i].name, name + 1))
+ return x86_regidx_table[i].idx;
+ return -ENOENT;
+}
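A brief sketch of calling the new lookup; the operand strings are illustrative and the prototype is assumed to live in util/dwarf-regs.h:

    #include <errno.h>

    /* Sketch: operands carry the objdump-style '%' prefix; every width alias
     * of a register maps to the same DWARF number. */
    static int example_regnum_lookup(void)
    {
            int bx  = get_arch_regnum("%rbx");  /* 3 */
            int bad = get_arch_regnum("rax");   /* -EINVAL: '%' prefix required */
            int unk = get_arch_regnum("%xmm0"); /* -ENOENT: not in the table */

            return (bx == 3 && bad == -EINVAL && unk == -ENOENT) ? 0 : -1;
    }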
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index 5741ffe47312..e65b7dbe27fb 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -14,66 +14,79 @@
#if defined(__x86_64__)
-int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
+struct perf_event__synthesize_extra_kmaps_cb_args {
+ struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+ union perf_event *event;
+};
+
+static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data)
{
- int rc = 0;
- struct map_rb_node *pos;
- struct maps *kmaps = machine__kernel_maps(machine);
- union perf_event *event = zalloc(sizeof(event->mmap) +
- machine->id_hdr_size);
+ struct perf_event__synthesize_extra_kmaps_cb_args *args = data;
+ union perf_event *event = args->event;
+ struct kmap *kmap;
+ size_t size;
- if (!event) {
- pr_debug("Not enough memory synthesizing mmap event "
- "for extra kernel maps\n");
- return -1;
- }
+ if (!__map__is_extra_kernel_map(map))
+ return 0;
- maps__for_each_entry(kmaps, pos) {
- struct kmap *kmap;
- size_t size;
- struct map *map = pos->map;
+ kmap = map__kmap(map);
- if (!__map__is_extra_kernel_map(map))
- continue;
+ size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
+ PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
+ args->machine->id_hdr_size;
- kmap = map__kmap(map);
+ memset(event, 0, size);
- size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
- PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
- machine->id_hdr_size;
+ event->mmap.header.type = PERF_RECORD_MMAP;
- memset(event, 0, size);
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(args->machine))
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = size;
- /*
- * kernel uses 0 for user space maps, see kernel/perf_event.c
- * __perf_event_mmap
- */
- if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_KERNEL;
- else
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ event->mmap.start = map__start(map);
+ event->mmap.len = map__size(map);
+ event->mmap.pgoff = map__pgoff(map);
+ event->mmap.pid = args->machine->pid;
- event->mmap.header.size = size;
+ strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
- event->mmap.start = map__start(map);
- event->mmap.len = map__size(map);
- event->mmap.pgoff = map__pgoff(map);
- event->mmap.pid = machine->pid;
+ if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
+ return -1;
- strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
+ return 0;
+}
- if (perf_tool__process_synth_event(tool, event, machine,
- process) != 0) {
- rc = -1;
- break;
- }
+int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ int rc;
+ struct maps *kmaps = machine__kernel_maps(machine);
+ struct perf_event__synthesize_extra_kmaps_cb_args args = {
+ .tool = tool,
+ .process = process,
+ .machine = machine,
+ .event = zalloc(sizeof(args.event->mmap) + machine->id_hdr_size),
+ };
+
+ if (!args.event) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for extra kernel maps\n");
+ return -1;
}
- free(event);
+ rc = maps__for_each_map(kmaps, perf_event__synthesize_extra_kmaps_cb, &args);
+
+ free(args.event);
return rc;
}
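The conversion above follows the maps__for_each_map() callback shape; a reduced sketch with a hypothetical counter (count_extra_kmaps_cb and its args struct are illustrative):

    #include "util/map.h"
    #include "util/maps.h"

    struct count_extra_args {
            int nr_extra;
    };

    /* Sketch: a non-zero return from the callback stops the iteration early. */
    static int count_extra_kmaps_cb(struct map *map, void *data)
    {
            struct count_extra_args *args = data;

            if (__map__is_extra_kernel_map(map))
                    args->nr_extra++;
            return 0;
    }

    static int count_extra_kmaps(struct maps *kmaps)
    {
            struct count_extra_args args = { .nr_extra = 0 };

            maps__for_each_map(kmaps, count_extra_kmaps_cb, &args);
            return args.nr_extra;
    }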
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index d2c8cac11470..af8ae4647585 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
if (!opts->full_auxtrace)
return 0;
- if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
+ if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
return -EINVAL;
}
@@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
- if (!perf_cpu_map__empty(cpus))
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(intel_bts_evsel, CPU);
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index fa0c718b9e72..d199619df3ab 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -369,7 +369,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
ui__warning("Intel Processor Trace: TSC not available\n");
}
- per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
+ per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -774,7 +774,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* Per-cpu recording needs sched_switch events to distinguish different
* threads.
*/
- if (have_timing_info && !perf_cpu_map__empty(cpus) &&
+ if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
!record_opts__no_switch_events(opts)) {
if (perf_can_record_switch_events()) {
bool cpu_wide = !target__none(&opts->target) &&
@@ -832,7 +832,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
- if (!perf_cpu_map__empty(cpus))
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(intel_pt_evsel, CPU);
}
@@ -858,7 +858,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
tracking_evsel->immediate = true;
/* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME);
/* And the CPU for switch events */
evsel__set_sample_bit(tracking_evsel, CPU);
@@ -870,7 +870,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* Warn the user when we do not have enough information to decode i.e.
* per-cpu with no sched_switch (except workload-only).
*/
- if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
+ if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
!target__none(&opts->target) &&
!intel_pt_evsel->core.attr.exclude_user)
ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index 6bfffe83dde9..d3db73dac66a 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -330,7 +330,7 @@ int bench_epoll_ctl(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- cpu = perf_cpu_map__new(NULL);
+ cpu = perf_cpu_map__new_online_cpus();
if (!cpu)
goto errmem;
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index cb5174b53940..06bb3187660a 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -444,7 +444,7 @@ int bench_epoll_wait(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- cpu = perf_cpu_map__new(NULL);
+ cpu = perf_cpu_map__new_online_cpus();
if (!cpu)
goto errmem;
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 2005a3fa3026..0c69d20efa32 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -138,7 +138,7 @@ int bench_futex_hash(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- cpu = perf_cpu_map__new(NULL);
+ cpu = perf_cpu_map__new_online_cpus();
if (!cpu)
goto errmem;
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 092cbd52db82..7a4973346180 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -172,7 +172,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
if (argc)
goto err;
- cpu = perf_cpu_map__new(NULL);
+ cpu = perf_cpu_map__new_online_cpus();
if (!cpu)
err(EXIT_FAILURE, "calloc");
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index c0035990a33c..d9ad736c1a3e 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -174,7 +174,7 @@ int bench_futex_requeue(int argc, const char **argv)
if (argc)
goto err;
- cpu = perf_cpu_map__new(NULL);
+ cpu = perf_cpu_map__new_online_cpus();
if (!cpu)
err(EXIT_FAILURE, "cpu_map__new");
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index 5ab0234d74e6..b66df553e561 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -264,7 +264,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
err(EXIT_FAILURE, "mlockall");
}
- cpu = perf_cpu_map__new(NULL);
+ cpu = perf_cpu_map__new_online_cpus();
if (!cpu)
err(EXIT_FAILURE, "calloc");
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 18a5894af8bb..690fd6d3da13 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -149,7 +149,7 @@ int bench_futex_wake(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- cpu = perf_cpu_map__new(NULL);
+ cpu = perf_cpu_map__new_online_cpus();
if (!cpu)
err(EXIT_FAILURE, "calloc");
diff --git a/tools/perf/bench/sched-seccomp-notify.c b/tools/perf/bench/sched-seccomp-notify.c
index a01c40131493..269c1f4a6852 100644
--- a/tools/perf/bench/sched-seccomp-notify.c
+++ b/tools/perf/bench/sched-seccomp-notify.c
@@ -32,7 +32,7 @@ static bool sync_mode;
static const struct option options[] = {
OPT_U64('l', "loop", &loops, "Specify number of loops"),
OPT_BOOLEAN('s', "sync-mode", &sync_mode,
- "Enable the synchronious mode for seccomp notifications"),
+ "Enable the synchronous mode for seccomp notifications"),
OPT_END()
};
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index aeeb801f1ed7..6c1cc797692d 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -20,6 +20,7 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/annotate.h"
+#include "util/annotate-data.h"
#include "util/event.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
@@ -45,7 +46,6 @@
struct perf_annotate {
struct perf_tool tool;
struct perf_session *session;
- struct annotation_options opts;
#ifdef HAVE_SLANG_SUPPORT
bool use_tui;
#endif
@@ -56,9 +56,13 @@ struct perf_annotate {
bool skip_missing;
bool has_br_stack;
bool group_set;
+ bool data_type;
+ bool type_stat;
+ bool insn_stat;
float min_percent;
const char *sym_hist_filter;
const char *cpu_list;
+ const char *target_data_type;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
@@ -94,6 +98,7 @@ static void process_basic_block(struct addr_map_symbol *start,
struct annotation *notes = sym ? symbol__annotation(sym) : NULL;
struct block_range_iter iter;
struct block_range *entry;
+ struct annotated_branch *branch;
/*
* Sanity; NULL isn't executable and the CPU cannot execute backwards
@@ -105,6 +110,8 @@ static void process_basic_block(struct addr_map_symbol *start,
if (!block_range_iter__valid(&iter))
return;
+ branch = annotation__get_branch(notes);
+
/*
* First block in range is a branch target.
*/
@@ -118,8 +125,8 @@ static void process_basic_block(struct addr_map_symbol *start,
entry->coverage++;
entry->sym = sym;
- if (notes)
- notes->max_coverage = max(notes->max_coverage, entry->coverage);
+ if (branch)
+ branch->max_coverage = max(branch->max_coverage, entry->coverage);
} while (block_range_iter__next(&iter));
@@ -315,9 +322,153 @@ static int hist_entry__tty_annotate(struct hist_entry *he,
struct perf_annotate *ann)
{
if (!ann->use_stdio2)
- return symbol__tty_annotate(&he->ms, evsel, &ann->opts);
+ return symbol__tty_annotate(&he->ms, evsel);
+
+ return symbol__tty_annotate2(&he->ms, evsel);
+}
+
+static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel)
+{
+ struct dso *dso = map__dso(he->ms.map);
+ int nr_members = 1;
+ int nr_samples = he->stat.nr_events;
+
+ if (evsel__is_group_event(evsel)) {
+ struct hist_entry *pair;
+
+ list_for_each_entry(pair, &he->pairs.head, pairs.node)
+ nr_samples += pair->stat.nr_events;
+ }
+
+ printf("Annotate type: '%s' in %s (%d samples):\n",
+ he->mem_type->self.type_name, dso->name, nr_samples);
+
+ if (evsel__is_group_event(evsel)) {
+ struct evsel *pos;
+ int i = 0;
+
+ for_each_group_evsel(pos, evsel)
+ printf(" event[%d] = %s\n", i++, pos->name);
+
+ nr_members = evsel->core.nr_members;
+ }
+
+ printf("============================================================================\n");
+ printf("%*s %10s %10s %s\n", 11 * nr_members, "samples", "offset", "size", "field");
+}
+
+static void print_annotated_data_type(struct annotated_data_type *mem_type,
+ struct annotated_member *member,
+ struct evsel *evsel, int indent)
+{
+ struct annotated_member *child;
+ struct type_hist *h = mem_type->histograms[evsel->core.idx];
+ int i, nr_events = 1, samples = 0;
+
+ for (i = 0; i < member->size; i++)
+ samples += h->addr[member->offset + i].nr_samples;
+ printf(" %10d", samples);
- return symbol__tty_annotate2(&he->ms, evsel, &ann->opts);
+ if (evsel__is_group_event(evsel)) {
+ struct evsel *pos;
+
+ for_each_group_member(pos, evsel) {
+ h = mem_type->histograms[pos->core.idx];
+
+ samples = 0;
+ for (i = 0; i < member->size; i++)
+ samples += h->addr[member->offset + i].nr_samples;
+ printf(" %10d", samples);
+ }
+ nr_events = evsel->core.nr_members;
+ }
+
+ printf(" %10d %10d %*s%s\t%s",
+ member->offset, member->size, indent, "", member->type_name,
+ member->var_name ?: "");
+
+ if (!list_empty(&member->children))
+ printf(" {\n");
+
+ list_for_each_entry(child, &member->children, node)
+ print_annotated_data_type(mem_type, child, evsel, indent + 4);
+
+ if (!list_empty(&member->children))
+ printf("%*s}", 11 * nr_events + 24 + indent, "");
+ printf(";\n");
+}
+
+static void print_annotate_data_stat(struct annotated_data_stat *s)
+{
+#define PRINT_STAT(fld) if (s->fld) printf("%10d : %s\n", s->fld, #fld)
+
+ int bad = s->no_sym +
+ s->no_insn +
+ s->no_insn_ops +
+ s->no_mem_ops +
+ s->no_reg +
+ s->no_dbginfo +
+ s->no_cuinfo +
+ s->no_var +
+ s->no_typeinfo +
+ s->invalid_size +
+ s->bad_offset;
+ int ok = s->total - bad;
+
+ printf("Annotate data type stats:\n");
+ printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n",
+ s->total, ok, 100.0 * ok / (s->total ?: 1), bad, 100.0 * bad / (s->total ?: 1));
+ printf("-----------------------------------------------------------\n");
+ PRINT_STAT(no_sym);
+ PRINT_STAT(no_insn);
+ PRINT_STAT(no_insn_ops);
+ PRINT_STAT(no_mem_ops);
+ PRINT_STAT(no_reg);
+ PRINT_STAT(no_dbginfo);
+ PRINT_STAT(no_cuinfo);
+ PRINT_STAT(no_var);
+ PRINT_STAT(no_typeinfo);
+ PRINT_STAT(invalid_size);
+ PRINT_STAT(bad_offset);
+ printf("\n");
+
+#undef PRINT_STAT
+}
+
+static void print_annotate_item_stat(struct list_head *head, const char *title)
+{
+ struct annotated_item_stat *istat, *pos, *iter;
+ int total_good, total_bad, total;
+ int sum1, sum2;
+ LIST_HEAD(tmp);
+
+ /* sort the list by count */
+ list_splice_init(head, &tmp);
+ total_good = total_bad = 0;
+
+ list_for_each_entry_safe(istat, pos, &tmp, list) {
+ total_good += istat->good;
+ total_bad += istat->bad;
+ sum1 = istat->good + istat->bad;
+
+ list_for_each_entry(iter, head, list) {
+ sum2 = iter->good + iter->bad;
+ if (sum1 > sum2)
+ break;
+ }
+ list_move_tail(&istat->list, &iter->list);
+ }
+ total = total_good + total_bad;
+
+ printf("Annotate %s stats\n", title);
+ printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n\n", total,
+ total_good, 100.0 * total_good / (total ?: 1),
+ total_bad, 100.0 * total_bad / (total ?: 1));
+ printf(" %-10s: %5s %5s\n", "Name", "Good", "Bad");
+ printf("-----------------------------------------------------------\n");
+ list_for_each_entry(istat, head, list)
+ printf(" %-10s: %5d %5d\n", istat->name, istat->good, istat->bad);
+ printf("\n");
}
static void hists__find_annotations(struct hists *hists,
@@ -327,6 +478,11 @@ static void hists__find_annotations(struct hists *hists,
struct rb_node *nd = rb_first_cached(&hists->entries), *next;
int key = K_RIGHT;
+ if (ann->type_stat)
+ print_annotate_data_stat(&ann_data_stat);
+ if (ann->insn_stat)
+ print_annotate_item_stat(&ann_insn_stat, "Instruction");
+
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct annotation *notes;
@@ -359,11 +515,38 @@ find_next:
continue;
}
+ if (ann->data_type) {
+ /* skip unknown type */
+ if (he->mem_type->histograms == NULL)
+ goto find_next;
+
+ if (ann->target_data_type) {
+ const char *type_name = he->mem_type->self.type_name;
+
+ /* skip 'struct ' prefix in the type name */
+ if (strncmp(ann->target_data_type, "struct ", 7) &&
+ !strncmp(type_name, "struct ", 7))
+ type_name += 7;
+
+ /* skip 'union ' prefix in the type name */
+ if (strncmp(ann->target_data_type, "union ", 6) &&
+ !strncmp(type_name, "union ", 6))
+ type_name += 6;
+
+ if (strcmp(ann->target_data_type, type_name))
+ goto find_next;
+ }
+
+ print_annotated_data_header(he, evsel);
+ print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0);
+ printf("\n");
+ goto find_next;
+ }
+
if (use_browser == 2) {
int ret;
int (*annotate)(struct hist_entry *he,
struct evsel *evsel,
- struct annotation_options *options,
struct hist_browser_timer *hbt);
annotate = dlsym(perf_gtk_handle,
@@ -373,14 +556,14 @@ find_next:
return;
}
- ret = annotate(he, evsel, &ann->opts, NULL);
+ ret = annotate(he, evsel, NULL);
if (!ret || !ann->skip_missing)
return;
/* skip missing symbols */
nd = rb_next(nd);
} else if (use_browser == 1) {
- key = hist_entry__tui_annotate(he, evsel, NULL, &ann->opts);
+ key = hist_entry__tui_annotate(he, evsel, NULL);
switch (key) {
case -1:
@@ -422,9 +605,9 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out;
}
- if (!ann->opts.objdump_path) {
+ if (!annotate_opts.objdump_path) {
ret = perf_env__lookup_objdump(&session->header.env,
- &ann->opts.objdump_path);
+ &annotate_opts.objdump_path);
if (ret)
goto out;
}
@@ -457,8 +640,20 @@ static int __cmd_annotate(struct perf_annotate *ann)
evsel__reset_sample_bit(pos, CALLCHAIN);
evsel__output_resort(pos, NULL);
- if (symbol_conf.event_group && !evsel__is_group_leader(pos))
+ /*
+ * An event group needs to display other events too.
+ * Let's delay printing until other events are processed.
+ */
+ if (symbol_conf.event_group) {
+ if (!evsel__is_group_leader(pos)) {
+ struct hists *leader_hists;
+
+ leader_hists = evsel__hists(evsel__leader(pos));
+ hists__match(leader_hists, hists);
+ hists__link(leader_hists, hists);
+ }
continue;
+ }
hists__find_annotations(hists, pos, ann);
}
@@ -469,6 +664,20 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out;
}
+ /* Display group events together */
+ evlist__for_each_entry(session->evlist, pos) {
+ struct hists *hists = evsel__hists(pos);
+ u32 nr_samples = hists->stats.nr_samples;
+
+ if (nr_samples == 0)
+ continue;
+
+ if (!symbol_conf.event_group || !evsel__is_group_leader(pos))
+ continue;
+
+ hists__find_annotations(hists, pos, ann);
+ }
+
if (use_browser == 2) {
void (*show_annotations)(void);
@@ -495,6 +704,17 @@ static int parse_percent_limit(const struct option *opt, const char *str,
return 0;
}
+static int parse_data_type(const struct option *opt, const char *str, int unset)
+{
+ struct perf_annotate *ann = opt->value;
+
+ ann->data_type = !unset;
+ if (str)
+ ann->target_data_type = strdup(str);
+
+ return 0;
+}
+
static const char * const annotate_usage[] = {
"perf annotate [<options>]",
NULL
@@ -558,9 +778,9 @@ int cmd_annotate(int argc, const char **argv)
"file", "vmlinux pathname"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
- OPT_BOOLEAN('l', "print-line", &annotate.opts.print_lines,
+ OPT_BOOLEAN('l', "print-line", &annotate_opts.print_lines,
"print matching source lines (may be slow)"),
- OPT_BOOLEAN('P', "full-paths", &annotate.opts.full_path,
+ OPT_BOOLEAN('P', "full-paths", &annotate_opts.full_path,
"Don't shorten the displayed pathnames"),
OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
"Skip symbols that cannot be annotated"),
@@ -571,15 +791,15 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK(0, "symfs", NULL, "directory",
"Look for files with symbols relative to this directory",
symbol__config_symfs),
- OPT_BOOLEAN(0, "source", &annotate.opts.annotate_src,
+ OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
"Interleave source code with assembly code (default)"),
- OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw,
+ OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
- OPT_STRING(0, "prefix", &annotate.opts.prefix, "prefix",
+ OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
- OPT_STRING(0, "prefix-strip", &annotate.opts.prefix_strip, "N",
+ OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
@@ -598,7 +818,7 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
"'always' (default), 'never' or 'auto' only applicable to --stdio mode",
stdio__config_color, "always"),
- OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
+ OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
OPT_CALLBACK(0, "percent-limit", &annotate, "percent",
@@ -606,7 +826,13 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
itrace_parse_synth_opts),
-
+ OPT_CALLBACK_OPTARG(0, "data-type", &annotate, NULL, "name",
+ "Show data type annotate for the memory accesses",
+ parse_data_type),
+ OPT_BOOLEAN(0, "type-stat", &annotate.type_stat,
+ "Show stats for the data type annotation"),
+ OPT_BOOLEAN(0, "insn-stat", &annotate.insn_stat,
+ "Show instruction stats for the data type annotation"),
OPT_END()
};
int ret;
@@ -614,13 +840,13 @@ int cmd_annotate(int argc, const char **argv)
set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE);
- annotation_options__init(&annotate.opts);
+ annotation_options__init();
ret = hists__init();
if (ret < 0)
return ret;
- annotation_config__init(&annotate.opts);
+ annotation_config__init();
argc = parse_options(argc, argv, options, annotate_usage, 0);
if (argc) {
@@ -635,13 +861,13 @@ int cmd_annotate(int argc, const char **argv)
}
if (disassembler_style) {
- annotate.opts.disassembler_style = strdup(disassembler_style);
- if (!annotate.opts.disassembler_style)
+ annotate_opts.disassembler_style = strdup(disassembler_style);
+ if (!annotate_opts.disassembler_style)
return -ENOMEM;
}
if (objdump_path) {
- annotate.opts.objdump_path = strdup(objdump_path);
- if (!annotate.opts.objdump_path)
+ annotate_opts.objdump_path = strdup(objdump_path);
+ if (!annotate_opts.objdump_path)
return -ENOMEM;
}
if (addr2line_path) {
@@ -650,7 +876,7 @@ int cmd_annotate(int argc, const char **argv)
return -ENOMEM;
}
- if (annotate_check_args(&annotate.opts) < 0)
+ if (annotate_check_args() < 0)
return -EINVAL;
#ifdef HAVE_GTK2_SUPPORT
@@ -660,6 +886,13 @@ int cmd_annotate(int argc, const char **argv)
}
#endif
+#ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT
+ if (annotate.data_type) {
+ pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
+ return -ENOTSUP;
+ }
+#endif
+
ret = symbol__validate_sym_arguments();
if (ret)
return ret;
@@ -702,6 +935,14 @@ int cmd_annotate(int argc, const char **argv)
use_browser = 2;
#endif
+ /* FIXME: only support stdio for now */
+ if (annotate.data_type) {
+ use_browser = 0;
+ annotate_opts.annotate_src = false;
+ symbol_conf.annotate_data_member = true;
+ symbol_conf.annotate_data_sample = true;
+ }
+
setup_browser(true);
/*
@@ -709,7 +950,10 @@ int cmd_annotate(int argc, const char **argv)
* symbol, we do not care about the processes in annotate,
* set sort order to avoid repeated output.
*/
- sort_order = "dso,symbol";
+ if (annotate.data_type)
+ sort_order = "dso,type";
+ else
+ sort_order = "dso,symbol";
/*
* Set SORT_MODE__BRANCH so that annotate display IPC/Cycle
@@ -731,7 +975,7 @@ out_delete:
#ifndef NDEBUG
perf_session__delete(annotate.session);
#endif
- annotation_options__exit(&annotate.opts);
+ annotation_options__exit();
return ret;
}
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index a4cf9de7a7b5..f78eea9e2153 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2320,7 +2320,7 @@ static int setup_nodes(struct perf_session *session)
nodes[node] = set;
/* empty node, skip */
- if (perf_cpu_map__empty(map))
+ if (perf_cpu_map__has_any_cpu_or_is_empty(map))
continue;
perf_cpu_map__for_each_cpu(cpu, idx, map) {
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index ac2e6c75f912..eb30c8eca488 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -333,7 +333,7 @@ static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
static int reset_tracing_cpu(void)
{
- struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *cpumap = perf_cpu_map__new_online_cpus();
int ret;
ret = set_tracing_cpumask(cpumap);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index c8cf2fdd9cff..eb3ef5c24b66 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -2265,6 +2265,12 @@ int cmd_inject(int argc, const char **argv)
"perf inject [<options>]",
NULL
};
+
+ if (!inject.itrace_synth_opts.set) {
+ /* Disable eager loading of kernel symbols that adds overhead to perf inject. */
+ symbol_conf.lazy_load_kernel_maps = true;
+ }
+
#ifndef HAVE_JITDUMP
set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 61c2c96cc070..e27a1b1288c2 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -30,6 +30,8 @@
* functions.
*/
struct print_state {
+ /** @fp: File to write output to. */
+ FILE *fp;
/**
* @pmu_glob: Optionally restrict PMU and metric matching to PMU or
* debugfs subsystem name.
@@ -66,13 +68,15 @@ static void default_print_start(void *ps)
{
struct print_state *print_state = ps;
- if (!print_state->name_only && pager_in_use())
- printf("\nList of pre-defined events (to be used in -e or -M):\n\n");
+ if (!print_state->name_only && pager_in_use()) {
+ fprintf(print_state->fp,
+ "\nList of pre-defined events (to be used in -e or -M):\n\n");
+ }
}
static void default_print_end(void *print_state __maybe_unused) {}
-static void wordwrap(const char *s, int start, int max, int corr)
+static void wordwrap(FILE *fp, const char *s, int start, int max, int corr)
{
int column = start;
int n;
@@ -82,10 +86,10 @@ static void wordwrap(const char *s, int start, int max, int corr)
int wlen = strcspn(s, " \t\n");
if ((column + wlen >= max && column > start) || saw_newline) {
- printf("\n%*s", start, "");
+ fprintf(fp, "\n%*s", start, "");
column = start + corr;
}
- n = printf("%s%.*s", column > start ? " " : "", wlen, s);
+ n = fprintf(fp, "%s%.*s", column > start ? " " : "", wlen, s);
if (n <= 0)
break;
saw_newline = s[wlen] == '\n';
@@ -104,6 +108,7 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
{
struct print_state *print_state = ps;
int pos;
+ FILE *fp = print_state->fp;
if (deprecated && !print_state->deprecated)
return;
@@ -119,30 +124,30 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
if (print_state->name_only) {
if (event_alias && strlen(event_alias))
- printf("%s ", event_alias);
+ fprintf(fp, "%s ", event_alias);
else
- printf("%s ", event_name);
+ fprintf(fp, "%s ", event_name);
return;
}
if (strcmp(print_state->last_topic, topic ?: "")) {
if (topic)
- printf("\n%s:\n", topic);
+ fprintf(fp, "\n%s:\n", topic);
zfree(&print_state->last_topic);
print_state->last_topic = strdup(topic ?: "");
}
if (event_alias && strlen(event_alias))
- pos = printf(" %s OR %s", event_name, event_alias);
+ pos = fprintf(fp, " %s OR %s", event_name, event_alias);
else
- pos = printf(" %s", event_name);
+ pos = fprintf(fp, " %s", event_name);
if (!topic && event_type_desc) {
for (; pos < 53; pos++)
- putchar(' ');
- printf("[%s]\n", event_type_desc);
+ fputc(' ', fp);
+ fprintf(fp, "[%s]\n", event_type_desc);
} else
- putchar('\n');
+ fputc('\n', fp);
if (desc && print_state->desc) {
char *desc_with_unit = NULL;
@@ -155,22 +160,22 @@ static void default_print_event(void *ps, const char *pmu_name, const char *topi
? "%s. Unit: %s" : "%s Unit: %s",
desc, pmu_name);
}
- printf("%*s", 8, "[");
- wordwrap(desc_len > 0 ? desc_with_unit : desc, 8, pager_get_columns(), 0);
- printf("]\n");
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, desc_len > 0 ? desc_with_unit : desc, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
free(desc_with_unit);
}
long_desc = long_desc ?: desc;
if (long_desc && print_state->long_desc) {
- printf("%*s", 8, "[");
- wordwrap(long_desc, 8, pager_get_columns(), 0);
- printf("]\n");
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, long_desc, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
}
if (print_state->detailed && encoding_desc) {
- printf("%*s", 8, "");
- wordwrap(encoding_desc, 8, pager_get_columns(), 0);
- putchar('\n');
+ fprintf(fp, "%*s", 8, "");
+ wordwrap(fp, encoding_desc, 8, pager_get_columns(), 0);
+ fputc('\n', fp);
}
}
@@ -184,6 +189,7 @@ static void default_print_metric(void *ps,
const char *unit __maybe_unused)
{
struct print_state *print_state = ps;
+ FILE *fp = print_state->fp;
if (print_state->event_glob &&
(!print_state->metrics || !name || !strglobmatch(name, print_state->event_glob)) &&
@@ -192,27 +198,27 @@ static void default_print_metric(void *ps,
if (!print_state->name_only && !print_state->last_metricgroups) {
if (print_state->metricgroups) {
- printf("\nMetric Groups:\n");
+ fprintf(fp, "\nMetric Groups:\n");
if (!print_state->metrics)
- putchar('\n');
+ fputc('\n', fp);
} else {
- printf("\nMetrics:\n\n");
+ fprintf(fp, "\nMetrics:\n\n");
}
}
if (!print_state->last_metricgroups ||
strcmp(print_state->last_metricgroups, group ?: "")) {
if (group && print_state->metricgroups) {
if (print_state->name_only)
- printf("%s ", group);
+ fprintf(fp, "%s ", group);
else if (print_state->metrics) {
const char *gdesc = describe_metricgroup(group);
if (gdesc)
- printf("\n%s: [%s]\n", group, gdesc);
+ fprintf(fp, "\n%s: [%s]\n", group, gdesc);
else
- printf("\n%s:\n", group);
+ fprintf(fp, "\n%s:\n", group);
} else
- printf("%s\n", group);
+ fprintf(fp, "%s\n", group);
}
zfree(&print_state->last_metricgroups);
print_state->last_metricgroups = strdup(group ?: "");
@@ -223,53 +229,59 @@ static void default_print_metric(void *ps,
if (print_state->name_only) {
if (print_state->metrics &&
!strlist__has_entry(print_state->visited_metrics, name)) {
- printf("%s ", name);
+ fprintf(fp, "%s ", name);
strlist__add(print_state->visited_metrics, name);
}
return;
}
- printf(" %s\n", name);
+ fprintf(fp, " %s\n", name);
if (desc && print_state->desc) {
- printf("%*s", 8, "[");
- wordwrap(desc, 8, pager_get_columns(), 0);
- printf("]\n");
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, desc, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
}
if (long_desc && print_state->long_desc) {
- printf("%*s", 8, "[");
- wordwrap(long_desc, 8, pager_get_columns(), 0);
- printf("]\n");
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, long_desc, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
}
if (expr && print_state->detailed) {
- printf("%*s", 8, "[");
- wordwrap(expr, 8, pager_get_columns(), 0);
- printf("]\n");
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, expr, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
}
if (threshold && print_state->detailed) {
- printf("%*s", 8, "[");
- wordwrap(threshold, 8, pager_get_columns(), 0);
- printf("]\n");
+ fprintf(fp, "%*s", 8, "[");
+ wordwrap(fp, threshold, 8, pager_get_columns(), 0);
+ fprintf(fp, "]\n");
}
}
struct json_print_state {
+ /** @fp: File to write output to. */
+ FILE *fp;
/** Should a separator be printed prior to the next item? */
bool need_sep;
};
-static void json_print_start(void *print_state __maybe_unused)
+static void json_print_start(void *ps)
{
- printf("[\n");
+ struct json_print_state *print_state = ps;
+ FILE *fp = print_state->fp;
+
+ fprintf(fp, "[\n");
}
static void json_print_end(void *ps)
{
struct json_print_state *print_state = ps;
+ FILE *fp = print_state->fp;
- printf("%s]\n", print_state->need_sep ? "\n" : "");
+ fprintf(fp, "%s]\n", print_state->need_sep ? "\n" : "");
}
-static void fix_escape_printf(struct strbuf *buf, const char *fmt, ...)
+static void fix_escape_fprintf(FILE *fp, struct strbuf *buf, const char *fmt, ...)
{
va_list args;
@@ -318,7 +330,7 @@ static void fix_escape_printf(struct strbuf *buf, const char *fmt, ...)
}
}
va_end(args);
- fputs(buf->buf, stdout);
+ fputs(buf->buf, fp);
}
static void json_print_event(void *ps, const char *pmu_name, const char *topic,
@@ -330,60 +342,71 @@ static void json_print_event(void *ps, const char *pmu_name, const char *topic,
{
struct json_print_state *print_state = ps;
bool need_sep = false;
+ FILE *fp = print_state->fp;
struct strbuf buf;
strbuf_init(&buf, 0);
- printf("%s{\n", print_state->need_sep ? ",\n" : "");
+ fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : "");
print_state->need_sep = true;
if (pmu_name) {
- fix_escape_printf(&buf, "\t\"Unit\": \"%S\"", pmu_name);
+ fix_escape_fprintf(fp, &buf, "\t\"Unit\": \"%S\"", pmu_name);
need_sep = true;
}
if (topic) {
- fix_escape_printf(&buf, "%s\t\"Topic\": \"%S\"", need_sep ? ",\n" : "", topic);
+ fix_escape_fprintf(fp, &buf, "%s\t\"Topic\": \"%S\"",
+ need_sep ? ",\n" : "",
+ topic);
need_sep = true;
}
if (event_name) {
- fix_escape_printf(&buf, "%s\t\"EventName\": \"%S\"", need_sep ? ",\n" : "",
- event_name);
+ fix_escape_fprintf(fp, &buf, "%s\t\"EventName\": \"%S\"",
+ need_sep ? ",\n" : "",
+ event_name);
need_sep = true;
}
if (event_alias && strlen(event_alias)) {
- fix_escape_printf(&buf, "%s\t\"EventAlias\": \"%S\"", need_sep ? ",\n" : "",
- event_alias);
+ fix_escape_fprintf(fp, &buf, "%s\t\"EventAlias\": \"%S\"",
+ need_sep ? ",\n" : "",
+ event_alias);
need_sep = true;
}
if (scale_unit && strlen(scale_unit)) {
- fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "",
- scale_unit);
+ fix_escape_fprintf(fp, &buf, "%s\t\"ScaleUnit\": \"%S\"",
+ need_sep ? ",\n" : "",
+ scale_unit);
need_sep = true;
}
if (event_type_desc) {
- fix_escape_printf(&buf, "%s\t\"EventType\": \"%S\"", need_sep ? ",\n" : "",
- event_type_desc);
+ fix_escape_fprintf(fp, &buf, "%s\t\"EventType\": \"%S\"",
+ need_sep ? ",\n" : "",
+ event_type_desc);
need_sep = true;
}
if (deprecated) {
- fix_escape_printf(&buf, "%s\t\"Deprecated\": \"%S\"", need_sep ? ",\n" : "",
- deprecated ? "1" : "0");
+ fix_escape_fprintf(fp, &buf, "%s\t\"Deprecated\": \"%S\"",
+ need_sep ? ",\n" : "",
+ deprecated ? "1" : "0");
need_sep = true;
}
if (desc) {
- fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
- desc);
+ fix_escape_fprintf(fp, &buf, "%s\t\"BriefDescription\": \"%S\"",
+ need_sep ? ",\n" : "",
+ desc);
need_sep = true;
}
if (long_desc) {
- fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
- long_desc);
+ fix_escape_fprintf(fp, &buf, "%s\t\"PublicDescription\": \"%S\"",
+ need_sep ? ",\n" : "",
+ long_desc);
need_sep = true;
}
if (encoding_desc) {
- fix_escape_printf(&buf, "%s\t\"Encoding\": \"%S\"", need_sep ? ",\n" : "",
- encoding_desc);
+ fix_escape_fprintf(fp, &buf, "%s\t\"Encoding\": \"%S\"",
+ need_sep ? ",\n" : "",
+ encoding_desc);
need_sep = true;
}
- printf("%s}", need_sep ? "\n" : "");
+ fprintf(fp, "%s}", need_sep ? "\n" : "");
strbuf_release(&buf);
}
@@ -394,43 +417,53 @@ static void json_print_metric(void *ps __maybe_unused, const char *group,
{
struct json_print_state *print_state = ps;
bool need_sep = false;
+ FILE *fp = print_state->fp;
struct strbuf buf;
strbuf_init(&buf, 0);
- printf("%s{\n", print_state->need_sep ? ",\n" : "");
+ fprintf(fp, "%s{\n", print_state->need_sep ? ",\n" : "");
print_state->need_sep = true;
if (group) {
- fix_escape_printf(&buf, "\t\"MetricGroup\": \"%S\"", group);
+ fix_escape_fprintf(fp, &buf, "\t\"MetricGroup\": \"%S\"", group);
need_sep = true;
}
if (name) {
- fix_escape_printf(&buf, "%s\t\"MetricName\": \"%S\"", need_sep ? ",\n" : "", name);
+ fix_escape_fprintf(fp, &buf, "%s\t\"MetricName\": \"%S\"",
+ need_sep ? ",\n" : "",
+ name);
need_sep = true;
}
if (expr) {
- fix_escape_printf(&buf, "%s\t\"MetricExpr\": \"%S\"", need_sep ? ",\n" : "", expr);
+ fix_escape_fprintf(fp, &buf, "%s\t\"MetricExpr\": \"%S\"",
+ need_sep ? ",\n" : "",
+ expr);
need_sep = true;
}
if (threshold) {
- fix_escape_printf(&buf, "%s\t\"MetricThreshold\": \"%S\"", need_sep ? ",\n" : "",
- threshold);
+ fix_escape_fprintf(fp, &buf, "%s\t\"MetricThreshold\": \"%S\"",
+ need_sep ? ",\n" : "",
+ threshold);
need_sep = true;
}
if (unit) {
- fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "", unit);
+ fix_escape_fprintf(fp, &buf, "%s\t\"ScaleUnit\": \"%S\"",
+ need_sep ? ",\n" : "",
+ unit);
need_sep = true;
}
if (desc) {
- fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
- desc);
+ fix_escape_fprintf(fp, &buf, "%s\t\"BriefDescription\": \"%S\"",
+ need_sep ? ",\n" : "",
+ desc);
need_sep = true;
}
if (long_desc) {
- fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
- long_desc);
+ fix_escape_fprintf(fp, &buf, "%s\t\"PublicDescription\": \"%S\"",
+ need_sep ? ",\n" : "",
+ long_desc);
need_sep = true;
}
- printf("%s}", need_sep ? "\n" : "");
+ fprintf(fp, "%s}", need_sep ? "\n" : "");
strbuf_release(&buf);
}
@@ -449,8 +482,12 @@ static bool default_skip_duplicate_pmus(void *ps)
int cmd_list(int argc, const char **argv)
{
int i, ret = 0;
- struct print_state default_ps = {};
- struct print_state json_ps = {};
+ struct print_state default_ps = {
+ .fp = stdout,
+ };
+ struct print_state json_ps = {
+ .fp = stdout,
+ };
void *ps = &default_ps;
struct print_callbacks print_cb = {
.print_start = default_print_start,
@@ -461,6 +498,7 @@ int cmd_list(int argc, const char **argv)
};
const char *cputype = NULL;
const char *unit_name = NULL;
+ const char *output_path = NULL;
bool json = false;
struct option list_options[] = {
OPT_BOOLEAN(0, "raw-dump", &default_ps.name_only, "Dump raw events"),
@@ -471,6 +509,7 @@ int cmd_list(int argc, const char **argv)
"Print longer event descriptions."),
OPT_BOOLEAN(0, "details", &default_ps.detailed,
"Print information on the perf event names and expressions used internally by events."),
+ OPT_STRING('o', "output", &output_path, "file", "output file name"),
OPT_BOOLEAN(0, "deprecated", &default_ps.deprecated,
"Print deprecated events."),
OPT_STRING(0, "cputype", &cputype, "cpu type",
@@ -497,6 +536,11 @@ int cmd_list(int argc, const char **argv)
argc = parse_options(argc, argv, list_options, list_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ if (output_path) {
+ default_ps.fp = fopen(output_path, "w");
+ json_ps.fp = default_ps.fp;
+ }
+
setup_pager();
if (!default_ps.name_only)
@@ -618,5 +662,8 @@ out:
free(default_ps.last_topic);
free(default_ps.last_metricgroups);
strlist__delete(default_ps.visited_metrics);
+ if (output_path)
+ fclose(default_ps.fp);
+
return ret;
}
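The builtin-list conversion threads a FILE pointer through the print state rather than writing to stdout, which is what lets the new -o/--output flag serve both the default and JSON printers. A reduced sketch of the pattern (names illustrative):

    #include <stdio.h>

    /* Sketch: every emitter takes the state, so redirection is one assignment. */
    struct printer_state {
            FILE *fp;
    };

    static void emit_line(struct printer_state *ps, const char *s)
    {
            fprintf(ps->fp, "%s\n", s);
    }

    static void print_to(const char *path)
    {
            struct printer_state ps = { .fp = path ? fopen(path, "w") : stdout };

            if (!ps.fp)
                    return;
            emit_line(&ps, "example");
            if (ps.fp != stdout)
                    fclose(ps.fp);
    }

Note the sketch checks the fopen() result, while the hunk above leaves a failed open to surface at the first write.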
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index a3ff2f4edbaa..230461280e45 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -2285,8 +2285,10 @@ setup_args:
else
ev_name = strdup(contention_tracepoints[j].name);
- if (!ev_name)
+ if (!ev_name) {
+ free(rec_argv);
return -ENOMEM;
+ }
rec_argv[i++] = "-e";
rec_argv[i++] = ev_name;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index dcf288a4fb9a..86c910125172 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -270,7 +270,7 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
-static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size);
#ifdef HAVE_AIO_SUPPORT
@@ -405,9 +405,13 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
*/
if (record__comp_enabled(aio->rec)) {
- size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
- mmap__mmap_len(map) - aio->size,
- buf, size);
+ ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
+ mmap__mmap_len(map) - aio->size,
+ buf, size);
+ if (compressed < 0)
+ return (int)compressed;
+
+ size = compressed;
} else {
memcpy(aio->data + aio->size, buf, size);
}
@@ -633,7 +637,13 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
struct record *rec = to;
if (record__comp_enabled(rec)) {
- size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
+ ssize_t compressed = zstd_compress(rec->session, map, map->data,
+ mmap__mmap_len(map), bf, size);
+
+ if (compressed < 0)
+ return (int)compressed;
+
+ size = compressed;
bf = map->data;
}
@@ -1350,7 +1360,7 @@ static int record__open(struct record *rec)
evlist__for_each_entry(evlist, pos) {
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
- if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
+ if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
@@ -1527,10 +1537,10 @@ static size_t process_comp_header(void *record, size_t increment)
return size;
}
-static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
void *dst, size_t dst_size, void *src, size_t src_size)
{
- size_t compressed;
+ ssize_t compressed;
size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
struct zstd_data *zstd_data = &session->zstd_data;
@@ -1539,6 +1549,8 @@ static size_t zstd_compress(struct perf_session *session, struct mmap *map,
compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
max_record_size, process_comp_header);
+ if (compressed < 0)
+ return compressed;
if (map && map->file) {
thread->bytes_transferred += src_size;
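Switching the return type to ssize_t lets the compressor report failure in-band, since size_t could not carry a negative error code. A condensed sketch of the calling convention, assuming zstd_compress() as declared above:

    /* Sketch: a negative return is an error code, a non-negative one is a byte count. */
    static int push_compressed(struct perf_session *session, struct mmap *map,
                               void *dst, size_t dst_size, void *src, size_t src_size)
    {
            ssize_t n = zstd_compress(session, map, dst, dst_size, src, src_size);

            if (n < 0)
                    return (int)n; /* propagate, e.g. a failure from the zstd stream */
            /* success: n bytes of compressed records were written to dst */
            return 0;
    }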
@@ -1912,21 +1924,13 @@ static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
static void record__read_lost_samples(struct record *rec)
{
struct perf_session *session = rec->session;
- struct perf_record_lost_samples *lost;
+ struct perf_record_lost_samples *lost = NULL;
struct evsel *evsel;
/* there was an error during record__open */
if (session->evlist == NULL)
return;
- lost = zalloc(PERF_SAMPLE_MAX_SIZE);
- if (lost == NULL) {
- pr_debug("Memory allocation failed\n");
- return;
- }
-
- lost->header.type = PERF_RECORD_LOST_SAMPLES;
-
evlist__for_each_entry(session->evlist, evsel) {
struct xyarray *xy = evsel->core.sample_id;
u64 lost_count;
@@ -1949,6 +1953,15 @@ static void record__read_lost_samples(struct record *rec)
}
if (count.lost) {
+ if (!lost) {
+ lost = zalloc(sizeof(*lost) +
+ session->machines.host.id_hdr_size);
+ if (!lost) {
+ pr_debug("Memory allocation failed\n");
+ return;
+ }
+ lost->header.type = PERF_RECORD_LOST_SAMPLES;
+ }
__record__save_lost_samples(rec, evsel, lost,
x, y, count.lost, 0);
}
@@ -1956,9 +1969,19 @@ static void record__read_lost_samples(struct record *rec)
}
lost_count = perf_bpf_filter__lost_count(evsel);
- if (lost_count)
+ if (lost_count) {
+ if (!lost) {
+ lost = zalloc(sizeof(*lost) +
+ session->machines.host.id_hdr_size);
+ if (!lost) {
+ pr_debug("Memory allocation failed\n");
+ return;
+ }
+ lost->header.type = PERF_RECORD_LOST_SAMPLES;
+ }
__record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
PERF_RECORD_MISC_LOST_SAMPLES_BPF);
+ }
}
out:
free(lost);
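Both branches above allocate the lost-samples record only when a non-zero count is actually seen. The lazy-init idiom they share, reduced to a sketch (helper name illustrative):

    #include <linux/zalloc.h>

    /* Sketch: allocate on first use, stamp the header once, reuse afterwards. */
    static struct perf_record_lost_samples *
    get_lost_buf(struct perf_record_lost_samples **lost, size_t id_hdr_size)
    {
            if (!*lost) {
                    *lost = zalloc(sizeof(**lost) + id_hdr_size);
                    if (*lost)
                            (*lost)->header.type = PERF_RECORD_LOST_SAMPLES;
            }
            return *lost;
    }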
@@ -2216,32 +2239,6 @@ static void hit_auxtrace_snapshot_trigger(struct record *rec)
}
}
-static void record__uniquify_name(struct record *rec)
-{
- struct evsel *pos;
- struct evlist *evlist = rec->evlist;
- char *new_name;
- int ret;
-
- if (perf_pmus__num_core_pmus() == 1)
- return;
-
- evlist__for_each_entry(evlist, pos) {
- if (!evsel__is_hybrid(pos))
- continue;
-
- if (strchr(pos->name, '/'))
- continue;
-
- ret = asprintf(&new_name, "%s/%s/",
- pos->pmu_name, pos->name);
- if (ret) {
- free(pos->name);
- pos->name = new_name;
- }
- }
-}
-
static int record__terminate_thread(struct record_thread *thread_data)
{
int err;
@@ -2475,7 +2472,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (data->is_pipe && rec->evlist->core.nr_entries == 1)
rec->opts.sample_id = true;
- record__uniquify_name(rec);
+ evlist__uniquify_name(rec->evlist);
/* Debug message used by test scripts */
pr_debug3("perf record opening and mmapping events\n");
@@ -3580,9 +3577,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
if (cpu_map__is_dummy(cpus))
return 0;
- perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
- if (cpu.cpu == -1)
- continue;
+ perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
/* Return ENODEV if input cpu is greater than max cpu */
if ((unsigned long)cpu.cpu > mask->nbits)
return -ENODEV;
@@ -3989,6 +3984,8 @@ int cmd_record(int argc, const char **argv)
# undef set_nobuild
#endif
+ /* Disable eager loading of kernel symbols, which adds overhead to perf record. */
+ symbol_conf.lazy_load_kernel_maps = true;
rec->opts.affinity = PERF_AFFINITY_SYS;
rec->evlist = evlist__new();
@@ -4083,8 +4080,8 @@ int cmd_record(int argc, const char **argv)
}
if (rec->switch_output.num_files) {
- rec->switch_output.filenames = calloc(sizeof(char *),
- rec->switch_output.num_files);
+ rec->switch_output.filenames = calloc(rec->switch_output.num_files,
+ sizeof(char *));
if (!rec->switch_output.filenames) {
err = -EINVAL;
goto out_opts;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 9cb1da2dc0c0..f2ed2b7e80a3 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -96,9 +96,9 @@ struct report {
bool stitch_lbr;
bool disable_order;
bool skip_empty;
+ bool data_type;
int max_stack;
struct perf_read_values show_threads_values;
- struct annotation_options annotation_opts;
const char *pretty_printing_style;
const char *cpu_list;
const char *symbol_filter_str;
@@ -171,7 +171,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct mem_info *mi;
struct branch_info *bi;
- if (!ui__has_annotation() && !rep->symbol_ipc)
+ if (!ui__has_annotation() && !rep->symbol_ipc && !rep->data_type)
return 0;
if (sort__mode == SORT_MODE__BRANCH) {
@@ -541,8 +541,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *
evlist__for_each_entry(evlist, pos) {
ret = report__browse_block_hists(&rep->block_reports[i++].hist,
rep->min_percent, pos,
- &rep->session->header.env,
- &rep->annotation_opts);
+ &rep->session->header.env);
if (ret != 0)
return ret;
}
@@ -574,8 +573,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c
if (rep->total_cycles_mode) {
report__browse_block_hists(&rep->block_reports[i++].hist,
- rep->min_percent, pos,
- NULL, NULL);
+ rep->min_percent, pos, NULL);
continue;
}
@@ -670,7 +668,7 @@ static int report__browse_hists(struct report *rep)
}
ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
- &session->header.env, true, &rep->annotation_opts);
+ &session->header.env, true);
/*
* Usually "ret" is the last pressed key, and we only
* care if the key notifies us to switch data file.
@@ -745,7 +743,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg)
if (rep->symbol_ipc && sym && !sym->annotate2) {
struct evsel *evsel = hists_to_evsel(he->hists);
- symbol__annotate2(&he->ms, evsel, &rep->annotation_opts, NULL);
+ symbol__annotate2(&he->ms, evsel, NULL);
}
return 0;
@@ -859,27 +857,47 @@ static struct task *tasks_list(struct task *task, struct machine *machine)
return tasks_list(parent_task, machine);
}
-static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
+struct maps__fprintf_task_args {
+ int indent;
+ FILE *fp;
+ size_t printed;
+};
+
+static int maps__fprintf_task_cb(struct map *map, void *data)
{
- size_t printed = 0;
- struct map_rb_node *rb_node;
+ struct maps__fprintf_task_args *args = data;
+ const struct dso *dso = map__dso(map);
+ u32 prot = map__prot(map);
+ int ret;
- maps__for_each_entry(maps, rb_node) {
- struct map *map = rb_node->map;
- const struct dso *dso = map__dso(map);
- u32 prot = map__prot(map);
+ ret = fprintf(args->fp,
+ "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
+ args->indent, "", map__start(map), map__end(map),
+ prot & PROT_READ ? 'r' : '-',
+ prot & PROT_WRITE ? 'w' : '-',
+ prot & PROT_EXEC ? 'x' : '-',
+ map__flags(map) ? 's' : 'p',
+ map__pgoff(map),
+ dso->id.ino, dso->name);
- printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
- indent, "", map__start(map), map__end(map),
- prot & PROT_READ ? 'r' : '-',
- prot & PROT_WRITE ? 'w' : '-',
- prot & PROT_EXEC ? 'x' : '-',
- map__flags(map) ? 's' : 'p',
- map__pgoff(map),
- dso->id.ino, dso->name);
- }
+ if (ret < 0)
+ return ret;
+
+ args->printed += ret;
+ return 0;
+}
+
+static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
+{
+ struct maps__fprintf_task_args args = {
+ .indent = indent,
+ .fp = fp,
+ .printed = 0,
+ };
- return printed;
+ maps__for_each_map(maps, maps__fprintf_task_cb, &args);
+
+ return args.printed;
}
static void task__print_level(struct task *task, FILE *fp, int level)
@@ -1341,15 +1359,15 @@ int cmd_report(int argc, const char **argv)
"list of cpus to profile"),
OPT_BOOLEAN('I', "show-info", &report.show_full_info,
"Display extended information about perf.data file"),
- OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src,
+ OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
"Interleave source code with assembly code (default)"),
- OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
+ OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
- OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix",
+ OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
- OPT_STRING(0, "prefix-strip", &report.annotation_opts.prefix_strip, "N",
+ OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
@@ -1401,7 +1419,7 @@ int cmd_report(int argc, const char **argv)
"Time span of interest (start,stop)"),
OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
"Show inline function"),
- OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
+ OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
@@ -1426,7 +1444,14 @@ int cmd_report(int argc, const char **argv)
if (ret < 0)
goto exit;
- annotation_options__init(&report.annotation_opts);
+ /*
+ * tasks_mode require access to exited threads to list those that are in
+ * the data file. Off-cpu events are synthesized after other events and
+ * reference exited threads.
+ */
+ symbol_conf.keep_exited_threads = true;
+
+ annotation_options__init();
ret = perf_config(report__config, &report);
if (ret)
@@ -1445,13 +1470,13 @@ int cmd_report(int argc, const char **argv)
}
if (disassembler_style) {
- report.annotation_opts.disassembler_style = strdup(disassembler_style);
- if (!report.annotation_opts.disassembler_style)
+ annotate_opts.disassembler_style = strdup(disassembler_style);
+ if (!annotate_opts.disassembler_style)
return -ENOMEM;
}
if (objdump_path) {
- report.annotation_opts.objdump_path = strdup(objdump_path);
- if (!report.annotation_opts.objdump_path)
+ annotate_opts.objdump_path = strdup(objdump_path);
+ if (!annotate_opts.objdump_path)
return -ENOMEM;
}
if (addr2line_path) {
@@ -1460,7 +1485,7 @@ int cmd_report(int argc, const char **argv)
return -ENOMEM;
}
- if (annotate_check_args(&report.annotation_opts) < 0) {
+ if (annotate_check_args() < 0) {
ret = -EINVAL;
goto exit;
}
@@ -1615,6 +1640,16 @@ repeat:
sort_order = NULL;
}
+ if (sort_order && strstr(sort_order, "type")) {
+ report.data_type = true;
+ annotate_opts.annotate_src = false;
+
+#ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT
+ pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
+ goto error;
+#endif
+ }
+
if (strcmp(input_name, "-") != 0)
setup_browser(true);
else
@@ -1673,7 +1708,7 @@ repeat:
* so don't allocate extra space that won't be used in the stdio
* implementation.
*/
- if (ui__has_annotation() || report.symbol_ipc ||
+ if (ui__has_annotation() || report.symbol_ipc || report.data_type ||
report.total_cycles_mode) {
ret = symbol__annotation_init();
if (ret < 0)
@@ -1692,7 +1727,7 @@ repeat:
*/
symbol_conf.priv_size += sizeof(u32);
}
- annotation_config__init(&report.annotation_opts);
+ annotation_config__init();
}
if (symbol__init(&session->header.env) < 0)
@@ -1746,7 +1781,7 @@ error:
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
exit:
- annotation_options__exit(&report.annotation_opts);
+ annotation_options__exit();
free(sort_order_help);
free(field_order_help);
return ret;
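/*
 * Illustrative usage of the data-type profiling path enabled above (a
 * sketch, not part of this patch; assumes the "type"/"typeoff" sort keys
 * from the same series and a perf.data containing memory access samples,
 * e.g. from "perf mem record"):
 *
 *   perf mem record -a -- sleep 1
 *   perf report -s type,typeoff --stdio
 *
 * On builds lacking HAVE_DWARF_GETLOCATIONS_SUPPORT, perf report now
 * exits with the DWARF error printed above.
 */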
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a3af805a1d57..5fe9abc6a524 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -653,7 +653,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
if ((evsel__leader(counter) != counter) ||
!(counter->core.leader->nr_members > 1))
return COUNTER_SKIP;
- } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
+ } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
return COUNTER_RETRY;
@@ -1204,8 +1204,9 @@ static struct option stat_options[] = {
OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
"list of cpus to monitor in system-wide"),
OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
- "disable CPU count aggregation", AGGR_NONE),
- OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
+ "disable aggregation across CPUs or PMUs", AGGR_NONE),
+ OPT_SET_UINT(0, "no-merge", &stat_config.aggr_mode,
+ "disable aggregation the same as -A or -no-aggr", AGGR_NONE),
OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
"Merge identical named hybrid events"),
OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
@@ -1255,7 +1256,7 @@ static struct option stat_options[] = {
OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
"don't try to share events between metrics in a group"),
OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
- "don't try to share events between metrics in a group "),
+ "disable adding events for the metric threshold calculation"),
OPT_BOOLEAN(0, "topdown", &topdown_run,
"measure top-down statistics"),
OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
@@ -1316,7 +1317,7 @@ static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
* be the first online CPU in the cache domain else use the
* first online CPU of the cache domain as the ID.
*/
- if (perf_cpu_map__empty(cpu_map))
+ if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map))
id = cpu.cpu;
else
id = perf_cpu_map__cpu(cpu_map, 0).cpu;
@@ -1622,7 +1623,7 @@ static int perf_stat_init_aggr_mode(void)
* taking the highest cpu number to be the size of
* the aggregation translate cpumap.
*/
- if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
else
nr = 0;
@@ -2289,7 +2290,7 @@ int process_stat_config_event(struct perf_session *session,
perf_event__read_stat_config(&stat_config, &event->stat_config);
- if (perf_cpu_map__empty(st->cpus)) {
+ if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) {
if (st->aggr_mode != AGGR_UNSET)
pr_warning("warning: processing task data, aggregation mode not set\n");
} else if (st->aggr_mode != AGGR_UNSET) {
@@ -2695,15 +2696,19 @@ int cmd_stat(int argc, const char **argv)
*/
if (metrics) {
const char *pmu = parse_events_option_args.pmu_filter ?: "all";
+ int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
+ stat_config.metric_no_group,
+ stat_config.metric_no_merge,
+ stat_config.metric_no_threshold,
+ stat_config.user_requested_cpu_list,
+ stat_config.system_wide,
+ &stat_config.metric_events);
- metricgroup__parse_groups(evsel_list, pmu, metrics,
- stat_config.metric_no_group,
- stat_config.metric_no_merge,
- stat_config.metric_no_threshold,
- stat_config.user_requested_cpu_list,
- stat_config.system_wide,
- &stat_config.metric_events);
zfree(&metrics);
+ if (ret) {
+ status = ret;
+ goto out;
+ }
}
if (add_default_attributes())
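/*
 * Behavioral note on the option change above (illustrative, not part of
 * this patch): --no-merge now sets aggr_mode to AGGR_NONE, making it an
 * alias for -A/--no-aggr, so the following two invocations are equivalent
 * and both disable aggregation across CPUs and PMUs:
 *
 *   perf stat -A -e cycles -a -- sleep 1
 *   perf stat --no-merge -e cycles -a -- sleep 1
 *
 * Separately, metricgroup__parse_groups() failures now terminate perf
 * stat with an error status instead of being ignored.
 */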
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ea8c7eca5eee..5301d1badd43 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -147,7 +147,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
+ err = symbol__annotate(&he->ms, evsel, NULL);
if (err == 0) {
top->sym_filter_entry = he;
} else {
@@ -261,9 +261,9 @@ static void perf_top__show_details(struct perf_top *top)
goto out_unlock;
printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
- printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
+ printf(" Events Pcnt (>=%d%%)\n", annotate_opts.min_pcnt);
- more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
+ more = symbol__annotate_printf(&he->ms, top->sym_evsel);
if (top->evlist->enabled) {
if (top->zero)
@@ -357,7 +357,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
static void prompt_integer(int *target, const char *msg)
{
- char *buf = malloc(0), *p;
+ char *buf = NULL, *p;
size_t dummy = 0;
int tmp;
@@ -450,7 +450,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
- fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
+ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", annotate_opts.min_pcnt);
fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
fprintf(stdout, "\t[S] stop annotation.\n");
@@ -553,7 +553,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
prompt_integer(&top->count_filter, "Enter display event count filter");
break;
case 'F':
- prompt_percent(&top->annotation_opts.min_pcnt,
+ prompt_percent(&annotate_opts.min_pcnt,
"Enter details display event filter (percent)");
break;
case 'K':
@@ -646,8 +646,7 @@ repeat:
}
ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
- &top->session->header.env, !top->record_opts.overwrite,
- &top->annotation_opts);
+ &top->session->header.env, !top->record_opts.overwrite);
if (ret == K_RELOAD) {
top->zero = true;
goto repeat;
@@ -1027,8 +1026,8 @@ static int perf_top__start_counters(struct perf_top *top)
evlist__for_each_entry(evlist, counter) {
try_again:
- if (evsel__open(counter, top->evlist->core.user_requested_cpus,
- top->evlist->core.threads) < 0) {
+ if (evsel__open(counter, counter->core.cpus,
+ counter->core.threads) < 0) {
/*
* Specially handle overwrite fall back.
@@ -1044,7 +1043,7 @@ try_again:
perf_top_overwrite_fallback(top, counter))
goto try_again;
- if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
+ if (evsel__fallback(counter, &opts->target, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
@@ -1241,9 +1240,9 @@ static int __cmd_top(struct perf_top *top)
pthread_t thread, thread_process;
int ret;
- if (!top->annotation_opts.objdump_path) {
+ if (!annotate_opts.objdump_path) {
ret = perf_env__lookup_objdump(&top->session->header.env,
- &top->annotation_opts.objdump_path);
+ &annotate_opts.objdump_path);
if (ret)
return ret;
}
@@ -1299,6 +1298,7 @@ static int __cmd_top(struct perf_top *top)
}
}
+ evlist__uniquify_name(top->evlist);
ret = perf_top__start_counters(top);
if (ret)
return ret;
@@ -1536,9 +1536,9 @@ int cmd_top(int argc, const char **argv)
"only consider symbols in these comms"),
OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
- OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
+ OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
"Interleave source code with assembly code (default)"),
- OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
+ OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
@@ -1549,9 +1549,9 @@ int cmd_top(int argc, const char **argv)
"addr2line binary to use for line numbers"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
- OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
+ OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
- OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
+ OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
OPT_CALLBACK(0, "percent-limit", &top, "percent",
@@ -1609,10 +1609,10 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
return status;
- annotation_options__init(&top.annotation_opts);
+ annotation_options__init();
- top.annotation_opts.min_pcnt = 5;
- top.annotation_opts.context = 4;
+ annotate_opts.min_pcnt = 5;
+ annotate_opts.context = 4;
top.evlist = evlist__new();
if (top.evlist == NULL)
@@ -1642,13 +1642,13 @@ int cmd_top(int argc, const char **argv)
usage_with_options(top_usage, options);
if (disassembler_style) {
- top.annotation_opts.disassembler_style = strdup(disassembler_style);
- if (!top.annotation_opts.disassembler_style)
+ annotate_opts.disassembler_style = strdup(disassembler_style);
+ if (!annotate_opts.disassembler_style)
return -ENOMEM;
}
if (objdump_path) {
- top.annotation_opts.objdump_path = strdup(objdump_path);
- if (!top.annotation_opts.objdump_path)
+ annotate_opts.objdump_path = strdup(objdump_path);
+ if (!annotate_opts.objdump_path)
return -ENOMEM;
}
if (addr2line_path) {
@@ -1661,7 +1661,7 @@ int cmd_top(int argc, const char **argv)
if (status)
goto out_delete_evlist;
- if (annotate_check_args(&top.annotation_opts) < 0)
+ if (annotate_check_args() < 0)
goto out_delete_evlist;
if (!top.evlist->core.nr_entries) {
@@ -1787,7 +1787,7 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
goto out_delete_evlist;
- annotation_config__init(&top.annotation_opts);
+ annotation_config__init();
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
status = symbol__init(NULL);
@@ -1840,7 +1840,7 @@ int cmd_top(int argc, const char **argv)
out_delete_evlist:
evlist__delete(top.evlist);
perf_session__delete(top.session);
- annotation_options__exit(&top.annotation_opts);
+ annotation_options__exit();
return status;
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index e541d0e2777a..109b8e64fe69 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2470,9 +2470,8 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
static const char *errno_to_name(struct evsel *evsel, int err)
{
struct perf_env *env = evsel__env(evsel);
- const char *arch_name = perf_env__arch(env);
- return arch_syscalls__strerrno(arch_name, err);
+ return perf_env__arch_strerrno(env, err);
}
static int trace__sys_exit(struct trace *trace, struct evsel *evsel,
@@ -4264,12 +4263,11 @@ static size_t thread__dump_stats(struct thread_trace *ttrace,
printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
if (trace->errno_summary && stats->nr_failures) {
- const char *arch_name = perf_env__arch(trace->host->env);
int e;
for (e = 0; e < stats->max_errno; ++e) {
if (stats->errnos[e] != 0)
- fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
+ fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]);
}
}
}
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 133f0eddbcc4..f94795794b36 100644..100755
--- a/tools/perf/perf-archive.sh
+++ b/tools/perf/perf-archive.sh
@@ -4,8 +4,73 @@
# Arnaldo Carvalho de Melo <acme@redhat.com>
PERF_DATA=perf.data
-if [ $# -ne 0 ] ; then
- PERF_DATA=$1
+PERF_SYMBOLS=perf.symbols
+PERF_ALL=perf.all
+ALL=0
+UNPACK=0
+
+while [ $# -gt 0 ] ; do
+ if [ "$1" == "--all" ]; then
+ ALL=1
+ shift
+ elif [ "$1" == "--unpack" ]; then
+ UNPACK=1
+ shift
+ else
+ PERF_DATA=$1
+ UNPACK_TAR=$1
+ shift
+ fi
+done
+
+if [ $UNPACK -eq 1 ]; then
+ if [ ! -z "$UNPACK_TAR" ]; then # tar given as an argument
+ if [ ! -e "$UNPACK_TAR" ]; then
+ echo "Provided file $UNPACK_TAR does not exist"
+ exit 1
+ fi
+ TARGET="$UNPACK_TAR"
+ else # search for perf tar in the current directory
+ TARGET=`find . -regex "\./perf.*\.tar\.bz2"`
+ TARGET_NUM=`echo -n "$TARGET" | grep -c '^'`
+
+ if [ -z "$TARGET" -o $TARGET_NUM -gt 1 ]; then
+ echo -e "Error: $TARGET_NUM files found for unpacking:\n$TARGET"
+ echo "Provide the requested file as an argument"
+ exit 1
+ else
+ echo "Found target file for unpacking: $TARGET"
+ fi
+ fi
+
+ if [[ "$TARGET" =~ (\./)?$PERF_ALL.*.tar.bz2 ]]; then # perf tar generated by --all option
+ TAR_CONTENTS=`tar tvf "$TARGET" | tr -s " " | cut -d " " -f 6`
+ VALID_TAR=`echo "$TAR_CONTENTS" | grep "$PERF_SYMBOLS.tar.bz2" | wc -l` # check if it contains a sub-tar perf.symbols
+ if [ $VALID_TAR -ne 1 ]; then
+ echo "Error: $TARGET file is not valid (contains zero or multiple sub-tar files with debug symbols)"
+ exit 1
+ fi
+
+ INTERSECT=`comm -12 <(ls) <(echo "$TAR_CONTENTS") | tr "\n" " "` # check for overwriting
+ if [ ! -z "$INTERSECT" ]; then # prompt if file(s) already exist in the current directory
+ echo "File(s) ${INTERSECT::-1} already exist in the current directory."
+ while true; do
+ read -p 'Do you wish to overwrite them? ' yn
+ case $yn in
+ [Yy]* ) break;;
+ [Nn]* ) exit 1;;
+ * ) echo "Please answer yes or no.";;
+ esac
+ done
+ fi
+
+ # extract the perf.data file into the current working directory and the debug symbols into the ~/.debug directory
+ tar xvf $TARGET && tar xvf $PERF_SYMBOLS.tar.bz2 -C ~/.debug
+
+ else # perf tar generated by perf archive (contains only debug symbols)
+ tar xvf $TARGET -C ~/.debug
+ fi
+ exit 0
fi
#
@@ -39,9 +104,18 @@ while read build_id ; do
echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST
done
-tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
-rm $MANIFEST $BUILDIDS || true
+if [ $ALL -eq 1 ]; then # pack perf.data file together with tar containing debug symbols
+ HOSTNAME=$(hostname)
+ DATE=$(date '+%Y%m%d-%H%M%S')
+ tar cjf $PERF_SYMBOLS.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+ tar cjf $PERF_ALL-$HOSTNAME-$DATE.tar.bz2 $PERF_DATA $PERF_SYMBOLS.tar.bz2
+ rm $PERF_SYMBOLS.tar.bz2 $MANIFEST $BUILDIDS || true
+else # pack only the debug symbols
+ tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+ rm $MANIFEST $BUILDIDS || true
+fi
+
echo -e "Now please run:\n"
-echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
-echo "wherever you need to run 'perf report' on."
+echo -e "$ perf archive --unpack\n"
+echo "or unpack the tar manually wherever you need to run 'perf report' on."
exit 0
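# Illustrative end-to-end flow with the new flags (a sketch, not part of
# this patch; the host name and date in the tarball name are generated at
# run time):
#
#   On the profiled machine, bundle perf.data with the debug symbols:
#     perf archive --all          # -> perf.all-<hostname>-<date>.tar.bz2
#
#   On the analysis machine, in the directory holding that tarball:
#     perf archive --unpack       # perf.data lands in the current
#                                 # directory, symbols in ~/.debug
#     perf report
#
# Plain "perf archive" and "perf archive --unpack <symbols-tarball>" keep
# the previous symbols-only behavior.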
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index d3fc8090413c..921bee0a6437 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -39,6 +39,7 @@
#include <linux/zalloc.h>
static int use_pager = -1;
+static FILE *debug_fp = NULL;
struct cmd_struct {
const char *cmd;
@@ -162,6 +163,19 @@ static void commit_pager_choice(void)
}
}
+static int set_debug_file(const char *path)
+{
+ debug_fp = fopen(path, "w");
+ if (!debug_fp) {
+ fprintf(stderr, "Open debug file '%s' failed: %s\n",
+ path, strerror(errno));
+ return -1;
+ }
+
+ debug_set_file(debug_fp);
+ return 0;
+}
+
struct option options[] = {
OPT_ARGUMENT("help", "help"),
OPT_ARGUMENT("version", "version"),
@@ -174,6 +188,7 @@ struct option options[] = {
OPT_ARGUMENT("list-cmds", "list-cmds"),
OPT_ARGUMENT("list-opts", "list-opts"),
OPT_ARGUMENT("debug", "debug"),
+ OPT_ARGUMENT("debug-file", "debug-file"),
OPT_END()
};
@@ -287,6 +302,18 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
(*argv)++;
(*argc)--;
+ } else if (!strcmp(cmd, "--debug-file")) {
+ if (*argc < 2) {
+ fprintf(stderr, "No path given for --debug-file.\n");
+ usage(perf_usage_string);
+ }
+
+ if (set_debug_file((*argv)[1]))
+ usage(perf_usage_string);
+
+ (*argv)++;
+ (*argc)--;
+
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
usage(perf_usage_string);
@@ -547,5 +574,8 @@ int main(int argc, const char **argv)
fprintf(stderr, "Failed to run command '%s': %s\n",
cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
out:
+ if (debug_fp)
+ fclose(debug_fp);
+
return 1;
}
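/*
 * Illustrative use of the new flag (a sketch, not part of this patch):
 * redirect perf's debug output to a file instead of stderr, e.g.
 *
 *   perf --debug-file /tmp/perf.log --debug verbose=2 record -a -- sleep 1
 *
 * The file is opened with fopen(path, "w") in set_debug_file() and
 * closed before main() returns.
 */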
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
index 88b23b85e33c..879ff21e0b17 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json
@@ -110,7 +110,7 @@
{
"PublicDescription": "Flushes due to memory hazards",
"EventCode": "0x121",
- "EventName": "BPU_FLUSH_MEM_FAULT",
+ "EventName": "GPC_FLUSH_MEM_FAULT",
"BriefDescription": "Flushes due to memory hazards"
},
{
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json
new file mode 100644
index 000000000000..a632755fc086
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json
@@ -0,0 +1,125 @@
+[
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, branch not taken",
+ "EventCode": "0x8107",
+ "EventName": "BR_SKIP_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, branch not taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, immediate branch taken",
+ "EventCode": "0x8108",
+ "EventName": "BR_IMMED_TAKEN_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, immediate branch taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, indirect branch excluding return retired",
+ "EventCode": "0x810c",
+ "EventName": "BR_INDNR_TAKEN_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, indirect branch excluding return retired"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, predicted immediate branch",
+ "EventCode": "0x8110",
+ "EventName": "BR_IMMED_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, predicted immediate branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted immediate branch",
+ "EventCode": "0x8111",
+ "EventName": "BR_IMMED_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted immediate branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, predicted indirect branch",
+ "EventCode": "0x8112",
+ "EventName": "BR_IND_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, predicted indirect branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted indirect branch",
+ "EventCode": "0x8113",
+ "EventName": "BR_IND_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted indirect branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, predicted procedure return",
+ "EventCode": "0x8114",
+ "EventName": "BR_RETURN_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, predicted procedure return"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted procedure return",
+ "EventCode": "0x8115",
+ "EventName": "BR_RETURN_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted procedure return"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, predicted indirect branch excluding return",
+ "EventCode": "0x8116",
+ "EventName": "BR_INDNR_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, predicted indirect branch excluding return"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted indirect branch excluding return",
+ "EventCode": "0x8117",
+ "EventName": "BR_INDNR_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted indirect branch excluding return"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, predicted branch, taken",
+ "EventCode": "0x8118",
+ "EventName": "BR_TAKEN_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, predicted branch, taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted branch, taken",
+ "EventCode": "0x8119",
+ "EventName": "BR_TAKEN_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted branch, taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, predicted branch, not taken",
+ "EventCode": "0x811a",
+ "EventName": "BR_SKIP_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, predicted branch, not taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted branch, not taken",
+ "EventCode": "0x811b",
+ "EventName": "BR_SKIP_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted branch, not taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, predicted branch",
+ "EventCode": "0x811c",
+ "EventName": "BR_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, predicted branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, indirect branch",
+ "EventCode": "0x811d",
+ "EventName": "BR_IND_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, indirect branch"
+ },
+ {
+ "PublicDescription": "Branch Record captured.",
+ "EventCode": "0x811f",
+ "EventName": "BRB_FILTRATE",
+ "BriefDescription": "Branch Record captured."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json
new file mode 100644
index 000000000000..2aeb9907831d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json
new file mode 100644
index 000000000000..c50d8e930b05
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json
@@ -0,0 +1,206 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "PublicDescription": "Level 1 data or unified cache demand access",
+ "EventCode": "0x8140",
+ "EventName": "L1D_CACHE_RW",
+ "BriefDescription": "Level 1 data or unified cache demand access"
+ },
+ {
+ "PublicDescription": "Level 1 data or unified cache preload or prefetch",
+ "EventCode": "0x8142",
+ "EventName": "L1D_CACHE_PRFM",
+ "BriefDescription": "Level 1 data or unified cache preload or prefetch"
+ },
+ {
+ "PublicDescription": "Level 1 data or unified cache refill, preload or prefetch",
+ "EventCode": "0x8146",
+ "EventName": "L1D_CACHE_REFILL_PRFM",
+ "BriefDescription": "Level 1 data or unified cache refill, preload or prefetch"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "PublicDescription": "L1D TLB miss",
+ "EventCode": "0xD600",
+ "EventName": "L1D_TLB_MISS",
+ "BriefDescription": "L1D TLB miss"
+ },
+ {
+ "PublicDescription": "Level 1 prefetcher, load prefetch requests generated",
+ "EventCode": "0xd606",
+ "EventName": "L1_PREFETCH_LD_GEN",
+ "BriefDescription": "Level 1 prefetcher, load prefetch requests generated"
+ },
+ {
+ "PublicDescription": "Level 1 prefetcher, load prefetch fills into the level 1 cache",
+ "EventCode": "0xd607",
+ "EventName": "L1_PREFETCH_LD_FILL",
+ "BriefDescription": "Level 1 prefetcher, load prefetch fills into the level 1 cache"
+ },
+ {
+ "PublicDescription": "Level 1 prefetcher, load prefetch to level 2 generated",
+ "EventCode": "0xd608",
+ "EventName": "L1_PREFETCH_L2_REQ",
+ "BriefDescription": "Level 1 prefetcher, load prefetch to level 2 generated"
+ },
+ {
+ "PublicDescription": "L1 prefetcher, distance was reset",
+ "EventCode": "0xd609",
+ "EventName": "L1_PREFETCH_DIST_RST",
+ "BriefDescription": "L1 prefetcher, distance was reset"
+ },
+ {
+ "PublicDescription": "L1 prefetcher, distance was increased",
+ "EventCode": "0xd60a",
+ "EventName": "L1_PREFETCH_DIST_INC",
+ "BriefDescription": "L1 prefetcher, distance was increased"
+ },
+ {
+ "PublicDescription": "Level 1 prefetcher, table entry is trained",
+ "EventCode": "0xd60b",
+ "EventName": "L1_PREFETCH_ENTRY_TRAINED",
+ "BriefDescription": "Level 1 prefetcher, table entry is trained"
+ },
+ {
+ "PublicDescription": "L1 data cache refill - Read or Write",
+ "EventCode": "0xd60e",
+ "EventName": "L1D_CACHE_REFILL_RW",
+ "BriefDescription": "L1 data cache refill - Read or Write"
+ },
+ {
+ "PublicDescription": "Level 2 cache refill from instruction-side miss, including IMMU refills",
+ "EventCode": "0xD701",
+ "EventName": "L2C_INST_REFILL",
+ "BriefDescription": "Level 2 cache refill from instruction-side miss, including IMMU refills"
+ },
+ {
+ "PublicDescription": "Level 2 cache refill from data-side miss, including DMMU refills",
+ "EventCode": "0xD702",
+ "EventName": "L2C_DATA_REFILL",
+ "BriefDescription": "Level 2 cache refill from data-side miss, including DMMU refills"
+ },
+ {
+ "PublicDescription": "Level 2 cache prefetcher, load prefetch requests generated",
+ "EventCode": "0xD703",
+ "EventName": "L2_PREFETCH_REQ",
+ "BriefDescription": "Level 2 cache prefetcher, load prefetch requests generated"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json
new file mode 100644
index 000000000000..eb5a2208d260
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json
@@ -0,0 +1,464 @@
+[
+ {
+ "PublicDescription": "Level 2 prefetch requests, refilled to L2 cache",
+ "EventCode": "0x10A",
+ "EventName": "L2_PREFETCH_REFILL",
+ "BriefDescription": "Level 2 prefetch requests, refilled to L2 cache"
+ },
+ {
+ "PublicDescription": "Level 2 prefetch requests, late",
+ "EventCode": "0x10B",
+ "EventName": "L2_PREFETCH_UPGRADE",
+ "BriefDescription": "Level 2 prefetch requests, late"
+ },
+ {
+ "PublicDescription": "Predictable branch speculatively executed that hit any level of BTB",
+ "EventCode": "0x110",
+ "EventName": "BPU_HIT_BTB",
+ "BriefDescription": "Predictable branch speculatively executed that hit any level of BTB"
+ },
+ {
+ "PublicDescription": "Predictable conditional branch speculatively executed that hit any level of BTB",
+ "EventCode": "0x111",
+ "EventName": "BPU_CONDITIONAL_BRANCH_HIT_BTB",
+ "BriefDescription": "Predictable conditional branch speculatively executed that hit any level of BTB"
+ },
+ {
+ "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor",
+ "EventCode": "0x112",
+ "EventName": "BPU_HIT_INDIRECT_PREDICTOR",
+ "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor"
+ },
+ {
+ "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor",
+ "EventCode": "0x113",
+ "EventName": "BPU_HIT_RSB",
+ "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor"
+ },
+ {
+ "PublicDescription": "Predictable unconditional branch speculatively executed that did not hit any level of BTB",
+ "EventCode": "0x114",
+ "EventName": "BPU_UNCONDITIONAL_BRANCH_MISS_BTB",
+ "BriefDescription": "Predictable unconditional branch speculatively executed that did not hit any level of BTB"
+ },
+ {
+ "PublicDescription": "Predictable branch speculatively executed, unpredicted",
+ "EventCode": "0x115",
+ "EventName": "BPU_BRANCH_NO_HIT",
+ "BriefDescription": "Predictable branch speculatively executed, unpredicted"
+ },
+ {
+ "PublicDescription": "Predictable branch speculatively executed that hit any level of BTB that mispredict",
+ "EventCode": "0x116",
+ "EventName": "BPU_HIT_BTB_AND_MISPREDICT",
+ "BriefDescription": "Predictable branch speculatively executed that hit any level of BTB that mispredict"
+ },
+ {
+ "PublicDescription": "Predictable conditional branch speculatively executed that hit any level of BTB that (direction) mispredict",
+ "EventCode": "0x117",
+ "EventName": "BPU_CONDITIONAL_BRANCH_HIT_BTB_AND_MISPREDICT",
+ "BriefDescription": "Predictable conditional branch speculatively executed that hit any level of BTB that (direction) mispredict"
+ },
+ {
+ "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor that mispredict",
+ "EventCode": "0x118",
+ "EventName": "BPU_INDIRECT_BRANCH_HIT_BTB_AND_MISPREDICT",
+ "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor that mispredict"
+ },
+ {
+ "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor that mispredict",
+ "EventCode": "0x119",
+ "EventName": "BPU_HIT_RSB_AND_MISPREDICT",
+ "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor that mispredict"
+ },
+ {
+ "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the overflow/underflow return predictor that mispredict",
+ "EventCode": "0x11a",
+ "EventName": "BPU_MISS_RSB_AND_MISPREDICT",
+ "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the overflow/underflow return predictor that mispredict"
+ },
+ {
+ "PublicDescription": "Predictable branch speculatively executed, unpredicted, that mispredict",
+ "EventCode": "0x11b",
+ "EventName": "BPU_NO_PREDICTION_MISPREDICT",
+ "BriefDescription": "Predictable branch speculatively executed, unpredicted, that mispredict"
+ },
+ {
+ "PublicDescription": "Preditable branch update the BTB region buffer entry",
+ "EventCode": "0x11c",
+ "EventName": "BPU_BTB_UPDATE",
+ "BriefDescription": "Preditable branch update the BTB region buffer entry"
+ },
+ {
+ "PublicDescription": "Count predict pipe stalls due to speculative return address predictor full",
+ "EventCode": "0x11d",
+ "EventName": "BPU_RSB_FULL_STALL",
+ "BriefDescription": "Count predict pipe stalls due to speculative return address predictor full"
+ },
+ {
+ "PublicDescription": "Macro-ops speculatively decoded",
+ "EventCode": "0x11f",
+ "EventName": "ICF_INST_SPEC_DECODE",
+ "BriefDescription": "Macro-ops speculatively decoded"
+ },
+ {
+ "PublicDescription": "Flushes",
+ "EventCode": "0x120",
+ "EventName": "GPC_FLUSH",
+ "BriefDescription": "Flushes"
+ },
+ {
+ "PublicDescription": "Flushes due to memory hazards",
+ "EventCode": "0x121",
+ "EventName": "GPC_FLUSH_MEM_FAULT",
+ "BriefDescription": "Flushes due to memory hazards"
+ },
+ {
+ "PublicDescription": "ETM extout bit 0",
+ "EventCode": "0x141",
+ "EventName": "MSC_ETM_EXTOUT0",
+ "BriefDescription": "ETM extout bit 0"
+ },
+ {
+ "PublicDescription": "ETM extout bit 1",
+ "EventCode": "0x142",
+ "EventName": "MSC_ETM_EXTOUT1",
+ "BriefDescription": "ETM extout bit 1"
+ },
+ {
+ "PublicDescription": "ETM extout bit 2",
+ "EventCode": "0x143",
+ "EventName": "MSC_ETM_EXTOUT2",
+ "BriefDescription": "ETM extout bit 2"
+ },
+ {
+ "PublicDescription": "ETM extout bit 3",
+ "EventCode": "0x144",
+ "EventName": "MSC_ETM_EXTOUT3",
+ "BriefDescription": "ETM extout bit 3"
+ },
+ {
+ "PublicDescription": "Bus request sn",
+ "EventCode": "0x156",
+ "EventName": "L2C_SNOOP",
+ "BriefDescription": "Bus request sn"
+ },
+ {
+ "PublicDescription": "L2 TXDAT LCRD blocked",
+ "EventCode": "0x169",
+ "EventName": "L2C_DAT_CRD_STALL",
+ "BriefDescription": "L2 TXDAT LCRD blocked"
+ },
+ {
+ "PublicDescription": "L2 TXRSP LCRD blocked",
+ "EventCode": "0x16a",
+ "EventName": "L2C_RSP_CRD_STALL",
+ "BriefDescription": "L2 TXRSP LCRD blocked"
+ },
+ {
+ "PublicDescription": "L2 TXREQ LCRD blocked",
+ "EventCode": "0x16b",
+ "EventName": "L2C_REQ_CRD_STALL",
+ "BriefDescription": "L2 TXREQ LCRD blocked"
+ },
+ {
+ "PublicDescription": "Early mispredict",
+ "EventCode": "0xD100",
+ "EventName": "ICF_EARLY_MIS_PRED",
+ "BriefDescription": "Early mispredict"
+ },
+ {
+ "PublicDescription": "FEQ full cycles",
+ "EventCode": "0xD101",
+ "EventName": "ICF_FEQ_FULL",
+ "BriefDescription": "FEQ full cycles"
+ },
+ {
+ "PublicDescription": "Instruction FIFO Full",
+ "EventCode": "0xD102",
+ "EventName": "ICF_INST_FIFO_FULL",
+ "BriefDescription": "Instruction FIFO Full"
+ },
+ {
+ "PublicDescription": "L1I TLB miss",
+ "EventCode": "0xD103",
+ "EventName": "L1I_TLB_MISS",
+ "BriefDescription": "L1I TLB miss"
+ },
+ {
+ "PublicDescription": "ICF sent 0 instructions to IDR this cycle",
+ "EventCode": "0xD104",
+ "EventName": "ICF_STALL",
+ "BriefDescription": "ICF sent 0 instructions to IDR this cycle"
+ },
+ {
+ "PublicDescription": "PC FIFO Full",
+ "EventCode": "0xD105",
+ "EventName": "ICF_PC_FIFO_FULL",
+ "BriefDescription": "PC FIFO Full"
+ },
+ {
+ "PublicDescription": "Stall due to BOB ID",
+ "EventCode": "0xD200",
+ "EventName": "IDR_STALL_BOB_ID",
+ "BriefDescription": "Stall due to BOB ID"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to LOB entries",
+ "EventCode": "0xD201",
+ "EventName": "IDR_STALL_LOB_ID",
+ "BriefDescription": "Dispatch stall due to LOB entries"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to SOB entries",
+ "EventCode": "0xD202",
+ "EventName": "IDR_STALL_SOB_ID",
+ "BriefDescription": "Dispatch stall due to SOB entries"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to IXU scheduler entries",
+ "EventCode": "0xD203",
+ "EventName": "IDR_STALL_IXU_SCHED",
+ "BriefDescription": "Dispatch stall due to IXU scheduler entries"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to FSU scheduler entries",
+ "EventCode": "0xD204",
+ "EventName": "IDR_STALL_FSU_SCHED",
+ "BriefDescription": "Dispatch stall due to FSU scheduler entries"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to ROB entries",
+ "EventCode": "0xD205",
+ "EventName": "IDR_STALL_ROB_ID",
+ "BriefDescription": "Dispatch stall due to ROB entries"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to flush",
+ "EventCode": "0xD206",
+ "EventName": "IDR_STALL_FLUSH",
+ "BriefDescription": "Dispatch stall due to flush"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to WFI",
+ "EventCode": "0xD207",
+ "EventName": "IDR_STALL_WFI",
+ "BriefDescription": "Dispatch stall due to WFI"
+ },
+ {
+ "PublicDescription": "Number of SWOB drains triggered by timeout",
+ "EventCode": "0xD208",
+ "EventName": "IDR_STALL_SWOB_TIMEOUT",
+ "BriefDescription": "Number of SWOB drains triggered by timeout"
+ },
+ {
+ "PublicDescription": "Number of SWOB drains triggered by system register or special-purpose register read-after-write or specific special-purpose register writes that cause SWOB drain",
+ "EventCode": "0xD209",
+ "EventName": "IDR_STALL_SWOB_RAW",
+ "BriefDescription": "Number of SWOB drains triggered by system register or special-purpose register read-after-write or specific special-purpose register writes that cause SWOB drain"
+ },
+ {
+ "PublicDescription": "Number of SWOB drains triggered by system register write when SWOB full",
+ "EventCode": "0xD20A",
+ "EventName": "IDR_STALL_SWOB_FULL",
+ "BriefDescription": "Number of SWOB drains triggered by system register write when SWOB full"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to L1 instruction cache miss",
+ "EventCode": "0xD20B",
+ "EventName": "STALL_FRONTEND_CACHE",
+ "BriefDescription": "Dispatch stall due to L1 instruction cache miss"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to L1 data cache miss",
+ "EventCode": "0xD20D",
+ "EventName": "STALL_BACKEND_CACHE",
+ "BriefDescription": "Dispatch stall due to L1 data cache miss"
+ },
+ {
+ "PublicDescription": "Dispatch stall due to lack of any core resource",
+ "EventCode": "0xD20F",
+ "EventName": "STALL_BACKEND_RESOURCE",
+ "BriefDescription": "Dispatch stall due to lack of any core resource"
+ },
+ {
+ "PublicDescription": "Instructions issued by the scheduler",
+ "EventCode": "0xD300",
+ "EventName": "IXU_NUM_UOPS_ISSUED",
+ "BriefDescription": "Instructions issued by the scheduler"
+ },
+ {
+ "PublicDescription": "Any uop issued was canceled for any reason",
+ "EventCode": "0xD301",
+ "EventName": "IXU_ISSUE_CANCEL",
+ "BriefDescription": "Any uop issued was canceled for any reason"
+ },
+ {
+ "PublicDescription": "A load wakeup to the scheduler has been canceled",
+ "EventCode": "0xD302",
+ "EventName": "IXU_LOAD_CANCEL",
+ "BriefDescription": "A load wakeup to the scheduler has been canceled"
+ },
+ {
+ "PublicDescription": "The scheduler had to cancel one slow Uop due to resource conflict",
+ "EventCode": "0xD303",
+ "EventName": "IXU_SLOW_CANCEL",
+ "BriefDescription": "The scheduler had to cancel one slow Uop due to resource conflict"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXA",
+ "EventCode": "0xD304",
+ "EventName": "IXU_IXA_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXA"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXA Par 0",
+ "EventCode": "0xD305",
+ "EventName": "IXU_IXA_PAR0_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXA Par 0"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXA Par 1",
+ "EventCode": "0xD306",
+ "EventName": "IXU_IXA_PAR1_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXA Par 1"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXB",
+ "EventCode": "0xD307",
+ "EventName": "IXU_IXB_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXB"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXB Par 0",
+ "EventCode": "0xD308",
+ "EventName": "IXU_IXB_PAR0_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXB Par 0"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXB Par 1",
+ "EventCode": "0xD309",
+ "EventName": "IXU_IXB_PAR1_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXB Par 1"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXC",
+ "EventCode": "0xD30A",
+ "EventName": "IXU_IXC_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXC"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXC Par 0",
+ "EventCode": "0xD30B",
+ "EventName": "IXU_IXC_PAR0_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXC Par 0"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXC Par 1",
+ "EventCode": "0xD30C",
+ "EventName": "IXU_IXC_PAR1_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXC Par 1"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXD",
+ "EventCode": "0xD30D",
+ "EventName": "IXU_IXD_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXD"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXD Par 0",
+ "EventCode": "0xD30E",
+ "EventName": "IXU_IXD_PAR0_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXD Par 0"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on IXD Par 1",
+ "EventCode": "0xD30F",
+ "EventName": "IXU_IXD_PAR1_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on IXD Par 1"
+ },
+ {
+ "PublicDescription": "Uops issued by the FSU scheduler",
+ "EventCode": "0xD400",
+ "EventName": "FSU_ISSUED",
+ "BriefDescription": "Uops issued by the FSU scheduler"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on FSX",
+ "EventCode": "0xD401",
+ "EventName": "FSU_FSX_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on FSX"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on FSY",
+ "EventCode": "0xD402",
+ "EventName": "FSU_FSY_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on FSY"
+ },
+ {
+ "PublicDescription": "Uops issued by the scheduler on FSZ",
+ "EventCode": "0xD403",
+ "EventName": "FSU_FSZ_ISSUED",
+ "BriefDescription": "Uops issued by the scheduler on FSZ"
+ },
+ {
+ "PublicDescription": "Uops canceled (load cancels)",
+ "EventCode": "0xD404",
+ "EventName": "FSU_CANCEL",
+ "BriefDescription": "Uops canceled (load cancels)"
+ },
+ {
+ "PublicDescription": "Count scheduler stalls due to divide/sqrt",
+ "EventCode": "0xD405",
+ "EventName": "FSU_DIV_SQRT_STALL",
+ "BriefDescription": "Count scheduler stalls due to divide/sqrt"
+ },
+ {
+ "PublicDescription": "Number of SWOB drains",
+ "EventCode": "0xD500",
+ "EventName": "GPC_SWOB_DRAIN",
+ "BriefDescription": "Number of SWOB drains"
+ },
+ {
+ "PublicDescription": "GPC detected a Breakpoint instruction match",
+ "EventCode": "0xD501",
+ "EventName": "BREAKPOINT_MATCH",
+ "BriefDescription": "GPC detected a Breakpoint instruction match"
+ },
+ {
+ "PublicDescription": "Core progress monitor triggered",
+ "EventCode": "0xd502",
+ "EventName": "GPC_CPM_TRIGGER",
+ "BriefDescription": "Core progress monitor triggered"
+ },
+ {
+ "PublicDescription": "Fill buffer full",
+ "EventCode": "0xD601",
+ "EventName": "OFB_FULL",
+ "BriefDescription": "Fill buffer full"
+ },
+ {
+ "PublicDescription": "Load satisified from store forwarded data",
+ "EventCode": "0xD605",
+ "EventName": "LD_FROM_ST_FWD",
+ "BriefDescription": "Load satisified from store forwarded data"
+ },
+ {
+ "PublicDescription": "Store retirement pipe stall",
+ "EventCode": "0xD60C",
+ "EventName": "LSU_ST_RETIRE_STALL",
+ "BriefDescription": "Store retirement pipe stall"
+ },
+ {
+ "PublicDescription": "LSU detected a Watchpoint data match",
+ "EventCode": "0xD60D",
+ "EventName": "WATCHPOINT_MATCH",
+ "BriefDescription": "LSU detected a Watchpoint data match"
+ },
+ {
+ "PublicDescription": "Counts cycles that MSC is telling GPC to stall commit due to ETM ISTALL feature",
+ "EventCode": "0xda00",
+ "EventName": "MSC_ETM_COMMIT_STALL",
+ "BriefDescription": "Counts cycles that MSC is telling GPC to stall commit due to ETM ISTALL feature"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json
new file mode 100644
index 000000000000..bd59ba7b74e4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json
new file mode 100644
index 000000000000..a6a20f541e33
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json
@@ -0,0 +1,128 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "PublicDescription": "Operation speculatively executed - ASE Scalar",
+ "EventCode": "0xd210",
+ "EventName": "ASE_SCALAR_SPEC",
+ "BriefDescription": "Operation speculatively executed - ASE Scalar"
+ },
+ {
+ "PublicDescription": "Operation speculatively executed - ASE Vector",
+ "EventCode": "0xd211",
+ "EventName": "ASE_VECTOR_SPEC",
+ "BriefDescription": "Operation speculatively executed - ASE Vector"
+ },
+ {
+ "PublicDescription": "Barrier speculatively executed, CSDB",
+ "EventCode": "0x7f",
+ "EventName": "CSDB_SPEC",
+ "BriefDescription": "Barrier speculatively executed, CSDB"
+ },
+ {
+ "PublicDescription": "Prefetch sent to L2.",
+ "EventCode": "0xd106",
+ "EventName": "ICF_PREFETCH_DISPATCH",
+ "BriefDescription": "Prefetch sent to L2."
+ },
+ {
+ "PublicDescription": "Prefetch response received but was dropped since we don't support inflight upgrades.",
+ "EventCode": "0xd107",
+ "EventName": "ICF_PREFETCH_DROPPED_NO_UPGRADE",
+ "BriefDescription": "Prefetch response received but was dropped since we don't support inflight upgrades."
+ },
+ {
+ "PublicDescription": "Prefetch request missed TLB.",
+ "EventCode": "0xd108",
+ "EventName": "ICF_PREFETCH_DROPPED_TLB_MISS",
+ "BriefDescription": "Prefetch request missed TLB."
+ },
+ {
+ "PublicDescription": "Prefetch request dropped since duplicate was found in TLB.",
+ "EventCode": "0xd109",
+ "EventName": "ICF_PREFETCH_DROPPED_DUPLICATE",
+ "BriefDescription": "Prefetch request dropped since duplicate was found in TLB."
+ },
+ {
+ "PublicDescription": "Prefetch request dropped since it was found in cache.",
+ "EventCode": "0xd10a",
+ "EventName": "ICF_PREFETCH_DROPPED_CACHE_HIT",
+ "BriefDescription": "Prefetch request dropped since it was found in cache."
+ }
+]
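
For context, the ICF_PREFETCH_* events above expose the instruction-prefetch pipeline as one dispatch counter plus four distinct drop reasons. A minimal sketch of turning those counters into a drop-reason breakdown follows; the counter values are hypothetical, and treating each drop counter as a subset of dispatches is an assumption for illustration, not something the event definitions guarantee.

counts = {
    "ICF_PREFETCH_DISPATCH": 100_000,         # hypothetical sample values
    "ICF_PREFETCH_DROPPED_NO_UPGRADE": 1_200,
    "ICF_PREFETCH_DROPPED_TLB_MISS": 3_400,
    "ICF_PREFETCH_DROPPED_DUPLICATE": 5_600,
    "ICF_PREFETCH_DROPPED_CACHE_HIT": 7_800,
}

dispatched = counts["ICF_PREFETCH_DISPATCH"]
for name, value in counts.items():
    if "DROPPED" in name:
        # Fraction of dispatched prefetches lost to this reason.
        print(f"{name}: {value / dispatched:.2%}")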
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json
new file mode 100644
index 000000000000..7ecffb989ae0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json
new file mode 100644
index 000000000000..a211d94aacde
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json
@@ -0,0 +1,41 @@
+[
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ },
+ {
+ "PublicDescription": "Flushes due to memory hazards",
+ "EventCode": "0x121",
+ "EventName": "BPU_FLUSH_MEM_FAULT",
+ "BriefDescription": "Flushes due to memory hazards"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
new file mode 100644
index 000000000000..c5d1d22bd034
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json
@@ -0,0 +1,442 @@
+[
+ {
+ "MetricName": "branch_miss_pred_rate",
+ "MetricExpr": "BR_MIS_PRED / BR_PRED",
+ "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch",
+ "MetricGroup": "branch",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "bus_utilization",
+ "MetricExpr": "BUS_ACCESS / (BUS_CYCLES * 1)",
+ "BriefDescription": "Core-to-uncore bus utilization",
+ "MetricGroup": "Bus",
+ "ScaleUnit": "100percent of bus cycles"
+ },
+ {
+ "MetricName": "l1d_cache_miss_ratio",
+ "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
+ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
+ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l1i_cache_miss_ratio",
+ "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
+ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "Miss_Ratio;l1d_cache_read_miss",
+ "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD",
+ "BriefDescription": "L1D cache read miss rate",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "1per cache read access"
+ },
+ {
+ "MetricName": "l2_cache_miss_ratio",
+ "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
+ "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l1i_cache_read_miss_rate",
+ "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE",
+ "BriefDescription": "L1I cache read miss rate",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l2d_cache_read_miss_rate",
+ "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD",
+ "BriefDescription": "L2 cache read miss rate",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "1per cache read access"
+ },
+ {
+ "MetricName": "l1d_cache_miss_mpki",
+ "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (data)",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l1i_cache_miss_mpki",
+ "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED",
+ "BriefDescription": "Misses per thousand instructions (instruction)",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "simd_percentage",
+ "MetricExpr": "ASE_SPEC / INST_SPEC",
+ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "crypto_percentage",
+ "MetricExpr": "CRYPTO_SPEC / INST_SPEC",
+ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "gflops",
+ "MetricExpr": "VFP_SPEC / (duration_time * 1e9)",
+ "BriefDescription": "Giga-floating point operations per second",
+ "MetricGroup": "InstructionMix"
+ },
+ {
+ "MetricName": "integer_dp_percentage",
+ "MetricExpr": "DP_SPEC / INST_SPEC",
+ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "ipc",
+ "MetricExpr": "INST_RETIRED / CPU_CYCLES",
+ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
+ "MetricGroup": "General",
+ "ScaleUnit": "1per cycle"
+ },
+ {
+ "MetricName": "load_percentage",
+ "MetricExpr": "LD_SPEC / INST_SPEC",
+ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "load_store_spec_rate",
+ "MetricExpr": "LDST_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of load or store instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "retired_mips",
+ "MetricExpr": "INST_RETIRED / (duration_time * 1e6)",
+ "BriefDescription": "Millions of instructions per second",
+ "MetricGroup": "InstructionMix"
+ },
+ {
+ "MetricName": "spec_utilization_mips",
+ "MetricExpr": "INST_SPEC / (duration_time * 1e6)",
+ "BriefDescription": "Millions of instructions per second",
+ "MetricGroup": "PEutilization"
+ },
+ {
+ "MetricName": "pc_write_spec_rate",
+ "MetricExpr": "PC_WRITE_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "store_percentage",
+ "MetricExpr": "ST_SPEC / INST_SPEC",
+ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "scalar_fp_percentage",
+ "MetricExpr": "VFP_SPEC / INST_SPEC",
+ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "100percent of operations"
+ },
+ {
+ "MetricName": "retired_rate",
+ "MetricExpr": "OP_RETIRED / OP_SPEC",
+ "BriefDescription": "Of all the micro-operations issued, what percentage are retired(committed)",
+ "MetricGroup": "General",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "wasted",
+ "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))",
+ "BriefDescription": "Of all the micro-operations issued, what proportion are lost",
+ "MetricGroup": "General",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "wasted_rate",
+ "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
+ "BriefDescription": "Of all the micro-operations issued, what percentage are not retired(committed)",
+ "MetricGroup": "General",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "stall_backend_cache_rate",
+ "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss",
+ "MetricGroup": "Stall",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_backend_resource_rate",
+ "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full",
+ "MetricGroup": "Stall",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_backend_tlb_rate",
+ "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss",
+ "MetricGroup": "Stall",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_frontend_cache_rate",
+ "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss",
+ "MetricGroup": "Stall",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_frontend_tlb_rate",
+ "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
+ "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss",
+ "MetricGroup": "Stall",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "dtlb_walk_ratio",
+ "MetricExpr": "DTLB_WALK / L1D_TLB",
+ "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "itlb_walk_ratio",
+ "MetricExpr": "ITLB_WALK / L1I_TLB",
+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "ArchStdEvent": "backend_bound"
+ },
+ {
+ "ArchStdEvent": "frontend_bound",
+ "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)"
+ },
+ {
+ "MetricName": "slots_lost_misspeculation_fraction",
+ "MetricExpr": "(OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)",
+ "BriefDescription": "Fraction of slots lost due to misspeculation",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricGroup": "Default;TopdownL1",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "retired_fraction",
+ "MetricExpr": "OP_RETIRED / (CPU_CYCLES * #slots)",
+ "BriefDescription": "Fraction of slots retiring, useful work",
+ "DefaultMetricgroupName": "TopdownL1",
+ "MetricGroup": "Default;TopdownL1",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "backend_core",
+ "MetricExpr": "(backend_bound / 100) - backend_memory",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues",
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "backend_memory",
+ "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)",
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "branch_mispredict",
+ "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction",
+ "BriefDescription": "Fraction of slots lost due to branch misprediciton",
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
+ },
+ {
+ "MetricName": "frontend_bandwidth",
+ "MetricExpr": "frontend_bound - frontend_latency",
+ "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)",
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
+ },
+ {
+ "MetricName": "frontend_latency",
+ "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES",
+ "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch",
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "other_miss_pred",
+ "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict",
+ "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation",
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
+ },
+ {
+ "MetricName": "pipe_utilization",
+ "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))",
+ "BriefDescription": "Fraction of execute slots utilized",
+ "MetricGroup": "TopdownL2",
+ "ScaleUnit": "1percent of slots"
+ },
+ {
+ "MetricName": "d_cache_l2_miss_rate",
+ "MetricExpr": "STALL_BACKEND_MEM / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "d_cache_miss_rate",
+ "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "d_tlb_miss_rate",
+ "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "fsu_pipe_utilization",
+ "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)",
+ "BriefDescription": "Fraction of FSU execute slots utilized",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "i_cache_miss_rate",
+ "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "i_tlb_miss_rate",
+ "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "ixu_pipe_utilization",
+ "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)",
+ "BriefDescription": "Fraction of IXU execute slots utilized",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "stall_recovery_rate",
+ "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery",
+ "MetricGroup": "TopdownL3",
+ "ScaleUnit": "100percent of slots"
+ },
+ {
+ "MetricName": "stall_fsu_sched_rate",
+ "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full",
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_ixu_sched_rate",
+ "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full",
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_lob_id_rate",
+ "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full",
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_rob_id_rate",
+ "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full",
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "stall_sob_id_rate",
+ "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES",
+ "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full",
+ "MetricGroup": "TopdownL4",
+ "ScaleUnit": "100percent of cycles"
+ },
+ {
+ "MetricName": "l1d_cache_access_demand",
+ "MetricExpr": "L1D_CACHE_RW / L1D_CACHE",
+ "BriefDescription": "L1D cache access - demand",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "100percent of cache acceses"
+ },
+ {
+ "MetricName": "l1d_cache_access_prefetces",
+ "MetricExpr": "L1D_CACHE_PRFM / L1D_CACHE",
+ "BriefDescription": "L1D cache access - prefetch",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "100percent of cache acceses"
+ },
+ {
+ "MetricName": "l1d_cache_demand_misses",
+ "MetricExpr": "L1D_CACHE_REFILL_RW / L1D_CACHE",
+ "BriefDescription": "L1D cache demand misses",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "100percent of cache acceses"
+ },
+ {
+ "MetricName": "l1d_cache_demand_misses_read",
+ "MetricExpr": "L1D_CACHE_REFILL_RD / L1D_CACHE",
+ "BriefDescription": "L1D cache demand misses - read",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "100percent of cache acceses"
+ },
+ {
+ "MetricName": "l1d_cache_demand_misses_write",
+ "MetricExpr": "L1D_CACHE_REFILL_WR / L1D_CACHE",
+ "BriefDescription": "L1D cache demand misses - write",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "100percent of cache acceses"
+ },
+ {
+ "MetricName": "l1d_cache_prefetch_misses",
+ "MetricExpr": "L1D_CACHE_REFILL_PRFM / L1D_CACHE",
+ "BriefDescription": "L1D cache prefetch misses",
+ "MetricGroup": "Cache",
+ "ScaleUnit": "100percent of cache acceses"
+ },
+ {
+ "MetricName": "ase_scalar_mix",
+ "MetricExpr": "ASE_SCALAR_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) scalar operations",
+ "MetricGroup": "Instructions",
+ "ScaleUnit": "100percent of cache acceses"
+ },
+ {
+ "MetricName": "ase_vector_mix",
+ "MetricExpr": "ASE_VECTOR_SPEC / OP_SPEC",
+ "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) vector operations",
+ "MetricGroup": "Instructions",
+ "ScaleUnit": "100percent of cache acceses"
+ }
+]
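
For context, the TopdownL1 entries above partition issue slots: retired_fraction and slots_lost_misspeculation_fraction divide OP_RETIRED and OP_SPEC - OP_RETIRED by CPU_CYCLES * #slots, and frontend_bound is defined as the remainder. Below is a small sketch of evaluating that partition from raw counts; the values are hypothetical, SLOTS stands in for perf's per-CPU "#slots" literal, and the backend_bound term is a simplified stand-in since the real expression comes from the architecture-standard event file.

SLOTS = 4                       # assumed issue width, stands in for "#slots"
CPU_CYCLES = 1_000_000          # hypothetical raw counter values
OP_RETIRED = 2_400_000
OP_SPEC = 2_800_000
STALL_SLOT_BACKEND = 1_000_000

total_slots = CPU_CYCLES * SLOTS
retired = OP_RETIRED / total_slots               # retired_fraction
misspec = (OP_SPEC - OP_RETIRED) / total_slots   # slots_lost_misspeculation_fraction
backend = STALL_SLOT_BACKEND / total_slots       # simplified backend_bound stand-in
frontend = 1 - (retired + misspec + backend)     # remainder, as in frontend_bound

print(f"retiring {retired:.1%}  bad speculation {misspec:.1%}  "
      f"backend bound {backend:.1%}  frontend bound {frontend:.1%}")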
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json
new file mode 100644
index 000000000000..66d83b680651
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json
@@ -0,0 +1,170 @@
+[
+ {
+ "PublicDescription": "Level 2 data translation buffer allocation",
+ "EventCode": "0xD800",
+ "EventName": "MMU_D_OTB_ALLOC",
+ "BriefDescription": "Level 2 data translation buffer allocation"
+ },
+ {
+ "PublicDescription": "Data TLB translation cache hit on S1L2 walk cache entry",
+ "EventCode": "0xd801",
+ "EventName": "MMU_D_TRANS_CACHE_HIT_S1L2_WALK",
+ "BriefDescription": "Data TLB translation cache hit on S1L2 walk cache entry"
+ },
+ {
+ "PublicDescription": "Data TLB translation cache hit on S1L1 walk cache entry",
+ "EventCode": "0xd802",
+ "EventName": "MMU_D_TRANS_CACHE_HIT_S1L1_WALK",
+ "BriefDescription": "Data TLB translation cache hit on S1L1 walk cache entry"
+ },
+ {
+ "PublicDescription": "Data TLB translation cache hit on S1L0 walk cache entry",
+ "EventCode": "0xd803",
+ "EventName": "MMU_D_TRANS_CACHE_HIT_S1L0_WALK",
+ "BriefDescription": "Data TLB translation cache hit on S1L0 walk cache entry"
+ },
+ {
+ "PublicDescription": "Data TLB translation cache hit on S2L2 walk cache entry",
+ "EventCode": "0xd804",
+ "EventName": "MMU_D_TRANS_CACHE_HIT_S2L2_WALK",
+ "BriefDescription": "Data TLB translation cache hit on S2L2 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Data TLB translation cache hit on S2L1 walk cache entry",
+ "EventCode": "0xd805",
+ "EventName": "MMU_D_TRANS_CACHE_HIT_S2L1_WALK",
+ "BriefDescription": "Data TLB translation cache hit on S2L1 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Data TLB translation cache hit on S2L0 walk cache entry",
+ "EventCode": "0xd806",
+ "EventName": "MMU_D_TRANS_CACHE_HIT_S2L0_WALK",
+ "BriefDescription": "Data TLB translation cache hit on S2L0 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Data-side S1 page walk cache lookup",
+ "EventCode": "0xd807",
+ "EventName": "MMU_D_S1_WALK_CACHE_LOOKUP",
+ "BriefDescription": "Data-side S1 page walk cache lookup"
+ },
+ {
+ "PublicDescrition": "Data-side S1 page walk cache refill",
+ "EventCode": "0xd808",
+ "EventName": "MMU_D_S1_WALK_CACHE_REFILL",
+ "BriefDescription": "Data-side S1 page walk cache refill"
+ },
+ {
+ "PublicDescrition": "Data-side S2 page walk cache lookup",
+ "EventCode": "0xd809",
+ "EventName": "MMU_D_S2_WALK_CACHE_LOOKUP",
+ "BriefDescription": "Data-side S2 page walk cache lookup"
+ },
+ {
+ "PublicDescrition": "Data-side S2 page walk cache refill",
+ "EventCode": "0xd80a",
+ "EventName": "MMU_D_S2_WALK_CACHE_REFILL",
+ "BriefDescription": "Data-side S2 page walk cache refill"
+ },
+ {
+ "PublicDescription": "Data-side S1 table walk fault",
+ "EventCode": "0xD80B",
+ "EventName": "MMU_D_S1_WALK_FAULT",
+ "BriefDescription": "Data-side S1 table walk fault"
+ },
+ {
+ "PublicDescription": "Data-side S2 table walk fault",
+ "EventCode": "0xD80C",
+ "EventName": "MMU_D_S2_WALK_FAULT",
+ "BriefDescription": "Data-side S2 table walk fault"
+ },
+ {
+ "PublicDescription": "Data-side table walk steps or descriptor fetches",
+ "EventCode": "0xD80D",
+ "EventName": "MMU_D_WALK_STEPS",
+ "BriefDescription": "Data-side table walk steps or descriptor fetches"
+ },
+ {
+ "PublicDescription": "Level 2 instruction translation buffer allocation",
+ "EventCode": "0xD900",
+ "EventName": "MMU_I_OTB_ALLOC",
+ "BriefDescription": "Level 2 instruction translation buffer allocation"
+ },
+ {
+ "PublicDescrition": "Instruction TLB translation cache hit on S1L2 walk cache entry",
+ "EventCode": "0xd901",
+ "EventName": "MMU_I_TRANS_CACHE_HIT_S1L2_WALK",
+ "BriefDescription": "Instruction TLB translation cache hit on S1L2 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Instruction TLB translation cache hit on S1L1 walk cache entry",
+ "EventCode": "0xd902",
+ "EventName": "MMU_I_TRANS_CACHE_HIT_S1L1_WALK",
+ "BriefDescription": "Instruction TLB translation cache hit on S1L1 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Instruction TLB translation cache hit on S1L0 walk cache entry",
+ "EventCode": "0xd903",
+ "EventName": "MMU_I_TRANS_CACHE_HIT_S1L0_WALK",
+ "BriefDescription": "Instruction TLB translation cache hit on S1L0 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Instruction TLB translation cache hit on S2L2 walk cache entry",
+ "EventCode": "0xd904",
+ "EventName": "MMU_I_TRANS_CACHE_HIT_S2L2_WALK",
+ "BriefDescription": "Instruction TLB translation cache hit on S2L2 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Instruction TLB translation cache hit on S2L1 walk cache entry",
+ "EventCode": "0xd905",
+ "EventName": "MMU_I_TRANS_CACHE_HIT_S2L1_WALK",
+ "BriefDescription": "Instruction TLB translation cache hit on S2L1 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Instruction TLB translation cache hit on S2L0 walk cache entry",
+ "EventCode": "0xd906",
+ "EventName": "MMU_I_TRANS_CACHE_HIT_S2L0_WALK",
+ "BriefDescription": "Instruction TLB translation cache hit on S2L0 walk cache entry"
+ },
+ {
+ "PublicDescrition": "Instruction-side S1 page walk cache lookup",
+ "EventCode": "0xd907",
+ "EventName": "MMU_I_S1_WALK_CACHE_LOOKUP",
+ "BriefDescription": "Instruction-side S1 page walk cache lookup"
+ },
+ {
+ "PublicDescrition": "Instruction-side S1 page walk cache refill",
+ "EventCode": "0xd908",
+ "EventName": "MMU_I_S1_WALK_CACHE_REFILL",
+ "BriefDescription": "Instruction-side S1 page walk cache refill"
+ },
+ {
+ "PublicDescrition": "Instruction-side S2 page walk cache lookup",
+ "EventCode": "0xd909",
+ "EventName": "MMU_I_S2_WALK_CACHE_LOOKUP",
+ "BriefDescription": "Instruction-side S2 page walk cache lookup"
+ },
+ {
+ "PublicDescrition": "Instruction-side S2 page walk cache refill",
+ "EventCode": "0xd90a",
+ "EventName": "MMU_I_S2_WALK_CACHE_REFILL",
+ "BriefDescription": "Instruction-side S2 page walk cache refill"
+ },
+ {
+ "PublicDescription": "Instruction-side S1 table walk fault",
+ "EventCode": "0xD90B",
+ "EventName": "MMU_I_S1_WALK_FAULT",
+ "BriefDescription": "Instruction-side S1 table walk fault"
+ },
+ {
+ "PublicDescription": "Instruction-side S2 table walk fault",
+ "EventCode": "0xD90C",
+ "EventName": "MMU_I_S2_WALK_FAULT",
+ "BriefDescription": "Instruction-side S2 table walk fault"
+ },
+ {
+ "PublicDescription": "Instruction-side table walk steps or descriptor fetches",
+ "EventCode": "0xD90D",
+ "EventName": "MMU_I_WALK_STEPS",
+ "BriefDescription": "Instruction-side table walk steps or descriptor fetches"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json
new file mode 100644
index 000000000000..2fb2d1f183fc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json
@@ -0,0 +1,41 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND",
+ "Errata": "Errata AC03_CPU_29",
+ "BriefDescription": "Impacted by errata, use metrics instead -"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL",
+ "Errata": "Errata AC03_CPU_29",
+ "BriefDescription": "Impacted by errata, use metrics instead -"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND",
+ "Errata": "Errata AC03_CPU_29",
+ "BriefDescription": "Impacted by errata, use metrics instead -"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ },
+ {
+ "PublicDescription": "Frontend stall cycles, TLB",
+ "EventCode": "0x815c",
+ "EventName": "STALL_FRONTEND_TLB",
+ "BriefDescription": "Frontend stall cycles, TLB"
+ },
+ {
+ "PublicDescription": "Backend stall cycles, TLB",
+ "EventCode": "0x8167",
+ "EventName": "STALL_BACKEND_TLB",
+ "BriefDescription": "Backend stall cycles, TLB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json
new file mode 100644
index 000000000000..20f2165c85fe
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "SAMPLE_POP"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FILTRATE"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_COLLISION"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
index 428605c37d10..5ec157c39f0d 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
@@ -107,7 +107,7 @@
"EventName": "hnf_qos_hh_retry",
"EventidCode": "0xe",
"NodeType": "0x5",
- "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN‑F.",
+ "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN-F.",
"Unit": "arm_cmn",
"Compat": "(434|436|43c|43a).*"
},
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index 5b58db5032c1..f4d1ca4d1493 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -42,3 +42,4 @@
0x00000000480fd010,v1,hisilicon/hip08,core
0x00000000500f0000,v1,ampere/emag,core
0x00000000c00fac30,v1,ampere/ampereone,core
+0x00000000c00fac40,v1,ampere/ampereonex,core
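
For context, the first column of mapfile.csv is a regular expression matched against the CPU identifier (the MIDR on arm64) to pick the event directory; the new row routes AmpereOneX parts to ampere/ampereonex. A rough sketch of that lookup, simplified from what the jevents machinery generates (the MIDR value below is hypothetical):

import re

# Rows as in mapfile.csv: cpuid regex, version, directory, event type.
MAPFILE = [
    ("0x00000000c00fac30", "v1", "ampere/ampereone", "core"),
    ("0x00000000c00fac40", "v1", "ampere/ampereonex", "core"),
]

def events_dir(midr: str) -> str | None:
    for pattern, _version, directory, _etype in MAPFILE:
        if re.match(pattern, midr):
            return directory
    return None

print(events_dir("0x00000000c00fac40"))  # -> ampere/ampereonex

The powerpc and riscv rows changed below work the same way, keyed on the PVR and on the MVENDORID-MARCHID-MIMPID triple respectively.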
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
index f4908af7ad66..599a588dbeb4 100644
--- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -11,8 +11,7 @@
#
# Multiple PVRs could map to a single JSON file.
#
-
-# Power8 entries
0x004[bcd][[:xdigit:]]{4},1,power8,core
+0x0066[[:xdigit:]]{4},1,power8,core
0x004e[[:xdigit:]]{4},1,power9,core
0x0080[[:xdigit:]]{4},1,power10,core
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/datasource.json b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
index 6b0356f2d301..0eeaaf1a95b8 100644
--- a/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
+++ b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json
@@ -100,6 +100,11 @@
"BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss."
},
{
+ "EventCode": "0x0003C0000000C040",
+ "EventName": "PM_DATA_FROM_L2MISS_DSRC",
+ "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss."
+ },
+ {
"EventCode": "0x000380000010C040",
"EventName": "PM_INST_FROM_L2MISS_ALL",
"BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss or prefetch reload."
@@ -161,10 +166,15 @@
},
{
"EventCode": "0x000780000000C040",
- "EventName": "PM_INST_FROM_L3MISS",
+ "EventName": "PM_INST_FROM_L3MISS_DSRC",
"BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss."
},
{
+ "EventCode": "0x0007C0000000C040",
+ "EventName": "PM_DATA_FROM_L3MISS_DSRC",
+ "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss."
+ },
+ {
"EventCode": "0x000780000010C040",
"EventName": "PM_INST_FROM_L3MISS_ALL",
"BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss or prefetch reload."
@@ -981,7 +991,7 @@
},
{
"EventCode": "0x0003C0000000C142",
- "EventName": "PM_MRK_DATA_FROM_L2MISS",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS_DSRC",
"BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction."
},
{
@@ -1046,12 +1056,12 @@
},
{
"EventCode": "0x000780000000C142",
- "EventName": "PM_MRK_INST_FROM_L3MISS",
+ "EventName": "PM_MRK_INST_FROM_L3MISS_DSRC",
"BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
},
{
"EventCode": "0x0007C0000000C142",
- "EventName": "PM_MRK_DATA_FROM_L3MISS",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS_DSRC",
"BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction."
},
{
diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv
index c61b3d6ef616..cfc449b19810 100644
--- a/tools/perf/pmu-events/arch/riscv/mapfile.csv
+++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv
@@ -15,3 +15,5 @@
#
#MVENDORID-MARCHID-MIMPID,Version,Filename,EventType
0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core
+0x5b7-0x0-0x0,v1,thead/c900-legacy,core
+0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core
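
On riscv the lookup key is the MVENDORID-MARCHID-MIMPID triple joined with dashes, which is why the dubhe-80 row can use a character class to cover the two known MARCHID revisions. A hedged sketch of the key construction with hypothetical CSR values; the POSIX [[:xdigit:]] class is rewritten for Python's re module:

import re

mvendorid, marchid, mimpid = 0x67E, 0x80000000DB000080, 0x211  # hypothetical

key = f"{mvendorid:#x}-{marchid:#x}-{mimpid:#x}"
pattern = r"0x67e-0x80000000db0000[89]0-0x[0-9a-f]+"  # [[:xdigit:]] -> [0-9a-f]
print(key, "->", bool(re.match(pattern, key)))  # -> True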
diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json
new file mode 100644
index 000000000000..fbffcacb2ace
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json
@@ -0,0 +1,172 @@
+[
+ {
+ "EventName": "ACCESS_MMU_STLB",
+ "EventCode": "0x1",
+ "BriefDescription": "access MMU STLB"
+ },
+ {
+ "EventName": "MISS_MMU_STLB",
+ "EventCode": "0x2",
+ "BriefDescription": "miss MMU STLB"
+ },
+ {
+ "EventName": "ACCESS_MMU_PTE_C",
+ "EventCode": "0x3",
+ "BriefDescription": "access MMU PTE-Cache"
+ },
+ {
+ "EventName": "MISS_MMU_PTE_C",
+ "EventCode": "0x4",
+ "BriefDescription": "miss MMU PTE-Cache"
+ },
+ {
+ "EventName": "ROB_FLUSH",
+ "EventCode": "0x5",
+ "BriefDescription": "ROB flush (all kinds of exceptions)"
+ },
+ {
+ "EventName": "BTB_PREDICTION_MISS",
+ "EventCode": "0x6",
+ "BriefDescription": "BTB prediction miss"
+ },
+ {
+ "EventName": "ITLB_MISS",
+ "EventCode": "0x7",
+ "BriefDescription": "ITLB miss"
+ },
+ {
+ "EventName": "SYNC_DEL_FETCH_G",
+ "EventCode": "0x8",
+ "BriefDescription": "SYNC delivery a fetch-group"
+ },
+ {
+ "EventName": "ICACHE_MISS",
+ "EventCode": "0x9",
+ "BriefDescription": "ICache miss"
+ },
+ {
+ "EventName": "BPU_BR_RETIRE",
+ "EventCode": "0xA",
+ "BriefDescription": "condition branch instruction retire"
+ },
+ {
+ "EventName": "BPU_BR_MISS",
+ "EventCode": "0xB",
+ "BriefDescription": "condition branch instruction miss"
+ },
+ {
+ "EventName": "RET_INS_RETIRE",
+ "EventCode": "0xC",
+ "BriefDescription": "return instruction retire"
+ },
+ {
+ "EventName": "RET_INS_MISS",
+ "EventCode": "0xD",
+ "BriefDescription": "return instruction miss"
+ },
+ {
+ "EventName": "INDIRECT_JR_MISS",
+ "EventCode": "0xE",
+ "BriefDescription": "indirect JR instruction miss (inlcude without target)"
+ },
+ {
+ "EventName": "IBUF_VAL_ID_NORDY",
+ "EventCode": "0xF",
+ "BriefDescription": "IBUF valid while ID not ready"
+ },
+ {
+ "EventName": "IBUF_NOVAL_ID_RDY",
+ "EventCode": "0x10",
+ "BriefDescription": "IBUF not valid while ID ready"
+ },
+ {
+ "EventName": "REN_INT_PHY_REG_NORDY",
+ "EventCode": "0x11",
+ "BriefDescription": "REN integer physical register file is not ready"
+ },
+ {
+ "EventName": "REN_FP_PHY_REG_NORDY",
+ "EventCode": "0x12",
+ "BriefDescription": "REN floating point physical register file is not ready"
+ },
+ {
+ "EventName": "REN_CP_NORDY",
+ "EventCode": "0x13",
+ "BriefDescription": "REN checkpoint is not ready"
+ },
+ {
+ "EventName": "DEC_VAL_ROB_NORDY",
+ "EventCode": "0x14",
+ "BriefDescription": "DEC is valid and ROB is not ready"
+ },
+ {
+ "EventName": "OOD_FLUSH_LS_DEP",
+ "EventCode": "0x15",
+ "BriefDescription": "out of order flush due to load/store dependency"
+ },
+ {
+ "EventName": "BRU_RET_IJR_INS",
+ "EventCode": "0x16",
+ "BriefDescription": "BRU retire an IJR instruction"
+ },
+ {
+ "EventName": "ACCESS_DTLB",
+ "EventCode": "0x17",
+ "BriefDescription": "access DTLB"
+ },
+ {
+ "EventName": "MISS_DTLB",
+ "EventCode": "0x18",
+ "BriefDescription": "miss DTLB"
+ },
+ {
+ "EventName": "LOAD_INS_DCACHE",
+ "EventCode": "0x19",
+ "BriefDescription": "load instruction access DCache"
+ },
+ {
+ "EventName": "LOAD_INS_MISS_DCACHE",
+ "EventCode": "0x1A",
+ "BriefDescription": "load instruction miss DCache"
+ },
+ {
+ "EventName": "STORE_INS_DCACHE",
+ "EventCode": "0x1B",
+ "BriefDescription": "store/amo instruction access DCache"
+ },
+ {
+ "EventName": "STORE_INS_MISS_DCACHE",
+ "EventCode": "0x1C",
+ "BriefDescription": "store/amo instruction miss DCache"
+ },
+ {
+ "EventName": "LOAD_SCACHE",
+ "EventCode": "0x1D",
+ "BriefDescription": "load access SCache"
+ },
+ {
+ "EventName": "STORE_SCACHE",
+ "EventCode": "0x1E",
+ "BriefDescription": "store access SCache"
+ },
+ {
+ "EventName": "LOAD_MISS_SCACHE",
+ "EventCode": "0x1F",
+ "BriefDescription": "load miss SCache"
+ },
+ {
+ "EventName": "STORE_MISS_SCACHE",
+ "EventCode": "0x20",
+ "BriefDescription": "store miss SCache"
+ },
+ {
+ "EventName": "L2C_PF_REQ",
+ "EventCode": "0x21",
+ "BriefDescription": "L2C data-prefetcher request"
+ },
+ {
+ "EventName": "L2C_PF_HIT",
+ "EventCode": "0x22",
+ "BriefDescription": "L2C data-prefetcher hit"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
new file mode 100644
index 000000000000..9b4a032186a7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
@@ -0,0 +1,68 @@
+[
+ {
+ "ArchStdEvent": "FW_MISALIGNED_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_MISALIGNED_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ILLEGAL_INSN"
+ },
+ {
+ "ArchStdEvent": "FW_SET_TIMER"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json
new file mode 100644
index 000000000000..2b142348d635
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json
@@ -0,0 +1,67 @@
+[
+ {
+ "EventName": "L1_ICACHE_ACCESS",
+ "EventCode": "0x00000001",
+ "BriefDescription": "L1 instruction cache access"
+ },
+ {
+ "EventName": "L1_ICACHE_MISS",
+ "EventCode": "0x00000002",
+ "BriefDescription": "L1 instruction cache miss"
+ },
+ {
+ "EventName": "ITLB_MISS",
+ "EventCode": "0x00000003",
+ "BriefDescription": "I-UTLB miss"
+ },
+ {
+ "EventName": "DTLB_MISS",
+ "EventCode": "0x00000004",
+ "BriefDescription": "D-UTLB miss"
+ },
+ {
+ "EventName": "JTLB_MISS",
+ "EventCode": "0x00000005",
+ "BriefDescription": "JTLB miss"
+ },
+ {
+ "EventName": "L1_DCACHE_READ_ACCESS",
+ "EventCode": "0x0000000c",
+ "BriefDescription": "L1 data cache read access"
+ },
+ {
+ "EventName": "L1_DCACHE_READ_MISS",
+ "EventCode": "0x0000000d",
+ "BriefDescription": "L1 data cache read miss"
+ },
+ {
+ "EventName": "L1_DCACHE_WRITE_ACCESS",
+ "EventCode": "0x0000000e",
+ "BriefDescription": "L1 data cache write access"
+ },
+ {
+ "EventName": "L1_DCACHE_WRITE_MISS",
+ "EventCode": "0x0000000f",
+ "BriefDescription": "L1 data cache write miss"
+ },
+ {
+ "EventName": "LL_CACHE_READ_ACCESS",
+ "EventCode": "0x00000010",
+ "BriefDescription": "LL Cache read access"
+ },
+ {
+ "EventName": "LL_CACHE_READ_MISS",
+ "EventCode": "0x00000011",
+ "BriefDescription": "LL Cache read miss"
+ },
+ {
+ "EventName": "LL_CACHE_WRITE_ACCESS",
+ "EventCode": "0x00000012",
+ "BriefDescription": "LL Cache write access"
+ },
+ {
+ "EventName": "LL_CACHE_WRITE_MISS",
+ "EventCode": "0x00000013",
+ "BriefDescription": "LL Cache write miss"
+ }
+]
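
The access/miss pairs above map directly onto the usual derived rates; a minimal sketch with hypothetical counter values (plain arithmetic, not a perf API):

counts = {                        # hypothetical sample values
    "L1_DCACHE_READ_ACCESS": 1_000_000,
    "L1_DCACHE_READ_MISS": 25_000,
    "L1_DCACHE_WRITE_ACCESS": 400_000,
    "L1_DCACHE_WRITE_MISS": 8_000,
}

for kind in ("READ", "WRITE"):
    rate = counts[f"L1_DCACHE_{kind}_MISS"] / counts[f"L1_DCACHE_{kind}_ACCESS"]
    print(f"L1D {kind.lower()} miss rate: {rate:.2%}")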
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
new file mode 100644
index 000000000000..9b4a032186a7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
@@ -0,0 +1,68 @@
+[
+ {
+ "ArchStdEvent": "FW_MISALIGNED_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_MISALIGNED_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ILLEGAL_INSN"
+ },
+ {
+ "ArchStdEvent": "FW_SET_TIMER"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json
new file mode 100644
index 000000000000..c822b5373333
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json
@@ -0,0 +1,72 @@
+[
+ {
+ "EventName": "INST_BRANCH_MISPREDICT",
+ "EventCode": "0x00000006",
+ "BriefDescription": "Mispredicted branch instructions"
+ },
+ {
+ "EventName": "INST_BRANCH",
+ "EventCode": "0x00000007",
+ "BriefDescription": "Retired branch instructions"
+ },
+ {
+ "EventName": "INST_JMP_MISPREDICT",
+ "EventCode": "0x00000008",
+ "BriefDescription": "Indirect branch mispredict"
+ },
+ {
+ "EventName": "INST_JMP",
+ "EventCode": "0x00000009",
+ "BriefDescription": "Retired jmp instructions"
+ },
+ {
+ "EventName": "INST_STORE",
+ "EventCode": "0x0000000b",
+ "BriefDescription": "Retired store instructions"
+ },
+ {
+ "EventName": "INST_ALU",
+ "EventCode": "0x0000001d",
+ "BriefDescription": "Retired ALU instructions"
+ },
+ {
+ "EventName": "INST_LDST",
+ "EventCode": "0x0000001e",
+ "BriefDescription": "Retired Load/Store instructions"
+ },
+ {
+ "EventName": "INST_VECTOR",
+ "EventCode": "0x0000001f",
+ "BriefDescription": "Retired Vector instructions"
+ },
+ {
+ "EventName": "INST_CSR",
+ "EventCode": "0x00000020",
+ "BriefDescription": "Retired CSR instructions"
+ },
+ {
+ "EventName": "INST_SYNC",
+ "EventCode": "0x00000021",
+ "BriefDescription": "Retired sync instructions (AMO/LR/SC instructions)"
+ },
+ {
+ "EventName": "INST_UNALIGNED_ACCESS",
+ "EventCode": "0x00000022",
+ "BriefDescription": "Retired Store/Load instructions with unaligned memory access"
+ },
+ {
+ "EventName": "INST_ECALL",
+ "EventCode": "0x00000025",
+ "BriefDescription": "Retired ecall instructions"
+ },
+ {
+ "EventName": "INST_LONG_JP",
+ "EventCode": "0x00000026",
+ "BriefDescription": "Retired long jump instructions"
+ },
+ {
+ "EventName": "INST_FP",
+ "EventCode": "0x0000002a",
+ "BriefDescription": "Retired FPU instructions"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json
new file mode 100644
index 000000000000..0ab6f288af91
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json
@@ -0,0 +1,80 @@
+[
+ {
+ "EventName": "LSU_SPEC_FAIL",
+ "EventCode": "0x0000000a",
+ "BriefDescription": "LSU speculation fail"
+ },
+ {
+ "EventName": "IDU_RF_PIPE_FAIL",
+ "EventCode": "0x00000014",
+ "BriefDescription": "Instruction decode unit launch pipeline failed in RF state"
+ },
+ {
+ "EventName": "IDU_RF_REG_FAIL",
+ "EventCode": "0x00000015",
+ "BriefDescription": "Instruction decode unit launch register file fail in RF state"
+ },
+ {
+ "EventName": "IDU_RF_INSTRUCTION",
+ "EventCode": "0x00000016",
+ "BriefDescription": "retired instruction count of Instruction decode unit in RF (Register File) stage"
+ },
+ {
+ "EventName": "LSU_4K_STALL",
+ "EventCode": "0x00000017",
+ "BriefDescription": "LSU stall times for long distance data access (Over 4K)",
+ "PublicDescription": "This stall occurs when translate virtual address with page offset over 4k"
+ },
+ {
+ "EventName": "LSU_OTHER_STALL",
+ "EventCode": "0x00000018",
+ "BriefDescription": "LSU stall times for other reasons (except the 4k stall)"
+ },
+ {
+ "EventName": "LSU_SQ_OTHER_DIS",
+ "EventCode": "0x00000019",
+ "BriefDescription": "LSU store queue discard others"
+ },
+ {
+ "EventName": "LSU_SQ_DATA_DISCARD",
+ "EventCode": "0x0000001a",
+ "BriefDescription": "LSU store queue discard data (uops)"
+ },
+ {
+ "EventName": "BRANCH_DIRECTION_MISPREDICTION",
+ "EventCode": "0x0000001b",
+ "BriefDescription": "Branch misprediction in BTB"
+ },
+ {
+ "EventName": "BRANCH_DIRECTION_PREDICTION",
+ "EventCode": "0x0000001c",
+ "BriefDescription": "All branch prediction in BTB",
+ "PublicDescription": "This event including both successful prediction and failed prediction in BTB"
+ },
+ {
+ "EventName": "INTERRUPT_ACK_COUNT",
+ "EventCode": "0x00000023",
+ "BriefDescription": "acknowledged interrupt count"
+ },
+ {
+ "EventName": "INTERRUPT_OFF_CYCLE",
+ "EventCode": "0x00000024",
+ "BriefDescription": "PLIC arbitration time when the interrupt is not responded",
+ "PublicDescription": "The arbitration time is recorded while meeting any of the following:\n- CPU is M-mode and MIE == 0\n- CPU is S-mode and delegation and SIE == 0\n"
+ },
+ {
+ "EventName": "IFU_STALLED_CYCLE",
+ "EventCode": "0x00000027",
+ "BriefDescription": "Number of stall cycles of the instruction fetch unit (IFU)."
+ },
+ {
+ "EventName": "IDU_STALLED_CYCLE",
+ "EventCode": "0x00000028",
+ "BriefDescription": "hpcp_backend_stall Number of stall cycles of the instruction decoding unit (IDU) and next-level pipeline unit."
+ },
+ {
+ "EventName": "SYNC_STALL",
+ "EventCode": "0x00000029",
+ "BriefDescription": "Sync instruction stall cycle fence/fence.i/sync/sfence"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
index 3388b58b8f1a..bbfa3883e533 100644
--- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -70,12 +70,6 @@
"ScaleUnit": "100%"
},
{
- "BriefDescription": "Uncore frequency per die [GHZ]",
- "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
- "MetricGroup": "SoC",
- "MetricName": "UNCORE_FREQ"
- },
- {
"BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
"MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
"MetricGroup": "smi",
@@ -120,7 +114,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.",
- "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_alloc_restriction",
"MetricThreshold": "tma_alloc_restriction > 0.1",
@@ -130,7 +124,7 @@
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "TOPDOWN_BE_BOUND.ALL / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.ALL@ / tma_info_core_slots",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_backend_bound",
"MetricThreshold": "tma_backend_bound > 0.1",
@@ -175,7 +169,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
- "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_DETECT@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_detect",
"MetricThreshold": "tma_branch_detect > 0.05",
@@ -185,7 +179,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MISPREDICT@ / tma_info_core_slots",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
"MetricThreshold": "tma_branch_mispredicts > 0.05",
@@ -195,7 +189,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
- "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.BRANCH_RESTEER@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteer",
"MetricThreshold": "tma_branch_resteer > 0.05",
@@ -204,7 +198,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
- "MetricExpr": "TOPDOWN_FE_BOUND.CISC / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.CISC@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_cisc",
"MetricThreshold": "tma_cisc > 0.05",
@@ -223,7 +217,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
- "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.DECODE@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_decode",
"MetricThreshold": "tma_decode > 0.05",
@@ -241,7 +235,6 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
@@ -251,7 +244,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.FASTNUKE@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_fast_nuke",
"MetricThreshold": "tma_fast_nuke > 0.05",
@@ -260,7 +253,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
- "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH@ / tma_info_core_slots",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_fetch_bandwidth",
"MetricThreshold": "tma_fetch_bandwidth > 0.1",
@@ -270,7 +263,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
- "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.FRONTEND_LATENCY@ / tma_info_core_slots",
"MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
"MetricName": "tma_fetch_latency",
"MetricThreshold": "tma_fetch_latency > 0.15",
@@ -289,7 +282,7 @@
},
{
"BriefDescription": "Counts the number of floating point divide operations per uop.",
- "MetricExpr": "UOPS_RETIRED.FPDIV / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@UOPS_RETIRED.FPDIV@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
"MetricName": "tma_fpdiv_uops",
"MetricThreshold": "tma_fpdiv_uops > 0.2",
@@ -299,7 +292,7 @@
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "TOPDOWN_FE_BOUND.ALL / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ALL@ / tma_info_core_slots",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_frontend_bound",
"MetricThreshold": "tma_frontend_bound > 0.2",
@@ -309,7 +302,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
- "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ICACHE@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05",
@@ -336,7 +329,7 @@
},
{
"BriefDescription": "Instructions Per Cycle",
- "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / tma_info_core_clks",
"MetricName": "tma_info_core_ipc",
"Unit": "cpu_atom"
},
@@ -348,7 +341,7 @@
},
{
"BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.ALL / INST_RETIRED.ANY",
+ "MetricExpr": "cpu_atom@UOPS_RETIRED.ALL@ / INST_RETIRED.ANY",
"MetricName": "tma_info_core_upi",
"Unit": "cpu_atom"
},
@@ -372,13 +365,13 @@
},
{
"BriefDescription": "Ratio of all branches which mispredict",
- "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.ALL_BRANCHES",
"MetricName": "tma_info_inst_mix_branch_mispredict_ratio",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Ratio between Mispredicted branches and unknown branches",
- "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+ "MetricExpr": "cpu_atom@BR_MISP_RETIRED.ALL_BRANCHES@ / BACLEARS.ANY",
"MetricName": "tma_info_inst_mix_branch_mispredict_to_unknown_branch_ratio",
"Unit": "cpu_atom"
},
@@ -396,61 +389,61 @@
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES",
"MetricName": "tma_info_inst_mix_ipbranch",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_INST_RETIRED.CALL",
"MetricName": "tma_info_inst_mix_ipcall",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per Far Branch",
- "MetricExpr": "INST_RETIRED.ANY / (cpu_atom@BR_INST_RETIRED.FAR_BRANCH@ / 2)",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_INST_RETIRED.FAR_BRANCH@ / 2)",
"MetricName": "tma_info_inst_mix_ipfarbranch",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per Load",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_LOADS",
"MetricName": "tma_info_inst_mix_ipload",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
- "MetricExpr": "INST_RETIRED.ANY / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / (cpu_atom@BR_MISP_RETIRED.COND@ - cpu_atom@BR_MISP_RETIRED.COND_TAKEN@)",
"MetricName": "tma_info_inst_mix_ipmisp_cond_ntaken",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN",
"MetricName": "tma_info_inst_mix_ipmisp_cond_taken",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.INDIRECT",
"MetricName": "tma_info_inst_mix_ipmisp_indirect",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per retired return Branch Misprediction",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RETURN",
"MetricName": "tma_info_inst_mix_ipmisp_ret",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per retired Branch Misprediction",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricName": "tma_info_inst_mix_ipmispredict",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Instructions per Store",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricExpr": "cpu_atom@INST_RETIRED.ANY@ / MEM_UOPS_RETIRED.ALL_STORES",
"MetricName": "tma_info_inst_mix_ipstore",
"Unit": "cpu_atom"
},
@@ -486,19 +479,19 @@
},
{
"BriefDescription": "Cycle cost per DRAM hit",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_DRAM_HIT@ / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"MetricName": "tma_info_memory_cycles_per_demand_load_dram_hit",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Cycle cost per L2 hit",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / MEM_LOAD_UOPS_RETIRED.L2_HIT",
"MetricName": "tma_info_memory_cycles_per_demand_load_l2_hit",
"Unit": "cpu_atom"
},
{
"BriefDescription": "Cycle cost per LLC hit",
- "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / MEM_LOAD_UOPS_RETIRED.L3_HIT",
"MetricName": "tma_info_memory_cycles_per_demand_load_l3_hit",
"Unit": "cpu_atom"
},
@@ -510,7 +503,7 @@
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
"MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_atom"
},
@@ -530,7 +523,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
- "MetricExpr": "TOPDOWN_FE_BOUND.ITLB / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.ITLB@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05",
@@ -539,7 +532,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.",
- "MetricExpr": "LD_HEAD.L1_BOUND_AT_RET / tma_info_core_clks",
+ "MetricExpr": "cpu_atom@LD_HEAD.L1_BOUND_AT_RET@ / tma_info_core_clks",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l1_bound",
"MetricThreshold": "tma_l1_bound > 0.1",
@@ -548,7 +541,6 @@
},
{
"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_L2_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
@@ -558,7 +550,6 @@
},
{
"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / tma_info_core_clks - max((cpu_atom@MEM_BOUND_STALLS.LOAD@ - cpu_atom@LD_HEAD.L1_MISS_AT_RET@) / tma_info_core_clks, 0) * cpu_atom@MEM_BOUND_STALLS.LOAD_LLC_HIT@ / cpu_atom@MEM_BOUND_STALLS.LOAD@",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -577,7 +568,7 @@
},
{
"BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS@ / tma_info_core_slots",
"MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_machine_clears",
"MetricThreshold": "tma_machine_clears > 0.05",
@@ -587,7 +578,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
- "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.MEM_SCHEDULER@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_mem_scheduler",
"MetricThreshold": "tma_mem_scheduler > 0.1",
@@ -596,7 +587,7 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to stores or loads.",
- "MetricExpr": "min(cpu_atom@TOPDOWN_BE_BOUND.ALL@ / tma_info_core_slots, cpu_atom@LD_HEAD.ANY_AT_RET@ / tma_info_core_clks + tma_store_bound)",
+ "MetricExpr": "min(tma_backend_bound, cpu_atom@LD_HEAD.ANY_AT_RET@ / tma_info_core_clks + tma_store_bound)",
"MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"MetricThreshold": "tma_memory_bound > 0.2",
@@ -615,7 +606,7 @@
},
{
"BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS)",
- "MetricExpr": "UOPS_RETIRED.MS / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@UOPS_RETIRED.MS@ / tma_info_core_slots",
"MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
"MetricName": "tma_ms_uops",
"MetricThreshold": "tma_ms_uops > 0.05",
@@ -626,7 +617,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
- "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_non_mem_scheduler",
"MetricThreshold": "tma_non_mem_scheduler > 0.1",
@@ -635,7 +626,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).",
- "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BAD_SPECULATION.NUKE@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
"MetricName": "tma_nuke",
"MetricThreshold": "tma_nuke > 0.05",
@@ -644,7 +635,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
- "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.OTHER@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_other_fb",
"MetricThreshold": "tma_other_fb > 0.05",
@@ -653,7 +644,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.",
- "MetricExpr": "LD_HEAD.OTHER_AT_RET / tma_info_core_clks",
+ "MetricExpr": "cpu_atom@LD_HEAD.OTHER_AT_RET@ / tma_info_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_other_l1",
"MetricThreshold": "tma_other_l1 > 0.05",
@@ -689,7 +680,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
- "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_FE_BOUND.PREDECODE@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
"MetricName": "tma_predecode",
"MetricThreshold": "tma_predecode > 0.05",
@@ -698,7 +689,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
- "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REGISTER@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_register",
"MetricThreshold": "tma_register > 0.1",
@@ -707,7 +698,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
- "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.REORDER_BUFFER@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_reorder_buffer",
"MetricThreshold": "tma_reorder_buffer > 0.1",
@@ -728,7 +719,7 @@
{
"BriefDescription": "Counts the number of issue slots that result in retirement slots.",
"DefaultMetricgroupName": "TopdownL1",
- "MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_RETIRING.ALL@ / tma_info_core_slots",
"MetricGroup": "Default;TopdownL1;tma_L1_group",
"MetricName": "tma_retiring",
"MetricThreshold": "tma_retiring > 0.75",
@@ -747,7 +738,7 @@
},
{
"BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
- "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / tma_info_core_slots",
+ "MetricExpr": "cpu_atom@TOPDOWN_BE_BOUND.SERIALIZATION@ / tma_info_core_slots",
"MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
"MetricName": "tma_serialization",
"MetricThreshold": "tma_serialization > 0.1",
@@ -774,7 +765,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.",
- "MetricExpr": "LD_HEAD.DTLB_MISS_AT_RET / tma_info_core_clks",
+ "MetricExpr": "cpu_atom@LD_HEAD.DTLB_MISS_AT_RET@ / tma_info_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_stlb_hit",
"MetricThreshold": "tma_stlb_hit > 0.05",
@@ -783,7 +774,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.",
- "MetricExpr": "LD_HEAD.PGWALK_AT_RET / tma_info_core_clks",
+ "MetricExpr": "cpu_atom@LD_HEAD.PGWALK_AT_RET@ / tma_info_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_stlb_miss",
"MetricThreshold": "tma_stlb_miss > 0.05",
@@ -801,8 +792,7 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
+ "MetricExpr": "cpu_atom@LD_HEAD.ST_ADDR_AT_RET@ / tma_info_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
"MetricThreshold": "tma_store_fwd_blk > 0.05",
@@ -810,6 +800,13 @@
"Unit": "cpu_atom"
},
{
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ",
+ "Unit": "cpu_core"
+ },
+ {
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
"MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
@@ -874,7 +871,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
- "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_thread_clks + tma_unknown_branches",
+ "MetricExpr": "cpu_core@INT_MISC.CLEAR_RESTEER_CYCLES@ / tma_info_thread_clks + tma_unknown_branches",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_branch_resteers",
"MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
@@ -904,7 +901,6 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(25 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) + 24 * tma_info_system_average_frequency * cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS@) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
@@ -926,7 +922,6 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "24 * tma_info_system_average_frequency * (cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD@ + cpu_core@MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD@ * (1 - cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ / (cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM@ + cpu_core@OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD@))) * (1 + cpu_core@MEM_LOAD_RETIRED.FB_HIT@ / cpu_core@MEM_LOAD_RETIRED.L1_MISS@ / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
@@ -947,7 +942,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
- "MetricExpr": "ARITH.DIV_ACTIVE / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@ARITH.DIV_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
"MetricName": "tma_divider",
"MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
@@ -957,7 +952,6 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
@@ -978,7 +972,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_dsb_switches",
"MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
@@ -1018,7 +1012,7 @@
},
{
"BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
- "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.FB_FULL@ / tma_info_thread_clks",
"MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
"MetricName": "tma_fb_full",
"MetricThreshold": "tma_fb_full > 0.3",
@@ -1153,7 +1147,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
- "MetricExpr": "ICACHE_DATA.STALLS / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_icache_misses",
"MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
@@ -1163,7 +1157,6 @@
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;BrMispredicts;tma_issueBM",
"MetricName": "tma_info_bad_spec_branch_misprediction_cost",
@@ -1172,7 +1165,7 @@
},
{
"BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_NTAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_ntaken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_ntaken < 200",
@@ -1180,7 +1173,7 @@
},
{
"BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.COND_TAKEN",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_cond_taken",
"MetricThreshold": "tma_info_bad_spec_ipmisp_cond_taken < 200",
@@ -1196,7 +1189,7 @@
},
{
"BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.RET",
"MetricGroup": "Bad;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmisp_ret",
"MetricThreshold": "tma_info_bad_spec_ipmisp_ret < 500",
@@ -1204,7 +1197,7 @@
},
{
"BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;BadSpec;BrMispredicts",
"MetricName": "tma_info_bad_spec_ipmispredict",
"MetricThreshold": "tma_info_bad_spec_ipmispredict < 200",
@@ -1212,7 +1205,6 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
"MetricName": "tma_info_botlnk_l0_core_bound_likely",
@@ -1221,7 +1213,6 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
@@ -1231,7 +1222,6 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -1241,7 +1231,6 @@
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
"MetricName": "tma_info_bottleneck_big_code",
@@ -1260,7 +1249,6 @@
},
{
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
"MetricGroup": "Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
@@ -1269,7 +1257,6 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -1279,7 +1266,6 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
@@ -1289,7 +1275,6 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
"MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_memory_latency",
@@ -1299,7 +1284,6 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
@@ -1316,14 +1300,14 @@
},
{
"BriefDescription": "Fraction of branches that are non-taken conditionals",
- "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_NTAKEN@ / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
"MetricName": "tma_info_branches_cond_nt",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of branches that are taken conditionals",
- "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.COND_TAKEN@ / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;Branches;CodeGen;PGO",
"MetricName": "tma_info_branches_cond_tk",
"Unit": "cpu_core"
@@ -1351,7 +1335,7 @@
},
{
"BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / tma_info_core_core_clks",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_core_core_clks",
"MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
"MetricName": "tma_info_core_coreipc",
"Unit": "cpu_core"
@@ -1373,14 +1357,14 @@
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@)",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / (cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@ / 2 if #SMT_on else cpu_core@UOPS_EXECUTED.CORE_CYCLES_GE_1@)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "tma_info_core_ilp",
"Unit": "cpu_core"
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / cpu_core@UOPS_ISSUED.ANY@",
+ "MetricExpr": "cpu_core@IDQ.DSB_UOPS@ / cpu_core@UOPS_ISSUED.ANY@",
"MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
"MetricName": "tma_info_frontend_dsb_coverage",
"MetricThreshold": "tma_info_frontend_dsb_coverage < 0.7 & tma_info_thread_ipc / 6 > 0.35",
@@ -1389,28 +1373,28 @@
},
{
"BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
- "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricExpr": "cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES@ / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
"MetricGroup": "DSBmiss",
"MetricName": "tma_info_frontend_dsb_switch_cost",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average number of Uops issued by front-end when it issued something",
- "MetricExpr": "UOPS_ISSUED.ANY / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricExpr": "cpu_core@UOPS_ISSUED.ANY@ / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
"MetricGroup": "Fed;FetchBW",
"MetricName": "tma_info_frontend_fetch_upc",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average Latency for L1 instruction cache misses",
- "MetricExpr": "ICACHE_DATA.STALLS / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
+ "MetricExpr": "cpu_core@ICACHE_DATA.STALLS@ / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
"MetricGroup": "Fed;FetchLat;IcMiss",
"MetricName": "tma_info_frontend_icache_miss_latency",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FRONTEND_RETIRED.ANY_DSB_MISS",
"MetricGroup": "DSBmiss;Fed",
"MetricName": "tma_info_frontend_ipdsb_miss_ret",
"MetricThreshold": "tma_info_frontend_ipdsb_miss_ret < 50",
@@ -1439,14 +1423,14 @@
},
{
"BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
- "MetricExpr": "LSD.UOPS / cpu_core@UOPS_ISSUED.ANY@",
+ "MetricExpr": "cpu_core@LSD.UOPS@ / cpu_core@UOPS_ISSUED.ANY@",
"MetricGroup": "Fed;LSD",
"MetricName": "tma_info_frontend_lsd_coverage",
"Unit": "cpu_core"
},
{
"BriefDescription": "Branch instructions per taken branch.",
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricExpr": "cpu_core@BR_INST_RETIRED.ALL_BRANCHES@ / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_bptkbranch",
"Unit": "cpu_core"
@@ -1461,7 +1445,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_iparith",
"MetricThreshold": "tma_info_inst_mix_iparith < 10",
@@ -1470,7 +1454,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx128",
"MetricThreshold": "tma_info_inst_mix_iparith_avx128 < 10",
@@ -1479,7 +1463,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
"MetricGroup": "Flops;FpVector;InsType",
"MetricName": "tma_info_inst_mix_iparith_avx256",
"MetricThreshold": "tma_info_inst_mix_iparith_avx256 < 10",
@@ -1488,7 +1472,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_dp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_dp < 10",
@@ -1497,7 +1481,7 @@
},
{
"BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"MetricGroup": "Flops;FpScalar;InsType",
"MetricName": "tma_info_inst_mix_iparith_scalar_sp",
"MetricThreshold": "tma_info_inst_mix_iparith_scalar_sp < 10",
@@ -1506,7 +1490,7 @@
},
{
"BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.ALL_BRANCHES",
"MetricGroup": "Branches;Fed;InsType",
"MetricName": "tma_info_inst_mix_ipbranch",
"MetricThreshold": "tma_info_inst_mix_ipbranch < 8",
@@ -1514,7 +1498,7 @@
},
{
"BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_CALL",
"MetricGroup": "Branches;Fed;PGO",
"MetricName": "tma_info_inst_mix_ipcall",
"MetricThreshold": "tma_info_inst_mix_ipcall < 200",
@@ -1522,7 +1506,7 @@
},
{
"BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.SCALAR_DOUBLE@ + 2 * cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE@ + 4 * (cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE@ + cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE@) + 8 * cpu_core@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE@)",
"MetricGroup": "Flops;InsType",
"MetricName": "tma_info_inst_mix_ipflop",
"MetricThreshold": "tma_info_inst_mix_ipflop < 10",
@@ -1530,7 +1514,7 @@
},
{
"BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_LOADS",
"MetricGroup": "InsType",
"MetricName": "tma_info_inst_mix_ipload",
"MetricThreshold": "tma_info_inst_mix_ipload < 3",
@@ -1538,7 +1522,7 @@
},
{
"BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / MEM_INST_RETIRED.ALL_STORES",
"MetricGroup": "InsType",
"MetricName": "tma_info_inst_mix_ipstore",
"MetricThreshold": "tma_info_inst_mix_ipstore < 8",
@@ -1546,7 +1530,7 @@
},
{
"BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
- "MetricExpr": "INST_RETIRED.ANY / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
"MetricGroup": "Prefetches",
"MetricName": "tma_info_inst_mix_ipswpf",
"MetricThreshold": "tma_info_inst_mix_ipswpf < 100",
@@ -1554,7 +1538,7 @@
},
{
"BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / BR_INST_RETIRED.NEAR_TAKEN",
"MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
"MetricName": "tma_info_inst_mix_iptb",
"MetricThreshold": "tma_info_inst_mix_iptb < 13",
@@ -1654,14 +1638,14 @@
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / MEM_LOAD_COMPLETED.L1_MISS_ANY",
"MetricGroup": "Mem;MemoryBound;MemoryLat",
"MetricName": "tma_info_memory_load_miss_real_latency",
"Unit": "cpu_core"
},
{
"BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricExpr": "cpu_core@L1D_PEND_MISS.PENDING@ / L1D_PEND_MISS.PENDING_CYCLES",
"MetricGroup": "Mem;MemoryBW;MemoryBound",
"MetricName": "tma_info_memory_mlp",
"PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
@@ -1669,28 +1653,28 @@
},
{
"BriefDescription": "Average Parallel L2 cache miss data reads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD@ / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_oro_data_l2_mlp",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average Latency for L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / OFFCORE_REQUESTS.DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_oro_load_l2_miss_latency",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average Parallel L2 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD@ / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
"MetricGroup": "Memory_BW;Offcore",
"MetricName": "tma_info_memory_oro_load_l2_mlp",
"Unit": "cpu_core"
},
{
"BriefDescription": "Average Latency for L3 cache miss demand Loads",
- "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricExpr": "cpu_core@OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
"MetricGroup": "Memory_Lat;Offcore",
"MetricName": "tma_info_memory_oro_load_l3_miss_latency",
"Unit": "cpu_core"
@@ -1754,14 +1738,14 @@
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
- "MetricExpr": "UOPS_EXECUTED.THREAD / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
"MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
"MetricName": "tma_info_pipeline_execute",
"Unit": "cpu_core"
},
{
"BriefDescription": "Instructions per a microcode Assist invocation",
- "MetricExpr": "INST_RETIRED.ANY / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
"MetricGroup": "Pipeline;Ret;Retire",
"MetricName": "tma_info_pipeline_ipassist",
"MetricThreshold": "tma_info_pipeline_ipassist < 100e3",
@@ -1777,7 +1761,7 @@
},
{
"BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
- "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricExpr": "cpu_core@INST_RETIRED.REP_ITERATION@ / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
"MetricGroup": "Pipeline;Ret",
"MetricName": "tma_info_pipeline_strings_cycles",
"MetricThreshold": "tma_info_pipeline_strings_cycles > 0.1",
@@ -1792,7 +1776,7 @@
},
{
"BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.REF_TSC@ / TSC",
"MetricGroup": "HPC;Summary",
"MetricName": "tma_info_system_cpu_utilization",
"Unit": "cpu_core"
@@ -1815,7 +1799,7 @@
},
{
"BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
- "MetricExpr": "INST_RETIRED.ANY / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / cpu_core@BR_INST_RETIRED.FAR_BRANCH@u",
"MetricGroup": "Branches;OS",
"MetricName": "tma_info_system_ipfarbranch",
"MetricThreshold": "tma_info_system_ipfarbranch < 1e6",
@@ -1838,7 +1822,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu_core@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@",
+ "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
@@ -1846,6 +1830,7 @@
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.RD + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.RD",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
@@ -1854,6 +1839,7 @@
},
{
"BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.ALL",
"MetricGroup": "Mem;SoC",
"MetricName": "tma_info_system_mem_request_latency",
@@ -1896,7 +1882,7 @@
},
{
"BriefDescription": "The ratio of Executed- by Issued-Uops",
- "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.THREAD@ / UOPS_ISSUED.ANY",
"MetricGroup": "Cor;Pipeline",
"MetricName": "tma_info_thread_execute_per_issue",
"PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
@@ -1904,7 +1890,7 @@
},
{
"BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@INST_RETIRED.ANY@ / tma_info_thread_clks",
"MetricGroup": "Ret;Summary",
"MetricName": "tma_info_thread_ipc",
"Unit": "cpu_core"
@@ -1971,7 +1957,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
- "MetricExpr": "ICACHE_TAG.STALLS / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@ICACHE_TAG.STALLS@ / tma_info_thread_clks",
"MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
"MetricName": "tma_itlb_misses",
"MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
@@ -1991,7 +1977,6 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L1D_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
@@ -2002,7 +1987,6 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(cpu_core@MEMORY_ACTIVITY.STALLS_L2_MISS@ - cpu_core@MEMORY_ACTIVITY.STALLS_L3_MISS@) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -2023,7 +2007,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
- "MetricExpr": "DECODE.LCP / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@DECODE.LCP@ / tma_info_thread_clks",
"MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
"MetricName": "tma_lcp",
"MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
@@ -2044,7 +2028,7 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
- "MetricExpr": "UOPS_DISPATCHED.PORT_2_3_10 / (3 * tma_info_core_core_clks)",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_2_3_10@ / (3 * tma_info_core_core_clks)",
"MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
"MetricName": "tma_load_op_utilization",
"MetricThreshold": "tma_load_op_utilization > 0.6",
@@ -2063,7 +2047,7 @@
},
{
"BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
- "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@DTLB_LOAD_MISSES.WALK_ACTIVE@ / tma_info_thread_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
"MetricName": "tma_load_stlb_miss",
"MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
@@ -2072,7 +2056,6 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(16 * max(0, cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ - cpu_core@L2_RQSTS.ALL_RFO@) + cpu_core@MEM_INST_RETIRED.LOCK_LOADS@ / cpu_core@MEM_INST_RETIRED.ALL_STORES@ * (10 * cpu_core@L2_RQSTS.RFO_HIT@ + min(cpu_core@CPU_CLK_UNHALTED.THREAD@, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO@))) / tma_info_thread_clks",
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
@@ -2135,6 +2118,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * cpu_core@MISC2_RETIRED.LFENCE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
@@ -2144,7 +2128,6 @@
},
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_light_operations * cpu_core@MEM_UOP_RETIRED.ANY@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -2154,7 +2137,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
- "MetricExpr": "UOPS_RETIRED.MS / tma_info_thread_slots",
+ "MetricExpr": "cpu_core@UOPS_RETIRED.MS@ / tma_info_thread_slots",
"MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
"MetricName": "tma_microcode_sequencer",
"MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
@@ -2224,7 +2207,6 @@
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -2245,7 +2227,7 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
- "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_core_clks",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_0@ / tma_info_core_core_clks",
"MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_0",
"MetricThreshold": "tma_port_0 > 0.6",
@@ -2255,7 +2237,7 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
- "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_core_clks",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_1@ / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_1",
"MetricThreshold": "tma_port_1 > 0.6",
@@ -2265,7 +2247,7 @@
},
{
"BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
- "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_core_clks",
+ "MetricExpr": "cpu_core@UOPS_DISPATCHED.PORT_6@ / tma_info_core_core_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
"MetricName": "tma_port_6",
"MetricThreshold": "tma_port_6 > 0.6",
@@ -2295,7 +2277,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@EXE_ACTIVITY.1_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_1",
"MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -2305,7 +2287,8 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "cpu_core@EXE_ACTIVITY.2_PORTS_UTIL@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
"MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -2315,7 +2298,8 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
- "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "cpu_core@UOPS_EXECUTED.CYCLES_GE_3@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
"MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
@@ -2337,7 +2321,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
- "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@RESOURCE_STALLS.SCOREBOARD@ / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
"MetricName": "tma_serializing_operation",
"MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
@@ -2347,7 +2331,7 @@
},
{
"BriefDescription": "This metric represents Shuffle (cross \"vector lane\" data transfers) uops fraction the CPU has retired.",
- "MetricExpr": "INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_thread_slots)",
+ "MetricExpr": "cpu_core@INT_VEC_RETIRED.SHUFFLES@ / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group",
"MetricName": "tma_shuffles",
"MetricThreshold": "tma_shuffles > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
@@ -2356,7 +2340,8 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
- "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "cpu_core@CPU_CLK_UNHALTED.PAUSE@ / tma_info_thread_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
"MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
@@ -2376,8 +2361,7 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
- "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
+ "MetricExpr": "cpu_core@MEM_INST_RETIRED.SPLIT_STORES@ / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
"MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
@@ -2397,7 +2381,7 @@
},
{
"BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
- "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@EXE_ACTIVITY.BOUND_ON_STORES@ / tma_info_thread_clks",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_store_bound",
"MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
@@ -2407,7 +2391,6 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * cpu_core@LD_BLOCKS.STORE_FORWARD@ / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
@@ -2447,7 +2430,7 @@
},
{
"BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
- "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_core_clks",
+ "MetricExpr": "cpu_core@DTLB_STORE_MISSES.WALK_ACTIVE@ / tma_info_core_core_clks",
"MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
"MetricName": "tma_store_stlb_miss",
"MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
@@ -2466,7 +2449,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
- "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_thread_clks",
+ "MetricExpr": "cpu_core@INT_MISC.UNKNOWN_BRANCH_CYCLES@ / tma_info_thread_clks",
"MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
"MetricName": "tma_unknown_branches",
"MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
index c150c14ac6ed..a35edf7d86a9 100644
--- a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
@@ -195,7 +195,6 @@
},
{
"BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
@@ -457,7 +456,6 @@
},
{
"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
@@ -466,7 +464,6 @@
},
{
"BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_core_clks - max((MEM_BOUND_STALLS.LOAD - LD_HEAD.L1_MISS_AT_RET) / tma_info_core_clks, 0) * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
"MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -683,7 +680,6 @@
},
{
"BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json b/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json
new file mode 100644
index 000000000000..55263e5e4f69
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json
@@ -0,0 +1,101 @@
+[
+ {
+ "EventName": "umc_mem_clk",
+ "PublicDescription": "Number of memory clock cycles.",
+ "EventCode": "0x00",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_act_cmd.all",
+ "PublicDescription": "Number of ACTIVATE commands sent.",
+ "EventCode": "0x05",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_act_cmd.rd",
+ "PublicDescription": "Number of ACTIVATE commands sent for reads.",
+ "EventCode": "0x05",
+ "RdWrMask": "0x1",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_act_cmd.wr",
+ "PublicDescription": "Number of ACTIVATE commands sent for writes.",
+ "EventCode": "0x05",
+ "RdWrMask": "0x2",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_pchg_cmd.all",
+ "PublicDescription": "Number of PRECHARGE commands sent.",
+ "EventCode": "0x06",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_pchg_cmd.rd",
+ "PublicDescription": "Number of PRECHARGE commands sent for reads.",
+ "EventCode": "0x06",
+ "RdWrMask": "0x1",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_pchg_cmd.wr",
+ "PublicDescription": "Number of PRECHARGE commands sent for writes.",
+ "EventCode": "0x06",
+ "RdWrMask": "0x2",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_cas_cmd.all",
+ "PublicDescription": "Number of CAS commands sent.",
+ "EventCode": "0x0a",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_cas_cmd.rd",
+ "PublicDescription": "Number of CAS commands sent for reads.",
+ "EventCode": "0x0a",
+ "RdWrMask": "0x1",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_cas_cmd.wr",
+ "PublicDescription": "Number of CAS commands sent for writes.",
+ "EventCode": "0x0a",
+ "RdWrMask": "0x2",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_data_slot_clks.all",
+ "PublicDescription": "Number of clocks used by the data bus.",
+ "EventCode": "0x14",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_data_slot_clks.rd",
+ "PublicDescription": "Number of clocks used by the data bus for reads.",
+ "EventCode": "0x14",
+ "RdWrMask": "0x1",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ },
+ {
+ "EventName": "umc_data_slot_clks.wr",
+ "PublicDescription": "Number of clocks used by the data bus for writes.",
+ "EventCode": "0x14",
+ "RdWrMask": "0x2",
+ "PerPkg": "1",
+ "Unit": "UMCPMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json
index 5e6a793acf7b..96e06401c6cb 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json
@@ -330,5 +330,74 @@
"MetricGroup": "data_fabric",
"PerPkg": "1",
"ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "umc_data_bus_utilization",
+ "BriefDescription": "Memory controller data bus utilization.",
+ "MetricExpr": "d_ratio(umc_data_slot_clks.all / 2, umc_mem_clk)",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "umc_cas_cmd_rate",
+ "BriefDescription": "Memory controller CAS command rate.",
+ "MetricExpr": "d_ratio(umc_cas_cmd.all * 1000, umc_mem_clk)",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1"
+ },
+ {
+ "MetricName": "umc_cas_cmd_read_ratio",
+ "BriefDescription": "Ratio of memory controller CAS commands for reads.",
+ "MetricExpr": "d_ratio(umc_cas_cmd.rd, umc_cas_cmd.all)",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "umc_cas_cmd_write_ratio",
+ "BriefDescription": "Ratio of memory controller CAS commands for writes.",
+ "MetricExpr": "d_ratio(umc_cas_cmd.wr, umc_cas_cmd.all)",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "umc_mem_read_bandwidth",
+ "BriefDescription": "Estimated memory read bandwidth.",
+ "MetricExpr": "(umc_cas_cmd.rd * 64) / 1e6 / duration_time",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "MetricName": "umc_mem_write_bandwidth",
+ "BriefDescription": "Estimated memory write bandwidth.",
+ "MetricExpr": "(umc_cas_cmd.wr * 64) / 1e6 / duration_time",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "MetricName": "umc_mem_bandwidth",
+ "BriefDescription": "Estimated combined memory bandwidth.",
+ "MetricExpr": "(umc_cas_cmd.all * 64) / 1e6 / duration_time",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1",
+ "ScaleUnit": "1MB/s"
+ },
+ {
+ "MetricName": "umc_activate_cmd_rate",
+ "BriefDescription": "Memory controller ACTIVATE command rate.",
+ "MetricExpr": "d_ratio(umc_act_cmd.all * 1000, umc_mem_clk)",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1"
+ },
+ {
+ "MetricName": "umc_precharge_cmd_rate",
+ "BriefDescription": "Memory controller PRECHARGE command rate.",
+ "MetricExpr": "d_ratio(umc_pchg_cmd.all * 1000, umc_mem_clk)",
+ "MetricGroup": "memory_controller",
+ "PerPkg": "1"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 84c132af3dfa..8bc6c0707856 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1863,6 +1863,12 @@
"ScaleUnit": "1GHz"
},
{
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+ "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+ "MetricName": "upi_data_receive_bw",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
"MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
"MetricName": "upi_data_transmit_bw",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
index 4a9d211e9d4f..1bdefaf96287 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json
@@ -23,27 +23,48 @@
"UMask": "0x10"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"SampleAfterValue": "2000003",
"UMask": "0x4"
},
{
+ "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
index 6dcf3b763af4..1f8200fb8964 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json
@@ -1,21 +1,5 @@
[
{
- "BriefDescription": "AMX retired arithmetic BF16 operations.",
- "EventCode": "0xce",
- "EventName": "AMX_OPS_RETIRED.BF16",
- "PublicDescription": "Number of AMX-based retired arithmetic bfloat16 (BF16) floating-point operations. Counts TDPBF16PS FP instructions. SW to use operation multiplier of 4",
- "SampleAfterValue": "1000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "AMX retired arithmetic integer 8-bit operations.",
- "EventCode": "0xce",
- "EventName": "AMX_OPS_RETIRED.INT8",
- "PublicDescription": "Number of AMX-based retired arithmetic integer operations of 8-bit width source operands. Counts TDPB[SS,UU,US,SU]D instructions. SW should use operation multiplier of 8.",
- "SampleAfterValue": "1000003",
- "UMask": "0x1"
- },
- {
"BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
"CounterMask": "1",
"Deprecated": "1",
@@ -505,7 +489,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
index 09d840c7da4c..65d088556bae 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json
@@ -4825,11 +4825,11 @@
"Unit": "M3UPI"
},
{
- "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bouncable)",
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bounceable)",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
"PerPkg": "1",
- "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress",
+ "PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x1",
"Unit": "MDF"
},
@@ -4861,11 +4861,11 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bouncable)",
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bounceable)",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
"PerPkg": "1",
- "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress",
+ "PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x4",
"Unit": "MDF"
},
diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
index 557080b74ee5..0761980c34a0 100644
--- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json
@@ -1186,6 +1186,36 @@
"Unit": "IIO"
},
{
+ "BriefDescription": ": IOTLB Hits to a 1G Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 2M Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 4K Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": ": Context cache hits",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index e98602c66707..71d78a7841ea 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -1847,6 +1847,12 @@
"ScaleUnit": "1GHz"
},
{
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+ "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+ "MetricName": "upi_data_receive_bw",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
"MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
"MetricName": "upi_data_transmit_bw",
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
index 63d5faf2fc43..11810daaf150 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -19,7 +19,7 @@
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
"EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.",
"SampleAfterValue": "200003",
"UMask": "0x20"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
index 176e5ef2a24a..45ee6bceba7f 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -519,7 +519,7 @@
"BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
"EventCode": "0x5e",
"EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
"SampleAfterValue": "1000003",
"UMask": "0x1"
},
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
index f87ea3f66d1b..a066a009c511 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
@@ -38,7 +38,7 @@
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
"PerPkg": "1",
- "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations servied by the IRP",
+ "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x80",
"Unit": "IRP"
},
@@ -65,7 +65,7 @@
"EventCode": "0x10",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
"PerPkg": "1",
- "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations servied by the IRP",
+ "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations serviced by the IRP",
"UMask": "0x40",
"Unit": "IRP"
},
@@ -454,7 +454,7 @@
"EventCode": "0x11",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
"PerPkg": "1",
- "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
"UMask": "0x2",
"Unit": "IRP"
},
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index e571683f59f3..4d1deed4437a 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -7,7 +7,7 @@ GenuineIntel-6-56,v11,broadwellde,core
GenuineIntel-6-4F,v22,broadwellx,core
GenuineIntel-6-55-[56789ABCDEF],v1.20,cascadelakex,core
GenuineIntel-6-9[6C],v1.04,elkhartlake,core
-GenuineIntel-6-CF,v1.01,emeraldrapids,core
+GenuineIntel-6-CF,v1.02,emeraldrapids,core
GenuineIntel-6-5[CF],v13,goldmont,core
GenuineIntel-6-7A,v1.01,goldmontplus,core
GenuineIntel-6-B6,v1.00,grandridge,core
@@ -15,7 +15,7 @@ GenuineIntel-6-A[DE],v1.01,graniterapids,core
GenuineIntel-6-(3C|45|46),v33,haswell,core
GenuineIntel-6-3F,v28,haswellx,core
GenuineIntel-6-7[DE],v1.19,icelake,core
-GenuineIntel-6-6[AC],v1.21,icelakex,core
+GenuineIntel-6-6[AC],v1.23,icelakex,core
GenuineIntel-6-3A,v24,ivybridge,core
GenuineIntel-6-3E,v24,ivytown,core
GenuineIntel-6-2D,v24,jaketown,core
@@ -26,7 +26,7 @@ GenuineIntel-6-1[AEF],v4,nehalemep,core
GenuineIntel-6-2E,v4,nehalemex,core
GenuineIntel-6-A7,v1.01,rocketlake,core
GenuineIntel-6-2A,v19,sandybridge,core
-GenuineIntel-6-8F,v1.16,sapphirerapids,core
+GenuineIntel-6-8F,v1.17,sapphirerapids,core
GenuineIntel-6-AF,v1.00,sierraforest,core
GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v57,skylake,core
diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
index 0c880e415669..27433fc15ede 100644
--- a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json
@@ -985,7 +985,7 @@
},
{
"BriefDescription": "Average number of parallel data read requests to external memory",
- "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@",
+ "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "tma_info_system_mem_parallel_reads",
"PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
index 4a9d211e9d4f..1bdefaf96287 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
@@ -23,27 +23,48 @@
"UMask": "0x10"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_0",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]",
"EventCode": "0xb3",
"EventName": "FP_ARITH_DISPATCHED.PORT_5",
"SampleAfterValue": "2000003",
"UMask": "0x4"
},
{
+ "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.V2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
index 6dcf3b763af4..2cfe814d2015 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
@@ -505,7 +505,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).",
"EventCode": "0xad",
"EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
"MSRIndex": "0x3F7",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
index 06c6d67cb76b..56e54babcc26 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -400,7 +400,6 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(76 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 75.5 * tma_info_system_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_contested_accesses",
@@ -421,7 +420,6 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "75.5 * tma_info_system_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_thread_clks",
"MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
"MetricName": "tma_data_sharing",
@@ -449,7 +447,6 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks - tma_pmm_bound if #has_pmem > 0 else MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_thread_clks)",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
@@ -656,7 +653,6 @@
},
{
"BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_thread_slots / BR_MISP_RETIRED.ALL_BRANCHES",
"MetricGroup": "Bad;BrMispredicts;tma_issueBM",
"MetricName": "tma_info_bad_spec_branch_misprediction_cost",
@@ -699,7 +695,6 @@
},
{
"BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_system_smt_2t_utilization > 0.5 else 0)",
"MetricGroup": "Cor;SMT",
"MetricName": "tma_info_botlnk_l0_core_bound_likely",
@@ -707,7 +702,6 @@
},
{
"BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
"MetricGroup": "DSBmiss;Fed;tma_issueFB",
"MetricName": "tma_info_botlnk_l2_dsb_misses",
@@ -716,7 +710,6 @@
},
{
"BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
"MetricName": "tma_info_botlnk_l2_ic_misses",
@@ -725,7 +718,6 @@
},
{
"BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
"MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
"MetricName": "tma_info_bottleneck_big_code",
@@ -742,7 +734,6 @@
},
{
"BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_bottleneck_big_code",
"MetricGroup": "Fed;FetchBW;Frontend",
"MetricName": "tma_info_bottleneck_instruction_fetch_bw",
@@ -750,7 +741,6 @@
},
{
"BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
"MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
"MetricName": "tma_info_bottleneck_memory_bandwidth",
@@ -759,7 +749,6 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
"MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
"MetricName": "tma_info_bottleneck_memory_data_tlbs",
@@ -768,7 +757,6 @@
},
{
"BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))",
"MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
"MetricName": "tma_info_bottleneck_memory_latency",
@@ -777,7 +765,6 @@
},
{
"BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
"MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
"MetricName": "tma_info_bottleneck_mispredictions",
@@ -1301,6 +1288,7 @@
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_system_socket_clks / duration_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "tma_info_system_mem_read_latency",
@@ -1455,7 +1443,6 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l2_bound",
@@ -1465,7 +1452,6 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_thread_clks",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
@@ -1538,7 +1524,6 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_thread_clks",
"MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
"MetricName": "tma_lock_latency",
@@ -1596,6 +1581,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_thread_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
"MetricName": "tma_memory_fence",
@@ -1604,7 +1590,6 @@
},
{
"BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_thread_slots)",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_memory_operations",
@@ -1676,7 +1661,6 @@
},
{
"BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
- "MetricConstraint": "NO_GROUP_EVENTS",
"MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
"MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
"MetricName": "tma_other_light_ops",
@@ -1758,6 +1742,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_2",
@@ -1767,6 +1752,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_thread_clks",
"MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
"MetricName": "tma_ports_utilized_3m",
@@ -1822,6 +1808,7 @@
},
{
"BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_thread_clks",
"MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
"MetricName": "tma_slow_pause",
@@ -1840,7 +1827,6 @@
},
{
"BriefDescription": "This metric represents rate of split store accesses",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_core_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
"MetricName": "tma_split_stores",
@@ -1868,7 +1854,6 @@
},
{
"BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
- "MetricConstraint": "NO_GROUP_EVENTS_NMI",
"MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_thread_clks",
"MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
"MetricName": "tma_store_fwd_blk",
@@ -1965,6 +1950,12 @@
"ScaleUnit": "1GHz"
},
{
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+ "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+ "MetricName": "upi_data_receive_bw",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
"MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
"MetricName": "upi_data_transmit_bw",
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
index 09d840c7da4c..65d088556bae 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
@@ -4825,11 +4825,11 @@
"Unit": "M3UPI"
},
{
- "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bouncable)",
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bounceable)",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
"PerPkg": "1",
- "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress",
+ "PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x1",
"Unit": "MDF"
},
@@ -4861,11 +4861,11 @@
"Unit": "MDF"
},
{
- "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bouncable)",
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bounceable)",
"EventCode": "0x47",
"EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
"PerPkg": "1",
- "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress",
+ "PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress",
"UMask": "0x4",
"Unit": "MDF"
},
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
index 8b5f54fed103..03596db87710 100644
--- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
@@ -1250,6 +1250,36 @@
"Unit": "IIO"
},
{
+ "BriefDescription": ": IOTLB Hits to a 1G Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 2M Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 4K Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": ": Context cache hits",
"EventCode": "0x40",
"EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 4a8f8eeb7525..ec3aa5ef00a3 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1807,6 +1807,12 @@
"ScaleUnit": "1GHz"
},
{
+ "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)",
+ "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
+ "MetricName": "upi_data_receive_bw",
+ "ScaleUnit": "1MB/s"
+ },
+ {
"BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)",
"MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time",
"MetricName": "upi_data_transmit_bw",
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index 3c091ab75305..53ab050c8fa4 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -83,7 +83,7 @@ def c_len(s: str) -> int:
"""Return the length of s a C string
This doesn't handle all escape characters properly. It first assumes
- all \ are for escaping, it then adjusts as it will have over counted
+ all \\ are for escaping, it then adjusts as it will have over counted
\\. The code uses \000 rather than \0 as a terminator as an adjacent
number would be folded into a string of \0 (ie. "\0" + "5" doesn't
equal a terminator followed by the number 5 but the escape of
@@ -286,6 +286,7 @@ class JsonEvent:
'imx8_ddr': 'imx8_ddr',
'L3PMC': 'amd_l3',
'DFPMC': 'amd_df',
+ 'UMCPMC': 'amd_umc',
'cpu_core': 'cpu_core',
'cpu_atom': 'cpu_atom',
'ali_drw': 'ali_drw',
@@ -354,6 +355,7 @@ class JsonEvent:
('SampleAfterValue', 'period='),
('UMask', 'umask='),
('NodeType', 'type='),
+ ('RdWrMask', 'rdwrmask='),
]
for key, value in event_fields:
if key in jd and jd[key] != '0':
diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py
index d59ff53f1d94..d973c2baed1c 100755
--- a/tools/perf/scripts/python/arm-cs-trace-disasm.py
+++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py
@@ -45,8 +45,8 @@ parser = OptionParser(option_list=option_list)
# Initialize global dicts and regular expression
disasm_cache = dict()
cpu_data = dict()
-disasm_re = re.compile("^\s*([0-9a-fA-F]+):")
-disasm_func_re = re.compile("^\s*([0-9a-fA-F]+)\s.*:")
+disasm_re = re.compile(r"^\s*([0-9a-fA-F]+):")
+disasm_func_re = re.compile(r"^\s*([0-9a-fA-F]+)\s.*:")
cache_size = 64*1024
glb_source_file_name = None
@@ -188,6 +188,17 @@ def process_event(param_dict):
dso_end = get_optional(param_dict, "dso_map_end")
symbol = get_optional(param_dict, "symbol")
+ cpu = sample["cpu"]
+ ip = sample["ip"]
+ addr = sample["addr"]
+
+ # Initialize CPU data if it's empty, and directly return back
+ # if this is the first tracing event for this CPU.
+ if (cpu_data.get(str(cpu) + 'addr') == None):
+ cpu_data[str(cpu) + 'addr'] = addr
+ return
+
+
if (options.verbose == True):
print("Event type: %s" % name)
print_sample(sample)
@@ -209,16 +220,6 @@ def process_event(param_dict):
if (name[0:8] != "branches"):
return
- cpu = sample["cpu"]
- ip = sample["ip"]
- addr = sample["addr"]
-
- # Initialize CPU data if it's empty, and directly return back
- # if this is the first tracing event for this CPU.
- if (cpu_data.get(str(cpu) + 'addr') == None):
- cpu_data[str(cpu) + 'addr'] = addr
- return
-
# The format for packet is:
#
# +------------+------------+------------+
@@ -258,8 +259,9 @@ def process_event(param_dict):
if (options.objdump_name != None):
# It doesn't need to decrease virtual memory offset for disassembly
- # for kernel dso, so in this case we set vm_start to zero.
- if (dso == "[kernel.kallsyms]"):
+ # for kernel dso and executable file dso, so in this case we set
+ # vm_start to zero.
+ if (dso == "[kernel.kallsyms]" or dso_start == 0x400000):
dso_vm_start = 0
else:
dso_vm_start = int(dso_start)
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 2560a042dc6f..9401f7c14747 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -260,7 +260,7 @@ def pr_help():
comm_re = None
pid_re = None
-pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
+pid_regex = r"^(\d*)-(\d*)$|^(\d*)$"
opt_proc = popt.DISP_DFL
opt_disp = topt.DISP_ALL
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 13f2d8a81610..121cf61ba1b3 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -677,8 +677,8 @@ class CallGraphModelBase(TreeModel):
# sqlite supports GLOB (text only) which uses * and ? and is case sensitive
if not self.glb.dbref.is_sqlite3:
# Escape % and _
- s = value.replace("%", "\%")
- s = s.replace("_", "\_")
+ s = value.replace("%", "\\%")
+ s = s.replace("_", "\\_")
# Translate * and ? into SQL LIKE pattern characters % and _
trans = string.maketrans("*?", "%_")
match = " LIKE '" + str(s).translate(trans) + "'"
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 2b45ffa462a6..53ba9c3e20e0 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -77,3 +77,17 @@ CFLAGS_python-use.o += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUI
CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls
perf-y += workloads/
+
+ifdef SHELLCHECK
+ SHELL_TESTS := $(shell find tests/shell -executable -type f -name '*.sh')
+ TEST_LOGS := $(SHELL_TESTS:tests/shell/%=shell/%.shellcheck_log)
+else
+ SHELL_TESTS :=
+ TEST_LOGS :=
+endif
+
+$(OUTPUT)%.shellcheck_log: %
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+
+perf-y += $(TEST_LOGS)
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 61186d0d1cfa..97e1bdd6ec0e 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -188,7 +188,7 @@ static int test__attr(struct test_suite *test __maybe_unused, int subtest __mayb
if (perf_pmus__num_core_pmus() > 1) {
/*
* TODO: Attribute tests hard code the PMU type. If there are >1
- * core PMU then each PMU will have a different type whic
+ * core PMU then each PMU will have a different type which
* requires additional support.
*/
pr_debug("Skip test on hybrid systems");
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 27c21271a16c..b44e4e6e4443 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -6,7 +6,7 @@ flags=0|8
cpu=*
type=0|1
size=136
-config=0
+config=0|1
sample_period=*
sample_type=263
read_format=0|4|20
diff --git a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
index fbb065842880..bed765450ca9 100644
--- a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
+++ b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64
@@ -6,4 +6,4 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
ret = 129
test_ret = true
arch = aarch64
-auxv = auxv["AT_HWCAP"] & 0x200000 == 0
+auxv = auxv["AT_HWCAP"] & 0x400000 == 0
diff --git a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
index c598c803221d..a65113cd7311 100644
--- a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
+++ b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64
@@ -6,7 +6,7 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
ret = 1
test_ret = true
arch = aarch64
-auxv = auxv["AT_HWCAP"] & 0x200000 == 0x200000
+auxv = auxv["AT_HWCAP"] & 0x400000 == 0x400000
kernel_since = 6.1
[event:base-record]
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index cb6f1dd00dc4..4a5973f9bb9b 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -14,6 +14,7 @@
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
+#include "config.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
@@ -32,6 +33,7 @@
static bool dont_fork;
const char *dso_to_test;
+const char *test_objdump_path = "objdump";
/*
* List of architecture specific tests. Not a weak symbol as the array length is
@@ -60,8 +62,6 @@ static struct test_suite *generic_tests[] = {
&suite__pmu,
&suite__pmu_events,
&suite__dso_data,
- &suite__dso_data_cache,
- &suite__dso_data_reopen,
&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
&suite__perf_evsel__tp_sched_test,
@@ -513,6 +513,15 @@ static int run_workload(const char *work, int argc, const char **argv)
return -1;
}
+static int perf_test__config(const char *var, const char *value,
+ void *data __maybe_unused)
+{
+ if (!strcmp(var, "annotate.objdump"))
+ test_objdump_path = value;
+
+ return 0;
+}
+
int cmd_test(int argc, const char **argv)
{
const char *test_usage[] = {
@@ -529,6 +538,8 @@ int cmd_test(int argc, const char **argv)
"Do not fork for testcase"),
OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),
OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
+ OPT_STRING(0, "objdump", &test_objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
OPT_END()
};
const char * const test_subcommands[] = { "list", NULL };
@@ -538,6 +549,8 @@ int cmd_test(int argc, const char **argv)
if (ret < 0)
return ret;
+ perf_config(perf_test__config, NULL);
+
/* Unbuffered output */
setvbuf(stdout, NULL, _IONBF, 0);
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 3af81012014e..7a3a7bbbec71 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -185,7 +185,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
int ret;
fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
- ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
+ ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, addr + len,
filename);
if (ret <= 0 || (size_t)ret >= sizeof(cmd))
return -1;
@@ -511,38 +511,6 @@ static void fs_something(void)
}
}
-#ifdef __s390x__
-#include "header.h" // for get_cpuid()
-#endif
-
-static const char *do_determine_event(bool excl_kernel)
-{
- const char *event = excl_kernel ? "cycles:u" : "cycles";
-
-#ifdef __s390x__
- char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
- unsigned int family;
- int ret, cpum_cf_a;
-
- if (get_cpuid(cpuid, sizeof(cpuid)))
- goto out_clocks;
- ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
- model, cpum_cf_v, &cpum_cf_a);
- if (ret != 5) /* Not available */
- goto out_clocks;
- if (excl_kernel && (cpum_cf_a & 4))
- return event;
- if (!excl_kernel && (cpum_cf_a & 2))
- return event;
-
- /* Fall through: missing authorization */
-out_clocks:
- event = excl_kernel ? "cpu-clock:u" : "cpu-clock";
-
-#endif
- return event;
-}
-
static void do_something(void)
{
fs_something();
@@ -583,8 +551,10 @@ static int do_test_code_reading(bool try_kcore)
int err = -1, ret;
pid_t pid;
struct map *map;
- bool have_vmlinux, have_kcore, excl_kernel = false;
+ bool have_vmlinux, have_kcore;
struct dso *dso;
+ const char *events[] = { "cycles", "cycles:u", "cpu-clock", "cpu-clock:u", NULL };
+ int evidx = 0;
pid = getpid();
@@ -618,7 +588,7 @@ static int do_test_code_reading(bool try_kcore)
/* No point getting kernel events if there is no kernel object */
if (!have_vmlinux && !have_kcore)
- excl_kernel = true;
+ evidx++;
threads = thread_map__new_by_tid(pid);
if (!threads) {
@@ -640,13 +610,13 @@ static int do_test_code_reading(bool try_kcore)
goto out_put;
}
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus) {
pr_debug("perf_cpu_map__new failed\n");
goto out_put;
}
- while (1) {
+ while (events[evidx]) {
const char *str;
evlist = evlist__new();
@@ -657,7 +627,7 @@ static int do_test_code_reading(bool try_kcore)
perf_evlist__set_maps(&evlist->core, cpus, threads);
- str = do_determine_event(excl_kernel);
+ str = events[evidx];
pr_debug("Parsing event '%s'\n", str);
ret = parse_event(evlist, str);
if (ret < 0) {
@@ -675,32 +645,32 @@ static int do_test_code_reading(bool try_kcore)
ret = evlist__open(evlist);
if (ret < 0) {
- if (!excl_kernel) {
- excl_kernel = true;
- /*
- * Both cpus and threads are now owned by evlist
- * and will be freed by following perf_evlist__set_maps
- * call. Getting reference to keep them alive.
- */
- perf_cpu_map__get(cpus);
- perf_thread_map__get(threads);
- perf_evlist__set_maps(&evlist->core, NULL, NULL);
- evlist__delete(evlist);
- evlist = NULL;
- continue;
- }
+ evidx++;
- if (verbose > 0) {
+ if (events[evidx] == NULL && verbose > 0) {
char errbuf[512];
evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
}
- goto out_put;
+ /*
+ * Both cpus and threads are now owned by evlist
+ * and will be freed by following perf_evlist__set_maps
+ * call. Getting reference to keep them alive.
+ */
+ perf_cpu_map__get(cpus);
+ perf_thread_map__get(threads);
+ perf_evlist__set_maps(&evlist->core, NULL, NULL);
+ evlist__delete(evlist);
+ evlist = NULL;
+ continue;
}
break;
}
+ if (events[evidx] == NULL)
+ goto out_put;
+
ret = evlist__mmap(evlist, UINT_MAX);
if (ret < 0) {
pr_debug("evlist__mmap failed\n");
@@ -721,7 +691,7 @@ static int do_test_code_reading(bool try_kcore)
err = TEST_CODE_READING_NO_KERNEL_OBJ;
else if (!have_vmlinux && !try_kcore)
err = TEST_CODE_READING_NO_VMLINUX;
- else if (excl_kernel)
+ else if (strstr(events[evidx], ":u"))
err = TEST_CODE_READING_NO_ACCESS;
else
err = TEST_CODE_READING_OK;
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
index 7730fc2ab40b..bd8e396f3e57 100644
--- a/tools/perf/tests/cpumap.c
+++ b/tools/perf/tests/cpumap.c
@@ -213,7 +213,7 @@ static int test__cpu_map_intersect(struct test_suite *test __maybe_unused,
static int test__cpu_map_equal(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
- struct perf_cpu_map *any = perf_cpu_map__dummy_new();
+ struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
struct perf_cpu_map *one = perf_cpu_map__new("1");
struct perf_cpu_map *two = perf_cpu_map__new("2");
struct perf_cpu_map *empty = perf_cpu_map__intersect(one, two);
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index 3419a4ab5590..2d67422c1222 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -394,6 +394,15 @@ static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int sub
return 0;
}
-DEFINE_SUITE("DSO data read", dso_data);
-DEFINE_SUITE("DSO data cache", dso_data_cache);
-DEFINE_SUITE("DSO data reopen", dso_data_reopen);
+
+static struct test_case tests__dso_data[] = {
+ TEST_CASE("read", dso_data),
+ TEST_CASE("cache", dso_data_cache),
+ TEST_CASE("reopen", dso_data_reopen),
+ { .name = NULL, }
+};
+
+struct test_suite suite__dso_data = {
+ .desc = "DSO data tests",
+ .test_cases = tests__dso_data,
+};
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 8f4f9b632e1e..5a3b2bed07f3 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -81,7 +81,7 @@ static int test__keep_tracking(struct test_suite *test __maybe_unused, int subte
threads = thread_map__new(-1, getpid(), UINT_MAX);
CHECK_NOT_NULL__(threads);
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
CHECK_NOT_NULL__(cpus);
evlist = evlist__new();
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index d9945ed25bc5..8a4da7eb637a 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -183,7 +183,7 @@ run += make_install_prefix_slash
# run += make_install_pdf
run += make_minimal
-old_libbpf := $(shell echo '\#include <bpf/libbpf.h>' | $(CC) -E -dM -x c -| egrep -q "define[[:space:]]+LIBBPF_MAJOR_VERSION[[:space:]]+0{1}")
+old_libbpf := $(shell echo '\#include <bpf/libbpf.h>' | $(CC) -E -dM -x c -| grep -q -E "define[[:space:]]+LIBBPF_MAJOR_VERSION[[:space:]]+0{1}")
ifneq ($(old_libbpf),)
run += make_libbpf_dynamic
diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c
index 5bb1123a91a7..bb3fbfe5a73e 100644
--- a/tools/perf/tests/maps.c
+++ b/tools/perf/tests/maps.c
@@ -14,44 +14,59 @@ struct map_def {
u64 end;
};
+struct check_maps_cb_args {
+ struct map_def *merged;
+ unsigned int i;
+};
+
+static int check_maps_cb(struct map *map, void *data)
+{
+ struct check_maps_cb_args *args = data;
+ struct map_def *merged = &args->merged[args->i];
+
+ if (map__start(map) != merged->start ||
+ map__end(map) != merged->end ||
+ strcmp(map__dso(map)->name, merged->name) ||
+ refcount_read(map__refcnt(map)) != 1) {
+ return 1;
+ }
+ args->i++;
+ return 0;
+}
+
+static int failed_cb(struct map *map, void *data __maybe_unused)
+{
+ pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n",
+ map__start(map),
+ map__end(map),
+ map__dso(map)->name,
+ refcount_read(map__refcnt(map)));
+
+ return 0;
+}
+
static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)
{
- struct map_rb_node *rb_node;
- unsigned int i = 0;
bool failed = false;
if (maps__nr_maps(maps) != size) {
pr_debug("Expected %d maps, got %d", size, maps__nr_maps(maps));
failed = true;
} else {
- maps__for_each_entry(maps, rb_node) {
- struct map *map = rb_node->map;
-
- if (map__start(map) != merged[i].start ||
- map__end(map) != merged[i].end ||
- strcmp(map__dso(map)->name, merged[i].name) ||
- refcount_read(map__refcnt(map)) != 1) {
- failed = true;
- }
- i++;
- }
+ struct check_maps_cb_args args = {
+ .merged = merged,
+ .i = 0,
+ };
+ failed = maps__for_each_map(maps, check_maps_cb, &args);
}
if (failed) {
pr_debug("Expected:\n");
- for (i = 0; i < size; i++) {
+ for (unsigned int i = 0; i < size; i++) {
pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: 1\n",
merged[i].start, merged[i].end, merged[i].name);
}
pr_debug("Got:\n");
- maps__for_each_entry(maps, rb_node) {
- struct map *map = rb_node->map;
-
- pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n",
- map__start(map),
- map__end(map),
- map__dso(map)->name,
- refcount_read(map__refcnt(map)));
- }
+ maps__for_each_map(maps, failed_cb, NULL);
}
return failed ? TEST_FAIL : TEST_OK;
}
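check_maps() now goes through maps__for_each_map(), which owns the traversal and stops as soon as a callback returns non-zero; per-walk state travels through the void *data argument. The same idiom in self-contained form (hypothetical types, not the perf API):

#include <stdio.h>

/* Hypothetical item type; the perf code iterates struct map entries. */
struct item { int value; };

/* The iterator owns the traversal; a non-zero callback return stops it. */
static int for_each_item(struct item *items, int nr,
			 int (*cb)(struct item *, void *), void *data)
{
	for (int i = 0; i < nr; i++) {
		int ret = cb(&items[i], data);

		if (ret)
			return ret;
	}
	return 0;
}

static int find_negative_cb(struct item *it, void *data)
{
	int *pos = data; /* per-walk state rides in the data pointer */

	if (it->value < 0)
		return 1; /* stop early, like check_maps_cb() above */
	(*pos)++;
	return 0;
}

int main(void)
{
	struct item items[] = { { 3 }, { -1 }, { 7 } };
	int pos = 0;

	if (for_each_item(items, 3, find_negative_cb, &pos))
		printf("stopped at index %d\n", pos);
	return 0;
}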
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 886a13a77a16..012c8ae439fd 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -52,7 +52,7 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
return -1;
}
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (cpus == NULL) {
pr_debug("perf_cpu_map__new\n");
goto out_free_threads;
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index f3275be83a33..fb114118c876 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -37,7 +37,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
return -1;
}
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (cpus == NULL) {
pr_debug("perf_cpu_map__new\n");
goto out_thread_map_delete;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index f78be21a5999..fbdf710d5eea 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -162,6 +162,22 @@ static int test__checkevent_numeric(struct evlist *evlist)
return TEST_OK;
}
+
+static int assert_hw(struct perf_evsel *evsel, enum perf_hw_id id, const char *name)
+{
+ struct perf_pmu *pmu;
+
+ if (evsel->attr.type == PERF_TYPE_HARDWARE) {
+ TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, id));
+ return 0;
+ }
+ pmu = perf_pmus__find_by_type(evsel->attr.type);
+
+ TEST_ASSERT_VAL("unexpected PMU type", pmu);
+ TEST_ASSERT_VAL("PMU missing event", perf_pmu__have_event(pmu, name));
+ return 0;
+}
+
static int test__checkevent_symbolic_name(struct evlist *evlist)
{
struct perf_evsel *evsel;
@@ -169,10 +185,12 @@ static int test__checkevent_symbolic_name(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
perf_evlist__for_each_evsel(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
- TEST_ASSERT_VAL("wrong config",
- test_perf_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ int ret = assert_hw(evsel, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+
+ if (ret)
+ return ret;
}
+
return TEST_OK;
}
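assert_hw() exists because on systems with multiple core PMUs a legacy event such as "instructions" may be opened on a specific PMU, so attr.type carries that PMU's dynamic type rather than PERF_TYPE_HARDWARE; the helper accepts either encoding. A hedged sketch of the branch, where pmu_has_event() is a hypothetical stand-in for the perf_pmus__find_by_type() plus perf_pmu__have_event() lookup:

#include <stdbool.h>
#include <stdio.h>

#define PERF_TYPE_HARDWARE 0 /* matches the UAPI value */

/* Hypothetical stand-in for the PMU lookup by dynamic type. */
static bool pmu_has_event(unsigned int type, const char *name)
{
	(void)type;
	(void)name;
	return true;
}

static bool is_hw_event(unsigned int type, unsigned long long config,
			unsigned long long id, const char *name)
{
	if (type == PERF_TYPE_HARDWARE)
		return config == id; /* classic encoding (simplified) */
	/* Dynamic core-PMU encoding: validate by event name instead. */
	return pmu_has_event(type, name);
}

int main(void)
{
	printf("%d\n", is_hw_event(PERF_TYPE_HARDWARE, 1, 1, "instructions"));
	return 0;
}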
@@ -183,8 +201,10 @@ static int test__checkevent_symbolic_name_config(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries);
perf_evlist__for_each_evsel(&evlist->core, evsel) {
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
- TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ int ret = assert_hw(evsel, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+ if (ret)
+ return ret;
/*
* The period value gets configured within evlist__config,
* while this test executes only parse events method.
@@ -861,10 +881,14 @@ static int test__group1(struct evlist *evlist)
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* instructions:k */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -878,8 +902,10 @@ static int test__group1(struct evlist *evlist)
/* cycles:upp */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -907,6 +933,8 @@ static int test__group2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist));
evlist__for_each_entry(evlist, evsel) {
+ int ret;
+
if (evsel->core.attr.type == PERF_TYPE_SOFTWARE) {
/* faults + :ku modifier */
leader = evsel;
@@ -939,8 +967,10 @@ static int test__group2(struct evlist *evlist)
continue;
}
/* cycles:k */
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -957,6 +987,7 @@ static int test__group2(struct evlist *evlist)
static int test__group3(struct evlist *evlist __maybe_unused)
{
struct evsel *evsel, *group1_leader = NULL, *group2_leader = NULL;
+ int ret;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus() + 2));
@@ -1045,8 +1076,10 @@ static int test__group3(struct evlist *evlist __maybe_unused)
continue;
}
/* instructions:u */
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1070,10 +1103,14 @@ static int test__group4(struct evlist *evlist __maybe_unused)
num_core_entries() == evlist__nr_groups(evlist));
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles:u + p */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1089,8 +1126,10 @@ static int test__group4(struct evlist *evlist __maybe_unused)
/* instructions:kp + p */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1108,6 +1147,7 @@ static int test__group4(struct evlist *evlist __maybe_unused)
static int test__group5(struct evlist *evlist __maybe_unused)
{
struct evsel *evsel = NULL, *leader;
+ int ret;
TEST_ASSERT_VAL("wrong number of entries",
evlist->core.nr_entries == (5 * num_core_entries()));
@@ -1117,8 +1157,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
for (int i = 0; i < num_core_entries(); i++) {
/* cycles + G */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1133,8 +1175,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
/* instructions + G */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1148,8 +1192,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
for (int i = 0; i < num_core_entries(); i++) {
/* cycles:G */
evsel = leader = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1164,8 +1210,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
/* instructions:G */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1178,8 +1226,10 @@ static int test__group5(struct evlist *evlist __maybe_unused)
for (int i = 0; i < num_core_entries(); i++) {
/* cycles */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1201,10 +1251,14 @@ static int test__group_gh1(struct evlist *evlist)
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles + :H group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1218,8 +1272,10 @@ static int test__group_gh1(struct evlist *evlist)
/* cache-misses:G + :H group modifier */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1242,10 +1298,14 @@ static int test__group_gh2(struct evlist *evlist)
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles + :G group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1259,8 +1319,10 @@ static int test__group_gh2(struct evlist *evlist)
/* cache-misses:H + :G group modifier */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1283,10 +1345,14 @@ static int test__group_gh3(struct evlist *evlist)
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles:G + :u group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1300,8 +1366,10 @@ static int test__group_gh3(struct evlist *evlist)
/* cache-misses:H + :u group modifier */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1324,10 +1392,14 @@ static int test__group_gh4(struct evlist *evlist)
evlist__nr_groups(evlist) == num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles:G + :uG group modifier */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1341,8 +1413,10 @@ static int test__group_gh4(struct evlist *evlist)
/* cache-misses:H + :uG group modifier */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1363,10 +1437,14 @@ static int test__leader_sample1(struct evlist *evlist)
evlist->core.nr_entries == (3 * num_core_entries()));
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1379,8 +1457,10 @@ static int test__leader_sample1(struct evlist *evlist)
/* cache-misses - not sampling */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1392,8 +1472,10 @@ static int test__leader_sample1(struct evlist *evlist)
/* branch-misses - not sampling */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
@@ -1415,10 +1497,14 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
evlist->core.nr_entries == (2 * num_core_entries()));
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* instructions - sampling group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1431,8 +1517,10 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
/* branch-misses - not sampling */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
@@ -1472,10 +1560,14 @@ static int test__pinned_group(struct evlist *evlist)
evlist->core.nr_entries == (3 * num_core_entries()));
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
/* TODO: The group modifier is not copied to the split group leader. */
@@ -1484,13 +1576,18 @@ static int test__pinned_group(struct evlist *evlist)
/* cache-misses - can not be pinned, but will go on with the leader */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
/* branch-misses - ditto */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
}
return TEST_OK;
@@ -1517,10 +1614,14 @@ static int test__exclusive_group(struct evlist *evlist)
evlist->core.nr_entries == 3 * num_core_entries());
for (int i = 0; i < num_core_entries(); i++) {
+ int ret;
+
/* cycles - group leader */
evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel));
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
/* TODO: The group modifier is not copied to the split group leader. */
@@ -1529,13 +1630,18 @@ static int test__exclusive_group(struct evlist *evlist)
/* cache-misses - can not be pinned, but will go on with the leader */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
/* branch-misses - ditto */
evsel = evsel__next(evsel);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES));
+ ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses");
+ if (ret)
+ return ret;
+
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
}
return TEST_OK;
@@ -1677,9 +1783,11 @@ static int test__checkevent_raw_pmu(struct evlist *evlist)
static int test__sym_event_slash(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+ if (ret)
+ return ret;
- TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
return TEST_OK;
}
@@ -1687,9 +1795,11 @@ static int test__sym_event_slash(struct evlist *evlist)
static int test__sym_event_dc(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+ if (ret)
+ return ret;
- TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
return TEST_OK;
}
@@ -1697,9 +1807,11 @@ static int test__sym_event_dc(struct evlist *evlist)
static int test__term_equal_term(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+ if (ret)
+ return ret;
- TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "name") == 0);
return TEST_OK;
}
@@ -1707,9 +1819,11 @@ static int test__term_equal_term(struct evlist *evlist)
static int test__term_equal_legacy(struct evlist *evlist)
{
struct evsel *evsel = evlist__first(evlist);
+ int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles");
+
+ if (ret)
+ return ret;
- TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "l1d") == 0);
return TEST_OK;
}
@@ -2549,7 +2663,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
if (strchr(ent->d_name, '.'))
continue;
- /* exclude parametrized ones (name contains '?') */
+ /* exclude parameterized ones (name contains '?') */
n = snprintf(pmu_event, sizeof(pmu_event), "%s%s", path, ent->d_name);
if (n >= PATH_MAX) {
pr_err("pmu event name crossed PATH_MAX(%d) size\n", PATH_MAX);
@@ -2578,7 +2692,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest
fclose(file);
if (is_event_parameterized == 1) {
- pr_debug("skipping parametrized PMU event: %s which contains ?\n", pmu_event);
+ pr_debug("skipping parameterized PMU event: %s which contains ?\n", pmu_event);
continue;
}
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index efcd71c2738a..bbe2ddeb9b74 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -93,7 +93,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
threads = thread_map__new(-1, getpid(), UINT_MAX);
CHECK_NOT_NULL__(threads);
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
CHECK_NOT_NULL__(cpus);
evlist = evlist__new();
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
index a7e169d1bf64..5f886cd09e6b 100644
--- a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
@@ -42,7 +42,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
int main(int argc, char **argv)
{
unsigned long i, len, size, thr;
- pthread_t threads[256];
struct args args[256];
long long v;
diff --git a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
index c0158fac7d0b..e05a559253ca 100644
--- a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
+++ b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
@@ -57,7 +57,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
int main(int argc, char **argv)
{
unsigned int i, len, thr;
- pthread_t threads[256];
struct args args[256];
if (argc < 3) {
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
index 8f6d384208ed..0fc7bf1a25af 100644
--- a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
@@ -51,7 +51,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
int main(int argc, char **argv)
{
unsigned int i, thr;
- pthread_t threads[256];
struct args args[256];
if (argc < 2) {
diff --git a/tools/perf/tests/shell/daemon.sh b/tools/perf/tests/shell/daemon.sh
index 4c598cfc5afa..e5fa8d6f9eb1 100755
--- a/tools/perf/tests/shell/daemon.sh
+++ b/tools/perf/tests/shell/daemon.sh
@@ -414,16 +414,30 @@ EOF
# start daemon
daemon_start ${config} test
- # send 2 signals
- perf daemon signal --config ${config} --session test
- perf daemon signal --config ${config}
-
- # stop daemon
- daemon_exit ${config}
-
- # count is 2 perf.data for signals and 1 for perf record finished
- count=`ls ${base}/session-test/*perf.data* | wc -l`
- if [ ${count} -ne 3 ]; then
+ # send 2 signals then exit. Do this in a loop watching the number of
+ # files to avoid races. If the loop retries more than 600 times then
+ # give up.
+ local retries=0
+ local signals=0
+ local success=0
+ while [ ${retries} -lt 600 ] && [ ${success} -eq 0 ]; do
+ local files
+ files=`ls ${base}/session-test/*perf.data* 2> /dev/null | wc -l`
+ if [ ${signals} -eq 0 ]; then
+ perf daemon signal --config ${config} --session test
+ signals=1
+ elif [ ${signals} -eq 1 ] && [ $files -ge 1 ]; then
+ perf daemon signal --config ${config}
+ signals=2
+ elif [ ${signals} -eq 2 ] && [ $files -ge 2 ]; then
+ daemon_exit ${config}
+ signals=3
+ elif [ ${signals} -eq 3 ] && [ $files -ge 3 ]; then
+ success=1
+ fi
+ retries=$((${retries} + 1))
+ done
+ if [ ${success} -eq 0 ]; then
error=1
echo "FAILED: perf data no generated"
fi
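The fixed signal sequence followed by a single file count raced against the daemon still writing its perf.data files; the loop above sends the next signal only once the previous file is visible, bounded by a retry budget so a broken daemon fails instead of hanging. The same bounded-poll shape in C (hypothetical helper, not perf code):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool wait_for(bool (*cond)(void *), void *arg, int retries)
{
	while (retries-- > 0) {
		if (cond(arg))
			return true;
		usleep(10 * 1000); /* back off briefly between polls */
	}
	return false; /* caller reports a failure, not a hang */
}

static bool countdown(void *arg)
{
	int *n = arg;

	return --*n <= 0;
}

int main(void)
{
	int n = 5;

	printf(wait_for(countdown, &n, 600) ? "done\n" : "gave up\n");
	return 0;
}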
diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh
new file mode 100755
index 000000000000..14b87af88703
--- /dev/null
+++ b/tools/perf/tests/shell/diff.sh
@@ -0,0 +1,108 @@
+#!/bin/sh
+# perf diff tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata1=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+perfdata2=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+perfdata3=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+testprog="perf test -w thloop"
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
+testsym="test_loop"
+
+skip_test_missing_symbol ${testsym}
+
+cleanup() {
+ rm -rf "${perfdata1}"
+ rm -rf "${perfdata1}".old
+ rm -rf "${perfdata2}"
+ rm -rf "${perfdata2}".old
+ rm -rf "${perfdata3}"
+ rm -rf "${perfdata3}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+make_data() {
+ file="$1"
+ if ! perf record -o "${file}" ${testprog} 2> /dev/null
+ then
+ echo "Workload record [Failed record]"
+ echo 1
+ return
+ fi
+ if ! perf report -i "${file}" -q | grep -q "${testsym}"
+ then
+ echo "Workload record [Failed missing output]"
+ echo 1
+ return
+ fi
+ echo 0
+}
+
+test_two_files() {
+ echo "Basic two file diff test"
+ err=$(make_data "${perfdata1}")
+ if [ $err != 0 ]
+ then
+ return
+ fi
+ err=$(make_data "${perfdata2}")
+ if [ $err != 0 ]
+ then
+ return
+ fi
+
+ if ! perf diff "${perfdata1}" "${perfdata2}" | grep -q "${testsym}"
+ then
+ echo "Basic two file diff test [Failed diff]"
+ err=1
+ return
+ fi
+ echo "Basic two file diff test [Success]"
+}
+
+test_three_files() {
+ echo "Basic three file diff test"
+ err=$(make_data "${perfdata1}")
+ if [ $err != 0 ]
+ then
+ return
+ fi
+ err=$(make_data "${perfdata2}")
+ if [ $err != 0 ]
+ then
+ return
+ fi
+ err=$(make_data "${perfdata3}")
+ if [ $err != 0 ]
+ then
+ return
+ fi
+
+ if ! perf diff "${perfdata1}" "${perfdata2}" "${perfdata3}" | grep -q "${testsym}"
+ then
+ echo "Basic three file diff test [Failed diff]"
+ err=1
+ return
+ fi
+ echo "Basic three file diff test [Success]"
+}
+
+test_two_files
+test_three_files
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
new file mode 100644
index 000000000000..5d59c32ae3e7
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+perf_has_symbol()
+{
+ if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
+ echo "perf does have symbol '$1'"
+ return 0
+ fi
+ echo "perf does not have symbol '$1'"
+ return 1
+}
+
+skip_test_missing_symbol()
+{
+ if ! perf_has_symbol "$1" ; then
+ echo "perf is missing symbols - skipping test"
+ exit 2
+ fi
+ return 0
+}
diff --git a/tools/perf/tests/shell/lib/setup_python.sh b/tools/perf/tests/shell/lib/setup_python.sh
new file mode 100644
index 000000000000..c2fce1793538
--- /dev/null
+++ b/tools/perf/tests/shell/lib/setup_python.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+if [ "x$PYTHON" = "x" ]
+then
+ python3 --version >/dev/null 2>&1 && PYTHON=python3
+fi
+if [ "x$PYTHON" = "x" ]
+then
+ python --version >/dev/null 2>&1 && PYTHON=python
+fi
+if [ "x$PYTHON" = "x" ]
+then
+ echo Skipping test, python not detected, please set environment variable PYTHON.
+ exit 2
+fi
diff --git a/tools/perf/tests/shell/list.sh b/tools/perf/tests/shell/list.sh
new file mode 100755
index 000000000000..8a868ae64560
--- /dev/null
+++ b/tools/perf/tests/shell/list.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+# perf list tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
+
+list_output=$(mktemp /tmp/__perf_test.list_output.json.XXXXX)
+
+cleanup() {
+ rm -f "${list_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_list_json() {
+ echo "Json output test"
+ perf list -j -o "${list_output}"
+ $PYTHON -m json.tool "${list_output}"
+ echo "Json output test [Success]"
+}
+
+test_list_json
+cleanup
+exit 0
diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh
index 8dd115dd35a7..a78d35d2cff0 100755
--- a/tools/perf/tests/shell/pipe_test.sh
+++ b/tools/perf/tests/shell/pipe_test.sh
@@ -2,10 +2,17 @@
# perf pipe recording and injection test
# SPDX-License-Identifier: GPL-2.0
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
+sym="noploop"
+
+skip_test_missing_symbol ${sym}
+
data=$(mktemp /tmp/perf.data.XXXXXX)
prog="perf test -w noploop"
task="perf"
-sym="noploop"
if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then
echo "cannot find the test file in the perf report"
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index eebeea6bdc76..72c65570db37 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -45,7 +45,10 @@ trace_libc_inet_pton_backtrace() {
;;
ppc64|ppc64le)
eventattr='max-stack=4'
- echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ # Add gaih_inet to expected backtrace only if it is part of libc.
+ if nm $libc | grep -F -q gaih_inet.; then
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ fi
echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 29443b8e8876..3d1a7759a7b2 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -8,10 +8,19 @@ shelldir=$(dirname "$0")
# shellcheck source=lib/waiting.sh
. "${shelldir}"/lib/waiting.sh
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
+testsym="test_loop"
+
+skip_test_missing_symbol ${testsym}
+
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
testprog="perf test -w thloop"
-testsym="test_loop"
+cpu_pmu_dir="/sys/bus/event_source/devices/cpu*"
+br_cntr_file="/caps/branch_counter_nr"
+br_cntr_output="branch stack counters"
cleanup() {
rm -rf "${perfdata}"
@@ -155,10 +164,37 @@ test_workload() {
echo "Basic target workload test [Success]"
}
+test_branch_counter() {
+ echo "Basic branch counter test"
+ # Check if the branch counter feature is supported
+ for dir in $cpu_pmu_dir
+ do
+ if [ ! -e "$dir$br_cntr_file" ]
+ then
+ echo "branch counter feature not supported on all core PMUs ($dir) [Skipped]"
+ return
+ fi
+ done
+ if ! perf record -o "${perfdata}" -j any,counter ${testprog} 2> /dev/null
+ then
+ echo "Basic branch counter test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf report -i "${perfdata}" -D -q | grep -q "$br_cntr_output"
+ then
+ echo "Basic branch record test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Basic branch counter test [Success]"
+}
+
test_per_thread
test_register_capture
test_system_wide
test_workload
+test_branch_counter
cleanup
exit $err
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index a1ef8f0d2b5c..67c925f3a15a 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -77,9 +77,9 @@ test_offcpu_child() {
err=1
return
fi
- # each process waits for read and write, so it should be more than 800 events
+ # each process waits at least for poll, so it should be more than 400 events
if ! perf report -i ${perfdata} -s comm -q -n -t ';' --percent-limit=90 | \
- awk -F ";" '{ if (NF > 3 && int($3) < 800) exit 1; }'
+ awk -F ";" '{ if (NF > 3 && int($3) < 400) exit 1; }'
then
echo "Child task off-cpu test [Failed invalid output]"
err=1
diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh
new file mode 100755
index 000000000000..fa4d71e2e72a
--- /dev/null
+++ b/tools/perf/tests/shell/script.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+# perf script tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+temp_dir=$(mktemp -d /tmp/perf-test-script.XXXXXXXXXX)
+
+perfdatafile="${temp_dir}/perf.data"
+db_test="${temp_dir}/db_test.py"
+
+err=0
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ sane=$(echo "${temp_dir}" | cut -b 1-21)
+ if [ "${sane}" = "/tmp/perf-test-script" ] ; then
+ echo "--- Cleaning up ---"
+ rm -f "${temp_dir}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+
+test_db()
+{
+ echo "DB test"
+
+ # Check if python script is supported
+ if perf version --build-options | grep python | grep -q OFF ; then
+ echo "SKIP: python scripting is not supported"
+ err=2
+ return
+ fi
+
+ cat << "_end_of_file_" > "${db_test}"
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = True
+
+def sample_table(*args):
+ print(f'sample_table({args})')
+
+def call_path_table(*args):
+ print(f'call_path_table({args})')
+_end_of_file_
+ case $(uname -m)
+ in s390x)
+ cmd_flags="--call-graph dwarf -e cpu-clock";;
+ *)
+ cmd_flags="-g";;
+ esac
+
+ perf record $cmd_flags -o "${perfdatafile}" true
+ perf script -i "${perfdatafile}" -s "${db_test}"
+ echo "DB test [Success]"
+}
+
+test_db
+
+cleanup
+
+exit $err
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 196e22672c50..3bc900533a5d 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -8,20 +8,10 @@ set -e
skip_test=0
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
-if [ "x$PYTHON" == "x" ]
-then
- if which python3 > /dev/null
- then
- PYTHON=python3
- elif which python > /dev/null
- then
- PYTHON=python
- else
- echo Skipping test, python not detected please set environment variable PYTHON.
- exit 2
- fi
-fi
stat_output=$(mktemp /tmp/__perf_test.stat_output.json.XXXXX)
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
index c77955419173..d2a3506e0d19 100755
--- a/tools/perf/tests/shell/stat_all_pmu.sh
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -4,7 +4,7 @@
set -e
-# Test all PMU events; however exclude parametrized ones (name contains '?')
+# Test all PMU events; however exclude parameterized ones (name contains '?')
for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do
echo "Testing $p"
result=$(perf stat -e "$p" true 2>&1)
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index ad94c936de7e..7ca172599aa6 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -1,16 +1,10 @@
#!/bin/bash
# perf metrics value validation
# SPDX-License-Identifier: GPL-2.0
-if [ "x$PYTHON" == "x" ]
-then
- if which python3 > /dev/null
- then
- PYTHON=python3
- else
- echo Skipping test, python3 not detected please set environment variable PYTHON.
- exit 2
- fi
-fi
+
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index 66dfdfdad553..e342e6c8aa50 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -2,8 +2,14 @@
# Check Arm64 callgraphs are complete in fp mode
# SPDX-License-Identifier: GPL-2.0
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
lscpu | grep -q "aarch64" || exit 2
+skip_test_missing_symbol leafloop
+
PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
TEST_PROGRAM="perf test -w leafloop"
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index 09908d71c994..5f14d0cb013f 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -4,6 +4,10 @@
# SPDX-License-Identifier: GPL-2.0
# German Gomez <german.gomez@arm.com>, 2022
+shelldir=$(dirname "$0")
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
# skip the test if the hardware doesn't support branch stack sampling
# and if the architecture doesn't support filter types: any,save_type,u
if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev/null 2>&1 ; then
@@ -11,6 +15,8 @@ if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev
exit 2
fi
+skip_test_missing_symbol brstack_bench
+
TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
TESTPROG="perf test -w brstack"
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
index 69bb6fe86c50..3dfa91832aa8 100755
--- a/tools/perf/tests/shell/test_data_symbol.sh
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -4,6 +4,13 @@
# SPDX-License-Identifier: GPL-2.0
# Leo Yan <leo.yan@linaro.org>, 2022
+shelldir=$(dirname "$0")
+# shellcheck source=lib/waiting.sh
+. "${shelldir}"/lib/waiting.sh
+
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
skip_if_no_mem_event() {
perf mem record -e list 2>&1 | grep -E -q 'available' && return 0
return 2
@@ -11,8 +18,11 @@ skip_if_no_mem_event() {
skip_if_no_mem_event || exit 2
+skip_test_missing_symbol buf1
+
TEST_PROGRAM="perf test -w datasym"
PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX)
check_result() {
# The memory report format is as below:
@@ -50,13 +60,15 @@ echo "Recording workload..."
# specific CPU and test in per-CPU mode.
is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
if (($is_amd >= 1)); then
- perf mem record -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM &
+ perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" &
else
- perf mem record --all-user -o ${PERF_DATA} -- $TEST_PROGRAM &
+ perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" &
fi
PERFPID=$!
+wait_for_perf_to_start ${PERFPID} "${ERR_FILE}"
+
sleep 1
kill $PERFPID
diff --git a/tools/perf/tests/shell/test_perf_data_converter_json.sh b/tools/perf/tests/shell/test_perf_data_converter_json.sh
index 6ded58f98f55..c4f1b59d116f 100755
--- a/tools/perf/tests/shell/test_perf_data_converter_json.sh
+++ b/tools/perf/tests/shell/test_perf_data_converter_json.sh
@@ -6,16 +6,9 @@ set -e
err=0
-if [ "$PYTHON" = "" ] ; then
- if which python3 > /dev/null ; then
- PYTHON=python3
- elif which python > /dev/null ; then
- PYTHON=python
- else
- echo Skipping test, python not detected please set environment variable PYTHON.
- exit 2
- fi
-fi
+shelldir=$(dirname "$0")
+# shellcheck source=lib/setup_python.sh
+. "${shelldir}"/lib/setup_python.sh
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
result=$(mktemp /tmp/__perf_test.output.json.XXXXX)
diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c
index 1de7478ec189..e6fd934b027a 100644
--- a/tools/perf/tests/sigtrap.c
+++ b/tools/perf/tests/sigtrap.c
@@ -57,36 +57,79 @@ static struct perf_event_attr make_event_attr(void)
#ifdef HAVE_BPF_SKEL
#include <bpf/btf.h>
-static bool attr_has_sigtrap(void)
+static struct btf *btf;
+
+static bool btf__available(void)
{
- bool ret = false;
- struct btf *btf;
- const struct btf_type *t;
+ if (btf == NULL)
+ btf = btf__load_vmlinux_btf();
+
+ return btf != NULL;
+}
+
+static void btf__exit(void)
+{
+ btf__free(btf);
+ btf = NULL;
+}
+
+static const struct btf_member *__btf_type__find_member_by_name(int type_id, const char *member_name)
+{
+ const struct btf_type *t = btf__type_by_id(btf, type_id);
const struct btf_member *m;
- const char *name;
- int i, id;
+ int i;
+
+ for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
+ const char *current_member_name = btf__name_by_offset(btf, m->name_off);
+ if (!strcmp(current_member_name, member_name))
+ return m;
+ }
- btf = btf__load_vmlinux_btf();
- if (btf == NULL) {
+ return NULL;
+}
+
+static bool attr_has_sigtrap(void)
+{
+ int id;
+
+ if (!btf__available()) {
/* should be an old kernel */
return false;
}
id = btf__find_by_name_kind(btf, "perf_event_attr", BTF_KIND_STRUCT);
if (id < 0)
- goto out;
+ return false;
- t = btf__type_by_id(btf, id);
- for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
- name = btf__name_by_offset(btf, m->name_off);
- if (!strcmp(name, "sigtrap")) {
- ret = true;
- break;
- }
- }
-out:
- btf__free(btf);
- return ret;
+ return __btf_type__find_member_by_name(id, "sigtrap") != NULL;
+}
+
+static bool kernel_with_sleepable_spinlocks(void)
+{
+ const struct btf_member *member;
+ const struct btf_type *type;
+ const char *type_name;
+ int id;
+
+ if (!btf__available())
+ return false;
+
+ id = btf__find_by_name_kind(btf, "spinlock", BTF_KIND_STRUCT);
+ if (id < 0)
+ return false;
+
+ // Only RT has a "lock" member for "struct spinlock"
+ member = __btf_type__find_member_by_name(id, "lock");
+ if (member == NULL)
+ return false;
+
+ // But check its type as well
+ type = btf__type_by_id(btf, member->type);
+ if (!type || !btf_is_struct(type))
+ return false;
+
+ type_name = btf__name_by_offset(btf, type->name_off);
+ return type_name && !strcmp(type_name, "rt_mutex_base");
}
#else /* !HAVE_BPF_SKEL */
static bool attr_has_sigtrap(void)
@@ -109,6 +152,15 @@ static bool attr_has_sigtrap(void)
return ret;
}
+
+static bool kernel_with_sleepable_spinlocks(void)
+{
+ return false;
+}
+
+static void btf__exit(void)
+{
+}
#endif /* HAVE_BPF_SKEL */
static void
@@ -147,7 +199,7 @@ static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier)
static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier)
{
- int ret;
+ int ret, expected_sigtraps;
ctx.iterate_on = 3000;
@@ -156,7 +208,16 @@ static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrie
ret = run_test_threads(threads, barrier);
TEST_ASSERT_EQUAL("disable failed", ioctl(fd, PERF_EVENT_IOC_DISABLE, 0), 0);
- TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, NUM_THREADS * ctx.iterate_on);
+ expected_sigtraps = NUM_THREADS * ctx.iterate_on;
+
+ if (ctx.signal_count < expected_sigtraps && kernel_with_sleepable_spinlocks()) {
+ pr_debug("Expected %d sigtraps, got %d, running on a kernel with sleepable spinlocks.\n",
+ expected_sigtraps, ctx.signal_count);
+ pr_debug("See https://lore.kernel.org/all/e368f2c848d77fbc8d259f44e2055fe469c219cf.camel@gmx.de/\n");
+ return TEST_SKIP;
+ } else
+ TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, expected_sigtraps);
+
TEST_ASSERT_EQUAL("missing signals or incorrectly delivered", ctx.tids_want_signal, 0);
TEST_ASSERT_VAL("unexpected si_addr", ctx.first_siginfo.si_addr == &ctx.iterate_on);
#if 0 /* FIXME: enable when libc's signal.h has si_perf_{type,data} */
@@ -221,6 +282,7 @@ out_restore_sigaction:
sigaction(SIGTRAP, &oldact, NULL);
out:
pthread_barrier_destroy(&barrier);
+ btf__exit();
return ret;
}
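kernel_with_sleepable_spinlocks() identifies PREEMPT_RT kernels purely from vmlinux BTF: only RT's struct spinlock carries a "lock" member whose type is struct rt_mutex_base. A standalone sketch of the same probe, assuming libbpf is available to link against:

#include <stdio.h>
#include <string.h>
#include <bpf/btf.h>

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	const struct btf_type *t;
	const struct btf_member *m;
	int id, i;

	if (btf == NULL)
		return 1; /* no BTF: likely an old kernel */

	id = btf__find_by_name_kind(btf, "spinlock", BTF_KIND_STRUCT);
	if (id < 0)
		goto out;

	t = btf__type_by_id(btf, id);
	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		const struct btf_type *mt;

		if (strcmp(btf__name_by_offset(btf, m->name_off), "lock"))
			continue;
		mt = btf__type_by_id(btf, m->type);
		if (mt && btf_is_struct(mt) &&
		    !strcmp(btf__name_by_offset(btf, mt->name_off), "rt_mutex_base"))
			printf("PREEMPT_RT spinlocks detected\n");
	}
out:
	btf__free(btf);
	return 0;
}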
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 4d7493fa0105..290716783ac6 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -62,7 +62,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
}
evlist__add(evlist, evsel);
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
threads = thread_map__new_by_tid(getpid());
if (!cpus || !threads) {
err = -ENOMEM;
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index e52b031bedc5..5cab17a1942e 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -351,7 +351,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
goto out_err;
}
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus) {
pr_debug("perf_cpu_map__new failed!\n");
goto out_err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 968dddde6dda..d33d0952025c 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -70,7 +70,7 @@ static int test__task_exit(struct test_suite *test __maybe_unused, int subtest _
* evlist__prepare_workload we'll fill in the only thread
* we're monitoring, the one forked there.
*/
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
threads = thread_map__new_by_tid(-1);
if (!cpus || !threads) {
err = -ENOMEM;
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index b394f3ac2d66..dad3d7414142 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -207,5 +207,6 @@ DECLARE_WORKLOAD(brstack);
DECLARE_WORKLOAD(datasym);
extern const char *dso_to_test;
+extern const char *test_objdump_path;
#endif /* TESTS_H */
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index 9dee63734e66..2a842f53fbb5 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -215,7 +215,7 @@ static int test__session_topology(struct test_suite *test __maybe_unused, int su
if (session_write_header(path))
goto free_path;
- map = perf_cpu_map__new(NULL);
+ map = perf_cpu_map__new_online_cpus();
if (map == NULL) {
pr_debug("failed to get system cpumap\n");
goto free_path;
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 1078a93b01aa..822f893e67d5 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -112,18 +112,92 @@ static bool is_ignored_symbol(const char *name, char type)
return false;
}
+struct test__vmlinux_matches_kallsyms_cb_args {
+ struct machine kallsyms;
+ struct map *vmlinux_map;
+ bool header_printed;
+};
+
+static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data)
+{
+ struct test__vmlinux_matches_kallsyms_cb_args *args = data;
+ struct dso *dso = map__dso(map);
+ /*
+ * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
+ * the kernel will have the path for the vmlinux file being used, so use
+ * the short name, less descriptive but the same ("[kernel]" in both
+ * cases).
+ */
+ struct map *pair = maps__find_by_name(args->kallsyms.kmaps,
+ (dso->kernel ? dso->short_name : dso->name));
+
+ if (pair)
+ map__set_priv(pair, 1);
+ else {
+ if (!args->header_printed) {
+ pr_info("WARN: Maps only in vmlinux:\n");
+ args->header_printed = true;
+ }
+ map__fprintf(map, stderr);
+ }
+ return 0;
+}
+
+static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
+{
+ struct test__vmlinux_matches_kallsyms_cb_args *args = data;
+ struct map *pair;
+ u64 mem_start = map__unmap_ip(args->vmlinux_map, map__start(map));
+ u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map));
+
+ pair = maps__find(args->kallsyms.kmaps, mem_start);
+ if (pair == NULL || map__priv(pair))
+ return 0;
+
+ if (map__start(pair) == mem_start) {
+ struct dso *dso = map__dso(map);
+
+ if (!args->header_printed) {
+ pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
+ args->header_printed = true;
+ }
+
+ pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
+ map__start(map), map__end(map), map__pgoff(map), dso->name);
+ if (mem_end != map__end(pair))
+ pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
+ map__start(pair), map__end(pair), map__pgoff(pair));
+ pr_info(" %s\n", dso->name);
+ map__set_priv(pair, 1);
+ }
+ return 0;
+}
+
+static int test__vmlinux_matches_kallsyms_cb3(struct map *map, void *data)
+{
+ struct test__vmlinux_matches_kallsyms_cb_args *args = data;
+
+ if (!map__priv(map)) {
+ if (!args->header_printed) {
+ pr_info("WARN: Maps only in kallsyms:\n");
+ args->header_printed = true;
+ }
+ map__fprintf(map, stderr);
+ }
+ return 0;
+}
+
static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
int err = TEST_FAIL;
struct rb_node *nd;
struct symbol *sym;
- struct map *kallsyms_map, *vmlinux_map;
- struct map_rb_node *rb_node;
- struct machine kallsyms, vmlinux;
+ struct map *kallsyms_map;
+ struct machine vmlinux;
struct maps *maps;
u64 mem_start, mem_end;
- bool header_printed;
+ struct test__vmlinux_matches_kallsyms_cb_args args;
/*
* Step 1:
@@ -131,7 +205,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* Init the machines that will hold kernel, modules obtained from
* both vmlinux + .ko files and from /proc/kallsyms split by modules.
*/
- machine__init(&kallsyms, "", HOST_KERNEL_ID);
+ machine__init(&args.kallsyms, "", HOST_KERNEL_ID);
machine__init(&vmlinux, "", HOST_KERNEL_ID);
maps = machine__kernel_maps(&vmlinux);
@@ -143,7 +217,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* load /proc/kallsyms. Also create the modules maps from /proc/modules
* and find the .ko files that match them in /lib/modules/`uname -r`/.
*/
- if (machine__create_kernel_maps(&kallsyms) < 0) {
+ if (machine__create_kernel_maps(&args.kallsyms) < 0) {
pr_debug("machine__create_kernel_maps failed");
err = TEST_SKIP;
goto out;
@@ -160,7 +234,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* be compacted against the list of modules found in the "vmlinux"
* code and with the one got from /proc/modules from the "kallsyms" code.
*/
- if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
+ if (machine__load_kallsyms(&args.kallsyms, "/proc/kallsyms") <= 0) {
pr_debug("machine__load_kallsyms failed");
err = TEST_SKIP;
goto out;
@@ -174,7 +248,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* to see if the running kernel was relocated by checking if it has the
* same value in the vmlinux file we load.
*/
- kallsyms_map = machine__kernel_map(&kallsyms);
+ kallsyms_map = machine__kernel_map(&args.kallsyms);
/*
* Step 5:
@@ -186,7 +260,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
goto out;
}
- vmlinux_map = machine__kernel_map(&vmlinux);
+ args.vmlinux_map = machine__kernel_map(&vmlinux);
/*
* Step 6:
@@ -213,7 +287,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* in the kallsyms dso. For the ones that are in both, check its names and
* end addresses too.
*/
- map__for_each_symbol(vmlinux_map, sym, nd) {
+ map__for_each_symbol(args.vmlinux_map, sym, nd) {
struct symbol *pair, *first_pair;
sym = rb_entry(nd, struct symbol, rb_node);
@@ -221,10 +295,10 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
if (sym->start == sym->end)
continue;
- mem_start = map__unmap_ip(vmlinux_map, sym->start);
- mem_end = map__unmap_ip(vmlinux_map, sym->end);
+ mem_start = map__unmap_ip(args.vmlinux_map, sym->start);
+ mem_end = map__unmap_ip(args.vmlinux_map, sym->end);
- first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
+ first_pair = machine__find_kernel_symbol(&args.kallsyms, mem_start, NULL);
pair = first_pair;
if (pair && UM(pair->start) == mem_start) {
@@ -253,7 +327,8 @@ next_pair:
*/
continue;
} else {
- pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
+ pair = machine__find_kernel_symbol_by_name(&args.kallsyms,
+ sym->name, NULL);
if (pair) {
if (UM(pair->start) == mem_start)
goto next_pair;
@@ -267,7 +342,7 @@ next_pair:
continue;
}
- } else if (mem_start == map__end(kallsyms.vmlinux_map)) {
+ } else if (mem_start == map__end(args.kallsyms.vmlinux_map)) {
/*
* Ignore aliases to _etext, i.e. to the end of the kernel text area,
* such as __indirect_thunk_end.
@@ -289,78 +364,18 @@ next_pair:
if (verbose <= 0)
goto out;
- header_printed = false;
-
- maps__for_each_entry(maps, rb_node) {
- struct map *map = rb_node->map;
- struct dso *dso = map__dso(map);
- /*
- * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
- * the kernel will have the path for the vmlinux file being used,
- * so use the short name, less descriptive but the same ("[kernel]" in
- * both cases.
- */
- struct map *pair = maps__find_by_name(kallsyms.kmaps, (dso->kernel ?
- dso->short_name :
- dso->name));
- if (pair) {
- map__set_priv(pair, 1);
- } else {
- if (!header_printed) {
- pr_info("WARN: Maps only in vmlinux:\n");
- header_printed = true;
- }
- map__fprintf(map, stderr);
- }
- }
-
- header_printed = false;
-
- maps__for_each_entry(maps, rb_node) {
- struct map *pair, *map = rb_node->map;
-
- mem_start = map__unmap_ip(vmlinux_map, map__start(map));
- mem_end = map__unmap_ip(vmlinux_map, map__end(map));
+ args.header_printed = false;
+ maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb1, &args);
- pair = maps__find(kallsyms.kmaps, mem_start);
- if (pair == NULL || map__priv(pair))
- continue;
-
- if (map__start(pair) == mem_start) {
- struct dso *dso = map__dso(map);
-
- if (!header_printed) {
- pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
- header_printed = true;
- }
-
- pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
- map__start(map), map__end(map), map__pgoff(map), dso->name);
- if (mem_end != map__end(pair))
- pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
- map__start(pair), map__end(pair), map__pgoff(pair));
- pr_info(" %s\n", dso->name);
- map__set_priv(pair, 1);
- }
- }
-
- header_printed = false;
-
- maps = machine__kernel_maps(&kallsyms);
+ args.header_printed = false;
+ maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb2, &args);
- maps__for_each_entry(maps, rb_node) {
- struct map *map = rb_node->map;
+ args.header_printed = false;
+ maps = machine__kernel_maps(&args.kallsyms);
+ maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb3, &args);
- if (!map__priv(map)) {
- if (!header_printed) {
- pr_info("WARN: Maps only in kallsyms:\n");
- header_printed = true;
- }
- map__fprintf(map, stderr);
- }
- }
out:
- machine__exit(&kallsyms);
+ machine__exit(&args.kallsyms);
machine__exit(&vmlinux);
return err;
}
diff --git a/tools/perf/tests/workloads/thloop.c b/tools/perf/tests/workloads/thloop.c
index af05269c2eb8..457b29f91c3e 100644
--- a/tools/perf/tests/workloads/thloop.c
+++ b/tools/perf/tests/workloads/thloop.c
@@ -7,7 +7,6 @@
#include "../tests.h"
static volatile sig_atomic_t done;
-static volatile unsigned count;
/* We want to check this symbol in perf report */
noinline void test_loop(void);
@@ -19,8 +18,7 @@ static void sighandler(int sig __maybe_unused)
noinline void test_loop(void)
{
- while (!done)
- __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED);
+ while (!done);
}
static void *thfunc(void *arg)
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
index cc09dcaa891e..7df4bf5b55a3 100755
--- a/tools/perf/trace/beauty/arch_errno_names.sh
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -57,13 +57,13 @@ create_arch_errno_table_func()
archlist="$1"
default="$2"
- printf 'const char *arch_syscalls__strerrno(const char *arch, int err)\n'
+ printf 'arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch)\n'
printf '{\n'
for arch in $archlist; do
printf '\tif (!strcmp(arch, "%s"))\n' $(arch_string "$arch")
- printf '\t\treturn errno_to_name__%s(err);\n' $(arch_string "$arch")
+ printf '\t\treturn errno_to_name__%s;\n' $(arch_string "$arch")
done
- printf '\treturn errno_to_name__%s(err);\n' $(arch_string "$default")
+ printf '\treturn errno_to_name__%s;\n' $(arch_string "$default")
printf '}\n'
}
@@ -76,7 +76,9 @@ EoHEADER
# Create list of architectures that have a specific errno.h.
archlist=""
-for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | sort -r); do
+for f in $toolsdir/arch/*/include/uapi/asm/errno.h; do
+ d=${f%/include/uapi/asm/errno.h}
+ arch="${d##*/}"
test -f $toolsdir/arch/$arch/include/uapi/asm/errno.h && archlist="$archlist $arch"
done
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 788e8f6bd90e..9feb794f5c6e 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -251,6 +251,4 @@ size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool sh
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg));
-const char *arch_syscalls__strerrno(const char *arch, int err);
-
#endif /* _PERF_TRACE_BEAUTY_H */
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
index 8059342ca412..9455d9672f14 100755
--- a/tools/perf/trace/beauty/prctl_option.sh
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -4,9 +4,9 @@
[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *prctl_options[] = {\n"
-regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*\/.*)?$'
+regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*/.*)?$'
grep -E $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
- sed -r "s/$regex/\2 \1/g" | \
+ sed -E "s%$regex%\2 \1%g" | \
sort -n | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n"
diff --git a/tools/perf/trace/beauty/socket.sh b/tools/perf/trace/beauty/socket.sh
index 8bc7ba62203e..670c6db298ae 100755
--- a/tools/perf/trace/beauty/socket.sh
+++ b/tools/perf/trace/beauty/socket.sh
@@ -18,10 +18,10 @@ grep -E $ipproto_regex ${uapi_header_dir}/in.h | \
printf "};\n\n"
printf "static const char *socket_level[] = {\n"
-socket_level_regex='^#define[[:space:]]+SOL_(\w+)[[:space:]]+([[:digit:]]+)([[:space:]]+\/.*)?'
+socket_level_regex='^#define[[:space:]]+SOL_(\w+)[[:space:]]+([[:digit:]]+)([[:space:]]+/.*)?'
grep -E $socket_level_regex ${beauty_header_dir}/socket.h | \
- sed -r "s/$socket_level_regex/\2 \1/g" | \
+ sed -E "s%$socket_level_regex%\2 \1%g" | \
sort -n | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n\n"
diff --git a/tools/perf/trace/beauty/statx.c b/tools/perf/trace/beauty/statx.c
index 5f5320f7c6e2..dc5943a6352d 100644
--- a/tools/perf/trace/beauty/statx.c
+++ b/tools/perf/trace/beauty/statx.c
@@ -67,6 +67,7 @@ size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_a
P_FLAG(BTIME);
P_FLAG(MNT_ID);
P_FLAG(DIOALIGN);
+ P_FLAG(MNT_ID_UNIQUE);
#undef P_FLAG
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ccdb2cd11fbf..ec5e21932876 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -27,7 +27,6 @@ struct annotate_browser {
struct rb_node *curr_hot;
struct annotation_line *selection;
struct arch *arch;
- struct annotation_options *opts;
bool searching_backwards;
char search_bf[128];
};
@@ -38,11 +37,10 @@ static inline struct annotation *browser__annotation(struct ui_browser *browser)
return symbol__annotation(ms->sym);
}
-static bool disasm_line__filter(struct ui_browser *browser, void *entry)
+static bool disasm_line__filter(struct ui_browser *browser __maybe_unused, void *entry)
{
- struct annotation *notes = browser__annotation(browser);
struct annotation_line *al = list_entry(entry, struct annotation_line, node);
- return annotation_line__filter(al, notes);
+ return annotation_line__filter(al);
}
static int ui_browser__jumps_percent_color(struct ui_browser *browser, int nr, bool current)
@@ -97,7 +95,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
struct annotation_write_ops ops = {
.first_line = row == 0,
.current_entry = is_current_entry,
- .change_color = (!notes->options->hide_src_code &&
+ .change_color = (!annotate_opts.hide_src_code &&
(!is_current_entry ||
(browser->use_navkeypressed &&
!browser->navkeypressed))),
@@ -114,7 +112,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
if (!browser->navkeypressed)
ops.width += 1;
- annotation_line__write(al, notes, &ops, ab->opts);
+ annotation_line__write(al, notes, &ops);
if (ops.current_entry)
ab->selection = al;
@@ -128,7 +126,7 @@ static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
while (pos && pos->al.offset == -1) {
pos = list_prev_entry(pos, al.node);
- if (!ab->opts->hide_src_code)
+ if (!annotate_opts.hide_src_code)
diff++;
}
@@ -188,14 +186,14 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
* name right after the '<' token and probably treating this like a
* 'call' instruction.
*/
- target = notes->offsets[cursor->ops.target.offset];
+ target = notes->src->offsets[cursor->ops.target.offset];
if (target == NULL) {
ui_helpline__printf("WARN: jump target inconsistency, press 'o', notes->offsets[%#x] = NULL\n",
cursor->ops.target.offset);
return;
}
- if (notes->options->hide_src_code) {
+ if (annotate_opts.hide_src_code) {
from = cursor->al.idx_asm;
to = target->idx_asm;
} else {
@@ -224,7 +222,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
int ret = ui_browser__list_head_refresh(browser);
int pcnt_width = annotation__pcnt_width(notes);
- if (notes->options->jump_arrows)
+ if (annotate_opts.jump_arrows)
annotate_browser__draw_current_jump(browser);
ui_browser__set_color(browser, HE_COLORSET_NORMAL);
@@ -258,7 +256,7 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser,
parent = *p;
l = rb_entry(parent, struct annotation_line, rb_node);
- if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
+ if (disasm__cmp(al, l, annotate_opts.percent_type) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -270,7 +268,6 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser,
static void annotate_browser__set_top(struct annotate_browser *browser,
struct annotation_line *pos, u32 idx)
{
- struct annotation *notes = browser__annotation(&browser->b);
unsigned back;
ui_browser__refresh_dimensions(&browser->b);
@@ -280,7 +277,7 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
while (browser->b.top_idx != 0 && back != 0) {
pos = list_entry(pos->node.prev, struct annotation_line, node);
- if (annotation_line__filter(pos, notes))
+ if (annotation_line__filter(pos))
continue;
--browser->b.top_idx;
@@ -294,11 +291,10 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
struct rb_node *nd)
{
- struct annotation *notes = browser__annotation(&browser->b);
struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node);
u32 idx = pos->idx;
- if (notes->options->hide_src_code)
+ if (annotate_opts.hide_src_code)
idx = pos->idx_asm;
annotate_browser__set_top(browser, pos, idx);
browser->curr_hot = nd;
@@ -331,13 +327,13 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
double percent;
percent = annotation_data__percent(&pos->al.data[i],
- browser->opts->percent_type);
+ annotate_opts.percent_type);
if (max_percent < percent)
max_percent = percent;
}
- if (max_percent < 0.01 && pos->al.ipc == 0) {
+ if (max_percent < 0.01 && (!pos->al.cycles || pos->al.cycles->ipc == 0)) {
RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
@@ -380,12 +376,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
browser->b.seek(&browser->b, offset, SEEK_CUR);
al = list_entry(browser->b.top, struct annotation_line, node);
- if (notes->options->hide_src_code) {
+ if (annotate_opts.hide_src_code) {
if (al->idx_asm < offset)
offset = al->idx;
- browser->b.nr_entries = notes->nr_entries;
- notes->options->hide_src_code = false;
+ browser->b.nr_entries = notes->src->nr_entries;
+ annotate_opts.hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = al->idx - offset;
browser->b.index = al->idx;
@@ -402,8 +398,8 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
if (al->idx_asm < offset)
offset = al->idx_asm;
- browser->b.nr_entries = notes->nr_asm_entries;
- notes->options->hide_src_code = true;
+ browser->b.nr_entries = notes->src->nr_asm_entries;
+ annotate_opts.hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
browser->b.top_idx = al->idx_asm - offset;
browser->b.index = al->idx_asm;
@@ -435,7 +431,7 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser)
{
struct annotation *notes = browser__annotation(browser);
ui_browser__reset_index(browser);
- browser->nr_entries = notes->nr_asm_entries;
+ browser->nr_entries = notes->src->nr_asm_entries;
}
static int sym_title(struct symbol *sym, struct map *map, char *title,
@@ -483,8 +479,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
target_ms.map = ms->map;
target_ms.sym = dl->ops.target.sym;
annotation__unlock(notes);
- symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts);
- sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type);
+ symbol__tui_annotate(&target_ms, evsel, hbt);
+ sym_title(ms->sym, ms->map, title, sizeof(title), annotate_opts.percent_type);
ui_browser__show_title(&browser->b, title);
return true;
}
@@ -500,7 +496,7 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
list_for_each_entry(pos, &notes->src->source, al.node) {
if (pos->al.offset == offset)
return pos;
- if (!annotation_line__filter(&pos->al, notes))
+ if (!annotation_line__filter(&pos->al))
++*idx;
}
@@ -544,7 +540,7 @@ struct annotation_line *annotate_browser__find_string(struct annotate_browser *b
*idx = browser->b.index;
list_for_each_entry_continue(al, &notes->src->source, node) {
- if (annotation_line__filter(al, notes))
+ if (annotation_line__filter(al))
continue;
++*idx;
@@ -581,7 +577,7 @@ struct annotation_line *annotate_browser__find_string_reverse(struct annotate_br
*idx = browser->b.index;
list_for_each_entry_continue_reverse(al, &notes->src->source, node) {
- if (annotation_line__filter(al, notes))
+ if (annotation_line__filter(al))
continue;
--*idx;
@@ -659,7 +655,6 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help)
{
- struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
struct map_symbol *ms = browser->priv;
struct symbol *sym = ms->sym;
char symbol_dso[SYM_TITLE_MAX_SIZE];
@@ -667,7 +662,7 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const
if (ui_browser__show(browser, title, help) < 0)
return -1;
- sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type);
+ sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), annotate_opts.percent_type);
ui_browser__gotorc_title(browser, 0, 0);
ui_browser__set_color(browser, HE_COLORSET_ROOT);
@@ -809,7 +804,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
annotate_browser__show(&browser->b, title, help);
continue;
case 'k':
- notes->options->show_linenr = !notes->options->show_linenr;
+ annotate_opts.show_linenr = !annotate_opts.show_linenr;
continue;
case 'l':
annotate_browser__show_full_location (&browser->b);
@@ -822,18 +817,18 @@ static int annotate_browser__run(struct annotate_browser *browser,
ui_helpline__puts(help);
continue;
case 'o':
- notes->options->use_offset = !notes->options->use_offset;
+ annotate_opts.use_offset = !annotate_opts.use_offset;
annotation__update_column_widths(notes);
continue;
case 'O':
- if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
- notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
+ if (++annotate_opts.offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
+ annotate_opts.offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
continue;
case 'j':
- notes->options->jump_arrows = !notes->options->jump_arrows;
+ annotate_opts.jump_arrows = !annotate_opts.jump_arrows;
continue;
case 'J':
- notes->options->show_nr_jumps = !notes->options->show_nr_jumps;
+ annotate_opts.show_nr_jumps = !annotate_opts.show_nr_jumps;
annotation__update_column_widths(notes);
continue;
case '/':
@@ -860,7 +855,7 @@ show_help:
browser->b.height,
browser->b.index,
browser->b.top_idx,
- notes->nr_asm_entries);
+ notes->src->nr_asm_entries);
}
continue;
case K_ENTER:
@@ -884,7 +879,7 @@ show_sup_ins:
continue;
}
case 'P':
- map_symbol__annotation_dump(ms, evsel, browser->opts);
+ map_symbol__annotation_dump(ms, evsel);
continue;
case 't':
if (symbol_conf.show_total_period) {
@@ -897,15 +892,15 @@ show_sup_ins:
annotation__update_column_widths(notes);
continue;
case 'c':
- if (notes->options->show_minmax_cycle)
- notes->options->show_minmax_cycle = false;
+ if (annotate_opts.show_minmax_cycle)
+ annotate_opts.show_minmax_cycle = false;
else
- notes->options->show_minmax_cycle = true;
+ annotate_opts.show_minmax_cycle = true;
annotation__update_column_widths(notes);
continue;
case 'p':
case 'b':
- switch_percent_type(browser->opts, key == 'b');
+ switch_percent_type(&annotate_opts, key == 'b');
hists__scnprintf_title(hists, title, sizeof(title));
annotate_browser__show(&browser->b, title, help);
continue;
@@ -932,26 +927,24 @@ out:
}
int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt,
- struct annotation_options *opts)
+ struct hist_browser_timer *hbt)
{
- return symbol__tui_annotate(ms, evsel, hbt, opts);
+ return symbol__tui_annotate(ms, evsel, hbt);
}
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
- struct hist_browser_timer *hbt,
- struct annotation_options *opts)
+ struct hist_browser_timer *hbt)
{
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
+ SLtty_set_suspend_state(true);
- return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts);
+ return map_symbol__tui_annotate(&he->ms, evsel, hbt);
}
int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt,
- struct annotation_options *opts)
+ struct hist_browser_timer *hbt)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
@@ -965,7 +958,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
.priv = ms,
.use_navkeypressed = true,
},
- .opts = opts,
};
struct dso *dso;
int ret = -1, err;
@@ -979,7 +971,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
return -1;
if (not_annotated) {
- err = symbol__annotate2(ms, evsel, opts, &browser.arch);
+ err = symbol__annotate2(ms, evsel, &browser.arch);
if (err) {
char msg[BUFSIZ];
dso->annotate_warned = true;
@@ -991,12 +983,12 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
ui_helpline__push("Press ESC to exit");
- browser.b.width = notes->max_line_len;
- browser.b.nr_entries = notes->nr_entries;
+ browser.b.width = notes->src->max_line_len;
+ browser.b.nr_entries = notes->src->nr_entries;
browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
- if (notes->options->hide_src_code)
+ if (annotate_opts.hide_src_code)
ui_browser__init_asm_mode(&browser.b);
ret = annotate_browser__run(&browser, evsel, hbt);
@@ -1006,6 +998,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
out_free_offsets:
if(not_annotated)
- zfree(&notes->offsets);
+ zfree(&notes->src->offsets);
return ret;
}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index f4812b226818..0c02b3a8e121 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2250,8 +2250,7 @@ struct hist_browser *hist_browser__new(struct hists *hists)
static struct hist_browser *
perf_evsel_browser__new(struct evsel *evsel,
struct hist_browser_timer *hbt,
- struct perf_env *env,
- struct annotation_options *annotation_opts)
+ struct perf_env *env)
{
struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
@@ -2259,7 +2258,6 @@ perf_evsel_browser__new(struct evsel *evsel,
browser->hbt = hbt;
browser->env = env;
browser->title = hists_browser__scnprintf_title;
- browser->annotation_opts = annotation_opts;
}
return browser;
}
@@ -2432,8 +2430,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
struct hist_entry *he;
int err;
- if (!browser->annotation_opts->objdump_path &&
- perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path))
+ if (!annotate_opts.objdump_path &&
+ perf_env__lookup_objdump(browser->env, &annotate_opts.objdump_path))
return 0;
notes = symbol__annotation(act->ms.sym);
@@ -2445,8 +2443,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act)
else
evsel = hists_to_evsel(browser->hists);
- err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt,
- browser->annotation_opts);
+ err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt);
he = hist_browser__selected_entry(browser);
/*
* offer option to annotate the other branch source or target
@@ -2943,11 +2940,10 @@ next:
static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *helpline,
bool left_exits, struct hist_browser_timer *hbt, float min_pcnt,
- struct perf_env *env, bool warn_lost_event,
- struct annotation_options *annotation_opts)
+ struct perf_env *env, bool warn_lost_event)
{
struct hists *hists = evsel__hists(evsel);
- struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
+ struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
struct branch_info *bi = NULL;
#define MAX_OPTIONS 16
char *options[MAX_OPTIONS];
@@ -3004,6 +3000,7 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
+ SLtty_set_suspend_state(true);
if (min_pcnt)
browser->min_pcnt = min_pcnt;
@@ -3398,7 +3395,6 @@ out:
struct evsel_menu {
struct ui_browser b;
struct evsel *selection;
- struct annotation_options *annotation_opts;
bool lost_events, lost_events_warned;
float min_pcnt;
struct perf_env *env;
@@ -3499,8 +3495,7 @@ browse_hists:
hbt->timer(hbt->arg);
key = evsel__hists_browse(pos, nr_events, help, true, hbt,
menu->min_pcnt, menu->env,
- warn_lost_event,
- menu->annotation_opts);
+ warn_lost_event);
ui_browser__show_title(&menu->b, title);
switch (key) {
case K_TAB:
@@ -3557,7 +3552,7 @@ static bool filter_group_entries(struct ui_browser *browser __maybe_unused,
static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, const char *help,
struct hist_browser_timer *hbt, float min_pcnt, struct perf_env *env,
- bool warn_lost_event, struct annotation_options *annotation_opts)
+ bool warn_lost_event)
{
struct evsel *pos;
struct evsel_menu menu = {
@@ -3572,7 +3567,6 @@ static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, con
},
.min_pcnt = min_pcnt,
.env = env,
- .annotation_opts = annotation_opts,
};
ui_helpline__push("Press ESC to exit");
@@ -3607,8 +3601,7 @@ static bool evlist__single_entry(struct evlist *evlist)
}
int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
- float min_pcnt, struct perf_env *env, bool warn_lost_event,
- struct annotation_options *annotation_opts)
+ float min_pcnt, struct perf_env *env, bool warn_lost_event)
{
int nr_entries = evlist->core.nr_entries;
@@ -3617,7 +3610,7 @@ single_entry: {
struct evsel *first = evlist__first(evlist);
return evsel__hists_browse(first, nr_entries, help, false, hbt, min_pcnt,
- env, warn_lost_event, annotation_opts);
+ env, warn_lost_event);
}
}
@@ -3635,7 +3628,7 @@ single_entry: {
}
return __evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env,
- warn_lost_event, annotation_opts);
+ warn_lost_event);
}
static int block_hists_browser__title(struct hist_browser *browser, char *bf,
@@ -3654,8 +3647,7 @@ static int block_hists_browser__title(struct hist_browser *browser, char *bf,
}
int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
- float min_percent, struct perf_env *env,
- struct annotation_options *annotation_opts)
+ float min_percent, struct perf_env *env)
{
struct hists *hists = &bh->block_hists;
struct hist_browser *browser;
@@ -3672,11 +3664,11 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
browser->title = block_hists_browser__title;
browser->min_pcnt = min_percent;
browser->env = env;
- browser->annotation_opts = annotation_opts;
/* reset abort key so that it can get Ctrl-C as a key */
SLang_reset_tty();
SLang_init_tty(0, 0, 0);
+ SLtty_set_suspend_state(true);
memset(&action, 0, sizeof(action));
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
index 1e938d9ffa5e..de46f6c56b0e 100644
--- a/tools/perf/ui/browsers/hists.h
+++ b/tools/perf/ui/browsers/hists.h
@@ -4,7 +4,6 @@
#include "ui/browser.h"
-struct annotation_options;
struct evsel;
struct hist_browser {
@@ -15,7 +14,6 @@ struct hist_browser {
struct hist_browser_timer *hbt;
struct pstack *pstack;
struct perf_env *env;
- struct annotation_options *annotation_opts;
struct evsel *block_evsel;
int print_seq;
bool show_dso;
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 47d2c7a8cbe1..50d45054ed6c 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -166,6 +166,7 @@ void run_script(char *cmd)
printf("\033[c\033[H\033[J");
fflush(stdout);
SLang_init_tty(0, 0, 0);
+ SLtty_set_suspend_state(true);
SLsmg_refresh();
}
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 2effac77ca8c..394861245fd3 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -162,7 +162,6 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms,
}
static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *options,
struct hist_browser_timer *hbt)
{
struct dso *dso = map__dso(ms->map);
@@ -176,7 +175,7 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
if (dso->annotate_warned)
return -1;
- err = symbol__annotate(ms, evsel, options, NULL);
+ err = symbol__annotate(ms, evsel, NULL);
if (err) {
char msg[BUFSIZ];
dso->annotate_warned = true;
@@ -244,10 +243,9 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
int hist_entry__gtk_annotate(struct hist_entry *he,
struct evsel *evsel,
- struct annotation_options *options,
struct hist_browser_timer *hbt)
{
- return symbol__gtk_annotate(&he->ms, evsel, options, hbt);
+ return symbol__gtk_annotate(&he->ms, evsel, hbt);
}
void perf_gtk__show_annotations(void)
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
index 1e84dceb5267..a2b497f03fd6 100644
--- a/tools/perf/ui/gtk/gtk.h
+++ b/tools/perf/ui/gtk/gtk.h
@@ -56,13 +56,11 @@ struct evsel;
struct evlist;
struct hist_entry;
struct hist_browser_timer;
-struct annotation_options;
int evlist__gtk_browse_hists(struct evlist *evlist, const char *help,
struct hist_browser_timer *hbt, float min_pcnt);
int hist_entry__gtk_annotate(struct hist_entry *he,
struct evsel *evsel,
- struct annotation_options *options,
struct hist_browser_timer *hbt);
void perf_gtk__show_annotations(void);
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 605d9e175ea7..16c6eff4d241 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -2,12 +2,14 @@
#include <signal.h>
#include <stdbool.h>
#include <stdlib.h>
+#include <termios.h>
#include <unistd.h>
#include <linux/kernel.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
+#include "../../util/color.h"
#include "../../util/debug.h"
#include "../browser.h"
#include "../helpline.h"
@@ -121,6 +123,23 @@ static void ui__signal(int sig)
exit(0);
}
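+/*
+ * Suspend/resume handling: on SIGTSTP save the tty state and reset the
+ * terminal colors before stopping; on SIGCONT restore the saved tty
+ * state and raise SIGWINCH to force a redraw.
+ */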
+static void ui__sigcont(int sig)
+{
+ static struct termios tty;
+
+ if (sig == SIGTSTP) {
+ while (tcgetattr(SLang_TT_Read_FD, &tty) == -1 && errno == EINTR)
+ ;
+ while (write(SLang_TT_Read_FD, PERF_COLOR_RESET, sizeof(PERF_COLOR_RESET) - 1) == -1 && errno == EINTR)
+ ;
+ raise(SIGSTOP);
+ } else {
+ while (tcsetattr(SLang_TT_Read_FD, TCSADRAIN, &tty) == -1 && errno == EINTR)
+ ;
+ raise(SIGWINCH);
+ }
+}
+
int ui__init(void)
{
int err;
@@ -135,6 +154,7 @@ int ui__init(void)
err = SLang_init_tty(-1, 0, 0);
if (err < 0)
goto out;
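+ /* Allow the tty suspend character (Ctrl-Z) to generate SIGTSTP. */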
+ SLtty_set_suspend_state(true);
err = SLkp_init();
if (err < 0) {
@@ -149,6 +169,8 @@ int ui__init(void)
signal(SIGINT, ui__signal);
signal(SIGQUIT, ui__signal);
signal(SIGTERM, ui__signal);
+ signal(SIGTSTP, ui__sigcont);
+ signal(SIGCONT, ui__sigcont);
perf_error__register(&perf_tui_eops);
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 988473bf907a..8027f450fa3e 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -195,6 +195,8 @@ endif
perf-$(CONFIG_DWARF) += probe-finder.o
perf-$(CONFIG_DWARF) += dwarf-aux.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += debuginfo.o
+perf-$(CONFIG_DWARF) += annotate-data.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c
new file mode 100644
index 000000000000..f22b4f18271c
--- /dev/null
+++ b/tools/perf/util/annotate-data.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Convert sample address to data type using DWARF debug info.
+ *
+ * Written by Namhyung Kim <namhyung@kernel.org>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "annotate-data.h"
+#include "debuginfo.h"
+#include "debug.h"
+#include "dso.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "map.h"
+#include "map_symbol.h"
+#include "strbuf.h"
+#include "symbol.h"
+#include "symbol_conf.h"
+
+/*
+ * Compare type name and size to maintain them in a tree.
+ * I'm not sure if DWARF would have information about a single type in
+ * many different places (compilation units). If not, it could compare
+ * the offset of the type entry in the .debug_info section instead.
+ */
+static int data_type_cmp(const void *_key, const struct rb_node *node)
+{
+ const struct annotated_data_type *key = _key;
+ struct annotated_data_type *type;
+
+ type = rb_entry(node, struct annotated_data_type, node);
+
+ if (key->self.size != type->self.size)
+ return key->self.size - type->self.size;
+ return strcmp(key->self.type_name, type->self.type_name);
+}
+
+static bool data_type_less(struct rb_node *node_a, const struct rb_node *node_b)
+{
+ struct annotated_data_type *a, *b;
+
+ a = rb_entry(node_a, struct annotated_data_type, node);
+ b = rb_entry(node_b, struct annotated_data_type, node);
+
+ if (a->self.size != b->self.size)
+ return a->self.size < b->self.size;
+ return strcmp(a->self.type_name, b->self.type_name) < 0;
+}
+
+/* Recursively add new members for struct/union */
+static int __add_member_cb(Dwarf_Die *die, void *arg)
+{
+ struct annotated_member *parent = arg;
+ struct annotated_member *member;
+ Dwarf_Die member_type, die_mem;
+ Dwarf_Word size, loc;
+ Dwarf_Attribute attr;
+ struct strbuf sb;
+ int tag;
+
+ if (dwarf_tag(die) != DW_TAG_member)
+ return DIE_FIND_CB_SIBLING;
+
+ member = zalloc(sizeof(*member));
+ if (member == NULL)
+ return DIE_FIND_CB_END;
+
+ strbuf_init(&sb, 32);
+ die_get_typename(die, &sb);
+
+ die_get_real_type(die, &member_type);
+ if (dwarf_aggregate_size(&member_type, &size) < 0)
+ size = 0;
+
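+ /* The member offset may be absent (e.g. in a union); default to 0. */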
+ if (!dwarf_attr_integrate(die, DW_AT_data_member_location, &attr))
+ loc = 0;
+ else
+ dwarf_formudata(&attr, &loc);
+
+ member->type_name = strbuf_detach(&sb, NULL);
+ /* member->var_name can be NULL */
+ if (dwarf_diename(die))
+ member->var_name = strdup(dwarf_diename(die));
+ member->size = size;
+ member->offset = loc + parent->offset;
+ INIT_LIST_HEAD(&member->children);
+ list_add_tail(&member->node, &parent->children);
+
+ tag = dwarf_tag(&member_type);
+ switch (tag) {
+ case DW_TAG_structure_type:
+ case DW_TAG_union_type:
+ die_find_child(&member_type, __add_member_cb, member, &die_mem);
+ break;
+ default:
+ break;
+ }
+ return DIE_FIND_CB_SIBLING;
+}
+
+static void add_member_types(struct annotated_data_type *parent, Dwarf_Die *type)
+{
+ Dwarf_Die die_mem;
+
+ die_find_child(type, __add_member_cb, &parent->self, &die_mem);
+}
+
+static void delete_members(struct annotated_member *member)
+{
+ struct annotated_member *child, *tmp;
+
+ list_for_each_entry_safe(child, tmp, &member->children, node) {
+ list_del(&child->node);
+ delete_members(child);
+ free(child->type_name);
+ free(child->var_name);
+ free(child);
+ }
+}
+
+static struct annotated_data_type *dso__findnew_data_type(struct dso *dso,
+ Dwarf_Die *type_die)
+{
+ struct annotated_data_type *result = NULL;
+ struct annotated_data_type key;
+ struct rb_node *node;
+ struct strbuf sb;
+ char *type_name;
+ Dwarf_Word size;
+
+ strbuf_init(&sb, 32);
+ if (die_get_typename_from_type(type_die, &sb) < 0)
+ strbuf_add(&sb, "(unknown type)", 14);
+ type_name = strbuf_detach(&sb, NULL);
+ dwarf_aggregate_size(type_die, &size);
+
+ /* Check existing nodes in dso->data_types tree */
+ key.self.type_name = type_name;
+ key.self.size = size;
+ node = rb_find(&key, &dso->data_types, data_type_cmp);
+ if (node) {
+ result = rb_entry(node, struct annotated_data_type, node);
+ free(type_name);
+ return result;
+ }
+
+ /* If not, add a new one */
+ result = zalloc(sizeof(*result));
+ if (result == NULL) {
+ free(type_name);
+ return NULL;
+ }
+
+ result->self.type_name = type_name;
+ result->self.size = size;
+ INIT_LIST_HEAD(&result->self.children);
+
+ if (symbol_conf.annotate_data_member)
+ add_member_types(result, type_die);
+
+ rb_add(&result->node, &dso->data_types, data_type_less);
+ return result;
+}
+
+static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die)
+{
+ Dwarf_Off off, next_off;
+ size_t header_size;
+
+ if (dwarf_addrdie(di->dbg, pc, cu_die) != NULL)
+ return true;
+
+ /*
+ * Some kernels don't have full aranges and contain only a few aranges
+ * entries. Fall back to iterating over all CU entries in .debug_info
+ * in case the address is missing there.
+ */
+ off = 0;
+ while (dwarf_nextcu(di->dbg, off, &next_off, &header_size,
+ NULL, NULL, NULL) == 0) {
+ if (dwarf_offdie(di->dbg, off + header_size, cu_die) &&
+ dwarf_haspc(cu_die, pc))
+ return true;
+
+ off = next_off;
+ }
+ return false;
+}
+
+/* The type info will be saved in @type_die */
+static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset)
+{
+ Dwarf_Word size;
+
+ /* Get the type of the variable */
+ if (die_get_real_type(var_die, type_die) == NULL) {
+ pr_debug("variable has no type\n");
+ ann_data_stat.no_typeinfo++;
+ return -1;
+ }
+
+ /*
+ * A memory access is expected to go through a pointer type.
+ * Convert it to the real type it points to.
+ */
+ if (dwarf_tag(type_die) != DW_TAG_pointer_type ||
+ die_get_real_type(type_die, type_die) == NULL) {
+ pr_debug("no pointer or no type\n");
+ ann_data_stat.no_typeinfo++;
+ return -1;
+ }
+
+ /* Get the size of the actual type */
+ if (dwarf_aggregate_size(type_die, &size) < 0) {
+ pr_debug("type size is unknown\n");
+ ann_data_stat.invalid_size++;
+ return -1;
+ }
+
+ /* Minimal sanity check */
+ if ((unsigned)offset >= size) {
+ pr_debug("offset: %d is bigger than size: %" PRIu64 "\n", offset, size);
+ ann_data_stat.bad_offset++;
+ return -1;
+ }
+
+ return 0;
+}
+
+/* The result will be saved in @type_die */
+static int find_data_type_die(struct debuginfo *di, u64 pc,
+ int reg, int offset, Dwarf_Die *type_die)
+{
+ Dwarf_Die cu_die, var_die;
+ Dwarf_Die *scopes = NULL;
+ int ret = -1;
+ int i, nr_scopes;
+
+ /* Get a compile_unit for this address */
+ if (!find_cu_die(di, pc, &cu_die)) {
+ pr_debug("cannot find CU for address %" PRIx64 "\n", pc);
+ ann_data_stat.no_cuinfo++;
+ return -1;
+ }
+
+ /* Get a list of nested scopes - i.e. (inlined) functions and blocks. */
+ nr_scopes = die_get_scopes(&cu_die, pc, &scopes);
+
+ /* Search from the inner-most scope to the outer */
+ for (i = nr_scopes - 1; i >= 0; i--) {
+ /* Look up variables/parameters in this scope */
+ if (!die_find_variable_by_reg(&scopes[i], pc, reg, &var_die))
+ continue;
+
+ /* Found a variable, see if it's correct */
+ ret = check_variable(&var_die, type_die, offset);
+ goto out;
+ }
+ if (ret < 0)
+ ann_data_stat.no_var++;
+
+out:
+ free(scopes);
+ return ret;
+}
+
+/**
+ * find_data_type - Return a data type at the location
+ * @ms: map and symbol at the location
+ * @ip: instruction address of the memory access
+ * @reg: register that holds the base address
+ * @offset: offset from the base address
+ *
+ * This function searches the debug information of the binary for the data
+ * type accessed at the exact location expressed by (ip, reg, offset).
+ * It returns %NULL if not found.
+ */
+struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
+ int reg, int offset)
+{
+ struct annotated_data_type *result = NULL;
+ struct dso *dso = map__dso(ms->map);
+ struct debuginfo *di;
+ Dwarf_Die type_die;
+ u64 pc;
+
+ di = debuginfo__new(dso->long_name);
+ if (di == NULL) {
+ pr_debug("cannot get the debug info\n");
+ return NULL;
+ }
+
+ /*
+ * IP is a relative instruction address from the start of the map. As
+ * it can be randomized/relocated, it needs to be translated to a PC,
+ * which is a file address for DWARF processing.
+ */
+ pc = map__rip_2objdump(ms->map, ip);
+ if (find_data_type_die(di, pc, reg, offset, &type_die) < 0)
+ goto out;
+
+ result = dso__findnew_data_type(dso, &type_die);
+
+out:
+ debuginfo__delete(di);
+ return result;
+}
+
+static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_entries)
+{
+ int i;
+ size_t sz = sizeof(struct type_hist);
+
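+ /* One type_hist_entry in the flexible array for each byte of the type. */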
+ sz += sizeof(struct type_hist_entry) * adt->self.size;
+
+ /* Allocate a table of pointers for each event */
+ adt->nr_histograms = nr_entries;
+ adt->histograms = calloc(nr_entries, sizeof(*adt->histograms));
+ if (adt->histograms == NULL)
+ return -ENOMEM;
+
+ /*
+ * Each histogram is allocated for the whole size of the type.
+ * TODO: Probably we can move the histogram to members.
+ */
+ for (i = 0; i < nr_entries; i++) {
+ adt->histograms[i] = zalloc(sz);
+ if (adt->histograms[i] == NULL)
+ goto err;
+ }
+ return 0;
+
+err:
+ while (--i >= 0)
+ free(adt->histograms[i]);
+ free(adt->histograms);
+ return -ENOMEM;
+}
+
+static void delete_data_type_histograms(struct annotated_data_type *adt)
+{
+ for (int i = 0; i < adt->nr_histograms; i++)
+ free(adt->histograms[i]);
+ free(adt->histograms);
+}
+
+void annotated_data_type__tree_delete(struct rb_root *root)
+{
+ struct annotated_data_type *pos;
+
+ while (!RB_EMPTY_ROOT(root)) {
+ struct rb_node *node = rb_first(root);
+
+ rb_erase(node, root);
+ pos = rb_entry(node, struct annotated_data_type, node);
+ delete_members(&pos->self);
+ delete_data_type_histograms(pos);
+ free(pos->self.type_name);
+ free(pos);
+ }
+}
+
+/**
+ * annotated_data_type__update_samples - Update histogram
+ * @adt: Data type to update
+ * @evsel: Event to update
+ * @offset: Offset in the type
+ * @nr_samples: Number of samples at this offset
+ * @period: Event count at this offset
+ *
+ * This function updates the type histogram at @offset for @evsel. Samples
+ * are aggregated before calling this function, so it can be called with
+ * more than one sample at a given offset.
+ */
+int annotated_data_type__update_samples(struct annotated_data_type *adt,
+ struct evsel *evsel, int offset,
+ int nr_samples, u64 period)
+{
+ struct type_hist *h;
+
+ if (adt == NULL)
+ return 0;
+
+ if (adt->histograms == NULL) {
+ int nr = evsel->evlist->core.nr_entries;
+
+ if (alloc_data_type_histograms(adt, nr) < 0)
+ return -1;
+ }
+
+ if (offset < 0 || offset >= adt->self.size)
+ return -1;
+
+ h = adt->histograms[evsel->core.idx];
+
+ h->nr_samples += nr_samples;
+ h->addr[offset].nr_samples += nr_samples;
+ h->period += period;
+ h->addr[offset].period += period;
+ return 0;
+}
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
new file mode 100644
index 000000000000..8e73096c01d1
--- /dev/null
+++ b/tools/perf/util/annotate-data.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PERF_ANNOTATE_DATA_H
+#define _PERF_ANNOTATE_DATA_H
+
+#include <errno.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct evsel;
+struct map_symbol;
+
+/**
+ * struct annotated_member - Type of member field
+ * @node: List entry in the parent list
+ * @children: List head for child nodes
+ * @type_name: Name of the member type
+ * @var_name: Name of the member variable
+ * @offset: Offset from the outer data type
+ * @size: Size of the member field
+ *
+ * This represents a member type in a data type.
+ */
+struct annotated_member {
+ struct list_head node;
+ struct list_head children;
+ char *type_name;
+ char *var_name;
+ int offset;
+ int size;
+};
+
+/**
+ * struct type_hist_entry - Histogram entry per offset
+ * @nr_samples: Number of samples
+ * @period: Count of event
+ */
+struct type_hist_entry {
+ int nr_samples;
+ u64 period;
+};
+
+/**
+ * struct type_hist - Type histogram for each event
+ * @nr_samples: Total number of samples in this data type
+ * @period: Total count of the event in this data type
+ * @addr: Array of histogram entries, one per byte offset of the type
+ */
+struct type_hist {
+ u64 nr_samples;
+ u64 period;
+ struct type_hist_entry addr[];
+};
+
+/**
+ * struct annotated_data_type - Data type to profile
+ * @node: RB-tree node for the dso->data_types tree
+ * @self: Actual type information
+ * @nr_histograms: Number of histograms, one per event
+ * @histograms: An array of pointers to histograms
+ *
+ * This represents a data type accessed by samples in the profile data.
+ */
+struct annotated_data_type {
+ struct rb_node node;
+ struct annotated_member self;
+ int nr_histograms;
+ struct type_hist **histograms;
+};
+
+extern struct annotated_data_type unknown_type;
+
+/**
+ * struct annotated_data_stat - Debug statistics
+ * @total: Total number of entries
+ * @no_sym: No symbol or map found
+ * @no_insn: Failed to get disasm line
+ * @no_insn_ops: The instruction has no operands
+ * @no_mem_ops: The instruction has no memory operands
+ * @no_reg: Failed to extract a register from the operand
+ * @no_dbginfo: The binary has no debug information
+ * @no_cuinfo: Failed to find a compile_unit
+ * @no_var: Failed to find a matching variable
+ * @no_typeinfo: Failed to get a type info for the variable
+ * @invalid_size: Failed to get a size info of the type
+ * @bad_offset: The access offset is out of the type
+ */
+struct annotated_data_stat {
+ int total;
+ int no_sym;
+ int no_insn;
+ int no_insn_ops;
+ int no_mem_ops;
+ int no_reg;
+ int no_dbginfo;
+ int no_cuinfo;
+ int no_var;
+ int no_typeinfo;
+ int invalid_size;
+ int bad_offset;
+};
+extern struct annotated_data_stat ann_data_stat;
+
+#ifdef HAVE_DWARF_SUPPORT
+
+/* Returns data type at the location (ip, reg, offset) */
+struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
+ int reg, int offset);
+
+/* Update type access histogram at the given offset */
+int annotated_data_type__update_samples(struct annotated_data_type *adt,
+ struct evsel *evsel, int offset,
+ int nr_samples, u64 period);
+
+/* Release all data type information in the tree */
+void annotated_data_type__tree_delete(struct rb_root *root);
+
+#else /* HAVE_DWARF_SUPPORT */
+
+static inline struct annotated_data_type *
+find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused,
+ int reg __maybe_unused, int offset __maybe_unused)
+{
+ return NULL;
+}
+
+static inline int
+annotated_data_type__update_samples(struct annotated_data_type *adt __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ int offset __maybe_unused,
+ int nr_samples __maybe_unused,
+ u64 period __maybe_unused)
+{
+ return -1;
+}
+
+static inline void annotated_data_type__tree_delete(struct rb_root *root __maybe_unused)
+{
+}
+
+#endif /* HAVE_DWARF_SUPPORT */
+
+#endif /* _PERF_ANNOTATE_DATA_H */
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 82956adf9963..9b70ab110ce7 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -25,12 +25,14 @@
#include "units.h"
#include "debug.h"
#include "annotate.h"
+#include "annotate-data.h"
#include "evsel.h"
#include "evlist.h"
#include "bpf-event.h"
#include "bpf-utils.h"
#include "block-range.h"
#include "string2.h"
+#include "dwarf-regs.h"
#include "util/event.h"
#include "util/sharded_mutex.h"
#include "arch/common.h"
@@ -57,6 +59,9 @@
#include <linux/ctype.h>
+/* global annotation options */
+struct annotation_options annotate_opts;
+
static regex_t file_lineno;
static struct ins_ops *ins__find(struct arch *arch, const char *name);
@@ -85,6 +90,8 @@ struct arch {
struct {
char comment_char;
char skip_functions_char;
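+ /* Used by check_multi_regs() to parse operands, e.g. '%' and '(' on x86 */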
+ char register_char;
+ char memory_ref_char;
} objdump;
};
@@ -96,6 +103,10 @@ static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;
+/* Data type collection debug statistics */
+struct annotated_data_stat ann_data_stat;
+LIST_HEAD(ann_insn_stat);
+
static int arch__grow_instructions(struct arch *arch)
{
struct ins *new_instructions;
@@ -188,6 +199,8 @@ static struct arch architectures[] = {
.insn_suffix = "bwlq",
.objdump = {
.comment_char = '#',
+ .register_char = '%',
+ .memory_ref_char = '(',
},
},
{
@@ -340,10 +353,10 @@ bool ins__is_call(const struct ins *ins)
*/
static inline const char *validate_comma(const char *c, struct ins_operands *ops)
{
- if (ops->raw_comment && c > ops->raw_comment)
+ if (ops->jump.raw_comment && c > ops->jump.raw_comment)
return NULL;
- if (ops->raw_func_start && c > ops->raw_func_start)
+ if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
return NULL;
return c;
@@ -359,8 +372,8 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
const char *c = strchr(ops->raw, ',');
u64 start, end;
- ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
- ops->raw_func_start = strchr(ops->raw, '<');
+ ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+ ops->jump.raw_func_start = strchr(ops->raw, '<');
c = validate_comma(c, ops);
@@ -462,7 +475,16 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
ops->target.offset);
}
+static void jump__delete(struct ins_operands *ops __maybe_unused)
+{
+ /*
+ * ops->jump.raw_comment and ops->jump.raw_func_start point into the
+ * raw string, so don't free them.
+ */
+}
+
static struct ins_ops jump_ops = {
+ .free = jump__delete,
.parse = jump__parse,
.scnprintf = jump__scnprintf,
};
@@ -557,6 +579,34 @@ static struct ins_ops lock_ops = {
.scnprintf = lock__scnprintf,
};
+/*
+ * Check if the operand has more than one register, like x86 SIB addressing:
+ *   0x1234(%rax, %rbx, 8)
+ *
+ * It doesn't handle segment selectors like %gs:0x5678(%rcx), so it just
+ * checks the input string after 'memory_ref_char', if present.
+ */
+static bool check_multi_regs(struct arch *arch, const char *op)
+{
+ int count = 0;
+
+ if (arch->objdump.register_char == 0)
+ return false;
+
+ if (arch->objdump.memory_ref_char) {
+ op = strchr(op, arch->objdump.memory_ref_char);
+ if (op == NULL)
+ return false;
+ }
+
+ while ((op = strchr(op, arch->objdump.register_char)) != NULL) {
+ count++;
+ op++;
+ }
+
+ return count > 1;
+}
+
static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *comment, prev;
@@ -584,6 +634,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_sy
if (ops->source.raw == NULL)
return -1;
+ ops->source.multi_regs = check_multi_regs(arch, ops->source.raw);
+
target = skip_spaces(++s);
comment = strchr(s, arch->objdump.comment_char);
@@ -604,6 +656,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_sy
if (ops->target.raw == NULL)
goto out_free_source;
+ ops->target.multi_regs = check_multi_regs(arch, ops->target.raw);
+
if (comment == NULL)
return 0;
@@ -795,6 +849,11 @@ static struct arch *arch__find(const char *name)
return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
+bool arch__is(struct arch *arch, const char *name)
+{
+ return !strcmp(arch->name, name);
+}
+
static struct annotated_source *annotated_source__new(void)
{
struct annotated_source *src = zalloc(sizeof(*src));
@@ -810,7 +869,6 @@ static __maybe_unused void annotated_source__delete(struct annotated_source *src
if (src == NULL)
return;
zfree(&src->histograms);
- zfree(&src->cycles_hist);
free(src);
}
@@ -845,18 +903,6 @@ static int annotated_source__alloc_histograms(struct annotated_source *src,
return src->histograms ? 0 : -1;
}
-/* The cycles histogram is lazily allocated. */
-static int symbol__alloc_hist_cycles(struct symbol *sym)
-{
- struct annotation *notes = symbol__annotation(sym);
- const size_t size = symbol__size(sym);
-
- notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
- if (notes->src->cycles_hist == NULL)
- return -1;
- return 0;
-}
-
void symbol__annotate_zero_histograms(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
@@ -865,9 +911,10 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
if (notes->src != NULL) {
memset(notes->src->histograms, 0,
notes->src->nr_histograms * notes->src->sizeof_sym_hist);
- if (notes->src->cycles_hist)
- memset(notes->src->cycles_hist, 0,
- symbol__size(sym) * sizeof(struct cyc_hist));
+ }
+ if (notes->branch && notes->branch->cycles_hist) {
+ memset(notes->branch->cycles_hist, 0,
+ symbol__size(sym) * sizeof(struct cyc_hist));
}
annotation__unlock(notes);
}
@@ -958,23 +1005,33 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
return 0;
}
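+/* Lazily allocate the branch info (cycles histogram etc.) on first use. */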
+struct annotated_branch *annotation__get_branch(struct annotation *notes)
+{
+ if (notes == NULL)
+ return NULL;
+
+ if (notes->branch == NULL)
+ notes->branch = zalloc(sizeof(*notes->branch));
+
+ return notes->branch;
+}
+
static struct cyc_hist *symbol__cycles_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
+ struct annotated_branch *branch;
- if (notes->src == NULL) {
- notes->src = annotated_source__new();
- if (notes->src == NULL)
- return NULL;
- goto alloc_cycles_hist;
- }
+ branch = annotation__get_branch(notes);
+ if (branch == NULL)
+ return NULL;
+
+ if (branch->cycles_hist == NULL) {
+ const size_t size = symbol__size(sym);
- if (!notes->src->cycles_hist) {
-alloc_cycles_hist:
- symbol__alloc_hist_cycles(sym);
+ branch->cycles_hist = calloc(size, sizeof(struct cyc_hist));
}
- return notes->src->cycles_hist;
+ return branch->cycles_hist;
}
struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
@@ -1077,12 +1134,20 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64
u64 offset;
for (offset = start; offset <= end; offset++) {
- if (notes->offsets[offset])
+ if (notes->src->offsets[offset])
n_insn++;
}
return n_insn;
}
+static void annotated_branch__delete(struct annotated_branch *branch)
+{
+ if (branch) {
+ zfree(&branch->cycles_hist);
+ free(branch);
+ }
+}
+
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
unsigned n_insn;
@@ -1091,6 +1156,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
n_insn = annotation__count_insn(notes, start, end);
if (n_insn && ch->num && ch->cycles) {
+ struct annotated_branch *branch;
float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
/* Hide data when there are too many overlaps. */
@@ -1098,54 +1164,76 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
return;
for (offset = start; offset <= end; offset++) {
- struct annotation_line *al = notes->offsets[offset];
+ struct annotation_line *al = notes->src->offsets[offset];
- if (al && al->ipc == 0.0) {
- al->ipc = ipc;
+ if (al && al->cycles && al->cycles->ipc == 0.0) {
+ al->cycles->ipc = ipc;
cover_insn++;
}
}
- if (cover_insn) {
- notes->hit_cycles += ch->cycles;
- notes->hit_insn += n_insn * ch->num;
- notes->cover_insn += cover_insn;
+ branch = annotation__get_branch(notes);
+ if (cover_insn && branch) {
+ branch->hit_cycles += ch->cycles;
+ branch->hit_insn += n_insn * ch->num;
+ branch->cover_insn += cover_insn;
}
}
}
-void annotation__compute_ipc(struct annotation *notes, size_t size)
+static int annotation__compute_ipc(struct annotation *notes, size_t size)
{
+ int err = 0;
s64 offset;
- if (!notes->src || !notes->src->cycles_hist)
- return;
+ if (!notes->branch || !notes->branch->cycles_hist)
+ return 0;
- notes->total_insn = annotation__count_insn(notes, 0, size - 1);
- notes->hit_cycles = 0;
- notes->hit_insn = 0;
- notes->cover_insn = 0;
+ notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1);
+ notes->branch->hit_cycles = 0;
+ notes->branch->hit_insn = 0;
+ notes->branch->cover_insn = 0;
annotation__lock(notes);
for (offset = size - 1; offset >= 0; --offset) {
struct cyc_hist *ch;
- ch = &notes->src->cycles_hist[offset];
+ ch = &notes->branch->cycles_hist[offset];
if (ch && ch->cycles) {
struct annotation_line *al;
+ al = notes->src->offsets[offset];
+ if (al && al->cycles == NULL) {
+ al->cycles = zalloc(sizeof(*al->cycles));
+ if (al->cycles == NULL) {
+ err = ENOMEM;
+ break;
+ }
+ }
if (ch->have_start)
annotation__count_and_fill(notes, ch->start, offset, ch);
- al = notes->offsets[offset];
if (al && ch->num_aggr) {
- al->cycles = ch->cycles_aggr / ch->num_aggr;
- al->cycles_max = ch->cycles_max;
- al->cycles_min = ch->cycles_min;
+ al->cycles->avg = ch->cycles_aggr / ch->num_aggr;
+ al->cycles->max = ch->cycles_max;
+ al->cycles->min = ch->cycles_min;
+ }
+ }
+ }
+
+ if (err) {
+ while (++offset < (s64)size) {
+ struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
+
+ if (ch && ch->cycles) {
+ struct annotation_line *al = notes->src->offsets[offset];
+ if (al)
+ zfree(&al->cycles);
}
- notes->have_cycles = true;
}
}
+
annotation__unlock(notes);
+ return err;
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
@@ -1225,6 +1313,7 @@ static void annotation_line__exit(struct annotation_line *al)
{
zfree_srcline(&al->path);
zfree(&al->line);
+ zfree(&al->cycles);
}
static size_t disasm_line_size(int nr)
@@ -1299,6 +1388,7 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r
void annotation__exit(struct annotation *notes)
{
annotated_source__delete(notes->src);
+ annotated_branch__delete(notes->branch);
}
static struct sharded_mutex *sharded_mutex;
@@ -1817,7 +1907,6 @@ static int symbol__disassemble_bpf(struct symbol *sym,
struct annotate_args *args)
{
struct annotation *notes = symbol__annotation(sym);
- struct annotation_options *opts = args->options;
struct bpf_prog_linfo *prog_linfo = NULL;
struct bpf_prog_info_node *info_node;
int len = sym->end - sym->start;
@@ -1927,7 +2016,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
prev_buf_size = buf_size;
fflush(s);
- if (!opts->hide_src_code && srcline) {
+ if (!annotate_opts.hide_src_code && srcline) {
args->offset = -1;
args->line = strdup(srcline);
args->line_nr = 0;
@@ -2050,7 +2139,7 @@ static char *expand_tabs(char *line, char **storage, size_t *storage_len)
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
- struct annotation_options *opts = args->options;
+ struct annotation_options *opts = &annotate_opts;
struct map *map = args->ms.map;
struct dso *dso = map__dso(map);
char *command;
@@ -2113,12 +2202,13 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
err = asprintf(&command,
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -l -d %s %s %s %c%s%c %s%s -C \"$1\"",
+ " %s -d %s %s %s %c%s%c %s%s -C \"$1\"",
opts->objdump_path ?: "objdump",
opts->disassembler_style ? "-M " : "",
opts->disassembler_style ?: "",
map__rip_2objdump(map, sym->start),
map__rip_2objdump(map, sym->end),
+ opts->show_linenr ? "-l" : "",
opts->show_asm_raw ? "" : "--no-show-raw-insn",
opts->annotate_src ? "-S" : "",
opts->prefix ? "--prefix " : "",
@@ -2299,15 +2389,8 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
annotation__calc_percent(notes, evsel, symbol__size(sym));
}
-int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *options, struct arch **parch)
+static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
{
- struct symbol *sym = ms->sym;
- struct annotation *notes = symbol__annotation(sym);
- struct annotate_args args = {
- .evsel = evsel,
- .options = options,
- };
struct perf_env *env = evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
struct arch *arch;
@@ -2316,25 +2399,45 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
if (!arch_name)
return errno;
- args.arch = arch = arch__find(arch_name);
+ *parch = arch = arch__find(arch_name);
if (arch == NULL) {
pr_err("%s: unsupported arch %s\n", __func__, arch_name);
return ENOTSUP;
}
- if (parch)
- *parch = arch;
-
if (arch->init) {
err = arch->init(arch, env ? env->cpuid : NULL);
if (err) {
- pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
+ pr_err("%s: failed to initialize %s arch priv area\n",
+ __func__, arch->name);
return err;
}
}
+ return 0;
+}
+int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
+ struct arch **parch)
+{
+ struct symbol *sym = ms->sym;
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotate_args args = {
+ .evsel = evsel,
+ .options = &annotate_opts,
+ };
+ struct arch *arch = NULL;
+ int err;
+
+ err = evsel__get_arch(evsel, &arch);
+ if (err < 0)
+ return err;
+
+ if (parch)
+ *parch = arch;
+
+ args.arch = arch;
args.ms = *ms;
- if (notes->options && notes->options->full_addr)
+ if (annotate_opts.full_addr)
notes->start = map__objdump_2mem(ms->map, ms->sym->start);
else
notes->start = map__rip_2objdump(ms->map, ms->sym->start);
@@ -2342,12 +2445,12 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
return symbol__disassemble(sym, &args);
}
-static void insert_source_line(struct rb_root *root, struct annotation_line *al,
- struct annotation_options *opts)
+static void insert_source_line(struct rb_root *root, struct annotation_line *al)
{
struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
+ unsigned int percent_type = annotate_opts.percent_type;
int i, ret;
while (*p != NULL) {
@@ -2358,7 +2461,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al,
if (ret == 0) {
for (i = 0; i < al->data_nr; i++) {
iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
- opts->percent_type);
+ percent_type);
}
return;
}
@@ -2371,7 +2474,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al,
for (i = 0; i < al->data_nr; i++) {
al->data[i].percent_sum = annotation_data__percent(&al->data[i],
- opts->percent_type);
+ percent_type);
}
rb_link_node(&al->rb_node, parent, p);
@@ -2493,8 +2596,7 @@ static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
return 0;
}
-int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *opts)
+int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
{
struct map *map = ms->map;
struct symbol *sym = ms->sym;
@@ -2505,6 +2607,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
struct annotation_line *pos, *queue = NULL;
+ struct annotation_options *opts = &annotate_opts;
u64 start = map__rip_2objdump(map, sym->start);
int printed = 2, queue_len = 0, addr_fmt_width;
int more = 0;
@@ -2633,8 +2736,7 @@ static void FILE__write_graph(void *fp, int graph)
fputs(s, fp);
}
-static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
- struct annotation_options *opts)
+static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
{
struct annotation *notes = symbol__annotation(sym);
struct annotation_write_ops wops = {
@@ -2649,9 +2751,9 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
struct annotation_line *al;
list_for_each_entry(al, &notes->src->source, node) {
- if (annotation_line__filter(al, notes))
+ if (annotation_line__filter(al))
continue;
- annotation_line__write(al, notes, &wops, opts);
+ annotation_line__write(al, notes, &wops);
fputc('\n', fp);
wops.first_line = false;
}
@@ -2659,8 +2761,7 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
return 0;
}
-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *opts)
+int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
{
const char *ev_name = evsel__name(evsel);
char buf[1024];
@@ -2682,7 +2783,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
fprintf(fp, "%s() %s\nEvent: %s\n\n",
ms->sym->name, map__dso(ms->map)->long_name, ev_name);
- symbol__annotate_fprintf2(ms->sym, fp, opts);
+ symbol__annotate_fprintf2(ms->sym, fp);
fclose(fp);
err = 0;
@@ -2769,7 +2870,7 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
return;
for (offset = 0; offset < size; ++offset) {
- struct annotation_line *al = notes->offsets[offset];
+ struct annotation_line *al = notes->src->offsets[offset];
struct disasm_line *dl;
dl = disasm_line(al);
@@ -2777,7 +2878,7 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
if (!disasm_line__is_valid_local_jump(dl, sym))
continue;
- al = notes->offsets[dl->ops.target.offset];
+ al = notes->src->offsets[dl->ops.target.offset];
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
@@ -2794,19 +2895,20 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
void annotation__set_offsets(struct annotation *notes, s64 size)
{
struct annotation_line *al;
+ struct annotated_source *src = notes->src;
- notes->max_line_len = 0;
- notes->nr_entries = 0;
- notes->nr_asm_entries = 0;
+ src->max_line_len = 0;
+ src->nr_entries = 0;
+ src->nr_asm_entries = 0;
- list_for_each_entry(al, &notes->src->source, node) {
+ list_for_each_entry(al, &src->source, node) {
size_t line_len = strlen(al->line);
- if (notes->max_line_len < line_len)
- notes->max_line_len = line_len;
- al->idx = notes->nr_entries++;
+ if (src->max_line_len < line_len)
+ src->max_line_len = line_len;
+ al->idx = src->nr_entries++;
if (al->offset != -1) {
- al->idx_asm = notes->nr_asm_entries++;
+ al->idx_asm = src->nr_asm_entries++;
/*
* FIXME: short term bandaid to cope with assembly
* routines that comes with labels in the same column
@@ -2815,7 +2917,7 @@ void annotation__set_offsets(struct annotation *notes, s64 size)
* E.g. copy_user_generic_unrolled
*/
if (al->offset < size)
- notes->offsets[al->offset] = al;
+ notes->src->offsets[al->offset] = al;
} else
al->idx_asm = -1;
}
@@ -2858,24 +2960,24 @@ void annotation__init_column_widths(struct annotation *notes, struct symbol *sym
void annotation__update_column_widths(struct annotation *notes)
{
- if (notes->options->use_offset)
+ if (annotate_opts.use_offset)
notes->widths.target = notes->widths.min_addr;
- else if (notes->options->full_addr)
+ else if (annotate_opts.full_addr)
notes->widths.target = BITS_PER_LONG / 4;
else
notes->widths.target = notes->widths.max_addr;
notes->widths.addr = notes->widths.target;
- if (notes->options->show_nr_jumps)
+ if (annotate_opts.show_nr_jumps)
notes->widths.addr += notes->widths.jumps + 1;
}
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
{
- notes->options->full_addr = !notes->options->full_addr;
+ annotate_opts.full_addr = !annotate_opts.full_addr;
- if (notes->options->full_addr)
+ if (annotate_opts.full_addr)
notes->start = map__objdump_2mem(ms->map, ms->sym->start);
else
notes->start = map__rip_2objdump(ms->map, ms->sym->start);
@@ -2884,8 +2986,7 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m
}
static void annotation__calc_lines(struct annotation *notes, struct map *map,
- struct rb_root *root,
- struct annotation_options *opts)
+ struct rb_root *root)
{
struct annotation_line *al;
struct rb_root tmp_root = RB_ROOT;
@@ -2898,7 +2999,7 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
double percent;
percent = annotation_data__percent(&al->data[i],
- opts->percent_type);
+ annotate_opts.percent_type);
if (percent > percent_max)
percent_max = percent;
@@ -2909,22 +3010,20 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
false, true, notes->start + al->offset);
- insert_source_line(&tmp_root, al, opts);
+ insert_source_line(&tmp_root, al);
}
resort_source_line(root, &tmp_root);
}
-static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root,
- struct annotation_options *opts)
+static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
{
struct annotation *notes = symbol__annotation(ms->sym);
- annotation__calc_lines(notes, ms->map, root, opts);
+ annotation__calc_lines(notes, ms->map, root);
}
-int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *opts)
+int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
{
struct dso *dso = map__dso(ms->map);
struct symbol *sym = ms->sym;
@@ -2933,7 +3032,7 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
char buf[1024];
int err;
- err = symbol__annotate2(ms, evsel, opts, NULL);
+ err = symbol__annotate2(ms, evsel, NULL);
if (err) {
char msg[BUFSIZ];
@@ -2943,31 +3042,31 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel,
return -1;
}
- if (opts->print_lines) {
- srcline_full_filename = opts->full_path;
- symbol__calc_lines(ms, &source_line, opts);
+ if (annotate_opts.print_lines) {
+ srcline_full_filename = annotate_opts.full_path;
+ symbol__calc_lines(ms, &source_line);
print_summary(&source_line, dso->long_name);
}
hists__scnprintf_title(hists, buf, sizeof(buf));
fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
- buf, percent_type_str(opts->percent_type), sym->name, dso->long_name);
- symbol__annotate_fprintf2(sym, stdout, opts);
+ buf, percent_type_str(annotate_opts.percent_type), sym->name,
+ dso->long_name);
+ symbol__annotate_fprintf2(sym, stdout);
annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
-int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *opts)
+int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
{
struct dso *dso = map__dso(ms->map);
struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
int err;
- err = symbol__annotate(ms, evsel, opts, NULL);
+ err = symbol__annotate(ms, evsel, NULL);
if (err) {
char msg[BUFSIZ];
@@ -2979,13 +3078,13 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
symbol__calc_percent(sym, evsel);
- if (opts->print_lines) {
- srcline_full_filename = opts->full_path;
- symbol__calc_lines(ms, &source_line, opts);
+ if (annotate_opts.print_lines) {
+ srcline_full_filename = annotate_opts.full_path;
+ symbol__calc_lines(ms, &source_line);
print_summary(&source_line, dso->long_name);
}
- symbol__annotate_printf(ms, evsel, opts);
+ symbol__annotate_printf(ms, evsel);
annotated_source__purge(symbol__annotation(sym)->src);
@@ -3046,19 +3145,20 @@ call_like:
obj__printf(obj, " ");
}
- disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
+ disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name);
}
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
{
double ipc = 0.0, coverage = 0.0;
+ struct annotated_branch *branch = annotation__get_branch(notes);
- if (notes->hit_cycles)
- ipc = notes->hit_insn / ((double)notes->hit_cycles);
+ if (branch && branch->hit_cycles)
+ ipc = branch->hit_insn / ((double)branch->hit_cycles);
- if (notes->total_insn) {
- coverage = notes->cover_insn * 100.0 /
- ((double)notes->total_insn);
+ if (branch && branch->total_insn) {
+ coverage = branch->cover_insn * 100.0 /
+ ((double)branch->total_insn);
}
scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
@@ -3083,8 +3183,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
int printed;
if (first_line && (al->offset == -1 || percent_max == 0.0)) {
- if (notes->have_cycles) {
- if (al->ipc == 0.0 && al->cycles == 0)
+ if (notes->branch && al->cycles) {
+ if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
show_title = true;
} else
show_title = true;
@@ -3120,18 +3220,18 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
}
}
- if (notes->have_cycles) {
- if (al->ipc)
- obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->ipc);
+ if (notes->branch) {
+ if (al->cycles && al->cycles->ipc)
+ obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
else if (!show_title)
obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
else
obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");
- if (!notes->options->show_minmax_cycle) {
- if (al->cycles)
+ if (!annotate_opts.show_minmax_cycle) {
+ if (al->cycles && al->cycles->avg)
obj__printf(obj, "%*" PRIu64 " ",
- ANNOTATION__CYCLES_WIDTH - 1, al->cycles);
+ ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
else if (!show_title)
obj__printf(obj, "%*s",
ANNOTATION__CYCLES_WIDTH, " ");
@@ -3145,8 +3245,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
scnprintf(str, sizeof(str),
"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
- al->cycles, al->cycles_min,
- al->cycles_max);
+ al->cycles->avg, al->cycles->min,
+ al->cycles->max);
obj__printf(obj, "%*s ",
ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
@@ -3172,7 +3272,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
if (!*al->line)
obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
else if (al->offset == -1) {
- if (al->line_nr && notes->options->show_linenr)
+ if (al->line_nr && annotate_opts.show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
else
printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " ");
@@ -3182,15 +3282,15 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
u64 addr = al->offset;
int color = -1;
- if (!notes->options->use_offset)
+ if (!annotate_opts.use_offset)
addr += notes->start;
- if (!notes->options->use_offset) {
+ if (!annotate_opts.use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
if (al->jump_sources &&
- notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
- if (notes->options->show_nr_jumps) {
+ annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
+ if (annotate_opts.show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
notes->widths.jumps,
@@ -3204,9 +3304,9 @@ print_addr:
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
notes->widths.target, addr);
} else if (ins__is_call(&disasm_line(al)->ins) &&
- notes->options->offset_level >= ANNOTATION__OFFSET_CALL) {
+ annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
goto print_addr;
- } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
+ } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
goto print_addr;
} else {
printed = scnprintf(bf, sizeof(bf), "%-*s ",
@@ -3228,43 +3328,44 @@ print_addr:
}
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
- struct annotation_write_ops *wops,
- struct annotation_options *opts)
+ struct annotation_write_ops *wops)
{
__annotation_line__write(al, notes, wops->first_line, wops->current_entry,
wops->change_color, wops->width, wops->obj,
- opts->percent_type,
+ annotate_opts.percent_type,
wops->set_color, wops->set_percent_color,
wops->set_jumps_percent_color, wops->printf,
wops->write_graph);
}
int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *options, struct arch **parch)
+ struct arch **parch)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
size_t size = symbol__size(sym);
int nr_pcnt = 1, err;
- notes->offsets = zalloc(size * sizeof(struct annotation_line *));
- if (notes->offsets == NULL)
+ notes->src->offsets = zalloc(size * sizeof(struct annotation_line *));
+ if (notes->src->offsets == NULL)
return ENOMEM;
if (evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
- err = symbol__annotate(ms, evsel, options, parch);
+ err = symbol__annotate(ms, evsel, parch);
if (err)
goto out_free_offsets;
- notes->options = options;
-
symbol__calc_percent(sym, evsel);
annotation__set_offsets(notes, size);
annotation__mark_jump_targets(notes, sym);
- annotation__compute_ipc(notes, size);
+
+ err = annotation__compute_ipc(notes, size);
+ if (err)
+ goto out_free_offsets;
+
annotation__init_column_widths(notes, sym);
notes->nr_events = nr_pcnt;
@@ -3274,7 +3375,7 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
return 0;
out_free_offsets:
- zfree(&notes->offsets);
+ zfree(&notes->src->offsets);
return err;
}
@@ -3337,8 +3438,10 @@ static int annotation__config(const char *var, const char *value, void *data)
return 0;
}
-void annotation_options__init(struct annotation_options *opt)
+void annotation_options__init(void)
{
+ struct annotation_options *opt = &annotate_opts;
+
memset(opt, 0, sizeof(*opt));
/* Default values. */
@@ -3349,16 +3452,15 @@ void annotation_options__init(struct annotation_options *opt)
opt->percent_type = PERCENT_PERIOD_LOCAL;
}
-
-void annotation_options__exit(struct annotation_options *opt)
+void annotation_options__exit(void)
{
- zfree(&opt->disassembler_style);
- zfree(&opt->objdump_path);
+ zfree(&annotate_opts.disassembler_style);
+ zfree(&annotate_opts.objdump_path);
}
-void annotation_config__init(struct annotation_options *opt)
+void annotation_config__init(void)
{
- perf_config(annotation__config, opt);
+ perf_config(annotation__config, &annotate_opts);
}
static unsigned int parse_percent_type(char *str1, char *str2)
@@ -3382,10 +3484,9 @@ static unsigned int parse_percent_type(char *str1, char *str2)
return type;
}
-int annotate_parse_percent_type(const struct option *opt, const char *_str,
+int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
int unset __maybe_unused)
{
- struct annotation_options *opts = opt->value;
unsigned int type;
char *str1, *str2;
int err = -1;
@@ -3404,7 +3505,7 @@ int annotate_parse_percent_type(const struct option *opt, const char *_str,
if (type == (unsigned int) -1)
type = parse_percent_type(str2, str1);
if (type != (unsigned int) -1) {
- opts->percent_type = type;
+ annotate_opts.percent_type = type;
err = 0;
}
@@ -3413,11 +3514,267 @@ out:
return err;
}
-int annotate_check_args(struct annotation_options *args)
+int annotate_check_args(void)
{
+ struct annotation_options *args = &annotate_opts;
+
if (args->prefix_strip && !args->prefix) {
pr_err("--prefix-strip requires --prefix\n");
return -1;
}
return 0;
}
+
+/*
+ * Get register number and access offset from the given instruction.
+ * It assumes the AT&T x86 asm format of OFFSET(REG). The format may
+ * need to be revisited when other architectures are handled.
+ * Fills @reg and @offset when it returns 0.
+ */
+static int extract_reg_offset(struct arch *arch, const char *str,
+ struct annotated_op_loc *op_loc)
+{
+ char *p;
+ char *regname;
+
+ if (arch->objdump.register_char == 0)
+ return -1;
+
+ /*
+ * It should start with the offset, but asm can omit a zero offset,
+ * so 0(%rax) is the same as (%rax).
+ *
+ * However, it can also start with a segment selector register like
+ * %gs:0x18(%rbx). In that case, skip that part first.
+ */
+ if (*str == arch->objdump.register_char) {
+ while (*str && !isdigit(*str) &&
+ *str != arch->objdump.memory_ref_char)
+ str++;
+ }
+
+ op_loc->offset = strtol(str, &p, 0);
+
+ p = strchr(p, arch->objdump.register_char);
+ if (p == NULL)
+ return -1;
+
+ regname = strdup(p);
+ if (regname == NULL)
+ return -1;
+
+ op_loc->reg = get_dwarf_regnum(regname, 0);
+ free(regname);
+ return 0;
+}
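
For reference, a quick sketch of what extract_reg_offset() yields for
typical AT&T x86 operands (illustrative values only; the function is
static to annotate.c and the exact DWARF register numbers are
arch-dependent):

    struct annotated_op_loc op = { .reg = -1 };

    extract_reg_offset(arch, "0x18(%rbx)", &op);
    /* op.offset == 0x18, op.reg == DWARF regnum of %rbx */

    extract_reg_offset(arch, "(%rax)", &op);
    /* omitted zero offset: op.offset == 0, op.reg for %rax */

    extract_reg_offset(arch, "%gs:0x18(%rbx)", &op);
    /* the segment selector is skipped, then parsed as above */
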
+
+/**
+ * annotate_get_insn_location - Get location of instruction
+ * @arch: the architecture info
+ * @dl: the target instruction
+ * @loc: a buffer to save the data
+ *
+ * Get detailed location info (register and offset) in the instruction.
+ * It covers both the source and target operands, and whether each one
+ * accesses a memory location. The offset field is meaningful only when
+ * the corresponding mem_ref flag is set.
+ *
+ * Some examples on x86:
+ *
+ * mov (%rax), %rcx # src_reg = rax, src_mem = 1, src_offset = 0
+ * # dst_reg = rcx, dst_mem = 0
+ *
+ * mov 0x18, %r8 # src_reg = -1, dst_reg = r8
+ */
+int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
+ struct annotated_insn_loc *loc)
+{
+ struct ins_operands *ops;
+ struct annotated_op_loc *op_loc;
+ int i;
+
+ if (!strcmp(dl->ins.name, "lock"))
+ ops = dl->ops.locked.ops;
+ else
+ ops = &dl->ops;
+
+ if (ops == NULL)
+ return -1;
+
+ memset(loc, 0, sizeof(*loc));
+
+ for_each_insn_op_loc(loc, i, op_loc) {
+ const char *insn_str = ops->source.raw;
+
+ if (i == INSN_OP_TARGET)
+ insn_str = ops->target.raw;
+
+ /* Invalidate the register by default */
+ op_loc->reg = -1;
+
+ if (insn_str == NULL)
+ continue;
+
+ if (strchr(insn_str, arch->objdump.memory_ref_char)) {
+ op_loc->mem_ref = true;
+ extract_reg_offset(arch, insn_str, op_loc);
+ } else {
+ char *s = strdup(insn_str);
+
+ if (s) {
+ op_loc->reg = get_dwarf_regnum(s, 0);
+ free(s);
+ }
+ }
+ }
+
+ return 0;
+}
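
A minimal caller sketch for the function above, using the helpers
declared in annotate.h further down in this diff (error handling and
the origin of 'arch' and 'dl' elided):

    struct annotated_insn_loc loc;
    struct annotated_op_loc *op_loc;
    int i;

    /* e.g. for "mov 0x18(%rbx), %rax" on x86 */
    if (annotate_get_insn_location(arch, dl, &loc) == 0) {
        for_each_insn_op_loc(&loc, i, op_loc) {
            if (op_loc->mem_ref)
                pr_debug("op[%d]: reg=%d offset=%d\n",
                         i, op_loc->reg, op_loc->offset);
        }
    }
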
+
+static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel)
+{
+ struct disasm_line *dl, *tmp_dl;
+ struct annotation *notes;
+
+ notes = symbol__annotation(ms->sym);
+ if (!list_empty(&notes->src->source))
+ return;
+
+ if (symbol__annotate(ms, evsel, NULL) < 0)
+ return;
+
+ /* remove non-insn disasm lines for simplicity */
+ list_for_each_entry_safe(dl, tmp_dl, &notes->src->source, al.node) {
+ if (dl->al.offset == -1) {
+ list_del(&dl->al.node);
+ free(dl);
+ }
+ }
+}
+
+static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip)
+{
+ struct disasm_line *dl;
+ struct annotation *notes;
+
+ notes = symbol__annotation(sym);
+
+ list_for_each_entry(dl, &notes->src->source, al.node) {
+ if (sym->start + dl->al.offset == ip)
+ return dl;
+ }
+ return NULL;
+}
+
+static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
+ const char *name)
+{
+ struct annotated_item_stat *istat;
+
+ list_for_each_entry(istat, head, list) {
+ if (!strcmp(istat->name, name))
+ return istat;
+ }
+
+ istat = zalloc(sizeof(*istat));
+ if (istat == NULL)
+ return NULL;
+
+ istat->name = strdup(name);
+ if (istat->name == NULL) {
+ free(istat);
+ return NULL;
+ }
+
+ list_add_tail(&istat->list, head);
+ return istat;
+}
+
+/**
+ * hist_entry__get_data_type - find data type for given hist entry
+ * @he: hist entry
+ *
+ * This function first annotates the instruction at @he->ip and extracts
+ * register and offset info from it. Then it searches the DWARF debug
+ * info to find the variable and its type using the address, register,
+ * and offset.
+ */
+struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
+{
+ struct map_symbol *ms = &he->ms;
+ struct evsel *evsel = hists_to_evsel(he->hists);
+ struct arch *arch;
+ struct disasm_line *dl;
+ struct annotated_insn_loc loc;
+ struct annotated_op_loc *op_loc;
+ struct annotated_data_type *mem_type;
+ struct annotated_item_stat *istat;
+ u64 ip = he->ip;
+ int i;
+
+ ann_data_stat.total++;
+
+ if (ms->map == NULL || ms->sym == NULL) {
+ ann_data_stat.no_sym++;
+ return NULL;
+ }
+
+ if (!symbol_conf.init_annotation) {
+ ann_data_stat.no_sym++;
+ return NULL;
+ }
+
+ if (evsel__get_arch(evsel, &arch) < 0) {
+ ann_data_stat.no_insn++;
+ return NULL;
+ }
+
+ /* Make sure it runs objdump to get disasm of the function */
+ symbol__ensure_annotate(ms, evsel);
+
+ /*
+ * Get a disasm to extract the location from the insn.
+ * This is too slow...
+ */
+ dl = find_disasm_line(ms->sym, ip);
+ if (dl == NULL) {
+ ann_data_stat.no_insn++;
+ return NULL;
+ }
+
+ istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
+ if (istat == NULL) {
+ ann_data_stat.no_insn++;
+ return NULL;
+ }
+
+ if (annotate_get_insn_location(arch, dl, &loc) < 0) {
+ ann_data_stat.no_insn_ops++;
+ istat->bad++;
+ return NULL;
+ }
+
+ for_each_insn_op_loc(&loc, i, op_loc) {
+ if (!op_loc->mem_ref)
+ continue;
+
+ mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset);
+ if (mem_type)
+ istat->good++;
+ else
+ istat->bad++;
+
+ if (symbol_conf.annotate_data_sample) {
+ annotated_data_type__update_samples(mem_type, evsel,
+ op_loc->offset,
+ he->stat.nr_events,
+ he->stat.period);
+ }
+ he->mem_type_off = op_loc->offset;
+ return mem_type;
+ }
+
+ ann_data_stat.no_mem_ops++;
+ istat->bad++;
+ return NULL;
+}
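
A hypothetical caller, e.g. a sort key resolving the type name for
display; the self.type_name field is assumed from annotate-data.h,
which is not part of this diff:

    struct annotated_data_type *dt = hist_entry__get_data_type(he);

    /* NULL when no symbol/insn/memory operand was found (counted above) */
    pr_debug("data type: %s +%d\n",
             dt ? dt->self.type_name : "(unknown)",
             dt ? he->mem_type_off : 0);
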
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 962780559176..dba50762c6e8 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -23,6 +23,7 @@ struct option;
struct perf_sample;
struct evsel;
struct symbol;
+struct annotated_data_type;
struct ins {
const char *name;
@@ -31,8 +32,6 @@ struct ins {
struct ins_operands {
char *raw;
- char *raw_comment;
- char *raw_func_start;
struct {
char *raw;
char *name;
@@ -41,22 +40,30 @@ struct ins_operands {
s64 offset;
bool offset_avail;
bool outside;
+ bool multi_regs;
} target;
union {
struct {
char *raw;
char *name;
u64 addr;
+ bool multi_regs;
} source;
struct {
struct ins ins;
struct ins_operands *ops;
} locked;
+ struct {
+ char *raw_comment;
+ char *raw_func_start;
+ } jump;
};
};
struct arch;
+bool arch__is(struct arch *arch, const char *name);
+
struct ins_ops {
void (*free)(struct ins_operands *ops);
int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
@@ -101,6 +108,8 @@ struct annotation_options {
unsigned int percent_type;
};
+extern struct annotation_options annotate_opts;
+
enum {
ANNOTATION__OFFSET_JUMP_TARGETS = 1,
ANNOTATION__OFFSET_CALL,
@@ -130,6 +139,13 @@ struct annotation_data {
struct sym_hist_entry he;
};
+struct cycles_info {
+ float ipc;
+ u64 avg;
+ u64 max;
+ u64 min;
+};
+
struct annotation_line {
struct list_head node;
struct rb_node rb_node;
@@ -137,12 +153,9 @@ struct annotation_line {
char *line;
int line_nr;
char *fileloc;
- int jump_sources;
- float ipc;
- u64 cycles;
- u64 cycles_max;
- u64 cycles_min;
char *path;
+ struct cycles_info *cycles;
+ int jump_sources;
u32 idx;
int idx_asm;
int data_nr;
@@ -214,8 +227,7 @@ struct annotation_write_ops {
};
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
- struct annotation_write_ops *ops,
- struct annotation_options *opts);
+ struct annotation_write_ops *ops);
int __annotation__scnprintf_samples_period(struct annotation *notes,
char *bf, size_t size,
@@ -264,27 +276,29 @@ struct cyc_hist {
* returns.
*/
struct annotated_source {
- struct list_head source;
- int nr_histograms;
- size_t sizeof_sym_hist;
- struct cyc_hist *cycles_hist;
- struct sym_hist *histograms;
+ struct list_head source;
+ size_t sizeof_sym_hist;
+ struct sym_hist *histograms;
+ struct annotation_line **offsets;
+ int nr_histograms;
+ int nr_entries;
+ int nr_asm_entries;
+ u16 max_line_len;
};
-struct LOCKABLE annotation {
- u64 max_coverage;
- u64 start;
+struct annotated_branch {
u64 hit_cycles;
u64 hit_insn;
unsigned int total_insn;
unsigned int cover_insn;
- struct annotation_options *options;
- struct annotation_line **offsets;
+ struct cyc_hist *cycles_hist;
+ u64 max_coverage;
+};
+
+struct LOCKABLE annotation {
+ u64 start;
int nr_events;
int max_jump_sources;
- int nr_entries;
- int nr_asm_entries;
- u16 max_line_len;
struct {
u8 addr;
u8 jumps;
@@ -293,8 +307,8 @@ struct LOCKABLE annotation {
u8 max_addr;
u8 max_ins_name;
} widths;
- bool have_cycles;
struct annotated_source *src;
+ struct annotated_branch *branch;
};
static inline void annotation__init(struct annotation *notes __maybe_unused)
@@ -308,10 +322,10 @@ bool annotation__trylock(struct annotation *notes) EXCLUSIVE_TRYLOCK_FUNCTION(tr
static inline int annotation__cycles_width(struct annotation *notes)
{
- if (notes->have_cycles && notes->options->show_minmax_cycle)
+ if (notes->branch && annotate_opts.show_minmax_cycle)
return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH;
- return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
+ return notes->branch ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0;
}
static inline int annotation__pcnt_width(struct annotation *notes)
@@ -319,13 +333,12 @@ static inline int annotation__pcnt_width(struct annotation *notes)
return (symbol_conf.show_total_period ? 12 : 7) * notes->nr_events;
}
-static inline bool annotation_line__filter(struct annotation_line *al, struct annotation *notes)
+static inline bool annotation_line__filter(struct annotation_line *al)
{
- return notes->options->hide_src_code && al->offset == -1;
+ return annotate_opts.hide_src_code && al->offset == -1;
}
void annotation__set_offsets(struct annotation *notes, s64 size);
-void annotation__compute_ipc(struct annotation *notes, size_t size);
void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym);
void annotation__update_column_widths(struct annotation *notes);
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym);
@@ -349,6 +362,8 @@ static inline struct annotation *symbol__annotation(struct symbol *sym)
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
struct evsel *evsel);
+struct annotated_branch *annotation__get_branch(struct annotation *notes);
+
int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
struct addr_map_symbol *start,
unsigned cycles);
@@ -361,11 +376,9 @@ void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct map_symbol *ms,
struct evsel *evsel,
- struct annotation_options *options,
struct arch **parch);
int symbol__annotate2(struct map_symbol *ms,
struct evsel *evsel,
- struct annotation_options *options,
struct arch **parch);
enum symbol_disassemble_errno {
@@ -392,43 +405,86 @@ enum symbol_disassemble_errno {
int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen);
-int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *options);
+int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
void annotated_source__purge(struct annotated_source *as);
-int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
- struct annotation_options *opts);
+int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel);
bool ui__has_annotation(void);
-int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
+int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel);
-int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts);
+int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel);
#ifdef HAVE_SLANG_SUPPORT
int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt,
- struct annotation_options *opts);
+ struct hist_browser_timer *hbt);
#else
static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
struct evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused,
- struct annotation_options *opts __maybe_unused)
+ struct hist_browser_timer *hbt __maybe_unused)
{
return 0;
}
#endif
-void annotation_options__init(struct annotation_options *opt);
-void annotation_options__exit(struct annotation_options *opt);
+void annotation_options__init(void);
+void annotation_options__exit(void);
-void annotation_config__init(struct annotation_options *opt);
+void annotation_config__init(void);
int annotate_parse_percent_type(const struct option *opt, const char *_str,
int unset);
-int annotate_check_args(struct annotation_options *args);
+int annotate_check_args(void);
+
+/**
+ * struct annotated_op_loc - Location info of instruction operand
+ * @reg: Register in the operand
+ * @offset: Memory access offset in the operand
+ * @mem_ref: Whether the operand accesses memory
+ */
+struct annotated_op_loc {
+ int reg;
+ int offset;
+ bool mem_ref;
+};
+
+enum annotated_insn_ops {
+ INSN_OP_SOURCE = 0,
+ INSN_OP_TARGET = 1,
+
+ INSN_OP_MAX,
+};
+
+/**
+ * struct annotated_insn_loc - Location info of instruction
+ * @ops: Array of location info for source and target operands
+ */
+struct annotated_insn_loc {
+ struct annotated_op_loc ops[INSN_OP_MAX];
+};
+
+#define for_each_insn_op_loc(insn_loc, i, op_loc) \
+ for (i = INSN_OP_SOURCE, op_loc = &(insn_loc)->ops[i]; \
+ i < INSN_OP_MAX; \
+ i++, op_loc++)
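+
+/*
+ * The macro simply walks the two-entry ops[] array, visiting
+ * INSN_OP_SOURCE and then INSN_OP_TARGET, e.g.:
+ *
+ *	struct annotated_insn_loc loc = {};
+ *	struct annotated_op_loc *op;
+ *	int i;
+ *
+ *	for_each_insn_op_loc(&loc, i, op)
+ *		op->reg = -1;	// invalidate both, as the parser does
+ */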
+
+/* Get detailed location info in the instruction */
+int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
+ struct annotated_insn_loc *loc);
+
+/* Returns a data type from the sample instruction (if any) */
+struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he);
+
+struct annotated_item_stat {
+ struct list_head list;
+ char *name;
+ int good;
+ int bad;
+};
+extern struct list_head ann_insn_stat;
#endif /* __PERF_ANNOTATE_H */
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a0368202a746..3684e6009b63 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct evlist *evlist,
struct evsel *evsel, int idx)
{
- bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+ bool per_cpu = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
mp->mmap_needed = evsel->needs_auxtrace_mmap;
@@ -648,7 +648,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
- bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+ bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
if (per_cpu_mmaps) {
struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
@@ -1638,6 +1638,9 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
case 'Z':
synth_opts->timeless_decoding = true;
break;
+ case 'T':
+ synth_opts->use_timestamp = true;
+ break;
case ' ':
case ',':
break;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 29eb82dff574..55702215a82d 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -99,6 +99,7 @@ enum itrace_period_type {
* @remote_access: whether to synthesize remote access events
* @mem: whether to synthesize memory events
* @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
+ * @use_timestamp: use the timestamp trace as kernel time
* @vm_time_correlation: perform VM Time Correlation
* @vm_tm_corr_dry_run: VM Time Correlation dry-run
* @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
@@ -146,6 +147,7 @@ struct itrace_synth_opts {
bool remote_access;
bool mem;
bool timeless_decoding;
+ bool use_timestamp;
bool vm_time_correlation;
bool vm_tm_corr_dry_run;
char *vm_tm_corr_args;
@@ -678,6 +680,7 @@ bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
" q: quicker (less detailed) decoding\n" \
" A: approximate IPC\n" \
" Z: prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
+" T: use the timestamp trace as kernel time\n" \
" PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
" concatenate multiple options. Default is iybxwpe or cewp\n"
diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c
index 591fc1edd385..dec910989701 100644
--- a/tools/perf/util/block-info.c
+++ b/tools/perf/util/block-info.c
@@ -129,9 +129,9 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
al.sym = he->ms.sym;
notes = symbol__annotation(he->ms.sym);
- if (!notes || !notes->src || !notes->src->cycles_hist)
+ if (!notes || !notes->branch || !notes->branch->cycles_hist)
return 0;
- ch = notes->src->cycles_hist;
+ ch = notes->branch->cycles_hist;
for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) {
if (ch[i].num_aggr) {
struct block_info *bi;
@@ -464,8 +464,7 @@ void block_info__free_report(struct block_report *reps, int nr_reps)
}
int report__browse_block_hists(struct block_hist *bh, float min_percent,
- struct evsel *evsel, struct perf_env *env,
- struct annotation_options *annotation_opts)
+ struct evsel *evsel, struct perf_env *env)
{
int ret;
@@ -477,8 +476,7 @@ int report__browse_block_hists(struct block_hist *bh, float min_percent,
return 0;
case 1:
symbol_conf.report_individual_block = true;
- ret = block_hists_tui_browse(bh, evsel, min_percent,
- env, annotation_opts);
+ ret = block_hists_tui_browse(bh, evsel, min_percent, env);
return ret;
default:
return -1;
diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h
index 42e9dcc4cf0a..96f53e89795e 100644
--- a/tools/perf/util/block-info.h
+++ b/tools/perf/util/block-info.h
@@ -78,8 +78,7 @@ struct block_report *block_info__create_report(struct evlist *evlist,
void block_info__free_report(struct block_report *reps, int nr_reps);
int report__browse_block_hists(struct block_hist *bh, float min_percent,
- struct evsel *evsel, struct perf_env *env,
- struct annotation_options *annotation_opts);
+ struct evsel *evsel, struct perf_env *env);
float block_info__total_cycles_percent(struct hist_entry *he);
diff --git a/tools/perf/util/block-range.c b/tools/perf/util/block-range.c
index 680e92774d0c..15c42196c24c 100644
--- a/tools/perf/util/block-range.c
+++ b/tools/perf/util/block-range.c
@@ -311,6 +311,7 @@ done:
double block_range__coverage(struct block_range *br)
{
struct symbol *sym;
+ struct annotated_branch *branch;
if (!br) {
if (block_ranges.blocks)
@@ -323,5 +324,9 @@ double block_range__coverage(struct block_range *br)
if (!sym)
return -1;
- return (double)br->coverage / symbol__annotation(sym)->max_coverage;
+ branch = symbol__annotation(sym)->branch;
+ if (!branch)
+ return -1;
+
+ return (double)br->coverage / branch->max_coverage;
}
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 38fcf3ba5749..3573e0b7ef3e 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -386,6 +386,9 @@ int perf_event__synthesize_bpf_events(struct perf_session *session,
int err;
int fd;
+ if (opts->no_bpf_event)
+ return 0;
+
event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
if (!event)
return -1;
@@ -542,9 +545,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
- struct perf_env *env,
- FILE *fp)
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp)
{
__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
@@ -560,7 +563,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
if (info->btf_id) {
struct btf_node *node;
- node = perf_env__find_btf(env, info->btf_id);
+ node = __perf_env__find_btf(env, info->btf_id);
if (node)
btf = btf__new((__u8 *)(node->data),
node->data_size);
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 1bcbd4fb6c66..e2f0420905f5 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -33,9 +33,9 @@ struct btf_node {
int machine__process_bpf(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
- struct perf_env *env,
- FILE *fp);
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
#else
static inline int machine__process_bpf(struct machine *machine __maybe_unused,
union perf_event *event __maybe_unused,
@@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
return 0;
}
-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
- struct perf_env *env __maybe_unused,
- FILE *fp __maybe_unused)
+static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ FILE *fp __maybe_unused)
{
}
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 7f9b0e46e008..7a8af60e0f51 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -455,7 +455,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
return -1;
if (!all_cpu_map) {
- all_cpu_map = perf_cpu_map__new(NULL);
+ all_cpu_map = perf_cpu_map__new_online_cpus();
if (!all_cpu_map)
return -1;
}
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index f1716c089c99..31ff19afc20c 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -318,7 +318,7 @@ int lock_contention_read(struct lock_contention *con)
}
/* make sure it loads the kernel map */
- map__load(maps__first(machine->kmaps)->map);
+ maps__load_first(machine->kmaps);
prev_key = NULL;
while (!bpf_map_get_next_key(fd, prev_key, &key)) {
diff --git a/tools/perf/util/compress.h b/tools/perf/util/compress.h
index 0cd3369af2a4..b29109cd3609 100644
--- a/tools/perf/util/compress.h
+++ b/tools/perf/util/compress.h
@@ -3,6 +3,8 @@
#define PERF_COMPRESS_H
#include <stdbool.h>
+#include <stddef.h>
+#include <sys/types.h>
#ifdef HAVE_ZSTD_SUPPORT
#include <zstd.h>
#endif
@@ -21,6 +23,7 @@ struct zstd_data {
#ifdef HAVE_ZSTD_SUPPORT
ZSTD_CStream *cstream;
ZSTD_DStream *dstream;
+ int comp_level;
#endif
};
@@ -29,7 +32,7 @@ struct zstd_data {
int zstd_init(struct zstd_data *data, int level);
int zstd_fini(struct zstd_data *data);
-size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
void *src, size_t src_size, size_t max_record_size,
size_t process_header(void *record, size_t increment));
@@ -48,7 +51,7 @@ static inline int zstd_fini(struct zstd_data *data __maybe_unused)
}
static inline
-size_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused,
+ssize_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused,
void *dst __maybe_unused, size_t dst_size __maybe_unused,
void *src __maybe_unused, size_t src_size __maybe_unused,
size_t max_record_size __maybe_unused,
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 0e090e8bc334..0581ee0fa5f2 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -672,7 +672,7 @@ struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
static struct perf_cpu_map *online;
if (!online)
- online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+ online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */
return online;
}
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
index 81cfc85f4668..8bbeb2dc76fd 100644
--- a/tools/perf/util/cputopo.c
+++ b/tools/perf/util/cputopo.c
@@ -267,7 +267,7 @@ struct cpu_topology *cpu_topology__new(void)
ncpus = cpu__max_present_cpu().cpu;
/* build online CPU map */
- map = perf_cpu_map__new(NULL);
+ map = perf_cpu_map__new_online_cpus();
if (map == NULL) {
pr_debug("failed to get system cpumap\n");
return NULL;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index a9873d14c632..d65d7485886c 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -3346,12 +3346,27 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
etm->metadata = metadata;
etm->auxtrace_type = auxtrace_info->type;
- /* Use virtual timestamps if all ETMs report ts_source = 1 */
- etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
+ if (etm->synth_opts.use_timestamp)
+ /*
+ * Prior to Armv8.4, Arm CPUs don't support the FEAT_TRF feature,
+ * so the decoder cannot know whether the traced timestamp matches
+ * the kernel time.
+ *
+ * A user who knows the working platform can specify the itrace
+ * option 'T' to tell the decoder to force use of the traced
+ * timestamp as the kernel time.
+ */
+ etm->has_virtual_ts = true;
+ else
+ /* Use virtual timestamps if all ETMs report ts_source = 1 */
+ etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
if (!etm->has_virtual_ts)
ui__warning("Virtual timestamps are not enabled, or not supported by the traced system.\n"
- "The time field of the samples will not be set accurately.\n\n");
+ "The time field of the samples will not be set accurately.\n"
+ "For Arm CPUs prior to Armv8.4 or without support FEAT_TRF,\n"
+ "you can specify the itrace option 'T' for timestamp decoding\n"
+ "if the Coresight timestamp on the platform is same with the kernel time.\n\n");
etm->auxtrace.process_event = cs_etm__process_event;
etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
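
Tying this to the new 'T' flag above: on a platform where the CoreSight
timestamp is known to match the kernel time, decoding can be forced
with something like (assuming the usual itrace flow, possibly combined
with other itrace flags):

    perf script --itrace=T

which sets synth_opts->use_timestamp in itrace_do_parse_synth_opts()
and hence has_virtual_ts here.
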
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index b9fb71ab7a73..106429155c2e 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -253,8 +253,8 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
*/
addr_location__init(&al);
al.sym = node->ms.sym;
- al.map = node->ms.map;
- al.maps = thread__maps(thread);
+ al.map = map__get(node->ms.map);
+ al.maps = maps__get(thread__maps(thread));
al.addr = node->ip;
if (al.map && !al.sym)
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 88378c4c5dd9..e282b4ceb4d2 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -38,12 +38,21 @@ bool dump_trace = false, quiet = false;
int debug_ordered_events;
static int redirect_to_stderr;
int debug_data_convert;
-static FILE *debug_file;
+static FILE *_debug_file;
bool debug_display_time;
+FILE *debug_file(void)
+{
+ if (!_debug_file) {
+ pr_warning_once("debug_file not set");
+ debug_set_file(stderr);
+ }
+ return _debug_file;
+}
+
void debug_set_file(FILE *file)
{
- debug_file = file;
+ _debug_file = file;
}
void debug_set_display_time(bool set)
@@ -78,8 +87,8 @@ int veprintf(int level, int var, const char *fmt, va_list args)
if (use_browser >= 1 && !redirect_to_stderr) {
ui_helpline__vshow(fmt, args);
} else {
- ret = fprintf_time(debug_file);
- ret += vfprintf(debug_file, fmt, args);
+ ret = fprintf_time(debug_file());
+ ret += vfprintf(debug_file(), fmt, args);
}
}
@@ -107,9 +116,8 @@ static int veprintf_time(u64 t, const char *fmt, va_list args)
nsecs -= secs * NSEC_PER_SEC;
usecs = nsecs / NSEC_PER_USEC;
- ret = fprintf(stderr, "[%13" PRIu64 ".%06" PRIu64 "] ",
- secs, usecs);
- ret += vfprintf(stderr, fmt, args);
+ ret = fprintf(debug_file(), "[%13" PRIu64 ".%06" PRIu64 "] ", secs, usecs);
+ ret += vfprintf(debug_file(), fmt, args);
return ret;
}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index f99468a7f681..de8870980d44 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -77,6 +77,7 @@ int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __printf(4, 5)
int veprintf(int level, int var, const char *fmt, va_list args);
int perf_debug_option(const char *str);
+FILE *debug_file(void);
void debug_set_file(FILE *file);
void debug_set_display_time(bool set);
void perf_debug_setup(void);
diff --git a/tools/perf/util/debuginfo.c b/tools/perf/util/debuginfo.c
new file mode 100644
index 000000000000..19acf4775d35
--- /dev/null
+++ b/tools/perf/util/debuginfo.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DWARF debug information handling code. Copied from probe-finder.c.
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/zalloc.h>
+
+#include "build-id.h"
+#include "dso.h"
+#include "debug.h"
+#include "debuginfo.h"
+#include "symbol.h"
+
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+#include <elfutils/debuginfod.h>
+#endif
+
+/* Dwarf FL wrappers */
+static char *debuginfo_path; /* Currently dummy */
+
+static const Dwfl_Callbacks offline_callbacks = {
+ .find_debuginfo = dwfl_standard_find_debuginfo,
+ .debuginfo_path = &debuginfo_path,
+
+ .section_address = dwfl_offline_section_address,
+
+ /* We use this table for core files too. */
+ .find_elf = dwfl_build_id_find_elf,
+};
+
+/* Get a Dwarf from offline image */
+static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
+ const char *path)
+{
+ GElf_Addr dummy;
+ int fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ dbg->dwfl = dwfl_begin(&offline_callbacks);
+ if (!dbg->dwfl)
+ goto error;
+
+ dwfl_report_begin(dbg->dwfl);
+ dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
+ if (!dbg->mod)
+ goto error;
+
+ dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias);
+ if (!dbg->dbg)
+ goto error;
+
+ dwfl_module_build_id(dbg->mod, &dbg->build_id, &dummy);
+
+ dwfl_report_end(dbg->dwfl, NULL, NULL);
+
+ return 0;
+error:
+ if (dbg->dwfl)
+ dwfl_end(dbg->dwfl);
+ else
+ close(fd);
+ memset(dbg, 0, sizeof(*dbg));
+
+ return -ENOENT;
+}
+
+static struct debuginfo *__debuginfo__new(const char *path)
+{
+ struct debuginfo *dbg = zalloc(sizeof(*dbg));
+ if (!dbg)
+ return NULL;
+
+ if (debuginfo__init_offline_dwarf(dbg, path) < 0)
+ zfree(&dbg);
+ if (dbg)
+ pr_debug("Open Debuginfo file: %s\n", path);
+ return dbg;
+}
+
+enum dso_binary_type distro_dwarf_types[] = {
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+struct debuginfo *debuginfo__new(const char *path)
+{
+ enum dso_binary_type *type;
+ char buf[PATH_MAX], nil = '\0';
+ struct dso *dso;
+ struct debuginfo *dinfo = NULL;
+ struct build_id bid;
+
+ /* Try to open distro debuginfo files */
+ dso = dso__new(path);
+ if (!dso)
+ goto out;
+
+ /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
+ if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
+ dso__set_build_id(dso, &bid);
+
+ for (type = distro_dwarf_types;
+ !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
+ type++) {
+ if (dso__read_binary_type_filename(dso, *type, &nil,
+ buf, PATH_MAX) < 0)
+ continue;
+ dinfo = __debuginfo__new(buf);
+ }
+ dso__put(dso);
+
+out:
+ /* If no distro debuginfo was found, fall back to the given binary */
+ return dinfo ? : __debuginfo__new(path);
+}
+
+void debuginfo__delete(struct debuginfo *dbg)
+{
+ if (dbg) {
+ if (dbg->dwfl)
+ dwfl_end(dbg->dwfl);
+ free(dbg);
+ }
+}
+
+/* For a kernel module, we need special code to get a DIE */
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset)
+{
+ int n, i;
+ Elf32_Word shndx;
+ Elf_Scn *scn;
+ Elf *elf;
+ GElf_Shdr mem, *shdr;
+ const char *p;
+
+ elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
+ if (!elf)
+ return -EINVAL;
+
+ /* Get the number of relocations */
+ n = dwfl_module_relocations(dbg->mod);
+ if (n < 0)
+ return -ENOENT;
+ /* Search the relocation related .text section */
+ for (i = 0; i < n; i++) {
+ p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
+ if (strcmp(p, ".text") == 0) {
+ /* OK, get the section header */
+ scn = elf_getscn(elf, shndx);
+ if (!scn)
+ return -ENOENT;
+ shdr = gelf_getshdr(scn, &mem);
+ if (!shdr)
+ return -ENOENT;
+ *offs = shdr->sh_addr;
+ if (adjust_offset)
+ *offs -= shdr->sh_offset;
+ }
+ }
+ return 0;
+}
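
A minimal usage sketch of the new API, with a hypothetical path;
debuginfo__new() falls back to the binary itself when no distro
debuginfo is found:

    struct debuginfo *dbg = debuginfo__new("/usr/lib/debug/boot/vmlinux");

    if (dbg) {
        Dwarf_Addr offs = 0;

        if (debuginfo__get_text_offset(dbg, &offs, /*adjust_offset=*/true) == 0)
            pr_debug(".text offset: %#" PRIx64 "\n", (u64)offs);
        debuginfo__delete(dbg);
    }
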
+
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+int get_source_from_debuginfod(const char *raw_path,
+ const char *sbuild_id, char **new_path)
+{
+ debuginfod_client *c = debuginfod_begin();
+ const char *p = raw_path;
+ int fd;
+
+ if (!c)
+ return -ENOMEM;
+
+ fd = debuginfod_find_source(c, (const unsigned char *)sbuild_id,
+ 0, p, new_path);
+ pr_debug("Search %s from debuginfod -> %d\n", p, fd);
+ if (fd >= 0)
+ close(fd);
+ debuginfod_end(c);
+ if (fd < 0) {
+ pr_debug("Failed to find %s in debuginfod (%s)\n",
+ raw_path, sbuild_id);
+ return -ENOENT;
+ }
+ pr_debug("Got a source %s\n", *new_path);
+
+ return 0;
+}
+#endif /* HAVE_DEBUGINFOD_SUPPORT */
diff --git a/tools/perf/util/debuginfo.h b/tools/perf/util/debuginfo.h
new file mode 100644
index 000000000000..4d65b8c605fc
--- /dev/null
+++ b/tools/perf/util/debuginfo.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PERF_DEBUGINFO_H
+#define _PERF_DEBUGINFO_H
+
+#include <errno.h>
+#include <linux/compiler.h>
+
+#ifdef HAVE_DWARF_SUPPORT
+
+#include "dwarf-aux.h"
+
+/* debug information structure */
+struct debuginfo {
+ Dwarf *dbg;
+ Dwfl_Module *mod;
+ Dwfl *dwfl;
+ Dwarf_Addr bias;
+ const unsigned char *build_id;
+};
+
+/* This also tries to open distro debuginfo */
+struct debuginfo *debuginfo__new(const char *path);
+void debuginfo__delete(struct debuginfo *dbg);
+
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset);
+
+#else /* HAVE_DWARF_SUPPORT */
+
+/* dummy debug information structure */
+struct debuginfo {
+};
+
+static inline struct debuginfo *debuginfo__new(const char *path __maybe_unused)
+{
+ return NULL;
+}
+
+static inline void debuginfo__delete(struct debuginfo *dbg __maybe_unused)
+{
+}
+
+static inline int debuginfo__get_text_offset(struct debuginfo *dbg __maybe_unused,
+ Dwarf_Addr *offs __maybe_unused,
+ bool adjust_offset __maybe_unused)
+{
+ return -EINVAL;
+}
+
+#endif /* HAVE_DWARF_SUPPORT */
+
+#ifdef HAVE_DEBUGINFOD_SUPPORT
+int get_source_from_debuginfod(const char *raw_path, const char *sbuild_id,
+ char **new_path);
+#else /* HAVE_DEBUGINFOD_SUPPORT */
+static inline int get_source_from_debuginfod(const char *raw_path __maybe_unused,
+ const char *sbuild_id __maybe_unused,
+ char **new_path __maybe_unused)
+{
+ return -ENOTSUP;
+}
+#endif /* HAVE_DEBUGINFOD_SUPPORT */
+
+#endif /* _PERF_DEBUGINFO_H */
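A hedged sketch of how an in-tree caller might use this header when HAVE_DWARF_SUPPORT is set; print_text_offset() is a hypothetical helper for illustration, not part of the patch:

#include "debuginfo.h"
#include "debug.h"

static int print_text_offset(const char *path)
{
	struct debuginfo *dbg = debuginfo__new(path);
	Dwarf_Addr offs = 0;
	int ret;

	if (dbg == NULL)
		return -ENOENT;

	/* adjust_offset additionally subtracts the section file offset */
	ret = debuginfo__get_text_offset(dbg, &offs, /*adjust_offset=*/false);
	if (ret == 0)
		pr_debug(".text at %#lx\n", (unsigned long)offs);

	debuginfo__delete(dbg);
	return ret;
}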
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 1f629b6fb7cf..22fd5fa806ed 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -31,6 +31,7 @@
#include "debug.h"
#include "string2.h"
#include "vdso.h"
+#include "annotate-data.h"
static const char * const debuglink_paths[] = {
"%.0s%s",
@@ -1327,6 +1328,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
dso->data.cache = RB_ROOT;
dso->inlined_nodes = RB_ROOT_CACHED;
dso->srclines = RB_ROOT_CACHED;
+ dso->data_types = RB_ROOT;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1370,6 +1372,8 @@ void dso__delete(struct dso *dso)
symbols__delete(&dso->symbols);
dso->symbol_names_len = 0;
zfree(&dso->symbol_names);
+ annotated_data_type__tree_delete(&dso->data_types);
+
if (dso->short_name_allocated) {
zfree((char **)&dso->short_name);
dso->short_name_allocated = false;
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 3759de8c2267..ce9f3849a773 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -154,6 +154,8 @@ struct dso {
size_t symbol_names_len;
struct rb_root_cached inlined_nodes;
struct rb_root_cached srclines;
+ struct rb_root data_types;
+
struct {
u64 addr;
struct symbol *symbol;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 2941d88f2199..7aa5fee0da19 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -1051,32 +1051,28 @@ Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
}
/**
- * die_get_typename - Get the name of given variable DIE
- * @vr_die: a variable DIE
+ * die_get_typename_from_type - Get the name of given type DIE
+ * @type_die: a type DIE
* @buf: a strbuf for result type name
*
- * Get the name of @vr_die and stores it to @buf. Return 0 if succeeded.
+ * Get the name of @type_die and store it in @buf. Return 0 on success,
* or -ENOENT if the type name could not be found.
* Note that the result stores the typedef name if possible, and stores
* "*(function_type)" if the type is a function pointer.
*/
-int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
+int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf)
{
- Dwarf_Die type;
int tag, ret;
const char *tmp = "";
- if (__die_get_real_type(vr_die, &type) == NULL)
- return -ENOENT;
-
- tag = dwarf_tag(&type);
+ tag = dwarf_tag(type_die);
if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
tmp = "*";
else if (tag == DW_TAG_subroutine_type) {
/* Function pointer */
return strbuf_add(buf, "(function_type)", 15);
} else {
- const char *name = dwarf_diename(&type);
+ const char *name = dwarf_diename(type_die);
if (tag == DW_TAG_union_type)
tmp = "union ";
@@ -1089,8 +1085,35 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
/* Write a base name */
return strbuf_addf(buf, "%s%s", tmp, name ?: "");
}
- ret = die_get_typename(&type, buf);
- return ret ? ret : strbuf_addstr(buf, tmp);
+ ret = die_get_typename(type_die, buf);
+ if (ret < 0) {
+ /* void pointer has no type attribute */
+ if (tag == DW_TAG_pointer_type && ret == -ENOENT)
+ return strbuf_addf(buf, "void*");
+
+ return ret;
+ }
+ return strbuf_addstr(buf, tmp);
+}
+
+/**
+ * die_get_typename - Get the name of given variable DIE
+ * @vr_die: a variable DIE
+ * @buf: a strbuf for result type name
+ *
+ * Get the type name of @vr_die and store it in @buf. Return 0 on success,
+ * or -ENOENT if the type name could not be found.
+ * Note that the result stores the typedef name if possible, and stores
+ * "*(function_type)" if the type is a function pointer.
+ */
+int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
+{
+ Dwarf_Die type;
+
+ if (__die_get_real_type(vr_die, &type) == NULL)
+ return -ENOENT;
+
+ return die_get_typename_from_type(&type, buf);
}
/**
@@ -1238,12 +1261,151 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
out:
return ret;
}
-#else
-int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
- Dwarf_Die *vr_die __maybe_unused,
- struct strbuf *buf __maybe_unused)
+
+/* Internal parameters for __die_find_var_reg_cb() */
+struct find_var_data {
+ /* Target instruction address */
+ Dwarf_Addr pc;
+ /* Target memory address (for global data) */
+ Dwarf_Addr addr;
+ /* Target register */
+ unsigned reg;
+ /* Access offset, set for global data */
+ int offset;
+};
+
+/* Max number of registers DW_OP_regN supports */
+#define DWARF_OP_DIRECT_REGS 32
+
+/* Only checks direct child DIEs in the given scope. */
+static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
+{
+ struct find_var_data *data = arg;
+ int tag = dwarf_tag(die_mem);
+ ptrdiff_t off = 0;
+ Dwarf_Attribute attr;
+ Dwarf_Addr base, start, end;
+ Dwarf_Op *ops;
+ size_t nops;
+
+ if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter)
+ return DIE_FIND_CB_SIBLING;
+
+ if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
+ return DIE_FIND_CB_SIBLING;
+
+ while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
+ /* Assuming the location list is sorted by address */
+ if (end < data->pc)
+ continue;
+ if (start > data->pc)
+ break;
+
+ /* Only match the simple, single-operation cases */
+ if (data->reg < DWARF_OP_DIRECT_REGS) {
+ if (ops->atom == (DW_OP_reg0 + data->reg) && nops == 1)
+ return DIE_FIND_CB_END;
+ } else {
+ if (ops->atom == DW_OP_regx && ops->number == data->reg &&
+ nops == 1)
+ return DIE_FIND_CB_END;
+ }
+ }
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_variable_by_reg - Find a variable saved in a register
+ * @sc_die: a scope DIE
+ * @pc: the program address to find
+ * @reg: the register number to find
+ * @die_mem: a buffer to save the resulting DIE
+ *
+ * Find the variable DIE accessed by the given register.
+ */
+Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
+ Dwarf_Die *die_mem)
+{
+ struct find_var_data data = {
+ .pc = pc,
+ .reg = reg,
+ };
+ return die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem);
+}
+
+/* Only checks direct child DIEs in the given scope */
+static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
+{
+ struct find_var_data *data = arg;
+ int tag = dwarf_tag(die_mem);
+ ptrdiff_t off = 0;
+ Dwarf_Attribute attr;
+ Dwarf_Addr base, start, end;
+ Dwarf_Word size;
+ Dwarf_Die type_die;
+ Dwarf_Op *ops;
+ size_t nops;
+
+ if (tag != DW_TAG_variable)
+ return DIE_FIND_CB_SIBLING;
+
+ if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
+ return DIE_FIND_CB_SIBLING;
+
+ while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
+ if (ops->atom != DW_OP_addr)
+ continue;
+
+ if (data->addr < ops->number)
+ continue;
+
+ if (data->addr == ops->number) {
+ /* Update offset relative to the start of the variable */
+ data->offset = 0;
+ return DIE_FIND_CB_END;
+ }
+
+ if (die_get_real_type(die_mem, &type_die) == NULL)
+ continue;
+
+ if (dwarf_aggregate_size(&type_die, &size) < 0)
+ continue;
+
+ if (data->addr >= ops->number + size)
+ continue;
+
+ /* Update offset relative to the start of the variable */
+ data->offset = data->addr - ops->number;
+ return DIE_FIND_CB_END;
+ }
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_variable_by_addr - Find variable located at given address
+ * @sc_die: a scope DIE
+ * @pc: the program address to find
+ * @addr: the data address to find
+ * @die_mem: a buffer to save the resulting DIE
+ * @offset: the offset in the resulting type
+ *
+ * Find the variable DIE located at the given address (in PC-relative mode).
+ * This is usually for global variables.
+ */
+Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
+ Dwarf_Addr addr, Dwarf_Die *die_mem,
+ int *offset)
{
- return -ENOTSUP;
+ struct find_var_data data = {
+ .pc = pc,
+ .addr = addr,
+ };
+ Dwarf_Die *result;
+
+ result = die_find_child(sc_die, __die_find_var_addr_cb, &data, die_mem);
+ if (result)
+ *offset = data.offset;
+ return result;
}
#endif
@@ -1425,3 +1587,56 @@ void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
*entrypc = postprologue_addr;
}
+
+/* Internal parameters for __die_find_scope_cb() */
+struct find_scope_data {
+ /* Target instruction address */
+ Dwarf_Addr pc;
+ /* Number of scopes found [output] */
+ int nr;
+ /* Array of scopes found; index 0 is the outermost one [output] */
+ Dwarf_Die *scopes;
+};
+
+static int __die_find_scope_cb(Dwarf_Die *die_mem, void *arg)
+{
+ struct find_scope_data *data = arg;
+
+ if (dwarf_haspc(die_mem, data->pc)) {
+ Dwarf_Die *tmp;
+
+ tmp = realloc(data->scopes, (data->nr + 1) * sizeof(*tmp));
+ if (tmp == NULL)
+ return DIE_FIND_CB_END;
+
+ memcpy(tmp + data->nr, die_mem, sizeof(*die_mem));
+ data->scopes = tmp;
+ data->nr++;
+ return DIE_FIND_CB_CHILD;
+ }
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_get_scopes - Return a list of scopes including the address
+ * @cu_die: a compile unit DIE
+ * @pc: the address to find
+ * @scopes: the array of DIEs for scopes (result)
+ *
+ * This function does the same as dwarf_getscopes() but doesn't follow
+ * the origins of inlined functions. It returns the number of scopes saved
+ * in the @scopes argument. The outermost scope is saved first (index 0) and
+ * the last one is the innermost scope containing @pc.
+ */
+int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes)
+{
+ struct find_scope_data data = {
+ .pc = pc,
+ };
+ Dwarf_Die die_mem;
+
+ die_find_child(cu_die, __die_find_scope_cb, &data, &die_mem);
+
+ *scopes = data.scopes;
+ return data.nr;
+}
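A hedged sketch of a consumer of die_get_scopes(), assuming the usual dwarf-aux.h and debug.h context; the caller owns the returned array and must free() it:

static void print_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc)
{
	Dwarf_Die *scopes = NULL;
	int i, nr = die_get_scopes(cu_die, pc, &scopes);

	/* index 0 is the outermost scope, nr - 1 the innermost */
	for (i = 0; i < nr; i++)
		pr_debug("scope %d: %s\n", i,
			 dwarf_diename(&scopes[i]) ?: "(anonymous)");
	free(scopes);
}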
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 7ec8bc1083bb..4e64caac6df8 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -116,12 +116,14 @@ Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name,
Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
Dwarf_Die *die_mem);
+/* Get the name of given type DIE */
+int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf);
+
/* Get the name of given variable DIE */
int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf);
/* Get the name and type of given variable DIE, stored as "type\tname" */
int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf);
-int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf);
/* Check if target program is compiled with optimization */
bool die_is_optimized_target(Dwarf_Die *cu_die);
@@ -130,4 +132,49 @@ bool die_is_optimized_target(Dwarf_Die *cu_die);
void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
Dwarf_Addr *entrypc);
-#endif
+/* Get the list of including scopes */
+int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes);
+
+#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
+
+/* Get byte offset range of given variable DIE */
+int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf);
+
+/* Find a variable saved in the 'reg' at given address */
+Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
+ Dwarf_Die *die_mem);
+
+/* Find a (global) variable located in the 'addr' */
+Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
+ Dwarf_Addr addr, Dwarf_Die *die_mem,
+ int *offset);
+
+#else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+
+static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
+ Dwarf_Die *vr_die __maybe_unused,
+ struct strbuf *buf __maybe_unused)
+{
+ return -ENOTSUP;
+}
+
+static inline Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die __maybe_unused,
+ Dwarf_Addr pc __maybe_unused,
+ int reg __maybe_unused,
+ Dwarf_Die *die_mem __maybe_unused)
+{
+ return NULL;
+}
+
+static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unused,
+ Dwarf_Addr pc __maybe_unused,
+ Dwarf_Addr addr __maybe_unused,
+ Dwarf_Die *die_mem __maybe_unused,
+ int *offset __maybe_unused)
+{
+ return NULL;
+}
+
+#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+
+#endif /* _DWARF_AUX_H */
diff --git a/tools/perf/util/dwarf-regs.c b/tools/perf/util/dwarf-regs.c
index 69cfaa5953bf..5b7f86c0063f 100644
--- a/tools/perf/util/dwarf-regs.c
+++ b/tools/perf/util/dwarf-regs.c
@@ -5,9 +5,12 @@
* Written by: Masami Hiramatsu <mhiramat@kernel.org>
*/
+#include <stdlib.h>
+#include <string.h>
#include <debug.h>
#include <dwarf-regs.h>
#include <elf.h>
+#include <errno.h>
#include <linux/kernel.h>
#ifndef EM_AARCH64
@@ -68,3 +71,34 @@ const char *get_dwarf_regstr(unsigned int n, unsigned int machine)
}
return NULL;
}
+
+__weak int get_arch_regnum(const char *name __maybe_unused)
+{
+ return -ENOTSUP;
+}
+
+/* Return DWARF register number from architecture register name */
+int get_dwarf_regnum(const char *name, unsigned int machine)
+{
+ char *regname = strdup(name);
+ int reg = -1;
+ char *p;
+
+ if (regname == NULL)
+ return -EINVAL;
+
+ /* For convenience, remove trailing characters */
+ p = strpbrk(regname, " ,)");
+ if (p)
+ *p = '\0';
+
+ switch (machine) {
+ case EM_NONE: /* Generic arch - use host arch */
+ reg = get_arch_regnum(regname);
+ break;
+ default:
+ pr_err("ELF MACHINE %x is not supported.\n", machine);
+ }
+ free(regname);
+ return reg;
+}
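To illustrate the trailing-character stripping above, a sketch assuming perf's debug.h for pr_debug() and a host architecture that implements get_arch_regnum(); which register spellings are accepted (with or without a leading '%') is arch-specific:

#include <elf.h>
#include <dwarf-regs.h>

static void regnum_demo(void)
{
	/* " ,)" suffixes are stripped, so an operand
	 * fragment such as "rax," can be passed as-is */
	int reg = get_dwarf_regnum("rax,", EM_NONE);

	if (reg < 0)
		pr_debug("no DWARF register number on this host\n");
}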
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 44140b7f596a..a459374d0a1a 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,6 +3,7 @@
#include "debug.h"
#include "env.h"
#include "util/header.h"
+#include "linux/compiler.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
@@ -12,6 +13,7 @@
#include <string.h>
#include "pmus.h"
#include "strbuf.h"
+#include "trace/beauty/beauty.h"
struct perf_env perf_env;
@@ -23,12 +25,18 @@ struct perf_env perf_env;
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node)
{
+ down_write(&env->bpf_progs.lock);
+ __perf_env__insert_bpf_prog_info(env, info_node);
+ up_write(&env->bpf_progs.lock);
+}
+
+void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+{
__u32 prog_id = info_node->info_linear->info.id;
struct bpf_prog_info_node *node;
struct rb_node *parent = NULL;
struct rb_node **p;
- down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.infos.rb_node;
while (*p != NULL) {
@@ -40,15 +48,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
p = &(*p)->rb_right;
} else {
pr_debug("duplicated bpf prog info %u\n", prog_id);
- goto out;
+ return;
}
}
rb_link_node(&info_node->rb_node, parent, p);
rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
env->bpf_progs.infos_cnt++;
-out:
- up_write(&env->bpf_progs.lock);
}
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -78,13 +84,21 @@ out:
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
+ bool ret;
+
+ down_write(&env->bpf_progs.lock);
+ ret = __perf_env__insert_btf(env, btf_node);
+ up_write(&env->bpf_progs.lock);
+ return ret;
+}
+
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
struct rb_node *parent = NULL;
__u32 btf_id = btf_node->id;
struct btf_node *node;
struct rb_node **p;
- bool ret = true;
- down_write(&env->bpf_progs.lock);
p = &env->bpf_progs.btfs.rb_node;
while (*p != NULL) {
@@ -96,25 +110,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
p = &(*p)->rb_right;
} else {
pr_debug("duplicated btf %u\n", btf_id);
- ret = false;
- goto out;
+ return false;
}
}
rb_link_node(&btf_node->rb_node, parent, p);
rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
env->bpf_progs.btfs_cnt++;
-out:
- up_write(&env->bpf_progs.lock);
- return ret;
+ return true;
}
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
+ struct btf_node *res;
+
+ down_read(&env->bpf_progs.lock);
+ res = __perf_env__find_btf(env, btf_id);
+ up_read(&env->bpf_progs.lock);
+ return res;
+}
+
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
struct btf_node *node = NULL;
struct rb_node *n;
- down_read(&env->bpf_progs.lock);
n = env->bpf_progs.btfs.rb_node;
while (n) {
@@ -124,13 +144,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
else if (btf_id > node->id)
n = n->rb_right;
else
- goto out;
+ return node;
}
- node = NULL;
-
-out:
- up_read(&env->bpf_progs.lock);
- return node;
+ return NULL;
}
/* purge data in bpf_progs.infos tree */
@@ -453,6 +469,18 @@ const char *perf_env__arch(struct perf_env *env)
return normalize_arch(arch_name);
}
+const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused)
+{
+#if defined(HAVE_SYSCALL_TABLE_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+ if (env->arch_strerrno == NULL)
+ env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env));
+
+ return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function";
+#else
+ return "!(HAVE_SYSCALL_TABLE_SUPPORT && HAVE_LIBTRACEEVENT)";
+#endif
+}
+
const char *perf_env__cpuid(struct perf_env *env)
{
int status;
@@ -531,6 +559,24 @@ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}
+bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name)
+{
+ char *pmu_mapping = env->pmu_mappings, *colon;
+
+ for (int i = 0; i < env->nr_pmu_mappings; ++i) {
+ if (strtoul(pmu_mapping, &colon, 0) == ULONG_MAX || *colon != ':')
+ goto out_error;
+
+ pmu_mapping = colon + 1;
+ if (strcmp(pmu_mapping, pmu_name) == 0)
+ return true;
+
+ pmu_mapping += strlen(pmu_mapping) + 1;
+ }
+out_error:
+ return false;
+}
+
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
const char *cap)
{
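The env.c changes above follow one pattern throughout: a plain function that takes bpf_progs.lock wraps a double-underscore variant for callers that already hold the lock (as the header readers do). A generic hedged sketch of that split, with made-up names and perf's util/rwsem.h assumed:

struct store {
	struct rw_semaphore lock;
	int value;
};

/* caller must already hold s->lock for writing */
static void __store__set(struct store *s, int v)
{
	s->value = v;
}

/* locking wrapper for everyone else */
static void store__set(struct store *s, int v)
{
	down_write(&s->lock);
	__store__set(s, v);
	up_write(&s->lock);
}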
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 4566c51f2fd9..7c527e65c186 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -46,10 +46,17 @@ struct hybrid_node {
struct pmu_caps {
int nr_caps;
unsigned int max_branches;
+ unsigned int br_cntr_nr;
+ unsigned int br_cntr_width;
+
char **caps;
char *pmu_name;
};
+typedef const char *(arch_syscalls__strerrno_t)(int err);
+
+arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);
+
struct perf_env {
char *hostname;
char *os_release;
@@ -62,6 +69,8 @@ struct perf_env {
unsigned long long total_mem;
unsigned int msr_pmu_type;
unsigned int max_branches;
+ unsigned int br_cntr_nr;
+ unsigned int br_cntr_width;
int kernel_is_64_bit;
int nr_cmdline;
@@ -130,6 +139,7 @@ struct perf_env {
*/
bool enabled;
} clock;
+ arch_syscalls__strerrno_t *arch_strerrno;
};
enum perf_compress_type {
@@ -159,19 +169,26 @@ int perf_env__read_cpu_topology_map(struct perf_env *env);
void cpu_cache_level__free(struct cpu_cache_level *cache);
const char *perf_env__arch(struct perf_env *env);
+const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
void perf_env__init(struct perf_env *env);
+void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
__u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
const char *cap);
+
+bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 923c0fb15122..68f45e9e63b6 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -617,13 +617,13 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
maps = machine__kernel_maps(machine);
- load_map = true;
+ load_map = !symbol_conf.lazy_load_kernel_maps;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
maps = machine__kernel_maps(machine);
- load_map = true;
+ load_map = !symbol_conf.lazy_load_kernel_maps;
} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
al->level = 'u';
} else {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e36da58522ef..55a300a0977b 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -103,7 +103,14 @@ struct evlist *evlist__new_default(void)
err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
if (err) {
evlist__delete(evlist);
- evlist = NULL;
+ return NULL;
+ }
+
+ if (evlist->core.nr_entries > 1) {
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel)
+ evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
}
return evlist;
@@ -1056,7 +1063,7 @@ int evlist__create_maps(struct evlist *evlist, struct target *target)
return -1;
if (target__uses_dummy_map(target))
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
else
cpus = perf_cpu_map__new(target->cpu_list);
@@ -1352,7 +1359,7 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
* error, and we may not want to do that fallback to a
* default cpu identity map :-\
*/
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus)
goto out;
@@ -2518,3 +2525,33 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
}
perf_cpu_map__put(user_requested_cpus);
}
+
+void evlist__uniquify_name(struct evlist *evlist)
+{
+ char *new_name, empty_attributes[2] = ":", *attributes;
+ struct evsel *pos;
+
+ if (perf_pmus__num_core_pmus() == 1)
+ return;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (!evsel__is_hybrid(pos))
+ continue;
+
+ if (strchr(pos->name, '/'))
+ continue;
+
+ attributes = strchr(pos->name, ':');
+ if (attributes)
+ *attributes = '\0';
+ else
+ attributes = empty_attributes;
+
+ if (asprintf(&new_name, "%s/%s/%s", pos->pmu_name, pos->name, attributes + 1) > 0) {
+ free(pos->name);
+ pos->name = new_name;
+ } else {
+ *attributes = ':';
+ }
+ }
+}
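Worked example of the renaming above in isolation: the hybrid event "cycles:u" on PMU "cpu_core" becomes "cpu_core/cycles/u". A standalone sketch with stand-in strings, assuming glibc's asprintf():

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char name[] = "cycles:u", empty[2] = ":", *attrs, *new_name;

	attrs = strchr(name, ':');
	if (attrs)
		*attrs = '\0';		/* split "cycles" from "u" */
	else
		attrs = empty;

	if (asprintf(&new_name, "%s/%s/%s", "cpu_core", name, attrs + 1) > 0) {
		puts(new_name);		/* prints: cpu_core/cycles/u */
		free(new_name);
	}
	return 0;
}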
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 98e7ddb2bd30..cb91dc9117a2 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -442,5 +442,6 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
void evlist__check_mem_load_aux(struct evlist *evlist);
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
+void evlist__uniquify_name(struct evlist *evlist);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 72a5dfc38d38..6d7c9c58a9bc 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1801,7 +1801,7 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
if (cpus == NULL) {
if (empty_cpu_map == NULL) {
- empty_cpu_map = perf_cpu_map__dummy_new();
+ empty_cpu_map = perf_cpu_map__new_any_cpu();
if (empty_cpu_map == NULL)
return -ENOMEM;
}
@@ -1832,6 +1832,8 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
static void evsel__disable_missing_features(struct evsel *evsel)
{
+ if (perf_missing_features.branch_counters)
+ evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS;
if (perf_missing_features.read_lost)
evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
if (perf_missing_features.weight_struct) {
@@ -1885,7 +1887,12 @@ bool evsel__detect_missing_features(struct evsel *evsel)
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.read_lost &&
+ if (!perf_missing_features.branch_counters &&
+ (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) {
+ perf_missing_features.branch_counters = true;
+ pr_debug2("switching off branch counters support\n");
+ return true;
+ } else if (!perf_missing_features.read_lost &&
(evsel->core.attr.read_format & PERF_FORMAT_LOST)) {
perf_missing_features.read_lost = true;
pr_debug2("switching off PERF_FORMAT_LOST support\n");
@@ -2318,6 +2325,22 @@ u64 evsel__bitfield_swap_branch_flags(u64 value)
return new_val;
}
+static inline bool evsel__has_branch_counters(const struct evsel *evsel)
+{
+ struct evsel *cur, *leader = evsel__leader(evsel);
+
+ /* The branch counters feature is only supported for event groups */
+ if (!leader || !evsel->evlist)
+ return false;
+
+ evlist__for_each_entry(evsel->evlist, cur) {
+ if ((leader == evsel__leader(cur)) &&
+ (cur->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
+ return true;
+ }
+ return false;
+}
+
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -2551,6 +2574,16 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
OVERFLOW_CHECK(array, sz, max_size);
array = (void *)array + sz;
+
+ if (evsel__has_branch_counters(evsel)) {
+ OVERFLOW_CHECK_u64(array);
+
+ data->branch_stack_cntr = (u64 *)array;
+ sz = data->branch_stack->nr * sizeof(u64);
+
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
+ }
}
if (type & PERF_SAMPLE_REGS_USER) {
@@ -2820,7 +2853,8 @@ u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const
#endif
-bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
+bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+ char *msg, size_t msgsize)
{
int paranoid;
@@ -2828,18 +2862,19 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
evsel->core.attr.type == PERF_TYPE_HARDWARE &&
evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
/*
- * If it's cycles then fall back to hrtimer based
- * cpu-clock-tick sw counter, which is always available even if
- * no PMU support.
+ * If it's cycles then fall back to hrtimer based cpu-clock sw
+ * counter, which is always available even if no PMU support.
*
* PPC returns ENXIO until 2.6.37 (behavior changed with commit
* b0a873e).
*/
- scnprintf(msg, msgsize, "%s",
-"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
-
evsel->core.attr.type = PERF_TYPE_SOFTWARE;
- evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ evsel->core.attr.config = target__has_cpu(target)
+ ? PERF_COUNT_SW_CPU_CLOCK
+ : PERF_COUNT_SW_TASK_CLOCK;
+ scnprintf(msg, msgsize,
+ "The cycles event is not supported, trying to fall back to %s",
+ target__has_cpu(target) ? "cpu-clock" : "task-clock");
zfree(&evsel->name);
return true;
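For reference, the sample layout the parser above walks, inferred from this code rather than from a separate spec; branch_stack_counters() is a hypothetical helper, not part of the patch:

/*
 *	u64 nr;
 *	u64 hw_idx;				(only with PERF_SAMPLE_BRANCH_HW_INDEX)
 *	struct perf_branch_entry entries[nr];	(three u64s each: from, to, flags)
 *	u64 counters[nr];			(saved as data->branch_stack_cntr)
 */
static inline __u64 *branch_stack_counters(__u64 *branch_sample, bool has_hw_idx)
{
	__u64 nr = branch_sample[0];
	__u64 *entries = branch_sample + (has_hw_idx ? 2 : 1);

	return entries + nr * 3;	/* skip the branch entries */
}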
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index d791316a1792..efbb6e848287 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -191,6 +191,7 @@ struct perf_missing_features {
bool code_page_size;
bool weight_struct;
bool read_lost;
+ bool branch_counters;
};
extern struct perf_missing_features perf_missing_features;
@@ -459,7 +460,8 @@ static inline bool evsel__is_clock(const struct evsel *evsel)
evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}
-bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
+bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+ char *msg, size_t msgsize);
int evsel__open_strerror(struct evsel *evsel, struct target *target,
int err, char *msg, size_t size);
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index fefc72066c4e..ac17a3cb59dc 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -293,9 +293,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
*/
phdr = elf_newphdr(e, 1);
phdr[0].p_type = PT_LOAD;
- phdr[0].p_offset = 0;
- phdr[0].p_vaddr = 0;
- phdr[0].p_paddr = 0;
+ phdr[0].p_offset = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET;
+ phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET;
phdr[0].p_filesz = csize;
phdr[0].p_memsz = csize;
phdr[0].p_flags = PF_X | PF_R;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e86b9439ffee..3fe28edc3d01 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1444,7 +1444,9 @@ static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
nodes = new_nodes;
size += 4;
}
- ret = memory_node__read(&nodes[cnt++], idx);
+ ret = memory_node__read(&nodes[cnt], idx);
+ if (!ret)
+ cnt += 1;
}
out:
closedir(dir);
@@ -1847,8 +1849,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
node = rb_entry(next, struct bpf_prog_info_node, rb_node);
next = rb_next(&node->rb_node);
- bpf_event__print_bpf_prog_info(&node->info_linear->info,
- env, fp);
+ __bpf_event__print_bpf_prog_info(&node->info_linear->info,
+ env, fp);
}
up_read(&env->bpf_progs.lock);
@@ -2145,6 +2147,14 @@ static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
__print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
pmu_caps->pmu_name);
}
+
+ if (strcmp(perf_env__arch(&ff->ph->env), "x86") == 0 &&
+ perf_env__has_pmu_mapping(&ff->ph->env, "ibs_op")) {
+ char *max_precise = perf_env__find_pmu_cap(&ff->ph->env, "cpu", "max_precise");
+
+ if (max_precise != NULL && atoi(max_precise) == 0)
+ fprintf(fp, "# AMD systems uses ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n");
+ }
}
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
@@ -3178,7 +3188,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
/* after reading from file, translate offset to address */
bpil_offs_to_addr(info_linear);
info_node->info_linear = info_linear;
- perf_env__insert_bpf_prog_info(env, info_node);
+ __perf_env__insert_bpf_prog_info(env, info_node);
}
up_write(&env->bpf_progs.lock);
@@ -3225,7 +3235,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
if (__do_read(ff, node->data, data_size))
goto out;
- perf_env__insert_btf(env, node);
+ __perf_env__insert_btf(env, node);
node = NULL;
}
@@ -3259,7 +3269,9 @@ static int process_compressed(struct feat_fd *ff,
}
static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
- char ***caps, unsigned int *max_branches)
+ char ***caps, unsigned int *max_branches,
+ unsigned int *br_cntr_nr,
+ unsigned int *br_cntr_width)
{
char *name, *value, *ptr;
u32 nr_pmu_caps, i;
@@ -3294,6 +3306,12 @@ static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
if (!strcmp(name, "branches"))
*max_branches = atoi(value);
+ if (!strcmp(name, "branch_counter_nr"))
+ *br_cntr_nr = atoi(value);
+
+ if (!strcmp(name, "branch_counter_width"))
+ *br_cntr_width = atoi(value);
+
free(value);
free(name);
}
@@ -3318,7 +3336,9 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
{
int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
&ff->ph->env.cpu_pmu_caps,
- &ff->ph->env.max_branches);
+ &ff->ph->env.max_branches,
+ &ff->ph->env.br_cntr_nr,
+ &ff->ph->env.br_cntr_width);
if (!ret && !ff->ph->env.cpu_pmu_caps)
pr_debug("cpu pmu capabilities not available\n");
@@ -3347,7 +3367,9 @@ static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
for (i = 0; i < nr_pmu; i++) {
ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
&pmu_caps[i].caps,
- &pmu_caps[i].max_branches);
+ &pmu_caps[i].max_branches,
+ &pmu_caps[i].br_cntr_nr,
+ &pmu_caps[i].br_cntr_width);
if (ret)
goto err;
@@ -4369,9 +4391,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
ret += fprintf(fp, "... ");
map = cpu_map__new_data(&ev->cpus.cpus);
- if (map)
+ if (map) {
ret += cpu_map__fprintf(map, fp);
- else
+ perf_cpu_map__put(map);
+ } else
ret += fprintf(fp, "failed to get cpus\n");
break;
default:
diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
index 43bd1ca62d58..52d0ce302ca0 100644
--- a/tools/perf/util/hisi-ptt.c
+++ b/tools/perf/util/hisi-ptt.c
@@ -123,6 +123,7 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
if (dump_trace)
hisi_ptt_dump_event(ptt, data, size);
+ free(data);
return 0;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 0888b7163b7c..fa359180ebf8 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -491,8 +491,8 @@ static int hist_entry__init(struct hist_entry *he,
}
if (symbol_conf.res_sample) {
- he->res_samples = calloc(sizeof(struct res_sample),
- symbol_conf.res_sample);
+ he->res_samples = calloc(symbol_conf.res_sample,
+ sizeof(struct res_sample));
if (!he->res_samples)
goto err_srcline;
}
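The hist.c change above only swaps the arguments into calloc()'s documented (nmemb, size) order; newer compilers warn on the transposed form (GCC 14 adds -Wcalloc-transposed-args for it). A minimal illustration:

#include <stdlib.h>

struct res_sample { int cpu; };

int main(void)
{
	/* number of members first, then the size of each member */
	struct res_sample *r = calloc(16, sizeof(*r));

	free(r);
	return 0;
}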
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index afc9f1c7f4dc..4a0aea0c9e00 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -82,6 +82,9 @@ enum hist_column {
HISTC_ADDR_TO,
HISTC_ADDR,
HISTC_SIMD,
+ HISTC_TYPE,
+ HISTC_TYPE_OFFSET,
+ HISTC_SYMBOL_OFFSET,
HISTC_NR_COLS, /* Last entry */
};
@@ -457,7 +460,6 @@ struct hist_browser_timer {
int refresh;
};
-struct annotation_options;
struct res_sample;
enum rstype {
@@ -473,16 +475,13 @@ struct block_hist;
void attr_to_script(char *buf, struct perf_event_attr *attr);
int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
- struct hist_browser_timer *hbt,
- struct annotation_options *annotation_opts);
+ struct hist_browser_timer *hbt);
int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel,
- struct hist_browser_timer *hbt,
- struct annotation_options *annotation_opts);
+ struct hist_browser_timer *hbt);
int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt,
- float min_pcnt, struct perf_env *env, bool warn_lost_event,
- struct annotation_options *annotation_options);
+ float min_pcnt, struct perf_env *env, bool warn_lost_event);
int script_browse(const char *script_opt, struct evsel *evsel);
@@ -492,8 +491,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res,
void res_sample_init(void);
int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel,
- float min_percent, struct perf_env *env,
- struct annotation_options *annotation_opts);
+ float min_percent, struct perf_env *env);
#else
static inline
int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
@@ -501,23 +499,20 @@ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused,
struct hist_browser_timer *hbt __maybe_unused,
float min_pcnt __maybe_unused,
struct perf_env *env __maybe_unused,
- bool warn_lost_event __maybe_unused,
- struct annotation_options *annotation_options __maybe_unused)
+ bool warn_lost_event __maybe_unused)
{
return 0;
}
static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
struct evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused,
- struct annotation_options *annotation_options __maybe_unused)
+ struct hist_browser_timer *hbt __maybe_unused)
{
return 0;
}
static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
struct evsel *evsel __maybe_unused,
- struct hist_browser_timer *hbt __maybe_unused,
- struct annotation_options *annotation_opts __maybe_unused)
+ struct hist_browser_timer *hbt __maybe_unused)
{
return 0;
}
@@ -541,8 +536,7 @@ static inline void res_sample_init(void) {}
static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused,
struct evsel *evsel __maybe_unused,
float min_percent __maybe_unused,
- struct perf_env *env __maybe_unused,
- struct annotation_options *annotation_opts __maybe_unused)
+ struct perf_env *env __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
index 7d99a084e82d..01fb25a1150a 100644
--- a/tools/perf/util/include/dwarf-regs.h
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -2,6 +2,9 @@
#ifndef _PERF_DWARF_REGS_H_
#define _PERF_DWARF_REGS_H_
+#define DWARF_REG_PC 0xd3af9c /* random number */
+#define DWARF_REG_FB 0xd3affb /* random number */
+
#ifdef HAVE_DWARF_SUPPORT
const char *get_arch_regstr(unsigned int n);
/*
@@ -10,6 +13,22 @@ const char *get_arch_regstr(unsigned int n);
* machine: ELF machine signature (EM_*)
*/
const char *get_dwarf_regstr(unsigned int n, unsigned int machine);
+
+int get_arch_regnum(const char *name);
+/*
+ * get_dwarf_regnum - Returns DWARF regnum from register name
+ * name: architecture register name
+ * machine: ELF machine signature (EM_*)
+ */
+int get_dwarf_regnum(const char *name, unsigned int machine);
+
+#else /* HAVE_DWARF_SUPPORT */
+
+static inline int get_dwarf_regnum(const char *name __maybe_unused,
+ unsigned int machine __maybe_unused)
+{
+ return -1;
+}
#endif
#ifdef HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
diff --git a/tools/perf/util/include/linux/linkage.h b/tools/perf/util/include/linux/linkage.h
index 75e2248416f5..178b00205fe6 100644
--- a/tools/perf/util/include/linux/linkage.h
+++ b/tools/perf/util/include/linux/linkage.h
@@ -115,6 +115,10 @@
SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK)
#endif
+#ifndef SYM_FUNC_ALIAS_MEMFUNC
+#define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS
+#endif
+
// In the kernel sources (include/linux/cfi_types.h), this has a different
// definition when CONFIG_CFI_CLANG is used, for tools/ just use the !clang
// definition:
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 90c750150b19..b397a769006f 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -453,7 +453,7 @@ static struct thread *findnew_guest_code(struct machine *machine,
* Guest code can be found in hypervisor process at the same address
* so copy host maps.
*/
- err = maps__clone(thread, thread__maps(host_thread));
+ err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
thread__put(host_thread);
if (err)
goto out_err;
@@ -1285,33 +1285,46 @@ static u64 find_entry_trampoline(struct dso *dso)
#define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
#define X86_64_ENTRY_TRAMPOLINE 0x6000
+struct machine__map_x86_64_entry_trampolines_args {
+ struct maps *kmaps;
+ bool found;
+};
+
+static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
+{
+ struct machine__map_x86_64_entry_trampolines_args *args = data;
+ struct map *dest_map;
+ struct kmap *kmap = __map__kmap(map);
+
+ if (!kmap || !is_entry_trampoline(kmap->name))
+ return 0;
+
+ dest_map = maps__find(args->kmaps, map__pgoff(map));
+ if (dest_map != map)
+ map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
+
+ args->found = true;
+ return 0;
+}
+
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct dso *kernel)
{
- struct maps *kmaps = machine__kernel_maps(machine);
+ struct machine__map_x86_64_entry_trampolines_args args = {
+ .kmaps = machine__kernel_maps(machine),
+ .found = false,
+ };
int nr_cpus_avail, cpu;
- bool found = false;
- struct map_rb_node *rb_node;
u64 pgoff;
/*
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
- maps__for_each_entry(kmaps, rb_node) {
- struct map *dest_map, *map = rb_node->map;
- struct kmap *kmap = __map__kmap(map);
-
- if (!kmap || !is_entry_trampoline(kmap->name))
- continue;
+ maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
- dest_map = maps__find(kmaps, map__pgoff(map));
- if (dest_map != map)
- map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
- found = true;
- }
- if (found || machine->trampolines_mapped)
+ if (args.found || machine->trampolines_mapped)
return 0;
pgoff = find_entry_trampoline(kernel);
@@ -1359,8 +1372,7 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
if (machine->vmlinux_map == NULL)
return -ENOMEM;
- map__set_map_ip(machine->vmlinux_map, identity__map_ip);
- map__set_unmap_ip(machine->vmlinux_map, identity__map_ip);
+ map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
}
@@ -1750,12 +1762,11 @@ int machine__create_kernel_maps(struct machine *machine)
if (end == ~0ULL) {
/* update end address of the kernel map using adjacent module address */
- struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine),
- machine__kernel_map(machine));
- struct map_rb_node *next = map_rb_node__next(rb_node);
+ struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
+ machine__kernel_map(machine));
if (next)
- machine__set_kernel_mmap(machine, start, map__start(next->map));
+ machine__set_kernel_mmap(machine, start, map__start(next));
}
out_put:
@@ -2157,9 +2168,13 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
if (dump_trace)
perf_event__fprintf_task(event, stdout);
- if (thread != NULL)
- thread__put(thread);
-
+ if (thread != NULL) {
+ if (symbol_conf.keep_exited_threads)
+ thread__set_exited(thread, /*exited=*/true);
+ else
+ machine__remove_thread(machine, thread);
+ }
+ thread__put(thread);
return 0;
}
@@ -3395,16 +3410,8 @@ int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv
int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
{
struct maps *maps = machine__kernel_maps(machine);
- struct map_rb_node *pos;
- int err = 0;
- maps__for_each_entry(maps, pos) {
- err = fn(pos->map, priv);
- if (err != 0) {
- break;
- }
- }
- return err;
+ return maps__for_each_map(maps, fn, priv);
}
bool machine__is_lock_function(struct machine *machine, u64 addr)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index f64b83004421..54c67cb7ecef 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -109,8 +109,7 @@ void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
map__set_pgoff(map, pgoff);
map__set_reloc(map, 0);
map__set_dso(map, dso__get(dso));
- map__set_map_ip(map, map__dso_map_ip);
- map__set_unmap_ip(map, map__dso_unmap_ip);
+ map__set_mapping_type(map, MAPPING_TYPE__DSO);
map__set_erange_warned(map, false);
refcount_set(map__refcnt(map), 1);
}
@@ -172,7 +171,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
map__init(result, start, start + len, pgoff, dso);
if (anon || no_dso) {
- map->map_ip = map->unmap_ip = identity__map_ip;
+ map->mapping_type = MAPPING_TYPE__IDENTITY;
/*
* Set memory without DSO as loaded. All map__find_*
@@ -630,18 +629,3 @@ struct maps *map__kmaps(struct map *map)
}
return kmap->kmaps;
}
-
-u64 map__dso_map_ip(const struct map *map, u64 ip)
-{
- return ip - map__start(map) + map__pgoff(map);
-}
-
-u64 map__dso_unmap_ip(const struct map *map, u64 ip)
-{
- return ip + map__start(map) - map__pgoff(map);
-}
-
-u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip)
-{
- return ip;
-}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 1b53d53adc86..49756716cb13 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -16,23 +16,25 @@ struct dso;
struct maps;
struct machine;
+enum mapping_type {
+ /* map__map_ip/map__unmap_ip are given as offsets in the DSO. */
+ MAPPING_TYPE__DSO,
+ /* map__map_ip/map__unmap_ip are just the given ip value. */
+ MAPPING_TYPE__IDENTITY,
+};
+
DECLARE_RC_STRUCT(map) {
u64 start;
u64 end;
- bool erange_warned:1;
- bool priv:1;
- u32 prot;
u64 pgoff;
u64 reloc;
-
- /* ip -> dso rip */
- u64 (*map_ip)(const struct map *, u64);
- /* dso rip -> ip */
- u64 (*unmap_ip)(const struct map *, u64);
-
struct dso *dso;
refcount_t refcnt;
+ u32 prot;
u32 flags;
+ enum mapping_type mapping_type:8;
+ bool erange_warned;
+ bool priv;
};
struct kmap;
@@ -41,38 +43,11 @@ struct kmap *__map__kmap(struct map *map);
struct kmap *map__kmap(struct map *map);
struct maps *map__kmaps(struct map *map);
-/* ip -> dso rip */
-u64 map__dso_map_ip(const struct map *map, u64 ip);
-/* dso rip -> ip */
-u64 map__dso_unmap_ip(const struct map *map, u64 ip);
-/* Returns ip */
-u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip);
-
static inline struct dso *map__dso(const struct map *map)
{
return RC_CHK_ACCESS(map)->dso;
}
-static inline u64 map__map_ip(const struct map *map, u64 ip)
-{
- return RC_CHK_ACCESS(map)->map_ip(map, ip);
-}
-
-static inline u64 map__unmap_ip(const struct map *map, u64 ip)
-{
- return RC_CHK_ACCESS(map)->unmap_ip(map, ip);
-}
-
-static inline void *map__map_ip_ptr(struct map *map)
-{
- return RC_CHK_ACCESS(map)->map_ip;
-}
-
-static inline void* map__unmap_ip_ptr(struct map *map)
-{
- return RC_CHK_ACCESS(map)->unmap_ip;
-}
-
static inline u64 map__start(const struct map *map)
{
return RC_CHK_ACCESS(map)->start;
@@ -123,6 +98,34 @@ static inline size_t map__size(const struct map *map)
return map__end(map) - map__start(map);
}
+/* ip -> dso rip */
+static inline u64 map__dso_map_ip(const struct map *map, u64 ip)
+{
+ return ip - map__start(map) + map__pgoff(map);
+}
+
+/* dso rip -> ip */
+static inline u64 map__dso_unmap_ip(const struct map *map, u64 rip)
+{
+ return rip + map__start(map) - map__pgoff(map);
+}
+
+static inline u64 map__map_ip(const struct map *map, u64 ip_or_rip)
+{
+ if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO)
+ return map__dso_map_ip(map, ip_or_rip);
+ else
+ return ip_or_rip;
+}
+
+static inline u64 map__unmap_ip(const struct map *map, u64 ip_or_rip)
+{
+ if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO)
+ return map__dso_unmap_ip(map, ip_or_rip);
+ else
+ return ip_or_rip;
+}
+
/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
u64 map__rip_2objdump(struct map *map, u64 rip);
@@ -294,13 +297,13 @@ static inline void map__set_dso(struct map *map, struct dso *dso)
RC_CHK_ACCESS(map)->dso = dso;
}
-static inline void map__set_map_ip(struct map *map, u64 (*map_ip)(const struct map *map, u64 ip))
+static inline void map__set_mapping_type(struct map *map, enum mapping_type type)
{
- RC_CHK_ACCESS(map)->map_ip = map_ip;
+ RC_CHK_ACCESS(map)->mapping_type = type;
}
-static inline void map__set_unmap_ip(struct map *map, u64 (*unmap_ip)(const struct map *map, u64 rip))
+static inline enum mapping_type map__mapping_type(struct map *map)
{
- RC_CHK_ACCESS(map)->unmap_ip = unmap_ip;
+ return RC_CHK_ACCESS(map)->mapping_type;
}
#endif /* __PERF_MAP_H */
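A worked numeric check of the mapping arithmetic above, with made-up addresses: a DSO mapped at 0x7f0000001000 whose file offset (pgoff) is 0x1000:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x7f0000001000ULL, pgoff = 0x1000ULL;
	uint64_t ip = 0x7f0000001234ULL;
	uint64_t rip = ip - start + pgoff;	/* map__dso_map_ip */

	assert(rip == 0x1234);
	assert(rip + start - pgoff == ip);	/* map__dso_unmap_ip */
	/* MAPPING_TYPE__IDENTITY returns the input unchanged both ways */
	return 0;
}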
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index 233438c95b53..0334fc18d9c6 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -10,6 +10,68 @@
#include "ui/ui.h"
#include "unwind.h"
+struct map_rb_node {
+ struct rb_node rb_node;
+ struct map *map;
+};
+
+#define maps__for_each_entry(maps, map) \
+ for (map = maps__first(maps); map; map = map_rb_node__next(map))
+
+#define maps__for_each_entry_safe(maps, map, next) \
+ for (map = maps__first(maps), next = map_rb_node__next(map); map; \
+ map = next, next = map_rb_node__next(map))
+
+static struct rb_root *maps__entries(struct maps *maps)
+{
+ return &RC_CHK_ACCESS(maps)->entries;
+}
+
+static struct rw_semaphore *maps__lock(struct maps *maps)
+{
+ return &RC_CHK_ACCESS(maps)->lock;
+}
+
+static struct map **maps__maps_by_name(struct maps *maps)
+{
+ return RC_CHK_ACCESS(maps)->maps_by_name;
+}
+
+static struct map_rb_node *maps__first(struct maps *maps)
+{
+ struct rb_node *first = rb_first(maps__entries(maps));
+
+ if (first)
+ return rb_entry(first, struct map_rb_node, rb_node);
+ return NULL;
+}
+
+static struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
+{
+ struct rb_node *next;
+
+ if (!node)
+ return NULL;
+
+ next = rb_next(&node->rb_node);
+
+ if (!next)
+ return NULL;
+
+ return rb_entry(next, struct map_rb_node, rb_node);
+}
+
+static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
+{
+ struct map_rb_node *rb_node;
+
+ maps__for_each_entry(maps, rb_node) {
+ if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
+ return rb_node;
+ }
+ return NULL;
+}
+
static void maps__init(struct maps *maps, struct machine *machine)
{
refcount_set(maps__refcnt(maps), 1);
@@ -196,6 +258,41 @@ void maps__put(struct maps *maps)
RC_CHK_PUT(maps);
}
+int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
+{
+ struct map_rb_node *pos;
+ int ret = 0;
+
+ down_read(maps__lock(maps));
+ maps__for_each_entry(maps, pos) {
+ ret = cb(pos->map, data);
+ if (ret)
+ break;
+ }
+ up_read(maps__lock(maps));
+ return ret;
+}
+
+void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
+{
+ struct map_rb_node *pos, *next;
+ unsigned int start_nr_maps;
+
+ down_write(maps__lock(maps));
+
+ start_nr_maps = maps__nr_maps(maps);
+ maps__for_each_entry_safe(maps, pos, next) {
+ if (cb(pos->map, data)) {
+ __maps__remove(maps, pos);
+ --RC_CHK_ACCESS(maps)->nr_maps;
+ }
+ }
+ if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps))
+ __maps__free_maps_by_name(maps);
+
+ up_write(maps__lock(maps));
+}
+
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
struct map *map = maps__find(maps, addr);
@@ -210,31 +307,40 @@ struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
return NULL;
}
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
-{
+struct maps__find_symbol_by_name_args {
+ struct map **mapp;
+ const char *name;
struct symbol *sym;
- struct map_rb_node *pos;
+};
- down_read(maps__lock(maps));
+static int maps__find_symbol_by_name_cb(struct map *map, void *data)
+{
+ struct maps__find_symbol_by_name_args *args = data;
- maps__for_each_entry(maps, pos) {
- sym = map__find_symbol_by_name(pos->map, name);
+ args->sym = map__find_symbol_by_name(map, args->name);
+ if (!args->sym)
+ return 0;
- if (sym == NULL)
- continue;
- if (!map__contains_symbol(pos->map, sym)) {
- sym = NULL;
- continue;
- }
- if (mapp != NULL)
- *mapp = pos->map;
- goto out;
+ if (!map__contains_symbol(map, args->sym)) {
+ args->sym = NULL;
+ return 0;
}
- sym = NULL;
-out:
- up_read(maps__lock(maps));
- return sym;
+ if (args->mapp != NULL)
+ *args->mapp = map__get(map);
+ return 1;
+}
+
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
+{
+ struct maps__find_symbol_by_name_args args = {
+ .mapp = mapp,
+ .name = name,
+ .sym = NULL,
+ };
+
+ maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
+ return args.sym;
}
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
@@ -253,41 +359,46 @@ int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
return ams->ms.sym ? 0 : -1;
}
-size_t maps__fprintf(struct maps *maps, FILE *fp)
-{
- size_t printed = 0;
- struct map_rb_node *pos;
+struct maps__fprintf_args {
+ FILE *fp;
+ size_t printed;
+};
- down_read(maps__lock(maps));
+static int maps__fprintf_cb(struct map *map, void *data)
+{
+ struct maps__fprintf_args *args = data;
- maps__for_each_entry(maps, pos) {
- printed += fprintf(fp, "Map:");
- printed += map__fprintf(pos->map, fp);
- if (verbose > 2) {
- printed += dso__fprintf(map__dso(pos->map), fp);
- printed += fprintf(fp, "--\n");
- }
+ args->printed += fprintf(args->fp, "Map:");
+ args->printed += map__fprintf(map, args->fp);
+ if (verbose > 2) {
+ args->printed += dso__fprintf(map__dso(map), args->fp);
+ args->printed += fprintf(args->fp, "--\n");
}
+ return 0;
+}
- up_read(maps__lock(maps));
+size_t maps__fprintf(struct maps *maps, FILE *fp)
+{
+ struct maps__fprintf_args args = {
+ .fp = fp,
+ .printed = 0,
+ };
+
+ maps__for_each_map(maps, maps__fprintf_cb, &args);
- return printed;
+ return args.printed;
}
-int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
+/*
+ * Find first map where end > map->start.
+ * Same as find_vma() in kernel.
+ */
+static struct rb_node *first_ending_after(struct maps *maps, const struct map *map)
{
struct rb_root *root;
struct rb_node *next, *first;
- int err = 0;
-
- down_write(maps__lock(maps));
root = maps__entries(maps);
-
- /*
- * Find first map where end > map->start.
- * Same as find_vma() in kernel.
- */
next = root->rb_node;
first = NULL;
while (next) {
@@ -301,8 +412,23 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
} else
next = next->rb_right;
}
+ return first;
+}
- next = first;
+/*
+ * Adds new to maps; if new overlaps existing entries then the existing maps are
+ * adjusted or removed so that new fits without overlapping any entries.
+ */
+int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
+{
+
+ struct rb_node *next;
+ int err = 0;
+ FILE *fp = debug_file();
+
+ down_write(maps__lock(maps));
+
+ next = first_ending_after(maps, new);
while (next && !err) {
struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
next = rb_next(&pos->rb_node);
@@ -311,27 +437,27 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
* Stop if current map starts after map->end.
* Maps are ordered by start: next will not overlap for sure.
*/
- if (map__start(pos->map) >= map__end(map))
+ if (map__start(pos->map) >= map__end(new))
break;
if (verbose >= 2) {
if (use_browser) {
pr_debug("overlapping maps in %s (disable tui for more info)\n",
- map__dso(map)->name);
+ map__dso(new)->name);
} else {
- fputs("overlapping maps:\n", fp);
- map__fprintf(map, fp);
+ pr_debug("overlapping maps:\n");
+ map__fprintf(new, fp);
map__fprintf(pos->map, fp);
}
}
- rb_erase_init(&pos->rb_node, root);
+ rb_erase_init(&pos->rb_node, maps__entries(maps));
/*
* Now check if we need to create new maps for areas not
* overlapped by the new map:
*/
- if (map__start(map) > map__start(pos->map)) {
+ if (map__start(new) > map__start(pos->map)) {
struct map *before = map__clone(pos->map);
if (before == NULL) {
@@ -339,7 +465,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
goto put_map;
}
- map__set_end(before, map__start(map));
+ map__set_end(before, map__start(new));
err = __maps__insert(maps, before);
if (err) {
map__put(before);
@@ -351,7 +477,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
map__put(before);
}
- if (map__end(map) < map__end(pos->map)) {
+ if (map__end(new) < map__end(pos->map)) {
struct map *after = map__clone(pos->map);
if (after == NULL) {
@@ -359,10 +485,10 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
goto put_map;
}
- map__set_start(after, map__end(map));
- map__add_pgoff(after, map__end(map) - map__start(pos->map));
- assert(map__map_ip(pos->map, map__end(map)) ==
- map__map_ip(after, map__end(map)));
+ map__set_start(after, map__end(new));
+ map__add_pgoff(after, map__end(new) - map__start(pos->map));
+ assert(map__map_ip(pos->map, map__end(new)) ==
+ map__map_ip(after, map__end(new)));
err = __maps__insert(maps, after);
if (err) {
map__put(after);
@@ -376,16 +502,14 @@ put_map:
map__put(pos->map);
free(pos);
}
+ /* Add the map. */
+ err = __maps__insert(maps, new);
up_write(maps__lock(maps));
return err;
}
-/*
- * XXX This should not really _copy_ te maps, but refcount them.
- */
-int maps__clone(struct thread *thread, struct maps *parent)
+int maps__copy_from(struct maps *maps, struct maps *parent)
{
- struct maps *maps = thread__maps(thread);
int err;
struct map_rb_node *rb_node;
@@ -416,17 +540,6 @@ out_unlock:
return err;
}
-struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
-{
- struct map_rb_node *rb_node;
-
- maps__for_each_entry(maps, rb_node) {
- if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
- return rb_node;
- }
- return NULL;
-}
-
struct map *maps__find(struct maps *maps, u64 ip)
{
struct rb_node *p;
@@ -452,26 +565,275 @@ out:
return m ? m->map : NULL;
}
-struct map_rb_node *maps__first(struct maps *maps)
+static int map__strcmp(const void *a, const void *b)
{
- struct rb_node *first = rb_first(maps__entries(maps));
+ const struct map *map_a = *(const struct map **)a;
+ const struct map *map_b = *(const struct map **)b;
+ const struct dso *dso_a = map__dso(map_a);
+ const struct dso *dso_b = map__dso(map_b);
+ int ret = strcmp(dso_a->short_name, dso_b->short_name);
- if (first)
- return rb_entry(first, struct map_rb_node, rb_node);
- return NULL;
+ if (ret == 0 && map_a != map_b) {
+ /*
+		 * Ensure that distinct maps with equal names have a stable
+		 * order, in part to aid reference counting.
+ */
+ ret = (int)map__start(map_a) - (int)map__start(map_b);
+ if (ret == 0)
+ ret = (int)((intptr_t)map_a - (intptr_t)map_b);
+ }
+
+ return ret;
}
-struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
+static int map__strcmp_name(const void *name, const void *b)
{
- struct rb_node *next;
+ const struct dso *dso = map__dso(*(const struct map **)b);
- if (!node)
- return NULL;
+ return strcmp(name, dso->short_name);
+}
- next = rb_next(&node->rb_node);
+void __maps__sort_by_name(struct maps *maps)
+{
+ qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
+}
- if (!next)
+static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
+{
+ struct map_rb_node *rb_node;
+ struct map **maps_by_name = realloc(maps__maps_by_name(maps),
+ maps__nr_maps(maps) * sizeof(struct map *));
+ int i = 0;
+
+ if (maps_by_name == NULL)
+ return -1;
+
+ up_read(maps__lock(maps));
+ down_write(maps__lock(maps));
+
+ RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
+ RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
+
+ maps__for_each_entry(maps, rb_node)
+ maps_by_name[i++] = map__get(rb_node->map);
+
+ __maps__sort_by_name(maps);
+
+ up_write(maps__lock(maps));
+ down_read(maps__lock(maps));
+
+ return 0;
+}
+
+static struct map *__maps__find_by_name(struct maps *maps, const char *name)
+{
+ struct map **mapp;
+
+ if (maps__maps_by_name(maps) == NULL &&
+ map__groups__sort_by_name_from_rbtree(maps))
return NULL;
- return rb_entry(next, struct map_rb_node, rb_node);
+ mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
+ sizeof(*mapp), map__strcmp_name);
+ if (mapp)
+ return *mapp;
+ return NULL;
+}
+
+struct map *maps__find_by_name(struct maps *maps, const char *name)
+{
+ struct map_rb_node *rb_node;
+ struct map *map;
+
+ down_read(maps__lock(maps));
+
+ if (RC_CHK_ACCESS(maps)->last_search_by_name) {
+ const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
+
+ if (strcmp(dso->short_name, name) == 0) {
+ map = RC_CHK_ACCESS(maps)->last_search_by_name;
+ goto out_unlock;
+ }
+ }
+ /*
+ * If we have maps->maps_by_name, then the name isn't in the rbtree,
+ * as maps->maps_by_name mirrors the rbtree when lookups by name are
+ * made.
+ */
+ map = __maps__find_by_name(maps, name);
+ if (map || maps__maps_by_name(maps) != NULL)
+ goto out_unlock;
+
+ /* Fallback to traversing the rbtree... */
+ maps__for_each_entry(maps, rb_node) {
+ struct dso *dso;
+
+ map = rb_node->map;
+ dso = map__dso(map);
+ if (strcmp(dso->short_name, name) == 0) {
+ RC_CHK_ACCESS(maps)->last_search_by_name = map;
+ goto out_unlock;
+ }
+ }
+ map = NULL;
+
+out_unlock:
+ up_read(maps__lock(maps));
+ return map;
+}
+
+struct map *maps__find_next_entry(struct maps *maps, struct map *map)
+{
+ struct map_rb_node *rb_node = maps__find_node(maps, map);
+ struct map_rb_node *next = map_rb_node__next(rb_node);
+
+ if (next)
+ return next->map;
+
+ return NULL;
+}
+
+void maps__fixup_end(struct maps *maps)
+{
+ struct map_rb_node *prev = NULL, *curr;
+
+ down_write(maps__lock(maps));
+
+ maps__for_each_entry(maps, curr) {
+ if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map)))
+ map__set_end(prev->map, map__start(curr->map));
+
+ prev = curr;
+ }
+
+ /*
+	 * We still don't have the actual symbols, so guess the
+	 * final address of the last map.
+ */
+ if (curr && !map__end(curr->map))
+ map__set_end(curr->map, ~0ULL);
+
+ up_write(maps__lock(maps));
+}
+
+/*
+ * Merge new_map into kmaps by splitting it around the existing map
+ * regions.
+ */
+int maps__merge_in(struct maps *kmaps, struct map *new_map)
+{
+ struct map_rb_node *rb_node;
+ struct rb_node *first;
+ bool overlaps;
+ LIST_HEAD(merged);
+ int err = 0;
+
+ down_read(maps__lock(kmaps));
+ first = first_ending_after(kmaps, new_map);
+ rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL;
+ overlaps = rb_node && map__start(rb_node->map) < map__end(new_map);
+ up_read(maps__lock(kmaps));
+
+ if (!overlaps)
+ return maps__insert(kmaps, new_map);
+
+ maps__for_each_entry(kmaps, rb_node) {
+ struct map *old_map = rb_node->map;
+
+		/* no overlap with this one */
+ if (map__end(new_map) < map__start(old_map) ||
+ map__start(new_map) >= map__end(old_map))
+ continue;
+
+ if (map__start(new_map) < map__start(old_map)) {
+ /*
+ * |new......
+ * |old....
+ */
+ if (map__end(new_map) < map__end(old_map)) {
+ /*
+ * |new......| -> |new..|
+ * |old....| -> |old....|
+ */
+ map__set_end(new_map, map__start(old_map));
+ } else {
+ /*
+ * |new.............| -> |new..| |new..|
+ * |old....| -> |old....|
+ */
+ struct map_list_node *m = map_list_node__new();
+
+ if (!m) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ m->map = map__clone(new_map);
+ if (!m->map) {
+ free(m);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ map__set_end(m->map, map__start(old_map));
+ list_add_tail(&m->node, &merged);
+ map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+ map__set_start(new_map, map__end(old_map));
+ }
+ } else {
+ /*
+ * |new......
+ * |old....
+ */
+ if (map__end(new_map) < map__end(old_map)) {
+ /*
+ * |new..| -> x
+ * |old.........| -> |old.........|
+ */
+ map__put(new_map);
+ new_map = NULL;
+ break;
+ } else {
+ /*
+ * |new......| -> |new...|
+ * |old....| -> |old....|
+ */
+ map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
+ map__set_start(new_map, map__end(old_map));
+ }
+ }
+ }
+
+out:
+ while (!list_empty(&merged)) {
+ struct map_list_node *old_node;
+
+ old_node = list_entry(merged.next, struct map_list_node, node);
+ list_del_init(&old_node->node);
+ if (!err)
+ err = maps__insert(kmaps, old_node->map);
+ map__put(old_node->map);
+ free(old_node);
+ }
+
+ if (new_map) {
+ if (!err)
+ err = maps__insert(kmaps, new_map);
+ map__put(new_map);
+ }
+ return err;
+}
+
+void maps__load_first(struct maps *maps)
+{
+ struct map_rb_node *first;
+
+ down_read(maps__lock(maps));
+
+ first = maps__first(maps);
+ if (first)
+ map__load(first->map);
+
+ up_read(maps__lock(maps));
}
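
The fixup-and-insert path above clips each overlapped map around the incoming
one: an optional "before" clone keeps the region in front of the new map, an
optional "after" clone keeps the tail, and the "after" clone's page offset is
advanced so file offsets still line up with its moved start. A minimal
standalone sketch of the same interval arithmetic, using plain structs rather
than the perf map API:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end, pgoff; };

    /* Split 'old' around 'new', producing up to two surviving pieces. */
    static int split_around(const struct range *old, const struct range *new,
                            struct range *before, struct range *after)
    {
        int n = 0;

        if (new->start > old->start) {  /* piece in front of the new map */
            *before = *old;
            before->end = new->start;
            n++;
        }
        if (new->end < old->end) {      /* piece behind the new map */
            *after = *old;
            after->start = new->end;
            /* keep the file offset consistent with the moved start */
            after->pgoff += new->end - old->start;
            n++;
        }
        return n;
    }

    int main(void)
    {
        struct range old = { 0x1000, 0x5000, 0 }, new = { 0x2000, 0x3000, 0 };
        struct range b, a;

        assert(split_around(&old, &new, &b, &a) == 2);
        printf("before: %#lx-%#lx\n", (unsigned long)b.start, (unsigned long)b.end);
        printf("after:  %#lx-%#lx pgoff=%#lx\n", (unsigned long)a.start,
               (unsigned long)a.end, (unsigned long)a.pgoff);
        return 0;
    }

Keeping pgoff consistent with the clipped start is exactly what the assert on
map__map_ip() in the hunk above verifies.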
diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h
index 83144e0645ed..d836d04c9402 100644
--- a/tools/perf/util/maps.h
+++ b/tools/perf/util/maps.h
@@ -14,24 +14,18 @@ struct ref_reloc_sym;
struct machine;
struct map;
struct maps;
-struct thread;
-struct map_rb_node {
- struct rb_node rb_node;
+struct map_list_node {
+ struct list_head node;
struct map *map;
};
-struct map_rb_node *maps__first(struct maps *maps);
-struct map_rb_node *map_rb_node__next(struct map_rb_node *node);
-struct map_rb_node *maps__find_node(struct maps *maps, struct map *map);
-struct map *maps__find(struct maps *maps, u64 addr);
-
-#define maps__for_each_entry(maps, map) \
- for (map = maps__first(maps); map; map = map_rb_node__next(map))
+static inline struct map_list_node *map_list_node__new(void)
+{
+ return malloc(sizeof(struct map_list_node));
+}
-#define maps__for_each_entry_safe(maps, map, next) \
- for (map = maps__first(maps), next = map_rb_node__next(map); map; \
- map = next, next = map_rb_node__next(map))
+struct map *maps__find(struct maps *maps, u64 addr);
DECLARE_RC_STRUCT(maps) {
struct rb_root entries;
@@ -58,7 +52,7 @@ struct kmap {
struct maps *maps__new(struct machine *machine);
bool maps__empty(struct maps *maps);
-int maps__clone(struct thread *thread, struct maps *parent);
+int maps__copy_from(struct maps *maps, struct maps *parent);
struct maps *maps__get(struct maps *maps);
void maps__put(struct maps *maps);
@@ -71,26 +65,16 @@ static inline void __maps__zput(struct maps **map)
#define maps__zput(map) __maps__zput(&map)
-static inline struct rb_root *maps__entries(struct maps *maps)
-{
- return &RC_CHK_ACCESS(maps)->entries;
-}
+/* Iterate over the maps, calling cb for each entry. */
+int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
+/* Iterate over the maps, removing an entry if cb returns true. */
+void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
static inline struct machine *maps__machine(struct maps *maps)
{
return RC_CHK_ACCESS(maps)->machine;
}
-static inline struct rw_semaphore *maps__lock(struct maps *maps)
-{
- return &RC_CHK_ACCESS(maps)->lock;
-}
-
-static inline struct map **maps__maps_by_name(struct maps *maps)
-{
- return RC_CHK_ACCESS(maps)->maps_by_name;
-}
-
static inline unsigned int maps__nr_maps(const struct maps *maps)
{
return RC_CHK_ACCESS(maps)->nr_maps;
@@ -125,12 +109,18 @@ struct addr_map_symbol;
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams);
-int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp);
+int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new);
struct map *maps__find_by_name(struct maps *maps, const char *name);
+struct map *maps__find_next_entry(struct maps *maps, struct map *map);
+
int maps__merge_in(struct maps *kmaps, struct map *new_map);
void __maps__sort_by_name(struct maps *maps);
+void maps__fixup_end(struct maps *maps);
+
+void maps__load_first(struct maps *maps);
+
#endif // __PERF_MAPS_H
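
With the rb-tree, its lock, and the by-name array no longer exported, callers
go through the two callback iterators declared above. A hedged sketch of the
iterate-with-callback shape (illustrative names, not the perf API):

    #include <stdio.h>

    struct item { int v; };

    /* Visit each item; a non-zero return from cb stops the walk early. */
    static int for_each_item(struct item *items, int n,
                             int (*cb)(struct item *, void *), void *data)
    {
        for (int i = 0; i < n; i++) {
            int err = cb(&items[i], data);

            if (err)
                return err;
        }
        return 0;
    }

    static int sum_cb(struct item *it, void *data)
    {
        *(int *)data += it->v;
        return 0; /* keep iterating */
    }

    int main(void)
    {
        struct item items[] = { {1}, {2}, {3} };
        int sum = 0;

        for_each_item(items, 3, sum_cb, &sum);
        printf("sum=%d\n", sum); /* 6 */
        return 0;
    }

Hiding the lock behind the iterator also means callers can no longer forget to
take it before walking the entries.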
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 954b235e12e5..3a2e3687878c 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -100,11 +100,14 @@ int perf_mem_events__parse(const char *str)
return -1;
}
-static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
+static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
+ struct perf_mem_event *e)
{
+ char sysfs_name[100];
char path[PATH_MAX];
struct stat st;
+ scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
return !stat(path, &st);
}
@@ -120,7 +123,6 @@ int perf_mem_events__init(void)
for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
struct perf_mem_event *e = perf_mem_events__ptr(j);
- char sysfs_name[100];
struct perf_pmu *pmu = NULL;
/*
@@ -136,12 +138,12 @@ int perf_mem_events__init(void)
* of core PMU.
*/
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name);
- e->supported |= perf_mem_event__supported(mnt, sysfs_name);
+ e->supported |= perf_mem_event__supported(mnt, pmu, e);
+ if (e->supported) {
+ found = true;
+ break;
+ }
}
-
- if (e->supported)
- found = true;
}
return found ? 0 : -ENOENT;
@@ -167,13 +169,10 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
int idx)
{
const char *mnt = sysfs__mount();
- char sysfs_name[100];
struct perf_pmu *pmu = NULL;
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
- pmu->name);
- if (!perf_mem_event__supported(mnt, sysfs_name)) {
+ if (!perf_mem_event__supported(mnt, pmu, e)) {
pr_err("failed: event '%s' not supported\n",
perf_mem_events__name(idx, pmu->name));
}
@@ -183,6 +182,7 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
char **rec_tmp, int *tmp_nr)
{
+ const char *mnt = sysfs__mount();
int i = *argv_nr, k = 0;
struct perf_mem_event *e;
@@ -211,6 +211,9 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
while ((pmu = perf_pmus__scan(pmu)) != NULL) {
const char *s = perf_mem_events__name(j, pmu->name);
+ if (!perf_mem_event__supported(mnt, pmu, e))
+ continue;
+
rec_argv[i++] = "-e";
if (s) {
char *copy = strdup(s);
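
perf_mem_event__supported() now formats the per-PMU sysfs name itself, and the
record-argument loop above skips PMUs where the probe fails. A sketch of the
stat()-based capability probe; the mount point and event path here are
assumptions for illustration:

    #include <stdio.h>
    #include <sys/stat.h>

    /*
     * Returns 1 if the sysfs event file exists for this PMU. sysfs_fmt
     * carries a "%s" for the PMU name, e.g. "%s/events/mem-loads".
     */
    static int event_supported(const char *mnt, const char *pmu_name,
                               const char *sysfs_fmt)
    {
        char name[100], path[4096];
        struct stat st;

        snprintf(name, sizeof(name), sysfs_fmt, pmu_name);
        snprintf(path, sizeof(path), "%s/devices/%s", mnt, name);
        return stat(path, &st) == 0;
    }

    int main(void)
    {
        printf("%d\n", event_supported("/sys/bus/event_source", "cpu",
                                       "%s/events/mem-loads"));
        return 0;
    }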
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index ca3e0404f187..966cca5a3e88 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -286,7 +286,7 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids,
*out_metric_events = NULL;
ids_size = hashmap__size(ids);
- metric_events = calloc(sizeof(void *), ids_size + 1);
+ metric_events = calloc(ids_size + 1, sizeof(void *));
if (!metric_events)
return -ENOMEM;
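
The metricgroup change swaps the calloc() arguments into their declared order,
calloc(nmemb, size): both orders allocate the same zeroed region, but the
element count belongs in the first slot, and recent compilers can warn on the
transposed form. For illustration:

    #include <stdlib.h>

    int main(void)
    {
        size_t ids_size = 16;
        /* correct: element count first, element size second */
        void **metric_events = calloc(ids_size + 1, sizeof(void *));

        free(metric_events);
        return 0;
    }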
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 49093b21ee2d..122ee198a86e 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -295,15 +295,14 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu
map->core.flush = mp->flush;
- map->comp_level = mp->comp_level;
#ifndef PYTHON_PERF
- if (zstd_init(&map->zstd_data, map->comp_level)) {
+ if (zstd_init(&map->zstd_data, mp->comp_level)) {
pr_debug2("failed to init mmap compressor, error %d\n", errno);
return -1;
}
#endif
- if (map->comp_level && !perf_mmap__aio_enabled(map)) {
+ if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->data == MAP_FAILED) {
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index f944c3cd5efa..0df6e1621c7e 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -39,7 +39,6 @@ struct mmap {
#endif
struct mmap_cpu_mask affinity_mask;
void *data;
- int comp_level;
struct perf_data_file *file;
struct zstd_data zstd_data;
};
diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c
index fd67d204d720..f7f7aff3d85a 100644
--- a/tools/perf/util/parse-branch-options.c
+++ b/tools/perf/util/parse-branch-options.c
@@ -36,6 +36,7 @@ static const struct branch_mode branch_modes[] = {
BRANCH_OPT("stack", PERF_SAMPLE_BRANCH_CALL_STACK),
BRANCH_OPT("hw_index", PERF_SAMPLE_BRANCH_HW_INDEX),
BRANCH_OPT("priv", PERF_SAMPLE_BRANCH_PRIV_SAVE),
+ BRANCH_OPT("counter", PERF_SAMPLE_BRANCH_COUNTERS),
BRANCH_END
};
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index aa2f5c6fc7fc..66eabcea4242 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -976,7 +976,7 @@ static int config_term_pmu(struct perf_event_attr *attr,
struct parse_events_error *err)
{
if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) {
- const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
if (!pmu) {
char *err_str;
@@ -986,15 +986,23 @@ static int config_term_pmu(struct perf_event_attr *attr,
err_str, /*help=*/NULL);
return -EINVAL;
}
- if (perf_pmu__supports_legacy_cache(pmu)) {
+ /*
+ * Rewrite the PMU event to a legacy cache one unless the PMU
+ * doesn't support legacy cache events or the event is present
+ * within the PMU.
+ */
+ if (perf_pmu__supports_legacy_cache(pmu) &&
+ !perf_pmu__have_event(pmu, term->config)) {
attr->type = PERF_TYPE_HW_CACHE;
return parse_events__decode_legacy_cache(term->config, pmu->type,
&attr->config);
- } else
+ } else {
term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
+ term->no_value = true;
+ }
}
if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) {
- const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
+ struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
if (!pmu) {
char *err_str;
@@ -1004,10 +1012,19 @@ static int config_term_pmu(struct perf_event_attr *attr,
err_str, /*help=*/NULL);
return -EINVAL;
}
- attr->type = PERF_TYPE_HARDWARE;
- attr->config = term->val.num;
- if (perf_pmus__supports_extended_type())
- attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+ /*
+ * If the PMU has a sysfs or json event prefer it over
+ * legacy. ARM requires this.
+ */
+ if (perf_pmu__have_event(pmu, term->config)) {
+ term->type_term = PARSE_EVENTS__TERM_TYPE_USER;
+ term->no_value = true;
+ } else {
+ attr->type = PERF_TYPE_HARDWARE;
+ attr->config = term->val.num;
+ if (perf_pmus__supports_extended_type())
+ attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+ }
return 0;
}
if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER ||
@@ -1381,6 +1398,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
YYLTYPE *loc = loc_;
LIST_HEAD(config_terms);
struct parse_events_terms parsed_terms;
+ bool alias_rewrote_terms = false;
pmu = parse_state->fake_pmu ?: perf_pmus__find(name);
@@ -1433,7 +1451,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return evsel ? 0 : -ENOMEM;
}
- if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms, &info, err)) {
+ /* Configure attr/terms with a known PMU, this will set hardcoded terms. */
+ if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+ parse_events_terms__exit(&parsed_terms);
+ return -EINVAL;
+ }
+
+ /* Look for event names in the terms and rewrite into format based terms. */
+ if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms,
+ &info, &alias_rewrote_terms, err)) {
parse_events_terms__exit(&parsed_terms);
return -EINVAL;
}
@@ -1447,11 +1473,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
strbuf_release(&sb);
}
- /*
- * Configure hardcoded terms first, no need to check
- * return value when called with fail == 0 ;)
- */
- if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+ /* Configure attr/terms again if an alias was expanded. */
+ if (alias_rewrote_terms &&
+ config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
parse_events_terms__exit(&parsed_terms);
return -EINVAL;
}
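
The reordering above configures the attr from the raw terms first, expands any
alias, and re-runs config_attr() only when the alias pass actually rewrote a
term, reported through the new rewrote_terms out-parameter. A minimal sketch
of that out-parameter pattern, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Expand an alias in-place; report via *rewrote whether anything changed. */
    static int expand_alias(char *term, size_t sz, bool *rewrote)
    {
        *rewrote = false;
        if (strcmp(term, "alias-ev") == 0) {
            snprintf(term, sz, "config=0x1");
            *rewrote = true;
        }
        return 0;
    }

    int main(void)
    {
        char term[32] = "alias-ev";
        bool rewrote;

        /* first pass would configure the attr from 'term' as-is ... */
        if (expand_alias(term, sizeof(term), &rewrote))
            return 1;
        /* ... and only a rewrite makes a second pass necessary */
        if (rewrote)
            printf("reconfigure with: %s\n", term);
        return 0;
    }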
diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c
index e1e2d701599c..1de3b69cdf4a 100644
--- a/tools/perf/util/perf_api_probe.c
+++ b/tools/perf/util/perf_api_probe.c
@@ -64,7 +64,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
struct perf_cpu cpu;
int ret, i = 0;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus)
return false;
cpu = perf_cpu_map__cpu(cpus, 0);
@@ -140,7 +140,7 @@ bool perf_can_record_cpu_wide(void)
struct perf_cpu cpu;
int fd;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus)
return false;
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 2247991451f3..8f04d3b7f3ec 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -55,6 +55,7 @@ static void __p_branch_sample_type(char *buf, size_t size, u64 value)
bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
bit_name(TYPE_SAVE), bit_name(HW_INDEX), bit_name(PRIV_SAVE),
+ bit_name(COUNTERS),
{ .name = NULL, }
};
#undef bit_name
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d3c9aa4326be..3c9609944a2f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1494,12 +1494,14 @@ static int check_info_data(struct perf_pmu *pmu,
* defined for the alias
*/
int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
- struct perf_pmu_info *info, struct parse_events_error *err)
+ struct perf_pmu_info *info, bool *rewrote_terms,
+ struct parse_events_error *err)
{
struct parse_events_term *term, *h;
struct perf_pmu_alias *alias;
int ret;
+ *rewrote_terms = false;
info->per_pkg = false;
/*
@@ -1521,7 +1523,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
NULL);
return ret;
}
-
+ *rewrote_terms = true;
ret = check_info_data(pmu, alias, info, err, term->err_term);
if (ret)
return ret;
@@ -1615,6 +1617,8 @@ bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name)
{
+ if (!name)
+ return false;
if (perf_pmu__find_alias(pmu, name, /*load=*/ true) != NULL)
return true;
if (pmu->cpu_aliases_added || !pmu->events_table)
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index d2895d415f08..424c3fee0949 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -201,7 +201,8 @@ int perf_pmu__config_terms(const struct perf_pmu *pmu,
__u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name);
int perf_pmu__format_type(struct perf_pmu *pmu, const char *name);
int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
- struct perf_pmu_info *info, struct parse_events_error *err);
+ struct perf_pmu_info *info, bool *rewrote_terms,
+ struct parse_events_error *err);
int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb);
int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load);
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index b0fc48be623f..9e47712507cc 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -66,7 +66,7 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus
put_tracing_file(events_path);
if (events_fd < 0) {
- printf("Error: failed to open tracing events directory\n");
+ pr_err("Error: failed to open tracing events directory\n");
return;
}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 1a5b7fa459b2..a1a796043691 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -149,10 +149,32 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
return 0;
}
+struct kernel_get_module_map_cb_args {
+ const char *module;
+ struct map *result;
+};
+
+static int kernel_get_module_map_cb(struct map *map, void *data)
+{
+ struct kernel_get_module_map_cb_args *args = data;
+ struct dso *dso = map__dso(map);
+ const char *short_name = dso->short_name; /* short_name is "[module]" */
+ u16 short_name_len = dso->short_name_len;
+
+ if (strncmp(short_name + 1, args->module, short_name_len - 2) == 0 &&
+ args->module[short_name_len - 2] == '\0') {
+ args->result = map__get(map);
+ return 1;
+ }
+ return 0;
+}
+
static struct map *kernel_get_module_map(const char *module)
{
- struct maps *maps = machine__kernel_maps(host_machine);
- struct map_rb_node *pos;
+ struct kernel_get_module_map_cb_args args = {
+ .module = module,
+ .result = NULL,
+ };
/* A file path -- this is an offline module */
if (module && strchr(module, '/'))
@@ -164,19 +186,9 @@ static struct map *kernel_get_module_map(const char *module)
return map__get(map);
}
- maps__for_each_entry(maps, pos) {
- /* short_name is "[module]" */
- struct dso *dso = map__dso(pos->map);
- const char *short_name = dso->short_name;
- u16 short_name_len = dso->short_name_len;
+ maps__for_each_map(machine__kernel_maps(host_machine), kernel_get_module_map_cb, &args);
- if (strncmp(short_name + 1, module,
- short_name_len - 2) == 0 &&
- module[short_name_len - 2] == '\0') {
- return map__get(pos->map);
- }
- }
- return NULL;
+ return args.result;
}
struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
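
kernel_get_module_map() moves to the callback iterator by bundling its
loop-local state into a context struct passed through the void * cookie, with
a non-zero return stopping the walk at the first match. The same pattern in
isolation (illustrative types, not the perf API):

    #include <stdio.h>
    #include <string.h>

    struct find_args {
        const char *name;   /* what we are looking for */
        const char *result; /* set by the callback on a match */
    };

    static int match_cb(const char *entry, void *data)
    {
        struct find_args *args = data;

        if (strcmp(entry, args->name) == 0) {
            args->result = entry;
            return 1; /* non-zero stops the iteration */
        }
        return 0;
    }

    static void for_each_entry(const char **tab, int n,
                               int (*cb)(const char *, void *), void *data)
    {
        for (int i = 0; i < n && !cb(tab[i], data); i++)
            ;
    }

    int main(void)
    {
        const char *tab[] = { "[ext4]", "[xfs]", "[btrfs]" };
        struct find_args args = { .name = "[xfs]", .result = NULL };

        for_each_entry(tab, 3, match_cb, &args);
        printf("%s\n", args.result ? args.result : "not found");
        return 0;
    }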
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index f171360b0ef4..c8923375e30d 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -23,6 +23,7 @@
#include "event.h"
#include "dso.h"
#include "debug.h"
+#include "debuginfo.h"
#include "intlist.h"
#include "strbuf.h"
#include "strlist.h"
@@ -31,128 +32,9 @@
#include "probe-file.h"
#include "string2.h"
-#ifdef HAVE_DEBUGINFOD_SUPPORT
-#include <elfutils/debuginfod.h>
-#endif
-
/* Kprobe tracer basic type is up to u64 */
#define MAX_BASIC_TYPE_BITS 64
-/* Dwarf FL wrappers */
-static char *debuginfo_path; /* Currently dummy */
-
-static const Dwfl_Callbacks offline_callbacks = {
- .find_debuginfo = dwfl_standard_find_debuginfo,
- .debuginfo_path = &debuginfo_path,
-
- .section_address = dwfl_offline_section_address,
-
- /* We use this table for core files too. */
- .find_elf = dwfl_build_id_find_elf,
-};
-
-/* Get a Dwarf from offline image */
-static int debuginfo__init_offline_dwarf(struct debuginfo *dbg,
- const char *path)
-{
- GElf_Addr dummy;
- int fd;
-
- fd = open(path, O_RDONLY);
- if (fd < 0)
- return fd;
-
- dbg->dwfl = dwfl_begin(&offline_callbacks);
- if (!dbg->dwfl)
- goto error;
-
- dwfl_report_begin(dbg->dwfl);
- dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd);
- if (!dbg->mod)
- goto error;
-
- dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias);
- if (!dbg->dbg)
- goto error;
-
- dwfl_module_build_id(dbg->mod, &dbg->build_id, &dummy);
-
- dwfl_report_end(dbg->dwfl, NULL, NULL);
-
- return 0;
-error:
- if (dbg->dwfl)
- dwfl_end(dbg->dwfl);
- else
- close(fd);
- memset(dbg, 0, sizeof(*dbg));
-
- return -ENOENT;
-}
-
-static struct debuginfo *__debuginfo__new(const char *path)
-{
- struct debuginfo *dbg = zalloc(sizeof(*dbg));
- if (!dbg)
- return NULL;
-
- if (debuginfo__init_offline_dwarf(dbg, path) < 0)
- zfree(&dbg);
- if (dbg)
- pr_debug("Open Debuginfo file: %s\n", path);
- return dbg;
-}
-
-enum dso_binary_type distro_dwarf_types[] = {
- DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
- DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
- DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
- DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
- DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
- DSO_BINARY_TYPE__NOT_FOUND,
-};
-
-struct debuginfo *debuginfo__new(const char *path)
-{
- enum dso_binary_type *type;
- char buf[PATH_MAX], nil = '\0';
- struct dso *dso;
- struct debuginfo *dinfo = NULL;
- struct build_id bid;
-
- /* Try to open distro debuginfo files */
- dso = dso__new(path);
- if (!dso)
- goto out;
-
- /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
- if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
- dso__set_build_id(dso, &bid);
-
- for (type = distro_dwarf_types;
- !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
- type++) {
- if (dso__read_binary_type_filename(dso, *type, &nil,
- buf, PATH_MAX) < 0)
- continue;
- dinfo = __debuginfo__new(buf);
- }
- dso__put(dso);
-
-out:
- /* if failed to open all distro debuginfo, open given binary */
- return dinfo ? : __debuginfo__new(path);
-}
-
-void debuginfo__delete(struct debuginfo *dbg)
-{
- if (dbg) {
- if (dbg->dwfl)
- dwfl_end(dbg->dwfl);
- free(dbg);
- }
-}
-
/*
* Probe finder related functions
*/
@@ -722,7 +604,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
if (ret <= 0 || nops == 0) {
pf->fb_ops = NULL;
-#if _ELFUTILS_PREREQ(0, 142)
+#ifdef HAVE_DWARF_CFI_SUPPORT
} else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
(pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) {
if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 &&
@@ -733,7 +615,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
free(frame);
return -ENOENT;
}
-#endif
+#endif /* HAVE_DWARF_CFI_SUPPORT */
}
/* Call finder's callback handler */
@@ -1258,7 +1140,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
pf->machine = ehdr.e_machine;
-#if _ELFUTILS_PREREQ(0, 142)
+#ifdef HAVE_DWARF_CFI_SUPPORT
do {
GElf_Shdr shdr;
@@ -1268,7 +1150,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
pf->cfi_dbg = dwarf_getcfi(dbg->dbg);
} while (0);
-#endif
+#endif /* HAVE_DWARF_CFI_SUPPORT */
ret = debuginfo__find_probe_location(dbg, pf);
return ret;
@@ -1677,44 +1559,6 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
return (ret < 0) ? ret : af.nvls;
}
-/* For the kernel module, we need a special code to get a DIE */
-int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
- bool adjust_offset)
-{
- int n, i;
- Elf32_Word shndx;
- Elf_Scn *scn;
- Elf *elf;
- GElf_Shdr mem, *shdr;
- const char *p;
-
- elf = dwfl_module_getelf(dbg->mod, &dbg->bias);
- if (!elf)
- return -EINVAL;
-
- /* Get the number of relocations */
- n = dwfl_module_relocations(dbg->mod);
- if (n < 0)
- return -ENOENT;
- /* Search the relocation related .text section */
- for (i = 0; i < n; i++) {
- p = dwfl_module_relocation_info(dbg->mod, i, &shndx);
- if (strcmp(p, ".text") == 0) {
- /* OK, get the section header */
- scn = elf_getscn(elf, shndx);
- if (!scn)
- return -ENOENT;
- shdr = gelf_getshdr(scn, &mem);
- if (!shdr)
- return -ENOENT;
- *offs = shdr->sh_addr;
- if (adjust_offset)
- *offs -= shdr->sh_offset;
- }
- }
- return 0;
-}
-
/* Reverse search */
int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
struct perf_probe_point *ppt)
@@ -2009,41 +1853,6 @@ found:
return (ret < 0) ? ret : lf.found;
}
-#ifdef HAVE_DEBUGINFOD_SUPPORT
-/* debuginfod doesn't require the comp_dir but buildid is required */
-static int get_source_from_debuginfod(const char *raw_path,
- const char *sbuild_id, char **new_path)
-{
- debuginfod_client *c = debuginfod_begin();
- const char *p = raw_path;
- int fd;
-
- if (!c)
- return -ENOMEM;
-
- fd = debuginfod_find_source(c, (const unsigned char *)sbuild_id,
- 0, p, new_path);
- pr_debug("Search %s from debuginfod -> %d\n", p, fd);
- if (fd >= 0)
- close(fd);
- debuginfod_end(c);
- if (fd < 0) {
- pr_debug("Failed to find %s in debuginfod (%s)\n",
- raw_path, sbuild_id);
- return -ENOENT;
- }
- pr_debug("Got a source %s\n", *new_path);
-
- return 0;
-}
-#else
-static inline int get_source_from_debuginfod(const char *raw_path __maybe_unused,
- const char *sbuild_id __maybe_unused,
- char **new_path __maybe_unused)
-{
- return -ENOTSUP;
-}
-#endif
/*
* Find a src file from a DWARF tag path. Prepend optional source path prefix
* and chop off leading directories that do not exist. Result is passed back as
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 8bc1c80d3c1c..3add5ff516e1 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -24,21 +24,7 @@ static inline int is_c_varname(const char *name)
#ifdef HAVE_DWARF_SUPPORT
#include "dwarf-aux.h"
-
-/* TODO: export debuginfo data structure even if no dwarf support */
-
-/* debug information structure */
-struct debuginfo {
- Dwarf *dbg;
- Dwfl_Module *mod;
- Dwfl *dwfl;
- Dwarf_Addr bias;
- const unsigned char *build_id;
-};
-
-/* This also tries to open distro debuginfo */
-struct debuginfo *debuginfo__new(const char *path);
-void debuginfo__delete(struct debuginfo *dbg);
+#include "debuginfo.h"
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
int debuginfo__find_trace_events(struct debuginfo *dbg,
@@ -49,9 +35,6 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr,
struct perf_probe_point *ppt);
-int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
- bool adjust_offset);
-
/* Find a line range */
int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
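
The _ELFUTILS_PREREQ(0, 142) version probes in probe-finder.c become a single
HAVE_DWARF_CFI_SUPPORT define supplied by the build system, so call sites
guard on the feature rather than on a library version number. The guard
pattern in isolation:

    #include <stdio.h>

    /*
     * Feature detection defines HAVE_DWARF_CFI_SUPPORT once, centrally,
     * when elfutils provides the CFI interfaces.
     */
    #ifdef HAVE_DWARF_CFI_SUPPORT
    #define CFI_AVAILABLE 1
    #else
    #define CFI_AVAILABLE 0
    #endif /* HAVE_DWARF_CFI_SUPPORT */

    int main(void)
    {
        printf("CFI frame-base support: %s\n", CFI_AVAILABLE ? "yes" : "no");
        return 0;
    }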
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 9eb5c6a08999..87e817b3cf7e 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -237,8 +237,8 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
evsel = evlist__last(temp_evlist);
- if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
- struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+ if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
+ struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
if (cpus)
cpu = perf_cpu_map__cpu(cpus, 0);
diff --git a/tools/perf/util/s390-cpumcf-kernel.h b/tools/perf/util/s390-cpumcf-kernel.h
index f55ca07f3ca1..74b36644e384 100644
--- a/tools/perf/util/s390-cpumcf-kernel.h
+++ b/tools/perf/util/s390-cpumcf-kernel.h
@@ -12,6 +12,8 @@
#define S390_CPUMCF_DIAG_DEF 0xfeef /* Counter diagnostic entry ID */
#define PERF_EVENT_CPUM_CF_DIAG 0xBC000 /* Event: Counter sets */
#define PERF_EVENT_CPUM_SF_DIAG 0xBD000 /* Event: Combined-sampling */
+#define PERF_EVENT_PAI_CRYPTO_ALL 0x1000 /* Event: CRYPTO_ALL */
+#define PERF_EVENT_PAI_NNPA_ALL 0x1800 /* Event: NNPA_ALL */
struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
unsigned int def:16; /* 0-15 Data Entry Format */
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index 115b16edb451..53383e97ec9d 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -51,8 +51,6 @@ static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
struct cf_trailer_entry *te;
struct cf_ctrset_entry *cep, ce;
- if (!len)
- return false;
while (offset < len) {
cep = (struct cf_ctrset_entry *)(buf + offset);
ce.def = be16_to_cpu(cep->def);
@@ -125,6 +123,9 @@ static int get_counterset_start(int setnr)
return 128;
case CPUMF_CTR_SET_MT_DIAG: /* Diagnostic counter set */
return 448;
+ case PERF_EVENT_PAI_NNPA_ALL: /* PAI NNPA counter set */
+ case PERF_EVENT_PAI_CRYPTO_ALL: /* PAI CRYPTO counter set */
+ return setnr;
default:
return -1;
}
@@ -212,27 +213,120 @@ static void s390_cpumcfdg_dump(struct perf_pmu *pmu, struct perf_sample *sample)
}
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+/*
+ * Check for consistency of PAI_CRYPTO/PAI_NNPA raw data.
+ */
+struct pai_data { /* Event number and value */
+ u16 event_nr;
+ u64 event_val;
+} __packed;
+
+#pragma GCC diagnostic pop
+
+/*
+ * Test for valid raw data. At least one PAI event should be in the raw
+ * data section.
+ */
+static bool s390_pai_all_test(struct perf_sample *sample)
+{
+ size_t len = sample->raw_size;
+
+ if (len < 0xa)
+ return false;
+ return true;
+}
+
+static void s390_pai_all_dump(struct evsel *evsel, struct perf_sample *sample)
+{
+ size_t len = sample->raw_size, offset = 0;
+ unsigned char *p = sample->raw_data;
+ const char *color = PERF_COLOR_BLUE;
+ struct pai_data pai_data;
+ char *ev_name;
+
+ while (offset < len) {
+ memcpy(&pai_data.event_nr, p, sizeof(pai_data.event_nr));
+ pai_data.event_nr = be16_to_cpu(pai_data.event_nr);
+ p += sizeof(pai_data.event_nr);
+ offset += sizeof(pai_data.event_nr);
+
+ memcpy(&pai_data.event_val, p, sizeof(pai_data.event_val));
+ pai_data.event_val = be64_to_cpu(pai_data.event_val);
+ p += sizeof(pai_data.event_val);
+ offset += sizeof(pai_data.event_val);
+
+ ev_name = get_counter_name(evsel->core.attr.config,
+ pai_data.event_nr, evsel->pmu);
+ color_fprintf(stdout, color, "\tCounter:%03d %s Value:%#018lx\n",
+ pai_data.event_nr, ev_name ?: "<unknown>",
+ pai_data.event_val);
+ free(ev_name);
+
+ if (offset + 0xa > len)
+ break;
+ }
+ color_fprintf(stdout, color, "\n");
+}
+
/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events
- * and if the event was triggered by a counter set diagnostic event display
- * its raw data.
+ * and if the event was triggered by a
+ * - counter set diagnostic event
+ * - processor activity assist (PAI) crypto counter event
+ * - processor activity assist (PAI) neural network processor assist (NNPA)
+ * counter event
+ * display its raw data.
* The function is only invoked when the dump flag -D is set.
+ *
+ * Function evlist__s390_sample_raw() is defined as a callback after it has
+ * been verified that the perf.data file was created on an s390 platform.
*/
-void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
+void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
{
+ const char *pai_name;
struct evsel *evsel;
if (event->header.type != PERF_RECORD_SAMPLE)
return;
evsel = evlist__event2evsel(evlist, event);
- if (evsel == NULL ||
- evsel->core.attr.config != PERF_EVENT_CPUM_CF_DIAG)
+ if (!evsel)
+ return;
+
+ /* Check for raw data in sample */
+ if (!sample->raw_size || !sample->raw_data)
return;
/* Display raw data on screen */
- if (!s390_cpumcfdg_testctr(sample)) {
- pr_err("Invalid counter set data encountered\n");
+ if (evsel->core.attr.config == PERF_EVENT_CPUM_CF_DIAG) {
+ if (!evsel->pmu)
+ evsel->pmu = perf_pmus__find("cpum_cf");
+ if (!s390_cpumcfdg_testctr(sample))
+ pr_err("Invalid counter set data encountered\n");
+ else
+ s390_cpumcfdg_dump(evsel->pmu, sample);
+ return;
+ }
+
+ switch (evsel->core.attr.config) {
+ case PERF_EVENT_PAI_NNPA_ALL:
+ pai_name = "NNPA_ALL";
+ break;
+ case PERF_EVENT_PAI_CRYPTO_ALL:
+ pai_name = "CRYPTO_ALL";
+ break;
+ default:
return;
}
- s390_cpumcfdg_dump(evsel->pmu, sample);
+
+ if (!s390_pai_all_test(sample)) {
+ pr_err("Invalid %s raw data encountered\n", pai_name);
+ } else {
+ if (!evsel->pmu)
+ evsel->pmu = perf_pmus__find_by_type(evsel->core.attr.type);
+ s390_pai_all_dump(evsel, sample);
+ }
}
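
Each PAI record in the raw data is a 2-byte big-endian event number followed
by an 8-byte big-endian value, which is why s390_pai_all_test() requires at
least 0xa (2 + 8 = 10) bytes. A standalone sketch of the same parse, assuming
that layout:

    #include <stdint.h>
    #include <stdio.h>

    /* One PAI record: u16 event number + u64 value, big-endian on the wire. */
    #define PAI_RECORD_LEN (sizeof(uint16_t) + sizeof(uint64_t)) /* 0xa */

    static uint64_t be_load(const unsigned char *p, int n)
    {
        uint64_t v = 0;

        for (int i = 0; i < n; i++)
            v = (v << 8) | p[i];
        return v;
    }

    int main(void)
    {
        /* event 7 with value 42, in big-endian byte order */
        unsigned char raw[] = { 0x00, 0x07, 0, 0, 0, 0, 0, 0, 0x00, 0x2a };
        size_t len = sizeof(raw), off = 0;

        while (off + PAI_RECORD_LEN <= len) {
            uint16_t nr  = (uint16_t)be_load(raw + off, 2);
            uint64_t val = be_load(raw + off + 2, 8);

            printf("counter %u = %llu\n", nr, (unsigned long long)val);
            off += PAI_RECORD_LEN;
        }
        return 0;
    }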
diff --git a/tools/perf/util/sample.h b/tools/perf/util/sample.h
index c92ad0f51ecd..70b2c3135555 100644
--- a/tools/perf/util/sample.h
+++ b/tools/perf/util/sample.h
@@ -113,6 +113,7 @@ struct perf_sample {
void *raw_data;
struct ip_callchain *callchain;
struct branch_stack *branch_stack;
+ u64 *branch_stack_cntr;
struct regs_dump user_regs;
struct regs_dump intr_regs;
struct stack_dump user_stack;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 603091317bed..b072ac5d3bc2 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -490,6 +490,9 @@ static int perl_start_script(const char *script, int argc, const char **argv,
scripting_context->session = session;
command_line = malloc((argc + 2) * sizeof(const char *));
+ if (!command_line)
+ return -ENOMEM;
+
command_line[0] = "";
command_line[1] = script;
for (i = 2; i < argc + 2; i++)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 94312741443a..860e1837ba96 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -353,6 +353,8 @@ static PyObject *get_field_numeric_entry(struct tep_event *event,
if (is_array) {
list = PyList_New(field->arraylen);
+ if (!list)
+ Py_FatalError("couldn't create Python list");
item_size = field->size / field->arraylen;
n_items = field->arraylen;
} else {
@@ -754,7 +756,7 @@ static void regs_map(struct regs_dump *regs, uint64_t mask, const char *arch, ch
}
}
-static void set_regs_in_dict(PyObject *dict,
+static int set_regs_in_dict(PyObject *dict,
struct perf_sample *sample,
struct evsel *evsel)
{
@@ -770,6 +772,8 @@ static void set_regs_in_dict(PyObject *dict,
*/
int size = __sw_hweight64(attr->sample_regs_intr) * 28;
char *bf = malloc(size);
+ if (!bf)
+ return -1;
regs_map(&sample->intr_regs, attr->sample_regs_intr, arch, bf, size);
@@ -781,6 +785,8 @@ static void set_regs_in_dict(PyObject *dict,
pydict_set_item_string_decref(dict, "uregs",
_PyUnicode_FromString(bf));
free(bf);
+
+ return 0;
}
static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
@@ -920,7 +926,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
PyLong_FromUnsignedLongLong(sample->cyc_cnt));
}
- set_regs_in_dict(dict, sample, evsel);
+ if (set_regs_in_dict(dict, sample, evsel))
+		Py_FatalError("Failed to set regs in dict");
return dict;
}
@@ -1918,12 +1925,18 @@ static int python_start_script(const char *script, int argc, const char **argv,
scripting_context->session = session;
#if PY_MAJOR_VERSION < 3
command_line = malloc((argc + 1) * sizeof(const char *));
+ if (!command_line)
+ return -1;
+
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
PyImport_AppendInittab(name, initperf_trace_context);
#else
command_line = malloc((argc + 1) * sizeof(wchar_t *));
+ if (!command_line)
+ return -1;
+
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 1e9aa8ed15b6..199d3e8df315 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -115,6 +115,11 @@ static int perf_session__open(struct perf_session *session, int repipe_fd)
return -1;
}
+ if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) {
+ /* Auxiliary events may reference exited threads, hold onto dead ones. */
+ symbol_conf.keep_exited_threads = true;
+ }
+
if (perf_data__is_pipe(data))
return 0;
@@ -1150,9 +1155,13 @@ static void callchain__printf(struct evsel *evsel,
i, callchain->ips[i]);
}
-static void branch_stack__printf(struct perf_sample *sample, bool callstack)
+static void branch_stack__printf(struct perf_sample *sample,
+ struct evsel *evsel)
{
struct branch_entry *entries = perf_sample__branch_entries(sample);
+ bool callstack = evsel__has_branch_callstack(evsel);
+ u64 *branch_stack_cntr = sample->branch_stack_cntr;
+ struct perf_env *env = evsel__env(evsel);
uint64_t i;
if (!callstack) {
@@ -1194,6 +1203,13 @@ static void branch_stack__printf(struct perf_sample *sample, bool callstack)
}
}
}
+
+ if (branch_stack_cntr) {
+ printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n",
+ sample->branch_stack->nr, env->br_cntr_width, env->br_cntr_nr);
+ for (i = 0; i < sample->branch_stack->nr; i++)
+ printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]);
+ }
}
static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
@@ -1355,7 +1371,7 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
callchain__printf(evsel, sample);
if (evsel__has_br_stack(evsel))
- branch_stack__printf(sample, evsel__has_branch_callstack(evsel));
+ branch_stack__printf(sample, evsel);
if (sample_type & PERF_SAMPLE_REGS_USER)
regs_user__printf(sample, arch);
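
With PERF_SAMPLE_BRANCH_COUNTERS each branch entry carries one extra u64 in
which up to env->br_cntr_nr counters of env->br_cntr_width bits each are
packed; the dump above prints those words verbatim. A hedged sketch of
unpacking such a word (the packing layout is an assumption drawn from the
printed width/nr):

    #include <stdint.h>
    #include <stdio.h>

    /* Unpack 'nr' counters of 'width' bits each from one packed u64. */
    static void unpack_branch_counters(uint64_t word, unsigned nr, unsigned width)
    {
        uint64_t mask = width >= 64 ? ~0ULL : (1ULL << width) - 1;

        for (unsigned i = 0; i < nr; i++)
            printf("counter[%u] = %llu\n", i,
                   (unsigned long long)((word >> (i * width)) & mask));
    }

    int main(void)
    {
        /* two 2-bit counters, 3 and 1, packed as 0b0111 */
        unpack_branch_counters(0x7, 2, 2);
        return 0;
    }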
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 80e4f6132740..30254eb63709 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -24,6 +24,7 @@
#include "strbuf.h"
#include "mem-events.h"
#include "annotate.h"
+#include "annotate-data.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
@@ -418,6 +419,52 @@ struct sort_entry sort_sym = {
.se_width_idx = HISTC_SYMBOL,
};
+/* --sort symoff */
+
+static int64_t
+sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ int64_t ret;
+
+ ret = sort__sym_cmp(left, right);
+ if (ret)
+ return ret;
+
+ return left->ip - right->ip;
+}
+
+static int64_t
+sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ int64_t ret;
+
+ ret = sort__sym_sort(left, right);
+ if (ret)
+ return ret;
+
+ return left->ip - right->ip;
+}
+
+static int
+hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
+{
+ struct symbol *sym = he->ms.sym;
+
+ if (sym == NULL)
+ return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);
+
+ return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);
+}
+
+struct sort_entry sort_sym_offset = {
+ .se_header = "Symbol Offset",
+ .se_cmp = sort__symoff_cmp,
+ .se_sort = sort__symoff_sort,
+ .se_snprintf = hist_entry__symoff_snprintf,
+ .se_filter = hist_entry__sym_filter,
+ .se_width_idx = HISTC_SYMBOL_OFFSET,
+};
+
/* --sort srcline */
char *hist_entry__srcline(struct hist_entry *he)
@@ -583,21 +630,21 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
{
struct symbol *sym = he->ms.sym;
- struct annotation *notes;
+ struct annotated_branch *branch;
double ipc = 0.0, coverage = 0.0;
char tmp[64];
if (!sym)
return repsep_snprintf(bf, size, "%-*s", width, "-");
- notes = symbol__annotation(sym);
+ branch = symbol__annotation(sym)->branch;
- if (notes->hit_cycles)
- ipc = notes->hit_insn / ((double)notes->hit_cycles);
+ if (branch && branch->hit_cycles)
+ ipc = branch->hit_insn / ((double)branch->hit_cycles);
- if (notes->total_insn) {
- coverage = notes->cover_insn * 100.0 /
- ((double)notes->total_insn);
+ if (branch && branch->total_insn) {
+ coverage = branch->cover_insn * 100.0 /
+ ((double)branch->total_insn);
}
snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
@@ -2094,7 +2141,7 @@ struct sort_entry sort_dso_size = {
.se_width_idx = HISTC_DSO_SIZE,
};
-/* --sort dso_size */
+/* --sort addr */
static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
@@ -2131,6 +2178,152 @@ struct sort_entry sort_addr = {
.se_width_idx = HISTC_ADDR,
};
+/* --sort type */
+
+struct annotated_data_type unknown_type = {
+ .self = {
+ .type_name = (char *)"(unknown)",
+ .children = LIST_HEAD_INIT(unknown_type.self.children),
+ },
+};
+
+static int64_t
+sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return sort__addr_cmp(left, right);
+}
+
+static void sort__type_init(struct hist_entry *he)
+{
+ if (he->mem_type)
+ return;
+
+ he->mem_type = hist_entry__get_data_type(he);
+ if (he->mem_type == NULL) {
+ he->mem_type = &unknown_type;
+ he->mem_type_off = 0;
+ }
+}
+
+static int64_t
+sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ struct annotated_data_type *left_type = left->mem_type;
+ struct annotated_data_type *right_type = right->mem_type;
+
+ if (!left_type) {
+ sort__type_init(left);
+ left_type = left->mem_type;
+ }
+
+ if (!right_type) {
+ sort__type_init(right);
+ right_type = right->mem_type;
+ }
+
+ return strcmp(left_type->self.type_name, right_type->self.type_name);
+}
+
+static int64_t
+sort__type_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ return sort__type_collapse(left, right);
+}
+
+static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
+}
+
+struct sort_entry sort_type = {
+ .se_header = "Data Type",
+ .se_cmp = sort__type_cmp,
+ .se_collapse = sort__type_collapse,
+ .se_sort = sort__type_sort,
+ .se_init = sort__type_init,
+ .se_snprintf = hist_entry__type_snprintf,
+ .se_width_idx = HISTC_TYPE,
+};
+
+/* --sort typeoff */
+
+static int64_t
+sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ struct annotated_data_type *left_type = left->mem_type;
+ struct annotated_data_type *right_type = right->mem_type;
+ int64_t ret;
+
+ if (!left_type) {
+ sort__type_init(left);
+ left_type = left->mem_type;
+ }
+
+ if (!right_type) {
+ sort__type_init(right);
+ right_type = right->mem_type;
+ }
+
+ ret = strcmp(left_type->self.type_name, right_type->self.type_name);
+ if (ret)
+ return ret;
+ return left->mem_type_off - right->mem_type_off;
+}
+
+static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
+ int offset, bool first)
+{
+ struct annotated_member *child;
+
+ if (list_empty(&m->children))
+ return;
+
+ list_for_each_entry(child, &m->children, node) {
+ if (child->offset <= offset && offset < child->offset + child->size) {
+ int len = 0;
+
+ /* It can have anonymous struct/union members */
+ if (child->var_name) {
+ len = scnprintf(buf, sz, "%s%s",
+ first ? "" : ".", child->var_name);
+ first = false;
+ }
+
+ fill_member_name(buf + len, sz - len, child, offset, first);
+ return;
+ }
+ }
+}
+
+static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width __maybe_unused)
+{
+ struct annotated_data_type *he_type = he->mem_type;
+ char buf[4096];
+
+ buf[0] = '\0';
+ if (list_empty(&he_type->self.children))
+ snprintf(buf, sizeof(buf), "no field");
+ else
+ fill_member_name(buf, sizeof(buf), &he_type->self,
+ he->mem_type_off, true);
+ buf[4095] = '\0';
+
+ return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name,
+ he->mem_type_off, buf);
+}
+
+struct sort_entry sort_type_offset = {
+ .se_header = "Data Type Offset",
+ .se_cmp = sort__type_cmp,
+ .se_collapse = sort__typeoff_sort,
+ .se_sort = sort__typeoff_sort,
+ .se_init = sort__type_init,
+ .se_snprintf = hist_entry__typeoff_snprintf,
+ .se_width_idx = HISTC_TYPE_OFFSET,
+};
+
struct sort_dimension {
const char *name;
@@ -2185,7 +2378,10 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_ADDR, "addr", sort_addr),
DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
- DIM(SORT_SIMD, "simd", sort_simd)
+ DIM(SORT_SIMD, "simd", sort_simd),
+ DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
+ DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
+ DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
};
#undef DIM
@@ -3205,6 +3401,8 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
list->thread = 1;
} else if (sd->entry == &sort_comm) {
list->comm = 1;
+ } else if (sd->entry == &sort_type_offset) {
+ symbol_conf.annotate_data_member = true;
}
return __sort_dimension__add(sd, list, level);
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index ecfb7f1359d5..6f6b4189a389 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -15,6 +15,7 @@
struct option;
struct thread;
+struct annotated_data_type;
extern regex_t parent_regex;
extern const char *sort_order;
@@ -34,6 +35,7 @@ extern struct sort_entry sort_dso_to;
extern struct sort_entry sort_sym_from;
extern struct sort_entry sort_sym_to;
extern struct sort_entry sort_srcline;
+extern struct sort_entry sort_type;
extern const char default_mem_sort_order[];
extern bool chk_double_cl;
@@ -111,6 +113,7 @@ struct hist_entry {
u64 p_stage_cyc;
u8 cpumode;
u8 depth;
+ int mem_type_off;
struct simd_flags simd_flags;
/* We are added by hists__add_dummy_entry. */
@@ -154,6 +157,7 @@ struct hist_entry {
struct perf_hpp_list *hpp_list;
struct hist_entry *parent_he;
struct hist_entry_ops *ops;
+ struct annotated_data_type *mem_type;
union {
/* this is for hierarchical entry structure */
struct {
@@ -243,6 +247,9 @@ enum sort_type {
SORT_LOCAL_RETIRE_LAT,
SORT_GLOBAL_RETIRE_LAT,
SORT_SIMD,
+ SORT_ANNOTATE_DATA_TYPE,
+ SORT_ANNOTATE_DATA_TYPE_OFFSET,
+ SORT_SYM_OFFSET,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
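
Adding a sort key such as symoff touches three places: a struct sort_entry
with its compare/print callbacks, a DIM() row in common_sort_dimensions[],
and a SORT_* enum value here. A self-contained sketch of the
table-of-callbacks shape, with simplified types rather than the perf structs:

    #include <stdio.h>

    struct entry { unsigned long ip; };

    struct sort_dim {
        const char *name;
        int (*cmp)(const struct entry *, const struct entry *);
        int (*snprint)(const struct entry *, char *, size_t);
    };

    static int addr_cmp(const struct entry *l, const struct entry *r)
    {
        return (l->ip > r->ip) - (l->ip < r->ip);
    }

    static int addr_snprint(const struct entry *e, char *bf, size_t sz)
    {
        return snprintf(bf, sz, "%#lx", e->ip);
    }

    static const struct sort_dim dims[] = {
        { "addr", addr_cmp, addr_snprint },
        /* a new key is one more row here plus its enum value */
    };

    int main(void)
    {
        struct entry a = { 0x1234 };
        char buf[32];

        dims[0].snprint(&a, buf, sizeof(buf));
        printf("%s: %s\n", dims[0].name, buf);
        return 0;
    }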
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index afe6db8e7bf4..8c61f8627ebc 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -898,7 +898,7 @@ static bool hybrid_uniquify(struct evsel *evsel, struct perf_stat_config *config
static void uniquify_counter(struct perf_stat_config *config, struct evsel *counter)
{
- if (config->no_merge || hybrid_uniquify(counter, config))
+ if (config->aggr_mode == AGGR_NONE || hybrid_uniquify(counter, config))
uniquify_event_name(counter);
}
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 1c5c3eeba4cf..e31426167852 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -264,7 +264,7 @@ static void print_ll_miss(struct perf_stat_config *config,
static const double color_ratios[3] = {20.0, 10.0, 5.0};
print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, color_ratios,
- "of all L1-icache accesses");
+ "of all LL-cache accesses");
}
static void print_dtlb_miss(struct perf_stat_config *config,
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index ec3506042217..b0bcf92f0f9c 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
if (!counter->per_pkg)
return 0;
- if (perf_cpu_map__empty(cpus))
+ if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
return 0;
if (!mask) {
@@ -592,7 +592,7 @@ void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *ev
{
struct evsel *evsel;
- if (config->no_merge)
+ if (config->aggr_mode == AGGR_NONE)
return;
evlist__for_each_entry(evlist, evsel)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 325d0fad1842..4357ba114822 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -76,7 +76,6 @@ struct perf_stat_config {
bool null_run;
bool ru_display;
bool big_num;
- bool no_merge;
bool hybrid_merge;
bool walltime_run_table;
bool all_kernel;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 9e7eeaf616b8..4b934ed3bfd1 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1392,8 +1392,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
map__set_end(map, map__start(map) + shdr->sh_size);
map__set_pgoff(map, shdr->sh_offset);
- map__set_map_ip(map, map__dso_map_ip);
- map__set_unmap_ip(map, map__dso_unmap_ip);
+ map__set_mapping_type(map, MAPPING_TYPE__DSO);
/* Ensure maps are correctly ordered */
if (kmaps) {
int err;
@@ -1455,8 +1454,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
map__set_pgoff(curr_map, shdr->sh_offset);
} else {
- map__set_map_ip(curr_map, identity__map_ip);
- map__set_unmap_ip(curr_map, identity__map_ip);
+ map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
}
curr_dso->symtab_type = dso->symtab_type;
if (maps__insert(kmaps, curr_map))
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index a81a14769bd1..1da8b713509c 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -159,9 +159,10 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
goto out_free;
ret = read_build_id(buf, buf_size, bid, need_swap);
- if (ret == 0)
+ if (ret == 0) {
ret = bid->size;
- break;
+ break;
+ }
}
} else {
Elf64_Ehdr ehdr;
@@ -210,9 +211,10 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
goto out_free;
ret = read_build_id(buf, buf_size, bid, need_swap);
- if (ret == 0)
+ if (ret == 0) {
ret = bid->size;
- break;
+ break;
+ }
}
}
out_free:
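
The symbol-minimal fix moves the break inside the success branch: previously
the loop gave up after the first note section even when read_build_id()
failed, whereas now the remaining sections are still tried. The control-flow
difference in miniature:

    #include <stdio.h>

    /* Pretend only section 2 yields a build-id. */
    static int try_section(int i)
    {
        return i == 2 ? 42 : -1;
    }

    int main(void)
    {
        int ret = -1;

        for (int i = 0; i < 4; i++) {
            ret = try_section(i);
            if (ret >= 0)
                break; /* stop on success; on failure keep scanning */
        }
        printf("ret=%d\n", ret); /* 42: an unconditional break would give -1 */
        return 0;
    }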
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 82cc74b9358e..be212ba157dc 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -48,11 +48,6 @@ static bool symbol__is_idle(const char *name);
int vmlinux_path__nr_entries;
char **vmlinux_path;
-struct map_list_node {
- struct list_head node;
- struct map *map;
-};
-
struct symbol_conf symbol_conf = {
.nanosecs = false,
.use_modules = true,
@@ -90,11 +85,6 @@ static enum dso_binary_type binary_type_symtab[] = {
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
-static struct map_list_node *map_list_node__new(void)
-{
- return malloc(sizeof(struct map_list_node));
-}
-
static bool symbol_type__filter(char symbol_type)
{
symbol_type = toupper(symbol_type);
@@ -270,29 +260,6 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
curr->end = roundup(curr->start, 4096) + 4096;
}
-void maps__fixup_end(struct maps *maps)
-{
- struct map_rb_node *prev = NULL, *curr;
-
- down_write(maps__lock(maps));
-
- maps__for_each_entry(maps, curr) {
- if (prev != NULL && !map__end(prev->map))
- map__set_end(prev->map, map__start(curr->map));
-
- prev = curr;
- }
-
- /*
- * We still haven't the actual symbols, so guess the
- * last map final address.
- */
- if (curr && !map__end(curr->map))
- map__set_end(curr->map, ~0ULL);
-
- up_write(maps__lock(maps));
-}
-
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
size_t namelen = strlen(name) + 1;
@@ -956,8 +923,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
return -1;
}
- map__set_map_ip(curr_map, identity__map_ip);
- map__set_unmap_ip(curr_map, identity__map_ip);
+ map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
if (maps__insert(kmaps, curr_map)) {
dso__put(ndso);
return -1;
@@ -1148,33 +1114,35 @@ out_delete_from:
return ret;
}
+static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
+{
+ struct rb_root *modules = data;
+ struct module_info *mi;
+ struct dso *dso;
+
+ if (!__map__is_kmodule(old_map))
+ return 0;
+
+ dso = map__dso(old_map);
+ /* Module must be in memory at the same address */
+ mi = find_module(dso->short_name, modules);
+ if (!mi || mi->start != map__start(old_map))
+ return -EINVAL;
+
+ return 0;
+}
+
static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{
struct rb_root modules = RB_ROOT;
- struct map_rb_node *old_node;
int err;
err = read_proc_modules(filename, &modules);
if (err)
return err;
- maps__for_each_entry(kmaps, old_node) {
- struct map *old_map = old_node->map;
- struct module_info *mi;
- struct dso *dso;
+ err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules);
- if (!__map__is_kmodule(old_map)) {
- continue;
- }
- dso = map__dso(old_map);
- /* Module must be in memory at the same address */
- mi = find_module(dso->short_name, &modules);
- if (!mi || mi->start != map__start(old_map)) {
- err = -EINVAL;
- goto out;
- }
- }
-out:
delete_modules(&modules);
return err;
}
@@ -1271,101 +1239,15 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
return 0;
}
-/*
- * Merges map into maps by splitting the new map within the existing map
- * regions.
- */
-int maps__merge_in(struct maps *kmaps, struct map *new_map)
+static bool remove_old_maps(struct map *map, void *data)
{
- struct map_rb_node *rb_node;
- LIST_HEAD(merged);
- int err = 0;
-
- maps__for_each_entry(kmaps, rb_node) {
- struct map *old_map = rb_node->map;
-
- /* no overload with this one */
- if (map__end(new_map) < map__start(old_map) ||
- map__start(new_map) >= map__end(old_map))
- continue;
-
- if (map__start(new_map) < map__start(old_map)) {
- /*
- * |new......
- * |old....
- */
- if (map__end(new_map) < map__end(old_map)) {
- /*
- * |new......| -> |new..|
- * |old....| -> |old....|
- */
- map__set_end(new_map, map__start(old_map));
- } else {
- /*
- * |new.............| -> |new..| |new..|
- * |old....| -> |old....|
- */
- struct map_list_node *m = map_list_node__new();
-
- if (!m) {
- err = -ENOMEM;
- goto out;
- }
-
- m->map = map__clone(new_map);
- if (!m->map) {
- free(m);
- err = -ENOMEM;
- goto out;
- }
-
- map__set_end(m->map, map__start(old_map));
- list_add_tail(&m->node, &merged);
- map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
- map__set_start(new_map, map__end(old_map));
- }
- } else {
- /*
- * |new......
- * |old....
- */
- if (map__end(new_map) < map__end(old_map)) {
- /*
- * |new..| -> x
- * |old.........| -> |old.........|
- */
- map__put(new_map);
- new_map = NULL;
- break;
- } else {
- /*
- * |new......| -> |new...|
- * |old....| -> |old....|
- */
- map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
- map__set_start(new_map, map__end(old_map));
- }
- }
- }
-
-out:
- while (!list_empty(&merged)) {
- struct map_list_node *old_node;
-
- old_node = list_entry(merged.next, struct map_list_node, node);
- list_del_init(&old_node->node);
- if (!err)
- err = maps__insert(kmaps, old_node->map);
- map__put(old_node->map);
- free(old_node);
- }
+ const struct map *map_to_save = data;
- if (new_map) {
- if (!err)
- err = maps__insert(kmaps, new_map);
- map__put(new_map);
- }
- return err;
+ /*
+ * We need to preserve eBPF maps even if they are covered by kcore,
+ * because we need to access eBPF dso for source data.
+ */
+ return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
}
static int dso__load_kcore(struct dso *dso, struct map *map,
@@ -1374,7 +1256,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
struct map *replacement_map = NULL;
- struct map_rb_node *old_node, *next;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1421,17 +1302,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Remove old maps */
- maps__for_each_entry_safe(kmaps, old_node, next) {
- struct map *old_map = old_node->map;
-
- /*
- * We need to preserve eBPF maps even if they are
- * covered by kcore, because we need to access
- * eBPF dso for source data.
- */
- if (old_map != map && !__map__is_bpf_prog(old_map))
- maps__remove(kmaps, old_map);
- }
+ maps__remove_maps(kmaps, remove_old_maps, map);
machine->trampolines_mapped = false;
/* Find the kernel map using the '_stext' symbol */
@@ -1475,8 +1346,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
map__set_start(map, map__start(new_map));
map__set_end(map, map__end(new_map));
map__set_pgoff(map, map__pgoff(new_map));
- map__set_map_ip(map, map__map_ip_ptr(new_map));
- map__set_unmap_ip(map, map__unmap_ip_ptr(new_map));
+ map__set_mapping_type(map, map__mapping_type(new_map));
/* Ensure maps are correctly ordered */
map_ref = map__get(map);
maps__remove(kmaps, map_ref);
@@ -2067,124 +1937,6 @@ out:
return ret;
}
-static int map__strcmp(const void *a, const void *b)
-{
- const struct map *map_a = *(const struct map **)a;
- const struct map *map_b = *(const struct map **)b;
- const struct dso *dso_a = map__dso(map_a);
- const struct dso *dso_b = map__dso(map_b);
- int ret = strcmp(dso_a->short_name, dso_b->short_name);
-
- if (ret == 0 && map_a != map_b) {
- /*
- * Ensure distinct but name equal maps have an order in part to
- * aid reference counting.
- */
- ret = (int)map__start(map_a) - (int)map__start(map_b);
- if (ret == 0)
- ret = (int)((intptr_t)map_a - (intptr_t)map_b);
- }
-
- return ret;
-}
-
-static int map__strcmp_name(const void *name, const void *b)
-{
- const struct dso *dso = map__dso(*(const struct map **)b);
-
- return strcmp(name, dso->short_name);
-}
-
-void __maps__sort_by_name(struct maps *maps)
-{
- qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
-}
-
-static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
-{
- struct map_rb_node *rb_node;
- struct map **maps_by_name = realloc(maps__maps_by_name(maps),
- maps__nr_maps(maps) * sizeof(struct map *));
- int i = 0;
-
- if (maps_by_name == NULL)
- return -1;
-
- up_read(maps__lock(maps));
- down_write(maps__lock(maps));
-
- RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
- RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
-
- maps__for_each_entry(maps, rb_node)
- maps_by_name[i++] = map__get(rb_node->map);
-
- __maps__sort_by_name(maps);
-
- up_write(maps__lock(maps));
- down_read(maps__lock(maps));
-
- return 0;
-}
-
-static struct map *__maps__find_by_name(struct maps *maps, const char *name)
-{
- struct map **mapp;
-
- if (maps__maps_by_name(maps) == NULL &&
- map__groups__sort_by_name_from_rbtree(maps))
- return NULL;
-
- mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
- sizeof(*mapp), map__strcmp_name);
- if (mapp)
- return *mapp;
- return NULL;
-}
-
-struct map *maps__find_by_name(struct maps *maps, const char *name)
-{
- struct map_rb_node *rb_node;
- struct map *map;
-
- down_read(maps__lock(maps));
-
-
- if (RC_CHK_ACCESS(maps)->last_search_by_name) {
- const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
-
- if (strcmp(dso->short_name, name) == 0) {
- map = RC_CHK_ACCESS(maps)->last_search_by_name;
- goto out_unlock;
- }
- }
- /*
- * If we have maps->maps_by_name, then the name isn't in the rbtree,
- * as maps->maps_by_name mirrors the rbtree when lookups by name are
- * made.
- */
- map = __maps__find_by_name(maps, name);
- if (map || maps__maps_by_name(maps) != NULL)
- goto out_unlock;
-
- /* Fallback to traversing the rbtree... */
- maps__for_each_entry(maps, rb_node) {
- struct dso *dso;
-
- map = rb_node->map;
- dso = map__dso(map);
- if (strcmp(dso->short_name, name) == 0) {
- RC_CHK_ACCESS(maps)->last_search_by_name = map;
- goto out_unlock;
- }
- }
- map = NULL;
-
-out_unlock:
- up_read(maps__lock(maps));
- return map;
-}
-
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, bool vmlinux_allocated)
{
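
The symbol.c hunks above repeatedly replace open-coded maps__for_each_entry loops with callbacks handed to maps__for_each_map, which stops iterating as soon as a callback returns non-zero and propagates that value to the caller. A minimal self-contained sketch of that contract, using an array-backed stand-in rather than perf's real maps structure:

#include <stddef.h>

struct map { unsigned long start, end; };

/* Array-backed stand-in for perf's maps iteration: stop as soon as a
 * callback returns non-zero and hand that value back to the caller. */
static int maps_for_each(struct map *maps, size_t nr,
			 int (*cb)(struct map *m, void *data), void *data)
{
	for (size_t i = 0; i < nr; i++) {
		int ret = cb(&maps[i], data);

		if (ret)
			return ret;
	}
	return 0;
}

struct find_args { unsigned long addr; struct map *result; };

static int find_cb(struct map *m, void *data)
{
	struct find_args *args = data;

	if (args->addr >= m->start && args->addr < m->end) {
		args->result = m;
		return 1;	/* non-zero stops the walk */
	}
	return 0;
}

int main(void)
{
	struct map maps[] = { { 0x1000, 0x2000 }, { 0x4000, 0x5000 } };
	struct find_args args = { .addr = 0x4100, .result = NULL };

	maps_for_each(maps, 2, find_cb, &args);
	return args.result ? 0 : 1;
}
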
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index af87c46b3f89..071837ddce2a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -189,7 +189,6 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
void symbols__fixup_duplicate(struct rb_root_cached *symbols);
void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
-void maps__fixup_end(struct maps *maps);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index 0b589570d1d0..c114bbceef40 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -42,7 +42,11 @@ struct symbol_conf {
inline_name,
disable_add2line_warn,
buildid_mmap2,
- guest_code;
+ guest_code,
+ lazy_load_kernel_maps,
+ keep_exited_threads,
+ annotate_data_member,
+ annotate_data_sample;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index a0579c7d7b9e..2a0289c14959 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -665,18 +665,74 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
}
#endif
+struct perf_event__synthesize_modules_maps_cb_args {
+ struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+ union perf_event *event;
+};
+
+static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
+{
+ struct perf_event__synthesize_modules_maps_cb_args *args = data;
+ union perf_event *event = args->event;
+ struct dso *dso;
+ size_t size;
+
+ if (!__map__is_kmodule(map))
+ return 0;
+
+ dso = map__dso(map);
+ if (symbol_conf.buildid_mmap2) {
+ size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
+ event->mmap2.header.type = PERF_RECORD_MMAP2;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size));
+ memset(event->mmap2.filename + size, 0, args->machine->id_hdr_size);
+ event->mmap2.header.size += args->machine->id_hdr_size;
+ event->mmap2.start = map__start(map);
+ event->mmap2.len = map__size(map);
+ event->mmap2.pid = args->machine->pid;
+
+ memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);
+
+ perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false);
+ } else {
+ size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, args->machine->id_hdr_size);
+ event->mmap.header.size += args->machine->id_hdr_size;
+ event->mmap.start = map__start(map);
+ event->mmap.len = map__size(map);
+ event->mmap.pid = args->machine->pid;
+
+ memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
+ }
+
+ if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
+ return -1;
+
+ return 0;
+}
+
int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
struct machine *machine)
{
- int rc = 0;
- struct map_rb_node *pos;
+ int rc;
struct maps *maps = machine__kernel_maps(machine);
- union perf_event *event;
- size_t size = symbol_conf.buildid_mmap2 ?
- sizeof(event->mmap2) : sizeof(event->mmap);
+ struct perf_event__synthesize_modules_maps_cb_args args = {
+ .tool = tool,
+ .process = process,
+ .machine = machine,
+ };
+ size_t size = symbol_conf.buildid_mmap2
+ ? sizeof(args.event->mmap2)
+ : sizeof(args.event->mmap);
- event = zalloc(size + machine->id_hdr_size);
- if (event == NULL) {
+ args.event = zalloc(size + machine->id_hdr_size);
+ if (args.event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
return -1;
@@ -687,53 +743,13 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
* __perf_event_mmap
*/
if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_KERNEL;
+ args.event->header.misc = PERF_RECORD_MISC_KERNEL;
else
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-
- maps__for_each_entry(maps, pos) {
- struct map *map = pos->map;
- struct dso *dso;
+ args.event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- if (!__map__is_kmodule(map))
- continue;
+ rc = maps__for_each_map(maps, perf_event__synthesize_modules_maps_cb, &args);
- dso = map__dso(map);
- if (symbol_conf.buildid_mmap2) {
- size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
- event->mmap2.header.type = PERF_RECORD_MMAP2;
- event->mmap2.header.size = (sizeof(event->mmap2) -
- (sizeof(event->mmap2.filename) - size));
- memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
- event->mmap2.header.size += machine->id_hdr_size;
- event->mmap2.start = map__start(map);
- event->mmap2.len = map__size(map);
- event->mmap2.pid = machine->pid;
-
- memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);
-
- perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
- } else {
- size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, machine->id_hdr_size);
- event->mmap.header.size += machine->id_hdr_size;
- event->mmap.start = map__start(map);
- event->mmap.len = map__size(map);
- event->mmap.pid = machine->pid;
-
- memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
- }
-
- if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
- rc = -1;
- break;
- }
- }
-
- free(event);
+ free(args.event);
return rc;
}
@@ -1039,11 +1055,11 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (thread_nr > n)
thread_nr = n;
- synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
+ synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
if (synthesize_threads == NULL)
goto free_dirent;
- args = calloc(sizeof(*args), thread_nr);
+ args = calloc(thread_nr, sizeof(*args));
if (args == NULL)
goto free_threads;
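
The two calloc() fixes just above swap transposed arguments: calloc(3) is declared as calloc(size_t nmemb, size_t size), so the element count belongs first. Both orders allocate the same number of bytes, but newer compilers warn when a sizeof expression appears as the first argument. A minimal illustration:

#include <stdlib.h>
#include <pthread.h>

int main(void)
{
	int thread_nr = 8;

	/* nmemb first, element size second, matching the fixed calls */
	pthread_t *threads = calloc(thread_nr, sizeof(pthread_t));

	if (!threads)
		return 1;
	free(threads);
	return 0;
}
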
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index fe5e6991ae4b..89c47a5098e2 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -345,38 +345,36 @@ int thread__insert_map(struct thread *thread, struct map *map)
if (ret)
return ret;
- maps__fixup_overlappings(thread__maps(thread), map, stderr);
- return maps__insert(thread__maps(thread), map);
+ return maps__fixup_overlap_and_insert(thread__maps(thread), map);
}
-static int __thread__prepare_access(struct thread *thread)
+struct thread__prepare_access_maps_cb_args {
+ int err;
+ struct maps *maps;
+};
+
+static int thread__prepare_access_maps_cb(struct map *map, void *data)
{
bool initialized = false;
- int err = 0;
- struct maps *maps = thread__maps(thread);
- struct map_rb_node *rb_node;
-
- down_read(maps__lock(maps));
-
- maps__for_each_entry(maps, rb_node) {
- err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
- if (err || initialized)
- break;
- }
+ struct thread__prepare_access_maps_cb_args *args = data;
- up_read(maps__lock(maps));
+ args->err = unwind__prepare_access(args->maps, map, &initialized);
- return err;
+ return (args->err || initialized) ? 1 : 0;
}
static int thread__prepare_access(struct thread *thread)
{
- int err = 0;
+ struct thread__prepare_access_maps_cb_args args = {
+ .err = 0,
+ };
- if (dwarf_callchain_users)
- err = __thread__prepare_access(thread);
+ if (dwarf_callchain_users) {
+ args.maps = thread__maps(thread);
+ maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args);
+ }
- return err;
+ return args.err;
}
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
@@ -385,14 +383,14 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool
if (thread__pid(thread) == thread__pid(parent))
return thread__prepare_access(thread);
- if (thread__maps(thread) == thread__maps(parent)) {
+ if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) {
pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
thread__pid(thread), thread__tid(thread),
thread__pid(parent), thread__tid(parent));
return 0;
}
/* But this one is new process, copy maps. */
- return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
+ return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0;
}
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
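
The thread.c hunk also switches a raw pointer comparison to RC_CHK_EQUAL(). Under perf's reference-count-checking build, each handle is a separate wrapper around the underlying object, so two handles to the same maps no longer compare equal as pointers; equality has to look through the wrapper. A hedged userspace analogue (names illustrative, not perf's actual rc_check implementation):

struct maps { int refcnt; };

struct maps_handle {		/* stand-in for the checked wrapper */
	struct maps *orig;	/* the wrapped object */
};

/* Equality must look through the wrapper: distinct handles can refer
 * to the same underlying maps. */
static int handles_equal(const struct maps_handle *a,
			 const struct maps_handle *b)
{
	if (a == b)
		return 1;
	return a && b && a->orig == b->orig;
}

int main(void)
{
	struct maps m = { .refcnt = 2 };
	struct maps_handle h1 = { .orig = &m }, h2 = { .orig = &m };

	return handles_equal(&h1, &h2) ? 0 : 1;
}
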
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index e79225a0ea46..0df775b5c110 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -36,13 +36,22 @@ struct thread_rb_node {
};
DECLARE_RC_STRUCT(thread) {
+ /** @maps: mmaps associated with this thread. */
struct maps *maps;
pid_t pid_; /* Not all tools update this */
+ /** @tid: thread ID number unique to a machine. */
pid_t tid;
+ /** @ppid: parent process of the process this thread belongs to. */
pid_t ppid;
int cpu;
int guest_cpu; /* For QEMU thread */
refcount_t refcnt;
+ /**
+ * @exited: Has the thread had an exit event. Such threads are usually
+ * removed from the machine's threads but some events/tools require
+ * access to dead threads.
+ */
+ bool exited;
bool comm_set;
int comm_len;
struct list_head namespaces_list;
@@ -189,6 +198,11 @@ static inline refcount_t *thread__refcnt(struct thread *thread)
return &RC_CHK_ACCESS(thread)->refcnt;
}
+static inline void thread__set_exited(struct thread *thread, bool exited)
+{
+ RC_CHK_ACCESS(thread)->exited = exited;
+}
+
static inline bool thread__comm_set(const struct thread *thread)
{
return RC_CHK_ACCESS(thread)->comm_set;
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index be7157de0451..4db3d1bd686c 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -28,6 +28,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
struct record_opts *opts = &top->record_opts;
struct target *target = &opts->target;
size_t ret = 0;
+ int nr_cpus;
if (top->samples) {
samples_per_sec = top->samples / top->delay_secs;
@@ -93,19 +94,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
else
ret += SNPRINTF(bf + ret, size - ret, " (all");
+ nr_cpus = perf_cpu_map__nr(top->evlist->core.user_requested_cpus);
if (target->cpu_list)
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
- perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
- ? "s" : "",
+ nr_cpus > 1 ? "s" : "",
target->cpu_list);
else {
if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, ")");
else
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
- perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
- perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
- ? "s" : "");
+ nr_cpus, nr_cpus > 1 ? "s" : "");
}
perf_top__reset_sample_counters(top);
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index a8b0d79bd96c..4c5588dbb131 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -21,7 +21,6 @@ struct perf_top {
struct perf_tool tool;
struct evlist *evlist, *sb_evlist;
struct record_opts record_opts;
- struct annotation_options annotation_opts;
struct evswitch evswitch;
/*
* Symbols will be added here in perf_event__process_sample and will
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 8554db3fc0d7..6013335a8dae 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -46,6 +46,7 @@ static int __report_module(struct addr_location *al, u64 ip,
{
Dwfl_Module *mod;
struct dso *dso = NULL;
+ Dwarf_Addr base;
/*
* Some callers will use al->sym, so we can't just use the
* cheaper thread__find_map() here.
@@ -58,13 +59,25 @@ static int __report_module(struct addr_location *al, u64 ip,
if (!dso)
return 0;
+ /*
+ * Generated JIT DSO files map only the code segment, without
+ * ELF headers. Since JIT code is packed into a single memory
+ * segment, computing the base address from pgoff would land in
+ * another DSO's code, so use map->start directly to pick the
+ * correct one.
+ */
+ if (!strncmp(dso->long_name, "/tmp/jitted-", 12))
+ base = map__start(al->map);
+ else
+ base = map__start(al->map) - map__pgoff(al->map);
+
mod = dwfl_addrmodule(ui->dwfl, ip);
if (mod) {
Dwarf_Addr s;
dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
- if (s != map__start(al->map) - map__pgoff(al->map))
- mod = 0;
+ if (s != base)
+ mod = NULL;
}
if (!mod) {
@@ -72,14 +85,14 @@ static int __report_module(struct addr_location *al, u64 ip,
__symbol__join_symfs(filename, sizeof(filename), dso->long_name);
mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
- map__start(al->map) - map__pgoff(al->map), false);
+ base, false);
}
if (!mod) {
char filename[PATH_MAX];
if (dso__build_id_filename(dso, filename, sizeof(filename), false))
mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
- map__start(al->map) - map__pgoff(al->map), false);
+ base, false);
}
if (mod) {
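
The unwind-libdw.c change above boils down to one base-address rule: perf's jitdump-generated DSOs (named "/tmp/jitted-...") map only a code segment, so subtracting pgoff would land in some other DSO; for those, the map start itself is the module base. Restated as a tiny helper, with map_start/map_pgoff standing in for perf's map__start()/map__pgoff() accessors:

#include <stdint.h>
#include <string.h>

static uint64_t module_base(const char *dso_name,
			    uint64_t map_start, uint64_t map_pgoff)
{
	/* jitdump DSOs map only code, so start - pgoff would be wrong */
	if (!strncmp(dso_name, "/tmp/jitted-", 12))
		return map_start;
	return map_start - map_pgoff;
}

int main(void)
{
	return module_base("/tmp/jitted-1234-1.so", 0x7f0000, 0x1000)
	       == 0x7f0000 ? 0 : 1;
}
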
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index c0641882fd2f..dac536e28360 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -302,12 +302,31 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
return 0;
}
+struct read_unwind_spec_eh_frame_maps_cb_args {
+ struct dso *dso;
+ u64 base_addr;
+};
+
+static int read_unwind_spec_eh_frame_maps_cb(struct map *map, void *data)
+{
+ struct read_unwind_spec_eh_frame_maps_cb_args *args = data;
+
+ if (map__dso(map) == args->dso && map__start(map) - map__pgoff(map) < args->base_addr)
+ args->base_addr = map__start(map) - map__pgoff(map);
+
+ return 0;
+}
+
static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
u64 *table_data, u64 *segbase,
u64 *fde_count)
{
- struct map_rb_node *map_node;
- u64 base_addr = UINT64_MAX;
+ struct read_unwind_spec_eh_frame_maps_cb_args args = {
+ .dso = dso,
+ .base_addr = UINT64_MAX,
+ };
int ret, fd;
if (dso->data.eh_frame_hdr_offset == 0) {
@@ -325,16 +344,11 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
return -EINVAL;
}
- maps__for_each_entry(thread__maps(ui->thread), map_node) {
- struct map *map = map_node->map;
- u64 start = map__start(map);
+ maps__for_each_map(thread__maps(ui->thread), read_unwind_spec_eh_frame_maps_cb, &args);
- if (map__dso(map) == dso && start < base_addr)
- base_addr = start;
- }
- base_addr -= dso->data.elf_base_addr;
+ args.base_addr -= dso->data.elf_base_addr;
/* Address of .eh_frame_hdr */
- *segbase = base_addr + dso->data.eh_frame_hdr_addr;
+ *segbase = args.base_addr + dso->data.eh_frame_hdr_addr;
ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset,
table_data, fde_count);
if (ret)
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index ae3eee69b659..df8963796187 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -140,23 +140,34 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
return dso;
}
+struct machine__thread_dso_type_maps_cb_args {
+ struct machine *machine;
+ enum dso_type dso_type;
+};
+
+static int machine__thread_dso_type_maps_cb(struct map *map, void *data)
+{
+ struct machine__thread_dso_type_maps_cb_args *args = data;
+ struct dso *dso = map__dso(map);
+
+ if (!dso || dso->long_name[0] != '/')
+ return 0;
+
+ args->dso_type = dso__type(dso, args->machine);
+ return (args->dso_type != DSO__TYPE_UNKNOWN) ? 1 : 0;
+}
+
static enum dso_type machine__thread_dso_type(struct machine *machine,
struct thread *thread)
{
- enum dso_type dso_type = DSO__TYPE_UNKNOWN;
- struct map_rb_node *rb_node;
-
- maps__for_each_entry(thread__maps(thread), rb_node) {
- struct dso *dso = map__dso(rb_node->map);
+ struct machine__thread_dso_type_maps_cb_args args = {
+ .machine = machine,
+ .dso_type = DSO__TYPE_UNKNOWN,
+ };
- if (!dso || dso->long_name[0] != '/')
- continue;
- dso_type = dso__type(dso, machine);
- if (dso_type != DSO__TYPE_UNKNOWN)
- break;
- }
+ maps__for_each_map(thread__maps(thread), machine__thread_dso_type_maps_cb, &args);
- return dso_type;
+ return args.dso_type;
}
#if BITS_PER_LONG == 64
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
index 48dd2b018c47..57027e0ac7b6 100644
--- a/tools/perf/util/zstd.c
+++ b/tools/perf/util/zstd.c
@@ -7,35 +7,9 @@
int zstd_init(struct zstd_data *data, int level)
{
- size_t ret;
-
- data->dstream = ZSTD_createDStream();
- if (data->dstream == NULL) {
- pr_err("Couldn't create decompression stream.\n");
- return -1;
- }
-
- ret = ZSTD_initDStream(data->dstream);
- if (ZSTD_isError(ret)) {
- pr_err("Failed to initialize decompression stream: %s\n", ZSTD_getErrorName(ret));
- return -1;
- }
-
- if (!level)
- return 0;
-
- data->cstream = ZSTD_createCStream();
- if (data->cstream == NULL) {
- pr_err("Couldn't create compression stream.\n");
- return -1;
- }
-
- ret = ZSTD_initCStream(data->cstream, level);
- if (ZSTD_isError(ret)) {
- pr_err("Failed to initialize compression stream: %s\n", ZSTD_getErrorName(ret));
- return -1;
- }
-
+ data->comp_level = level;
+ data->dstream = NULL;
+ data->cstream = NULL;
return 0;
}
@@ -54,7 +28,7 @@ int zstd_fini(struct zstd_data *data)
return 0;
}
-size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
void *src, size_t src_size, size_t max_record_size,
size_t process_header(void *record, size_t increment))
{
@@ -63,6 +37,21 @@ size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t
ZSTD_outBuffer output;
void *record;
+ if (!data->cstream) {
+ data->cstream = ZSTD_createCStream();
+ if (data->cstream == NULL) {
+ pr_err("Couldn't create compression stream.\n");
+ return -1;
+ }
+
+ ret = ZSTD_initCStream(data->cstream, data->comp_level);
+ if (ZSTD_isError(ret)) {
+ pr_err("Failed to initialize compression stream: %s\n",
+ ZSTD_getErrorName(ret));
+ return -1;
+ }
+ }
+
while (input.pos < input.size) {
record = dst;
size = process_header(record, 0);
@@ -96,6 +85,20 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
ZSTD_inBuffer input = { src, src_size, 0 };
ZSTD_outBuffer output = { dst, dst_size, 0 };
+ if (!data->dstream) {
+ data->dstream = ZSTD_createDStream();
+ if (data->dstream == NULL) {
+ pr_err("Couldn't create decompression stream.\n");
+ return 0;
+ }
+
+ ret = ZSTD_initDStream(data->dstream);
+ if (ZSTD_isError(ret)) {
+ pr_err("Failed to initialize decompression stream: %s\n",
+ ZSTD_getErrorName(ret));
+ return 0;
+ }
+ }
while (input.pos < input.size) {
ret = ZSTD_decompressStream(data->dstream, &output, &input);
if (ZSTD_isError(ret)) {
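
The zstd.c rework makes stream creation lazy: zstd_init() now only records the compression level, and the stream objects are created on first use, so sessions that never compress or decompress pay nothing. A self-contained sketch of the same pattern against the public libzstd API (link with -lzstd):

#include <stdio.h>
#include <zstd.h>

struct zstd_data {
	ZSTD_CStream *cstream;
	int comp_level;
};

/* Create and initialize the compression stream on first use only;
 * init merely records the level, as in the diff above. */
static int ensure_cstream(struct zstd_data *data)
{
	size_t ret;

	if (data->cstream)
		return 0;

	data->cstream = ZSTD_createCStream();
	if (data->cstream == NULL) {
		fprintf(stderr, "Couldn't create compression stream.\n");
		return -1;
	}

	ret = ZSTD_initCStream(data->cstream, data->comp_level);
	if (ZSTD_isError(ret)) {
		fprintf(stderr, "Failed to initialize compression stream: %s\n",
			ZSTD_getErrorName(ret));
		return -1;
	}
	return 0;
}

int main(void)
{
	struct zstd_data data = { .cstream = NULL, .comp_level = 1 };

	return ensure_cstream(&data) ? 1 : 0;
}
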
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index d9d9923af85c..a4b902f9e1c4 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -15,7 +15,7 @@ LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
endif
-CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
+override CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
$(OUTPUT)%.o : %.c
$(ECHO) " CC " $@
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 0b12c36902d8..030b388800f0 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -13,6 +13,7 @@ ldflags-y += --wrap=cxl_hdm_decode_init
ldflags-y += --wrap=cxl_dvsec_rr_decode
ldflags-y += --wrap=devm_cxl_add_rch_dport
ldflags-y += --wrap=cxl_rcd_component_reg_phys
+ldflags-y += --wrap=cxl_endpoint_parse_cdat
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
@@ -65,4 +66,6 @@ cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
cxl_core-y += cxl_core_exports.o
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
obj-m += test/
diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild
index 61d5f7bcddf9..6b1927897856 100644
--- a/tools/testing/cxl/test/Kbuild
+++ b/tools/testing/cxl/test/Kbuild
@@ -8,3 +8,5 @@ obj-m += cxl_mock_mem.o
cxl_test-y := cxl.o
cxl_mock-y := mock.o
cxl_mock_mem-y := mem.o
+
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index a3cdbb2be038..908e0d083936 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -15,6 +15,8 @@
static int interleave_arithmetic;
+#define FAKE_QTG_ID 42
+
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
@@ -209,7 +211,7 @@ static struct {
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
- .qtg_id = 0,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 4UL,
},
.target = { 0 },
@@ -224,7 +226,7 @@ static struct {
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
- .qtg_id = 1,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
},
.target = { 0, 1, },
@@ -239,7 +241,7 @@ static struct {
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
- .qtg_id = 2,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 4UL,
},
.target = { 0 },
@@ -254,7 +256,7 @@ static struct {
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
- .qtg_id = 3,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
},
.target = { 0, 1, },
@@ -269,7 +271,7 @@ static struct {
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
- .qtg_id = 4,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 4UL,
},
.target = { 2 },
@@ -284,7 +286,7 @@ static struct {
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
- .qtg_id = 5,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M,
},
.target = { 3 },
@@ -301,7 +303,7 @@ static struct {
.granularity = 4,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
- .qtg_id = 0,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
},
.target = { 0, },
@@ -317,7 +319,7 @@ static struct {
.granularity = 0,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
- .qtg_id = 1,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 8UL,
},
.target = { 0, 1, },
@@ -333,7 +335,7 @@ static struct {
.granularity = 0,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
- .qtg_id = 0,
+ .qtg_id = FAKE_QTG_ID,
.window_size = SZ_256M * 16UL,
},
.target = { 0, 1, 0, 1, },
@@ -976,6 +978,48 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
return 0;
}
+/*
+ * Fake the cxl_dpa_perf for the memdev when appropriate.
+ */
+static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ dpa_perf->qos_class = FAKE_QTG_ID;
+ dpa_perf->dpa_range = *range;
+ dpa_perf->coord.read_latency = 500;
+ dpa_perf->coord.write_latency = 500;
+ dpa_perf->coord.read_bandwidth = 1000;
+ dpa_perf->coord.write_bandwidth = 1000;
+}
+
+static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+ struct cxl_root *cxl_root __free(put_cxl_root) =
+ find_cxl_root(port);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct range pmem_range = {
+ .start = cxlds->pmem_res.start,
+ .end = cxlds->pmem_res.end,
+ };
+ struct range ram_range = {
+ .start = cxlds->ram_res.start,
+ .end = cxlds->ram_res.end,
+ };
+
+ if (!cxl_root)
+ return;
+
+ if (range_len(&ram_range))
+ dpa_perf_setup(port, &ram_range, &mds->ram_perf);
+
+ if (range_len(&pmem_range))
+ dpa_perf_setup(port, &pmem_range, &mds->pmem_perf);
+
+ cxl_memdev_update_perf(cxlmd);
+}
+
static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_adev = is_mock_adev,
.is_mock_bridge = is_mock_bridge,
@@ -989,6 +1033,7 @@ static struct cxl_mock_ops cxl_mock_ops = {
.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
+ .cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
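
mock_cxl_endpoint_parse_cdat() above leans on the kernel's scope-based cleanup helpers: the __free(put_cxl_root) annotation drops the cxl_root reference automatically when the variable goes out of scope. Those helpers reduce to the compiler's cleanup attribute; a userspace analogue with illustrative names:

#include <stdio.h>
#include <stdlib.h>

static void free_buf(char **p)
{
	free(*p);
}

/* Userspace analogue of the kernel's __free() annotation */
#define __cleanup_free __attribute__((cleanup(free_buf)))

int main(void)
{
	char *buf __cleanup_free = malloc(64);

	if (!buf)
		return 1;
	snprintf(buf, 64, "released on scope exit");
	puts(buf);
	return 0;	/* free_buf(&buf) runs automatically here */
}
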
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 1a61e68e3095..6f737941dc0e 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -285,6 +285,20 @@ resource_size_t __wrap_cxl_rcd_component_reg_phys(struct device *dev,
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcd_component_reg_phys, CXL);
+void __wrap_cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+
+ if (ops && ops->is_mock_dev(cxlmd->dev.parent))
+ ops->cxl_endpoint_parse_cdat(port);
+ else
+ cxl_endpoint_parse_cdat(port);
+ put_cxl_mock_ops(index);
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_endpoint_parse_cdat, CXL);
+
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);
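
__wrap_cxl_endpoint_parse_cdat() only works together with the ldflags-y += --wrap=cxl_endpoint_parse_cdat line added to the Kbuild above: with ld's --wrap=sym, undefined references to sym resolve to __wrap_sym, and __real_sym reaches the original definition. A two-file sketch of the mechanism (illustrative names):

/* compute.c */
int compute(int x)
{
	return x * 2;
}

/* main.c -- link with: cc main.c compute.c -Wl,--wrap=compute */
#include <stdio.h>

int compute(int x);
int __real_compute(int x);	/* bound by the linker to the original */

int __wrap_compute(int x)	/* intercepts every call to compute() */
{
	printf("mock called\n");
	return __real_compute(x) + 1;
}

int main(void)
{
	printf("%d\n", compute(3));	/* "mock called", then 7 */
	return 0;
}
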
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index a94223750346..d1b0271d2822 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -25,6 +25,7 @@ struct cxl_mock_ops {
int (*devm_cxl_add_passthrough_decoder)(struct cxl_port *port);
int (*devm_cxl_enumerate_decoders)(
struct cxl_hdm *hdm, struct cxl_endpoint_dvsec_info *info);
+ void (*cxl_endpoint_parse_cdat)(struct cxl_port *port);
};
void register_cxl_mock_ops(struct cxl_mock_ops *ops);
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index 3bf506d4a63c..a6cf69a665e8 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -23,10 +23,16 @@ CONFIG_USB4=y
CONFIG_NET=y
CONFIG_MCTP=y
+CONFIG_MCTP_FLOWS=y
CONFIG_INET=y
CONFIG_MPTCP=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_WLAN_VENDOR_INTEL=y
+CONFIG_IWLWIFI=y
+
CONFIG_DAMON=y
CONFIG_DAMON_VADDR=y
CONFIG_DAMON_PADDR=y
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 8153251ea389..91a3627f301a 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -82,4 +82,6 @@ libnvdimm-$(CONFIG_NVDIMM_KEYS) += $(NVDIMM_SRC)/security.o
libnvdimm-y += libnvdimm_test.o
libnvdimm-y += config_check.o
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
obj-m += test/
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 15b6a111c3be..082db6b68060 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -191,6 +191,8 @@ run_tests: all
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests \
+ SRC_PATH=$(shell readlink -e $$(pwd)) \
+ OBJ_PATH=$(BUILD) \
O=$(abs_objtree); \
done;
@@ -241,7 +243,10 @@ ifdef INSTALL_PATH
@ret=1; \
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install \
+ INSTALL_PATH=$(INSTALL_PATH)/$$TARGET \
+ SRC_PATH=$(shell readlink -e $$(pwd)) \
+ OBJ_PATH=$(INSTALL_PATH) \
O=$(abs_objtree) \
$(if $(FORCE_TARGETS),|| exit); \
ret=$$((ret * $$?)); \
diff --git a/tools/testing/selftests/alsa/test-pcmtest-driver.c b/tools/testing/selftests/alsa/test-pcmtest-driver.c
index a52ecd43dbe3..ca81afa4ee90 100644
--- a/tools/testing/selftests/alsa/test-pcmtest-driver.c
+++ b/tools/testing/selftests/alsa/test-pcmtest-driver.c
@@ -127,11 +127,11 @@ FIXTURE_SETUP(pcmtest) {
int err;
if (geteuid())
- SKIP(exit(-1), "This test needs root to run!");
+ SKIP(return, "This test needs root to run!");
err = read_patterns();
if (err)
- SKIP(exit(-1), "Can't read patterns. Probably, module isn't loaded");
+ SKIP(return, "Can't read patterns. Probably, module isn't loaded");
card_name = malloc(127);
ASSERT_NE(card_name, NULL);
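
The pcmtest fix matters because SKIP(statement, ...) in the kselftest harness marks the test skipped and then executes the given statement; exit(-1) inside FIXTURE_SETUP() killed the whole binary and reported a failure instead of a skip, while return lets the harness carry on. A minimal sketch of the corrected shape (include path assumed):

/* Build from a kselftest directory so kselftest_harness.h is found. */
#include <unistd.h>
#include "../kselftest_harness.h"

FIXTURE(demo) {
	int fd;
};

FIXTURE_SETUP(demo)
{
	/* SKIP(return, ...) records the skip, then returns from setup;
	 * SKIP(exit(-1), ...) would abort the whole test binary. */
	if (geteuid())
		SKIP(return, "This test needs root to run!");
	self->fd = -1;
}

FIXTURE_TEARDOWN(demo)
{
}

TEST_F(demo, runs_only_as_root)
{
	ASSERT_EQ(self->fd, -1);
}

TEST_HARNESS_MAIN
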
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 5c2cc7e8c5d0..0445ac38bc07 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,6 +1,5 @@
bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
-exceptions # JIT does not support calling kfunc bpf_throw: -524
fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # needs CONFIG_FPROBE
kprobe_multi_test # needs CONFIG_FPROBE
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index fd15017ed3b1..84cb5500e8ef 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -41,6 +41,19 @@ CFLAGS += -g $(OPT_FLAGS) -rdynamic \
LDFLAGS += $(SAN_LDFLAGS)
LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread
+# The following tests perform type punning that may break strict
+# aliasing rules, which both GCC and clang exploit by default while
+# optimizing. This can lead to broken programs.
+progs/bind4_prog.c-CFLAGS := -fno-strict-aliasing
+progs/bind6_prog.c-CFLAGS := -fno-strict-aliasing
+progs/dynptr_fail.c-CFLAGS := -fno-strict-aliasing
+progs/linked_list_fail.c-CFLAGS := -fno-strict-aliasing
+progs/map_kptr_fail.c-CFLAGS := -fno-strict-aliasing
+progs/syscall.c-CFLAGS := -fno-strict-aliasing
+progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing
+progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing
+progs/timer_crash.c-CFLAGS := -fno-strict-aliasing
+
ifneq ($(LLVM),)
# Silence some warnings when compiled with clang
CFLAGS += -Wno-unused-command-line-argument
@@ -64,6 +77,15 @@ TEST_INST_SUBDIRS := no_alu32
ifneq ($(BPF_GCC),)
TEST_GEN_PROGS += test_progs-bpf_gcc
TEST_INST_SUBDIRS += bpf_gcc
+
+# The following tests contain C code that, although technically legal,
+# triggers GCC warnings that cannot be disabled: declaration of
+# anonymous struct types in function parameter lists.
+progs/btf_dump_test_case_bitfields.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_namespacing.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_packing.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_padding.c-CFLAGS := -Wno-error
+progs/btf_dump_test_case_syntax.c-CFLAGS := -Wno-error
endif
ifneq ($(CLANG_CPUV4),)
@@ -110,7 +132,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \
- xdp_features
+ xdp_features bpf_test_no_cfi.ko
TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi
@@ -175,8 +197,7 @@ endif
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
$(notdir $(TEST_GEN_PROGS) \
- $(TEST_GEN_PROGS_EXTENDED) \
- $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
+ $(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ;
# sort removes libbpf duplicates when not cross-building
MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \
@@ -233,6 +254,12 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmo
$(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod
$(Q)cp bpf_testmod/bpf_testmod.ko $@
+$(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch])
+ $(call msg,MOD,,$@)
+ $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation
+ $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi
+ $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@
+
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
ifneq ($(CROSS_COMPILE),)
CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
@@ -382,11 +409,11 @@ endif
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \
-I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \
- -I$(abspath $(OUTPUT)/../usr/include)
+ -I$(abspath $(OUTPUT)/../usr/include) \
+ -Wno-compare-distinct-pointer-types
# TODO: enable me -Wsign-compare
-CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \
- -Wno-compare-distinct-pointer-types
+CLANG_CFLAGS = $(CLANG_SYS_INCLUDES)
$(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline
$(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline
@@ -504,7 +531,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \
$(wildcard $(BPFDIR)/*.bpf.h) \
| $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
- $(TRUNNER_BPF_CFLAGS))
+ $(TRUNNER_BPF_CFLAGS) \
+ $$($$<-CFLAGS))
$(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
@@ -514,6 +542,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$(@:.skel.h=.subskel.h)
+ $(Q)rm -f $$(<:.o=.linked1.o) $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
@@ -522,6 +551,7 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o)
$(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
$(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@
+ $(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.bpf.o))
@@ -532,6 +562,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@
$(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h)
+ $(Q)rm -f $$(@:.skel.h=.linked1.o) $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o)
endif
# ensure we set up tests.h header generation rule just once
@@ -606,6 +637,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
flow_dissector_load.h \
ip_check_defrag_frags.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
+ $(OUTPUT)/bpf_test_no_cfi.ko \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
$(OUTPUT)/sign-file \
@@ -729,11 +761,12 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
+EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \
no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
+ bpf_test_no_cfi.ko \
liburandom_read.so)
.PHONY: docs docs-clean
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index 9af79c7a9b58..9b974e425af3 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -115,7 +115,7 @@ the insn 20 undoes map_value addition. It is currently impossible for the
verifier to understand such speculative pointer arithmetic.
Hence `this patch`__ addresses it on the compiler side. It was committed on llvm 12.
-__ https://reviews.llvm.org/D85570
+__ https://github.com/llvm/llvm-project/commit/ddf1864ace484035e3cde5e83b3a31ac81e059c6
The corresponding C code
@@ -165,7 +165,7 @@ This is due to a llvm BPF backend bug. `The fix`__
has been pushed to llvm 10.x release branch and will be
available in 10.0.1. The patch is available in llvm 11.0.0 trunk.
-__ https://reviews.llvm.org/D78466
+__ https://github.com/llvm/llvm-project/commit/3cb7e7bf959dcd3b8080986c62e10a75c7af43f0
bpf_verif_scale/loop6.bpf.o test failure with Clang 12
======================================================
@@ -204,7 +204,7 @@ r5(w5) is eventually saved on stack at insn #24 for later use.
This cause later verifier failure. The bug has been `fixed`__ in
Clang 13.
-__ https://reviews.llvm.org/D97479
+__ https://github.com/llvm/llvm-project/commit/1959ead525b8830cc8a345f45e1c3ef9902d3229
BPF CO-RE-based tests and Clang version
=======================================
@@ -221,11 +221,11 @@ failures:
- __builtin_btf_type_id() [0_, 1_, 2_];
- __builtin_preserve_type_info(), __builtin_preserve_enum_value() [3_, 4_].
-.. _0: https://reviews.llvm.org/D74572
-.. _1: https://reviews.llvm.org/D74668
-.. _2: https://reviews.llvm.org/D85174
-.. _3: https://reviews.llvm.org/D83878
-.. _4: https://reviews.llvm.org/D83242
+.. _0: https://github.com/llvm/llvm-project/commit/6b01b465388b204d543da3cf49efd6080db094a9
+.. _1: https://github.com/llvm/llvm-project/commit/072cde03aaa13a2c57acf62d79876bf79aa1919f
+.. _2: https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99
+.. _3: https://github.com/llvm/llvm-project/commit/6d218b4adb093ff2e9764febbbc89f429412006c
+.. _4: https://github.com/llvm/llvm-project/commit/6d6750696400e7ce988d66a1a00e1d0cb32815f8
Floating-point tests and Clang version
======================================
@@ -234,7 +234,7 @@ Certain selftests, e.g. core_reloc, require support for the floating-point
types, which was introduced in `Clang 13`__. The older Clang versions will
either crash when compiling these tests, or generate an incorrect BTF.
-__ https://reviews.llvm.org/D83289
+__ https://github.com/llvm/llvm-project/commit/a7137b238a07d9399d3ae96c0b461571bd5aa8b2
Kernel function call test and Clang version
===========================================
@@ -248,7 +248,7 @@ Without it, the error from compiling bpf selftests looks like:
libbpf: failed to find BTF for extern 'tcp_slow_start' [25] section: -2
-__ https://reviews.llvm.org/D93563
+__ https://github.com/llvm/llvm-project/commit/886f9ff53155075bd5f1e994f17b85d1e1b7470c
btf_tag test and Clang version
==============================
@@ -264,8 +264,8 @@ Without them, the btf_tag selftest will be skipped and you will observe:
#<test_num> btf_tag:SKIP
-.. _0: https://reviews.llvm.org/D111588
-.. _1: https://reviews.llvm.org/D111199
+.. _0: https://github.com/llvm/llvm-project/commit/a162b67c98066218d0d00aa13b99afb95d9bb5e6
+.. _1: https://github.com/llvm/llvm-project/commit/3466e00716e12e32fdb100e3fcfca5c2b3e8d784
Clang dependencies for static linking tests
===========================================
@@ -274,7 +274,7 @@ linked_vars, linked_maps, and linked_funcs tests depend on `Clang fix`__ to
generate valid BTF information for weak variables. Please make sure you use
Clang that contains the fix.
-__ https://reviews.llvm.org/D100362
+__ https://github.com/llvm/llvm-project/commit/968292cb93198442138128d850fd54dc7edc0035
Clang relocation changes
========================
@@ -292,7 +292,7 @@ Here, ``type 2`` refers to new relocation type ``R_BPF_64_ABS64``.
To fix this issue, use a newer libbpf.
.. Links
-.. _clang reloc patch: https://reviews.llvm.org/D102712
+.. _clang reloc patch: https://github.com/llvm/llvm-project/commit/6a2ea84600ba4bd3b2733bd8f08f5115eb32164b
.. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst
Clang dependencies for the u32 spill test (xdpwall)
@@ -304,6 +304,6 @@ from running test_progs will look like:
.. code-block:: console
- test_xdpwall:FAIL:Does LLVM have https://reviews.llvm.org/D109073? unexpected error: -4007
+ test_xdpwall:FAIL:Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5? unexpected error: -4007
-__ https://reviews.llvm.org/D109073
+__ https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 73ce11b0547d..1724d50ba942 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -323,14 +323,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
break;
case 'p':
env.producer_cnt = strtol(arg, NULL, 10);
- if (env.producer_cnt <= 0) {
+ if (env.producer_cnt < 0) {
fprintf(stderr, "Invalid producer count: %s\n", arg);
argp_usage(state);
}
break;
case 'c':
env.consumer_cnt = strtol(arg, NULL, 10);
- if (env.consumer_cnt <= 0) {
+ if (env.consumer_cnt < 0) {
fprintf(stderr, "Invalid consumer count: %s\n", arg);
argp_usage(state);
}
@@ -607,6 +607,10 @@ static void setup_benchmark(void)
bench->setup();
for (i = 0; i < env.consumer_cnt; i++) {
+ if (!bench->consumer_thread) {
+ fprintf(stderr, "benchmark doesn't support consumers!\n");
+ exit(1);
+ }
err = pthread_create(&state.consumers[i], NULL,
bench->consumer_thread, (void *)(long)i);
if (err) {
@@ -626,6 +630,10 @@ static void setup_benchmark(void)
env.prod_cpus.next_cpu = env.cons_cpus.next_cpu;
for (i = 0; i < env.producer_cnt; i++) {
+ if (!bench->producer_thread) {
+ fprintf(stderr, "benchmark doesn't support producers!\n");
+ exit(1);
+ }
err = pthread_create(&state.producers[i], NULL,
bench->producer_thread, (void *)(long)i);
if (err) {
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index f44875f8b367..0d749006d107 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -260,11 +260,11 @@ extern void bpf_throw(u64 cookie) __ksym;
#define __is_signed_type(type) (((type)(-1)) < (type)1)
-#define __bpf_cmp(LHS, OP, SIGN, PRED, RHS, DEFAULT) \
+#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT) \
({ \
__label__ l_true; \
bool ret = DEFAULT; \
- asm volatile goto("if %[lhs] " SIGN #OP " %[rhs] goto %l[l_true]" \
+ asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]" \
:: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \
ret = !DEFAULT; \
l_true: \
@@ -276,7 +276,7 @@ l_true: \
* __lhs OP __rhs below will catch the mistake.
* Be aware that we check only __lhs to figure out the sign of compare.
*/
-#define _bpf_cmp(LHS, OP, RHS, NOFLIP) \
+#define _bpf_cmp(LHS, OP, RHS, UNLIKELY) \
({ \
typeof(LHS) __lhs = (LHS); \
typeof(RHS) __rhs = (RHS); \
@@ -285,14 +285,17 @@ l_true: \
(void)(__lhs OP __rhs); \
if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \
if (sizeof(__rhs) == 8) \
- ret = __bpf_cmp(__lhs, OP, "", "r", __rhs, NOFLIP); \
+ /* "i" will truncate 64-bit constant into s32, \
+ * so we have to use extra register via "r". \
+ */ \
+ ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY); \
else \
- ret = __bpf_cmp(__lhs, OP, "", "i", __rhs, NOFLIP); \
+ ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY); \
} else { \
if (sizeof(__rhs) == 8) \
- ret = __bpf_cmp(__lhs, OP, "s", "r", __rhs, NOFLIP); \
+ ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY); \
else \
- ret = __bpf_cmp(__lhs, OP, "s", "i", __rhs, NOFLIP); \
+ ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY); \
} \
ret; \
})
@@ -304,7 +307,7 @@ l_true: \
#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS) \
({ \
- bool ret; \
+ bool ret = 0; \
if (__builtin_strcmp(#OP, "==") == 0) \
ret = _bpf_cmp(LHS, !=, RHS, false); \
else if (__builtin_strcmp(#OP, "!=") == 0) \
@@ -318,7 +321,7 @@ l_true: \
else if (__builtin_strcmp(#OP, ">=") == 0) \
ret = _bpf_cmp(LHS, <, RHS, false); \
else \
- (void) "bug"; \
+ asm volatile("r0 " #OP " invalid compare"); \
ret; \
})
#endif
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index b4e78c1eb37b..14ebe7d9e1a3 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -9,7 +9,7 @@ struct bpf_sock_addr_kern;
* Error code
*/
extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
- struct bpf_dynptr *ptr__uninit) __ksym;
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
/* Description
* Initializes an xdp-type dynptr
@@ -17,7 +17,7 @@ extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
* Error code
*/
extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
- struct bpf_dynptr *ptr__uninit) __ksym;
+ struct bpf_dynptr *ptr__uninit) __ksym __weak;
/* Description
* Obtain a read-only pointer to the dynptr's data
@@ -26,7 +26,7 @@ extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags,
* buffer if unable to obtain a direct pointer
*/
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
- void *buffer, __u32 buffer__szk) __ksym;
+ void *buffer, __u32 buffer__szk) __ksym __weak;
/* Description
* Obtain a read-write pointer to the dynptr's data
@@ -35,13 +35,13 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
* buffer if unable to obtain a direct pointer
*/
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
- void *buffer, __u32 buffer__szk) __ksym;
+ void *buffer, __u32 buffer__szk) __ksym __weak;
-extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym;
-extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym;
-extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym;
-extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
-extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym;
+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym __weak;
+extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym __weak;
+extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym __weak;
+extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak;
+extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym __weak;
/* Description
* Modify the address of a AF_UNIX sockaddr.
@@ -51,9 +51,19 @@ extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clo
extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
const __u8 *sun_path, __u32 sun_path__sz) __ksym;
+/* Description
+ * Allocate and configure a reqsk and link it with a listener and skb.
+ * Returns
+ * Error code
+ */
+struct sock;
+struct bpf_tcp_req_attrs;
+extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk,
+ struct bpf_tcp_req_attrs *attrs, int attrs__sz) __ksym;
+
void *bpf_cast_to_kern_ctx(void *) __ksym;
-void *bpf_rdonly_cast(void *obj, __u32 btf_id) __ksym;
+extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
extern int bpf_get_file_xattr(struct file *file, const char *name,
struct bpf_dynptr *value_ptr) __ksym;
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile
new file mode 100644
index 000000000000..ed5143b79edf
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile
@@ -0,0 +1,19 @@
+BPF_TEST_NO_CFI_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
+KDIR ?= $(abspath $(BPF_TEST_NO_CFI_DIR)/../../../../..)
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+endif
+
+MODULES = bpf_test_no_cfi.ko
+
+obj-m += bpf_test_no_cfi.o
+
+all:
+ +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) modules
+
+clean:
+ +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) clean
diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
new file mode 100644
index 000000000000..b1dd889d5d7d
--- /dev/null
+++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+struct bpf_test_no_cfi_ops {
+ void (*fn_1)(void);
+ void (*fn_2)(void);
+};
+
+static int dummy_init(struct btf *btf)
+{
+ return 0;
+}
+
+static int dummy_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ return 0;
+}
+
+static int dummy_reg(void *kdata)
+{
+ return 0;
+}
+
+static void dummy_unreg(void *kdata)
+{
+}
+
+static const struct bpf_verifier_ops dummy_verifier_ops;
+
+static void bpf_test_no_cfi_ops__fn_1(void)
+{
+}
+
+static void bpf_test_no_cfi_ops__fn_2(void)
+{
+}
+
+static struct bpf_test_no_cfi_ops __test_no_cfi_ops = {
+ .fn_1 = bpf_test_no_cfi_ops__fn_1,
+ .fn_2 = bpf_test_no_cfi_ops__fn_2,
+};
+
+static struct bpf_struct_ops test_no_cfi_ops = {
+ .verifier_ops = &dummy_verifier_ops,
+ .init = dummy_init,
+ .init_member = dummy_init_member,
+ .reg = dummy_reg,
+ .unreg = dummy_unreg,
+ .name = "bpf_test_no_cfi_ops",
+ .owner = THIS_MODULE,
+};
+
+static int bpf_test_no_cfi_init(void)
+{
+ int ret;
+
+ ret = register_bpf_struct_ops(&test_no_cfi_ops,
+ bpf_test_no_cfi_ops);
+ if (!ret)
+ return -EINVAL;
+
+ test_no_cfi_ops.cfi_stubs = &__test_no_cfi_ops;
+ ret = register_bpf_struct_ops(&test_no_cfi_ops,
+ bpf_test_no_cfi_ops);
+ return ret;
+}
+
+static void bpf_test_no_cfi_exit(void)
+{
+}
+
+module_init(bpf_test_no_cfi_init);
+module_exit(bpf_test_no_cfi_exit);
+
+MODULE_AUTHOR("Kuifeng Lee");
+MODULE_DESCRIPTION("BPF no cfi_stubs test module");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 91907b321f91..098ddd067224 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
+#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
+#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -341,12 +343,12 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.write = bpf_testmod_test_write,
};
-BTF_SET8_START(bpf_testmod_common_kfunc_ids)
+BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
-BTF_SET8_END(bpf_testmod_common_kfunc_ids)
+BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
.owner = THIS_MODULE,
@@ -492,7 +494,7 @@ __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused
return arg;
}
-BTF_SET8_START(bpf_testmod_check_kfunc_ids)
+BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
@@ -518,13 +520,95 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
-BTF_SET8_END(bpf_testmod_check_kfunc_ids)
+BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
+
+static int bpf_testmod_ops_init(struct btf *btf)
+{
+ return 0;
+}
+
+static bool bpf_testmod_ops_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_testmod_ops_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
+ /* For data fields, this function has to copy it and return
+ * 1 to indicate that the data has been handled by the
+ * struct_ops type, or the verifier will reject the map if
+ * the value of the data field is not zero.
+ */
+ ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
+ return 1;
+ }
+ return 0;
+}
static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_testmod_check_kfunc_ids,
};
+static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
+ .is_valid_access = bpf_testmod_ops_is_valid_access,
+};
+
+static int bpf_dummy_reg(void *kdata)
+{
+ struct bpf_testmod_ops *ops = kdata;
+
+	/* Some test cases (e.g. struct_ops_maybe_null) may not have test_2
+ * initialized, so we need to check for NULL.
+ */
+ if (ops->test_2)
+ ops->test_2(4, ops->data);
+
+ return 0;
+}
+
+static void bpf_dummy_unreg(void *kdata)
+{
+}
+
+static int bpf_testmod_test_1(void)
+{
+ return 0;
+}
+
+static void bpf_testmod_test_2(int a, int b)
+{
+}
+
+static int bpf_testmod_ops__test_maybe_null(int dummy,
+ struct task_struct *task__nullable)
+{
+ return 0;
+}
+
+static struct bpf_testmod_ops __bpf_testmod_ops = {
+ .test_1 = bpf_testmod_test_1,
+ .test_2 = bpf_testmod_test_2,
+ .test_maybe_null = bpf_testmod_ops__test_maybe_null,
+};
+
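+/* Registration record for the bpf_testmod_ops struct_ops type. cfi_stubs
+ * supplies native stub functions so that indirect calls through the ops
+ * stay compatible with kernel CFI.
+ */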
+struct bpf_struct_ops bpf_bpf_testmod_ops = {
+ .verifier_ops = &bpf_testmod_verifier_ops,
+ .init = bpf_testmod_ops_init,
+ .init_member = bpf_testmod_ops_init_member,
+ .reg = bpf_dummy_reg,
+ .unreg = bpf_dummy_unreg,
+ .cfi_stubs = &__bpf_testmod_ops,
+ .name = "bpf_testmod_ops",
+ .owner = THIS_MODULE,
+};
+
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
@@ -535,6 +619,7 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
+ ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
@@ -544,7 +629,15 @@ static int bpf_testmod_init(void)
static void bpf_testmod_exit(void)
{
- return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+	/* Need to wait for all references to be dropped because
+	 * bpf_kfunc_call_test_release(), which currently resides in the
+	 * kernel, can be called after bpf_testmod is unloaded. Once the
+	 * release function is moved into the module, this wait can be
+	 * removed.
+	 */
+ while (refcount_read(&prog_test_struct.cnt) > 1)
+ msleep(20);
+
+ sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}
module_init(bpf_testmod_init);
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
index f32793efe095..971458acfac3 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
@@ -5,6 +5,8 @@
#include <linux/types.h>
+struct task_struct;
+
struct bpf_testmod_test_read_ctx {
char *buf;
loff_t off;
@@ -28,4 +30,19 @@ struct bpf_iter_testmod_seq {
int cnt;
};
+struct bpf_testmod_ops {
+ int (*test_1)(void);
+ void (*test_2)(int a, int b);
+ /* Used to test nullable arguments. */
+ int (*test_maybe_null)(int dummy, struct task_struct *task);
+
+ /* The following fields are used to test shadow copies. */
+ char onebyte;
+ struct {
+ int a;
+ int b;
+ } unsupported;
+ int data;
+};
+
#endif /* _BPF_TESTMOD_H */
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index c125c441abc7..01f241ea2c67 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -81,6 +81,7 @@ CONFIG_NF_NAT=y
CONFIG_RC_CORE=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
+CONFIG_SYN_COOKIES=y
CONFIG_TEST_BPF=m
CONFIG_USERFAULTFD=y
CONFIG_VSOCKETS=y
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index e770912fc1d2..4c6ada5b270b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type)
}
bpf_program__set_type(prog, type);
- bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
+ bpf_program__set_flags(prog, testing_prog_flags());
bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags);
err = bpf_object__load(obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index c2e886399e3c..ecf89df78109 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -27,7 +27,7 @@ static void verify_success(const char *prog_name)
struct bpf_program *prog;
struct bpf_link *link = NULL;
pid_t child_pid;
- int status;
+ int status, err;
skel = cpumask_success__open();
if (!ASSERT_OK_PTR(skel, "cpumask_success__open"))
@@ -36,8 +36,8 @@ static void verify_success(const char *prog_name)
skel->bss->pid = getpid();
skel->bss->nr_cpus = libbpf_num_possible_cpus();
- cpumask_success__load(skel);
- if (!ASSERT_OK_PTR(skel, "cpumask_success__load"))
+ err = cpumask_success__load(skel);
+ if (!ASSERT_OK(err, "cpumask_success__load"))
goto cleanup;
prog = bpf_object__find_program_by_name(skel->obj, prog_name);
diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
index 4951aa978f33..3b7c57fe55a5 100644
--- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
+++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c
@@ -626,50 +626,6 @@ err:
return false;
}
-/* Request BPF program instructions after all rewrites are applied,
- * e.g. verifier.c:convert_ctx_access() is done.
- */
-static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
-{
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- __u32 xlated_prog_len;
- __u32 buf_element_size = sizeof(struct bpf_insn);
-
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("bpf_prog_get_info_by_fd failed");
- return -1;
- }
-
- xlated_prog_len = info.xlated_prog_len;
- if (xlated_prog_len % buf_element_size) {
- printf("Program length %d is not multiple of %d\n",
- xlated_prog_len, buf_element_size);
- return -1;
- }
-
- *cnt = xlated_prog_len / buf_element_size;
- *buf = calloc(*cnt, buf_element_size);
- if (!buf) {
- perror("can't allocate xlated program buffer");
- return -ENOMEM;
- }
-
- bzero(&info, sizeof(info));
- info.xlated_prog_len = xlated_prog_len;
- info.xlated_prog_insns = (__u64)(unsigned long)*buf;
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("second bpf_prog_get_info_by_fd failed");
- goto out_free_buf;
- }
-
- return 0;
-
-out_free_buf:
- free(*buf);
- return -1;
-}
-
static void print_insn(void *private_data, const char *fmt, ...)
{
va_list args;
diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
index 5c0ebe6ba866..dcb9e5070cc3 100644
--- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
+++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
@@ -72,6 +72,6 @@ fail:
bpf_tc_hook_destroy(&qdisc_hook);
close_netns(nstoken);
}
- SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
+ SYS_NOFAIL("ip netns del " NS_TEST);
decap_sanity__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
index 4ad4cd69152e..3379df2d4cf2 100644
--- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c
@@ -298,6 +298,6 @@ void test_fib_lookup(void)
fail:
if (nstoken)
close_netns(nstoken);
- SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
+ SYS_NOFAIL("ip netns del " NS_TEST);
fib_lookup__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index d4b1901f7879..f3932941bbaa 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -19,6 +19,7 @@ static const char *kmulti_syms[] = {
};
#define KMULTI_CNT ARRAY_SIZE(kmulti_syms)
static __u64 kmulti_addrs[KMULTI_CNT];
+static __u64 kmulti_cookies[] = { 3, 1, 2 };
#define KPROBE_FUNC "bpf_fentry_test1"
static __u64 kprobe_addr;
@@ -31,6 +32,8 @@ static noinline void uprobe_func(void)
asm volatile ("");
}
+#define PERF_EVENT_COOKIE 0xdeadbeef
+
static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
ssize_t offset, ssize_t entry_offset)
{
@@ -62,6 +65,8 @@ again:
ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
"kprobe_addr");
+ ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");
+
if (!info.perf_event.kprobe.func_name) {
ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
@@ -81,6 +86,8 @@ again:
goto again;
}
+ ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie");
+
err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
strlen(TP_NAME));
ASSERT_EQ(err, 0, "cmp_tp_name");
@@ -96,10 +103,17 @@ again:
goto again;
}
+ ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie");
+
err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
strlen(UPROBE_FILE));
ASSERT_EQ(err, 0, "cmp_file_name");
break;
+ case BPF_PERF_EVENT_EVENT:
+ ASSERT_EQ(info.perf_event.event.type, PERF_TYPE_SOFTWARE, "event_type");
+ ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config");
+ ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie");
+ break;
default:
err = -1;
break;
@@ -139,6 +153,7 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
.attach_mode = PROBE_ATTACH_MODE_LINK,
.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
+ .bpf_cookie = PERF_EVENT_COOKIE,
);
ssize_t entry_offset = 0;
struct bpf_link *link;
@@ -163,10 +178,13 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
static void test_tp_fill_link_info(struct test_fill_link_info *skel)
{
+ DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
struct bpf_link *link;
int link_fd, err;
- link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
+ link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts);
if (!ASSERT_OK_PTR(link, "attach_tp"))
return;
@@ -176,16 +194,53 @@ static void test_tp_fill_link_info(struct test_fill_link_info *skel)
bpf_link__destroy(link);
}
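+/* Attach to a software PERF_COUNT_SW_PAGE_FAULTS perf event and check
+ * that bpf_link_get_info_by_fd() reports its type, config, and cookie.
+ */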
+static void test_event_fill_link_info(struct test_fill_link_info *skel)
+{
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
+ struct bpf_link *link;
+ int link_fd, err, pfd;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_PAGE_FAULTS,
+ .freq = 1,
+ .sample_freq = 1,
+ .size = sizeof(struct perf_event_attr),
+ };
+
+ pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */,
+ -1 /* group id */, 0 /* flags */);
+ if (!ASSERT_GE(pfd, 0, "perf_event_open"))
+ return;
+
+ link = bpf_program__attach_perf_event_opts(skel->progs.event_run, pfd, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_event"))
+ goto error;
+
+ link_fd = bpf_link__fd(link);
+ err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_EVENT, 0, 0, 0);
+ ASSERT_OK(err, "verify_perf_link_info");
+ bpf_link__destroy(link);
+
+error:
+ close(pfd);
+}
+
static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
enum bpf_perf_event_type type)
{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
+ .retprobe = type == BPF_PERF_EVENT_URETPROBE,
+ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
struct bpf_link *link;
int link_fd, err;
- link = bpf_program__attach_uprobe(skel->progs.uprobe_run,
- type == BPF_PERF_EVENT_URETPROBE,
- 0, /* self pid */
- UPROBE_FILE, uprobe_offset);
+ link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run,
+ 0, /* self pid */
+ UPROBE_FILE, uprobe_offset,
+ &opts);
if (!ASSERT_OK_PTR(link, "attach_uprobe"))
return;
@@ -195,11 +250,11 @@ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
bpf_link__destroy(link);
}
-static int verify_kmulti_link_info(int fd, bool retprobe)
+static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies)
{
+ __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
struct bpf_link_info info;
__u32 len = sizeof(info);
- __u64 addrs[KMULTI_CNT];
int flags, i, err;
memset(&info, 0, sizeof(info));
@@ -221,18 +276,22 @@ again:
if (!info.kprobe_multi.addrs) {
info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
goto again;
}
- for (i = 0; i < KMULTI_CNT; i++)
+ for (i = 0; i < KMULTI_CNT; i++) {
ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs");
+ ASSERT_EQ(cookies[i], has_cookies ? kmulti_cookies[i] : 0,
+ "kmulti_cookies_value");
+ }
return 0;
}
static void verify_kmulti_invalid_user_buffer(int fd)
{
+ __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT];
struct bpf_link_info info;
__u32 len = sizeof(info);
- __u64 addrs[KMULTI_CNT];
int err, i;
memset(&info, 0, sizeof(info));
@@ -266,7 +325,20 @@ static void verify_kmulti_invalid_user_buffer(int fd)
info.kprobe_multi.count = KMULTI_CNT;
info.kprobe_multi.addrs = 0x1; /* invalid addr */
err = bpf_link_get_info_by_fd(fd, &info, &len);
- ASSERT_EQ(err, -EFAULT, "invalid_buff");
+ ASSERT_EQ(err, -EFAULT, "invalid_buff_addrs");
+
+ info.kprobe_multi.count = KMULTI_CNT;
+ info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ info.kprobe_multi.cookies = 0x1; /* invalid addr */
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EFAULT, "invalid_buff_cookies");
+
+ /* cookies && !count */
+ info.kprobe_multi.count = 0;
+ info.kprobe_multi.addrs = ptr_to_u64(NULL);
+ info.kprobe_multi.cookies = ptr_to_u64(cookies);
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ ASSERT_EQ(err, -EINVAL, "invalid_cookies_count");
}
static int symbols_cmp_r(const void *a, const void *b)
@@ -278,13 +350,15 @@ static int symbols_cmp_r(const void *a, const void *b)
}
static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
- bool retprobe, bool invalid)
+ bool retprobe, bool cookies,
+ bool invalid)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
struct bpf_link *link;
int link_fd, err;
opts.syms = kmulti_syms;
+ opts.cookies = cookies ? kmulti_cookies : NULL;
opts.cnt = KMULTI_CNT;
opts.retprobe = retprobe;
link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
@@ -293,7 +367,7 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
link_fd = bpf_link__fd(link);
if (!invalid) {
- err = verify_kmulti_link_info(link_fd, retprobe);
+ err = verify_kmulti_link_info(link_fd, retprobe, cookies);
ASSERT_OK(err, "verify_kmulti_link_info");
} else {
verify_kmulti_invalid_user_buffer(link_fd);
@@ -513,6 +587,8 @@ void test_fill_link_info(void)
test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true);
if (test__start_subtest("tracepoint_link_info"))
test_tp_fill_link_info(skel);
+ if (test__start_subtest("event_link_info"))
+ test_event_fill_link_info(skel);
uprobe_offset = get_uprobe_offset(&uprobe_func);
if (test__start_subtest("uprobe_link_info"))
@@ -523,12 +599,16 @@ void test_fill_link_info(void)
qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r);
for (i = 0; i < KMULTI_CNT; i++)
kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]);
- if (test__start_subtest("kprobe_multi_link_info"))
- test_kprobe_multi_fill_link_info(skel, false, false);
- if (test__start_subtest("kretprobe_multi_link_info"))
- test_kprobe_multi_fill_link_info(skel, true, false);
+ if (test__start_subtest("kprobe_multi_link_info")) {
+ test_kprobe_multi_fill_link_info(skel, false, false, false);
+ test_kprobe_multi_fill_link_info(skel, false, true, false);
+ }
+ if (test__start_subtest("kretprobe_multi_link_info")) {
+ test_kprobe_multi_fill_link_info(skel, true, false, false);
+ test_kprobe_multi_fill_link_info(skel, true, true, false);
+ }
if (test__start_subtest("kprobe_multi_invalid_ubuff"))
- test_kprobe_multi_fill_link_info(skel, true, true);
+ test_kprobe_multi_fill_link_info(skel, true, true, true);
if (test__start_subtest("uprobe_multi_link_info"))
test_uprobe_multi_fill_link_info(skel, false, false);
diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
index 57c814f5f6a7..8dd2af9081f4 100644
--- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
+++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c
@@ -59,9 +59,9 @@ static int setup_topology(bool ipv6)
/* Wait for up to 5s for links to come up */
for (i = 0; i < 5; ++i) {
if (ipv6)
- up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null");
+ up = !SYS_NOFAIL("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6);
else
- up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null");
+ up = !SYS_NOFAIL("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR);
if (up)
break;
diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c
index bf84d4a1d9ae..3c440370c1f0 100644
--- a/tools/testing/selftests/bpf/prog_tests/iters.c
+++ b/tools/testing/selftests/bpf/prog_tests/iters.c
@@ -193,6 +193,7 @@ static void subtest_task_iters(void)
ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt");
ASSERT_EQ(skel->bss->threads_cnt, thread_num + 1, "threads_cnt");
ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 1, "proc_threads_cnt");
+ ASSERT_EQ(skel->bss->invalid_cnt, 0, "invalid_cnt");
pthread_mutex_unlock(&do_nothing_mutex);
for (int i = 0; i < thread_num; i++)
ASSERT_OK(pthread_join(thread_ids[i], &ret), "pthread_join");
diff --git a/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
new file mode 100644
index 000000000000..7def158da9eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+
+#include "linux/filter.h"
+#include "kptr_xchg_inline.skel.h"
+
+void test_kptr_xchg_inline(void)
+{
+ struct kptr_xchg_inline *skel;
+ struct bpf_insn *insn = NULL;
+ struct bpf_insn exp;
+ unsigned int cnt;
+ int err;
+
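+	/* bpf_kptr_xchg() inlining is only expected on x86-64, arm64, and
+	 * 64-bit riscv, so skip the test on other architectures.
+	 */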
+#if !(defined(__x86_64__) || defined(__aarch64__) || \
+ (defined(__riscv) && __riscv_xlen == 64))
+ test__skip();
+ return;
+#endif
+
+ skel = kptr_xchg_inline__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_load"))
+ return;
+
+ err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
+ if (!ASSERT_OK(err, "prog insn"))
+ goto out;
+
+ /* The original instructions are:
+ * r1 = map[id:xxx][0]+0
+ * r2 = 0
+ * call bpf_kptr_xchg#yyy
+ *
+ * call bpf_kptr_xchg#yyy will be inlined as:
+ * r0 = r2
+ * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
+ */
+ if (!ASSERT_GT(cnt, 5, "insn cnt"))
+ goto out;
+
+ exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
+ if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
+ goto out;
+
+ exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
+ if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
+ goto out;
+out:
+ free(insn);
+ kptr_xchg_inline__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
index 9f766ddd946a..4ed46ed58a7b 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c
@@ -30,6 +30,8 @@ void test_libbpf_probe_prog_types(void)
if (prog_type == BPF_PROG_TYPE_UNSPEC)
continue;
+ if (strcmp(prog_type_name, "__MAX_BPF_PROG_TYPE") == 0)
+ continue;
if (!test__start_subtest(prog_type_name))
continue;
@@ -68,6 +70,8 @@ void test_libbpf_probe_map_types(void)
if (map_type == BPF_MAP_TYPE_UNSPEC)
continue;
+ if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0)
+ continue;
if (!test__start_subtest(map_type_name))
continue;
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
index eb34d612d6f8..62ea855ec4d0 100644
--- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
@@ -132,6 +132,9 @@ static void test_libbpf_bpf_map_type_str(void)
const char *map_type_str;
char buf[256];
+ if (map_type == __MAX_BPF_MAP_TYPE)
+ continue;
+
map_type_name = btf__str_by_offset(btf, e->name_off);
map_type_str = libbpf_bpf_map_type_str(map_type);
ASSERT_OK_PTR(map_type_str, map_type_name);
@@ -186,6 +189,9 @@ static void test_libbpf_bpf_prog_type_str(void)
const char *prog_type_str;
char buf[256];
+ if (prog_type == __MAX_BPF_PROG_TYPE)
+ continue;
+
prog_type_name = btf__str_by_offset(btf, e->name_off);
prog_type_str = libbpf_bpf_prog_type_str(prog_type);
ASSERT_OK_PTR(prog_type_str, prog_type_name);
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
index 7a3fa2ff567b..90a98e23be61 100644
--- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -169,9 +169,9 @@ void test_log_fixup(void)
if (test__start_subtest("bad_core_relo_trunc_none"))
bad_core_relo(0, TRUNC_NONE /* full buf */);
if (test__start_subtest("bad_core_relo_trunc_partial"))
- bad_core_relo(280, TRUNC_PARTIAL /* truncate original log a bit */);
+ bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
if (test__start_subtest("bad_core_relo_trunc_full"))
- bad_core_relo(220, TRUNC_FULL /* truncate also libbpf's message patch */);
+ bad_core_relo(240, TRUNC_FULL /* truncate also libbpf's message patch */);
if (test__start_subtest("bad_core_relo_subprog"))
bad_core_relo_subprog();
if (test__start_subtest("missing_map"))
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
index e9190574e79f..fb1eb8c67361 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
@@ -27,8 +27,6 @@
} \
})
-#define NETNS "ns_lwt"
-
static inline int netns_create(void)
{
return system("ip netns add " NETNS);
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
index 59b38569f310..835a1d756c16 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
@@ -54,6 +54,7 @@
#include <stdbool.h>
#include <stdlib.h>
+#define NETNS "ns_lwt_redirect"
#include "lwt_helpers.h"
#include "test_progs.h"
#include "network_helpers.h"
@@ -85,7 +86,7 @@ static void ping_dev(const char *dev, bool is_ingress)
snprintf(ip, sizeof(ip), "20.0.0.%d", link_index);
/* We won't get a reply. Don't fail here */
- SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d",
ip, ICMP_PAYLOAD_SIZE);
}
@@ -203,6 +204,7 @@ static int setup_redirect_target(const char *target_dev, bool need_mac)
if (!ASSERT_GE(target_index, 0, "if_nametoindex"))
goto fail;
+ SYS(fail, "sysctl -w net.ipv6.conf.all.disable_ipv6=1");
SYS(fail, "ip link add link_err type dummy");
SYS(fail, "ip link set lo up");
SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
index f4bb2d5fcae0..03825d2b45a8 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
@@ -48,6 +48,7 @@
* For case 2, force UDP packets to overflow fq limit. As long as kernel
* is not crashed, it is considered successful.
*/
+#define NETNS "ns_lwt_reroute"
#include "lwt_helpers.h"
#include "network_helpers.h"
#include <linux/net_tstamp.h>
@@ -63,7 +64,7 @@
static void ping_once(const char *ip)
{
/* We won't get a reply. Don't fail here */
- SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d",
ip, ICMP_PAYLOAD_SIZE);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 7c0be7cf550b..8f8d792307c1 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -79,7 +79,7 @@ static void cleanup_netns(struct nstoken *nstoken)
if (nstoken)
close_netns(nstoken);
- SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST);
+ SYS_NOFAIL("ip netns del %s", NS_TEST);
}
static int verify_tsk(int map_fd, int client_fd)
diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
index 3f1f58d3a729..a1f7e7378a64 100644
--- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c
@@ -29,6 +29,10 @@ static void test_success(void)
bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
bpf_program__set_autoload(skel->progs.non_sleepable_2, true);
bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_global_subprog, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_lock, true);
+ bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_unlock, true);
err = rcu_read_lock__load(skel);
if (!ASSERT_OK(err, "skel_load"))
goto out;
@@ -75,6 +79,8 @@ static const char * const inproper_region_tests[] = {
"inproper_sleepable_helper",
"inproper_sleepable_kfunc",
"nested_rcu_region",
+ "rcu_read_lock_global_subprog_lock",
+ "rcu_read_lock_global_subprog_unlock",
};
static void test_inproper_region(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
new file mode 100644
index 000000000000..3405923fe4e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/read_vsyscall.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include "test_progs.h"
+#include "read_vsyscall.skel.h"
+
+#if defined(__x86_64__)
+/* For VSYSCALL_ADDR */
+#include <asm/vsyscall.h>
+#else
+/* To prevent build failure on non-x86 arch */
+#define VSYSCALL_ADDR 0UL
+#endif
+
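+/* Expected result for each read variant: kernel-space probes of the
+ * vsyscall page fail with -ERANGE, user-space ones with -EFAULT.
+ */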
+struct read_ret_desc {
+ const char *name;
+ int ret;
+} all_read[] = {
+ { .name = "probe_read_kernel", .ret = -ERANGE },
+ { .name = "probe_read_kernel_str", .ret = -ERANGE },
+ { .name = "probe_read", .ret = -ERANGE },
+ { .name = "probe_read_str", .ret = -ERANGE },
+ { .name = "probe_read_user", .ret = -EFAULT },
+ { .name = "probe_read_user_str", .ret = -EFAULT },
+ { .name = "copy_from_user", .ret = -EFAULT },
+ { .name = "copy_from_user_task", .ret = -EFAULT },
+};
+
+void test_read_vsyscall(void)
+{
+ struct read_vsyscall *skel;
+ unsigned int i;
+ int err;
+
+#if !defined(__x86_64__)
+ test__skip();
+ return;
+#endif
+ skel = read_vsyscall__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "read_vsyscall open_load"))
+ return;
+
+ skel->bss->target_pid = getpid();
+ err = read_vsyscall__attach(skel);
+ if (!ASSERT_EQ(err, 0, "read_vsyscall attach"))
+ goto out;
+
+	/* userspace may not have the vsyscall page due to LEGACY_VSYSCALL_NONE,
+	 * but that doesn't affect the returned error codes.
+ */
+ skel->bss->user_ptr = (void *)VSYSCALL_ADDR;
+ usleep(1);
+
+ for (i = 0; i < ARRAY_SIZE(all_read); i++)
+ ASSERT_EQ(skel->bss->read_ret[i], all_read[i].ret, all_read[i].name);
+out:
+ read_vsyscall__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
index 820d0bcfc474..eb74363f9f70 100644
--- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
+++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c
@@ -840,7 +840,7 @@ static int load_range_cmp_prog(struct range x, struct range y, enum op op,
.log_level = 2,
.log_buf = log_buf,
.log_size = log_sz,
- .prog_flags = BPF_F_TEST_REG_INVARIANTS,
+ .prog_flags = testing_prog_flags(),
);
/* ; skip exit block below
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
index b0583309a94e..9c11938fe597 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c
@@ -214,7 +214,7 @@ void test_sock_destroy(void)
cleanup:
if (nstoken)
close_netns(nstoken);
- SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+ SYS_NOFAIL("ip netns del " TEST_NS);
if (cgroup_fd >= 0)
close(cgroup_fd);
sock_destroy_prog__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
index 0c365f36c73b..d56e18b25528 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
@@ -112,7 +112,7 @@ void test_sock_iter_batch(void)
{
struct nstoken *nstoken = NULL;
- SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+ SYS_NOFAIL("ip netns del " TEST_NS);
SYS(done, "ip netns add %s", TEST_NS);
SYS(done, "ip -net %s link set dev lo up", TEST_NS);
@@ -131,5 +131,5 @@ void test_sock_iter_batch(void)
close_netns(nstoken);
done:
- SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null");
+ SYS_NOFAIL("ip netns del " TEST_NS);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
index 18d451be57c8..2b0068742ef9 100644
--- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c
@@ -48,6 +48,8 @@ static struct {
{ "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },
{ "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },
+ { "lock_global_subprog_call1", "global function calls are not allowed while holding a lock" },
+ { "lock_global_subprog_call2", "global function calls are not allowed while holding a lock" },
};
static int match_regex(const char *pattern, const char *string)
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index ea8537c54413..c33c05161a9e 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -117,12 +117,6 @@ static void test_recursion(void)
ASSERT_OK(err, "lookup map_b");
ASSERT_EQ(value, 100, "map_b value");
- prog_fd = bpf_program__fd(skel->progs.on_lookup);
- memset(&info, 0, sizeof(info));
- err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
- ASSERT_OK(err, "get prog info");
- ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");
-
prog_fd = bpf_program__fd(skel->progs.on_update);
memset(&info, 0, sizeof(info));
err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 518f143c5b0f..dbe06aeaa2b2 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -188,6 +188,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
{
struct nstoken *nstoken = NULL;
char src_fwd_addr[IFADDR_STR_LEN+1] = {};
+ char src_addr[IFADDR_STR_LEN + 1] = {};
int err;
if (result->dev_mode == MODE_VETH) {
@@ -208,6 +209,9 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
if (get_ifaddr("src_fwd", src_fwd_addr))
goto fail;
+ if (get_ifaddr("src", src_addr))
+ goto fail;
+
result->ifindex_src = if_nametoindex("src");
if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))
goto fail;
@@ -270,6 +274,13 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");
SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global");
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP6_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP4_DST " dev dst_fwd lladdr %s", MAC_DST);
+ SYS(fail, "ip neigh add " IP6_DST " dev dst_fwd lladdr %s", MAC_DST);
+ }
+
close_netns(nstoken);
/** setup in 'dst' namespace */
@@ -280,6 +291,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
SYS(fail, "ip addr add " IP4_DST "/32 dev dst");
SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");
SYS(fail, "ip link set dev dst up");
+ SYS(fail, "ip link set dev lo up");
SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");
SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global");
@@ -457,7 +469,7 @@ static int set_forwarding(bool enable)
return 0;
}
-static void rcv_tstamp(int fd, const char *expected, size_t s)
+static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
{
struct __kernel_timespec pkt_ts = {};
char ctl[CMSG_SPACE(sizeof(pkt_ts))];
@@ -478,7 +490,7 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
ret = recvmsg(fd, &msg, 0);
if (!ASSERT_EQ(ret, s, "recvmsg"))
- return;
+ return -1;
ASSERT_STRNEQ(data, expected, s, "expected rcv data");
cmsg = CMSG_FIRSTHDR(&msg);
@@ -487,6 +499,12 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
+ if (tstamp) {
+ /* caller will check the tstamp itself */
+ *tstamp = pkt_ns;
+ return 0;
+ }
+
ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp");
ret = clock_gettime(CLOCK_REALTIME, &now_ts);
@@ -496,6 +514,60 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp"))
ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC,
"check rcv tstamp");
+ return 0;
+}
+
+static void rcv_tstamp(int fd, const char *expected, size_t s)
+{
+ __rcv_tstamp(fd, expected, s, NULL);
+}
+
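+/* Open a socket with SO_TIMESTAMPNS_NEW enabled and push one packet
+ * through it, so that the kernel's netstamp_needed_key static key is
+ * active. Returns the fd the caller must hold open while testing.
+ */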
+static int wait_netstamp_needed_key(void)
+{
+ int opt = 1, srv_fd = -1, cli_fd = -1, nretries = 0, err, n;
+ char buf[] = "testing testing";
+ struct nstoken *nstoken;
+ __u64 tstamp = 0;
+
+ nstoken = open_netns(NS_DST);
+ if (!nstoken)
+ return -1;
+
+ srv_fd = start_server(AF_INET6, SOCK_DGRAM, "::1", 0, 0);
+ if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ goto done;
+
+ err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
+ &opt, sizeof(opt));
+ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
+ goto done;
+
+ cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS);
+ if (!ASSERT_GE(cli_fd, 0, "connect_to_fd"))
+ goto done;
+
+again:
+ n = write(cli_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+ err = __rcv_tstamp(srv_fd, buf, sizeof(buf), &tstamp);
+ if (!ASSERT_OK(err, "__rcv_tstamp"))
+ goto done;
+ if (!tstamp && nretries++ < 5) {
+ sleep(1);
+ printf("netstamp_needed_key retry#%d\n", nretries);
+ goto again;
+ }
+
+done:
+ if (!tstamp && srv_fd != -1) {
+ close(srv_fd);
+ srv_fd = -1;
+ }
+ if (cli_fd != -1)
+ close(cli_fd);
+ close_netns(nstoken);
+ return srv_fd;
}
static void snd_tstamp(int fd, char *b, size_t s)
@@ -832,11 +904,20 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
{
struct test_tc_dtime *skel;
struct nstoken *nstoken;
- int err;
+ int hold_tstamp_fd, err;
+
+	/* Hold a sk with SOCK_TIMESTAMP set to ensure there is no delay
+	 * from the kernel's net_enable_timestamp(). This ensures the
+	 * following tests see a non-zero rcv tstamp in recvmsg().
+	 */
+ hold_tstamp_fd = wait_netstamp_needed_key();
+ if (!ASSERT_GE(hold_tstamp_fd, 0, "wait_netstamp_needed_key"))
+ return;
skel = test_tc_dtime__open();
if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
- return;
+ goto done;
skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
@@ -881,6 +962,7 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
done:
test_tc_dtime__destroy(skel);
+ close(hold_tstamp_fd);
}
static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
new file mode 100644
index 000000000000..eaf441dc7e79
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdlib.h>
+#include <net/if.h>
+
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "test_tcp_custom_syncookie.skel.h"
+
+static struct test_tcp_custom_syncookie_case {
+ int family, type;
+ char addr[16];
+ char name[10];
+} test_cases[] = {
+ {
+ .name = "IPv4 TCP",
+ .family = AF_INET,
+ .type = SOCK_STREAM,
+ .addr = "127.0.0.1",
+ },
+ {
+ .name = "IPv6 TCP",
+ .family = AF_INET6,
+ .type = SOCK_STREAM,
+ .addr = "::1",
+ },
+};
+
+static int setup_netns(void)
+{
+ if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+ return -1;
+
+ if (!ASSERT_OK(system("ip link set dev lo up"), "ip"))
+ goto err;
+
+ if (!ASSERT_OK(write_sysctl("/proc/sys/net/ipv4/tcp_ecn", "1"),
+ "write_sysctl"))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
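+/* Attach the custom syncookie program to the clsact ingress hook on
+ * loopback so it can handle SYN and ACK packets of the test connections.
+ */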
+static int setup_tc(struct test_tcp_custom_syncookie *skel)
+{
+ LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, tc_attach,
+ .prog_fd = bpf_program__fd(skel->progs.tcp_custom_syncookie));
+
+ qdisc_lo.ifindex = if_nametoindex("lo");
+ if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact"))
+ goto err;
+
+ if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach),
+ "filter add dev lo ingress"))
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+#define msg "Hello World"
+#define msglen 11
+
+static void transfer_message(int sender, int receiver)
+{
+ char buf[msglen];
+ int ret;
+
+ ret = send(sender, msg, msglen, 0);
+ if (!ASSERT_EQ(ret, msglen, "send"))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ ret = recv(receiver, buf, msglen, 0);
+ if (!ASSERT_EQ(ret, msglen, "recv"))
+ return;
+
+ ret = strncmp(buf, msg, msglen);
+ if (!ASSERT_EQ(ret, 0, "strncmp"))
+ return;
+}
+
+static void create_connection(struct test_tcp_custom_syncookie_case *test_case)
+{
+ int server, client, child;
+
+ server = start_server(test_case->family, test_case->type, test_case->addr, 0, 0);
+ if (!ASSERT_NEQ(server, -1, "start_server"))
+ return;
+
+ client = connect_to_fd(server, 0);
+ if (!ASSERT_NEQ(client, -1, "connect_to_fd"))
+ goto close_server;
+
+ child = accept(server, NULL, 0);
+ if (!ASSERT_NEQ(child, -1, "accept"))
+ goto close_client;
+
+ transfer_message(client, child);
+ transfer_message(child, client);
+
+ close(child);
+close_client:
+ close(client);
+close_server:
+ close(server);
+}
+
+void test_tcp_custom_syncookie(void)
+{
+ struct test_tcp_custom_syncookie *skel;
+ int i;
+
+ if (setup_netns())
+ return;
+
+ skel = test_tcp_custom_syncookie__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ if (setup_tc(skel))
+ goto destroy_skel;
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ if (!test__start_subtest(test_cases[i].name))
+ continue;
+
+ skel->bss->handled_syn = false;
+ skel->bss->handled_ack = false;
+
+ create_connection(&test_cases[i]);
+
+		ASSERT_EQ(skel->bss->handled_syn, true, "SYN is not handled at tc");
+		ASSERT_EQ(skel->bss->handled_ack, true, "ACK is not handled at tc");
+ }
+
+destroy_skel:
+ system("tc qdisc del dev lo clsact");
+
+ test_tcp_custom_syncookie__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c
new file mode 100644
index 000000000000..01dc2613c8a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "struct_ops_maybe_null.skel.h"
+#include "struct_ops_maybe_null_fail.skel.h"
+
+/* Test that the verifier accepts a program that accesses a nullable pointer
+ * with a proper check.
+ */
+static void maybe_null(void)
+{
+ struct struct_ops_maybe_null *skel;
+
+ skel = struct_ops_maybe_null__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_and_load"))
+ return;
+
+ struct_ops_maybe_null__destroy(skel);
+}
+
+/* Test that the verifier rejects a program that accesses a nullable pointer
+ * without a check beforehand.
+ */
+static void maybe_null_fail(void)
+{
+ struct struct_ops_maybe_null_fail *skel;
+
+ skel = struct_ops_maybe_null_fail__open_and_load();
+ if (ASSERT_ERR_PTR(skel, "struct_ops_module_fail__open_and_load"))
+ return;
+
+ struct_ops_maybe_null_fail__destroy(skel);
+}
+
+void test_struct_ops_maybe_null(void)
+{
+	/* The verifier checks the programs at load time, so testing both
+	 * programs in the same compilation unit is complicated. We run them
+	 * in separate objects to simplify the testing.
+ */
+ if (test__start_subtest("maybe_null"))
+ maybe_null();
+ if (test__start_subtest("maybe_null_fail"))
+ maybe_null_fail();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
new file mode 100644
index 000000000000..7d6facf46ebb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <time.h>
+
+#include "struct_ops_module.skel.h"
+
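+/* Check that the map's btf_vmlinux_id refers to the BTF object of the
+ * bpf_testmod module.
+ */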
+static void check_map_info(struct bpf_map_info *info)
+{
+ struct bpf_btf_info btf_info;
+ char btf_name[256];
+ u32 btf_info_len = sizeof(btf_info);
+ int err, fd;
+
+ fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id);
+ if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd"))
+ return;
+
+ memset(&btf_info, 0, sizeof(btf_info));
+ btf_info.name = ptr_to_u64(btf_name);
+ btf_info.name_len = sizeof(btf_name);
+ err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len);
+ if (!ASSERT_OK(err, "get_value_type_btf_obj_info"))
+ goto cleanup;
+
+ if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name"))
+ goto cleanup;
+
+cleanup:
+ close(fd);
+}
+
+static void test_struct_ops_load(void)
+{
+ struct struct_ops_module *skel;
+ struct bpf_map_info info = {};
+ struct bpf_link *link;
+ int err;
+ u32 len;
+
+ skel = struct_ops_module__open();
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ return;
+
+ skel->struct_ops.testmod_1->data = 13;
+ skel->struct_ops.testmod_1->test_2 = skel->progs.test_3;
+ /* Since test_2() is not being used, it should be disabled from
+	 * auto-loading, or the skeleton will fail to load.
+ */
+ bpf_program__set_autoload(skel->progs.test_2, false);
+
+ err = struct_ops_module__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_module_load"))
+ goto cleanup;
+
+ len = sizeof(info);
+ err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info,
+ &len);
+ if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+ goto cleanup;
+
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+ ASSERT_OK_PTR(link, "attach_test_mod_1");
+
+ /* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
+ *
+ * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
+ * .test_2. So, the value of test_2_result should be 20 (4 + 13 +
+ * 3).
+ */
+ ASSERT_EQ(skel->bss->test_2_result, 20, "check_shadow_variables");
+
+ bpf_link__destroy(link);
+
+ check_map_info(&info);
+
+cleanup:
+ struct_ops_module__destroy(skel);
+}
+
+void serial_test_struct_ops_module(void)
+{
+ if (test__start_subtest("test_struct_ops_load"))
+ test_struct_ops_load();
+}
+
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
new file mode 100644
index 000000000000..106ea447965a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <testing_helpers.h>
+
+static void load_bpf_test_no_cfi(void)
+{
+ int fd;
+ int err;
+
+ fd = open("bpf_test_no_cfi.ko", O_RDONLY);
+ if (!ASSERT_GE(fd, 0, "open"))
+ return;
+
+	/* The module tries to register a struct_ops type twice: once
+	 * without cfi_stubs and once with cfi_stubs.
+	 *
+	 * The registration without cfi_stubs should fail. The module is
+	 * loaded successfully only if both registrations give the expected
+	 * results; otherwise its init function returns an error.
+	 */
+ err = finit_module(fd, "", 0);
+ close(fd);
+ if (!ASSERT_OK(err, "finit_module"))
+ return;
+
+ err = delete_module("bpf_test_no_cfi", 0);
+ ASSERT_OK(err, "delete_module");
+}
+
+void test_struct_ops_no_cfi(void)
+{
+ if (test__start_subtest("load_bpf_test_no_cfi"))
+ load_bpf_test_no_cfi();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
index 2b3c6dd66259..5f1fb0a2ea56 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
@@ -118,9 +118,9 @@ fail:
static void cleanup(void)
{
SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0");
- SYS_NOFAIL("ip link del veth1 2> /dev/null");
- SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1);
- SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1);
+ SYS_NOFAIL("ip link del veth1");
+ SYS_NOFAIL("ip link del %s", VXLAN_TUNL_DEV1);
+ SYS_NOFAIL("ip link del %s", IP6VXLAN_TUNL_DEV1);
}
static int add_vxlan_tunnel(void)
@@ -265,9 +265,9 @@ fail:
static void delete_ipip_tunnel(void)
{
SYS_NOFAIL("ip -n at_ns0 link delete dev %s", IPIP_TUNL_DEV0);
- SYS_NOFAIL("ip -n at_ns0 fou del port 5555 2> /dev/null");
+ SYS_NOFAIL("ip -n at_ns0 fou del port 5555");
SYS_NOFAIL("ip link delete dev %s", IPIP_TUNL_DEV1);
- SYS_NOFAIL("ip fou del port 5555 2> /dev/null");
+ SYS_NOFAIL("ip fou del port 5555");
}
static int add_xfrm_tunnel(void)
@@ -346,13 +346,13 @@ fail:
static void delete_xfrm_tunnel(void)
{
- SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32 2> /dev/null",
+ SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32",
IP4_ADDR_TUNL_DEV1, IP4_ADDR_TUNL_DEV0);
- SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32 2> /dev/null",
+ SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32",
IP4_ADDR_TUNL_DEV0, IP4_ADDR_TUNL_DEV1);
- SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null",
+ SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
IP4_ADDR_VETH0, IP4_ADDR1_VETH1, XFRM_SPI_IN_TO_OUT);
- SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null",
+ SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c
index 760ad96b4be0..d66687f1ee6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer.c
@@ -4,10 +4,29 @@
#include "timer.skel.h"
#include "timer_failure.skel.h"
+#define NUM_THR 8
+
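+/* Hammer the "race" program from several threads to stress concurrent
+ * bpf_timer operations.
+ */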
+static void *spin_lock_thread(void *arg)
+{
+ int i, err, prog_fd = *(int *)arg;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
+ for (i = 0; i < 10000; i++) {
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "test_run_opts err") ||
+ !ASSERT_OK(topts.retval, "test_run_opts retval"))
+ break;
+ }
+
+ pthread_exit(arg);
+}
+
static int timer(struct timer *timer_skel)
{
- int err, prog_fd;
+ int i, err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
+ pthread_t thread_id[NUM_THR];
+ void *ret;
err = timer__attach(timer_skel);
if (!ASSERT_OK(err, "timer_attach"))
@@ -43,6 +62,20 @@ static int timer(struct timer *timer_skel)
/* check that code paths completed */
ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");
+ prog_fd = bpf_program__fd(timer_skel->progs.race);
+ for (i = 0; i < NUM_THR; i++) {
+ err = pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd);
+ if (!ASSERT_OK(err, "pthread_create"))
+ break;
+ }
+
+ while (i) {
+ err = pthread_join(thread_id[--i], &ret);
+ if (ASSERT_OK(err, "pthread_join"))
+ ASSERT_EQ(ret, (void *)&prog_fd, "pthread_join");
+ }
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
new file mode 100644
index 000000000000..fc4a175d8d76
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "cap_helpers.h"
+#include <fcntl.h>
+#include <sched.h>
+#include <signal.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/unistd.h>
+#include <linux/mount.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/un.h>
+#include "priv_map.skel.h"
+#include "priv_prog.skel.h"
+#include "dummy_st_ops_success.skel.h"
+#include "token_lsm.skel.h"
+
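+/* Thin wrappers for the new mount API syscalls (fsopen, fspick,
+ * fsconfig, fsmount, move_mount), which may lack libc wrappers.
+ */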
+static inline int sys_mount(const char *dev_name, const char *dir_name,
+ const char *type, unsigned long flags,
+ const void *data)
+{
+ return syscall(__NR_mount, dev_name, dir_name, type, flags, data);
+}
+
+static inline int sys_fsopen(const char *fsname, unsigned flags)
+{
+ return syscall(__NR_fsopen, fsname, flags);
+}
+
+static inline int sys_fspick(int dfd, const char *path, unsigned flags)
+{
+ return syscall(__NR_fspick, dfd, path, flags);
+}
+
+static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
+{
+ return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
+}
+
+static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
+{
+ return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
+}
+
+static inline int sys_move_mount(int from_dfd, const char *from_path,
+ int to_dfd, const char *to_path,
+ unsigned flags)
+{
+ return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags);
+}
+
+static int drop_priv_caps(__u64 *old_caps)
+{
+ return cap_disable_effective((1ULL << CAP_BPF) |
+ (1ULL << CAP_PERFMON) |
+ (1ULL << CAP_NET_ADMIN) |
+ (1ULL << CAP_SYS_ADMIN), old_caps);
+}
+
+static int restore_priv_caps(__u64 old_caps)
+{
+ return cap_enable_effective(old_caps, NULL);
+}
+
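+/* Set one of the BPF FS delegate_* mount options, either from an
+ * explicit string or from a numeric bit mask ("any" for ~0ULL).
+ */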
+static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str)
+{
+ char buf[32];
+ int err;
+
+ if (!mask_str) {
+ if (mask == ~0ULL) {
+ mask_str = "any";
+ } else {
+ snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask);
+ mask_str = buf;
+ }
+ }
+
+ err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key,
+ mask_str, 0);
+ if (err < 0)
+ err = -errno;
+ return err;
+}
+
+#define zclose(fd) do { if (fd >= 0) close(fd); fd = -1; } while (0)
+
+struct bpffs_opts {
+ __u64 cmds;
+ __u64 maps;
+ __u64 progs;
+ __u64 attachs;
+ const char *cmds_str;
+ const char *maps_str;
+ const char *progs_str;
+ const char *attachs_str;
+};
+
+static int create_bpffs_fd(void)
+{
+ int fs_fd;
+
+ /* create VFS context */
+ fs_fd = sys_fsopen("bpf", 0);
+ ASSERT_GE(fs_fd, 0, "fs_fd");
+
+ return fs_fd;
+}
+
+static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts)
+{
+ int mnt_fd, err;
+
+ /* set up token delegation mount options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str);
+ if (!ASSERT_OK(err, "fs_cfg_cmds"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str);
+ if (!ASSERT_OK(err, "fs_cfg_maps"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str);
+ if (!ASSERT_OK(err, "fs_cfg_progs"))
+ return err;
+ err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str);
+ if (!ASSERT_OK(err, "fs_cfg_attachs"))
+ return err;
+
+ /* instantiate FS object */
+ err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+ if (err < 0)
+ return -errno;
+
+ /* create O_PATH fd for detached mount */
+ mnt_fd = sys_fsmount(fs_fd, 0, 0);
+	if (mnt_fd < 0)
+ return -errno;
+
+ return mnt_fd;
+}
+
+/* send FD over Unix domain (AF_UNIX) socket */
+static int sendfd(int sockfd, int fd)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ int fds[1] = { fd }, err;
+ char iobuf[1];
+ struct iovec io = {
+ .iov_base = iobuf,
+ .iov_len = sizeof(iobuf),
+ };
+ union {
+ char buf[CMSG_SPACE(sizeof(fds))];
+ struct cmsghdr align;
+ } u;
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = u.buf;
+ msg.msg_controllen = sizeof(u.buf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
+ memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));
+
+ err = sendmsg(sockfd, &msg, 0);
+ if (err < 0)
+ err = -errno;
+ if (!ASSERT_EQ(err, 1, "sendmsg"))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* receive FD over Unix domain (AF_UNIX) socket */
+static int recvfd(int sockfd, int *fd)
+{
+ struct msghdr msg = {};
+ struct cmsghdr *cmsg;
+ int fds[1], err;
+ char iobuf[1];
+ struct iovec io = {
+ .iov_base = iobuf,
+ .iov_len = sizeof(iobuf),
+ };
+ union {
+ char buf[CMSG_SPACE(sizeof(fds))];
+ struct cmsghdr align;
+ } u;
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+ msg.msg_control = u.buf;
+ msg.msg_controllen = sizeof(u.buf);
+
+ err = recvmsg(sockfd, &msg, 0);
+ if (err < 0)
+ err = -errno;
+ if (!ASSERT_EQ(err, 1, "recvmsg"))
+ return -EINVAL;
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (!ASSERT_OK_PTR(cmsg, "cmsg_null") ||
+ !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") ||
+ !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") ||
+ !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type"))
+ return -EINVAL;
+
+ memcpy(fds, CMSG_DATA(cmsg), sizeof(fds));
+ *fd = fds[0];
+
+ return 0;
+}
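+
+/* typical pairing (illustrative): across a socketpair(), one process
+ * calls sendfd(sock, fd) while the peer calls recvfd(sock, &fd); the
+ * kernel installs a duplicate of the descriptor in the receiver via
+ * SCM_RIGHTS, so each side closes its copy independently
+ */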
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = write(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+static int write_file(const char *path, const void *buf, size_t count)
+{
+ int fd;
+ ssize_t ret;
+
+ fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
+ if (fd < 0)
+ return -1;
+
+ ret = write_nointr(fd, buf, count);
+ close(fd);
+ if (ret < 0 || (size_t)ret != count)
+ return -1;
+
+ return 0;
+}
+
+static int create_and_enter_userns(void)
+{
+ uid_t uid;
+ gid_t gid;
+ char map[100];
+
+ uid = getuid();
+ gid = getgid();
+
+ if (unshare(CLONE_NEWUSER))
+ return -1;
+
+ if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
+ errno != ENOENT)
+ return -1;
+
+ snprintf(map, sizeof(map), "0 %d 1", uid);
+ if (write_file("/proc/self/uid_map", map, strlen(map)))
+ return -1;
+
+ snprintf(map, sizeof(map), "0 %d 1", gid);
+ if (write_file("/proc/self/gid_map", map, strlen(map)))
+ return -1;
+
+ if (setgid(0))
+ return -1;
+
+ if (setuid(0))
+ return -1;
+
+ return 0;
+}
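+
+/* on success, /proc/self/uid_map reads "0 <orig-uid> 1" (gid_map is
+ * analogous): the process is root inside the new userns while staying
+ * an unprivileged user outside of it
+ */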
+
+typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel);
+
+static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback)
+{
+ int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
+ struct token_lsm *lsm_skel = NULL;
+
+ /* load and attach LSM "policy" before we go into unpriv userns */
+ lsm_skel = token_lsm__open_and_load();
+ if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ lsm_skel->bss->my_pid = getpid();
+ err = token_lsm__attach(lsm_skel);
+ if (!ASSERT_OK(err, "lsm_skel_attach"))
+ goto cleanup;
+
+ /* setup userns with root mappings */
+ err = create_and_enter_userns();
+ if (!ASSERT_OK(err, "create_and_enter_userns"))
+ goto cleanup;
+
+ /* setup mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */
+ err = unshare(CLONE_NEWNS);
+ if (!ASSERT_OK(err, "create_mountns"))
+ goto cleanup;
+
+ err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (!ASSERT_OK(err, "remount_root"))
+ goto cleanup;
+
+ fs_fd = create_bpffs_fd();
+ if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* ensure unprivileged child cannot set delegation options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_maps_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_progs_eperm");
+ err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL);
+ ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm");
+
+ /* pass BPF FS context object to parent */
+ err = sendfd(sock_fd, fs_fd);
+ if (!ASSERT_OK(err, "send_fs_fd"))
+ goto cleanup;
+ zclose(fs_fd);
+
+ /* avoid mucking around with mount namespaces and mounting at a
+ * well-known path, just get the detach-mounted BPF FS fd back from parent
+ */
+ err = recvfd(sock_fd, &mnt_fd);
+ if (!ASSERT_OK(err, "recv_mnt_fd"))
+ goto cleanup;
+
+ /* try to fspick() BPF FS and try to add some delegation options */
+ fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH);
+ if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* ensure unprivileged child cannot reconfigure to set delegation options */
+ err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any");
+ if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ zclose(fs_fd);
+
+ bpffs_fd = openat(mnt_fd, ".", O_RDWR);
+ if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* create BPF token FD and pass it to parent for some extra checks */
+ token_fd = bpf_token_create(bpffs_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "child_token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ err = sendfd(sock_fd, token_fd);
+ if (!ASSERT_OK(err, "send_token_fd"))
+ goto cleanup;
+ zclose(token_fd);
+
+ /* run custom test logic against the custom-configured BPF FS instance */
+ err = callback(bpffs_fd, lsm_skel);
+ if (!ASSERT_OK(err, "test_callback"))
+ goto cleanup;
+
+ err = 0;
+cleanup:
+ zclose(sock_fd);
+ zclose(mnt_fd);
+ zclose(fs_fd);
+ zclose(bpffs_fd);
+ zclose(token_fd);
+
+ if (lsm_skel)
+ lsm_skel->bss->my_pid = 0;
+ token_lsm__destroy(lsm_skel);
+
+ exit(-err);
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd)
+{
+ int fs_fd = -1, mnt_fd = -1, token_fd = -1, err;
+
+ err = recvfd(sock_fd, &fs_fd);
+ if (!ASSERT_OK(err, "recv_bpffs_fd"))
+ goto cleanup;
+
+ mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts);
+ if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+ zclose(fs_fd);
+
+ /* pass detach-mounted BPF FS fd to child */
+ err = sendfd(sock_fd, mnt_fd);
+ if (!ASSERT_OK(err, "send_mnt_fd"))
+ goto cleanup;
+ zclose(mnt_fd);
+
+ /* receive BPF token FD back from child for some extra tests */
+ err = recvfd(sock_fd, &token_fd);
+ if (!ASSERT_OK(err, "recv_token_fd"))
+ goto cleanup;
+
+ err = wait_for_pid(child_pid);
+ ASSERT_OK(err, "waitpid_child");
+
+cleanup:
+ zclose(sock_fd);
+ zclose(fs_fd);
+ zclose(mnt_fd);
+ zclose(token_fd);
+
+ if (child_pid > 0)
+ (void)kill(child_pid, SIGKILL);
+}
+
+static void subtest_userns(struct bpffs_opts *bpffs_opts,
+ child_callback_fn child_cb)
+{
+ int sock_fds[2] = { -1, -1 };
+ int child_pid = 0, err;
+
+ err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds);
+ if (!ASSERT_OK(err, "socketpair"))
+ goto cleanup;
+
+ child_pid = fork();
+ if (!ASSERT_GE(child_pid, 0, "fork"))
+ goto cleanup;
+
+ if (child_pid == 0) {
+ zclose(sock_fds[0]);
+ return child(sock_fds[1], bpffs_opts, child_cb);
+ } else {
+ zclose(sock_fds[1]);
+ return parent(child_pid, bpffs_opts, sock_fds[0]);
+ }
+
+cleanup:
+ zclose(sock_fds[0]);
+ zclose(sock_fds[1]);
+ if (child_pid > 0)
+ (void)kill(child_pid, SIGKILL);
+}
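+
+/* parent/child handshake implemented above, in order:
+ *   1. child: fsopen("bpf"), verifies -EPERM on delegate_* options, sends fs_fd;
+ *   2. parent: sets delegate_* options, instantiates and fsmount()s the FS, sends mnt_fd;
+ *   3. child: re-verifies -EPERM via fspick(), creates and sends token_fd, runs callback;
+ *   4. parent: receives token_fd, waits for the child to exit.
+ */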
+
+static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts);
+ int err, token_fd = -1, map_fd = -1;
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* while inside non-init userns, we need both a BPF token *and*
+ * CAP_BPF inside current userns to create a privileged map; let's test
+ * that neither BPF token alone nor namespaced CAP_BPF is sufficient
+ */
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* no token, no CAP_BPF -> fail */
+ map_opts.map_flags = 0;
+ map_opts.token_fd = 0;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* token without CAP_BPF -> fail */
+ map_opts.map_flags = BPF_F_TOKEN_FD;
+ map_opts.token_fd = token_fd;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
+ err = restore_priv_caps(old_caps);
+ if (!ASSERT_OK(err, "restore_caps"))
+ goto cleanup;
+
+ /* CAP_BPF without token -> fail */
+ map_opts.map_flags = 0;
+ map_opts.token_fd = 0;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* finally, namespaced CAP_BPF + token -> success */
+ map_opts.map_flags = BPF_F_TOKEN_FD;
+ map_opts.token_fd = token_fd;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts);
+ if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ err = 0;
+cleanup:
+ zclose(token_fd);
+ zclose(map_fd);
+ return err;
+}
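+
+/* matrix validated above for privileged map creation inside a userns:
+ *
+ *   token | ns CAP_BPF | result
+ *   ------+------------+---------
+ *    no   |    no      | fail
+ *    yes  |    no      | fail
+ *    no   |    yes     | fail
+ *    yes  |    yes     | success
+ */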
+
+static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, btf_opts);
+ int err, token_fd = -1, btf_fd = -1;
+ const void *raw_btf_data;
+ struct btf *btf = NULL;
+ __u32 raw_btf_size;
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* while inside non-init userns, we need both a BPF token *and*
+ * CAP_BPF inside current userns to load BTF into the kernel; let's
+ * test that neither BPF token alone nor namespaced CAP_BPF is sufficient
+ */
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* set up trivial BTF data to load into the kernel */
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ goto cleanup;
+
+ ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data"))
+ goto cleanup;
+
+ /* no token + no CAP_BPF -> failure */
+ btf_opts.btf_flags = 0;
+ btf_opts.token_fd = 0;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail"))
+ goto cleanup;
+
+ /* token + no CAP_BPF -> failure */
+ btf_opts.btf_flags = BPF_F_TOKEN_FD;
+ btf_opts.token_fd = token_fd;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail"))
+ goto cleanup;
+
+ /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */
+ err = restore_priv_caps(old_caps);
+ if (!ASSERT_OK(err, "restore_caps"))
+ goto cleanup;
+
+ /* token + CAP_BPF -> success */
+ btf_opts.btf_flags = BPF_F_TOKEN_FD;
+ btf_opts.token_fd = token_fd;
+ btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts);
+ if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success"))
+ goto cleanup;
+
+ err = 0;
+cleanup:
+ btf__free(btf);
+ zclose(btf_fd);
+ zclose(token_fd);
+ return err;
+}
+
+static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts);
+ int err, token_fd = -1, prog_fd = -1;
+ struct bpf_insn insns[] = {
+ /* bpf_jiffies64() requires CAP_BPF */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ /* bpf_get_current_task() requires CAP_PERFMON */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task),
+ /* r0 = 0; exit; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ size_t insn_cnt = ARRAY_SIZE(insns);
+ __u64 old_caps = 0;
+
+ /* create BPF token from BPF FS mount */
+ token_fd = bpf_token_create(mnt_fd, NULL);
+ if (!ASSERT_GT(token_fd, 0, "token_create")) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* validate we can successfully load BPF program with token; this
+ * being an XDP program (CAP_NET_ADMIN) that uses the bpf_jiffies64()
+ * (CAP_BPF) and bpf_get_current_task() (CAP_PERFMON) helpers validates
+ * that the BPF token is wired up properly in a bunch of places in the
+ * kernel
+ */
+ prog_opts.prog_flags = BPF_F_TOKEN_FD;
+ prog_opts.token_fd = token_fd;
+ prog_opts.expected_attach_type = BPF_XDP;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_GT(prog_fd, 0, "prog_fd")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ /* no token + caps -> failure */
+ prog_opts.prog_flags = 0;
+ prog_opts.token_fd = 0;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_no_token_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ err = drop_priv_caps(&old_caps);
+ if (!ASSERT_OK(err, "drop_caps"))
+ goto cleanup;
+
+ /* no caps + token -> failure */
+ prog_opts.prog_flags = BPF_F_TOKEN_FD;
+ prog_opts.token_fd = token_fd;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_no_caps_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ /* no caps + no token -> definitely a failure */
+ prog_opts.prog_flags = 0;
+ prog_opts.token_fd = 0;
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL",
+ insns, insn_cnt, &prog_opts);
+ if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_no_caps_no_token_eperm")) {
+ err = -EPERM;
+ goto cleanup;
+ }
+
+ err = 0;
+cleanup:
+ zclose(prog_fd);
+ zclose(token_fd);
+ return err;
+}
+
+static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct priv_map *skel;
+ int err;
+
+ skel = priv_map__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ priv_map__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* use bpf_token_path to provide BPF FS path */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = priv_map__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+
+ err = priv_map__load(skel);
+ priv_map__destroy(skel);
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct priv_prog *skel;
+ int err;
+
+ skel = priv_prog__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ priv_prog__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* use bpf_token_path to provide BPF FS path */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+
+ /* provide BPF token, but reject bpf_token_capable() with LSM */
+ lsm_skel->bss->reject_capable = true;
+ lsm_skel->bss->reject_cmd = false;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load"))
+ return -EINVAL;
+
+ /* provide BPF token, but reject bpf_token_cmd() with LSM */
+ lsm_skel->bss->reject_capable = false;
+ lsm_skel->bss->reject_cmd = true;
+ skel = priv_prog__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open"))
+ return -EINVAL;
+ err = priv_prog__load(skel);
+ priv_prog__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* this helper is called both with a BPF FS that doesn't delegate the
+ * BPF_BTF_LOAD command, which should cause struct_ops application to fail
+ * (BTF won't be uploaded into the kernel, even if STRUCT_OPS programs
+ * themselves are allowed), and with one that does delegate it
+ */
+static int validate_struct_ops_load(int mnt_fd, bool expect_success)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char buf[256];
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd);
+ opts.bpf_token_path = buf;
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
+ return -EINVAL;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (expect_success) {
+ if (!ASSERT_OK(err, "obj_token_path_load"))
+ return -EINVAL;
+ } else /* expect failure */ {
+ if (!ASSERT_ERR(err, "obj_token_path_load"))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ return validate_struct_ops_load(mnt_fd, false /* should fail */);
+}
+
+static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ return validate_struct_ops_load(mnt_fd, true /* should succeed */);
+}
+
+#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH"
+#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs"
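+
+/* per the tests below, libbpf consults the LIBBPF_BPF_TOKEN_PATH envvar
+ * when implicitly creating a BPF token: an empty value disables implicit
+ * token creation, while a non-empty value points it at a custom BPF FS
+ * mount instead of the default /sys/fs/bpf
+ */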
+
+static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ /* before we mount BPF FS with token delegation, struct_ops skeleton
+ * should fail to load
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF
+ * token automatically and implicitly
+ */
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH);
+ if (!ASSERT_OK(err, "move_mount_bpffs"))
+ return -EINVAL;
+
+ /* disable implicit BPF token creation by setting
+ * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail
+ */
+ err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/);
+ if (!ASSERT_OK(err, "setenv_token_path"))
+ return -EINVAL;
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) {
+ unsetenv(TOKEN_ENVVAR);
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+ unsetenv(TOKEN_ENVVAR);
+
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
+ * creating BPF token from /sys/fs/bpf mount point
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+ return -EINVAL;
+
+ dummy_st_ops_success__destroy(skel);
+
+ /* now disable implicit token through empty bpf_token_path, should fail */
+ opts.bpf_token_path = "";
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+ return -EINVAL;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct dummy_st_ops_success *skel;
+ int err;
+
+ /* before we mount BPF FS with token delegation, struct_ops skeleton
+ * should fail to load
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
+ dummy_st_ops_success__destroy(skel);
+ return -EINVAL;
+ }
+
+ /* mount custom BPF FS at a custom location, so libbpf can't create
+ * BPF token implicitly, unless pointed to it through
+ * LIBBPF_BPF_TOKEN_PATH envvar
+ */
+ rmdir(TOKEN_BPFFS_CUSTOM);
+ if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom"))
+ goto err_out;
+ err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH);
+ if (!ASSERT_OK(err, "move_mount_bpffs"))
+ goto err_out;
+
+ /* even though we have BPF FS with delegation, it's not at default
+ * /sys/fs/bpf location, so we still fail to load until envvar is set up
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) {
+ dummy_st_ops_success__destroy(skel);
+ goto err_out;
+ }
+
+ err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/);
+ if (!ASSERT_OK(err, "setenv_token_path"))
+ goto err_out;
+
+ /* now the same struct_ops skeleton should succeed thanks to libbpf
+ * creating BPF token from custom mount point
+ */
+ skel = dummy_st_ops_success__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load"))
+ goto err_out;
+
+ dummy_st_ops_success__destroy(skel);
+
+ /* now disable implicit token through empty bpf_token_path, envvar
+ * will be ignored, should fail
+ */
+ opts.bpf_token_path = "";
+ skel = dummy_st_ops_success__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open"))
+ goto err_out;
+
+ err = dummy_st_ops_success__load(skel);
+ dummy_st_ops_success__destroy(skel);
+ if (!ASSERT_ERR(err, "obj_empty_token_path_load"))
+ goto err_out;
+
+ rmdir(TOKEN_BPFFS_CUSTOM);
+ unsetenv(TOKEN_ENVVAR);
+ return 0;
+err_out:
+ rmdir(TOKEN_BPFFS_CUSTOM);
+ unsetenv(TOKEN_ENVVAR);
+ return -EINVAL;
+}
+
+#define bit(n) (1ULL << (n))
+
+void test_token(void)
+{
+ if (test__start_subtest("map_token")) {
+ struct bpffs_opts opts = {
+ .cmds_str = "map_create",
+ .maps_str = "stack",
+ };
+
+ subtest_userns(&opts, userns_map_create);
+ }
+ if (test__start_subtest("btf_token")) {
+ struct bpffs_opts opts = {
+ .cmds = 1ULL << BPF_BTF_LOAD,
+ };
+
+ subtest_userns(&opts, userns_btf_load);
+ }
+ if (test__start_subtest("prog_token")) {
+ struct bpffs_opts opts = {
+ .cmds_str = "PROG_LOAD",
+ .progs_str = "XDP",
+ .attachs_str = "xdp",
+ };
+
+ subtest_userns(&opts, userns_prog_load);
+ }
+ if (test__start_subtest("obj_priv_map")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_MAP_CREATE),
+ .maps = bit(BPF_MAP_TYPE_QUEUE),
+ };
+
+ subtest_userns(&opts, userns_obj_priv_map);
+ }
+ if (test__start_subtest("obj_priv_prog")) {
+ struct bpffs_opts opts = {
+ .cmds = bit(BPF_PROG_LOAD),
+ .progs = bit(BPF_PROG_TYPE_KPROBE),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_prog);
+ }
+ if (test__start_subtest("obj_priv_btf_fail")) {
+ struct bpffs_opts opts = {
+ /* disallow BTF loading */
+ .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_btf_fail);
+ }
+ if (test__start_subtest("obj_priv_btf_success")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_btf_success);
+ }
+ if (test__start_subtest("obj_priv_implicit_token")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_implicit_token);
+ }
+ if (test__start_subtest("obj_priv_implicit_token_envvar")) {
+ struct bpffs_opts opts = {
+ /* allow BTF loading */
+ .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
+ .maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
+ .progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
+ .attachs = ~0ULL,
+ };
+
+ subtest_userns(&opts, userns_obj_priv_implicit_token_envvar);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
new file mode 100644
index 000000000000..a222df765bc3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "tracing_failure.skel.h"
+
+static void test_bpf_spin_lock(bool is_spin_lock)
+{
+ struct tracing_failure *skel;
+ int err;
+
+ skel = tracing_failure__open();
+ if (!ASSERT_OK_PTR(skel, "tracing_failure__open"))
+ return;
+
+ if (is_spin_lock)
+ bpf_program__set_autoload(skel->progs.test_spin_lock, true);
+ else
+ bpf_program__set_autoload(skel->progs.test_spin_unlock, true);
+
+ err = tracing_failure__load(skel);
+ if (!ASSERT_OK(err, "tracing_failure__load"))
+ goto out;
+
+ err = tracing_failure__attach(skel);
+ ASSERT_ERR(err, "tracing_failure__attach");
+
+out:
+ tracing_failure__destroy(skel);
+}
+
+void test_tracing_failure(void)
+{
+ if (test__start_subtest("bpf_spin_lock"))
+ test_bpf_spin_lock(true);
+ if (test__start_subtest("bpf_spin_unlock"))
+ test_bpf_spin_lock(false);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index d62c5bf00e71..9c6072a19745 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -28,6 +28,7 @@
#include "verifier_div0.skel.h"
#include "verifier_div_overflow.skel.h"
#include "verifier_global_subprogs.skel.h"
+#include "verifier_global_ptr_args.skel.h"
#include "verifier_gotol.skel.h"
#include "verifier_helper_access_var_len.skel.h"
#include "verifier_helper_packet_access.skel.h"
@@ -140,6 +141,7 @@ void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_st
void test_verifier_div0(void) { RUN(verifier_div0); }
void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); }
void test_verifier_global_subprogs(void) { RUN(verifier_global_subprogs); }
+void test_verifier_global_ptr_args(void) { RUN(verifier_global_ptr_args); }
void test_verifier_gotol(void) { RUN(verifier_gotol); }
void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); }
void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
index c3b45745cbcc..6d8b54124cb3 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
@@ -511,7 +511,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
- if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+ if (!ASSERT_EQ(query_opts.feature_flags, 0,
"bond query_opts.feature_flags"))
goto out;
@@ -601,7 +601,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
- ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+ ASSERT_EQ(query_opts.feature_flags, 0,
"bond query_opts.feature_flags");
out:
bpf_link__destroy(link);
diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
index f3927829a55a..4599154c8e9b 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdpwall.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c
@@ -9,7 +9,7 @@ void test_xdpwall(void)
struct xdpwall *skel;
skel = xdpwall__open_and_load();
- ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?");
+ ASSERT_OK_PTR(skel, "Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5?");
xdpwall__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/progs/async_stack_depth.c b/tools/testing/selftests/bpf/progs/async_stack_depth.c
index 3517c0e01206..36734683acbd 100644
--- a/tools/testing/selftests/bpf/progs/async_stack_depth.c
+++ b/tools/testing/selftests/bpf/progs/async_stack_depth.c
@@ -30,7 +30,7 @@ static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
}
SEC("tc")
-__failure __msg("combined stack size of 2 calls is 576. Too large")
+__failure __msg("combined stack size of 2 calls is")
int pseudo_call_check(struct __sk_buff *ctx)
{
struct hmap_elem *elem;
@@ -45,7 +45,7 @@ int pseudo_call_check(struct __sk_buff *ctx)
}
SEC("tc")
-__failure __msg("combined stack size of 2 calls is 608. Too large")
+__failure __msg("combined stack size of 2 calls is")
int async_call_root_check(struct __sk_buff *ctx)
{
struct hmap_elem *elem;
diff --git a/tools/testing/selftests/bpf/progs/bpf_compiler.h b/tools/testing/selftests/bpf/progs/bpf_compiler.h
new file mode 100644
index 000000000000..a7c343dc82e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_compiler.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_COMPILER_H__
+#define __BPF_COMPILER_H__
+
+#define DO_PRAGMA_(X) _Pragma(#X)
+
+#if __clang__
+#define __pragma_loop_unroll DO_PRAGMA_(clang loop unroll(enable))
+#else
+/* In GCC, -funroll-loops (which is enabled with -O2) should have the
+ same effect as the loop-unroll-enable pragma above. */
+#define __pragma_loop_unroll
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(clang loop unroll_count(N))
+#else
+#define __pragma_loop_unroll_count(N) DO_PRAGMA_(GCC unroll N)
+#endif
+
+#if __clang__
+#define __pragma_loop_unroll_full DO_PRAGMA_(clang loop unroll(full))
+#else
+#define __pragma_loop_unroll_full DO_PRAGMA_(GCC unroll 65534)
+#endif
+
+#if __clang__
+#define __pragma_loop_no_unroll DO_PRAGMA_(clang loop unroll(disable))
+#else
+#define __pragma_loop_no_unroll DO_PRAGMA_(GCC unroll 1)
+#endif
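+
+/* illustrative use: each macro expands to the matching compiler pragma
+ * (or to nothing), e.g.:
+ *
+ *   __pragma_loop_unroll_count(4)
+ *   for (i = 0; i < 16; i++)
+ *       sum += arr[i];
+ */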
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index 2fd59970c43a..fb2f5513e29e 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -80,7 +80,7 @@
#define __imm(name) [name]"i"(name)
#define __imm_const(name, expr) [name]"i"(expr)
#define __imm_addr(name) [name]"i"(&name)
-#define __imm_ptr(name) [name]"p"(&name)
+#define __imm_ptr(name) [name]"r"(&name)
#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr))
/* Magic constants used with __retval() */
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
index e8bd4b7b5ef7..7001965d1cc3 100644
--- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
+++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
@@ -51,9 +51,25 @@
#define ICSK_TIME_LOSS_PROBE 5
#define ICSK_TIME_REO_TIMEOUT 6
+#define ETH_ALEN 6
#define ETH_HLEN 14
+#define ETH_P_IP 0x0800
#define ETH_P_IPV6 0x86DD
+#define NEXTHDR_TCP 6
+
+#define TCPOPT_NOP 1
+#define TCPOPT_EOL 0
+#define TCPOPT_MSS 2
+#define TCPOPT_WINDOW 3
+#define TCPOPT_TIMESTAMP 8
+#define TCPOPT_SACK_PERM 4
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_SACK_PERM 2
+
#define CHECKSUM_NONE 0
#define CHECKSUM_PARTIAL 3
diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
index 610c2427fd93..3500e4b69ebe 100644
--- a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
+++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
@@ -27,32 +27,6 @@ bool is_cgroup1 = 0;
struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
-static void __on_lookup(struct cgroup *cgrp)
-{
- bpf_cgrp_storage_delete(&map_a, cgrp);
- bpf_cgrp_storage_delete(&map_b, cgrp);
-}
-
-SEC("fentry/bpf_local_storage_lookup")
-int BPF_PROG(on_lookup)
-{
- struct task_struct *task = bpf_get_current_task_btf();
- struct cgroup *cgrp;
-
- if (is_cgroup1) {
- cgrp = bpf_task_get_cgroup1(task, target_hid);
- if (!cgrp)
- return 0;
-
- __on_lookup(cgrp);
- bpf_cgroup_release(cgrp);
- return 0;
- }
-
- __on_lookup(task->cgroups->dfl_cgrp);
- return 0;
-}
-
static void __on_update(struct cgroup *cgrp)
{
long *ptr;
diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
index ca8aa2f116b3..2ef0e0c46d17 100644
--- a/tools/testing/selftests/bpf/progs/connect_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c
@@ -28,8 +28,7 @@ int connect_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 0;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h
index 0cd4aebb97cf..c705d8112a35 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_common.h
+++ b/tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -23,41 +23,42 @@ struct array_map {
__uint(max_entries, 1);
} __cpumask_map SEC(".maps");
-struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
-void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
-struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
-u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
-u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+struct bpf_cpumask *bpf_cpumask_create(void) __ksym __weak;
+void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak;
+struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak;
+u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak;
u32 bpf_cpumask_first_and(const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
-void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
-bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
-void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
-void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
+ const struct cpumask *src2) __ksym __weak;
+void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym __weak;
bool bpf_cpumask_and(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
+ const struct cpumask *src2) __ksym __weak;
void bpf_cpumask_or(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
+ const struct cpumask *src2) __ksym __weak;
void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
const struct cpumask *src1,
- const struct cpumask *src2) __ksym;
-bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
-bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
-void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
-u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym;
-u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym;
-u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
-
-void bpf_rcu_read_lock(void) __ksym;
-void bpf_rcu_read_unlock(void) __ksym;
+ const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak;
+bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym __weak;
+bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym __weak;
+void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak;
+u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
+ const struct cpumask *src2) __ksym __weak;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak;
+
+void bpf_rcu_read_lock(void) __ksym __weak;
+void bpf_rcu_read_unlock(void) __ksym __weak;
static inline const struct cpumask *cast(struct bpf_cpumask *cpumask)
{
diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
index 9c078f34bbb2..5a76754f846b 100644
--- a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c
@@ -27,8 +27,7 @@ int getpeername_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 1;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
index ac7145111497..7867113c696f 100644
--- a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c
@@ -27,8 +27,7 @@ int getsockname_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 1;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index fe971992e635..3db416606f2f 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -5,6 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_compiler.h"
#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0]))
@@ -78,8 +79,8 @@ int iter_err_unsafe_asm_loop(const void *ctx)
"*(u32 *)(r1 + 0) = r6;" /* invalid */
:
: [it]"r"(&it),
- [small_arr]"p"(small_arr),
- [zero]"p"(zero),
+ [small_arr]"r"(small_arr),
+ [zero]"r"(zero),
__imm(bpf_iter_num_new),
__imm(bpf_iter_num_next),
__imm(bpf_iter_num_destroy)
@@ -183,7 +184,7 @@ int iter_pragma_unroll_loop(const void *ctx)
MY_PID_GUARD();
bpf_iter_num_new(&it, 0, 2);
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
@@ -238,7 +239,7 @@ int iter_multiple_sequential_loops(const void *ctx)
bpf_iter_num_destroy(&it);
bpf_iter_num_new(&it, 0, 2);
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 3; i++) {
v = bpf_iter_num_next(&it);
bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
diff --git a/tools/testing/selftests/bpf/progs/iters_task.c b/tools/testing/selftests/bpf/progs/iters_task.c
index c9b4055cd410..e4d53e40ff20 100644
--- a/tools/testing/selftests/bpf/progs/iters_task.c
+++ b/tools/testing/selftests/bpf/progs/iters_task.c
@@ -10,7 +10,7 @@
char _license[] SEC("license") = "GPL";
pid_t target_pid;
-int procs_cnt, threads_cnt, proc_threads_cnt;
+int procs_cnt, threads_cnt, proc_threads_cnt, invalid_cnt;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
@@ -26,6 +26,16 @@ int iter_task_for_each_sleep(void *ctx)
procs_cnt = threads_cnt = proc_threads_cnt = 0;
bpf_rcu_read_lock();
+ bpf_for_each(task, pos, NULL, ~0U) {
+ /* Below instructions shouldn't be executed for invalid flags */
+ invalid_cnt++;
+ }
+
+ bpf_for_each(task, pos, NULL, BPF_TASK_ITER_PROC_THREADS) {
+ /* Below instructions shouldn't be executed for NULL (invalid) task__nullable */
+ invalid_cnt++;
+ }
+
bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS)
if (pos->pid == target_pid)
procs_cnt++;
diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
new file mode 100644
index 000000000000..2414ac20b6d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bin_data {
+ char blob[32];
+};
+
+#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
+private(kptr) struct bin_data __kptr * ptr;
+
+SEC("tc")
+__naked int kptr_xchg_inline(void)
+{
+ asm volatile (
+ "r1 = %[ptr] ll;"
+ "r2 = 0;"
+ "call %[bpf_kptr_xchg];"
+ "if r0 == 0 goto 1f;"
+ "r1 = r0;"
+ "r2 = 0;"
+ "call %[bpf_obj_drop_impl];"
+ "1:"
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_addr(ptr),
+ __imm(bpf_kptr_xchg),
+ __imm(bpf_obj_drop_impl)
+ : __clobber_all
+ );
+}
+
+/* BTF FUNC records are not generated for kfuncs referenced
+ * from inline assembly. These records are necessary for
+ * libbpf to link the program. The function below is a hack
+ * to ensure that BTF FUNC records are generated.
+ */
+void __btf_root(void)
+{
+ bpf_obj_drop(NULL);
+}
diff --git a/tools/testing/selftests/bpf/progs/loop4.c b/tools/testing/selftests/bpf/progs/loop4.c
index b35337926d66..0de0357f57cc 100644
--- a/tools/testing/selftests/bpf/progs/loop4.c
+++ b/tools/testing/selftests/bpf/progs/loop4.c
@@ -3,6 +3,8 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
SEC("socket")
@@ -10,7 +12,7 @@ int combinations(volatile struct __sk_buff* skb)
{
int ret = 0, i;
-#pragma nounroll
+ __pragma_loop_no_unroll
for (i = 0; i < 20; i++)
if (skb->len)
ret |= 1 << i;
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index 3325da17ec81..efaf622c28dd 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -316,7 +316,7 @@ struct lpm_trie {
} __attribute__((preserve_access_index));
struct lpm_key {
- struct bpf_lpm_trie_key trie_key;
+ struct bpf_lpm_trie_key_hdr trie_key;
__u32 data;
};
diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c
new file mode 100644
index 000000000000..9085be50f03b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_map.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_QUEUE);
+ __uint(max_entries, 1);
+ __type(value, __u32);
+} priv_map SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c
new file mode 100644
index 000000000000..3c7b2b618c8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/priv_prog.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe")
+int kprobe_prog(void *ctx)
+{
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index de3b6e4e4d0a..6957d9f2805e 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -8,6 +8,7 @@
#include "profiler.h"
#include "err.h"
#include "bpf_experimental.h"
+#include "bpf_compiler.h"
#ifndef NULL
#define NULL 0
@@ -169,7 +170,7 @@ static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct,
int spid)
{
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == spid)
@@ -185,7 +186,7 @@ static INLINE void populate_ancestors(struct task_struct* task,
ancestors_data->num_ancestors = 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) {
parent = BPF_CORE_READ(parent, real_parent);
@@ -212,7 +213,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
size_t filepart_length;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
filepart_length =
@@ -261,7 +262,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local,
pids_cgrp_id___local);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
struct cgroup_subsys_state* subsys =
@@ -402,7 +403,7 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
if (kill_data == NULL)
return 0;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
if (arr_struct->array[i].meta.pid == 0) {
@@ -482,7 +483,7 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
filepart_length =
@@ -508,7 +509,7 @@ is_ancestor_in_allowed_inodes(struct dentry* filp_dentry)
{
struct dentry* parent_dentry;
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < MAX_PATH_DEPTH; i++) {
u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino);
@@ -629,7 +630,7 @@ int raw_tracepoint__sched_process_exit(void* ctx)
struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
#ifdef UNROLL
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 026d573ce179..86484f07e1d1 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -8,6 +8,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
+#include "bpf_compiler.h"
#define FUNCTION_NAME_LEN 64
#define FILE_NAME_LEN 128
@@ -298,11 +299,11 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
#if defined(USE_ITER)
/* no for loop, no unrolling */
#elif defined(NO_UNROLL)
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#elif defined(UNROLL_COUNT)
-#pragma clang loop unroll_count(UNROLL_COUNT)
+ __pragma_loop_unroll_count(UNROLL_COUNT)
#else
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
#endif /* NO_UNROLL */
/* Unwind python stack */
#ifdef USE_ITER
diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
index 14fb01437fb8..ab3a532b7dd6 100644
--- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c
+++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c
@@ -319,3 +319,123 @@ int cross_rcu_region(void *ctx)
bpf_rcu_read_unlock();
return 0;
}
+
+__noinline
+static int static_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog(NULL);
+}
+
+__noinline
+static int static_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_lock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_lock(NULL);
+}
+
+__noinline
+static int static_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_unlock();
+ if (bpf_get_prandom_u32())
+ return ret + 42;
+ return ret + bpf_get_prandom_u32();
+}
+
+__noinline
+int global_subprog_unlock(u64 a)
+{
+ volatile int ret = a;
+
+ return ret + static_subprog_unlock(NULL);
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += static_subprog(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ if (bpf_get_prandom_u32())
+ ret += global_subprog(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ ret += static_subprog_lock(ctx);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_lock(void *ctx)
+{
+ volatile int ret = 0;
+
+ ret += global_subprog_lock(ret);
+ bpf_rcu_read_unlock();
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += static_subprog_unlock(ctx);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int rcu_read_lock_global_subprog_unlock(void *ctx)
+{
+ volatile int ret = 0;
+
+ bpf_rcu_read_lock();
+ ret += global_subprog_unlock(ret);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/read_vsyscall.c b/tools/testing/selftests/bpf/progs/read_vsyscall.c
new file mode 100644
index 000000000000..986f96687ae1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/read_vsyscall.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+int target_pid = 0;
+void *user_ptr = 0;
+int read_ret[8];
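+/* read_ret[] slots, matching the assignments below:
+ *   0/1: bpf_probe_read_kernel{,_str}, 2/3: bpf_probe_read{,_str},
+ *   4/5: bpf_probe_read_user{,_str}, 6: bpf_copy_from_user,
+ *   7: bpf_copy_from_user_task
+ */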
+
+char _license[] SEC("license") = "GPL";
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int do_probe_read(void *ctx)
+{
+ char buf[8];
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ read_ret[0] = bpf_probe_read_kernel(buf, sizeof(buf), user_ptr);
+ read_ret[1] = bpf_probe_read_kernel_str(buf, sizeof(buf), user_ptr);
+ read_ret[2] = bpf_probe_read(buf, sizeof(buf), user_ptr);
+ read_ret[3] = bpf_probe_read_str(buf, sizeof(buf), user_ptr);
+ read_ret[4] = bpf_probe_read_user(buf, sizeof(buf), user_ptr);
+ read_ret[5] = bpf_probe_read_user_str(buf, sizeof(buf), user_ptr);
+
+ return 0;
+}
+
+SEC("fentry.s/" SYS_PREFIX "sys_nanosleep")
+int do_copy_from_user(void *ctx)
+{
+ char buf[8];
+
+ if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
+ return 0;
+
+ read_ret[6] = bpf_copy_from_user(buf, sizeof(buf), user_ptr);
+ read_ret[7] = bpf_copy_from_user_task(buf, sizeof(buf), user_ptr,
+ bpf_get_current_task_btf(), 0);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
index 4dfbc8552558..1c7ab44bccfa 100644
--- a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c
@@ -27,8 +27,7 @@ int recvmsg_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 1;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS,
sizeof(SERVUN_ADDRESS) - 1) != 0)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
index 1f67e832666e..d8869b03dda9 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c
@@ -28,8 +28,7 @@ int sendmsg_unix_prog(struct bpf_sock_addr *ctx)
if (sa_kern->uaddrlen != unaddrlen)
return 0;
- sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr,
- bpf_core_type_id_kernel(struct sockaddr_un));
+ sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un);
if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS,
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
index 3e745793b27a..46d6eb2a3b17 100644
--- a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
+++ b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
@@ -12,8 +12,6 @@ int cookie_found = 0;
__u64 cookie = 0;
__u32 omem = 0;
-void *bpf_rdonly_cast(void *, __u32) __ksym;
-
struct {
__uint(type, BPF_MAP_TYPE_SK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
@@ -29,7 +27,7 @@ int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage)
if (local_storage_ptr != local_storage)
return 0;
- sk = bpf_rdonly_cast(sk_ptr, bpf_core_type_id_kernel(struct sock));
+ sk = bpf_core_cast(sk_ptr, struct sock);
if (sk->sk_cookie.counter != cookie)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
index ffbbfe1fa1c1..96531b0d9d55 100644
--- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c
+++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c
@@ -32,7 +32,7 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx)
if (!sk)
return 0;
- sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock));
+ sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != AF_INET6 ||
sk->sk_state != TCP_LISTEN ||
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr))
@@ -68,7 +68,7 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx)
if (!sk)
return 0;
- sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock));
+ sk = bpf_core_cast(sk, struct sock);
if (sk->sk_family != AF_INET6 ||
!ipv6_addr_loopback(&sk->sk_v6_rcv_saddr))
return 0;
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index 40df2cc26eaf..f74459eead26 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -10,6 +10,8 @@
#include <linux/types.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
typedef uint32_t pid_t;
struct task_struct {};
@@ -419,9 +421,9 @@ static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg,
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif
for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) {
if (i >= map.cnt)
@@ -560,25 +562,25 @@ static void *read_strobe_meta(struct task_struct *task,
payload_off = sizeof(data->payload);
#else
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_INTS; ++i) {
read_int_var(cfg, i, tls_base, &value, data);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_STRS; ++i) {
payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off);
}
#ifdef NO_UNROLL
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
#else
-#pragma unroll
+ __pragma_loop_unroll
#endif /* NO_UNROLL */
for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off);
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
new file mode 100644
index 000000000000..b450f72e744a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+/* This test BPF program uses struct_ops to access an argument that may
+ * be NULL, checking that the verifier correctly strips PTR_MAYBE_NULL
+ * from the pointer once it has been null-checked.
+ */
+SEC("struct_ops/test_maybe_null")
+int BPF_PROG(test_maybe_null, int dummy,
+ struct task_struct *task)
+{
+ if (task)
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_maybe_null = (void *)test_maybe_null,
+};
+
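For context, a .struct_ops.link map like testmod_1 above is registered from user space through libbpf. A minimal sketch, assuming a bpftool-generated skeleton named struct_ops_maybe_null.skel.h (the harness itself is illustrative, not part of the patch):

    #include <bpf/libbpf.h>
    #include "struct_ops_maybe_null.skel.h"

    static int attach_testmod_ops(void)
    {
            struct struct_ops_maybe_null *skel;
            struct bpf_link *link;

            skel = struct_ops_maybe_null__open_and_load();
            if (!skel)
                    return -1;

            /* registers the struct_ops map with bpf_testmod */
            link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
            if (!link) {
                    struct_ops_maybe_null__destroy(skel);
                    return -1;
            }

            bpf_link__destroy(link);
            struct_ops_maybe_null__destroy(skel);
            return 0;
    }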
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
new file mode 100644
index 000000000000..6283099ec383
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+pid_t tgid = 0;
+
+SEC("struct_ops/test_maybe_null_struct_ptr")
+int BPF_PROG(test_maybe_null_struct_ptr, int dummy,
+ struct task_struct *task)
+{
+ tgid = task->tgid;
+
+ return 0;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_struct_ptr = {
+ .test_maybe_null = (void *)test_maybe_null_struct_ptr,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c
new file mode 100644
index 000000000000..25952fa09348
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+
+char _license[] SEC("license") = "GPL";
+
+int test_2_result = 0;
+
+SEC("struct_ops/test_1")
+int BPF_PROG(test_1)
+{
+ return 0xdeadbeef;
+}
+
+SEC("struct_ops/test_2")
+void BPF_PROG(test_2, int a, int b)
+{
+ test_2_result = a + b;
+}
+
+SEC("struct_ops/test_3")
+int BPF_PROG(test_3, int a, int b)
+{
+ test_2_result = a + b + 3;
+ return a + b + 3;
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_ops testmod_1 = {
+ .test_1 = (void *)test_1,
+ .test_2 = (void *)test_2,
+ .data = 0x1,
+};
+
diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
index 4542dc683b44..f1853c38aada 100644
--- a/tools/testing/selftests/bpf/progs/task_ls_recursion.c
+++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c
@@ -27,23 +27,6 @@ struct {
__type(value, long);
} map_b SEC(".maps");
-SEC("fentry/bpf_local_storage_lookup")
-int BPF_PROG(on_lookup)
-{
- struct task_struct *task = bpf_get_current_task_btf();
-
- if (!test_pid || task->pid != test_pid)
- return 0;
-
- /* The bpf_task_storage_delete will call
- * bpf_local_storage_lookup. The prog->active will
- * stop the recursion.
- */
- bpf_task_storage_delete(&map_a, task);
- bpf_task_storage_delete(&map_b, task);
- return 0;
-}
-
SEC("fentry/bpf_local_storage_update")
int BPF_PROG(on_update)
{
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index 66b304982245..683c8aaa63da 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -20,8 +20,11 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
#include "test_cls_redirect.h"
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
#ifdef SUBPROGS
#define INLINING __noinline
#else
@@ -267,7 +270,7 @@ static INLINING void pkt_ipv4_checksum(struct iphdr *iph)
uint32_t acc = 0;
uint16_t *ipw = (uint16_t *)iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) {
acc += ipw[i];
}
@@ -294,7 +297,7 @@ bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
};
*is_fragment = false;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 6; i++) {
switch (exthdr.next) {
case IPPROTO_FRAGMENT:
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
index f41c81212ee9..da54c09e9a15 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c
@@ -23,6 +23,8 @@
#include "test_cls_redirect.h"
#include "bpf_kfuncs.h"
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
index 22aba3f6e344..6fc8b9d66e34 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
@@ -80,7 +80,7 @@ int test_core_type_id(void *ctx)
* to detect whether this test has to be executed, however strange
* that might look.
*
- * [0] https://reviews.llvm.org/D85174
+ * [0] https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99
*/
#if __has_builtin(__builtin_preserve_type_info)
struct core_reloc_type_id_output *out = (void *)&data.out;
diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
index 69509f8bb680..6afa834756e9 100644
--- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
@@ -33,6 +33,12 @@ int BPF_PROG(tp_run)
return 0;
}
+SEC("perf_event")
+int event_run(void *ctx)
+{
+ return 0;
+}
+
SEC("kprobe.multi")
int BPF_PROG(kmulti_run)
{
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index 17a9f59bf5f3..fc69ff18880d 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -5,7 +5,7 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#define MAX_STACK (512 - 3 * 32 + 8)
+#define MAX_STACK 260
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
@@ -30,6 +30,10 @@ int f3(int, struct __sk_buff *skb, int);
__attribute__ ((noinline))
int f2(int val, struct __sk_buff *skb)
{
+ volatile char buf[MAX_STACK] = {};
+
+ __sink(buf[MAX_STACK - 1]);
+
return f1(skb) + f3(val, skb, 1);
}
@@ -44,7 +48,7 @@ int f3(int val, struct __sk_buff *skb, int var)
}
SEC("tc")
-__failure __msg("combined stack size of 4 calls is 544")
+__failure __msg("combined stack size of 3 calls is")
int global_func1(struct __sk_buff *skb)
{
return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
index 9a06e5eb1fbe..143c8a4852bf 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c
@@ -26,6 +26,23 @@ int kprobe_typedef_ctx(void *ctx)
return kprobe_typedef_ctx_subprog(ctx);
}
+/* s390x defines:
+ *
+ * typedef user_pt_regs bpf_user_pt_regs_t;
+ * typedef struct { ... } user_pt_regs;
+ *
+ * And so the "canonical" underlying struct type is anonymous.
+ * Thus, on s390x the only valid ways to have a PTR_TO_CTX argument in
+ * global subprogs are:
+ * - bpf_user_pt_regs_t *ctx (typedef);
+ * - struct bpf_user_pt_regs_t *ctx (backwards-compatible struct hack);
+ * - void *ctx __arg_ctx (arg:ctx tag).
+ *
+ * Other architectures also allow using the underlying struct type
+ * directly (e.g., `struct pt_regs *ctx` for x86-64)
+ */
+#ifndef bpf_target_s390
+
#define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL)))
__weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx)
@@ -40,6 +57,8 @@ int kprobe_resolved_ctx(void *ctx)
return kprobe_struct_ctx_subprog(ctx);
}
+#endif
+
/* this is the current hack to make this work on old kernels */
struct bpf_user_pt_regs_t {};
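A sketch of the third form listed in the comment above, the arg:ctx decl tag, which works on every architecture including s390x. This assumes the same includes as the file above and that __arg_ctx expands to __attribute__((btf_decl_tag("arg:ctx"))), as in the selftests' bpf_misc.h:

    /* __arg_ctx is assumed from bpf_misc.h; subprog name is illustrative */
    __weak int kprobe_ctx_tag_subprog(void *ctx __arg_ctx)
    {
            return bpf_get_smp_processor_id();
    }

    SEC("?kprobe")
    int kprobe_ctx_tag(void *ctx)
    {
            return kprobe_ctx_tag_subprog(ctx);
    }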
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 48ff2b2ad5e7..fed66f36adb6 100644
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -131,7 +133,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 10; i++) {
struct sr6_tlv_t tlv;
@@ -302,7 +304,7 @@ int __encap_srh(struct __sk_buff *skb)
seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh));
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (unsigned long long lo = 0; lo < 4; lo++) {
seg->lo = bpf_cpu_to_be64(4 - lo);
seg->hi = bpf_cpu_to_be64(hi);
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index f416032ba858..b295f9b721bf 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -21,6 +21,32 @@ struct {
__type(value, __u32);
} mim_hash SEC(".maps");
+/* The following three maps are used to test that a perf_event_array
+ * map can be used as an inner map of hash- and array-of-maps.
+ */
+struct perf_event_array {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+} inner_map0 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_array_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_hash_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
SEC("xdp")
int xdp_mimtest0(struct xdp_md *ctx)
{
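The point of the two outer maps above is that the inner perf_event_array can be looked up and used at run time. A hedged sketch of what a consumer could look like (program name and payload are illustrative, and assume the same includes as the file above):

    SEC("xdp")
    int xdp_use_inner_pe(struct xdp_md *ctx)
    {
            __u32 key = 0;
            void *inner;

            inner = bpf_map_lookup_elem(&mim_array_pe, &key);
            if (inner)
                    /* emit a sample through the inner perf_event_array */
                    bpf_perf_event_output(ctx, inner, BPF_F_CURRENT_CPU,
                                          &key, sizeof(key));
            return XDP_PASS;
    }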
diff --git a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
index 4bdd65b5aa2d..2fdc44e76624 100644
--- a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
+++ b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c
@@ -6,13 +6,13 @@
char tp_name[128];
-SEC("lsm/bpf")
+SEC("lsm.s/bpf")
int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
{
switch (cmd) {
case BPF_RAW_TRACEPOINT_OPEN:
- bpf_probe_read_user_str(tp_name, sizeof(tp_name) - 1,
- (void *)attr->raw_tracepoint.name);
+ bpf_copy_from_user(tp_name, sizeof(tp_name) - 1,
+ (void *)attr->raw_tracepoint.name);
break;
default:
break;
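The section change matters here: bpf_copy_from_user() may fault in user pages and therefore sleep, so it is only allowed in sleepable programs, which is what the ".s" suffix in SEC("lsm.s/bpf") requests. A standalone sketch of the same pattern (illustrative, mirroring the hunk above):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char buf[64];
    char _license[] SEC("license") = "GPL";

    SEC("lsm.s/bpf")        /* ".s" = sleepable */
    int BPF_PROG(copy_name_sketch, int cmd, union bpf_attr *attr, unsigned int size)
    {
            /* sleepable-only helper:
             * long bpf_copy_from_user(void *dst, __u32 size, const void *user_ptr)
             */
            bpf_copy_from_user(buf, sizeof(buf) - 1,
                               (void *)attr->raw_tracepoint.name);
            return 0;
    }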
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
index a7278f064368..5059050f74f6 100644
--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -6,6 +6,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
+
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@ -134,7 +136,7 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
// we can only go as far as ~10 TLVs due to the BPF max stack size
// workaround: define induction variable "i" as "long" instead
// of "int" to prevent alu32 sub-register spilling.
- #pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (long i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
diff --git a/tools/testing/selftests/bpf/progs/test_siphash.h b/tools/testing/selftests/bpf/progs/test_siphash.h
new file mode 100644
index 000000000000..5d3a7ec36780
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_siphash.h
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_SIPHASH_H
+#define _TEST_SIPHASH_H
+
+/* include/linux/bitops.h */
+static inline u64 rol64(u64 word, unsigned int shift)
+{
+ return (word << (shift & 63)) | (word >> ((-shift) & 63));
+}
+
+/* include/linux/siphash.h */
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+/* lib/siphash.c */
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define PREAMBLE(len) \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define POSTAMBLE \
+ v3 ^= b; \
+ SIPROUND; \
+ SIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+static inline u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+ PREAMBLE(16)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ POSTAMBLE
+}
+#endif
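Since the header is freestanding arithmetic, it can be sanity-checked in a tiny user-space harness; a sketch, assuming only the kernel-compatible typedefs below for u64 and siphash_key_t:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;
    typedef struct {
            u64 key[2];     /* matches include/linux/siphash.h */
    } siphash_key_t;

    #include "test_siphash.h"

    int main(void)
    {
            siphash_key_t key = {
                    { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
            };

            /* the same (first, second) pair under the same key must
             * always produce the same 64-bit hash
             */
            printf("%016llx\n", (unsigned long long)siphash_2u64(1, 2, &key));
            return 0;
    }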
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
index c482110cfc95..a724a70c6700 100644
--- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -3,12 +3,14 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
char _license[] SEC("license") = "GPL";
SEC("tc")
int process(struct __sk_buff *skb)
{
- #pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < 5; i++) {
if (skb->cb[i] != i + 1)
return 1;
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index b2440a0ff422..d8d77bdffd3d 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -101,4 +101,69 @@ int bpf_spin_lock_test(struct __sk_buff *skb)
err:
return err;
}
+
+struct bpf_spin_lock lockA __hidden SEC(".data.A");
+
+__noinline
+static int static_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_lock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_lock(&lockA);
+ return ret + ctx->len;
+}
+
+__noinline
+static int static_subprog_unlock(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret + ctx->len;
+}
+
+SEC("tc")
+int lock_static_subprog_call(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_lock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ ret = static_subprog_lock(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("tc")
+int lock_static_subprog_unlock(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ ret = static_subprog_unlock(ctx);
+ return ret;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
index 86cd183ef6dc..43f40c4fe241 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c
@@ -201,4 +201,48 @@ CHECK(innermapval_mapval, &iv->lock, &v->lock);
#undef CHECK
+__noinline
+int global_subprog(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ ret += ctx->protocol;
+ return ret + ctx->mark;
+}
+
+__noinline
+static int static_subprog_call_global(struct __sk_buff *ctx)
+{
+ volatile int ret = 0;
+
+ if (ctx->protocol)
+ return ret;
+ return ret + ctx->len + global_subprog(ctx);
+}
+
+SEC("?tc")
+int lock_global_subprog_call1(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = global_subprog(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
+SEC("?tc")
+int lock_global_subprog_call2(struct __sk_buff *ctx)
+{
+ int ret = 0;
+
+ bpf_spin_lock(&lockA);
+ if (ctx->mark == 42)
+ ret = static_subprog_call_global(ctx);
+ bpf_spin_unlock(&lockA);
+ return ret;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 553a282d816a..7f74077d6622 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -9,6 +9,8 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
@@ -30,7 +32,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -59,7 +61,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index 2b64bc563a12..68a75436e8af 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -9,6 +9,8 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
@@ -30,7 +32,7 @@ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index 5489823c83fc..efc3c61f7852 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -9,6 +9,8 @@
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
@@ -31,7 +33,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
@@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
index e6e678aa9874..404124a93892 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -19,6 +19,9 @@
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
+#include "bpf_compiler.h"
+
+#pragma GCC diagnostic ignored "-Waddress-of-packed-member"
static const int cfg_port = 8000;
@@ -81,7 +84,7 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph)
iph->check = 0;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++)
csum += *iph16++;
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
new file mode 100644
index 000000000000..c8e4553648bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tracing_net.h"
+#include "bpf_kfuncs.h"
+#include "test_siphash.h"
+#include "test_tcp_custom_syncookie.h"
+
+#define MAX_PACKET_OFF 0xffff
+
+/* Hash is calculated for each client and split into ISN and TS.
+ *
+ * MSB LSB
+ * ISN: | 31 ... 8 | 7 6 | 5 | 4 | 3 2 1 0 |
+ * | Hash_1 | MSS | ECN | SACK | WScale |
+ *
+ * TS: | 31 ... 8 | 7 ... 0 |
+ * | Random | Hash_2 |
+ */
+#define COOKIE_BITS 8
+#define COOKIE_MASK (((__u32)1 << COOKIE_BITS) - 1)
+
+enum {
+ /* 0xf is invalid, and thus means that the SYN did not have WScale. */
+ BPF_SYNCOOKIE_WSCALE_MASK = (1 << 4) - 1,
+ BPF_SYNCOOKIE_SACK = (1 << 4),
+ BPF_SYNCOOKIE_ECN = (1 << 5),
+};
+
+#define MSS_LOCAL_IPV4 65495
+#define MSS_LOCAL_IPV6 65476
+
+const __u16 msstab4[] = {
+ 536,
+ 1300,
+ 1460,
+ MSS_LOCAL_IPV4,
+};
+
+const __u16 msstab6[] = {
+ 1280 - 60, /* IPV6_MIN_MTU - 60 */
+ 1480 - 60,
+ 9000 - 60,
+ MSS_LOCAL_IPV6,
+};
+
+static siphash_key_t test_key_siphash = {
+ { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }
+};
+
+struct tcp_syncookie {
+ struct __sk_buff *skb;
+ void *data;
+ void *data_end;
+ struct ethhdr *eth;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
+ __be32 *ptr32;
+ struct bpf_tcp_req_attrs attrs;
+ u32 off;
+ u32 cookie;
+ u64 first;
+};
+
+bool handled_syn, handled_ack;
+
+static int tcp_load_headers(struct tcp_syncookie *ctx)
+{
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+
+ if (ctx->eth + 1 > ctx->data_end)
+ goto err;
+
+ switch (bpf_ntohs(ctx->eth->h_proto)) {
+ case ETH_P_IP:
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+
+ if (ctx->ipv4 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv4->ihl != sizeof(*ctx->ipv4) / 4)
+ goto err;
+
+ if (ctx->ipv4->version != 4)
+ goto err;
+
+ if (ctx->ipv4->protocol != IPPROTO_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ break;
+ case ETH_P_IPV6:
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+
+ if (ctx->ipv6 + 1 > ctx->data_end)
+ goto err;
+
+ if (ctx->ipv6->version != 6)
+ goto err;
+
+ if (ctx->ipv6->nexthdr != NEXTHDR_TCP)
+ goto err;
+
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ break;
+ default:
+ goto err;
+ }
+
+ if (ctx->tcp + 1 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_reload_headers(struct tcp_syncookie *ctx)
+{
+ /* Without volatile, the verifier rejects the pointer arithmetic below
+ * with "R3 32-bit pointer arithmetic prohibited".
+ */
+ volatile u64 data_len = ctx->skb->data_end - ctx->skb->data;
+
+ if (ctx->tcp->doff < sizeof(*ctx->tcp) / 4)
+ goto err;
+
+ /* Needed to calculate csum and parse TCP options. */
+ if (bpf_skb_change_tail(ctx->skb, data_len + 60 - ctx->tcp->doff * 4, 0))
+ goto err;
+
+ ctx->data = (void *)(long)ctx->skb->data;
+ ctx->data_end = (void *)(long)ctx->skb->data_end;
+ ctx->eth = (struct ethhdr *)(long)ctx->skb->data;
+ if (ctx->ipv4) {
+ ctx->ipv4 = (struct iphdr *)(ctx->eth + 1);
+ ctx->ipv6 = NULL;
+ ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1);
+ } else {
+ ctx->ipv4 = NULL;
+ ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1);
+ ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1);
+ }
+
+ if ((void *)ctx->tcp + 60 > ctx->data_end)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static __sum16 tcp_v4_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_tcpudp_magic(ctx->ipv4->saddr, ctx->ipv4->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static __sum16 tcp_v6_csum(struct tcp_syncookie *ctx, __wsum csum)
+{
+ return csum_ipv6_magic(&ctx->ipv6->saddr, &ctx->ipv6->daddr,
+ ctx->tcp->doff * 4, IPPROTO_TCP, csum);
+}
+
+static int tcp_validate_header(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_reload_headers(ctx))
+ goto err;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+ /* If not on loopback, we would check that tcp_v4_csum(csum) is 0. */
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, ctx->ipv4->ihl * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (csum_fold(csum) != 0)
+ goto err;
+ } else if (ctx->ipv6) {
+ /* If not on loopback, we would check that tcp_v6_csum(csum) is 0. */
+ }
+
+ return 0;
+err:
+ return -1;
+}
+
+static __always_inline void *next(struct tcp_syncookie *ctx, __u32 sz)
+{
+ __u64 off = ctx->off;
+ __u8 *data;
+
+ /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */
+ if (off > MAX_PACKET_OFF - sz)
+ return NULL;
+
+ data = ctx->data + off;
+ barrier_var(data);
+ if (data + sz >= ctx->data_end)
+ return NULL;
+
+ ctx->off += sz;
+ return data;
+}
+
+static int tcp_parse_option(__u32 index, struct tcp_syncookie *ctx)
+{
+ __u8 *opcode, *opsize, *wscale;
+ __u32 *tsval, *tsecr;
+ __u16 *mss;
+ __u32 off;
+
+ off = ctx->off;
+ opcode = next(ctx, 1);
+ if (!opcode)
+ goto stop;
+
+ if (*opcode == TCPOPT_EOL)
+ goto stop;
+
+ if (*opcode == TCPOPT_NOP)
+ goto next;
+
+ opsize = next(ctx, 1);
+ if (!opsize)
+ goto stop;
+
+ if (*opsize < 2)
+ goto stop;
+
+ switch (*opcode) {
+ case TCPOPT_MSS:
+ mss = next(ctx, 2);
+ if (*opsize == TCPOLEN_MSS && ctx->tcp->syn && mss)
+ ctx->attrs.mss = get_unaligned_be16(mss);
+ break;
+ case TCPOPT_WINDOW:
+ wscale = next(ctx, 1);
+ if (*opsize == TCPOLEN_WINDOW && ctx->tcp->syn && wscale) {
+ ctx->attrs.wscale_ok = 1;
+ ctx->attrs.snd_wscale = *wscale;
+ }
+ break;
+ case TCPOPT_TIMESTAMP:
+ tsval = next(ctx, 4);
+ tsecr = next(ctx, 4);
+ if (*opsize == TCPOLEN_TIMESTAMP && tsval && tsecr) {
+ ctx->attrs.rcv_tsval = get_unaligned_be32(tsval);
+ ctx->attrs.rcv_tsecr = get_unaligned_be32(tsecr);
+
+ if (ctx->tcp->syn && ctx->attrs.rcv_tsecr)
+ ctx->attrs.tstamp_ok = 0;
+ else
+ ctx->attrs.tstamp_ok = 1;
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (*opsize == TCPOLEN_SACK_PERM && ctx->tcp->syn)
+ ctx->attrs.sack_ok = 1;
+ break;
+ }
+
+ ctx->off = off + *opsize;
+next:
+ return 0;
+stop:
+ return 1;
+}
+
+static void tcp_parse_options(struct tcp_syncookie *ctx)
+{
+ ctx->off = (__u8 *)(ctx->tcp + 1) - (__u8 *)ctx->data;
+
+ bpf_loop(40, tcp_parse_option, ctx, 0);
+}
+
+static int tcp_validate_sysctl(struct tcp_syncookie *ctx)
+{
+ if ((ctx->ipv4 && ctx->attrs.mss != MSS_LOCAL_IPV4) ||
+ (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6))
+ goto err;
+
+ if (!ctx->attrs.wscale_ok || ctx->attrs.snd_wscale != 7)
+ goto err;
+
+ if (!ctx->attrs.tstamp_ok)
+ goto err;
+
+ if (!ctx->attrs.sack_ok)
+ goto err;
+
+ if (!ctx->tcp->ece || !ctx->tcp->cwr)
+ goto err;
+
+ return 0;
+err:
+ return -1;
+}
+
+static void tcp_prepare_cookie(struct tcp_syncookie *ctx)
+{
+ u32 seq = bpf_ntohl(ctx->tcp->seq);
+ u64 first = 0, second;
+ int mssind = 0;
+ u32 hash;
+
+ if (ctx->ipv4) {
+ for (mssind = ARRAY_SIZE(msstab4) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab4[mssind])
+ break;
+
+ ctx->attrs.mss = msstab4[mssind];
+
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ } else if (ctx->ipv6) {
+ for (mssind = ARRAY_SIZE(msstab6) - 1; mssind; mssind--)
+ if (ctx->attrs.mss >= msstab6[mssind])
+ break;
+
+ ctx->attrs.mss = msstab6[mssind];
+
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+ }
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok) {
+ ctx->attrs.rcv_tsecr = bpf_get_prandom_u32();
+ ctx->attrs.rcv_tsecr &= ~COOKIE_MASK;
+ ctx->attrs.rcv_tsecr |= hash & COOKIE_MASK;
+ }
+
+ hash &= ~COOKIE_MASK;
+ hash |= mssind << 6;
+
+ if (ctx->attrs.wscale_ok)
+ hash |= ctx->attrs.snd_wscale & BPF_SYNCOOKIE_WSCALE_MASK;
+
+ if (ctx->attrs.sack_ok)
+ hash |= BPF_SYNCOOKIE_SACK;
+
+ if (ctx->attrs.tstamp_ok && ctx->tcp->ece && ctx->tcp->cwr)
+ hash |= BPF_SYNCOOKIE_ECN;
+
+ ctx->cookie = hash;
+}
+
+static void tcp_write_options(struct tcp_syncookie *ctx)
+{
+ ctx->ptr32 = (__be32 *)(ctx->tcp + 1);
+
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_MSS << 24 | TCPOLEN_MSS << 16 |
+ ctx->attrs.mss);
+
+ if (ctx->attrs.wscale_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_WINDOW << 16 |
+ TCPOLEN_WINDOW << 8 |
+ ctx->attrs.snd_wscale);
+
+ if (ctx->attrs.tstamp_ok) {
+ if (ctx->attrs.sack_ok)
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_SACK_PERM << 24 |
+ TCPOLEN_SACK_PERM << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+ else
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP);
+
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsecr);
+ *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsval);
+ } else if (ctx->attrs.sack_ok) {
+ *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 |
+ TCPOPT_SACK_PERM << 8 |
+ TCPOLEN_SACK_PERM);
+ }
+}
+
+static int tcp_handle_syn(struct tcp_syncookie *ctx)
+{
+ s64 csum;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_sysctl(ctx))
+ goto err;
+
+ tcp_prepare_cookie(ctx);
+ tcp_write_options(ctx);
+
+ swap(ctx->tcp->source, ctx->tcp->dest);
+ ctx->tcp->check = 0;
+ ctx->tcp->ack_seq = bpf_htonl(bpf_ntohl(ctx->tcp->seq) + 1);
+ ctx->tcp->seq = bpf_htonl(ctx->cookie);
+ ctx->tcp->doff = ((long)ctx->ptr32 - (long)ctx->tcp) >> 2;
+ ctx->tcp->ack = 1;
+ if (!ctx->attrs.tstamp_ok || !ctx->tcp->ece || !ctx->tcp->cwr)
+ ctx->tcp->ece = 0;
+ ctx->tcp->cwr = 0;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0);
+ if (csum < 0)
+ goto err;
+
+ if (ctx->ipv4) {
+ swap(ctx->ipv4->saddr, ctx->ipv4->daddr);
+ ctx->tcp->check = tcp_v4_csum(ctx, csum);
+
+ ctx->ipv4->check = 0;
+ ctx->ipv4->tos = 0;
+ ctx->ipv4->tot_len = bpf_htons((long)ctx->ptr32 - (long)ctx->ipv4);
+ ctx->ipv4->id = 0;
+ ctx->ipv4->ttl = 64;
+
+ csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, sizeof(*ctx->ipv4), 0);
+ if (csum < 0)
+ goto err;
+
+ ctx->ipv4->check = csum_fold(csum);
+ } else if (ctx->ipv6) {
+ swap(ctx->ipv6->saddr, ctx->ipv6->daddr);
+ ctx->tcp->check = tcp_v6_csum(ctx, csum);
+
+ *(__be32 *)ctx->ipv6 = bpf_htonl(0x60000000);
+ ctx->ipv6->payload_len = bpf_htons((long)ctx->ptr32 - (long)ctx->tcp);
+ ctx->ipv6->hop_limit = 64;
+ }
+
+ swap_array(ctx->eth->h_source, ctx->eth->h_dest);
+
+ if (bpf_skb_change_tail(ctx->skb, (long)ctx->ptr32 - (long)ctx->eth, 0))
+ goto err;
+
+ return bpf_redirect(ctx->skb->ifindex, 0);
+err:
+ return TC_ACT_SHOT;
+}
+
+static int tcp_validate_cookie(struct tcp_syncookie *ctx)
+{
+ u32 cookie = bpf_ntohl(ctx->tcp->ack_seq) - 1;
+ u32 seq = bpf_ntohl(ctx->tcp->seq) - 1;
+ u64 first = 0, second;
+ int mssind;
+ u32 hash;
+
+ if (ctx->ipv4)
+ first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr;
+ else if (ctx->ipv6)
+ first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 |
+ ctx->ipv6->daddr.in6_u.u6_addr32[0];
+
+ second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest;
+ hash = siphash_2u64(first, second, &test_key_siphash);
+
+ if (ctx->attrs.tstamp_ok)
+ hash -= ctx->attrs.rcv_tsecr & COOKIE_MASK;
+ else
+ hash &= ~COOKIE_MASK;
+
+ hash -= cookie & ~COOKIE_MASK;
+ if (hash)
+ goto err;
+
+ mssind = (cookie & (3 << 6)) >> 6;
+ if (ctx->ipv4) {
+ if (mssind > ARRAY_SIZE(msstab4))
+ goto err;
+
+ ctx->attrs.mss = msstab4[mssind];
+ } else {
+ if (mssind > ARRAY_SIZE(msstab6))
+ goto err;
+
+ ctx->attrs.mss = msstab6[mssind];
+ }
+
+ ctx->attrs.snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.rcv_wscale = ctx->attrs.snd_wscale;
+ ctx->attrs.wscale_ok = ctx->attrs.snd_wscale == BPF_SYNCOOKIE_WSCALE_MASK;
+ ctx->attrs.sack_ok = cookie & BPF_SYNCOOKIE_SACK;
+ ctx->attrs.ecn_ok = cookie & BPF_SYNCOOKIE_ECN;
+
+ return 0;
+err:
+ return -1;
+}
+
+static int tcp_handle_ack(struct tcp_syncookie *ctx)
+{
+ struct bpf_sock_tuple tuple;
+ struct bpf_sock *skc;
+ int ret = TC_ACT_OK;
+ struct sock *sk;
+ u32 tuple_size;
+
+ if (ctx->ipv4) {
+ tuple.ipv4.saddr = ctx->ipv4->saddr;
+ tuple.ipv4.daddr = ctx->ipv4->daddr;
+ tuple.ipv4.sport = ctx->tcp->source;
+ tuple.ipv4.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv4);
+ } else if (ctx->ipv6) {
+ __builtin_memcpy(tuple.ipv6.saddr, &ctx->ipv6->saddr, sizeof(tuple.ipv6.saddr));
+ __builtin_memcpy(tuple.ipv6.daddr, &ctx->ipv6->daddr, sizeof(tuple.ipv6.daddr));
+ tuple.ipv6.sport = ctx->tcp->source;
+ tuple.ipv6.dport = ctx->tcp->dest;
+ tuple_size = sizeof(tuple.ipv6);
+ } else {
+ goto out;
+ }
+
+ skc = bpf_skc_lookup_tcp(ctx->skb, &tuple, tuple_size, -1, 0);
+ if (!skc)
+ goto out;
+
+ if (skc->state != TCP_LISTEN)
+ goto release;
+
+ sk = (struct sock *)bpf_skc_to_tcp_sock(skc);
+ if (!sk)
+ goto err;
+
+ if (tcp_validate_header(ctx))
+ goto err;
+
+ tcp_parse_options(ctx);
+
+ if (tcp_validate_cookie(ctx))
+ goto err;
+
+ ret = bpf_sk_assign_tcp_reqsk(ctx->skb, sk, &ctx->attrs, sizeof(ctx->attrs));
+ if (ret < 0)
+ goto err;
+
+release:
+ bpf_sk_release(skc);
+out:
+ return ret;
+
+err:
+ ret = TC_ACT_SHOT;
+ goto release;
+}
+
+SEC("tc")
+int tcp_custom_syncookie(struct __sk_buff *skb)
+{
+ struct tcp_syncookie ctx = {
+ .skb = skb,
+ };
+
+ if (tcp_load_headers(&ctx))
+ return TC_ACT_OK;
+
+ if (ctx.tcp->rst)
+ return TC_ACT_OK;
+
+ if (ctx.tcp->syn) {
+ if (ctx.tcp->ack)
+ return TC_ACT_OK;
+
+ handled_syn = true;
+
+ return tcp_handle_syn(&ctx);
+ }
+
+ handled_ack = true;
+
+ return tcp_handle_ack(&ctx);
+}
+
+char _license[] SEC("license") = "GPL";
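To make the ISN layout from the header comment above concrete, here is a sketch of how the low byte packs and unpacks (pure bit arithmetic mirroring tcp_prepare_cookie()/tcp_validate_cookie(); the helper name is illustrative):

    /* bits 0-3: WScale, bit 4: SACK, bit 5: ECN,
     * bits 6-7: MSS table index, bits 8-31: upper hash bits
     */
    static __always_inline void isn_decode_sketch(__u32 isn)
    {
            __u32 wscale = isn & BPF_SYNCOOKIE_WSCALE_MASK;
            bool sack    = isn & BPF_SYNCOOKIE_SACK;
            bool ecn     = isn & BPF_SYNCOOKIE_ECN;
            __u32 mssind = (isn & (3 << 6)) >> 6;
            __u32 hash_1 = isn & ~COOKIE_MASK;

            (void)wscale; (void)sack; (void)ecn; (void)mssind; (void)hash_1;
    }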
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
new file mode 100644
index 000000000000..29a6a53cf229
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#ifndef _TEST_TCP_SYNCOOKIE_H
+#define _TEST_TCP_SYNCOOKIE_H
+
+#define __packed __attribute__((__packed__))
+#define __force
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define swap(a, b) \
+ do { \
+ typeof(a) __tmp = (a); \
+ (a) = (b); \
+ (b) = __tmp; \
+ } while (0)
+
+#define swap_array(a, b) \
+ do { \
+ typeof(a) __tmp[sizeof(a)]; \
+ __builtin_memcpy(__tmp, a, sizeof(a)); \
+ __builtin_memcpy(a, b, sizeof(a)); \
+ __builtin_memcpy(b, __tmp, sizeof(a)); \
+ } while (0)
+
+/* asm-generic/unaligned.h */
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed * __pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return bpf_ntohs(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return bpf_ntohl(__get_unaligned_t(__be32, p));
+}
+
+/* lib/checksum.c */
+static inline u32 from64to32(u64 x)
+{
+ /* add up 32-bit and 32-bit for 32+c bit */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up carry.. */
+ x = (x & 0xffffffff) + (x >> 32);
+ return (u32)x;
+}
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ unsigned long long s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+ s += proto + len;
+#else
+ s += (proto + len) << 8;
+#endif
+ return (__force __wsum)from64to32(s);
+}
+
+/* asm-generic/checksum.h */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/* net/ipv6/ip6_checksum.c */
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __wsum csum)
+{
+ int carry;
+ __u32 ulen;
+ __u32 uproto;
+ __u32 sum = (__force u32)csum;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)saddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)saddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[0];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[1];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[2];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)daddr->in6_u.u6_addr32[3];
+ carry = (sum < (__force u32)daddr->in6_u.u6_addr32[3]);
+ sum += carry;
+
+ ulen = (__force u32)bpf_htonl((__u32)len);
+ sum += ulen;
+ carry = (sum < ulen);
+ sum += carry;
+
+ uproto = (__force u32)bpf_htonl(proto);
+ sum += uproto;
+ carry = (sum < uproto);
+ sum += carry;
+
+ return csum_fold((__force __wsum)sum);
+}
+#endif
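csum_fold() collapses a 32-bit one's-complement sum into 16 bits by twice adding the upper half into the lower half, then complementing. A worked example, checkable with a plain user-space build under the compatible typedefs below:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint16_t __sum16;
    typedef uint32_t __wsum;
    #define __force

    static inline __sum16 csum_fold(__wsum csum)
    {
            u32 sum = (__force u32)csum;

            sum = (sum & 0xffff) + (sum >> 16); /* 0x1a2b3c4d -> 0x5678 */
            sum = (sum & 0xffff) + (sum >> 16); /* no carry remains */
            return (__force __sum16)~sum;       /* ~0x5678 == 0xa987 */
    }

    int main(void)
    {
            assert(csum_fold(0x1a2b3c4d) == (__sum16)0xa987);
            return 0;
    }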
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index cf7ed8cbb1fe..a3f3f43fc195 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -59,7 +59,7 @@ int bpf_testcb(struct bpf_sock_ops *skops)
asm volatile (
"%[op] = *(u32 *)(%[skops] +96)"
- : [op] "+r"(op)
+ : [op] "=r"(op)
: [skops] "r"(skops)
:);
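The constraint fix matters: "+r" declares op as read-write, making its prior, uninitialized value an input to the asm, while "=r" marks it write-only, which is what the load actually does. As a sketch of the corrected statement in isolation:

    __u32 op;

    /* the statement only writes op, so a write-only output
     * constraint ("=r") is the accurate one
     */
    asm volatile ("%[op] = *(u32 *)(%[skops] +96)"
                  : [op] "=r"(op)
                  : [skops] "r"(skops)
                  :);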
diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index d7a9a74b7245..8caf58be5818 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
@@ -19,6 +19,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
+#include "bpf_compiler.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
@@ -137,7 +138,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
iph->ttl = 8;
next_iph = (__u16 *)iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
index 78c368e71797..67a77944ef29 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c
@@ -18,11 +18,11 @@
#include "test_iptunnel_common.h"
#include "bpf_kfuncs.h"
-const size_t tcphdr_sz = sizeof(struct tcphdr);
-const size_t udphdr_sz = sizeof(struct udphdr);
-const size_t ethhdr_sz = sizeof(struct ethhdr);
-const size_t iphdr_sz = sizeof(struct iphdr);
-const size_t ipv6hdr_sz = sizeof(struct ipv6hdr);
+#define tcphdr_sz sizeof(struct tcphdr)
+#define udphdr_sz sizeof(struct udphdr)
+#define ethhdr_sz sizeof(struct ethhdr)
+#define iphdr_sz sizeof(struct iphdr)
+#define ipv6hdr_sz sizeof(struct ipv6hdr)
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
index c98fb44156f0..93267a68825b 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
@@ -15,6 +15,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "test_iptunnel_common.h"
+#include "bpf_compiler.h"
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
@@ -133,7 +134,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp)
iph->ttl = 8;
next_iph = (__u16 *)iph;
-#pragma clang loop unroll(disable)
+ __pragma_loop_no_unroll
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 42c8f6ded0e4..5c7e4758a0ca 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -15,6 +15,7 @@
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
@@ -362,7 +363,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
iph->ttl = 4;
next_iph_u16 = (__u16 *) iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
@@ -409,7 +410,7 @@ int send_icmp_reply(void *data, void *data_end)
iph->saddr = tmp_addr;
iph->check = 0;
next_iph_u16 = (__u16 *) iph;
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c
index 8b946c8188c6..f615da97df26 100644
--- a/tools/testing/selftests/bpf/progs/timer.c
+++ b/tools/testing/selftests/bpf/progs/timer.c
@@ -51,7 +51,8 @@ struct {
__uint(max_entries, 1);
__type(key, int);
__type(value, struct elem);
-} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps");
+} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
+ race_array SEC(".maps");
__u64 bss_data;
__u64 abs_data;
@@ -390,3 +391,34 @@ int BPF_PROG2(test5, int, a)
return 0;
}
+
+static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
+{
+ bpf_timer_start(timer, 1000000, 0);
+ return 0;
+}
+
+SEC("syscall")
+int race(void *ctx)
+{
+ struct bpf_timer *timer;
+ int err, race_key = 0;
+ struct elem init;
+
+ __builtin_memset(&init, 0, sizeof(struct elem));
+ bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);
+
+ timer = bpf_map_lookup_elem(&race_array, &race_key);
+ if (!timer)
+ return 1;
+
+ err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
+ if (err && err != -EBUSY)
+ return 1;
+
+ bpf_timer_set_callback(timer, race_timer_callback);
+ bpf_timer_start(timer, 0, 0);
+ bpf_timer_cancel(timer);
+
+ return 0;
+}
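Since race() is a SEC("syscall") program, the user-space side presumably drives it via BPF_PROG_TEST_RUN, repeatedly and from several threads, to provoke the timer init/set_callback race. A minimal sketch, assuming a generated timer.skel.h skeleton:

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>
    #include "timer.skel.h"

    static int run_race_once(struct timer *skel)
    {
            LIBBPF_OPTS(bpf_test_run_opts, opts);

            /* one test-run = one init/set_callback/start/cancel cycle */
            return bpf_prog_test_run_opts(bpf_program__fd(skel->progs.race),
                                          &opts);
    }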
diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c
new file mode 100644
index 000000000000..e4d59b6ba743
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/token_lsm.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int my_pid;
+bool reject_capable;
+bool reject_cmd;
+
+SEC("lsm/bpf_token_capable")
+int BPF_PROG(token_capable, struct bpf_token *token, int cap)
+{
+ if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+ if (reject_capable)
+ return -1;
+ return 0;
+}
+
+SEC("lsm/bpf_token_cmd")
+int BPF_PROG(token_cmd, struct bpf_token *token, enum bpf_cmd cmd)
+{
+ if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+ if (reject_cmd)
+ return -1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c
new file mode 100644
index 000000000000..d41665d2ec8c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tracing_failure.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("?fentry/bpf_spin_lock")
+int BPF_PROG(test_spin_lock, struct bpf_spin_lock *lock)
+{
+ return 0;
+}
+
+SEC("?fentry/bpf_spin_unlock")
+int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c
index a9629ac230fd..9d808b8f4ab0 100644
--- a/tools/testing/selftests/bpf/progs/type_cast.c
+++ b/tools/testing/selftests/bpf/progs/type_cast.c
@@ -4,6 +4,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#include "bpf_kfuncs.h"
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
@@ -19,9 +20,6 @@ char name[IFNAMSIZ];
unsigned int inum;
unsigned int meta_len, frag0_len, kskb_len, kskb2_len;
-void *bpf_cast_to_kern_ctx(void *) __ksym;
-void *bpf_rdonly_cast(void *, __u32) __ksym;
-
SEC("?xdp")
int md_xdp(struct xdp_md *ctx)
{
@@ -48,13 +46,12 @@ int md_skb(struct __sk_buff *skb)
/* Simulate the following kernel macro:
* #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
*/
- shared_info = bpf_rdonly_cast(kskb->head + kskb->end,
- bpf_core_type_id_kernel(struct skb_shared_info));
+ shared_info = bpf_core_cast(kskb->head + kskb->end, struct skb_shared_info);
meta_len = shared_info->meta_len;
frag0_len = shared_info->frag_list->len;
/* kskb2 should be equal to kskb */
- kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff));
+ kskb2 = bpf_core_cast(kskb, typeof(*kskb2));
kskb2_len = kskb2->len;
return 0;
}
@@ -65,7 +62,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
struct task_struct *task, *task_dup;
task = bpf_get_current_task_btf();
- task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct));
+ task_dup = bpf_core_cast(task, struct task_struct);
(void)bpf_task_storage_get(&enter_id, task_dup, 0, 0);
return 0;
}
@@ -73,7 +70,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
SEC("?tracepoint/syscalls/sys_enter_nanosleep")
int kctx_u64(void *ctx)
{
- u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64));
+ u64 *kctx = bpf_core_cast(ctx, u64);
(void)kctx;
return 0;
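The bpf_core_cast() spelling adopted throughout this file is a thin convenience macro from libbpf's bpf_core_read.h wrapping the same kfunc; roughly (shown here as an assumption, the authoritative definition lives in libbpf):

    #define bpf_core_cast(ptr, type)                                      \
            ((typeof(type) *)bpf_rdonly_cast((ptr),                       \
                                             bpf_core_type_id_kernel(type)))

This removes both the manual __ksym declaration and the explicit bpf_core_type_id_kernel() call at every use site.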
diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
index be95570ab382..28b602ac9cbe 100644
--- a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
+++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c
@@ -568,7 +568,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
-__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)")
+__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
new file mode 100644
index 000000000000..4ab0ef18d7eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
+#include "xdp_metadata.h"
+#include "bpf_kfuncs.h"
+
+extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+extern void bpf_task_release(struct task_struct *p) __ksym __weak;
+
+__weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable)
+{
+ if (!task)
+ return 0;
+ return task->pid + task->tgid;
+}
+
+__weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_nullable)
+{
+ return subprog_trusted_task_nullable(task) + subprog_trusted_task_nullable(NULL);
+}
+
+SEC("?tp_btf/task_newtask")
+__success __log_level(2)
+__msg("Validating subprog_trusted_task_nullable() func#1...")
+__msg(": R1=trusted_ptr_or_null_task_struct(")
+int trusted_task_arg_nullable(void *ctx)
+{
+ struct task_struct *t1 = bpf_get_current_task_btf();
+ struct task_struct *t2 = bpf_task_acquire(t1);
+ int res = 0;
+
+ /* known NULL */
+ res += subprog_trusted_task_nullable(NULL);
+
+ /* known non-NULL */
+ res += subprog_trusted_task_nullable(t1);
+ res += subprog_trusted_task_nullable_extra_layer(t1);
+
+ /* unknown if NULL or not */
+ res += subprog_trusted_task_nullable(t2);
+ res += subprog_trusted_task_nullable_extra_layer(t2);
+
+ if (t2) {
+ /* known non-NULL after explicit NULL check, just in case */
+ res += subprog_trusted_task_nullable(t2);
+ res += subprog_trusted_task_nullable_extra_layer(t2);
+
+ bpf_task_release(t2);
+ }
+
+ return res;
+}
+
+__weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted)
+{
+ return task->pid + task->tgid;
+}
+
+SEC("?kprobe")
+__failure __log_level(2)
+__msg("R1 type=scalar expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')")
+int trusted_task_arg_nonnull_fail1(void *ctx)
+{
+ return subprog_trusted_task_nonnull(NULL);
+}
+
+SEC("?tp_btf/task_newtask")
+__failure __log_level(2)
+__msg("R1 type=ptr_or_null_ expected=ptr_, trusted_ptr_, rcu_ptr_")
+__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')")
+int trusted_task_arg_nonnull_fail2(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+ struct task_struct *nullable;
+ int res;
+
+ nullable = bpf_task_acquire(t);
+
+ /* should fail, PTR_TO_BTF_ID_OR_NULL */
+ res = subprog_trusted_task_nonnull(nullable);
+
+ if (nullable)
+ bpf_task_release(nullable);
+
+ return res;
+}
+
+SEC("?kprobe")
+__success __log_level(2)
+__msg("Validating subprog_trusted_task_nonnull() func#1...")
+__msg(": R1=trusted_ptr_task_struct(")
+int trusted_task_arg_nonnull(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+
+ return subprog_trusted_task_nonnull(t);
+}
+
+struct task_struct___local {} __attribute__((preserve_access_index));
+
+__weak int subprog_nullable_task_flavor(
+ struct task_struct___local *task __arg_trusted __arg_nullable)
+{
+ char buf[16];
+
+ if (!task)
+ return 0;
+
+ return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
+}
+
+SEC("?uprobe.s")
+__success __log_level(2)
+__msg("Validating subprog_nullable_task_flavor() func#1...")
+__msg(": R1=trusted_ptr_or_null_task_struct(")
+int flavor_ptr_nullable(void *ctx)
+{
+ struct task_struct___local *t = (void *)bpf_get_current_task_btf();
+
+ return subprog_nullable_task_flavor(t);
+}
+
+__weak int subprog_nonnull_task_flavor(struct task_struct___local *task __arg_trusted)
+{
+ char buf[16];
+
+ return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0);
+}
+
+SEC("?uprobe.s")
+__success __log_level(2)
+__msg("Validating subprog_nonnull_task_flavor() func#1...")
+__msg(": R1=trusted_ptr_task_struct(")
+int flavor_ptr_nonnull(void *ctx)
+{
+ struct task_struct *t = bpf_get_current_task_btf();
+
+ return subprog_nonnull_task_flavor((void *)t);
+}
+
+__weak int subprog_trusted_destroy(struct task_struct *task __arg_trusted)
+{
+ bpf_task_release(task); /* should be rejected */
+
+ return 0;
+}
+
+SEC("?tp_btf/task_newtask")
+__failure __log_level(2)
+__msg("release kernel function bpf_task_release expects refcounted PTR_TO_BTF_ID")
+int BPF_PROG(trusted_destroy_fail, struct task_struct *task, u64 clone_flags)
+{
+ return subprog_trusted_destroy(task);
+}
+
+__weak int subprog_trusted_acq_rel(struct task_struct *task __arg_trusted)
+{
+ struct task_struct *owned;
+
+ owned = bpf_task_acquire(task);
+ if (!owned)
+ return 0;
+
+ bpf_task_release(owned); /* this one is OK, we acquired it locally */
+
+ return 0;
+}
+
+SEC("?tp_btf/task_newtask")
+__success __log_level(2)
+int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags)
+{
+ return subprog_trusted_acq_rel(task);
+}
+
+char _license[] SEC("license") = "GPL";
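The __arg_trusted and __arg_nullable annotations used throughout this file are BTF decl tags on global-subprog arguments; a sketch of the definitions assumed from the selftests' bpf_misc.h:

    #define __arg_trusted  __attribute__((btf_decl_tag("arg:trusted")))
    #define __arg_nullable __attribute__((btf_decl_tag("arg:nullable")))

The verifier reads these tags from BTF and types the argument register accordingly, which is what the expected trusted_ptr_task_struct vs. trusted_ptr_or_null_task_struct log messages above check.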
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
index 67dddd941891..baff5ffe9405 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -115,6 +115,35 @@ int arg_tag_nullable_ptr_fail(void *ctx)
return subprog_nullable_ptr_bad(&x);
}
+typedef struct {
+ int x;
+} user_struct_t;
+
+__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
+{
+ return t ? t->x : 0;
+}
+
+SEC("?tracepoint")
+__failure __log_level(2)
+__msg("invalid bpf_context access")
+__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
+int anon_user_mem_invalid(void *ctx)
+{
+ /* can't pass PTR_TO_CTX as user memory */
+ return subprog_user_anon_mem(ctx);
+}
+
+SEC("?tracepoint")
+__success __log_level(2)
+__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
+int anon_user_mem_valid(void *ctx)
+{
+ user_struct_t t = { .x = 42 };
+
+ return subprog_user_anon_mem(&t);
+}
+
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
return (*p1) * (*p2); /* good, no need for NULL checks */
diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
index 5905e036e0ea..a955a6358206 100644
--- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
+++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c
@@ -239,4 +239,74 @@ int bpf_loop_iter_limit_nested(void *unused)
return 1000 * a + b + c;
}
+struct iter_limit_bug_ctx {
+ __u64 a;
+ __u64 b;
+ __u64 c;
+};
+
+static __naked void iter_limit_bug_cb(void)
+{
+ /* This is the same as the C code below, but written
+ * in assembly to control which branches are fall-through.
+ *
+ * switch (bpf_get_prandom_u32()) {
+ * case 1: ctx->a = 42; break;
+ * case 2: ctx->b = 42; break;
+ * default: ctx->c = 42; break;
+ * }
+ */
+ asm volatile (
+ "r9 = r2;"
+ "call %[bpf_get_prandom_u32];"
+ "r1 = r0;"
+ "r2 = 42;"
+ "r0 = 0;"
+ "if r1 == 0x1 goto 1f;"
+ "if r1 == 0x2 goto 2f;"
+ "*(u64 *)(r9 + 16) = r2;"
+ "exit;"
+ "1: *(u64 *)(r9 + 0) = r2;"
+ "exit;"
+ "2: *(u64 *)(r9 + 8) = r2;"
+ "exit;"
+ :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all
+ );
+}
+
+SEC("tc")
+__failure
+__flag(BPF_F_TEST_STATE_FREQ)
+int iter_limit_bug(struct __sk_buff *skb)
+{
+ struct iter_limit_bug_ctx ctx = { 7, 7, 7 };
+
+ bpf_loop(2, iter_limit_bug_cb, &ctx, 0);
+
+ /* This is the same as the C code below,
+ * written in assembly to guarantee the order of the checks.
+ *
+ * if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
+ * asm volatile("r1 /= 0;":::"r1");
+ */
+ asm volatile (
+ "r1 = *(u64 *)%[ctx_a];"
+ "if r1 != 42 goto 1f;"
+ "r1 = *(u64 *)%[ctx_b];"
+ "if r1 != 42 goto 1f;"
+ "r1 = *(u64 *)%[ctx_c];"
+ "if r1 != 7 goto 1f;"
+ "r1 /= 0;"
+ "1:"
+ :
+ : [ctx_a]"m"(ctx.a),
+ [ctx_b]"m"(ctx.b),
+ [ctx_c]"m"(ctx.c)
+ : "r1"
+ );
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c
index 71735dbf33d4..e07b43b78fd2 100644
--- a/tools/testing/selftests/bpf/progs/verifier_loops1.c
+++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -259,4 +259,28 @@ l0_%=: r2 += r1; \
" ::: __clobber_all);
}
+SEC("xdp")
+__success
+__naked void not_an_infinite_loop(void)
+{
+ asm volatile (" \
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0xff; \
+ *(u64 *)(r10 - 8) = r0; \
+ r0 = 0; \
+loop_%=: \
+ r0 = *(u64 *)(r10 - 8); \
+ if r0 > 10 goto exit_%=; \
+ r0 += 1; \
+ *(u64 *)(r10 - 8) = r0; \
+ r0 = 0; \
+ goto loop_%=; \
+exit_%=: \
+ r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 39fe3372e0e0..85e48069c9e6 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -217,7 +217,7 @@ __naked void uninit_u32_from_the_stack(void)
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
-__failure __msg("invalid access to packet")
+__success __retval(0)
__naked void u16_offset_to_skb_data(void)
{
asm volatile (" \
@@ -225,13 +225,19 @@ __naked void u16_offset_to_skb_data(void)
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
- r4 = *(u16*)(r10 - 8); \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r4 = *(u16*)(r10 - 8);"
+#else
+ "r4 = *(u16*)(r10 - 6);"
+#endif
+ " \
r0 = r2; \
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\
r0 += r4; \
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
if r0 > r3 goto l0_%=; \
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
+ /* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
@@ -243,7 +249,7 @@ l0_%=: r0 = 0; \
SEC("tc")
__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
-__failure __msg("invalid access to packet")
+__failure __msg("math between pkt pointer and register with unbounded min value is not allowed")
__naked void u64_offset_to_skb_data(void)
{
asm volatile (" \
@@ -253,13 +259,11 @@ __naked void u64_offset_to_skb_data(void)
w7 = 20; \
*(u32*)(r10 - 4) = r6; \
*(u32*)(r10 - 8) = r7; \
- r4 = *(u16*)(r10 - 8); \
+ r4 = *(u64*)(r10 - 8); \
r0 = r2; \
- /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4= */ \
r0 += r4; \
- /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
if r0 > r3 goto l0_%=; \
- /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
r0 = *(u32*)(r2 + 0); \
l0_%=: r0 = 0; \
exit; \
@@ -270,7 +274,7 @@ l0_%=: r0 = 0; \
}
SEC("tc")
-__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
+__description("Spill a u32 const scalar. Refill as u16 from MSB. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
@@ -279,7 +283,13 @@ __naked void _6_offset_to_skb_data(void)
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
w4 = 20; \
*(u32*)(r10 - 8) = r4; \
- r4 = *(u16*)(r10 - 6); \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r4 = *(u16*)(r10 - 6);"
+#else
+ "r4 = *(u16*)(r10 - 8);"
+#endif
+ " \
r0 = r2; \
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
r0 += r4; \
@@ -454,9 +464,9 @@ l0_%=: r1 >>= 16; \
SEC("raw_tp")
__log_level(2)
__success
-__msg("fp-8=0m??mmmm")
-__msg("fp-16=00mm??mm")
-__msg("fp-24=00mm???m")
+__msg("fp-8=0m??scalar()")
+__msg("fp-16=00mm??scalar()")
+__msg("fp-24=00mm???scalar()")
__naked void spill_subregs_preserve_stack_zero(void)
{
asm volatile (
@@ -495,14 +505,14 @@ char single_byte_buf[1] SEC(".data.single_byte_buf");
SEC("raw_tp")
__log_level(2)
__success
-/* make sure fp-8 is all STACK_ZERO */
-__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=00000000")
+/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */
+__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0")
/* but fp-16 is spilled IMPRECISE zero const reg */
__msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0")
-/* validate that assigning R2 from STACK_ZERO doesn't mark register
+/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register
* precise immediately; if necessary, it will be marked precise later
*/
-__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=00000000")
+__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0")
/* similarly, when R2 is assigned from spilled register, it is initially
* imprecise, but will be marked precise later once it is used in precise context
*/
@@ -520,14 +530,14 @@ __msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0")
__naked void partial_stack_load_preserves_zeros(void)
{
asm volatile (
- /* fp-8 is all STACK_ZERO */
+ /* fp-8 is value zero (represented by a zero value fake reg) */
".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */
/* fp-16 is const zero register */
"r0 = 0;"
"*(u64 *)(r10 -16) = r0;"
- /* load single U8 from non-aligned STACK_ZERO slot */
+ /* load single U8 from non-aligned spilled value zero slot */
"r1 = %[single_byte_buf];"
"r2 = *(u8 *)(r10 -1);"
"r1 += r2;"
@@ -539,7 +549,7 @@ __naked void partial_stack_load_preserves_zeros(void)
"r1 += r2;"
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
- /* load single U16 from non-aligned STACK_ZERO slot */
+ /* load single U16 from non-aligned spilled value zero slot */
"r1 = %[single_byte_buf];"
"r2 = *(u16 *)(r10 -2);"
"r1 += r2;"
@@ -551,7 +561,7 @@ __naked void partial_stack_load_preserves_zeros(void)
"r1 += r2;"
"*(u8 *)(r1 + 0) = r2;" /* this should be fine */
- /* load single U32 from non-aligned STACK_ZERO slot */
+ /* load single U32 from non-aligned spilled value zero slot */
"r1 = %[single_byte_buf];"
"r2 = *(u32 *)(r10 -4);"
"r1 += r2;"
@@ -583,6 +593,47 @@ __naked void partial_stack_load_preserves_zeros(void)
: __clobber_common);
}
+SEC("raw_tp")
+__log_level(2)
+__success
+/* fp-4 is STACK_ZERO */
+__msg("2: (62) *(u32 *)(r10 -4) = 0 ; R10=fp0 fp-8=0000????")
+__msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8=0000????")
+__msg("5: (0f) r1 += r2")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)")
+__naked void partial_stack_load_preserves_partial_zeros(void)
+{
+ asm volatile (
+ /* fp-4 is value zero */
+ ".8byte %[fp4_st_zero];" /* LLVM-18+: *(u32 *)(r10 -4) = 0; */
+
+ /* load single U8 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u8 *)(r10 -1);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U16 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u16 *)(r10 -2);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ /* load single U32 from non-aligned stack zero slot */
+ "r1 = %[single_byte_buf];"
+ "r2 = *(u32 *)(r10 -4);"
+ "r1 += r2;"
+ "*(u8 *)(r1 + 0) = r2;" /* this should be fine */
+
+ "r0 = 0;"
+ "exit;"
+ :
+ : __imm_ptr(single_byte_buf),
+ __imm_insn(fp4_st_zero, BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0))
+ : __clobber_common);
+}
+
char two_byte_buf[2] SEC(".data.two_byte_buf");
SEC("raw_tp")
@@ -737,4 +788,460 @@ __naked void stack_load_preserves_const_precision_subreg(void)
: __clobber_common);
}
+SEC("xdp")
+__description("32-bit spilled reg range should be tracked")
+__success __retval(0)
+__naked void spill_32bit_range_track(void)
+{
+ asm volatile(" \
+ call %[bpf_ktime_get_ns]; \
+ /* Make r0 bounded. */ \
+ r0 &= 65535; \
+ /* Assign an ID to r0. */ \
+ r1 = r0; \
+ /* 32-bit spill r0 to stack. */ \
+ *(u32*)(r10 - 8) = r0; \
+ /* Boundary check on r0. */ \
+ if r0 < 1 goto l0_%=; \
+ /* 32-bit fill r1 from stack. */ \
+ r1 = *(u32*)(r10 - 8); \
+ /* r1 == r0 => r1 >= 1 always. */ \
+ if r1 >= 1 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. \
+ * Do an invalid memory access if the verifier \
+ * follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("64-bit spill of 64-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_64bit_of_64bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x80000000; \
+ r0 <<= 32; \
+ /* 64-bit spill r0 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r0; \
+ /* 64-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u64*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners.				\
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit spill of 32-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_32bit_of_32bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0x80000000; \
+ /* 32-bit spill r0 to stack - should assign an ID. */\
+ *(u32*)(r10 - 8) = r0; \
+ /* 32-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u32*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners.				\
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("16-bit spill of 16-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_16bit_of_16bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8000; \
+ /* 16-bit spill r0 to stack - should assign an ID. */\
+ *(u16*)(r10 - 8) = r0; \
+ /* 16-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u16*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners.				\
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("8-bit spill of 8-bit reg should assign ID")
+__success __retval(0)
+__naked void spill_8bit_of_8bit_ok(void)
+{
+ asm volatile (" \
+ /* Roll one bit to make the register inexact. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x80; \
+ /* 8-bit spill r0 to stack - should assign an ID. */\
+ *(u8*)(r10 - 8) = r0; \
+ /* 8-bit fill r1 from stack - should preserve the ID. */\
+ r1 = *(u8*)(r10 - 8); \
+ /* Compare r1 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners.				\
+ */ \
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("spill unbounded reg, then range check src")
+__success __retval(0)
+__naked void spill_unbounded(void)
+{
+ asm volatile (" \
+ /* Produce an unbounded scalar. */ \
+ call %[bpf_get_prandom_u32]; \
+ /* Spill r0 to stack. */ \
+ *(u64*)(r10 - 8) = r0; \
+ /* Boundary check on r0. */ \
+ if r0 > 16 goto l0_%=; \
+ /* Fill r0 from stack. */ \
+ r0 = *(u64*)(r10 - 8); \
+ /* Boundary check on r0 with predetermined result. */\
+ if r0 <= 16 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill")
+__success __retval(0)
+__naked void fill_32bit_after_spill_64bit(void)
+{
+ asm volatile(" \
+ /* Randomize the upper 32 bits. */ \
+ call %[bpf_get_prandom_u32]; \
+ r0 <<= 32; \
+ /* 64-bit spill r0 to stack. */ \
+ *(u64*)(r10 - 8) = r0; \
+ /* 32-bit fill r0 from stack. */ \
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r0 = *(u32*)(r10 - 8);"
+#else
+ "r0 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Boundary check on r0 with predetermined result. */\
+ if r0 == 0 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+l0_%=: exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill of 32-bit value should preserve ID")
+__success __retval(0)
+__naked void fill_32bit_after_spill_64bit_preserve_id(void)
+{
+ asm volatile (" \
+ /* Randomize the lower 32 bits. */ \
+ call %[bpf_get_prandom_u32]; \
+ w0 &= 0xffffffff; \
+ /* 64-bit spill r0 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r0; \
+ /* 32-bit fill r1 from stack - should preserve the ID. */\
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r1 = *(u32*)(r10 - 8);"
+#else
+ "r1 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Compare r1 with another register to trigger find_equal_scalars. */\
+ r2 = 0; \
+ if r1 != r2 goto l0_%=; \
+ /* The result of this comparison is predefined. */\
+ if r0 == r2 goto l0_%=; \
+ /* Dead branch: the verifier should prune it. Do an invalid memory\
+ * access if the verifier follows it. \
+ */ \
+ r0 = *(u64*)(r9 + 0); \
+ exit; \
+l0_%=: r0 = 0; \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+SEC("xdp")
+__description("32-bit fill after 64-bit spill should clear ID")
+__failure __msg("math between ctx pointer and 4294967295 is not allowed")
+__naked void fill_32bit_after_spill_64bit_clear_id(void)
+{
+ asm volatile (" \
+ r6 = r1; \
+ /* Roll one bit to force the verifier to track both branches. */\
+ call %[bpf_get_prandom_u32]; \
+ r0 &= 0x8; \
+ /* Put a large number into r1. */ \
+ r1 = 0xffffffff; \
+ r1 <<= 32; \
+ r1 += r0; \
+ /* 64-bit spill r1 to stack - should assign an ID. */\
+ *(u64*)(r10 - 8) = r1; \
+ /* 32-bit fill r2 from stack - should clear the ID. */\
+ "
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ "r2 = *(u32*)(r10 - 8);"
+#else
+ "r2 = *(u32*)(r10 - 4);"
+#endif
+ " \
+ /* Compare r2 with another register to trigger find_equal_scalars.\
+	 * Having one random bit is important here, otherwise the verifier\
+	 * cuts corners. If the ID was mistakenly preserved on fill, this would\
+	 * cause the verifier to think that r1 is also equal to zero in one\
+	 * branch, and equal to eight in the other.\
+ */ \
+ r3 = 0; \
+ if r2 != r3 goto l0_%=; \
+l0_%=: r1 >>= 32; \
+ /* The verifier shouldn't propagate r2's range to r1, so it should\
+ * still remember r1 = 0xffffffff and reject the below.\
+ */ \
+ r6 += r1; \
+ r0 = *(u32*)(r6 + 0); \
+ exit; \
+" :
+ : __imm(bpf_get_prandom_u32)
+ : __clobber_all);
+}
+
+/* stacksafe(): check if stack spill of an imprecise scalar in old state
+ * is considered equivalent to STACK_{MISC,INVALID} in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+/* STACK_INVALID should prevent the verifier in unpriv mode from
+ * considering states equivalent and force an error on the second
+ * verification path (entry - label 1 - label 2).
+ */
+__failure_unpriv
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("9: (95) exit")
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("invalid read from stack off -8+2 size 8")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_imprecise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check that stack spill of a precise scalar in old state
+ * is not considered equivalent to STACK_MISC in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* the verifier should visit 'if r1 == 0x2a ...' twice:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("if r1 == 0x2a goto pc+0")
+__msg("if r1 == 0x2a goto pc+0")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_precise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ /* use r1 in precise context */
+ "if r1 == 42 goto +0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check if STACK_MISC in old state is considered
+ * equivalent to stack spill of a scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r0 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_scalar(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r0 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check that STACK_MISC in old state is not considered
+ * equivalent to stack spill of a non-scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* the verifier should process the exit instruction twice:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("r1 = *(u64 *)(r10 -8)")
+__msg("exit")
+__msg("r1 = *(u64 *)(r10 -8)")
+__msg("exit")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_ctx_ptr(void)
+{
+ asm volatile(
+ /* remember context pointer in r9 */
+ "r9 = r1;"
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure context pointer in fp-8 */
+ "*(u64*)(r10 - 8) = r9;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
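
A note on the new #if __BYTE_ORDER__ blocks above: a narrow fill must read from wherever the spilled value's low bytes actually live, which depends on endianness. A small user-space sketch (illustrative only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint32_t slot = 20;	/* like the u32 spill of 20 at fp-8 */
		uint16_t half;

		/* Little endian keeps the low 16 bits at the lowest address
		 * (the fp-8 end of the slot); big endian keeps them at the
		 * highest (the fp-6 end), hence the per-__BYTE_ORDER__
		 * offsets in the tests above. */
		memcpy(&half, (uint8_t *)&slot, sizeof(half));
		printf("u16 at the slot's base: %u\n", half);
		return 0;
	}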
diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
index 9c1aa69650f8..fb316c080c84 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
@@ -330,7 +330,7 @@ l1_%=: r7 = r0; \
SEC("cgroup/skb")
__description("spin_lock: test10 lock in subprog without unlock")
-__failure __msg("unlock is missing")
+__success
__failure_unpriv __msg_unpriv("")
__naked void lock_in_subprog_without_unlock(void)
{
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 518329c666e9..7ea9785738b5 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -7,6 +7,8 @@
#include <bpf/bpf_endian.h>
#include <asm/errno.h>
+#include "bpf_compiler.h"
+
#define TC_ACT_OK 0
#define TC_ACT_SHOT 2
@@ -151,11 +153,11 @@ static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr,
__u64 sum = csum;
int i;
-#pragma unroll
+ __pragma_loop_unroll
for (i = 0; i < 4; i++)
sum += (__u32)saddr->in6_u.u6_addr32[i];
-#pragma unroll
+ __pragma_loop_unroll
for (i = 0; i < 4; i++)
sum += (__u32)daddr->in6_u.u6_addr32[i];
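
The __pragma_loop_unroll* wrappers come from the newly included bpf_compiler.h, which hides the compiler-specific pragma spelling so the programs build with both clang and GCC. A plausible shape for such a header (hypothetical sketch; the authoritative definitions live in bpf_compiler.h itself):

	#ifdef __clang__
	#define __pragma_loop_unroll		_Pragma("unroll")
	#define __pragma_loop_unroll_full	_Pragma("clang loop unroll(full)")
	#else
	#define __pragma_loop_unroll		_Pragma("GCC unroll 8")
	#define __pragma_loop_unroll_full	_Pragma("GCC unroll 65534")
	#endif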
diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c
index 54cf1765118b..44e2b0ef23ae 100644
--- a/tools/testing/selftests/bpf/progs/xdping_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdping_kern.c
@@ -15,6 +15,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
+#include "bpf_compiler.h"
#include "xdping.h"
struct {
@@ -116,7 +117,7 @@ int xdping_client(struct xdp_md *ctx)
return XDP_PASS;
if (pinginfo->start) {
-#pragma clang loop unroll(full)
+ __pragma_loop_unroll_full
for (i = 0; i < XDPING_MAX_COUNT; i++) {
if (pinginfo->times[i] == 0)
break;
diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c
index f01391021218..ba57601c2a4d 100644
--- a/tools/testing/selftests/bpf/test_loader.c
+++ b/tools/testing/selftests/bpf/test_loader.c
@@ -181,7 +181,7 @@ static int parse_test_spec(struct test_loader *tester,
memset(spec, 0, sizeof(*spec));
spec->prog_name = bpf_program__name(prog);
- spec->prog_flags = BPF_F_TEST_REG_INVARIANTS; /* by default be strict */
+ spec->prog_flags = testing_prog_flags();
btf = bpf_object__btf(obj);
if (!btf) {
@@ -688,7 +688,7 @@ static void process_subtest(struct test_loader *tester,
++nr_progs;
specs = calloc(nr_progs, sizeof(struct test_spec));
- if (!ASSERT_OK_PTR(specs, "Can't alloc specs array"))
+ if (!ASSERT_OK_PTR(specs, "specs_alloc"))
return;
i = 0;
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index c028d621c744..d98c72dc563e 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -211,7 +211,7 @@ static void test_lpm_map(int keysize)
volatile size_t n_matches, n_matches_after_delete;
size_t i, j, n_nodes, n_lookups;
struct tlpm_node *t, *list = NULL;
- struct bpf_lpm_trie_key *key;
+ struct bpf_lpm_trie_key_u8 *key;
uint8_t *data, *value;
int r, map;
@@ -331,8 +331,8 @@ static void test_lpm_map(int keysize)
static void test_lpm_ipaddr(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
- struct bpf_lpm_trie_key *key_ipv4;
- struct bpf_lpm_trie_key *key_ipv6;
+ struct bpf_lpm_trie_key_u8 *key_ipv4;
+ struct bpf_lpm_trie_key_u8 *key_ipv6;
size_t key_size_ipv4;
size_t key_size_ipv6;
int map_fd_ipv4;
@@ -423,7 +423,7 @@ static void test_lpm_ipaddr(void)
static void test_lpm_delete(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
- struct bpf_lpm_trie_key *key;
+ struct bpf_lpm_trie_key_u8 *key;
size_t key_size;
int map_fd;
__u64 value;
@@ -532,7 +532,7 @@ static void test_lpm_delete(void)
static void test_lpm_get_next_key(void)
{
LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
- struct bpf_lpm_trie_key *key_p, *next_key_p;
+ struct bpf_lpm_trie_key_u8 *key_p, *next_key_p;
size_t key_size;
__u32 value = 0;
int map_fd;
@@ -693,9 +693,9 @@ static void *lpm_test_command(void *arg)
{
int i, j, ret, iter, key_size;
struct lpm_mt_test_info *info = arg;
- struct bpf_lpm_trie_key *key_p;
+ struct bpf_lpm_trie_key_u8 *key_p;
- key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
+ key_size = sizeof(*key_p) + sizeof(__u32);
key_p = alloca(key_size);
for (iter = 0; iter < info->iter; iter++)
for (i = 0; i < MAX_TEST_KEYS; i++) {
@@ -717,7 +717,7 @@ static void *lpm_test_command(void *arg)
ret = bpf_map_lookup_elem(info->map_fd, key_p, &value);
assert(ret == 0 || errno == ENOENT);
} else {
- struct bpf_lpm_trie_key *next_key_p = alloca(key_size);
+ struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size);
ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p);
assert(ret == 0 || errno == ENOENT || errno == ENOMEM);
}
@@ -752,7 +752,7 @@ static void test_lpm_multi_thread(void)
/* create a trie */
value_size = sizeof(__u32);
- key_size = sizeof(struct bpf_lpm_trie_key) + value_size;
+ key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size;
map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts);
/* create 4 threads to test update, delete, lookup and get_next_key */
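
The rename keeps the LPM key layout unchanged: a 32-bit prefixlen header followed by the key bytes. A minimal sketch of building an IPv4 key with the new names (illustrative; assumes the updated UAPI header plus <arpa/inet.h> and alloca):

	struct bpf_lpm_trie_key_u8 *key;
	size_t key_size = sizeof(struct bpf_lpm_trie_key_hdr) + 4;

	key = alloca(key_size);		/* header + 4 address bytes */
	key->prefixlen = 24;		/* match a /24 */
	inet_pton(AF_INET, "192.168.0.0", key->data);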
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 767e0693df10..dfbab214f4d1 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1190,7 +1190,11 @@ static void test_map_in_map(void)
goto out_map_in_map;
}
- bpf_object__load(obj);
+ err = bpf_object__load(obj);
+ if (err) {
+ printf("Failed to load test prog\n");
+ goto out_map_in_map;
+ }
map = bpf_object__find_map_by_name(obj, "mim_array");
if (!map) {
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 1b9387890148..808550986f30 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -547,24 +547,6 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
return bpf_map__fd(map);
}
-static bool is_jit_enabled(void)
-{
- const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
- bool enabled = false;
- int sysctl_fd;
-
- sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
- if (sysctl_fd != -1) {
- char tmpc;
-
- if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
- enabled = (tmpc != '0');
- close(sysctl_fd);
- }
-
- return enabled;
-}
-
int compare_map_keys(int map1_fd, int map2_fd)
{
__u32 key, next_key;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 2f9f6f250f17..80df51244886 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -385,10 +385,15 @@ int test__join_cgroup(const char *path);
goto goto_label; \
})
+#define ALL_TO_DEV_NULL " >/dev/null 2>&1"
+
#define SYS_NOFAIL(fmt, ...) \
({ \
char cmd[1024]; \
- snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ int n; \
+ n = snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ if (n < sizeof(cmd) && sizeof(cmd) - n >= sizeof(ALL_TO_DEV_NULL)) \
+ strcat(cmd, ALL_TO_DEV_NULL); \
system(cmd); \
})
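
For illustration, a hypothetical SYS_NOFAIL("ip link del %s", "veth0") now runs

	ip link del veth0 >/dev/null 2>&1

when the redirect suffix fits in cmd[], and falls back to the plain (possibly truncated) command otherwise; as before, the macro ignores the exit status.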
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index b0068a9d2cfe..80c42583f597 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -19,6 +19,7 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
+#include "testing_helpers.h"
#include "bpf_util.h"
#ifndef ENOTSUPP
@@ -679,7 +680,7 @@ static int load_path(const struct sock_addr_test *test, const char *path)
bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
bpf_program__set_expected_attach_type(prog, test->expected_attach_type);
- bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS);
+ bpf_program__set_flags(prog, testing_prog_flags());
err = bpf_object__load(obj);
if (err) {
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index f36e41435be7..df04bda1c927 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -67,6 +67,7 @@
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
+#define F_NEEDS_JIT_ENABLED (1 << 2)
/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \
@@ -74,6 +75,7 @@
1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
+static bool jit_disabled;
static int skips;
static bool verbose = false;
static int verif_log_level = 0;
@@ -1341,48 +1343,6 @@ static bool cmp_str_seq(const char *log, const char *exp)
return true;
}
-static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt)
-{
- __u32 buf_element_size = sizeof(struct bpf_insn);
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- __u32 xlated_prog_len;
- struct bpf_insn *buf;
-
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("bpf_prog_get_info_by_fd failed");
- return NULL;
- }
-
- xlated_prog_len = info.xlated_prog_len;
- if (xlated_prog_len % buf_element_size) {
- printf("Program length %d is not multiple of %d\n",
- xlated_prog_len, buf_element_size);
- return NULL;
- }
-
- *cnt = xlated_prog_len / buf_element_size;
- buf = calloc(*cnt, buf_element_size);
- if (!buf) {
- perror("can't allocate xlated program buffer");
- return NULL;
- }
-
- bzero(&info, sizeof(info));
- info.xlated_prog_len = xlated_prog_len;
- info.xlated_prog_insns = (__u64)(unsigned long)buf;
- if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
- perror("second bpf_prog_get_info_by_fd failed");
- goto out_free_buf;
- }
-
- return buf;
-
-out_free_buf:
- free(buf);
- return NULL;
-}
-
static bool is_null_insn(struct bpf_insn *insn)
{
struct bpf_insn null_insn = {};
@@ -1505,7 +1465,7 @@ static void print_insn(struct bpf_insn *buf, int cnt)
static bool check_xlated_program(struct bpf_test *test, int fd_prog)
{
struct bpf_insn *buf;
- int cnt;
+ unsigned int cnt;
bool result = true;
bool check_expected = !is_null_insn(test->expected_insns);
bool check_unexpected = !is_null_insn(test->unexpected_insns);
@@ -1513,8 +1473,7 @@ static bool check_xlated_program(struct bpf_test *test, int fd_prog)
if (!check_expected && !check_unexpected)
goto out;
- buf = get_xlated_program(fd_prog, &cnt);
- if (!buf) {
+ if (get_xlated_program(fd_prog, &buf, &cnt)) {
printf("FAIL: can't get xlated program\n");
result = false;
goto out;
@@ -1567,6 +1526,13 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
__u32 pflags;
int i, err;
+ if ((test->flags & F_NEEDS_JIT_ENABLED) && jit_disabled) {
+ printf("SKIP (requires BPF JIT)\n");
+ skips++;
+ sched_yield();
+ return;
+ }
+
fd_prog = -1;
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
@@ -1588,7 +1554,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (fixup_skips != skips)
return;
- pflags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
+ pflags = testing_prog_flags();
if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
pflags |= BPF_F_STRICT_ALIGNMENT;
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
@@ -1887,6 +1853,8 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
+ jit_disabled = !is_jit_enabled();
+
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index d2458c1b1671..28b6646662af 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -252,6 +252,34 @@ __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
int extra_prog_load_log_flags = 0;
+int testing_prog_flags(void)
+{
+ static int cached_flags = -1;
+ static int prog_flags[] = { BPF_F_TEST_RND_HI32, BPF_F_TEST_REG_INVARIANTS };
+ static struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int insn_cnt = ARRAY_SIZE(insns), i, fd, flags = 0;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+
+ if (cached_flags >= 0)
+ return cached_flags;
+
+ for (i = 0; i < ARRAY_SIZE(prog_flags); i++) {
+ opts.prog_flags = prog_flags[i];
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "flag-test", "GPL",
+ insns, insn_cnt, &opts);
+ if (fd >= 0) {
+ flags |= prog_flags[i];
+ close(fd);
+ }
+ }
+
+ cached_flags = flags;
+ return cached_flags;
+}
+
int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
@@ -276,7 +304,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
if (type != BPF_PROG_TYPE_UNSPEC && bpf_program__type(prog) != type)
bpf_program__set_type(prog, type);
- flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS;
+ flags = bpf_program__flags(prog) | testing_prog_flags();
bpf_program__set_flags(prog, flags);
err = bpf_object__load(obj);
@@ -299,7 +327,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.kern_version = kern_version,
- .prog_flags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS,
+ .prog_flags = testing_prog_flags(),
.log_level = extra_prog_load_log_flags,
.log_buf = log_buf,
.log_size = log_buf_sz,
@@ -328,12 +356,12 @@ __u64 read_perf_max_sample_freq(void)
return sample_freq;
}
-static int finit_module(int fd, const char *param_values, int flags)
+int finit_module(int fd, const char *param_values, int flags)
{
return syscall(__NR_finit_module, fd, param_values, flags);
}
-static int delete_module(const char *name, int flags)
+int delete_module(const char *name, int flags)
{
return syscall(__NR_delete_module, name, flags);
}
@@ -387,3 +415,63 @@ int kern_sync_rcu(void)
{
return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
}
+
+int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
+{
+ __u32 buf_element_size = sizeof(struct bpf_insn);
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 xlated_prog_len;
+
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("bpf_prog_get_info_by_fd failed");
+ return -1;
+ }
+
+ xlated_prog_len = info.xlated_prog_len;
+ if (xlated_prog_len % buf_element_size) {
+ printf("Program length %u is not multiple of %u\n",
+ xlated_prog_len, buf_element_size);
+ return -1;
+ }
+
+ *cnt = xlated_prog_len / buf_element_size;
+ *buf = calloc(*cnt, buf_element_size);
+	if (!*buf) {
+ perror("can't allocate xlated program buffer");
+ return -ENOMEM;
+ }
+
+ bzero(&info, sizeof(info));
+ info.xlated_prog_len = xlated_prog_len;
+ info.xlated_prog_insns = (__u64)(unsigned long)*buf;
+ if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("second bpf_prog_get_info_by_fd failed");
+ goto out_free_buf;
+ }
+
+ return 0;
+
+out_free_buf:
+ free(*buf);
+ *buf = NULL;
+ return -1;
+}
+
+bool is_jit_enabled(void)
+{
+ const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
+ bool enabled = false;
+ int sysctl_fd;
+
+ sysctl_fd = open(jit_sysctl, O_RDONLY);
+ if (sysctl_fd != -1) {
+ char tmpc;
+
+ if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
+ enabled = (tmpc != '0');
+ close(sysctl_fd);
+ }
+
+ return enabled;
+}
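
The probe in testing_prog_flags() generalizes to any single program flag; a standalone sketch of the same pattern (illustrative only; kernel_supports_flag() is a made-up name, and the usual selftest includes for the insn macros and <bpf/bpf.h> are assumed):

	/* Probe whether the kernel accepts a prog flag by test-loading a
	 * trivial "return 0" program with that flag set. */
	static bool kernel_supports_flag(__u32 flag)
	{
		struct bpf_insn insns[] = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		};
		LIBBPF_OPTS(bpf_prog_load_opts, opts, .prog_flags = flag);
		int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
				       insns, sizeof(insns) / sizeof(insns[0]),
				       &opts);

		if (fd < 0)
			return false;
		close(fd);
		return true;
	}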
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index 35284faff4f2..d55f6ab12433 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -36,6 +36,8 @@ __u64 read_perf_max_sample_freq(void);
int load_bpf_testmod(bool verbose);
int unload_bpf_testmod(bool verbose);
int kern_sync_rcu(void);
+int finit_module(int fd, const char *param_values, int flags);
+int delete_module(const char *name, int flags);
static inline __u64 get_time_ns(void)
{
@@ -46,4 +48,12 @@ static inline __u64 get_time_ns(void)
return (u64)t.tv_sec * 1000000000 + t.tv_nsec;
}
+struct bpf_insn;
+/* Request BPF program instructions after all rewrites are applied,
+ * e.g. verifier.c:convert_ctx_access() is done.
+ */
+int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt);
+int testing_prog_flags(void);
+bool is_jit_enabled(void);
+
#endif /* __TESTING_HELPERS_H */
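
A minimal caller for the relocated helper might look like this (fd_prog stands for any loaded program fd; a sketch, not selftest code):

	struct bpf_insn *insns = NULL;
	__u32 cnt;

	if (get_xlated_program(fd_prog, &insns, &cnt) == 0) {
		printf("%u insns after verifier rewrites\n", cnt);
		free(insns);
	}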
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 4faa898ff7fc..27fd7ed3e4b0 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -271,7 +271,7 @@ ssize_t get_uprobe_offset(const void *addr)
* addi r2,r2,XXXX
*/
{
- const u32 *insn = (const u32 *)(uintptr_t)addr;
+ const __u32 *insn = (const __u32 *)(uintptr_t)addr;
if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
diff --git a/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
index a535d41dc20d..59125b22ae39 100644
--- a/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
+++ b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
@@ -57,6 +57,7 @@
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 12, CALLBACK_TYPE } },
@@ -90,6 +91,7 @@
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
@@ -127,6 +129,7 @@
.expected_insns = { HELPER_CALL_INSN() },
.unexpected_insns = { PSEUDO_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = {
@@ -165,6 +168,7 @@
.expected_insns = { PSEUDO_CALL_INSN() },
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.runs = 0,
.func_info = {
@@ -235,6 +239,7 @@
},
.unexpected_insns = { HELPER_CALL_INSN() },
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.result = ACCEPT,
.func_info = {
{ 0, MAIN_TYPE },
@@ -252,6 +257,7 @@
.unexpected_insns = { HELPER_CALL_INSN() },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .flags = F_NEEDS_JIT_ENABLED,
.func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
.func_info_cnt = 2,
BTF_TYPES
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 8a2ff81d8350..0a9293a57211 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -183,10 +183,10 @@
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "mark_precise: frame0: last_idx 7 first_idx 7\
- mark_precise: frame0: parent state regs=r4 stack=:\
+ mark_precise: frame0: parent state regs=r4 stack=-8:\
mark_precise: frame0: last_idx 6 first_idx 4\
- mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\
- mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\
+ mark_precise: frame0: regs=r4 stack=-8 before 6: (b7) r0 = -1\
+ mark_precise: frame0: regs=r4 stack=-8 before 5: (79) r4 = *(u64 *)(r10 -8)\
mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\
mark_precise: frame0: parent state regs=r0 stack=:\
mark_precise: frame0: last_idx 3 first_idx 3\
diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c
index 534576f06df1..c59e4adb905d 100644
--- a/tools/testing/selftests/core/close_range_test.c
+++ b/tools/testing/selftests/core/close_range_test.c
@@ -12,6 +12,7 @@
#include <syscall.h>
#include <unistd.h>
#include <sys/resource.h>
+#include <linux/close_range.h>
#include "../kselftest_harness.h"
#include "../clone3/clone3_selftests.h"
diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile
index 8a72bb7de70f..03a089165d3f 100644
--- a/tools/testing/selftests/drivers/net/bonding/Makefile
+++ b/tools/testing/selftests/drivers/net/bonding/Makefile
@@ -15,7 +15,10 @@ TEST_PROGS := \
TEST_FILES := \
lag_lib.sh \
bond_topo_2d1c.sh \
- bond_topo_3d1c.sh \
- net_forwarding_lib.sh
+ bond_topo_3d1c.sh
+
+TEST_INCLUDES := \
+ ../../../net/forwarding/lib.sh \
+ ../../../net/lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
index 6358df5752f9..1ec7f59db7f4 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
@@ -20,21 +20,21 @@
# +------+ +------+
#
# We use veths instead of physical interfaces
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
set -e
-tmp=$(mktemp -q dump.XXXXXX)
cleanup() {
ip link del fab-br0 >/dev/null 2>&1 || :
ip link del fbond >/dev/null 2>&1 || :
ip link del veth1-bond >/dev/null 2>&1 || :
ip link del veth2-bond >/dev/null 2>&1 || :
- modprobe -r bonding >/dev/null 2>&1 || :
- rm -f -- ${tmp}
}
trap cleanup 0 1 2
cleanup
-sleep 1
# create the bridge
ip link add fab-br0 address 52:54:00:3B:7C:A6 mtu 1500 type bridge \
@@ -67,13 +67,12 @@ ip link set fab-br0 up
ip link set fbond up
ip addr add dev fab-br0 10.0.0.3
-tcpdump -n -i veth1-end -e ether proto 0x8809 >${tmp} 2>&1 &
-sleep 15
-pkill tcpdump >/dev/null 2>&1
rc=0
-num=$(grep "packets captured" ${tmp} | awk '{print $1}')
-if test "$num" -gt 0; then
- echo "PASS, captured ${num}"
+tc qdisc add dev veth1-end clsact
+tc filter add dev veth1-end ingress protocol 0x8809 pref 1 handle 101 flower skip_hw action pass
+if slowwait_for_counter 15 2 \
+ tc_rule_handle_stats_get "dev veth1-end ingress" 101 ".packets" "" &> /dev/null; then
+ echo "PASS, captured 2"
else
echo "FAIL"
rc=1
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
index 862e947e17c7..8293dbc7c18f 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh
@@ -11,7 +11,7 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
bond_check_flags()
{
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh b/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh
index 89af402fabbe..78d3e0fe6604 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh
@@ -17,6 +17,11 @@
# +----------------+
#
# We use veths instead of physical interfaces
+REQUIRE_MZ=no
+NUM_NETIFS=0
+lib_dir=$(dirname "$0")
+source "$lib_dir"/../../../net/forwarding/lib.sh
+
sw="sw-$(mktemp -u XXXXXX)"
host="ns-$(mktemp -u XXXXXX)"
@@ -26,6 +31,16 @@ cleanup()
ip netns del $host
}
+wait_lladdr_dad()
+{
+ $@ | grep fe80 | grep -qv tentative
+}
+
+wait_bond_up()
+{
+ $@ | grep -q 'state UP'
+}
+
trap cleanup 0 1 2
ip netns add $sw
@@ -37,8 +52,8 @@ ip -n $host link add veth1 type veth peer name veth1 netns $sw
ip -n $sw link add br0 type bridge
ip -n $sw link set br0 up
sw_lladdr=$(ip -n $sw addr show br0 | awk '/fe80/{print $2}' | cut -d'/' -f1)
-# sleep some time to make sure bridge lladdr pass DAD
-sleep 2
+# wait some time to make sure bridge lladdr pass DAD
+slowwait 2 wait_lladdr_dad ip -n $sw addr show br0
ip -n $host link add bond0 type bond mode 1 ns_ip6_target ${sw_lladdr} \
arp_validate 3 arp_interval 1000
@@ -53,7 +68,7 @@ ip -n $sw link set veth1 master br0
ip -n $sw link set veth0 up
ip -n $sw link set veth1 up
-sleep 5
+slowwait 5 wait_bond_up ip -n $host link show bond0
rc=0
if ip -n $host link show bond0 | grep -q LOWER_UP; then
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
index c54d1697f439..41d0859feb7d 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
@@ -45,15 +45,23 @@ skip_ns()
}
active_slave=""
+active_slave_changed()
+{
+ local old_active_slave=$1
+ local new_active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" \
+ ".[].linkinfo.info_data.active_slave")
+ [ "$new_active_slave" != "$old_active_slave" -a "$new_active_slave" != "null" ]
+}
+
check_active_slave()
{
local target_active_slave=$1
+ slowwait 5 active_slave_changed $active_slave
active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
test "$active_slave" = "$target_active_slave"
check_err $? "Current active slave is $active_slave but not $target_active_slave"
}
-
# Test bonding prio option
prio_test()
{
@@ -62,6 +70,8 @@ prio_test()
# create bond
bond_reset "${param}"
+ # set active_slave to primary eth1 specifically
+ ip -n ${s_ns} link set bond0 type bond active_slave eth1
# check bonding member prio value
ip -n ${s_ns} link set eth0 type bond_slave prio 0
@@ -84,13 +94,13 @@ prio_test()
# active slave should be the higher prio slave
ip -n ${s_ns} link set $active_slave down
- bond_check_connection "fail over"
check_active_slave eth2
+ bond_check_connection "fail over"
# when only 1 slave is up
ip -n ${s_ns} link set $active_slave down
- bond_check_connection "only 1 slave up"
check_active_slave eth0
+ bond_check_connection "only 1 slave up"
# when a higher prio slave change to up
ip -n ${s_ns} link set eth2 up
@@ -140,8 +150,8 @@ prio_test()
check_active_slave "eth1"
ip -n ${s_ns} link set $active_slave down
- bond_check_connection "change slave prio"
check_active_slave "eth0"
+ bond_check_connection "change slave prio"
fi
}
@@ -162,7 +172,7 @@ prio_arp()
local mode=$1
for primary_reselect in 0 1 2; do
- prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+ prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
done
}
@@ -178,7 +188,7 @@ prio_ns()
fi
for primary_reselect in 0 1 2; do
- prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+ prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
done
}
@@ -194,9 +204,18 @@ prio()
for mode in $modes; do
prio_miimon $mode
- prio_arp $mode
- prio_ns $mode
done
+ prio_arp "active-backup"
+ prio_ns "active-backup"
+}
+
+wait_mii_up()
+{
+ for i in $(seq 0 2); do
+ mii_status=$(cmd_jq "ip -n ${s_ns} -j -d link show eth$i" ".[].linkinfo.info_slave_data.mii_status")
+ [ ${mii_status} != "UP" ] && return 1
+ done
+ return 0
}
arp_validate_test()
@@ -211,7 +230,7 @@ arp_validate_test()
[ $RET -ne 0 ] && log_test "arp_validate" "$retmsg"
# wait for a while to make sure the mii status stable
- sleep 5
+ slowwait 5 wait_mii_up
for i in $(seq 0 2); do
mii_status=$(cmd_jq "ip -n ${s_ns} -j -d link show eth$i" ".[].linkinfo.info_slave_data.mii_status")
if [ ${mii_status} != "UP" ]; then
@@ -276,10 +295,13 @@ garp_test()
active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
ip -n ${s_ns} link set ${active_slave} down
- exp_num=$(echo "${param}" | cut -f6 -d ' ')
- sleep $((exp_num + 2))
+ # wait for active link change
+ slowwait 2 active_slave_changed $active_slave
+ exp_num=$(echo "${param}" | cut -f6 -d ' ')
active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave")
+ slowwait_for_counter $((exp_num + 5)) $exp_num \
+ tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}"
# check result
real_num=$(tc_rule_handle_stats_get "dev s${active_slave#eth} ingress" 101 ".packets" "-n ${g_ns}")
@@ -296,8 +318,8 @@ garp_test()
num_grat_arp()
{
local val
- for val in 10 20 30 50; do
- garp_test "mode active-backup miimon 100 num_grat_arp $val peer_notify_delay 1000"
+ for val in 10 20 30; do
+ garp_test "mode active-backup miimon 10 num_grat_arp $val peer_notify_delay 100"
log_test "num_grat_arp" "active-backup miimon num_grat_arp $val"
done
}
diff --git a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
index a509ef949dcf..195ef83cfbf1 100644
--- a/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
@@ -28,7 +28,7 @@
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source ${lib_dir}/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
s_ns="s-$(mktemp -u XXXXXX)"
c_ns="c-$(mktemp -u XXXXXX)"
@@ -73,7 +73,6 @@ server_create()
ip -n ${s_ns} link set bond0 up
ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
- sleep 2
}
# Reset bond with new mode and options
@@ -96,7 +95,8 @@ bond_reset()
ip -n ${s_ns} link set bond0 up
ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0
ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0
- sleep 2
+ # Wait for IPv6 address ready as it needs DAD
+ slowwait 2 ip netns exec ${s_ns} ping6 ${c_ip6} -c 1 -W 0.1 &> /dev/null
}
server_destroy()
@@ -150,7 +150,7 @@ bond_check_connection()
{
local msg=${1:-"check connection"}
- sleep 2
+ slowwait 2 ip netns exec ${s_ns} ping ${c_ip4} -c 1 -W 0.1 &> /dev/null
ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null
check_err $? "${msg}: ping failed"
ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null
diff --git a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
index 5cfe7d8ebc25..e6fa24eded5b 100755
--- a/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
+++ b/tools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh
@@ -14,7 +14,7 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
source "$lib_dir"/lag_lib.sh
diff --git a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
index 2a268b17b61f..bf9bcd1b5ec0 100644
--- a/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
+++ b/tools/testing/selftests/drivers/net/bonding/lag_lib.sh
@@ -48,6 +48,17 @@ test_LAG_cleanup()
ip link add mv0 link "$name" up address "$ucaddr" type macvlan
# Used to test dev->mc handling
ip address add "$addr6" dev "$name"
+
+ # Check that addresses were added as expected
+ (grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy1 ||
+ grep_bridge_fdb "$ucaddr" bridge fdb show dev dummy2) >/dev/null
+ check_err $? "macvlan unicast address not found on a slave"
+
+ # mcaddr is added asynchronously by addrconf_dad_work(), use busywait
+ (busywait 10000 grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy1 ||
+ grep_bridge_fdb "$mcaddr" bridge fdb show dev dummy2) >/dev/null
+ check_err $? "IPv6 solicited-node multicast mac address not found on a slave"
+
ip link set dev "$name" down
ip link del "$name"
@@ -96,13 +107,12 @@ lag_setup2x2()
NAMESPACES="${namespaces}"
}
-# cleanup all lag related namespaces and remove the bonding module
+# cleanup all lag related namespaces
lag_cleanup()
{
for n in ${NAMESPACES}; do
ip netns delete ${n} >/dev/null 2>&1 || true
done
- modprobe -r bonding
}
SWITCH="lag_node1"
@@ -148,7 +158,7 @@ test_bond_recovery()
create_bond $@
# verify connectivity
- ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+ slowwait 2 ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 -W 0.1 &> /dev/null
check_err $? "No connectivity"
# force the links of the bond down
@@ -158,7 +168,7 @@ test_bond_recovery()
ip netns exec ${SWITCH} ip link set eth1 down
# re-verify connectivity
- ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 >/dev/null 2>&1
+ slowwait 2 ip netns exec ${CLIENT} ping ${SWITCHIP} -c 2 -W 0.1 &> /dev/null
local rc=$?
check_err $rc "Bond failed to recover"
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
index b76bf5030952..9d26ab4cad0b 100755
--- a/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
+++ b/tools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh
@@ -23,7 +23,7 @@ REQUIRE_MZ=no
REQUIRE_JQ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
source "$lib_dir"/lag_lib.sh
cleanup()
diff --git a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
index 8c2619002147..2d275b3e47dd 100755
--- a/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
+++ b/tools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh
@@ -23,7 +23,7 @@ REQUIRE_MZ=no
REQUIRE_JQ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
source "$lib_dir"/lag_lib.sh
cleanup()
diff --git a/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh
deleted file mode 120000
index 39c96828c5ef..000000000000
--- a/tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/lib.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/bonding/settings b/tools/testing/selftests/drivers/net/bonding/settings
index 6091b45d226b..79b65bdf05db 100644
--- a/tools/testing/selftests/drivers/net/bonding/settings
+++ b/tools/testing/selftests/drivers/net/bonding/settings
@@ -1 +1 @@
-timeout=120
+timeout=1200
diff --git a/tools/testing/selftests/drivers/net/dsa/Makefile b/tools/testing/selftests/drivers/net/dsa/Makefile
index c393e7b73805..cd6817fe5be6 100644
--- a/tools/testing/selftests/drivers/net/dsa/Makefile
+++ b/tools/testing/selftests/drivers/net/dsa/Makefile
@@ -11,8 +11,22 @@ TEST_PROGS = bridge_locked_port.sh \
tc_actions.sh \
test_bridge_fdb_stress.sh
-TEST_PROGS_EXTENDED := lib.sh tc_common.sh
+TEST_FILES := \
+ run_net_forwarding_test.sh \
+ forwarding.config
-TEST_FILES := forwarding.config
+TEST_INCLUDES := \
+ ../../../net/forwarding/bridge_locked_port.sh \
+ ../../../net/forwarding/bridge_mdb.sh \
+ ../../../net/forwarding/bridge_mld.sh \
+ ../../../net/forwarding/bridge_vlan_aware.sh \
+ ../../../net/forwarding/bridge_vlan_mcast.sh \
+ ../../../net/forwarding/bridge_vlan_unaware.sh \
+ ../../../net/forwarding/lib.sh \
+ ../../../net/forwarding/local_termination.sh \
+ ../../../net/forwarding/no_forwarding.sh \
+ ../../../net/forwarding/tc_actions.sh \
+ ../../../net/forwarding/tc_common.sh \
+ ../../../net/lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
index f5eb940c4c7c..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_locked_port.sh
\ No newline at end of file
+run_net_forwarding_test.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
index 76492da525f7..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_mdb.sh
\ No newline at end of file
+run_net_forwarding_test.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
index 81a7e0df0474..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_mld.sh
\ No newline at end of file
+run_net_forwarding_test.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
index 9831ed74376a..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_vlan_aware.sh
\ No newline at end of file
+run_net_forwarding_test.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
index 7f3c3f0bf719..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_vlan_mcast.sh
\ No newline at end of file
+run_net_forwarding_test.sh
\ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
index bf1a57e6bde1..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
@@ -1 +1 @@
-../../../net/forwarding/bridge_vlan_unaware.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/lib.sh b/tools/testing/selftests/drivers/net/dsa/lib.sh
deleted file mode 120000
index 39c96828c5ef..000000000000
--- a/tools/testing/selftests/drivers/net/dsa/lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/local_termination.sh b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
index c08166f84501..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/local_termination.sh
+++ b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
@@ -1 +1 @@
-../../../net/forwarding/local_termination.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
index b9757466bc97..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
+++ b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
@@ -1 +1 @@
-../../../net/forwarding/no_forwarding.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh b/tools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh
new file mode 100755
index 000000000000..4106c0a102ea
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/run_net_forwarding_test.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+libdir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
+testname=$(basename "${BASH_SOURCE[0]}")
+
+source "$libdir"/forwarding.config
+cd "$libdir"/../../../net/forwarding/ || exit 1
+source "./$testname" "$@"
diff --git a/tools/testing/selftests/drivers/net/dsa/tc_actions.sh b/tools/testing/selftests/drivers/net/dsa/tc_actions.sh
index 306213d9430e..d16a65e7595d 120000
--- a/tools/testing/selftests/drivers/net/dsa/tc_actions.sh
+++ b/tools/testing/selftests/drivers/net/dsa/tc_actions.sh
@@ -1 +1 @@
-../../../net/forwarding/tc_actions.sh \ No newline at end of file
+run_net_forwarding_test.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/tc_common.sh b/tools/testing/selftests/drivers/net/dsa/tc_common.sh
deleted file mode 120000
index bc3465bdc36b..000000000000
--- a/tools/testing/selftests/drivers/net/dsa/tc_common.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/tc_common.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
index 92acab83fbe2..74682151d04d 100755
--- a/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
+++ b/tools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh
@@ -19,7 +19,7 @@ REQUIRE_JQ="no"
REQUIRE_MZ="no"
NETIF_CREATE="no"
lib_dir=$(dirname "$0")
-source "$lib_dir"/lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
cleanup() {
echo "Cleaning up"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index 616d3581419c..31252bc8775e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -869,7 +869,7 @@ bloom_simple_test()
bloom_complex_test()
{
# Bloom filter index computation is affected from region ID, eRP
- # ID and from the region key size. In order to excercise those parts
+ # ID and from the region key size. In order to exercise those parts
# of the Bloom filter code, use a series of regions, each with a
# different key size and send packet that should hit all of them.
local index
diff --git a/tools/testing/selftests/drivers/net/netdevsim/Makefile b/tools/testing/selftests/drivers/net/netdevsim/Makefile
new file mode 100644
index 000000000000..5bace0b7fb57
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+TEST_PROGS = devlink.sh \
+ devlink_in_netns.sh \
+ devlink_trap.sh \
+ ethtool-coalesce.sh \
+ ethtool-fec.sh \
+ ethtool-pause.sh \
+ ethtool-ring.sh \
+ fib.sh \
+ hw_stats_l3.sh \
+ nexthop.sh \
+ peer.sh \
+ psample.sh \
+ tc-mq-visibility.sh \
+ udp_tunnel_nic.sh \
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index 46e20b13473c..b5ea2526f23c 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -31,7 +31,7 @@ devlink_wait()
fw_flash_test()
{
- DUMMYFILE=$(find /lib/firmware -maxdepth 1 -type f -printf '%f\n' |head -1)
+ DUMMYFILE=$(find /lib/firmware -type f -printf '%P\n' | head -1)
RET=0
if [ -z "$DUMMYFILE" ]
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
index 7d7829f57550..6c52ce1b0450 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh
@@ -49,7 +49,7 @@ for o in llrs rs; do
Active FEC encoding: ${o^^}"
done
-# Test mutliple bits
+# Test multiple bits
$ETHTOOL --set-fec $NSIM_NETDEV encoding rs llrs
check $?
s=$($ETHTOOL --show-fec $NSIM_NETDEV | tail -2)
diff --git a/tools/testing/selftests/drivers/net/netdevsim/peer.sh b/tools/testing/selftests/drivers/net/netdevsim/peer.sh
new file mode 100755
index 000000000000..aed62d9e6c0a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/peer.sh
@@ -0,0 +1,143 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+source ../../../net/net_helper.sh
+
+NSIM_DEV_1_ID=$((256 + RANDOM % 256))
+NSIM_DEV_1_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_DEV_1_ID
+NSIM_DEV_2_ID=$((512 + RANDOM % 256))
+NSIM_DEV_2_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_DEV_2_ID
+
+NSIM_DEV_SYS_NEW=/sys/bus/netdevsim/new_device
+NSIM_DEV_SYS_DEL=/sys/bus/netdevsim/del_device
+NSIM_DEV_SYS_LINK=/sys/bus/netdevsim/link_device
+NSIM_DEV_SYS_UNLINK=/sys/bus/netdevsim/unlink_device
+
+socat_check()
+{
+ if [ ! -x "$(command -v socat)" ]; then
+ echo "socat command not found. Skipping test"
+ return 1
+ fi
+
+ return 0
+}
+
+setup_ns()
+{
+ set -e
+ ip netns add nssv
+ ip netns add nscl
+
+ NSIM_DEV_1_NAME=$(find $NSIM_DEV_1_SYS/net -maxdepth 1 -type d ! \
+ -path $NSIM_DEV_1_SYS/net -exec basename {} \;)
+ NSIM_DEV_2_NAME=$(find $NSIM_DEV_2_SYS/net -maxdepth 1 -type d ! \
+ -path $NSIM_DEV_2_SYS/net -exec basename {} \;)
+
+ ip link set $NSIM_DEV_1_NAME netns nssv
+ ip link set $NSIM_DEV_2_NAME netns nscl
+
+ ip netns exec nssv ip addr add '192.168.1.1/24' dev $NSIM_DEV_1_NAME
+ ip netns exec nscl ip addr add '192.168.1.2/24' dev $NSIM_DEV_2_NAME
+
+ ip netns exec nssv ip link set dev $NSIM_DEV_1_NAME up
+ ip netns exec nscl ip link set dev $NSIM_DEV_2_NAME up
+ set +e
+}
+
+cleanup_ns()
+{
+ ip netns del nscl
+ ip netns del nssv
+}
+
+###
+### Code start
+###
+
+socat_check || exit 4
+
+modprobe netdevsim
+
+# linking
+
+echo $NSIM_DEV_1_ID > $NSIM_DEV_SYS_NEW
+echo $NSIM_DEV_2_ID > $NSIM_DEV_SYS_NEW
+udevadm settle
+
+setup_ns
+
+NSIM_DEV_1_FD=$((256 + RANDOM % 256))
+exec {NSIM_DEV_1_FD}</var/run/netns/nssv
+NSIM_DEV_1_IFIDX=$(ip netns exec nssv cat /sys/class/net/$NSIM_DEV_1_NAME/ifindex)
+
+NSIM_DEV_2_FD=$((256 + RANDOM % 256))
+exec {NSIM_DEV_2_FD}</var/run/netns/nscl
+NSIM_DEV_2_IFIDX=$(ip netns exec nscl cat /sys/class/net/$NSIM_DEV_2_NAME/ifindex)
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_2_FD:2000" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "linking with non-existent netdevsim should fail"
+ cleanup_ns
+ exit 1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX 2000:$NSIM_DEV_2_IFIDX" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "linking with non-existent netnsid should fail"
+ cleanup_ns
+ exit 1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "linking with self should fail"
+ cleanup_ns
+ exit 1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_2_FD:$NSIM_DEV_2_IFIDX" > $NSIM_DEV_SYS_LINK
+if [ $? -ne 0 ]; then
+ echo "linking netdevsim1 with netdevsim2 should succeed"
+ cleanup_ns
+ exit 1
+fi
+
+# argument error checking
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX $NSIM_DEV_2_FD:a" > $NSIM_DEV_SYS_LINK 2>/dev/null
+if [ $? -eq 0 ]; then
+ echo "invalid arg should fail"
+ cleanup_ns
+ exit 1
+fi
+
+# send/recv packets
+
+tmp_file=$(mktemp)
+ip netns exec nssv socat TCP-LISTEN:1234,fork $tmp_file &
+pid=$!
+res=0
+
+wait_local_port_listen nssv 1234 tcp
+
+echo "HI" | ip netns exec nscl socat STDIN TCP:192.168.1.1:1234
+
+count=$(cat $tmp_file | wc -c)
+if [[ $count -ne 3 ]]; then
+ echo "expected 3 bytes, got $count"
+ res=1
+fi
+
+echo "$NSIM_DEV_1_FD:$NSIM_DEV_1_IFIDX" > $NSIM_DEV_SYS_UNLINK
+
+echo $NSIM_DEV_2_ID > $NSIM_DEV_SYS_DEL
+
+kill $pid
+echo $NSIM_DEV_1_ID > $NSIM_DEV_SYS_DEL
+
+cleanup_ns
+
+modprobe -r netdevsim
+
+exit $res
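
The new peer.sh drives the netdevsim link_device / unlink_device sysfs files, whose payload is a pair of "netns-fd:ifindex" tokens. A minimal C sketch of the same linking step, assuming the two netdevsim ports already exist and sit in the (hypothetical) "nssv" and "nscl" namespaces; taking the ifindexes on the command line is purely illustrative:

#include <fcntl.h>
#include <stdio.h>

/* Sketch only: link two netdevsim ports the way peer.sh does. */
int main(int argc, char **argv)
{
	int ns1, ns2;
	FILE *f;

	if (argc != 3)	/* usage: ./link <ifindex1> <ifindex2> */
		return 1;
	ns1 = open("/var/run/netns/nssv", O_RDONLY);
	ns2 = open("/var/run/netns/nscl", O_RDONLY);
	f = fopen("/sys/bus/netdevsim/link_device", "w");
	if (ns1 < 0 || ns2 < 0 || !f)
		return 1;
	/* Each endpoint is written as "<netns fd>:<ifindex>". */
	fprintf(f, "%d:%s %d:%s", ns1, argv[1], ns2, argv[2]);
	return fclose(f) ? 1 : 0;	/* write failure => link rejected */
}
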
diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
index 4855ef597a15..384cfa3d38a6 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
@@ -270,6 +270,7 @@ for port in 0 1; do
echo 1 > $NSIM_DEV_SYS/new_port
fi
NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
msg="new NIC device created"
exp0=( 0 0 0 0 )
@@ -283,8 +284,8 @@ for port in 0 1; do
msg="VxLAN v4 devices go down"
exp0=( 0 0 0 0 )
- ifconfig vxlan1 down
- ifconfig vxlan0 down
+ ip link set dev vxlan1 down
+ ip link set dev vxlan0 down
check_tables
msg="VxLAN v6 devices"
@@ -292,7 +293,7 @@ for port in 0 1; do
new_vxlan vxlanA 4789 $NSIM_NETDEV 6
for ifc in vxlan0 vxlan1; do
- ifconfig $ifc up
+ ip link set dev $ifc up
done
new_vxlan vxlanB 4789 $NSIM_NETDEV 6
@@ -306,14 +307,14 @@ for port in 0 1; do
new_geneve gnv0 6081
msg="NIC device goes down"
- ifconfig $NSIM_NETDEV down
+ ip link set dev $NSIM_NETDEV down
if [ $port -eq 1 ]; then
exp0=( 0 0 0 0 )
exp1=( 0 0 0 0 )
fi
check_tables
msg="NIC device goes up again"
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
exp0=( `mke 4789 1` `mke 4790 1` 0 0 )
exp1=( `mke 6081 2` 0 0 0 )
check_tables
@@ -431,7 +432,8 @@ for port in 0 1; do
fi
echo $port > $NSIM_DEV_SYS/new_port
- ifconfig $NSIM_NETDEV up
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
overflow_table0 "overflow NIC table"
overflow_table1 "overflow NIC table"
@@ -488,7 +490,8 @@ for port in 0 1; do
fi
echo $port > $NSIM_DEV_SYS/new_port
- ifconfig $NSIM_NETDEV up
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
overflow_table0 "overflow NIC table"
overflow_table1 "overflow NIC table"
@@ -544,7 +547,8 @@ for port in 0 1; do
fi
echo $port > $NSIM_DEV_SYS/new_port
- ifconfig $NSIM_NETDEV up
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
overflow_table0 "destroy NIC"
overflow_table1 "destroy NIC"
@@ -573,7 +577,8 @@ for port in 0 1; do
fi
echo $port > $NSIM_DEV_SYS/new_port
- ifconfig $NSIM_NETDEV up
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
msg="create VxLANs v6"
new_vxlan vxlanA0 10000 $NSIM_NETDEV 6
@@ -633,7 +638,8 @@ for port in 0 1; do
fi
echo $port > $NSIM_DEV_SYS/new_port
- ifconfig $NSIM_NETDEV up
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
@@ -688,7 +694,8 @@ for port in 0 1; do
fi
echo $port > $NSIM_DEV_SYS/new_port
- ifconfig $NSIM_NETDEV up
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
msg="create VxLANs v6"
exp0=( `mke 10000 1` 0 0 0 )
@@ -747,7 +754,8 @@ for port in 0 1; do
fi
echo $port > $NSIM_DEV_SYS/new_port
- ifconfig $NSIM_NETDEV up
+ NSIM_NETDEV=`get_netdev_name old_netdevs`
+ ip link set dev $NSIM_NETDEV up
msg="create VxLANs v6"
exp0=( `mke 10000 1` 0 0 0 )
@@ -760,7 +768,7 @@ for port in 0 1; do
check_tables
msg="NIC device goes down"
- ifconfig $NSIM_NETDEV down
+ ip link set dev $NSIM_NETDEV down
if [ $port -eq 1 ]; then
exp0=( 0 0 0 0 )
exp1=( 0 0 0 0 )
@@ -771,7 +779,7 @@ for port in 0 1; do
check_tables
msg="NIC device goes up again"
- ifconfig $NSIM_NETDEV up
+ ip link set dev $NSIM_NETDEV up
exp0=( `mke 10000 1` 0 0 0 )
check_tables
@@ -819,12 +827,12 @@ new_vxlan vxlan1 4789 $NSIM_NETDEV2
msg="VxLAN v4 devices go down"
exp0=( 0 0 0 0 )
-ifconfig vxlan1 down
-ifconfig vxlan0 down
+ip link set dev vxlan1 down
+ip link set dev vxlan0 down
check_tables
for ifc in vxlan0 vxlan1; do
- ifconfig $ifc up
+ ip link set dev $ifc up
done
msg="VxLAN v6 device"
@@ -836,11 +844,11 @@ exp1=( `mke 6081 2` 0 0 0 )
new_geneve gnv0 6081
msg="NIC device goes down"
-ifconfig $NSIM_NETDEV down
+ip link set dev $NSIM_NETDEV down
check_tables
msg="NIC device goes up again"
-ifconfig $NSIM_NETDEV up
+ip link set dev $NSIM_NETDEV up
check_tables
for i in `seq 2`; do
@@ -877,6 +885,7 @@ msg="re-add a port"
echo 2 > $NSIM_DEV_SYS/del_port
echo 2 > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV=`get_netdev_name old_netdevs`
check_tables
msg="replace VxLAN in overflow table"
diff --git a/tools/testing/selftests/drivers/net/team/Makefile b/tools/testing/selftests/drivers/net/team/Makefile
index 6a86e61e8bfe..2d5a76d99181 100644
--- a/tools/testing/selftests/drivers/net/team/Makefile
+++ b/tools/testing/selftests/drivers/net/team/Makefile
@@ -3,8 +3,9 @@
TEST_PROGS := dev_addr_lists.sh
-TEST_FILES := \
- lag_lib.sh \
- net_forwarding_lib.sh
+TEST_INCLUDES := \
+ ../bonding/lag_lib.sh \
+ ../../../net/forwarding/lib.sh \
+ ../../../net/lib.sh
include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/team/config b/tools/testing/selftests/drivers/net/team/config
index 265b6882cc21..b5e3a3aad4bf 100644
--- a/tools/testing/selftests/drivers/net/team/config
+++ b/tools/testing/selftests/drivers/net/team/config
@@ -1,3 +1,5 @@
+CONFIG_DUMMY=y
+CONFIG_IPV6=y
+CONFIG_MACVLAN=y
CONFIG_NET_TEAM=y
CONFIG_NET_TEAM_MODE_LOADBALANCE=y
-CONFIG_MACVLAN=y
diff --git a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
index 33913112d5ca..b1ec7755b783 100755
--- a/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
+++ b/tools/testing/selftests/drivers/net/team/dev_addr_lists.sh
@@ -11,9 +11,9 @@ ALL_TESTS="
REQUIRE_MZ=no
NUM_NETIFS=0
lib_dir=$(dirname "$0")
-source "$lib_dir"/net_forwarding_lib.sh
+source "$lib_dir"/../../../net/forwarding/lib.sh
-source "$lib_dir"/lag_lib.sh
+source "$lib_dir"/../bonding/lag_lib.sh
destroy()
diff --git a/tools/testing/selftests/drivers/net/team/lag_lib.sh b/tools/testing/selftests/drivers/net/team/lag_lib.sh
deleted file mode 120000
index e1347a10afde..000000000000
--- a/tools/testing/selftests/drivers/net/team/lag_lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../bonding/lag_lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh b/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh
deleted file mode 120000
index 39c96828c5ef..000000000000
--- a/tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/dt/test_unprobed_devices.sh b/tools/testing/selftests/dt/test_unprobed_devices.sh
index b07af2a4c4de..7fae90293a9d 100755
--- a/tools/testing/selftests/dt/test_unprobed_devices.sh
+++ b/tools/testing/selftests/dt/test_unprobed_devices.sh
@@ -33,8 +33,8 @@ if [[ ! -d "${PDT}" ]]; then
fi
nodes_compatible=$(
- for node_compat in $(find ${PDT} -name compatible); do
- node=$(dirname "${node_compat}")
+ for node in $(find ${PDT} -type d); do
+ [ ! -f "${node}"/compatible ] && continue
# Check if node is available
if [[ -e "${node}"/status ]]; then
status=$(tr -d '\000' < "${node}"/status)
@@ -46,10 +46,11 @@ nodes_compatible=$(
nodes_dev_bound=$(
IFS=$'\n'
- for uevent in $(find /sys/devices -name uevent); do
- if [[ -d "$(dirname "${uevent}")"/driver ]]; then
- grep '^OF_FULLNAME=' "${uevent}" | sed -e 's|OF_FULLNAME=||'
- fi
+ for dev_dir in $(find /sys/devices -type d); do
+ [ ! -f "${dev_dir}"/uevent ] && continue
+ [ ! -d "${dev_dir}"/driver ] && continue
+
+ grep '^OF_FULLNAME=' "${dev_dir}"/uevent | sed -e 's|OF_FULLNAME=||'
done
)
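
The reworked loops above walk the directory tree once and probe for the files they need, instead of spawning find per attribute name. The same traversal expressed in C, as a sketch (nftw(3) based, error handling trimmed):

#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* For each device directory, require uevent plus a bound driver link. */
static int visit(const char *path, const struct stat *sb, int type,
		 struct FTW *ftwbuf)
{
	char probe[4096];

	if (type != FTW_D)
		return 0;
	snprintf(probe, sizeof(probe), "%s/uevent", path);
	if (access(probe, F_OK))
		return 0;		/* not a device node */
	snprintf(probe, sizeof(probe), "%s/driver", path);
	if (access(probe, F_OK))
		return 0;		/* present but unbound */
	printf("bound: %s\n", path);
	return 0;
}

int main(void)
{
	return nftw("/sys/devices", visit, 16, FTW_PHYS) ? 1 : 0;
}
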
diff --git a/tools/testing/selftests/hid/tests/test_wacom_generic.py b/tools/testing/selftests/hid/tests/test_wacom_generic.py
index 352fc39f3c6c..b62c7dba6777 100644
--- a/tools/testing/selftests/hid/tests/test_wacom_generic.py
+++ b/tools/testing/selftests/hid/tests/test_wacom_generic.py
@@ -880,8 +880,8 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
does not overlap with other contacts. The value of `t` may be
incremented over time to move the point along a linear path.
"""
- x = 50 + 10 * contact_id + t
- y = 100 + 100 * contact_id + t
+ x = 50 + 10 * contact_id + t * 11
+ y = 100 + 100 * contact_id + t * 11
return test_multitouch.Touch(contact_id, x, y)
def make_contacts(self, n, t=0):
@@ -902,8 +902,8 @@ class TestDTH2452Tablet(test_multitouch.BaseTest.TestMultitouch, TouchTabletTest
tracking_id = contact_ids.tracking_id
slot_num = contact_ids.slot_num
- x = 50 + 10 * contact_id + t
- y = 100 + 100 * contact_id + t
+ x = 50 + 10 * contact_id + t * 11
+ y = 100 + 100 * contact_id + t * 11
# If the data isn't supposed to be stored in any slots, there is
# nothing we can check for in the evdev stream.
diff --git a/tools/testing/selftests/iommu/config b/tools/testing/selftests/iommu/config
index 6c4f901d6fed..110d73917615 100644
--- a/tools/testing/selftests/iommu/config
+++ b/tools/testing/selftests/iommu/config
@@ -1,2 +1,3 @@
-CONFIG_IOMMUFD
-CONFIG_IOMMUFD_TEST
+CONFIG_IOMMUFD=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_IOMMUFD_TEST=y
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 1a881e7a21d1..edf1c99c9936 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -12,6 +12,7 @@
static unsigned long HUGEPAGE_SIZE;
#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
+#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)
static unsigned long get_huge_page_size(void)
{
@@ -1716,10 +1717,12 @@ FIXTURE(iommufd_dirty_tracking)
FIXTURE_VARIANT(iommufd_dirty_tracking)
{
unsigned long buffer_size;
+ bool hugepages;
};
FIXTURE_SETUP(iommufd_dirty_tracking)
{
+ int mmap_flags;
void *vrc;
int rc;
@@ -1732,25 +1735,41 @@ FIXTURE_SETUP(iommufd_dirty_tracking)
variant->buffer_size, rc);
}
+ mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
+ if (variant->hugepages) {
+ /*
+ * MAP_POPULATE will cause the kernel to fail mmap if THPs are
+ * not available.
+ */
+ mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
+ }
assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ mmap_flags, -1, 0);
assert(vrc == self->buffer);
self->page_size = MOCK_PAGE_SIZE;
self->bitmap_size =
variant->buffer_size / self->page_size / BITS_PER_BYTE;
- /* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */
+ /* Provision with an extra (PAGE_SIZE) for the unaligned case */
rc = posix_memalign(&self->bitmap, PAGE_SIZE,
- self->bitmap_size + MOCK_PAGE_SIZE);
+ self->bitmap_size + PAGE_SIZE);
assert(!rc);
assert(self->bitmap);
assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
test_ioctl_ioas_alloc(&self->ioas_id);
- test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
- &self->idev_id);
+ /* Enable 1M mock IOMMU hugepages */
+ if (variant->hugepages) {
+ test_cmd_mock_domain_flags(self->ioas_id,
+ MOCK_FLAGS_DEVICE_HUGE_IOVA,
+ &self->stdev_id, &self->hwpt_id,
+ &self->idev_id);
+ } else {
+ test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
+ &self->hwpt_id, &self->idev_id);
+ }
}
FIXTURE_TEARDOWN(iommufd_dirty_tracking)
@@ -1784,12 +1803,26 @@ FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
.buffer_size = 128UL * 1024UL * 1024UL,
};
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
+{
+ /* 4K bitmap (128M IOVA range) */
+ .buffer_size = 128UL * 1024UL * 1024UL,
+ .hugepages = true,
+};
+
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
{
/* 8K bitmap (256M IOVA range) */
.buffer_size = 256UL * 1024UL * 1024UL,
};
+FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge)
+{
+ /* 8K bitmap (256M IOVA range) */
+ .buffer_size = 256UL * 1024UL * 1024UL,
+ .hugepages = true,
+};
+
TEST_F(iommufd_dirty_tracking, enforce_dirty)
{
uint32_t ioas_id, stddev_id, idev_id;
@@ -1849,65 +1882,80 @@ TEST_F(iommufd_dirty_tracking, device_dirty_capability)
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
{
- uint32_t stddev_id;
+ uint32_t page_size = MOCK_PAGE_SIZE;
uint32_t hwpt_id;
uint32_t ioas_id;
+ if (variant->hugepages)
+ page_size = MOCK_HUGE_PAGE_SIZE;
+
test_ioctl_ioas_alloc(&ioas_id);
test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
variant->buffer_size, MOCK_APERTURE_START);
test_cmd_hwpt_alloc(self->idev_id, ioas_id,
IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
- test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
test_cmd_set_dirty_tracking(hwpt_id, true);
test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
- MOCK_APERTURE_START, self->page_size,
+ MOCK_APERTURE_START, self->page_size, page_size,
self->bitmap, self->bitmap_size, 0, _metadata);
/* PAGE_SIZE unaligned bitmap */
test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
- MOCK_APERTURE_START, self->page_size,
+ MOCK_APERTURE_START, self->page_size, page_size,
self->bitmap + MOCK_PAGE_SIZE,
self->bitmap_size, 0, _metadata);
- test_ioctl_destroy(stddev_id);
+ /* u64 unaligned bitmap */
+ test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
+ MOCK_APERTURE_START, self->page_size, page_size,
+ self->bitmap + 0xff1, self->bitmap_size, 0,
+ _metadata);
+
test_ioctl_destroy(hwpt_id);
}
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
{
- uint32_t stddev_id;
+ uint32_t page_size = MOCK_PAGE_SIZE;
uint32_t hwpt_id;
uint32_t ioas_id;
+ if (variant->hugepages)
+ page_size = MOCK_HUGE_PAGE_SIZE;
+
test_ioctl_ioas_alloc(&ioas_id);
test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
variant->buffer_size, MOCK_APERTURE_START);
test_cmd_hwpt_alloc(self->idev_id, ioas_id,
IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
- test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
test_cmd_set_dirty_tracking(hwpt_id, true);
test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
- MOCK_APERTURE_START, self->page_size,
+ MOCK_APERTURE_START, self->page_size, page_size,
self->bitmap, self->bitmap_size,
IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
_metadata);
/* Unaligned bitmap */
test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
- MOCK_APERTURE_START, self->page_size,
+ MOCK_APERTURE_START, self->page_size, page_size,
self->bitmap + MOCK_PAGE_SIZE,
self->bitmap_size,
IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
_metadata);
- test_ioctl_destroy(stddev_id);
+ /* u64 unaligned bitmap */
+ test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
+ MOCK_APERTURE_START, self->page_size, page_size,
+ self->bitmap + 0xff1, self->bitmap_size,
+ IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
+ _metadata);
+
test_ioctl_destroy(hwpt_id);
}
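
The hugepage plumbing added here comes down to two switches: map the buffer with MAP_HUGETLB | MAP_POPULATE and raise the expected dirty granularity to MOCK_HUGE_PAGE_SIZE. A condensed sketch of that selection, assuming a 4K PAGE_SIZE (fixed_at/len are placeholders, not the fixture's fields):

#include <stddef.h>
#include <sys/mman.h>

#define MOCK_PAGE_SIZE		(4096UL / 2)
#define MOCK_HUGE_PAGE_SIZE	(512 * MOCK_PAGE_SIZE)	/* 1M mock IOMMU page */

/* Sketch of the variant switch; not the test code itself. */
static void *map_dirty_buffer(void *fixed_at, size_t len, int hugepages,
			      size_t *page_size)
{
	int flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;

	if (hugepages)
		/* MAP_POPULATE makes mmap() fail up front when THPs are absent. */
		flags |= MAP_HUGETLB | MAP_POPULATE;
	*page_size = hugepages ? MOCK_HUGE_PAGE_SIZE : MOCK_PAGE_SIZE;
	return mmap(fixed_at, len, PROT_READ | PROT_WRITE, flags, -1, 0);
}
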
diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
index c646264aa41f..8d2b46b2114d 100644
--- a/tools/testing/selftests/iommu/iommufd_utils.h
+++ b/tools/testing/selftests/iommu/iommufd_utils.h
@@ -344,16 +344,19 @@ static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length,
page_size, bitmap, nr))
static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
- __u64 iova, size_t page_size, __u64 *bitmap,
+ __u64 iova, size_t page_size,
+ size_t pte_page_size, __u64 *bitmap,
__u64 bitmap_size, __u32 flags,
struct __test_metadata *_metadata)
{
- unsigned long i, nbits = bitmap_size * BITS_PER_BYTE;
- unsigned long nr = nbits / 2;
+ unsigned long npte = pte_page_size / page_size, pteset = 2 * npte;
+ unsigned long nbits = bitmap_size * BITS_PER_BYTE;
+ unsigned long j, i, nr = nbits / pteset ?: 1;
__u64 out_dirty = 0;
/* Mark all even bits as dirty in the mock domain */
- for (i = 0; i < nbits; i += 2)
+ memset(bitmap, 0, bitmap_size);
+ for (i = 0; i < nbits; i += pteset)
set_bit(i, (unsigned long *)bitmap);
test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size,
@@ -365,8 +368,12 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap,
flags);
/* Beware ASSERT_EQ() is two statements -- braces are not redundant! */
- for (i = 0; i < nbits; i++) {
- ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap));
+ for (i = 0; i < nbits; i += pteset) {
+ for (j = 0; j < pteset; j++) {
+ ASSERT_EQ(j < npte,
+ test_bit(i + j, (unsigned long *)bitmap));
+ }
+ ASSERT_EQ(!(i % pteset), test_bit(i, (unsigned long *)bitmap));
}
memset(bitmap, 0, bitmap_size);
@@ -374,19 +381,23 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length,
flags);
	/* It was read already -- expect all zeroes */
- for (i = 0; i < nbits; i++) {
- ASSERT_EQ(!(i % 2) && (flags &
- IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
- test_bit(i, (unsigned long *)bitmap));
+ for (i = 0; i < nbits; i += pteset) {
+ for (j = 0; j < pteset; j++) {
+ ASSERT_EQ(
+ (j < npte) &&
+ (flags &
+ IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR),
+ test_bit(i + j, (unsigned long *)bitmap));
+ }
}
return 0;
}
-#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap, \
- bitmap_size, flags, _metadata) \
+#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, pte_size,\
+ bitmap, bitmap_size, flags, _metadata) \
ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \
- page_size, bitmap, bitmap_size, \
- flags, _metadata))
+ page_size, pte_size, bitmap, \
+ bitmap_size, flags, _metadata))
static int _test_cmd_create_access(int fd, unsigned int ioas_id,
__u32 *access_id, unsigned int flags)
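
The new pte_page_size argument reshapes the expected bitmap: the mock domain dirties whole IOMMU PTEs, so each dirtied PTE accounts for npte consecutive bits and every other PTE stays clean. A worked sketch of that arithmetic with the huge-page numbers (values illustrative):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 2048;			/* MOCK_PAGE_SIZE */
	unsigned long pte_page_size = 512 * 2048;	/* MOCK_HUGE_PAGE_SIZE */
	unsigned long npte = pte_page_size / page_size;	/* 512 bits per PTE */
	unsigned long pteset = 2 * npte;		/* dirty PTE + clean PTE */
	unsigned long i, nbits = 2 * pteset;		/* first four PTEs */

	for (i = 0; i < nbits; i++) {
		bool dirty = (i % pteset) < npte;

		if (i % npte == 0)	/* one line per PTE boundary */
			printf("bits %5lu..%5lu -> %s\n",
			       i, i + npte - 1, dirty ? "dirty" : "clear");
	}
	return 0;
}
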
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index a781e6311810..541bf192e30e 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -25,6 +25,7 @@
* ksft_test_result_skip(fmt, ...);
* ksft_test_result_xfail(fmt, ...);
* ksft_test_result_error(fmt, ...);
+ * ksft_test_result_code(exit_code, test_name, fmt, ...);
*
* When all tests are finished, clean up and exit the program with one of:
*
@@ -254,6 +255,50 @@ static inline __printf(1, 2) void ksft_test_result_error(const char *msg, ...)
va_end(args);
}
+static inline __printf(3, 4)
+void ksft_test_result_code(int exit_code, const char *test_name,
+ const char *msg, ...)
+{
+ const char *tap_code = "ok";
+ const char *directive = "";
+ int saved_errno = errno;
+ va_list args;
+
+ switch (exit_code) {
+ case KSFT_PASS:
+ ksft_cnt.ksft_pass++;
+ break;
+ case KSFT_XFAIL:
+ directive = " # XFAIL ";
+ ksft_cnt.ksft_xfail++;
+ break;
+ case KSFT_XPASS:
+ directive = " # XPASS ";
+ ksft_cnt.ksft_xpass++;
+ break;
+ case KSFT_SKIP:
+ directive = " # SKIP ";
+ ksft_cnt.ksft_xskip++;
+ break;
+ case KSFT_FAIL:
+ default:
+ tap_code = "not ok";
+ ksft_cnt.ksft_fail++;
+ break;
+ }
+
+ /* Docs seem to call for double space if directive is absent */
+ if (!directive[0] && msg[0])
+ directive = " # ";
+
+ va_start(args, msg);
+ printf("%s %u %s%s", tap_code, ksft_test_num(), test_name, directive);
+ errno = saved_errno;
+ vprintf(msg, args);
+ printf("\n");
+ va_end(args);
+}
+
static inline int ksft_exit_pass(void)
{
ksft_print_cnts();
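
A hypothetical caller of the new helper, showing how a single entry point replaces picking between the per-outcome ksft_test_result_*() variants by hand (the test names and skip message below are invented):

#include "kselftest.h"

int main(void)
{
	ksft_print_header();
	ksft_set_plan(2);
	/* Prints "ok 1 feature_probe" and bumps the pass counter. */
	ksft_test_result_code(KSFT_PASS, "feature_probe", "");
	/* Prints "ok 2 hw_path # SKIP missing hardware". */
	ksft_test_result_code(KSFT_SKIP, "hw_path", "missing hardware");
	ksft_finished();
}
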
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index e05ac8261046..4fd735e48ee7 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -56,6 +56,7 @@
#include <asm/types.h>
#include <ctype.h>
#include <errno.h>
+#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
@@ -95,14 +96,6 @@
* E.g., #define TH_LOG_ENABLED 1
*
* If no definition is provided, logging is enabled by default.
- *
- * If there is no way to print an error message for the process running the
- * test (e.g. not allowed to write to stderr), it is still possible to get the
- * ASSERT_* number for which the test failed. This behavior can be enabled by
- * writing `_metadata->no_print = true;` before the check sequence that is
- * unable to print. When an error occur, instead of printing an error message
- * and calling `abort(3)`, the test process call `_exit(2)` with the assert
- * number as argument, which is then printed by the parent process.
*/
#define TH_LOG(fmt, ...) do { \
if (TH_LOG_ENABLED) \
@@ -135,8 +128,7 @@
fprintf(TH_LOG_STREAM, "# SKIP %s\n", \
_metadata->results->reason); \
} \
- _metadata->passed = 1; \
- _metadata->skip = 1; \
+ _metadata->exit_code = KSFT_SKIP; \
_metadata->trigger = 0; \
statement; \
} while (0)
@@ -363,6 +355,11 @@
* Defines a test that depends on a fixture (e.g., is part of a test case).
* Very similar to TEST() except that *self* is the setup instance of fixture's
* datatype exposed for use by the implementation.
+ *
+ * The @test_name code is run in a separate process sharing the same memory
+ * (i.e. vfork), which means that the test process can update its privileges
+ * without impacting the related FIXTURE_TEARDOWN() (e.g. to remove files from
+ * a directory where write access was dropped).
*/
#define TEST_F(fixture_name, test_name) \
__TEST_F_IMPL(fixture_name, test_name, -1, TEST_TIMEOUT_DEFAULT)
@@ -384,17 +381,34 @@
{ \
/* fixture data is alloced, setup, and torn down per call. */ \
FIXTURE_DATA(fixture_name) self; \
+ pid_t child = 1; \
+ int status = 0; \
memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
if (setjmp(_metadata->env) == 0) { \
- fixture_name##_setup(_metadata, &self, variant->data); \
- /* Let setup failure terminate early. */ \
- if (!_metadata->passed || _metadata->skip) \
- return; \
- _metadata->setup_completed = true; \
- fixture_name##_##test_name(_metadata, &self, variant->data); \
+ /* Use the same _metadata. */ \
+ child = vfork(); \
+ if (child == 0) { \
+ fixture_name##_setup(_metadata, &self, variant->data); \
+ /* Let setup failure terminate early. */ \
+ if (_metadata->exit_code) \
+ _exit(0); \
+ _metadata->setup_completed = true; \
+ fixture_name##_##test_name(_metadata, &self, variant->data); \
+ } else if (child < 0 || child != waitpid(child, &status, 0)) { \
+ ksft_print_msg("ERROR SPAWNING TEST GRANDCHILD\n"); \
+ _metadata->exit_code = KSFT_FAIL; \
+ } \
+ } \
+ if (child == 0) { \
+ if (_metadata->setup_completed && !_metadata->teardown_parent) \
+ fixture_name##_teardown(_metadata, &self, variant->data); \
+ _exit(0); \
} \
- if (_metadata->setup_completed) \
+ if (_metadata->setup_completed && _metadata->teardown_parent) \
fixture_name##_teardown(_metadata, &self, variant->data); \
+ if (!WIFEXITED(status) && WIFSIGNALED(status)) \
+ /* Forward signal to __wait_for_test(). */ \
+ kill(getpid(), WTERMSIG(status)); \
__test_check_assert(_metadata); \
} \
static struct __test_metadata \
@@ -404,6 +418,7 @@
.fixture = &_##fixture_name##_fixture_object, \
.termsig = signal, \
.timeout = tmout, \
+ .teardown_parent = false, \
}; \
static void __attribute__((constructor)) \
_register_##fixture_name##_##test_name(void) \
@@ -694,18 +709,12 @@
for (; _metadata->trigger; _metadata->trigger = \
__bail(_assert, _metadata))
-#define __INC_STEP(_metadata) \
- /* Keep "step" below 255 (which is used for "SKIP" reporting). */ \
- if (_metadata->passed && _metadata->step < 253) \
- _metadata->step++;
-
#define is_signed_type(var) (!!(((__typeof__(var))(-1)) < (__typeof__(var))1))
#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
/* Avoid multiple evaluation of the cases */ \
__typeof__(_expected) __exp = (_expected); \
__typeof__(_seen) __seen = (_seen); \
- if (_assert) __INC_STEP(_metadata); \
if (!(__exp _t __seen)) { \
/* Report with actual signedness to avoid weird output. */ \
switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \
@@ -742,7 +751,7 @@
break; \
} \
} \
- _metadata->passed = 0; \
+ _metadata->exit_code = KSFT_FAIL; \
/* Ensure the optional handler is triggered */ \
_metadata->trigger = 1; \
} \
@@ -751,10 +760,9 @@
#define __EXPECT_STR(_expected, _seen, _t, _assert) do { \
const char *__exp = (_expected); \
const char *__seen = (_seen); \
- if (_assert) __INC_STEP(_metadata); \
if (!(strcmp(__exp, __seen) _t 0)) { \
__TH_LOG("Expected '%s' %s '%s'.", __exp, #_t, __seen); \
- _metadata->passed = 0; \
+ _metadata->exit_code = KSFT_FAIL; \
_metadata->trigger = 1; \
} \
} while (0); OPTIONAL_HANDLER(_assert)
@@ -800,6 +808,37 @@ struct __fixture_metadata {
.prev = &_fixture_global,
};
+struct __test_xfail {
+ struct __fixture_metadata *fixture;
+ struct __fixture_variant_metadata *variant;
+ struct __test_metadata *test;
+ struct __test_xfail *prev, *next;
+};
+
+/**
+ * XFAIL_ADD() - mark variant + test case combination as expected to fail
+ * @fixture_name: name of the fixture
+ * @variant_name: name of the variant
+ * @test_name: name of the test case
+ *
+ * Mark a combination of variant + test case for a given fixture as expected
+ * to fail. Tests marked this way will report XPASS / XFAIL return codes,
+ * instead of PASS / FAIL, and use the respective counters.
+ */
+#define XFAIL_ADD(fixture_name, variant_name, test_name) \
+ static struct __test_xfail \
+ _##fixture_name##_##variant_name##_##test_name##_xfail = \
+ { \
+ .fixture = &_##fixture_name##_fixture_object, \
+ .variant = &_##fixture_name##_##variant_name##_object, \
+ .test = &_##fixture_name##_##test_name##_object, \
+ }; \
+ static void __attribute__((constructor)) \
+ _register_##fixture_name##_##variant_name##_##test_name##_xfail(void) \
+ { \
+ __register_xfail(&_##fixture_name##_##variant_name##_##test_name##_xfail); \
+ }
+
static struct __fixture_metadata *__fixture_list = &_fixture_global;
static int __constructor_order;
@@ -814,6 +853,7 @@ static inline void __register_fixture(struct __fixture_metadata *f)
struct __fixture_variant_metadata {
const char *name;
const void *data;
+ struct __test_xfail *xfails;
struct __fixture_variant_metadata *prev, *next;
};
@@ -832,20 +872,24 @@ struct __test_metadata {
pid_t pid; /* pid of test when being run */
struct __fixture_metadata *fixture;
int termsig;
- int passed;
- int skip; /* did SKIP get used? */
+ int exit_code;
int trigger; /* extra handler after the evaluation */
int timeout; /* seconds to wait for test timeout */
bool timed_out; /* did this test timeout instead of exiting? */
- __u8 step;
- bool no_print; /* manual trigger when TH_LOG_STREAM is not available */
bool aborted; /* stopped test due to failed ASSERT */
bool setup_completed; /* did setup finish? */
+ bool teardown_parent; /* run teardown in a parent process */
jmp_buf env; /* for exiting out of test early */
struct __test_results *results;
struct __test_metadata *prev, *next;
};
+static inline bool __test_passed(struct __test_metadata *metadata)
+{
+ return metadata->exit_code != KSFT_FAIL &&
+ metadata->exit_code <= KSFT_SKIP;
+}
+
/*
* Since constructors are called in reverse order, reverse the test
* list so tests are run in source declaration order.
@@ -860,6 +904,11 @@ static inline void __register_test(struct __test_metadata *t)
__LIST_APPEND(t->fixture->tests, t);
}
+static inline void __register_xfail(struct __test_xfail *xf)
+{
+ __LIST_APPEND(xf->variant->xfails, xf);
+}
+
static inline int __bail(int for_realz, struct __test_metadata *t)
{
/* if this is ASSERT, return immediately. */
@@ -873,11 +922,8 @@ static inline int __bail(int for_realz, struct __test_metadata *t)
static inline void __test_check_assert(struct __test_metadata *t)
{
- if (t->aborted) {
- if (t->no_print)
- _exit(t->step);
+ if (t->aborted)
abort();
- }
}
struct __test_metadata *__active_test;
@@ -913,7 +959,7 @@ void __wait_for_test(struct __test_metadata *t)
int status;
if (sigaction(SIGALRM, &action, &saved_action)) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: unable to install SIGALRM handler\n",
t->name);
@@ -925,7 +971,7 @@ void __wait_for_test(struct __test_metadata *t)
waitpid(t->pid, &status, 0);
alarm(0);
if (sigaction(SIGALRM, &saved_action, NULL)) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: unable to uninstall SIGALRM handler\n",
t->name);
@@ -934,16 +980,16 @@ void __wait_for_test(struct __test_metadata *t)
__active_test = NULL;
if (t->timed_out) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: Test terminated by timeout\n", t->name);
} else if (WIFEXITED(status)) {
- if (WEXITSTATUS(status) == 255) {
- /* SKIP */
- t->passed = 1;
- t->skip = 1;
+ if (WEXITSTATUS(status) == KSFT_SKIP ||
+ WEXITSTATUS(status) == KSFT_XPASS ||
+ WEXITSTATUS(status) == KSFT_XFAIL) {
+ t->exit_code = WEXITSTATUS(status);
} else if (t->termsig != -1) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
"# %s: Test exited normally instead of by signal (code: %d)\n",
t->name,
@@ -951,26 +997,25 @@ void __wait_for_test(struct __test_metadata *t)
} else {
switch (WEXITSTATUS(status)) {
/* Success */
- case 0:
- t->passed = 1;
+ case KSFT_PASS:
+ t->exit_code = KSFT_PASS;
break;
- /* Other failure, assume step report. */
+ /* Failure */
default:
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
fprintf(TH_LOG_STREAM,
- "# %s: Test failed at step #%d\n",
- t->name,
- WEXITSTATUS(status));
+ "# %s: Test failed\n",
+ t->name);
}
}
} else if (WIFSIGNALED(status)) {
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
if (WTERMSIG(status) == SIGABRT) {
fprintf(TH_LOG_STREAM,
"# %s: Test terminated by assertion\n",
t->name);
} else if (WTERMSIG(status) == t->termsig) {
- t->passed = 1;
+ t->exit_code = KSFT_PASS;
} else {
fprintf(TH_LOG_STREAM,
"# %s: Test terminated unexpectedly by signal %d\n",
@@ -1110,16 +1155,19 @@ void __run_test(struct __fixture_metadata *f,
struct __fixture_variant_metadata *variant,
struct __test_metadata *t)
{
+ struct __test_xfail *xfail;
+ char test_name[LINE_MAX];
+ const char *diagnostic;
+
/* reset test struct */
- t->passed = 1;
- t->skip = 0;
+ t->exit_code = KSFT_PASS;
t->trigger = 0;
- t->step = 1;
- t->no_print = 0;
memset(t->results->reason, 0, sizeof(t->results->reason));
- ksft_print_msg(" RUN %s%s%s.%s ...\n",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ snprintf(test_name, sizeof(test_name), "%s%s%s.%s",
+ f->name, variant->name[0] ? "." : "", variant->name, t->name);
+
+ ksft_print_msg(" RUN %s ...\n", test_name);
/* Make sure output buffers are flushed before fork */
fflush(stdout);
@@ -1128,29 +1176,33 @@ void __run_test(struct __fixture_metadata *f,
t->pid = fork();
if (t->pid < 0) {
ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
- t->passed = 0;
+ t->exit_code = KSFT_FAIL;
} else if (t->pid == 0) {
setpgrp();
t->fn(t, variant);
- if (t->skip)
- _exit(255);
- /* Pass is exit 0 */
- if (t->passed)
- _exit(0);
- /* Something else happened, report the step. */
- _exit(t->step);
+ _exit(t->exit_code);
} else {
__wait_for_test(t);
}
- ksft_print_msg(" %4s %s%s%s.%s\n", t->passed ? "OK" : "FAIL",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ ksft_print_msg(" %4s %s\n",
+ __test_passed(t) ? "OK" : "FAIL", test_name);
- if (t->skip)
- ksft_test_result_skip("%s\n", t->results->reason[0] ?
- t->results->reason : "unknown");
+ /* Check if we're expecting this test to fail */
+ for (xfail = variant->xfails; xfail; xfail = xfail->next)
+ if (xfail->test == t)
+ break;
+ if (xfail)
+ t->exit_code = __test_passed(t) ? KSFT_XPASS : KSFT_XFAIL;
+
+ if (t->results->reason[0])
+ diagnostic = t->results->reason;
+ else if (t->exit_code == KSFT_PASS || t->exit_code == KSFT_FAIL)
+ diagnostic = NULL;
else
- ksft_test_result(t->passed, "%s%s%s.%s\n",
- f->name, variant->name[0] ? "." : "", variant->name, t->name);
+ diagnostic = "unknown";
+
+ ksft_test_result_code(t->exit_code, test_name,
+ diagnostic ? "%s" : "", diagnostic);
}
static int test_harness_run(int argc, char **argv)
@@ -1198,7 +1250,7 @@ static int test_harness_run(int argc, char **argv)
t->results = results;
__run_test(f, v, t);
t->results = NULL;
- if (t->passed)
+ if (__test_passed(t))
pass_count++;
else
ret = 1;
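
Taken together, a hypothetical fixture using the reworked exit codes and the new XFAIL_ADD() hook; every name below is invented, and the point is that the known_bad variant reports XFAIL (or XPASS once fixed) instead of FAIL:

#include "kselftest_harness.h"

FIXTURE(demo) {
	int unused;
};
FIXTURE_SETUP(demo) {}
FIXTURE_TEARDOWN(demo) {}

FIXTURE_VARIANT(demo) {
	bool broken;
};
FIXTURE_VARIANT_ADD(demo, working) { .broken = false, };
FIXTURE_VARIANT_ADD(demo, known_bad) { .broken = true, };

TEST_F(demo, smoke)
{
	ASSERT_FALSE(variant->broken);
}

/* demo.known_bad.smoke now counts toward the xfail counter, not fail. */
XFAIL_ADD(demo, known_bad, smoke);

TEST_HARNESS_MAIN
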
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index 274b8465b42a..2cb8dd1f8275 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -248,7 +248,7 @@ static void *test_vcpu_run(void *arg)
REPORT_GUEST_ASSERT(uc);
break;
default:
- TEST_FAIL("Unexpected guest exit\n");
+ TEST_FAIL("Unexpected guest exit");
}
return NULL;
@@ -287,7 +287,7 @@ static int test_migrate_vcpu(unsigned int vcpu_idx)
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
- "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
+ "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d",
vcpu_idx, new_pcpu, ret);
return ret;
@@ -326,12 +326,12 @@ static void test_run(struct kvm_vm *vm)
pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
- TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");
+ TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap");
for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
(void *)(unsigned long)i);
- TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
+ TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread", i);
}
/* Spawn a thread to control the vCPU migrations */
@@ -340,7 +340,7 @@ static void test_run(struct kvm_vm *vm)
ret = pthread_create(&pt_vcpu_migration, NULL,
test_vcpu_migration, NULL);
- TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
+ TEST_ASSERT(!ret, "Failed to create the migration pthread");
}
@@ -384,7 +384,7 @@ static struct kvm_vm *test_vm_create(void)
if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET))
vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset);
else
- TEST_FAIL("no support for global offset\n");
+ TEST_FAIL("no support for global offset");
}
for (i = 0; i < nr_vcpus; i++)
diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c
index 31f66ba97228..27c10e7a7e01 100644
--- a/tools/testing/selftests/kvm/aarch64/hypercalls.c
+++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c
@@ -175,18 +175,18 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
/* First 'read' should be an upper limit of the features supported */
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
- "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
+ "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
/* Test a 'write' by disabling all the features of the register map */
ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
TEST_ASSERT(ret == 0,
- "Failed to clear all the features of reg: 0x%lx; ret: %d\n",
+ "Failed to clear all the features of reg: 0x%lx; ret: %d",
reg_info->reg, errno);
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
- "Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
+ "Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);
/*
* Test enabling a feature that's not supported.
@@ -195,7 +195,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
if (reg_info->max_feat_bit < 63) {
ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
TEST_ASSERT(ret != 0 && errno == EINVAL,
- "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
+ "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx",
errno, reg_info->reg);
}
}
@@ -216,7 +216,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
*/
vcpu_get_reg(vcpu, reg_info->reg, &val);
TEST_ASSERT(val == 0,
- "Expected all the features to be cleared for reg: 0x%lx\n",
+ "Expected all the features to be cleared for reg: 0x%lx",
reg_info->reg);
/*
@@ -226,7 +226,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
*/
ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
TEST_ASSERT(ret != 0 && errno == EBUSY,
- "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
+ "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx",
errno, reg_info->reg);
}
}
@@ -265,7 +265,7 @@ static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
break;
default:
- TEST_FAIL("Unknown test stage: %d\n", prev_stage);
+ TEST_FAIL("Unknown test stage: %d", prev_stage);
}
}
@@ -294,7 +294,7 @@ static void test_run(void)
REPORT_GUEST_ASSERT(uc);
break;
default:
- TEST_FAIL("Unexpected guest exit\n");
+ TEST_FAIL("Unexpected guest exit");
}
}
diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
index 08a5ca5bed56..53fddad57cbb 100644
--- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
@@ -414,10 +414,10 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm,
if (fd != -1) {
ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
0, paging_size);
- TEST_ASSERT(ret == 0, "fallocate failed\n");
+ TEST_ASSERT(ret == 0, "fallocate failed");
} else {
ret = madvise(hva, paging_size, MADV_DONTNEED);
- TEST_ASSERT(ret == 0, "madvise failed\n");
+ TEST_ASSERT(ret == 0, "madvise failed");
}
return true;
@@ -501,7 +501,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
void fail_vcpu_run_no_handler(int ret)
{
- TEST_FAIL("Unexpected vcpu run failure\n");
+ TEST_FAIL("Unexpected vcpu run failure");
}
void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
diff --git a/tools/testing/selftests/kvm/aarch64/smccc_filter.c b/tools/testing/selftests/kvm/aarch64/smccc_filter.c
index f4ceae9c8925..2d189f3da228 100644
--- a/tools/testing/selftests/kvm/aarch64/smccc_filter.c
+++ b/tools/testing/selftests/kvm/aarch64/smccc_filter.c
@@ -178,7 +178,7 @@ static void expect_call_denied(struct kvm_vcpu *vcpu)
struct ucall uc;
if (get_ucall(vcpu, &uc) != UCALL_SYNC)
- TEST_FAIL("Unexpected ucall: %lu\n", uc.cmd);
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
TEST_ASSERT(uc.args[1] == SMCCC_RET_NOT_SUPPORTED,
"Unexpected SMCCC return code: %lu", uc.args[1]);
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 9d51b5691349..5f9713364693 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -517,11 +517,11 @@ static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
if (expect_fail)
TEST_ASSERT(pmcr_orig == pmcr,
- "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
+ "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
pmcr, pmcr_n);
else
TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
- "Failed to update PMCR.N to %lu (received: %lu)\n",
+ "Failed to update PMCR.N to %lu (received: %lu)",
pmcr_n, get_pmcr_n(pmcr));
}
@@ -594,12 +594,12 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
*/
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+ "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(set_reg_id), reg_val);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+ "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
/*
@@ -611,12 +611,12 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+ "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(set_reg_id), reg_val);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
- "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n",
+ "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
}
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 09c116a82a84..bf3609f71854 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -45,10 +45,10 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
/* Let the guest access its memory */
ret = _vcpu_run(vcpu);
- TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
TEST_ASSERT(false,
- "Invalid guest sync status: exit_reason=%s\n",
+ "Invalid guest sync status: exit_reason=%s",
exit_reason_str(run->exit_reason));
}
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index d374dbcf9a53..504f6fe980e8 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -88,9 +88,9 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
- TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
- "Invalid guest sync status: exit_reason=%s\n",
+ "Invalid guest sync status: exit_reason=%s",
exit_reason_str(run->exit_reason));
pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 6cbecf499767..eaad5b20854c 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -262,7 +262,7 @@ static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
"vcpu run failed: errno=%d", err);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
- "Invalid guest sync status: exit_reason=%s\n",
+ "Invalid guest sync status: exit_reason=%s",
exit_reason_str(run->exit_reason));
vcpu_handle_sync_stop();
@@ -376,7 +376,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
- /* Cleared pages should be the same as collected */
+ /*
+ * Cleared pages should be the same as collected, as KVM is supposed to
+ * clear only the entries that have been harvested.
+ */
TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
"with collected (%u)", cleared, count);
@@ -410,17 +413,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
pr_info("vcpu continues now.\n");
} else {
TEST_ASSERT(false, "Invalid guest sync status: "
- "exit_reason=%s\n",
+ "exit_reason=%s",
exit_reason_str(run->exit_reason));
}
}
-static void dirty_ring_before_vcpu_join(void)
-{
- /* Kick another round of vcpu just to make sure it will quit */
- sem_post(&sem_vcpu_cont);
-}
-
struct log_mode {
const char *name;
/* Return true if this mode is supported, otherwise false */
@@ -433,7 +430,6 @@ struct log_mode {
uint32_t *ring_buf_idx);
/* Hook to call when after each vcpu run */
void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
- void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
{
.name = "dirty-log",
@@ -452,7 +448,6 @@ struct log_mode {
.supported = dirty_ring_supported,
.create_vm_done = dirty_ring_create_vm_done,
.collect_dirty_pages = dirty_ring_collect_dirty_pages,
- .before_vcpu_join = dirty_ring_before_vcpu_join,
.after_vcpu_run = dirty_ring_after_vcpu_run,
},
};
@@ -513,14 +508,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
mode->after_vcpu_run(vcpu, ret, err);
}
-static void log_mode_before_vcpu_join(void)
-{
- struct log_mode *mode = &log_modes[host_log_mode];
-
- if (mode->before_vcpu_join)
- mode->before_vcpu_join();
-}
-
static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
@@ -719,6 +706,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vm *vm;
unsigned long *bmap;
uint32_t ring_buf_idx = 0;
+ int sem_val;
if (!log_mode_supported()) {
print_skip("Log mode '%s' not supported",
@@ -788,12 +776,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Start the iterations */
iteration = 1;
sync_global_to_guest(vm, iteration);
- host_quit = false;
+ WRITE_ONCE(host_quit, false);
host_dirty_count = 0;
host_clear_count = 0;
host_track_next_count = 0;
WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+ /*
+ * Ensure the previous iteration didn't leave a dangling semaphore, i.e.
+ * that the main task and vCPU worker were synchronized and completed
+ * verification of all iterations.
+ */
+ sem_getvalue(&sem_vcpu_stop, &sem_val);
+ TEST_ASSERT_EQ(sem_val, 0);
+ sem_getvalue(&sem_vcpu_cont, &sem_val);
+ TEST_ASSERT_EQ(sem_val, 0);
+
pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
while (iteration < p->iterations) {
@@ -819,15 +817,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
assert(host_log_mode == LOG_MODE_DIRTY_RING ||
atomic_read(&vcpu_sync_stop_requested) == false);
vm_dirty_log_verify(mode, bmap);
- sem_post(&sem_vcpu_cont);
- iteration++;
+ /*
+ * Set host_quit before sem_vcpu_cont in the final iteration to
+ * ensure that the vCPU worker doesn't resume the guest. As
+ * above, the dirty ring test may stop and wait even when not
+	 * explicitly requested to do so, i.e. would hang waiting for a
+ * "continue" if it's allowed to resume the guest.
+ */
+ if (++iteration == p->iterations)
+ WRITE_ONCE(host_quit, true);
+
+ sem_post(&sem_vcpu_cont);
sync_global_to_guest(vm, iteration);
}
- /* Tell the vcpu thread to quit */
- host_quit = true;
- log_mode_before_vcpu_join();
pthread_join(vcpu_thread, NULL);
pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
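
The ordering fix generalizes to any stop/continue handshake: publish the quit flag before the final "continue" post, otherwise the worker can wake, miss the flag, and block the loop again. A standalone sketch with invented names (the __atomic builtins stand in for the test's WRITE_ONCE/READ_ONCE):

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t sem_stop, sem_cont;
static bool quit;

static void *worker(void *arg)
{
	for (;;) {
		sem_post(&sem_stop);	/* "I stopped, verify my state" */
		sem_wait(&sem_cont);
		if (__atomic_load_n(&quit, __ATOMIC_RELAXED))
			break;
		/* ... resume the guest / run one more round ... */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i, iterations = 3;

	sem_init(&sem_stop, 0, 0);
	sem_init(&sem_cont, 0, 0);
	pthread_create(&t, NULL, worker, NULL);
	for (i = 1; i <= iterations; i++) {
		sem_wait(&sem_stop);
		/* verify dirty state here */
		if (i == iterations)	/* publish quit first ... */
			__atomic_store_n(&quit, true, __ATOMIC_RELAXED);
		sem_post(&sem_cont);	/* ... then release the worker */
	}
	pthread_join(t, NULL);
	printf("done\n");
	return 0;
}
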
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index 8274ef04301f..91f05f78e824 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -152,7 +152,7 @@ static void check_supported(struct vcpu_reg_list *c)
continue;
__TEST_REQUIRE(kvm_has_cap(s->capability),
- "%s: %s not available, skipping tests\n",
+ "%s: %s not available, skipping tests",
config_name(c), s->name);
}
}
diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c
index 41230b746190..3502caa3590c 100644
--- a/tools/testing/selftests/kvm/guest_print_test.c
+++ b/tools/testing/selftests/kvm/guest_print_test.c
@@ -98,7 +98,7 @@ static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
int offset = len_str - len_substr;
TEST_ASSERT(len_substr <= len_str,
- "Expected '%s' to be a substring of '%s'\n",
+ "Expected '%s' to be a substring of '%s'",
assert_msg, expected_assert_msg);
TEST_ASSERT(strcmp(&assert_msg[offset], expected_assert_msg) == 0,
@@ -116,7 +116,7 @@ static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
- "Unexpected exit reason: %u (%s),\n",
+ "Unexpected exit reason: %u (%s),",
run->exit_reason, exit_reason_str(run->exit_reason));
switch (get_ucall(vcpu, &uc)) {
@@ -161,11 +161,11 @@ static void test_limits(void)
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
- "Unexpected exit reason: %u (%s),\n",
+ "Unexpected exit reason: %u (%s),",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_ABORT,
- "Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)\n",
+ "Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)",
uc.cmd, UCALL_ABORT);
kvm_vm_free(vm);
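The offset computation above implements a plain suffix check; a self-contained sketch of the same comparison:

#include <assert.h>
#include <string.h>

/* The expected assert message must be a suffix of the reported one. */
static void assert_is_suffix(const char *msg, const char *expected)
{
        size_t len_msg = strlen(msg);
        size_t len_exp = strlen(expected);

        assert(len_exp <= len_msg);
        assert(!strcmp(&msg[len_msg - len_exp], expected));
}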
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index f5d59b9934f1..decc521fc760 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -41,7 +41,7 @@ static void *run_vcpu(void *arg)
vcpu_run(vcpu);
- TEST_ASSERT(false, "%s: exited with reason %d: %s\n",
+ TEST_ASSERT(false, "%s: exited with reason %d: %s",
__func__, run->exit_reason,
exit_reason_str(run->exit_reason));
pthread_exit(NULL);
@@ -55,7 +55,7 @@ static void *sleeping_thread(void *arg)
fd = open("/dev/null", O_RDWR);
close(fd);
}
- TEST_ASSERT(false, "%s: exited\n", __func__);
+ TEST_ASSERT(false, "%s: exited", __func__);
pthread_exit(NULL);
}
@@ -118,7 +118,7 @@ static void run_test(uint32_t run)
for (i = 0; i < VCPU_NUM; ++i)
check_join(threads[i], &b);
/* Should not be reached */
- TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
+ TEST_ASSERT(false, "%s: [%d] child escaped the ninja", __func__, run);
}
void wait_for_child_setup(pid_t pid)
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 71a41fa924b7..50a5e31ba8da 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -195,4 +195,6 @@ __printf(3, 4) int guest_snprintf(char *buf, int n, const char *fmt, ...);
char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1)));
+char *sys_get_cur_clocksource(void);
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index a84863503fcb..5bca8c947c82 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1271,4 +1271,6 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
#define PFERR_GUEST_PAGE_MASK BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)
+bool sys_clocksource_is_based_on_tsc(void);
+
#endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
index 31b3cb24b9a7..b9e23265e4b3 100644
--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
@@ -65,7 +65,7 @@ int main(int argc, char *argv[])
int r = setrlimit(RLIMIT_NOFILE, &rl);
__TEST_REQUIRE(r >= 0,
- "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+ "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
old_rlim_max, nr_fds_wanted);
} else {
TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
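For reference, a hedged sketch of the rlimit handling above: an unprivileged process may raise its soft limit up to the hard limit, but raising the hard limit itself needs CAP_SYS_RESOURCE, which is why an inadequate hard cap is treated as a skip rather than a failure:

#include <sys/resource.h>

/* Returns 0 on success, -1 when the caller should skip (hard cap too
 * low for an unprivileged process) or when getrlimit/setrlimit fail. */
static int try_raise_nofile(rlim_t wanted)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) || rl.rlim_max < wanted)
                return -1;
        rl.rlim_cur = wanted;
        return setrlimit(RLIMIT_NOFILE, &rl);
}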
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index e37dc9c21888..e0ba97ac1c56 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -204,9 +204,9 @@ static void *vcpu_worker(void *data)
ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
- TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
- "Invalid guest sync status: exit_reason=%s\n",
+ "Invalid guest sync status: exit_reason=%s",
exit_reason_str(vcpu->run->exit_reason));
pr_debug("Got sync event from vCPU %d\n", vcpu->id);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 41c776b642c0..43b9a7283360 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -398,7 +398,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
int i;
TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
- " num: %u\n", num);
+ " num: %u", num);
va_start(ap, num);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
index b5f28d21a947..184378d593e9 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
@@ -38,7 +38,7 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
struct list_head *iter;
unsigned int nr_gic_pages, nr_vcpus_created = 0;
- TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty\n");
+ TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty");
/*
* Make sure that the caller is in fact calling this
@@ -47,7 +47,7 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
list_for_each(iter, &vm->vcpus)
nr_vcpus_created++;
TEST_ASSERT(nr_vcpus == nr_vcpus_created,
- "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)\n",
+ "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)",
nr_vcpus, nr_vcpus_created);
/* Distributor setup */
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index 266f3876e10a..f34d926d9735 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -184,7 +184,7 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
"Seek to program segment offset failed,\n"
" program header idx: %u errno: %i\n"
" offset_rv: 0x%jx\n"
- " expected: 0x%jx\n",
+ " expected: 0x%jx",
n1, errno, (intmax_t) offset_rv,
(intmax_t) phdr.p_offset);
test_read(fd, addr_gva2hva(vm, phdr.p_vaddr),
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index e066d584c656..1b197426f29f 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -27,7 +27,8 @@ int open_path_or_exit(const char *path, int flags)
int fd;
fd = open(path, flags);
- __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
+ __TEST_REQUIRE(fd >= 0 || errno != ENOENT, "Cannot open %s: %s", path, strerror(errno));
+ TEST_ASSERT(fd >= 0, "Failed to open '%s'", path);
return fd;
}
@@ -320,7 +321,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
uint64_t nr_pages;
TEST_ASSERT(nr_runnable_vcpus,
- "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
+ "Use vm_create_barebones() for VMs that _never_ have vCPUs");
TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
"nr_vcpus = %d too large for host, max-vcpus = %d",
@@ -491,7 +492,7 @@ void kvm_pin_this_task_to_pcpu(uint32_t pcpu)
CPU_ZERO(&mask);
CPU_SET(pcpu, &mask);
r = sched_setaffinity(0, sizeof(mask), &mask);
- TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu);
+ TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.", pcpu);
}
static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
@@ -499,7 +500,7 @@ static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
- "Not allowed to run on pCPU '%d', check cgroups?\n", pcpu);
+ "Not allowed to run on pCPU '%d', check cgroups?", pcpu);
return pcpu;
}
@@ -529,7 +530,7 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
int i, r;
cpu_list = strdup(pcpus_string);
- TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");
+ TEST_ASSERT(cpu_list, "strdup() allocation failed.");
r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask);
TEST_ASSERT(!r, "sched_getaffinity() failed");
@@ -538,7 +539,7 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
/* 1. Get all pcpus for vcpus. */
for (i = 0; i < nr_vcpus; i++) {
- TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i);
+ TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'", i);
vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask);
cpu = strtok(NULL, delim);
}
@@ -1057,7 +1058,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION2 IOCTL failed,\n"
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
- " guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d\n",
+ " guest_phys_addr: 0x%lx size: 0x%lx guest_memfd: %d",
ret, errno, slot, flags,
guest_paddr, (uint64_t) region->region.memory_size,
region->region.guest_memfd);
@@ -1222,7 +1223,7 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);
ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
- TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx\n",
+ TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
punch_hole ? "punch hole" : "allocate", gpa, len,
region->region.guest_memfd, mode, fd_offset);
}
@@ -1265,7 +1266,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
struct kvm_vcpu *vcpu;
/* Confirm a vcpu with the specified id doesn't already exist. */
- TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
+ TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id);
/* Allocate and initialize new vcpu structure. */
vcpu = calloc(1, sizeof(*vcpu));
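The open_path_or_exit() change at the top of this file splits "missing" from "broken"; a standalone sketch of that policy, assuming the kselftest convention that exit code 4 means skip:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int open_or_classify(const char *path)
{
        int fd = open(path, O_RDONLY);

        if (fd < 0 && errno == ENOENT) {
                /* Feature not present: a requirement, not a failure. */
                printf("SKIP: %s not available\n", path);
                exit(4);
        }
        if (fd < 0) {
                /* Anything else (EACCES, EPERM, ...) is a real failure. */
                fprintf(stderr, "FAIL: open(%s): %s\n", path, strerror(errno));
                exit(1);
        }
        return fd;
}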
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index d05487e5a371..cf2c73971308 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -192,7 +192,7 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
TEST_ASSERT(guest_num_pages < region_end_gfn,
"Requested more guest memory than address space allows.\n"
" guest pages: %" PRIx64 " max gfn: %" PRIx64
- " nr_vcpus: %d wss: %" PRIx64 "]\n",
+ " nr_vcpus: %d wss: %" PRIx64 "]",
guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);
args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index 7ca736fb4194..2bb33a8ac03c 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -327,7 +327,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
int i;
TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
- " num: %u\n", num);
+ " num: %u", num);
va_start(ap, num);
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index 15945121daf1..f6d227892cbc 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -198,7 +198,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
int i;
TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
- " num: %u\n",
+ " num: %u",
num);
va_start(ap, num);
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 5d7f28b02d73..5a8f8becb129 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -392,3 +392,28 @@ char *strdup_printf(const char *fmt, ...)
return str;
}
+
+#define CLOCKSOURCE_PATH "/sys/devices/system/clocksource/clocksource0/current_clocksource"
+
+char *sys_get_cur_clocksource(void)
+{
+ char *clk_name;
+ struct stat st;
+ FILE *fp;
+
+ fp = fopen(CLOCKSOURCE_PATH, "r");
+ TEST_ASSERT(fp, "failed to open clocksource file, errno: %d", errno);
+
+ TEST_ASSERT(!fstat(fileno(fp), &st), "failed to stat clocksource file, errno: %d",
+ errno);
+
+ clk_name = malloc(st.st_size);
+ TEST_ASSERT(clk_name, "failed to allocate buffer to read file");
+
+ TEST_ASSERT(fgets(clk_name, st.st_size, fp), "failed to read clocksource file: %d",
+ ferror(fp));
+
+ fclose(fp);
+
+ return clk_name;
+}
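One detail worth calling out: fgets() keeps the trailing newline, so the string returned by sys_get_cur_clocksource() reads e.g. "tsc\n". A small usage sketch; stripping is optional, as callers may instead compare against the newline-terminated form:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

char *sys_get_cur_clocksource(void);    /* declared in test_util.h above */

static void print_clocksource(void)
{
        char *clk = sys_get_cur_clocksource();

        clk[strcspn(clk, "\n")] = '\0';  /* drop the trailing newline */
        printf("current clocksource: %s\n", clk);
        free(clk);
}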
diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
index 271f63891581..f4eef6eb2dc2 100644
--- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c
+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
@@ -69,7 +69,7 @@ static void *uffd_handler_thread_fn(void *arg)
if (pollfd[1].revents & POLLIN) {
r = read(pollfd[1].fd, &tmp_chr, 1);
TEST_ASSERT(r == 1,
- "Error reading pipefd in UFFD thread\n");
+ "Error reading pipefd in UFFD thread");
break;
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index d8288374078e..f639b3e062e3 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -170,10 +170,10 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
* this level.
*/
TEST_ASSERT(current_level != target_level,
- "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
+ "Cannot create hugepage at level: %u, vaddr: 0x%lx",
current_level, vaddr);
TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
- "Cannot create page table at level: %u, vaddr: 0x%lx\n",
+ "Cannot create page table at level: %u, vaddr: 0x%lx",
current_level, vaddr);
}
return pte;
@@ -220,7 +220,7 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
/* Fill in page table entry. */
pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
- "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
+ "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
}
@@ -253,7 +253,7 @@ static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level)
if (*pte & PTE_LARGE_MASK) {
TEST_ASSERT(*level == PG_LEVEL_NONE ||
*level == current_level,
- "Unexpected hugepage at level %d\n", current_level);
+ "Unexpected hugepage at level %d", current_level);
*level = current_level;
}
@@ -825,7 +825,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
struct kvm_regs regs;
TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
- " num: %u\n",
+ " num: %u",
num);
va_start(ap, num);
@@ -1299,3 +1299,14 @@ void kvm_selftest_arch_init(void)
host_cpu_is_intel = this_cpu_is_intel();
host_cpu_is_amd = this_cpu_is_amd();
}
+
+bool sys_clocksource_is_based_on_tsc(void)
+{
+ char *clk_name = sys_get_cur_clocksource();
+ bool ret = !strcmp(clk_name, "tsc\n") ||
+ !strcmp(clk_name, "hyperv_clocksource_tsc_page\n");
+
+ free(clk_name);
+
+ return ret;
+}
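Note that the comparisons above deliberately include the '\n' that sys_get_cur_clocksource() leaves in place. Converted callers later in this series gate on the helper with the selftest skip macro, e.g.:

        /* Skip TSC-dependent setup when the clocksource isn't TSC-based. */
        TEST_REQUIRE(sys_clocksource_is_based_on_tsc());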
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 59d97531c9b1..089b8925b6b2 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -54,7 +54,7 @@ int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
/* KVM should return supported EVMCS version range */
TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
(evmcs_ver & 0xff) > 0,
- "Incorrect EVMCS version range: %x:%x\n",
+ "Incorrect EVMCS version range: %x:%x",
evmcs_ver & 0xff, evmcs_ver >> 8);
return evmcs_ver;
@@ -387,10 +387,10 @@ static void nested_create_pte(struct kvm_vm *vm,
* this level.
*/
TEST_ASSERT(current_level != target_level,
- "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
+ "Cannot create hugepage at level: %u, nested_paddr: 0x%lx",
current_level, nested_paddr);
TEST_ASSERT(!pte->page_size,
- "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
+ "Cannot create page table at level: %u, nested_paddr: 0x%lx",
current_level, nested_paddr);
}
}
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 9855c41ca811..156361966612 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -45,7 +45,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
/* Let the guest access its memory until a stop signal is received */
while (!READ_ONCE(memstress_args.stop_vcpus)) {
ret = _vcpu_run(vcpu);
- TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);
if (get_ucall(vcpu, NULL) == UCALL_SYNC)
continue;
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 8698d1ab60d0..579a64f97333 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -175,11 +175,11 @@ static void wait_for_vcpu(void)
struct timespec ts;
TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
- "clock_gettime() failed: %d\n", errno);
+ "clock_gettime() failed: %d", errno);
ts.tv_sec += 2;
TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
- "sem_timedwait() failed: %d\n", errno);
+ "sem_timedwait() failed: %d", errno);
}
static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
@@ -336,7 +336,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
TEST_ASSERT(gpa == guest_addr,
- "vm_phy_pages_alloc() failed\n");
+ "vm_phy_pages_alloc() failed");
data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index 6652108816db..6435e7a65642 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -49,15 +49,42 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKB:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKC:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBKX:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBS:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFH:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZFHMIN:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOM:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICBOZ:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICOND:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZICSR:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIFENCEI:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTNTL:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZIHPM:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKND:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNE:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKNH:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKR:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSED:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKSH:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZKT:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBB:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVBC:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFH:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVFHMIN:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKB:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKG:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNED:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKNHB:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSED:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKSH:
+ case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZVKT:
/*
* Like ISA_EXT registers, SBI_EXT registers are only visible when the
* host supports them and disabling them does not affect the visibility
@@ -150,7 +177,7 @@ void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
/* Double check whether the desired extension was enabled */
__TEST_REQUIRE(vcpu_has_ext(vcpu, feature),
- "%s not available, skipping tests\n", s->name);
+ "%s not available, skipping tests", s->name);
}
}
@@ -394,15 +421,42 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
+ KVM_ISA_EXT_ARR(ZBC),
+ KVM_ISA_EXT_ARR(ZBKB),
+ KVM_ISA_EXT_ARR(ZBKC),
+ KVM_ISA_EXT_ARR(ZBKX),
KVM_ISA_EXT_ARR(ZBS),
+ KVM_ISA_EXT_ARR(ZFA),
+ KVM_ISA_EXT_ARR(ZFH),
+ KVM_ISA_EXT_ARR(ZFHMIN),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
KVM_ISA_EXT_ARR(ZICNTR),
KVM_ISA_EXT_ARR(ZICOND),
KVM_ISA_EXT_ARR(ZICSR),
KVM_ISA_EXT_ARR(ZIFENCEI),
+ KVM_ISA_EXT_ARR(ZIHINTNTL),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZIHPM),
+ KVM_ISA_EXT_ARR(ZKND),
+ KVM_ISA_EXT_ARR(ZKNE),
+ KVM_ISA_EXT_ARR(ZKNH),
+ KVM_ISA_EXT_ARR(ZKR),
+ KVM_ISA_EXT_ARR(ZKSED),
+ KVM_ISA_EXT_ARR(ZKSH),
+ KVM_ISA_EXT_ARR(ZKT),
+ KVM_ISA_EXT_ARR(ZVBB),
+ KVM_ISA_EXT_ARR(ZVBC),
+ KVM_ISA_EXT_ARR(ZVFH),
+ KVM_ISA_EXT_ARR(ZVFHMIN),
+ KVM_ISA_EXT_ARR(ZVKB),
+ KVM_ISA_EXT_ARR(ZVKG),
+ KVM_ISA_EXT_ARR(ZVKNED),
+ KVM_ISA_EXT_ARR(ZVKNHA),
+ KVM_ISA_EXT_ARR(ZVKNHB),
+ KVM_ISA_EXT_ARR(ZVKSED),
+ KVM_ISA_EXT_ARR(ZVKSH),
+ KVM_ISA_EXT_ARR(ZVKT),
};
if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name))
@@ -888,15 +942,42 @@ KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbkb, ZBKB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbkc, ZBKC);
+KVM_ISA_EXT_SIMPLE_CONFIG(zbkx, ZBKX);
KVM_ISA_EXT_SIMPLE_CONFIG(zbs, ZBS);
+KVM_ISA_EXT_SIMPLE_CONFIG(zfa, ZFA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zfh, ZFH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zfhmin, ZFHMIN);
KVM_ISA_EXT_SUBLIST_CONFIG(zicbom, ZICBOM);
KVM_ISA_EXT_SUBLIST_CONFIG(zicboz, ZICBOZ);
KVM_ISA_EXT_SIMPLE_CONFIG(zicntr, ZICNTR);
KVM_ISA_EXT_SIMPLE_CONFIG(zicond, ZICOND);
KVM_ISA_EXT_SIMPLE_CONFIG(zicsr, ZICSR);
KVM_ISA_EXT_SIMPLE_CONFIG(zifencei, ZIFENCEI);
+KVM_ISA_EXT_SIMPLE_CONFIG(zihintntl, ZIHINTNTL);
KVM_ISA_EXT_SIMPLE_CONFIG(zihintpause, ZIHINTPAUSE);
KVM_ISA_EXT_SIMPLE_CONFIG(zihpm, ZIHPM);
+KVM_ISA_EXT_SIMPLE_CONFIG(zknd, ZKND);
+KVM_ISA_EXT_SIMPLE_CONFIG(zkne, ZKNE);
+KVM_ISA_EXT_SIMPLE_CONFIG(zknh, ZKNH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zkr, ZKR);
+KVM_ISA_EXT_SIMPLE_CONFIG(zksed, ZKSED);
+KVM_ISA_EXT_SIMPLE_CONFIG(zksh, ZKSH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zkt, ZKT);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvbb, ZVBB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvbc, ZVBC);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvfh, ZVFH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvfhmin, ZVFHMIN);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkb, ZVKB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkg, ZVKG);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkned, ZVKNED);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvknha, ZVKNHA);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvknhb, ZVKNHB);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvksed, ZVKSED);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvksh, ZVKSH);
+KVM_ISA_EXT_SIMPLE_CONFIG(zvkt, ZVKT);
struct vcpu_reg_list *vcpu_configs[] = {
&config_sbi_base,
@@ -914,14 +995,41 @@ struct vcpu_reg_list *vcpu_configs[] = {
&config_svpbmt,
&config_zba,
&config_zbb,
+ &config_zbc,
+ &config_zbkb,
+ &config_zbkc,
+ &config_zbkx,
&config_zbs,
+ &config_zfa,
+ &config_zfh,
+ &config_zfhmin,
&config_zicbom,
&config_zicboz,
&config_zicntr,
&config_zicond,
&config_zicsr,
&config_zifencei,
+ &config_zihintntl,
&config_zihintpause,
&config_zihpm,
+ &config_zknd,
+ &config_zkne,
+ &config_zknh,
+ &config_zkr,
+ &config_zksed,
+ &config_zksh,
+ &config_zkt,
+ &config_zvbb,
+ &config_zvbc,
+ &config_zvfh,
+ &config_zvfhmin,
+ &config_zvkb,
+ &config_zvkg,
+ &config_zvkned,
+ &config_zvknha,
+ &config_zvknhb,
+ &config_zvksed,
+ &config_zvksh,
+ &config_zvkt,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index f74e76d03b7e..28f97fb52044 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
} while (snapshot != atomic_read(&seq_cnt));
TEST_ASSERT(rseq_cpu == cpu,
- "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu);
+ "rseq CPU = %d, sched CPU = %d", rseq_cpu, cpu);
}
/*
@@ -256,7 +256,7 @@ int main(int argc, char *argv[])
* migrations given the 1us+ delay in the migration task.
*/
TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
- "Only performed %d KVM_RUNs, task stalled too much?\n", i);
+ "Only performed %d KVM_RUNs, task stalled too much?", i);
pthread_join(migration_thread, NULL);
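The snapshot loop above is the classic seqcount reader: retry until the writer's sequence count is unchanged across the read. An illustrative standalone reduction; the real test uses its own atomics and rseq plumbing:

#include <sched.h>
#include <stdatomic.h>

static atomic_int seq_cnt;      /* bumped by a migration thread (not shown) */

static int read_cpu_stable(void)
{
        int snapshot, cpu;

        do {
                snapshot = atomic_load(&seq_cnt);
                cpu = sched_getcpu();
        } while (snapshot != atomic_load(&seq_cnt));

        return cpu;
}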
diff --git a/tools/testing/selftests/kvm/s390x/resets.c b/tools/testing/selftests/kvm/s390x/resets.c
index e41e2cb8ffa9..357943f2bea8 100644
--- a/tools/testing/selftests/kvm/s390x/resets.c
+++ b/tools/testing/selftests/kvm/s390x/resets.c
@@ -78,7 +78,7 @@ static void assert_noirq(struct kvm_vcpu *vcpu)
* (notably, the emergency call interrupt we have injected) should
* be cleared by the resets, so this should be 0.
*/
- TEST_ASSERT(irqs >= 0, "Could not fetch IRQs: errno %d\n", errno);
+ TEST_ASSERT(irqs >= 0, "Could not fetch IRQs: errno %d", errno);
TEST_ASSERT(!irqs, "IRQ pending");
}
@@ -199,7 +199,7 @@ static void inject_irq(struct kvm_vcpu *vcpu)
irq->type = KVM_S390_INT_EMERGENCY;
irq->u.emerg.code = vcpu->id;
irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state);
- TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d\n", errno);
+ TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ errno %d", errno);
}
static struct kvm_vm *create_vm(struct kvm_vcpu **vcpu)
diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
index 636a70ddac1e..43fb25ddc3ec 100644
--- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
@@ -39,13 +39,13 @@ static void guest_code(void)
#define REG_COMPARE(reg) \
TEST_ASSERT(left->reg == right->reg, \
"Register " #reg \
- " values did not match: 0x%llx, 0x%llx\n", \
+ " values did not match: 0x%llx, 0x%llx", \
left->reg, right->reg)
#define REG_COMPARE32(reg) \
TEST_ASSERT(left->reg == right->reg, \
"Register " #reg \
- " values did not match: 0x%x, 0x%x\n", \
+ " values did not match: 0x%x, 0x%x", \
left->reg, right->reg)
@@ -82,14 +82,14 @@ void test_read_invalid(struct kvm_vcpu *vcpu)
run->kvm_valid_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_valid_regs = 0;
}
@@ -103,14 +103,14 @@ void test_set_invalid(struct kvm_vcpu *vcpu)
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_dirty_regs = 0;
}
@@ -125,12 +125,12 @@ void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu)
/* Request and verify all valid register sets. */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
- TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
+ TEST_ASSERT(rv == 0, "vcpu_run failed: %d", rv);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s390_sieic.icptcode == 4 &&
(run->s390_sieic.ipa >> 8) == 0x83 &&
(run->s390_sieic.ipb >> 16) == 0x501,
- "Unexpected interception code: ic=%u, ipa=0x%x, ipb=0x%x\n",
+ "Unexpected interception code: ic=%u, ipa=0x%x, ipb=0x%x",
run->s390_sieic.icptcode, run->s390_sieic.ipa,
run->s390_sieic.ipb);
@@ -161,7 +161,7 @@ void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu)
}
rv = _vcpu_run(vcpu);
- TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
+ TEST_ASSERT(rv == 0, "vcpu_run failed: %d", rv);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s.regs.gprs[11] == 0xBAD1DEA + 1,
"r11 sync regs value incorrect 0x%llx.",
@@ -193,7 +193,7 @@ void test_clear_kvm_dirty_regs_bits(struct kvm_vcpu *vcpu)
run->s.regs.gprs[11] = 0xDEADBEEF;
run->s.regs.diag318 = 0x4B1D;
rv = _vcpu_run(vcpu);
- TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
+ TEST_ASSERT(rv == 0, "vcpu_run failed: %d", rv);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s.regs.gprs[11] != 0xDEADBEEF,
"r11 sync regs value incorrect 0x%llx.",
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 075b80dbe237..40337f566eeb 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -98,11 +98,11 @@ static void wait_for_vcpu(void)
struct timespec ts;
TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
- "clock_gettime() failed: %d\n", errno);
+ "clock_gettime() failed: %d", errno);
ts.tv_sec += 2;
TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
- "sem_timedwait() failed: %d\n", errno);
+ "sem_timedwait() failed: %d", errno);
/* Wait for the vCPU thread to reenter the guest. */
usleep(100000);
@@ -302,7 +302,7 @@ static void test_delete_memory_region(void)
if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
TEST_ASSERT(regs.rip >= final_rip_start &&
regs.rip < final_rip_end,
- "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
+ "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx",
final_rip_start, final_rip_end, regs.rip);
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/system_counter_offset_test.c b/tools/testing/selftests/kvm/system_counter_offset_test.c
index 7f5b330b6a1b..513d421a9bff 100644
--- a/tools/testing/selftests/kvm/system_counter_offset_test.c
+++ b/tools/testing/selftests/kvm/system_counter_offset_test.c
@@ -108,7 +108,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
handle_abort(&uc);
return;
default:
- TEST_ASSERT(0, "unhandled ucall %ld\n",
+ TEST_ASSERT(0, "unhandled ucall %ld",
get_ucall(vcpu, &uc));
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c
index 11329e5ff945..eae521f050e0 100644
--- a/tools/testing/selftests/kvm/x86_64/amx_test.c
+++ b/tools/testing/selftests/kvm/x86_64/amx_test.c
@@ -221,7 +221,7 @@ int main(int argc, char *argv[])
vm_vaddr_t amx_cfg, tiledata, xstate;
struct ucall uc;
u32 amx_offset;
- int stage, ret;
+ int ret;
/*
* Note, all off-by-default features must be enabled before anything
@@ -263,7 +263,7 @@ int main(int argc, char *argv[])
memset(addr_gva2hva(vm, xstate), 0, PAGE_SIZE * DIV_ROUND_UP(XSAVE_SIZE, PAGE_SIZE));
vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xstate);
- for (stage = 1; ; stage++) {
+ for (;;) {
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
@@ -296,7 +296,7 @@ int main(int argc, char *argv[])
void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
/* Only check TMM0 register, 1 tile */
ret = memcmp(amx_start, tiles_data, TILE_SIZE);
- TEST_ASSERT(ret == 0, "memcmp failed, ret=%d\n", ret);
+ TEST_ASSERT(ret == 0, "memcmp failed, ret=%d", ret);
kvm_x86_state_cleanup(state);
break;
case 9:
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index 3b34d8156d1c..8c579ce714e9 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
@@ -84,7 +84,7 @@ static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
TEST_ASSERT(e1->function == e2->function &&
e1->index == e2->index && e1->flags == e2->flags,
- "CPUID entries[%d] mismtach: 0x%x.%d.%x vs. 0x%x.%d.%x\n",
+ "CPUID entries[%d] mismtach: 0x%x.%d.%x vs. 0x%x.%d.%x",
i, e1->function, e1->index, e1->flags,
e2->function, e2->index, e2->flags);
@@ -170,7 +170,7 @@ static void test_get_cpuid2(struct kvm_vcpu *vcpu)
vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
TEST_ASSERT(cpuid->nent == vcpu->cpuid->nent,
- "KVM didn't update nent on success, wanted %u, got %u\n",
+ "KVM didn't update nent on success, wanted %u, got %u",
vcpu->cpuid->nent, cpuid->nent);
for (i = 0; i < vcpu->cpuid->nent; i++) {
diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
index 634c6bfcd572..ee3b384b991c 100644
--- a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
+++ b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
@@ -92,7 +92,6 @@ static void run_test(enum vm_guest_mode mode, void *unused)
uint64_t host_num_pages;
uint64_t pages_per_slot;
int i;
- uint64_t total_4k_pages;
struct kvm_page_stats stats_populated;
struct kvm_page_stats stats_dirty_logging_enabled;
struct kvm_page_stats stats_dirty_pass[ITERATIONS];
@@ -107,6 +106,9 @@ static void run_test(enum vm_guest_mode mode, void *unused)
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
pages_per_slot = host_num_pages / SLOTS;
+ TEST_ASSERT_EQ(host_num_pages, pages_per_slot * SLOTS);
+ TEST_ASSERT(!(host_num_pages % 512),
+ "Number of pages, '%lu' not a multiple of 2MiB", host_num_pages);
bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);
@@ -165,10 +167,8 @@ static void run_test(enum vm_guest_mode mode, void *unused)
memstress_free_bitmaps(bitmaps, SLOTS);
memstress_destroy_vm(vm);
- /* Make assertions about the page counts. */
- total_4k_pages = stats_populated.pages_4k;
- total_4k_pages += stats_populated.pages_2m * 512;
- total_4k_pages += stats_populated.pages_1g * 512 * 512;
+ TEST_ASSERT_EQ((stats_populated.pages_2m * 512 +
+ stats_populated.pages_1g * 512 * 512), host_num_pages);
/*
* Check that all huge pages were split. Since large pages can only
@@ -180,19 +180,22 @@ static void run_test(enum vm_guest_mode mode, void *unused)
*/
if (dirty_log_manual_caps) {
TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
- TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
+ TEST_ASSERT(stats_clear_pass[0].pages_4k >= host_num_pages,
+ "Expected at least '%lu' 4KiB pages, found only '%lu'",
+ host_num_pages, stats_clear_pass[0].pages_4k);
TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
} else {
TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
- TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
+ TEST_ASSERT(stats_dirty_logging_enabled.pages_4k >= host_num_pages,
+ "Expected at least '%lu' 4KiB pages, found only '%lu'",
+ host_num_pages, stats_dirty_logging_enabled.pages_4k);
}
/*
* Once dirty logging is disabled and the vCPUs have touched all their
- * memory again, the page counts should be the same as they were
+ * memory again, the hugepage counts should be the same as they were
* right after initial population of memory.
*/
- TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
}
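The arithmetic behind the reworked assertions, assuming 4KiB base pages: a 2MiB hugepage spans 512 of them and a 1GiB hugepage spans 512 * 512, so a fully hugepage-backed population must account for every host page, and a full split must yield at least host_num_pages 4KiB mappings. A worked check (illustrative; the two conditions apply to the populated and post-split stats respectively):

#include <assert.h>
#include <stdint.h>

static void check_split_accounting(uint64_t split_pages_4k,
                                   uint64_t populated_pages_2m,
                                   uint64_t populated_pages_1g,
                                   uint64_t host_num_pages)
{
        /* Populated entirely with hugepages... */
        assert(populated_pages_2m * 512 +
               populated_pages_1g * 512 * 512 == host_num_pages);
        /* ...and after splitting, 4KiB pages cover at least all of them. */
        assert(split_pages_4k >= host_num_pages);
}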
diff --git a/tools/testing/selftests/kvm/x86_64/flds_emulation.h b/tools/testing/selftests/kvm/x86_64/flds_emulation.h
index 0a1573d52882..37b1a9f52864 100644
--- a/tools/testing/selftests/kvm/x86_64/flds_emulation.h
+++ b/tools/testing/selftests/kvm/x86_64/flds_emulation.h
@@ -41,7 +41,7 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu)
insn_bytes = run->emulation_failure.insn_bytes;
TEST_ASSERT(insn_bytes[0] == 0xd9 && insn_bytes[1] == 0,
- "Expected 'flds [eax]', opcode '0xd9 0x00', got opcode 0x%02x 0x%02x\n",
+ "Expected 'flds [eax]', opcode '0xd9 0x00', got opcode 0x%02x 0x%02x",
insn_bytes[0], insn_bytes[1]);
vcpu_regs_get(vcpu, &regs);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
index f5e1e98f04f9..e058bc676cd6 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
@@ -212,6 +212,7 @@ int main(void)
int stage;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME));
+ TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
@@ -220,7 +221,7 @@ int main(void)
tsc_page_gva = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
- "TSC page has to be page aligned\n");
+ "TSC page has to be page aligned");
vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
host_check_tsc_msr_rdtsc(vcpu);
@@ -237,7 +238,7 @@ int main(void)
break;
case UCALL_DONE:
/* Keep in sync with guest_main() */
- TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d\n",
+ TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d",
stage);
goto out;
default:
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 4f4193fc74ff..b923a285e96f 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -454,7 +454,7 @@ static void guest_test_msrs_access(void)
case 44:
/* MSR is not available when CPUID feature bit is unset */
if (!has_invtsc)
- continue;
+ goto next_stage;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = false;
msr->fault_expected = true;
@@ -462,7 +462,7 @@ static void guest_test_msrs_access(void)
case 45:
/* MSR is available when CPUID feature bit is set */
if (!has_invtsc)
- continue;
+ goto next_stage;
vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = false;
@@ -471,7 +471,7 @@ static void guest_test_msrs_access(void)
case 46:
/* Writing bits other than 0 is forbidden */
if (!has_invtsc)
- continue;
+ goto next_stage;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = true;
msr->write_val = 0xdeadbeef;
@@ -480,7 +480,7 @@ static void guest_test_msrs_access(void)
case 47:
/* Setting bit 0 enables the feature */
if (!has_invtsc)
- continue;
+ goto next_stage;
msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
msr->write = true;
msr->write_val = 1;
@@ -513,6 +513,7 @@ static void guest_test_msrs_access(void)
return;
}
+next_stage:
stage++;
kvm_vm_free(vm);
}
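The bug fixed above is easy to reduce: inside a loop, `continue` from a switch arm jumps past the stage++ at the bottom, so a skipped stage repeats forever. A minimal illustration with made-up names:

#include <stdbool.h>

static void walk_stages(bool has_feature)
{
        int stage = 44;

        while (stage <= 47) {
                switch (stage) {
                case 44:
                case 45:
                case 46:
                case 47:
                        if (!has_feature)
                                goto next_stage; /* `continue` would loop */
                        /* ...program and run this stage... */
                        break;
                }
next_stage:
                stage++;
        }
}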
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
index 65e5f4c05068..f1617762c22f 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
@@ -289,7 +289,7 @@ int main(int argc, char *argv[])
switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage,
- "Unexpected stage: %ld (%d expected)\n",
+ "Unexpected stage: %ld (%d expected)",
uc.args[1], stage);
break;
case UCALL_DONE:
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
index c4443f71f8dd..05b56095cf76 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c
@@ -658,7 +658,7 @@ int main(int argc, char *argv[])
switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage,
- "Unexpected stage: %ld (%d expected)\n",
+ "Unexpected stage: %ld (%d expected)",
uc.args[1], stage);
break;
case UCALL_ABORT:
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
index 1778704360a6..5bc12222d87a 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
@@ -92,7 +92,7 @@ static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
break;
} while (errno == EINTR);
- TEST_ASSERT(!r, "clock_gettime() failed: %d\n", r);
+ TEST_ASSERT(!r, "clock_gettime() failed: %d", r);
data.realtime = ts.tv_sec * NSEC_PER_SEC;
data.realtime += ts.tv_nsec;
@@ -127,47 +127,11 @@ static void enter_guest(struct kvm_vcpu *vcpu)
handle_abort(&uc);
return;
default:
- TEST_ASSERT(0, "unhandled ucall: %ld\n", uc.cmd);
+ TEST_ASSERT(0, "unhandled ucall: %ld", uc.cmd);
}
}
}
-#define CLOCKSOURCE_PATH "/sys/devices/system/clocksource/clocksource0/current_clocksource"
-
-static void check_clocksource(void)
-{
- char *clk_name;
- struct stat st;
- FILE *fp;
-
- fp = fopen(CLOCKSOURCE_PATH, "r");
- if (!fp) {
- pr_info("failed to open clocksource file: %d; assuming TSC.\n",
- errno);
- return;
- }
-
- if (fstat(fileno(fp), &st)) {
- pr_info("failed to stat clocksource file: %d; assuming TSC.\n",
- errno);
- goto out;
- }
-
- clk_name = malloc(st.st_size);
- TEST_ASSERT(clk_name, "failed to allocate buffer to read file\n");
-
- if (!fgets(clk_name, st.st_size, fp)) {
- pr_info("failed to read clocksource file: %d; assuming TSC.\n",
- ferror(fp));
- goto out;
- }
-
- TEST_ASSERT(!strncmp(clk_name, "tsc\n", st.st_size),
- "clocksource not supported: %s", clk_name);
-out:
- fclose(fp);
-}
-
int main(void)
{
struct kvm_vcpu *vcpu;
@@ -179,7 +143,7 @@ int main(void)
flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);
- check_clocksource();
+ TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 83e25bccc139..17bbb96fc4df 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -257,9 +257,9 @@ int main(int argc, char **argv)
TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES));
__TEST_REQUIRE(token == MAGIC_TOKEN,
- "This test must be run with the magic token %d.\n"
- "This is done by nx_huge_pages_test.sh, which\n"
- "also handles environment setup for the test.", MAGIC_TOKEN);
+ "This test must be run with the magic token via '-t %d'.\n"
+ "Running via nx_huge_pages_test.sh, which also handles "
+ "environment setup, is strongly recommended.", MAGIC_TOKEN);
run_test(reclaim_period_ms, false, reboot_permissions);
run_test(reclaim_period_ms, true, reboot_permissions);
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index c9a07963d68a..87011965dc41 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -44,7 +44,7 @@ static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu)
get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
- "Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
+ "Received ucall other than UCALL_SYNC: %lu", uc.cmd);
TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
"Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 283cc55597a4..a3bd54b925ab 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -866,7 +866,7 @@ static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
* userspace doesn't set any pmu filter.
*/
count = run_vcpu_to_sync(vcpu);
- TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
+ TEST_ASSERT(count, "Unexpected count value: %ld", count);
for (i = 0; i < BIT(nr_fixed_counters); i++) {
bitmap = BIT(i);
diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
index c7ef97561038..a49828adf294 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
@@ -91,7 +91,7 @@ static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
int ret;
ret = __sev_migrate_from(dst, src);
- TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno);
+ TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d", ret, errno);
}
static void test_sev_migrate_from(bool es)
@@ -113,7 +113,7 @@ static void test_sev_migrate_from(bool es)
/* Migrate the guest back to the original VM. */
ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]);
TEST_ASSERT(ret == -1 && errno == EIO,
- "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
+ "VM that was migrated from should be dead. ret %d, errno: %d", ret,
errno);
kvm_vm_free(src_vm);
@@ -172,7 +172,7 @@ static void test_sev_migrate_parameters(void)
vm_no_sev = aux_vm_create(true);
ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
TEST_ASSERT(ret == -1 && errno == EINVAL,
- "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
+ "Migrations require SEV enabled. ret %d, errno: %d", ret,
errno);
if (!have_sev_es)
@@ -187,25 +187,25 @@ static void test_sev_migrate_parameters(void)
ret = __sev_migrate_from(sev_vm, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
+ "Should not be able migrate to SEV enabled VM. ret: %d, errno: %d",
ret, errno);
ret = __sev_migrate_from(sev_es_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
+ "Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d",
ret, errno);
ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
+ "SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d",
ret, errno);
ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
+ "SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d",
ret, errno);
kvm_vm_free(sev_vm);
@@ -227,7 +227,7 @@ static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
int ret;
ret = __sev_mirror_create(dst, src);
- TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
+ TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d", ret, errno);
}
static void verify_mirror_allowed_cmds(int vm_fd)
@@ -259,7 +259,7 @@ static void verify_mirror_allowed_cmds(int vm_fd)
ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "Should not be able call command: %d. ret: %d, errno: %d\n",
+ "Should not be able call command: %d. ret: %d, errno: %d",
cmd_id, ret, errno);
}
@@ -301,18 +301,18 @@ static void test_sev_mirror_parameters(void)
ret = __sev_mirror_create(sev_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "Should not be able copy context to self. ret: %d, errno: %d\n",
+ "Should not be able copy context to self. ret: %d, errno: %d",
ret, errno);
ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu);
TEST_ASSERT(ret == -1 && errno == EINVAL,
- "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
+ "Copy context requires SEV enabled. ret %d, errno: %d", ret,
errno);
ret = __sev_mirror_create(vm_with_vcpu, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
+ "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d",
ret, errno);
if (!have_sev_es)
@@ -322,13 +322,13 @@ static void test_sev_mirror_parameters(void)
ret = __sev_mirror_create(sev_vm, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
+ "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d",
ret, errno);
ret = __sev_mirror_create(sev_es_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
- "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
+ "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d",
ret, errno);
kvm_vm_free(sev_es_vm);
diff --git a/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c b/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c
index 06edf00a97d6..1a46dd7bb391 100644
--- a/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c
@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
MEM_REGION_SIZE / PAGE_SIZE, 0);
gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
MEM_REGION_GPA, MEM_REGION_SLOT);
- TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
+ TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc");
virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, PAGE_SIZE);
@@ -102,7 +102,7 @@ int main(int argc, char *argv[])
case UCALL_DONE:
break;
default:
- TEST_FAIL("Unrecognized ucall: %lu\n", uc.cmd);
+ TEST_FAIL("Unrecognized ucall: %lu", uc.cmd);
}
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index 00965ba33f73..a91b5b145fa3 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -46,7 +46,7 @@ static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
#define REG_COMPARE(reg) \
TEST_ASSERT(left->reg == right->reg, \
"Register " #reg \
- " values did not match: 0x%llx, 0x%llx\n", \
+ " values did not match: 0x%llx, 0x%llx", \
left->reg, right->reg)
REG_COMPARE(rax);
REG_COMPARE(rbx);
@@ -230,14 +230,14 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_valid_regs = 0;
@@ -245,14 +245,14 @@ int main(int argc, char *argv[])
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+ "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_dirty_regs = 0;
diff --git a/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
index 0ed32ec903d0..dcbb3c29fb8e 100644
--- a/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
+++ b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
@@ -143,7 +143,7 @@ static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu)
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC,
- "Expect UCALL_SYNC\n");
+ "Expect UCALL_SYNC");
TEST_ASSERT(uc.args[1] == SYNC_GP, "#GP is expected.");
printf("vCPU received GP in guest.\n");
}
@@ -188,7 +188,7 @@ static void *run_ucna_injection(void *arg)
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
- "Expect UCALL_SYNC\n");
+ "Expect UCALL_SYNC");
TEST_ASSERT(uc.args[1] == SYNC_FIRST_UCNA, "Injecting first UCNA.");
printf("Injecting first UCNA at %#x.\n", FIRST_UCNA_ADDR);
@@ -198,7 +198,7 @@ static void *run_ucna_injection(void *arg)
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
- "Expect UCALL_SYNC\n");
+ "Expect UCALL_SYNC");
TEST_ASSERT(uc.args[1] == SYNC_SECOND_UCNA, "Injecting second UCNA.");
printf("Injecting second UCNA at %#x.\n", SECOND_UCNA_ADDR);
@@ -208,7 +208,7 @@ static void *run_ucna_injection(void *arg)
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
if (get_ucall(params->vcpu, &uc) == UCALL_ABORT) {
- TEST_ASSERT(false, "vCPU assertion failure: %s.\n",
+ TEST_ASSERT(false, "vCPU assertion failure: %s.",
(const char *)uc.args[0]);
}
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
index 255c50b0dc32..9481cbcf284f 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
@@ -71,7 +71,7 @@ int main(int argc, char *argv[])
break;
TEST_ASSERT(run->io.port == 0x80,
- "Expected I/O at port 0x80, got port 0x%x\n", run->io.port);
+ "Expected I/O at port 0x80, got port 0x%x", run->io.port);
/*
* Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
index 2bed5fb3a0d6..a81a24761aac 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
@@ -99,7 +99,7 @@ int main(int argc, char *argv[])
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->internal.suberror ==
KVM_INTERNAL_ERROR_EMULATION,
- "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
+ "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u",
run->internal.suberror);
break;
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index e4ad5fef52ff..7f6f5f23fb9b 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -128,17 +128,17 @@ int main(int argc, char *argv[])
*/
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
if (uc.args[1]) {
- TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
- TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
+ TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean");
+ TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest");
} else {
- TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
- TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
+ TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty");
+ TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest");
}
- TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
- TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
- TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
- TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
+ TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty");
+ TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
+ TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty");
+ TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
break;
case UCALL_DONE:
done = true;
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
index a9b827c69f32..fad3634fd9eb 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
@@ -28,7 +28,7 @@ static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
- "Expected emulation failure, got %d\n",
+ "Expected emulation failure, got %d",
run->emulation_failure.suberror);
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
index e710b6e7fb38..1759fa5cb3f2 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
@@ -116,23 +116,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
-static bool system_has_stable_tsc(void)
-{
- bool tsc_is_stable;
- FILE *fp;
- char buf[4];
-
- fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");
- if (fp == NULL)
- return false;
-
- tsc_is_stable = fgets(buf, sizeof(buf), fp) &&
- !strncmp(buf, "tsc", sizeof(buf));
-
- fclose(fp);
- return tsc_is_stable;
-}
-
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
@@ -148,7 +131,7 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
- TEST_REQUIRE(system_has_stable_tsc());
+ TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
/*
* We set L1's scale factor to be a random number from 2 to 10.
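
Here the test-local system_has_stable_tsc() is replaced by the shared sys_clocksource_is_based_on_tsc() helper. A sketch of the sysfs probe such a helper performs, assuming the same clocksource0 path the removed code used (the shared helper's exact matching may differ):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool clocksource_is_tsc(void)
	{
		char buf[16] = "";
		FILE *fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");

		if (!fp)
			return false;
		if (fgets(buf, sizeof(buf), fp))
			buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
		fclose(fp);
		return strcmp(buf, "tsc") == 0;
	}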
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
index 67ac2a3292ef..725c206ba0b9 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
@@ -216,7 +216,7 @@ static void *vcpu_thread(void *arg)
"Halting vCPU halted %lu times, woke %lu times, received %lu IPIs.\n"
"Halter TPR=%#x PPR=%#x LVR=%#x\n"
"Migrations attempted: %lu\n"
- "Migrations completed: %lu\n",
+ "Migrations completed: %lu",
vcpu->id, (const char *)uc.args[0],
params->data->ipis_sent, params->data->hlt_count,
params->data->wake_count,
@@ -288,7 +288,7 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
}
TEST_ASSERT(nodes > 1,
- "Did not find at least 2 numa nodes. Can't do migration\n");
+ "Did not find at least 2 numa nodes. Can't do migration");
fprintf(stderr, "Migrating amongst %d nodes found\n", nodes);
@@ -347,7 +347,7 @@ void do_migrations(struct test_data_page *data, int run_secs, int delay_usecs,
wake_count != data->wake_count,
"IPI, HLT and wake count have not increased "
"in the last %lu seconds. "
- "HLTer is likely hung.\n", interval_secs);
+ "HLTer is likely hung.", interval_secs);
ipis_sent = data->ipis_sent;
hlt_count = data->hlt_count;
@@ -381,7 +381,7 @@ void get_cmdline_args(int argc, char *argv[], int *run_secs,
"-m adds calls to migrate_pages while vCPUs are running."
" Default is no migrations.\n"
"-d <delay microseconds> - delay between migrate_pages() calls."
- " Default is %d microseconds.\n",
+ " Default is %d microseconds.",
DEFAULT_RUN_SECS, DEFAULT_DELAY_USECS);
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
index dc6217440db3..25a0b0db5c3c 100644
--- a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
@@ -116,7 +116,7 @@ int main(int argc, char *argv[])
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
+ "Unexpected exit reason: %u (%s),",
run->exit_reason,
exit_reason_str(run->exit_reason));
diff --git a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
index e0ddf47362e7..167c97abff1b 100644
--- a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
@@ -29,7 +29,7 @@ int main(int argc, char *argv[])
xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0,
- "MSR_IA32_XSS should be initialized to zero\n");
+ "MSR_IA32_XSS should be initialized to zero");
vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val);
diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c
index 646f778dfb1e..a6f89aaea77d 100644
--- a/tools/testing/selftests/landlock/base_test.c
+++ b/tools/testing/selftests/landlock/base_test.c
@@ -307,7 +307,7 @@ TEST(ruleset_fd_transfer)
dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
ASSERT_LE(0, dir_fd);
ASSERT_EQ(0, close(dir_fd));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
index 5b79758cae62..401e2eb092a3 100644
--- a/tools/testing/selftests/landlock/common.h
+++ b/tools/testing/selftests/landlock/common.h
@@ -9,6 +9,7 @@
#include <errno.h>
#include <linux/landlock.h>
+#include <linux/securebits.h>
#include <sys/capability.h>
#include <sys/socket.h>
#include <sys/syscall.h>
@@ -22,62 +23,8 @@
#define __maybe_unused __attribute__((__unused__))
#endif
-/*
- * TEST_F_FORK() is useful when a test drop privileges but the corresponding
- * FIXTURE_TEARDOWN() requires them (e.g. to remove files from a directory
- * where write actions are denied). For convenience, FIXTURE_TEARDOWN() is
- * also called when the test failed, but not when FIXTURE_SETUP() failed. For
- * this to be possible, we must not call abort() but instead exit smoothly
- * (hence the step print).
- */
-/* clang-format off */
-#define TEST_F_FORK(fixture_name, test_name) \
- static void fixture_name##_##test_name##_child( \
- struct __test_metadata *_metadata, \
- FIXTURE_DATA(fixture_name) *self, \
- const FIXTURE_VARIANT(fixture_name) *variant); \
- TEST_F(fixture_name, test_name) \
- { \
- int status; \
- const pid_t child = fork(); \
- if (child < 0) \
- abort(); \
- if (child == 0) { \
- _metadata->no_print = 1; \
- fixture_name##_##test_name##_child(_metadata, self, variant); \
- if (_metadata->skip) \
- _exit(255); \
- if (_metadata->passed) \
- _exit(0); \
- _exit(_metadata->step); \
- } \
- if (child != waitpid(child, &status, 0)) \
- abort(); \
- if (WIFSIGNALED(status) || !WIFEXITED(status)) { \
- _metadata->passed = 0; \
- _metadata->step = 1; \
- return; \
- } \
- switch (WEXITSTATUS(status)) { \
- case 0: \
- _metadata->passed = 1; \
- break; \
- case 255: \
- _metadata->passed = 1; \
- _metadata->skip = 1; \
- break; \
- default: \
- _metadata->passed = 0; \
- _metadata->step = WEXITSTATUS(status); \
- break; \
- } \
- } \
- static void fixture_name##_##test_name##_child( \
- struct __test_metadata __attribute__((unused)) *_metadata, \
- FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
- const FIXTURE_VARIANT(fixture_name) \
- __attribute__((unused)) *variant)
-/* clang-format on */
+/* TEST_F_FORK() should not be used for new tests. */
+#define TEST_F_FORK(fixture_name, test_name) TEST_F(fixture_name, test_name)
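
The kselftest harness now forks each test itself and propagates the child's exit code, so the hand-rolled wrapper collapses to plain TEST_F(). A condensed sketch of the fork-and-reap plumbing the removed macro implemented by hand, with the status translation reduced to pass/fail (the original also mapped 255 to skip):

	#include <stdlib.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static int run_forked(void (*test_body)(void))
	{
		int status;
		const pid_t child = fork();

		if (child < 0)
			abort();
		if (child == 0) {
			test_body();
			_exit(EXIT_SUCCESS);
		}
		if (waitpid(child, &status, 0) != child)
			abort();
		/* A signal or abnormal exit counts as failure. */
		return WIFEXITED(status) ? WEXITSTATUS(status) : EXIT_FAILURE;
	}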
#ifndef landlock_create_ruleset
static inline int
@@ -115,11 +62,16 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
/* clang-format off */
CAP_DAC_OVERRIDE,
CAP_MKNOD,
+ CAP_NET_ADMIN,
+ CAP_NET_BIND_SERVICE,
CAP_SYS_ADMIN,
CAP_SYS_CHROOT,
- CAP_NET_BIND_SERVICE,
/* clang-format on */
};
+ const unsigned int noroot = SECBIT_NOROOT | SECBIT_NOROOT_LOCKED;
+
+ if ((cap_get_secbits() & noroot) != noroot)
+ EXPECT_EQ(0, cap_set_secbits(noroot));
cap_p = cap_get_proc();
EXPECT_NE(NULL, cap_p)
@@ -137,6 +89,8 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
}
}
+
+ /* Automatically resets ambient capabilities. */
EXPECT_NE(-1, cap_set_proc(cap_p))
{
TH_LOG("Failed to cap_set_proc: %s", strerror(errno));
@@ -145,6 +99,9 @@ static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
{
TH_LOG("Failed to cap_free: %s", strerror(errno));
}
+
+ /* Quickly checks that ambient capabilities are cleared. */
+ EXPECT_NE(-1, cap_get_ambient(caps[0]));
}
/* We cannot put such helpers in a library because of kselftest_harness.h . */
@@ -158,8 +115,9 @@ static void __maybe_unused drop_caps(struct __test_metadata *const _metadata)
_init_caps(_metadata, true);
}
-static void _effective_cap(struct __test_metadata *const _metadata,
- const cap_value_t caps, const cap_flag_value_t value)
+static void _change_cap(struct __test_metadata *const _metadata,
+ const cap_flag_t flag, const cap_value_t cap,
+ const cap_flag_value_t value)
{
cap_t cap_p;
@@ -168,7 +126,7 @@ static void _effective_cap(struct __test_metadata *const _metadata,
{
TH_LOG("Failed to cap_get_proc: %s", strerror(errno));
}
- EXPECT_NE(-1, cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &caps, value))
+ EXPECT_NE(-1, cap_set_flag(cap_p, flag, 1, &cap, value))
{
TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
}
@@ -183,15 +141,35 @@ static void _effective_cap(struct __test_metadata *const _metadata,
}
static void __maybe_unused set_cap(struct __test_metadata *const _metadata,
- const cap_value_t caps)
+ const cap_value_t cap)
{
- _effective_cap(_metadata, caps, CAP_SET);
+ _change_cap(_metadata, CAP_EFFECTIVE, cap, CAP_SET);
}
static void __maybe_unused clear_cap(struct __test_metadata *const _metadata,
- const cap_value_t caps)
+ const cap_value_t cap)
+{
+ _change_cap(_metadata, CAP_EFFECTIVE, cap, CAP_CLEAR);
+}
+
+static void __maybe_unused
+set_ambient_cap(struct __test_metadata *const _metadata, const cap_value_t cap)
+{
+ _change_cap(_metadata, CAP_INHERITABLE, cap, CAP_SET);
+
+ EXPECT_NE(-1, cap_set_ambient(cap, CAP_SET))
+ {
+ TH_LOG("Failed to set ambient capability %d: %s", cap,
+ strerror(errno));
+ }
+}
+
+static void __maybe_unused clear_ambient_cap(
+ struct __test_metadata *const _metadata, const cap_value_t cap)
{
- _effective_cap(_metadata, caps, CAP_CLEAR);
+ EXPECT_EQ(1, cap_get_ambient(cap));
+ _change_cap(_metadata, CAP_INHERITABLE, cap, CAP_CLEAR);
+ EXPECT_EQ(0, cap_get_ambient(cap));
}
/* Receives an FD from a UNIX socket. Returns the received FD, or -errno. */
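
set_ambient_cap() and clear_ambient_cap() rely on libcap's cap_set_ambient() and cap_get_ambient() (available since libcap 2.44), which wrap prctl(PR_CAP_AMBIENT, ...). An ambient capability can only be raised while it is present in both the permitted and inheritable sets, which is why the helper raises CAP_INHERITABLE first. A standalone sketch, linked with -lcap:

	#include <stdio.h>
	#include <sys/capability.h>

	int main(void)
	{
		cap_t caps = cap_get_proc();
		cap_value_t cap = CAP_NET_ADMIN;

		/* Ambient requires the cap in permitted and inheritable. */
		if (cap_set_flag(caps, CAP_INHERITABLE, 1, &cap, CAP_SET) ||
		    cap_set_proc(caps))
			perror("cap_set_proc");
		cap_free(caps);

		if (cap_set_ambient(cap, CAP_SET))
			perror("cap_set_ambient");
		printf("ambient CAP_NET_ADMIN: %d\n", cap_get_ambient(cap));
		return 0;
	}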
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 50818904397c..9a6036fbf289 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -241,9 +241,11 @@ struct mnt_opt {
const char *const data;
};
-const struct mnt_opt mnt_tmp = {
+#define MNT_TMP_DATA "size=4m,mode=700"
+
+static const struct mnt_opt mnt_tmp = {
.type = "tmpfs",
- .data = "size=4m,mode=700",
+ .data = MNT_TMP_DATA,
};
static int mount_opt(const struct mnt_opt *const mnt, const char *const target)
@@ -283,6 +285,8 @@ static void prepare_layout_opt(struct __test_metadata *const _metadata,
static void prepare_layout(struct __test_metadata *const _metadata)
{
+ _metadata->teardown_parent = true;
+
prepare_layout_opt(_metadata, &mnt_tmp);
}
@@ -1962,7 +1966,7 @@ static void test_execute(struct __test_metadata *const _metadata, const int err,
strerror(errno));
};
ASSERT_EQ(err, errno);
- _exit(_metadata->passed ? 2 : 1);
+ _exit(__test_passed(_metadata) ? 2 : 1);
return;
}
ASSERT_EQ(child, waitpid(child, &status, 0));
@@ -3805,7 +3809,7 @@ TEST_F_FORK(ftruncate, open_and_ftruncate_in_different_processes)
ASSERT_EQ(0, close(socket_fds[0]));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
@@ -3859,9 +3863,7 @@ FIXTURE_SETUP(layout1_bind)
FIXTURE_TEARDOWN(layout1_bind)
{
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(dir_s2d2));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+ /* umount(dir_s2d2) is handled by namespace lifetime. */
remove_layout1(_metadata);
@@ -4274,9 +4276,8 @@ FIXTURE_TEARDOWN(layout2_overlay)
EXPECT_EQ(0, remove_path(lower_fl1));
EXPECT_EQ(0, remove_path(lower_do1_fo2));
EXPECT_EQ(0, remove_path(lower_fo1));
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(LOWER_BASE));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ /* umount(LOWER_BASE) is handled by namespace lifetime. */
EXPECT_EQ(0, remove_path(LOWER_BASE));
EXPECT_EQ(0, remove_path(upper_do1_fu3));
@@ -4285,14 +4286,11 @@ FIXTURE_TEARDOWN(layout2_overlay)
EXPECT_EQ(0, remove_path(upper_do1_fo2));
EXPECT_EQ(0, remove_path(upper_fo1));
EXPECT_EQ(0, remove_path(UPPER_WORK "/work"));
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(UPPER_BASE));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ /* umount(UPPER_BASE)) is handled by namespace lifetime. */
EXPECT_EQ(0, remove_path(UPPER_BASE));
- set_cap(_metadata, CAP_SYS_ADMIN);
- EXPECT_EQ(0, umount(MERGE_DATA));
- clear_cap(_metadata, CAP_SYS_ADMIN);
+ /* umount(MERGE_DATA) is handled by namespace lifetime. */
EXPECT_EQ(0, remove_path(MERGE_DATA));
cleanup_layout(_metadata);
@@ -4632,7 +4630,10 @@ FIXTURE_VARIANT(layout3_fs)
/* clang-format off */
FIXTURE_VARIANT_ADD(layout3_fs, tmpfs) {
/* clang-format on */
- .mnt = mnt_tmp,
+ .mnt = {
+ .type = "tmpfs",
+ .data = MNT_TMP_DATA,
+ },
.file_path = file1_s1d1,
};
@@ -4686,6 +4687,8 @@ FIXTURE_SETUP(layout3_fs)
SKIP(return, "this filesystem is not supported (setup)");
}
+ _metadata->teardown_parent = true;
+
slash = strrchr(variant->file_path, '/');
ASSERT_NE(slash, NULL);
dir_len = (size_t)slash - (size_t)variant->file_path;
diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c
index ea5f727dd257..f21cfbbc3638 100644
--- a/tools/testing/selftests/landlock/net_test.c
+++ b/tools/testing/selftests/landlock/net_test.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <sys/prctl.h>
#include <sys/socket.h>
+#include <sys/syscall.h>
#include <sys/un.h>
#include "common.h"
@@ -54,6 +55,11 @@ struct service_fixture {
};
};
+static pid_t sys_gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+
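
The wrapper is needed because glibc only gained a gettid() function in version 2.30; the raw syscall works on any libc. Minimal equivalent usage:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* Thread ID via raw syscall; no glibc >= 2.30 requirement. */
		printf("tid=%ld\n", syscall(__NR_gettid));
		return 0;
	}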
static int set_service(struct service_fixture *const srv,
const struct protocol_variant prot,
const unsigned short index)
@@ -88,7 +94,7 @@ static int set_service(struct service_fixture *const srv,
case AF_UNIX:
srv->unix_addr.sun_family = prot.domain;
sprintf(srv->unix_addr.sun_path,
- "_selftests-landlock-net-tid%d-index%d", gettid(),
+ "_selftests-landlock-net-tid%d-index%d", sys_gettid(),
index);
srv->unix_addr_len = SUN_LEN(&srv->unix_addr);
srv->unix_addr.sun_path[0] = '\0';
@@ -101,8 +107,11 @@ static void setup_loopback(struct __test_metadata *const _metadata)
{
set_cap(_metadata, CAP_SYS_ADMIN);
ASSERT_EQ(0, unshare(CLONE_NEWNET));
- ASSERT_EQ(0, system("ip link set dev lo up"));
clear_cap(_metadata, CAP_SYS_ADMIN);
+
+ set_ambient_cap(_metadata, CAP_NET_ADMIN);
+ ASSERT_EQ(0, system("ip link set dev lo up"));
+ clear_ambient_cap(_metadata, CAP_NET_ADMIN);
}
static bool is_restricted(const struct protocol_variant *const prot,
@@ -530,7 +539,7 @@ static void test_bind_and_connect(struct __test_metadata *const _metadata,
}
EXPECT_EQ(0, close(connect_fd));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
@@ -825,7 +834,7 @@ TEST_F(protocol, connect_unspec)
}
EXPECT_EQ(0, close(connect_fd));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c
index 55e7871631a1..a19db4d0b3bd 100644
--- a/tools/testing/selftests/landlock/ptrace_test.c
+++ b/tools/testing/selftests/landlock/ptrace_test.c
@@ -314,7 +314,7 @@ TEST_F(hierarchy, trace)
ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
if (variant->domain_both) {
create_domain(_metadata);
- if (!_metadata->passed)
+ if (!__test_passed(_metadata))
/* Aborts before forking. */
return;
}
@@ -375,7 +375,7 @@ TEST_F(hierarchy, trace)
/* Waits for the parent PTRACE_ATTACH test. */
ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
- _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ _exit(_metadata->exit_code);
return;
}
@@ -430,9 +430,10 @@ TEST_F(hierarchy, trace)
/* Signals that the parent PTRACE_ATTACH test is done. */
ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
ASSERT_EQ(child, waitpid(child, &status, 0));
+
if (WIFSIGNALED(status) || !WIFEXITED(status) ||
WEXITSTATUS(status) != EXIT_SUCCESS)
- _metadata->passed = 0;
+ _metadata->exit_code = KSFT_FAIL;
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index aa646e0661f3..087fee22dd53 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -69,11 +69,29 @@ define RUN_TESTS
run_many $(1)
endef
+define INSTALL_INCLUDES
+ $(if $(TEST_INCLUDES), \
+ relative_files=""; \
+ for entry in $(TEST_INCLUDES); do \
+ entry_dir=$$(readlink -e "$$(dirname "$$entry")"); \
+ entry_name=$$(basename "$$entry"); \
+ relative_dir=$${entry_dir#"$$SRC_PATH"/}; \
+ if [ "$$relative_dir" = "$$entry_dir" ]; then \
+ echo "Error: TEST_INCLUDES entry \"$$entry\" not located inside selftests directory ($$SRC_PATH)" >&2; \
+ exit 1; \
+ fi; \
+ relative_files="$$relative_files $$relative_dir/$$entry_name"; \
+ done; \
+ cd $(SRC_PATH) && rsync -aR $$relative_files $(OBJ_PATH)/ \
+ )
+endef
+
run_tests: all
ifdef building_out_of_srctree
@if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
rsync -aq --copy-unsafe-links $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
fi
+ @$(INSTALL_INCLUDES)
@if [ "X$(TEST_PROGS)" != "X" ]; then \
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
$(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
@@ -103,6 +121,7 @@ endef
install: all
ifdef INSTALL_PATH
$(INSTALL_RULE)
+ $(INSTALL_INCLUDES)
else
$(error Error: set INSTALL_PATH to use install)
endif
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index c8416c54b463..b1fd7362c2fe 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -42,17 +42,6 @@ function die() {
exit 1
}
-# save existing dmesg so we can detect new content
-function save_dmesg() {
- SAVED_DMESG=$(mktemp --tmpdir -t klp-dmesg-XXXXXX)
- dmesg > "$SAVED_DMESG"
-}
-
-# cleanup temporary dmesg file from save_dmesg()
-function cleanup_dmesg_file() {
- rm -f "$SAVED_DMESG"
-}
-
function push_config() {
DYNAMIC_DEBUG=$(grep '^kernel/livepatch' /sys/kernel/debug/dynamic_debug/control | \
awk -F'[: ]' '{print "file " $1 " line " $2 " " $4}')
@@ -99,7 +88,6 @@ function set_ftrace_enabled() {
function cleanup() {
pop_config
- cleanup_dmesg_file
}
# setup_config - save the current config and set a script exit trap that
@@ -280,7 +268,15 @@ function set_pre_patch_ret {
function start_test {
local test="$1"
- save_dmesg
+ # Dump something unique into the dmesg log, then stash the entry
+ # in LAST_DMESG. The check_result() function will use it to
+ # find new kernel messages since the test started.
+ local last_dmesg_msg="livepatch kselftest timestamp: $(date --rfc-3339=ns)"
+ log "$last_dmesg_msg"
+ loop_until 'dmesg | grep -q "$last_dmesg_msg"' ||
+ die "buffer busy? can't find canary dmesg message: $last_dmesg_msg"
+ LAST_DMESG=$(dmesg | grep "$last_dmesg_msg")
+
echo -n "TEST: $test ... "
log "===== TEST: $test ====="
}
@@ -291,23 +287,24 @@ function check_result {
local expect="$*"
local result
- # Note: when comparing dmesg output, the kernel log timestamps
- # help differentiate repeated testing runs. Remove them with a
- # post-comparison sed filter.
-
- result=$(dmesg | comm --nocheck-order -13 "$SAVED_DMESG" - | \
+ # Test results include any new dmesg entries since LAST_DMESG, then:
+ # - include lines matching keywords
+ # - exclude lines matching keywords
+ # - filter out dmesg timestamp prefixes
+ result=$(dmesg | awk -v last_dmesg="$LAST_DMESG" 'p; $0 == last_dmesg { p=1 }' | \
grep -e 'livepatch:' -e 'test_klp' | \
grep -v '\(tainting\|taints\) kernel' | \
sed 's/^\[[ 0-9.]*\] //')
if [[ "$expect" == "$result" ]] ; then
echo "ok"
+ elif [[ "$result" == "" ]] ; then
+ echo -e "not ok\n\nbuffer overrun? can't find canary dmesg entry: $LAST_DMESG\n"
+ die "livepatch kselftest(s) failed"
else
echo -e "not ok\n\n$(diff -upr --label expected --label result <(echo "$expect") <(echo "$result"))\n"
die "livepatch kselftest(s) failed"
fi
-
- cleanup_dmesg_file
}
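
The awk one-liner above ('p; $0 == last_dmesg { p=1 }') prints only the lines that follow an exact match of the canary entry. The same filter in C, assuming exact line matches just like the awk program:

	#include <stdio.h>
	#include <string.h>

	static void print_after_canary(FILE *in, const char *canary)
	{
		char line[4096];
		int seen = 0;

		while (fgets(line, sizeof(line), in)) {
			if (seen) {
				fputs(line, stdout);
				continue;
			}
			line[strcspn(line, "\n")] = '\0';
			if (strcmp(line, canary) == 0)
				seen = 1;	/* start printing after this line */
		}
	}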
# check_sysfs_rights(modname, rel_path, expected_rights) - check sysfs
diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
index 0899019a7fcb..e14bdd4455f2 100755
--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Kselftest framework requirement - SKIP code is 4.
diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c
index 20294553a5dd..d2cfc9b494a0 100644
--- a/tools/testing/selftests/mm/hmm-tests.c
+++ b/tools/testing/selftests/mm/hmm-tests.c
@@ -138,7 +138,7 @@ FIXTURE_SETUP(hmm)
self->fd = hmm_open(variant->device_number);
if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
- SKIP(exit(0), "DEVICE_COHERENT not available");
+ SKIP(return, "DEVICE_COHERENT not available");
ASSERT_GE(self->fd, 0);
}
@@ -149,7 +149,7 @@ FIXTURE_SETUP(hmm2)
self->fd0 = hmm_open(variant->device_number0);
if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
- SKIP(exit(0), "DEVICE_COHERENT not available");
+ SKIP(return, "DEVICE_COHERENT not available");
ASSERT_GE(self->fd0, 0);
self->fd1 = hmm_open(variant->device_number1);
ASSERT_GE(self->fd1, 0);
diff --git a/tools/testing/selftests/mm/ksm_tests.c b/tools/testing/selftests/mm/ksm_tests.c
index 380b691d3eb9..b748c48908d9 100644
--- a/tools/testing/selftests/mm/ksm_tests.c
+++ b/tools/testing/selftests/mm/ksm_tests.c
@@ -566,7 +566,7 @@ static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
if (map_ptr_orig == MAP_FAILED)
err(2, "initial mmap");
- if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE))
+ if (madvise(map_ptr, len, MADV_HUGEPAGE))
err(2, "MADV_HUGEPAGE");
pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
diff --git a/tools/testing/selftests/mm/map_hugetlb.c b/tools/testing/selftests/mm/map_hugetlb.c
index 193281560b61..86e8f2048a40 100644
--- a/tools/testing/selftests/mm/map_hugetlb.c
+++ b/tools/testing/selftests/mm/map_hugetlb.c
@@ -15,6 +15,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
+#include "vm_util.h"
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
@@ -58,10 +59,16 @@ int main(int argc, char **argv)
{
void *addr;
int ret;
+ size_t hugepage_size;
size_t length = LENGTH;
int flags = FLAGS;
int shift = 0;
+ hugepage_size = default_huge_page_size();
+ /* munmap will fail if the length is not huge page aligned */
+ if (hugepage_size > length)
+ length = hugepage_size;
+
if (argc > 1)
length = atol(argv[1]) << 20;
if (argc > 2) {
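
For hugetlb mappings, munmap() requires a length that is a multiple of the huge page size, so the fixed 256 MiB default is bumped whenever the system's huge pages are larger than that. A more general variant would round any requested length up to the huge page boundary, along these lines:

	/* Round len up to the next multiple of the huge page size (sketch). */
	static size_t round_up_huge(size_t len, size_t hpage_size)
	{
		return (len + hpage_size - 1) / hpage_size * hpage_size;
	}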
diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c
index 1d4c1589c305..2f8b991f78cb 100644
--- a/tools/testing/selftests/mm/mremap_test.c
+++ b/tools/testing/selftests/mm/mremap_test.c
@@ -360,7 +360,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
char pattern_seed)
{
void *addr, *src_addr, *dest_addr, *dest_preamble_addr;
- unsigned long long i;
+ int d;
+ unsigned long long t;
struct timespec t_start = {0, 0}, t_end = {0, 0};
long long start_ns, end_ns, align_mask, ret, offset;
unsigned long long threshold;
@@ -378,8 +379,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Set byte pattern for source block. */
srand(pattern_seed);
- for (i = 0; i < threshold; i++)
- memset((char *) src_addr + i, (char) rand(), 1);
+ for (t = 0; t < threshold; t++)
+ memset((char *) src_addr + t, (char) rand(), 1);
/* Mask to zero out lower bits of address for alignment */
align_mask = ~(c.dest_alignment - 1);
@@ -420,8 +421,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Set byte pattern for the dest preamble block. */
srand(pattern_seed);
- for (i = 0; i < c.dest_preamble_size; i++)
- memset((char *) dest_preamble_addr + i, (char) rand(), 1);
+ for (d = 0; d < c.dest_preamble_size; d++)
+ memset((char *) dest_preamble_addr + d, (char) rand(), 1);
}
clock_gettime(CLOCK_MONOTONIC, &t_start);
@@ -437,14 +438,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Verify byte pattern after remapping */
srand(pattern_seed);
- for (i = 0; i < threshold; i++) {
+ for (t = 0; t < threshold; t++) {
char c = (char) rand();
- if (((char *) dest_addr)[i] != c) {
+ if (((char *) dest_addr)[t] != c) {
ksft_print_msg("Data after remap doesn't match at offset %llu\n",
- i);
+ t);
ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
- ((char *) dest_addr)[i] & 0xff);
+ ((char *) dest_addr)[t] & 0xff);
ret = -1;
goto clean_up_dest;
}
@@ -453,14 +454,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Verify the dest preamble byte pattern after remapping */
if (c.dest_preamble_size) {
srand(pattern_seed);
- for (i = 0; i < c.dest_preamble_size; i++) {
+ for (d = 0; d < c.dest_preamble_size; d++) {
char c = (char) rand();
- if (((char *) dest_preamble_addr)[i] != c) {
+ if (((char *) dest_preamble_addr)[d] != c) {
ksft_print_msg("Preamble data after remap doesn't match at offset %d\n",
- i);
+ d);
ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
- ((char *) dest_preamble_addr)[i] & 0xff);
+ ((char *) dest_preamble_addr)[d] & 0xff);
ret = -1;
goto clean_up_dest;
}
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index cce90a10515a..2b9f8cc52639 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -1517,6 +1517,12 @@ int main(int argc, char *argv[])
continue;
uffd_test_start("%s on %s", test->name, mem_type->name);
+ if ((mem_type->mem_flag == MEM_HUGETLB ||
+ mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
+ (default_huge_page_size() == 0)) {
+ uffd_test_skip("huge page size is 0, feature missing?");
+ continue;
+ }
if (!uffd_feature_supported(test)) {
uffd_test_skip("feature missing");
continue;
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.sh b/tools/testing/selftests/mm/va_high_addr_switch.sh
index 45cae7cab27e..a0a75f302904 100755
--- a/tools/testing/selftests/mm/va_high_addr_switch.sh
+++ b/tools/testing/selftests/mm/va_high_addr_switch.sh
@@ -29,9 +29,15 @@ check_supported_x86_64()
# See man 1 gzip under '-f'.
local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
+ local cpu_supports_pl5=$(awk '/^flags/ {if (/la57/) {print 0;}
+ else {print 1}; exit}' /proc/cpuinfo 2>/dev/null)
+
if [[ "${pg_table_levels}" -lt 5 ]]; then
echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
exit $ksft_skip
+ elif [[ "${cpu_supports_pl5}" -ne 0 ]]; then
+ echo "$0: CPU does not have the necessary la57 flag to support page table level 5"
+ exit $ksft_skip
fi
}
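
Parsing /proc/cpuinfo for the la57 flag keeps the check in shell; the same bit comes straight from CPUID leaf 7 (subleaf 0), where ECX bit 16 advertises 5-level paging. A C equivalent using the compiler-provided cpuid.h (GCC/Clang on x86):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
			return 1;
		/* CPUID.(EAX=07H,ECX=0):ECX[16] is the LA57 feature bit. */
		printf("la57 %ssupported\n", (ecx & (1u << 16)) ? "" : "not ");
		return 0;
	}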
diff --git a/tools/testing/selftests/mm/write_hugetlb_memory.sh b/tools/testing/selftests/mm/write_hugetlb_memory.sh
index 70a02301f4c2..3d2d2eb9d6ff 100755
--- a/tools/testing/selftests/mm/write_hugetlb_memory.sh
+++ b/tools/testing/selftests/mm/write_hugetlb_memory.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
set -e
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 50818075e566..7b6918d5f4af 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -53,8 +53,7 @@ TEST_PROGS += bind_bhash.sh
TEST_PROGS += ip_local_port_range.sh
TEST_PROGS += rps_default_mask.sh
TEST_PROGS += big_tcp.sh
-TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
-TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh lib.sh
+TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
@@ -84,6 +83,7 @@ TEST_PROGS += sctp_vrf.sh
TEST_GEN_FILES += sctp_hello
TEST_GEN_FILES += csum
TEST_GEN_FILES += nat6to4.o
+TEST_GEN_FILES += xdp_dummy.o
TEST_GEN_FILES += ip_local_port_range
TEST_GEN_FILES += bind_wildcard
TEST_PROGS += test_vxlan_mdb.sh
@@ -95,6 +95,9 @@ TEST_PROGS += fq_band_pktlimit.sh
TEST_PROGS += vlan_hw_filter.sh
TEST_FILES := settings
+TEST_FILES += in_netns.sh lib.sh net_helper.sh setup_loopback.sh setup_veth.sh
+
+TEST_INCLUDES := forwarding/lib.sh
include ../lib.mk
@@ -104,7 +107,7 @@ $(OUTPUT)/tcp_inq: LDLIBS += -lpthread
$(OUTPUT)/bind_bhash: LDLIBS += -lpthread
$(OUTPUT)/io_uring_zerocopy_tx: CFLAGS += -I../../../include/
-# Rules to generate bpf obj nat6to4.o
+# Rules to generate bpf objs
CLANG ?= clang
SCRATCH_DIR := $(OUTPUT)/tools
BUILD_DIR := $(SCRATCH_DIR)/build
@@ -139,7 +142,7 @@ endif
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
-$(OUTPUT)/nat6to4.o: nat6to4.c $(BPFOBJ) | $(MAKE_DIRS)
+$(OUTPUT)/nat6to4.o $(OUTPUT)/xdp_dummy.o: $(OUTPUT)/%.o : %.c $(BPFOBJ) | $(MAKE_DIRS)
$(CLANG) -O2 --target=bpf -c $< $(CCINCLUDE) $(CLANG_SYS_INCLUDES) -o $@
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
diff --git a/tools/testing/selftests/net/big_tcp.sh b/tools/testing/selftests/net/big_tcp.sh
index cde9a91c4797..2db9d15cd45f 100755
--- a/tools/testing/selftests/net/big_tcp.sh
+++ b/tools/testing/selftests/net/big_tcp.sh
@@ -122,7 +122,9 @@ do_netperf() {
local netns=$1
[ "$NF" = "6" ] && serip=$SERVER_IP6
- ip net exec $netns netperf -$NF -t TCP_STREAM -H $serip 2>&1 >/dev/null
+
+ # Use a large write to make sure big TCP packets are generated
+ ip net exec $netns netperf -$NF -t TCP_STREAM -l 1 -H $serip -- -m 262144 2>&1 >/dev/null
}
do_test() {
diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh
index f30bd57d5e38..8bc23fb4c82b 100755
--- a/tools/testing/selftests/net/cmsg_ipv6.sh
+++ b/tools/testing/selftests/net/cmsg_ipv6.sh
@@ -89,7 +89,7 @@ for ovr in setsock cmsg both diff; do
check_result $? 0 "TCLASS $prot $ovr - pass"
while [ -d /proc/$BG ]; do
- $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
+ $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234
done
tcpdump -r $TMPF -v 2>&1 | grep "class $TOS2" >> /dev/null
@@ -126,7 +126,7 @@ for ovr in setsock cmsg both diff; do
check_result $? 0 "HOPLIMIT $prot $ovr - pass"
while [ -d /proc/$BG ]; do
- $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
+ $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234
done
tcpdump -r $TMPF -v 2>&1 | grep "hlim $LIM[^0-9]" >> /dev/null
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 8da562a9ae87..5e4390cac17e 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,5 +1,6 @@
CONFIG_USER_NS=y
CONFIG_NET_NS=y
+CONFIG_BONDING=m
CONFIG_BPF_SYSCALL=y
CONFIG_TEST_BPF=m
CONFIG_NUMA=y
@@ -14,30 +15,74 @@ CONFIG_VETH=y
CONFIG_NET_IPVTI=y
CONFIG_IPV6_VTI=y
CONFIG_DUMMY=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_BRIDGE=y
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_VLAN_8021Q=y
+CONFIG_GENEVE=m
CONFIG_IFB=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_ESP=y
+CONFIG_INET_ESP_OFFLOAD=y
+CONFIG_NET_FOU=y
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_IP_GRE=m
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
+CONFIG_IPV6_SIT=y
+CONFIG_IP_DCCP=m
CONFIG_NF_NAT=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_RAW=m
CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_L2TP_ETH=m
+CONFIG_L2TP_IP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_V3=y
+CONFIG_MACSEC=m
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MPLS=y
+CONFIG_MPTCP=y
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_NAT=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_CT=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPIP=y
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_FQ=m
CONFIG_NET_SCH_ETF=m
CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_PSAMPLE=m
+CONFIG_TCP_MD5SIG=y
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_KALLSYMS=y
+CONFIG_TLS=m
CONFIG_TRACEPOINTS=y
CONFIG_NET_DROP_MONITOR=m
CONFIG_NETDEVSIM=m
-CONFIG_NET_FOU=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_SCH_INGRESS=m
@@ -48,7 +93,10 @@ CONFIG_BAREUDP=m
CONFIG_IPV6_IOAM6_LWTUNNEL=y
CONFIG_CRYPTO_SM4_GENERIC=y
CONFIG_AMT=m
+CONFIG_TUN=y
CONFIG_VXLAN=m
CONFIG_IP_SCTP=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_CRYPTO_ARIA=y
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 0d4f252427e2..386ebd829df5 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -38,6 +38,9 @@
# server / client nomenclature relative to ns-A
source lib.sh
+
+PATH=$PWD:$PWD/tools/testing/selftests/net:$PATH
+
VERBOSE=0
NSA_DEV=eth1
@@ -97,6 +100,7 @@ log_test()
local rc=$1
local expected=$2
local msg="$3"
+ local ans
[ "${VERBOSE}" = "1" ] && echo
@@ -106,19 +110,20 @@ log_test()
else
nfail=$((nfail+1))
printf "TEST: %-70s [FAIL]\n" "${msg}"
+ echo " expected rc $expected; actual rc $rc"
if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
echo
echo "hit enter to continue, 'q' to quit"
- read a
- [ "$a" = "q" ] && exit 1
+ read ans
+ [ "$ans" = "q" ] && exit 1
fi
fi
if [ "${PAUSE}" = "yes" ]; then
echo
echo "hit enter to continue, 'q' to quit"
- read a
- [ "$a" = "q" ] && exit 1
+ read ans
+ [ "$ans" = "q" ] && exit 1
fi
kill_procs
@@ -187,6 +192,15 @@ kill_procs()
sleep 1
}
+set_ping_group()
+{
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: ${NSA_CMD} sysctl -q -w net.ipv4.ping_group_range='0 2147483647'"
+ fi
+
+ ${NSA_CMD} sysctl -q -w net.ipv4.ping_group_range='0 2147483647'
+}
+
do_run_cmd()
{
local cmd="$*"
@@ -835,14 +849,14 @@ ipv4_ping()
set_sysctl net.ipv4.raw_l3mdev_accept=1 2>/dev/null
ipv4_ping_novrf
setup
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv4_ping_vrf
setup "yes"
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_ping_vrf
}
@@ -2053,12 +2067,12 @@ ipv4_addr_bind()
log_subsection "No VRF"
setup
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_addr_bind_novrf
log_subsection "With VRF"
setup "yes"
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv4_addr_bind_vrf
}
@@ -2521,14 +2535,14 @@ ipv6_ping()
setup
ipv6_ping_novrf
setup
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv6_ping_novrf
log_subsection "With VRF"
setup "yes"
ipv6_ping_vrf
setup "yes"
- set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
+ set_ping_group
ipv6_ping_vrf
}
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index b3ecccbbfcd2..73895711cdf4 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -743,6 +743,43 @@ fib_notify_test()
cleanup &> /dev/null
}
+# Create a new dummy_10 to remove all associated routes.
+reset_dummy_10()
+{
+ $IP link del dev dummy_10
+
+ $IP link add dummy_10 type dummy
+ $IP link set dev dummy_10 up
+ $IP -6 address add 2001:10::1/64 dev dummy_10
+}
+
+check_rt_num()
+{
+ local expected=$1
+ local num=$2
+
+ if [ $num -ne $expected ]; then
+ echo "FAIL: Expected $expected routes, got $num"
+ ret=1
+ else
+ ret=0
+ fi
+}
+
+check_rt_num_clean()
+{
+ local expected=$1
+ local num=$2
+
+ if [ $num -ne $expected ]; then
+ log_test 1 0 "expected $expected routes, got $num"
+ set +e
+ cleanup &> /dev/null
+ return 1
+ fi
+ return 0
+}
+
fib6_gc_test()
{
setup
@@ -751,7 +788,8 @@ fib6_gc_test()
echo "Fib6 garbage collection test"
set -e
- EXPIRE=3
+ EXPIRE=5
+ GC_WAIT_TIME=$((EXPIRE * 2 + 2))
# Check expiration of routes every $EXPIRE seconds (GC)
$NS_EXEC sysctl -wq net.ipv6.route.gc_interval=$EXPIRE
@@ -763,44 +801,110 @@ fib6_gc_test()
$NS_EXEC sysctl -wq net.ipv6.route.flush=1
# Temporary routes
- for i in $(seq 1 1000); do
+ for i in $(seq 1 5); do
# Expire route after $EXPIRE seconds
$IP -6 route add 2001:20::$i \
via 2001:10::2 dev dummy_10 expires $EXPIRE
done
- sleep $(($EXPIRE * 2))
- N_EXP_SLEEP=$($IP -6 route list |grep expires|wc -l)
- if [ $N_EXP_SLEEP -ne 0 ]; then
- echo "FAIL: expected 0 routes with expires, got $N_EXP_SLEEP"
- ret=1
- else
- ret=0
- fi
+ sleep $GC_WAIT_TIME
+ $NS_EXEC sysctl -wq net.ipv6.route.flush=1
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection"
+
+ reset_dummy_10
# Permanent routes
- for i in $(seq 1 5000); do
+ for i in $(seq 1 5); do
$IP -6 route add 2001:30::$i \
via 2001:10::2 dev dummy_10
done
# Temporary routes
- for i in $(seq 1 1000); do
+ for i in $(seq 1 5); do
# Expire route after $EXPIRE seconds
$IP -6 route add 2001:20::$i \
via 2001:10::2 dev dummy_10 expires $EXPIRE
done
- sleep $(($EXPIRE * 2))
- N_EXP_SLEEP=$($IP -6 route list |grep expires|wc -l)
- if [ $N_EXP_SLEEP -ne 0 ]; then
- echo "FAIL: expected 0 routes with expires," \
- "got $N_EXP_SLEEP (5000 permanent routes)"
- ret=1
- else
- ret=0
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (with permanent routes)"
+
+ reset_dummy_10
+
+ # Permanent routes
+ for i in $(seq 1 5); do
+ $IP -6 route add 2001:20::$i \
+ via 2001:10::2 dev dummy_10
+ done
+ # Replace with temporary routes
+ for i in $(seq 1 5); do
+ # Expire route after $EXPIRE seconds
+ $IP -6 route replace 2001:20::$i \
+ via 2001:10::2 dev dummy_10 expires $EXPIRE
+ done
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (replace with expires)"
+
+ reset_dummy_10
+
+ # Temporary routes
+ for i in $(seq 1 5); do
+ # Expire route after $EXPIRE seconds
+ $IP -6 route add 2001:20::$i \
+ via 2001:10::2 dev dummy_10 expires $EXPIRE
+ done
+ # Replace with permanent routes
+ for i in $(seq 1 5); do
+ $IP -6 route replace 2001:20::$i \
+ via 2001:10::2 dev dummy_10
+ done
+ check_rt_num_clean 0 $($IP -6 route list |grep expires|wc -l) || return
+
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 5 $($IP -6 route list |grep -v expires|grep 2001:20::|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (replace with permanent)"
+
+ # ra6 (from ipv6toolkit) is required for the next test.
+ if [ ! -x "$(command -v ra6)" ]; then
+ echo "SKIP: ra6 not found."
+ set +e
+ cleanup &> /dev/null
+ return
fi
- set +e
+ # Delete dummy_10 and remove all routes
+ $IP link del dev dummy_10
- log_test $ret 0 "ipv6 route garbage collection"
+ # Create a pair of veth devices to send an RA message from one
+ # device to another.
+ $IP link add veth1 type veth peer name veth2
+ $IP link set dev veth1 up
+ $IP link set dev veth2 up
+ $IP -6 address add 2001:10::1/64 dev veth1 nodad
+ $IP -6 address add 2001:10::2/64 dev veth2 nodad
+
+ # Make veth1 ready to receive RA messages.
+ $NS_EXEC sysctl -wq net.ipv6.conf.veth1.accept_ra=2
+
+ # Send an RA message with a route from veth2 to veth1.
+ $NS_EXEC ra6 -i veth2 -d 2001:10::1 -t $EXPIRE
+
+ # Wait for the RA message.
+ sleep 1
+
+ # systemd may mess up the test. You should make sure that
+ # systemd-networkd.service and systemd-networkd.socket are stopped.
+ check_rt_num_clean 1 $($IP -6 route list|grep expires|wc -l) || return
+
+ # Wait for GC
+ sleep $GC_WAIT_TIME
+ check_rt_num 0 $($IP -6 route list |grep expires|wc -l)
+ log_test $ret 0 "ipv6 route garbage collection (RA message)"
+
+ set +e
cleanup &> /dev/null
}
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index 452693514be4..cdefc9a5ec34 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -112,7 +112,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
vxlan_symmetric_ipv6.sh \
vxlan_symmetric.sh
-TEST_PROGS_EXTENDED := devlink_lib.sh \
+TEST_FILES := devlink_lib.sh \
ethtool_lib.sh \
fib_offload_lib.sh \
forwarding.config.sample \
@@ -129,4 +129,7 @@ TEST_PROGS_EXTENDED := devlink_lib.sh \
sch_tbf_etsprio.sh \
tc_common.sh
+TEST_INCLUDES := \
+ ../lib.sh
+
include ../../lib.mk
diff --git a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
index 9af9f6964808..c62331b2e006 100755
--- a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
@@ -327,10 +327,10 @@ locked_port_mab_redirect()
RET=0
check_port_mab_support || return 0
- bridge link set dev $swp1 learning on locked on mab on
tc qdisc add dev $swp1 clsact
tc filter add dev $swp1 ingress protocol all pref 1 handle 101 flower \
action mirred egress redirect dev $swp2
+ bridge link set dev $swp1 learning on locked on mab on
ping_do $h1 192.0.2.2
check_err $? "Ping did not work with redirection"
@@ -349,8 +349,8 @@ locked_port_mab_redirect()
check_err $? "Locked entry not created after deleting filter"
bridge fdb del `mac_get $h1` vlan 1 dev $swp1 master
- tc qdisc del dev $swp1 clsact
bridge link set dev $swp1 learning off locked off mab off
+ tc qdisc del dev $swp1 clsact
log_test "Locked port MAB redirect"
}
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
index 61348f71728c..d9d587454d20 100755
--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
@@ -329,7 +329,7 @@ __cfg_test_port_ip_star_g()
bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00"
check_err $? "(*, G) \"permanent\" entry has a pending group timer"
- bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "\/0.00"
+ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00"
check_err $? "\"permanent\" source entry has a pending source timer"
bridge mdb del dev br0 port $swp1 grp $grp vid 10
@@ -346,7 +346,7 @@ __cfg_test_port_ip_star_g()
bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00"
check_fail $? "(*, G) EXCLUDE entry does not have a pending group timer"
- bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "\/0.00"
+ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00"
check_err $? "\"blocked\" source entry has a pending source timer"
bridge mdb del dev br0 port $swp1 grp $grp vid 10
@@ -363,7 +363,7 @@ __cfg_test_port_ip_star_g()
bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q " 0.00"
check_err $? "(*, G) INCLUDE entry has a pending group timer"
- bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "\/0.00"
+ bridge -d -s mdb get dev br0 grp $grp vid 10 | grep -q "/0.00"
check_fail $? "Source entry does not have a pending source timer"
bridge mdb del dev br0 port $swp1 grp $grp vid 10
@@ -1252,14 +1252,17 @@ fwd_test()
echo
log_info "# Forwarding tests"
+ # Set the Max Response Delay to 100 centiseconds (1 second) so that the
+ # bridge will start forwarding according to its MDB soon after a
+ # multicast querier is enabled.
+ ip link set dev br0 type bridge mcast_query_response_interval 100
+
# Forwarding according to MDB entries only takes place when the bridge
# detects that there is a valid querier in the network. Set the bridge
# as the querier and assign it a valid IPv6 link-local address to be
# used as the source address for MLD queries.
ip -6 address add fe80::1/64 nodad dev br0
ip link set dev br0 type bridge mcast_querier 1
- # Wait the default Query Response Interval (10 seconds) for the bridge
- # to determine that there are no other queriers in the network.
sleep 10
fwd_test_host
@@ -1267,6 +1270,7 @@ fwd_test()
ip link set dev br0 type bridge mcast_querier 0
ip -6 address del fe80::1/64 dev br0
+ ip link set dev br0 type bridge mcast_query_response_interval 1000
}
ctrl_igmpv3_is_in_test()
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 697994a9278b..8d7a1a004b7c 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -6,14 +6,49 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NET_VRF=m
CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
+CONFIG_DUMMY=m
+CONFIG_IPV6=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_MACVLAN=m
CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_MPLS=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_ACT_VLAN=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPIP=m
+CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_ACT_GACT=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_TEAM=y
+CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_TABLES=m
CONFIG_VETH=m
CONFIG_NAMESPACES=y
CONFIG_NET_NS=y
+CONFIG_VXLAN=m
+CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
index 56eb83d1a3bd..1783c10215e5 100755
--- a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
@@ -183,42 +183,42 @@ send_src_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_flowlabel()
@@ -234,14 +234,14 @@ send_src_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:4::2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:4::2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
custom_hash_test()
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index 4a546509de90..1fc4f0242fc5 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -28,6 +28,8 @@ PING=ping
PING6=ping6
# Packet generator. Some distributions use 'mz'.
MZ=mausezahn
+# mausezahn delay between transmissions in microseconds.
+MZ_DELAY=0
# Time to wait after interfaces participating in the test are all UP
WAIT_TIME=5
# Whether to pause on failure or not.
diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
index 0446db9c6f74..9788bd0f6e8b 100755
--- a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
@@ -278,42 +278,42 @@ send_src_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_flowlabel()
@@ -329,14 +329,14 @@ send_src_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
custom_hash_test()
diff --git a/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh b/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh
index e4009f658003..efca6114a3ce 100755
--- a/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_inner_v4_multipath.sh
@@ -267,7 +267,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A "192.0.3.2-192.0.3.62" -B "192.0.4.2-192.0.4.62" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh b/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh
index e449475c4d3e..a71ad39fc0c3 100755
--- a/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_inner_v6_multipath.sh
@@ -266,9 +266,9 @@ multipath6_test()
local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
ip vrf exec v$h1 \
- $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::1e" \
- -B "2001:db8:2::2-2001:db8:2::1e" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::3e" \
+ -B "2001:db8:2::2-2001:db8:2::3e" \
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
index a8d8e8b3dc81..57531c1d884d 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -220,7 +220,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh b/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh
index d03aa2cab9fd..7d5b2b9cc133 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath_nh.sh
@@ -64,7 +64,6 @@ ALL_TESTS="
ping_ipv6
multipath_ipv4
multipath_ipv6
- multipath_ipv6_l4
"
NUM_NETIFS=6
@@ -245,7 +244,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -264,34 +263,6 @@ multipath6_test()
local weight1=$1; shift
local weight2=$1; shift
- sysctl_set net.ipv6.fib_multipath_hash_policy 0
- ip nexthop replace id 103 group 101,$weight1/102,$weight2
-
- local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- # Generate 16384 echo requests, each with a random flow label.
- for ((i=0; i < 16384; ++i)); do
- ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
- done
-
- local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- local d111=$((t1_111 - t0_111))
- local d222=$((t1_222 - t0_222))
- multipath_eval "$what" $weight1 $weight2 $d111 $d222
-
- ip nexthop replace id 103 group 101/102
- sysctl_restore net.ipv6.fib_multipath_hash_policy
-}
-
-multipath6_l4_test()
-{
- local what=$1; shift
- local weight1=$1; shift
- local weight2=$1; shift
-
sysctl_set net.ipv6.fib_multipath_hash_policy 1
ip nexthop replace id 103 group 101,$weight1/102,$weight2
@@ -300,7 +271,7 @@ multipath6_l4_test()
ip vrf exec v$h1 \
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -339,14 +310,6 @@ multipath_ipv6()
multipath6_test "Weighted MP 11:45" 11 45
}
-multipath_ipv6_l4()
-{
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
-}
-
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh b/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
index 088b65e64d66..370f9925302d 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh
@@ -64,7 +64,6 @@ ALL_TESTS="
ping_ipv6
multipath_ipv4
multipath_ipv6
- multipath_ipv6_l4
"
NUM_NETIFS=6
@@ -248,7 +247,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -267,35 +266,6 @@ multipath6_test()
local weight1=$1; shift
local weight2=$1; shift
- sysctl_set net.ipv6.fib_multipath_hash_policy 0
- ip nexthop replace id 103 group 101,$weight1/102,$weight2 \
- type resilient
-
- local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- # Generate 16384 echo requests, each with a random flow label.
- for ((i=0; i < 16384; ++i)); do
- ip vrf exec v$h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
- done
-
- local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
- local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
-
- local d111=$((t1_111 - t0_111))
- local d222=$((t1_222 - t0_222))
- multipath_eval "$what" $weight1 $weight2 $d111 $d222
-
- ip nexthop replace id 103 group 101/102 type resilient
- sysctl_restore net.ipv6.fib_multipath_hash_policy
-}
-
-multipath6_l4_test()
-{
- local what=$1; shift
- local weight1=$1; shift
- local weight2=$1; shift
-
sysctl_set net.ipv6.fib_multipath_hash_policy 1
ip nexthop replace id 103 group 101,$weight1/102,$weight2 \
type resilient
@@ -305,7 +275,7 @@ multipath6_l4_test()
ip vrf exec v$h1 \
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::1 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
@@ -344,14 +314,6 @@ multipath_ipv6()
multipath6_test "Weighted MP 11:45" 11 45
}
-multipath_ipv6_l4()
-{
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
-}
-
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
index d40183b4eccc..2ab9eaaa5532 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
@@ -280,42 +280,42 @@ send_src_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
ip vrf exec v$h1 $MZ $h1 -q -p 64 \
-A 198.51.100.2 -B 203.0.113.2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
- -d 1msec -c 50 -t udp "sp=20000,dp=30000"
+ -d $MZ_DELAY -c 50 -t udp "sp=20000,dp=30000"
}
send_flowlabel()
@@ -331,14 +331,14 @@ send_src_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=0-32768,dp=30000"
+ -d $MZ_DELAY -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
-A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=20000,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=20000,dp=0-32768"
}
custom_hash_test()
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh b/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh
index a257979d3fc5..32d1461f37b7 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_inner_v4_multipath.sh
@@ -266,7 +266,7 @@ multipath4_test()
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A "192.0.3.2-192.0.3.62" -B "192.0.4.2-192.0.4.62" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh b/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh
index d208f5243ade..e1a4b50505f5 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_inner_v6_multipath.sh
@@ -265,9 +265,9 @@ multipath6_test()
local t0_222=$(tc_rule_stats_get $ul32 222 ingress)
ip vrf exec v$h1 \
- $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::1e" \
- -B "2001:db8:2::2-2001:db8:2::1e" \
- -d 1msec -c 50 -t udp "sp=1024,dp=1024"
+ $MZ $h1 -6 -q -p 64 -A "2001:db8:1::2-2001:db8:1::3e" \
+ -B "2001:db8:2::2-2001:db8:2::3e" \
+ -d $MZ_DELAY -c 50 -t udp "sp=1024,dp=1024"
sleep 1
local t1_111=$(tc_rule_stats_get $ul32 111 ingress)
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
index 58a3597037b1..24f4ab328bd2 100644
--- a/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_lib.sh
@@ -356,7 +356,7 @@ test_traffic_ip4ip6()
flower $TC_FLAG dst_ip 203.0.113.1 action pass
$MZ $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 198.51.100.1 \
- -B 203.0.113.1 -t ip -q -d 1msec
+ -B 203.0.113.1 -t ip -q -d $MZ_DELAY
# Check ports after encap and after decap.
tc_check_at_least_x_packets "dev $ul1 egress" 101 1000
@@ -389,7 +389,7 @@ test_traffic_ip6ip6()
flower $TC_FLAG dst_ip 2001:db8:2::1 action pass
$MZ -6 $h1 -c 1000 -p 64 -a $h1mac -b $ol1mac -A 2001:db8:1::1 \
- -B 2001:db8:2::1 -t ip -q -d 1msec
+ -B 2001:db8:2::1 -t ip -q -d $MZ_DELAY
# Check ports after encap and after decap.
tc_check_at_least_x_packets "dev $ul1 egress" 101 1000
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 8a61464ab6eb..d1bf39eaf2b3 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -8,6 +8,7 @@
PING=${PING:=ping}
PING6=${PING6:=ping6}
MZ=${MZ:=mausezahn}
+MZ_DELAY=${MZ_DELAY:=0}
ARPING=${ARPING:=arping}
TEAMD=${TEAMD:=teamd}
WAIT_TIME=${WAIT_TIME:=5}
@@ -29,23 +30,20 @@ STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no}
TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=}
TROUTE6=${TROUTE6:=traceroute6}
-relative_path="${BASH_SOURCE%/*}"
-if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
- relative_path="."
-fi
+net_forwarding_dir=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
-if [[ -f $relative_path/forwarding.config ]]; then
- source "$relative_path/forwarding.config"
+if [[ -f $net_forwarding_dir/forwarding.config ]]; then
+ source "$net_forwarding_dir/forwarding.config"
fi
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
+source "$net_forwarding_dir/../lib.sh"
-busywait()
+# timeout in seconds
+slowwait()
{
local timeout=$1; shift
- local start_time="$(date -u +%s%3N)"
+ local start_time="$(date -u +%s)"
while true
do
local out
@@ -56,11 +54,13 @@ busywait()
return 0
fi
- local current_time="$(date -u +%s%3N)"
+ local current_time="$(date -u +%s)"
if ((current_time - start_time > timeout)); then
echo -n "$out"
return 1
fi
+
+ sleep 0.1
done
}
@@ -505,6 +505,15 @@ busywait_for_counter()
busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
}
+slowwait_for_counter()
+{
+ local timeout=$1; shift
+ local delta=$1; shift
+
+ local base=$("$@")
+ slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
+}
+
setup_wait_dev()
{
local dev=$1; shift
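The new slowwait()/slowwait_for_counter() helpers mirror busywait()/busywait_for_counter(), but take their timeout in seconds and sleep 0.1 s between polls instead of spinning. A minimal usage sketch; link_stats_rx_packets_get is assumed to be a library helper that prints the current counter:

    # Wait up to 10 seconds for at least 100 more RX packets on swp1.
    slowwait_for_counter 10 100 link_stats_rx_packets_get swp1 \
            || echo "counter did not advance in time"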
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
index fac486178ef7..0c36546e131e 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-source "$relative_path/mirror_lib.sh"
+source "$net_forwarding_dir/mirror_lib.sh"
quick_test_span_gre_dir_ips()
{
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
index 39c03e2867f4..6e615fffa4ef 100644
--- a/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_topo_lib.sh
@@ -33,7 +33,7 @@
# | |
# +-------------------------------------------------------------------------+
-source "$relative_path/mirror_topo_lib.sh"
+source "$net_forwarding_dir/mirror_topo_lib.sh"
mirror_gre_topo_h3_create()
{
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
index a0d612e04990..982e0d098ea9 100755
--- a/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh.sh
@@ -204,7 +204,7 @@ multipath4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -218,7 +218,7 @@ multipath4_test()
sysctl_restore net.ipv4.fib_multipath_hash_policy
}
-multipath6_l4_test()
+multipath6_test()
{
local desc="$1"
local weight_rp12=$2
@@ -237,7 +237,7 @@ multipath6_l4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -251,34 +251,6 @@ multipath6_l4_test()
sysctl_restore net.ipv6.fib_multipath_hash_policy
}
-multipath6_test()
-{
- local desc="$1"
- local weight_rp12=$2
- local weight_rp13=$3
- local t0_rp12 t0_rp13 t1_rp12 t1_rp13
- local packets_rp12 packets_rp13
-
- ip nexthop replace id 106 group 104,$weight_rp12/105,$weight_rp13
-
- t0_rp12=$(link_stats_tx_packets_get $rp12)
- t0_rp13=$(link_stats_tx_packets_get $rp13)
-
- # Generate 16384 echo requests, each with a random flow label.
- for _ in $(seq 1 16384); do
- ip vrf exec vrf-h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1
- done
-
- t1_rp12=$(link_stats_tx_packets_get $rp12)
- t1_rp13=$(link_stats_tx_packets_get $rp13)
-
- let "packets_rp12 = $t1_rp12 - $t0_rp12"
- let "packets_rp13 = $t1_rp13 - $t0_rp13"
- multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
-
- ip nexthop replace id 106 group 104/105
-}
-
multipath_test()
{
log_info "Running IPv4 multipath tests"
@@ -301,11 +273,6 @@ multipath_test()
multipath6_test "ECMP" 1 1
multipath6_test "Weighted MP 2:1" 2 1
multipath6_test "Weighted MP 11:45" 11 45
-
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
}
ping_ipv4_blackhole()
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
index cb08ffe2356a..a60ff54723b7 100755
--- a/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh_res.sh
@@ -205,7 +205,7 @@ multipath4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -235,7 +235,7 @@ multipath6_l4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
diff --git a/tools/testing/selftests/net/forwarding/router_multipath.sh b/tools/testing/selftests/net/forwarding/router_multipath.sh
index 464821c587a5..e2be354167a1 100755
--- a/tools/testing/selftests/net/forwarding/router_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/router_multipath.sh
@@ -179,7 +179,7 @@ multipath4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -195,7 +195,7 @@ multipath4_test()
sysctl_restore net.ipv4.fib_multipath_hash_policy
}
-multipath6_l4_test()
+multipath6_test()
{
local desc="$1"
local weight_rp12=$2
@@ -216,7 +216,7 @@ multipath6_l4_test()
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
- -d 1msec -t udp "sp=1024,dp=0-32768"
+ -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
@@ -232,38 +232,6 @@ multipath6_l4_test()
sysctl_restore net.ipv6.fib_multipath_hash_policy
}
-multipath6_test()
-{
- local desc="$1"
- local weight_rp12=$2
- local weight_rp13=$3
- local t0_rp12 t0_rp13 t1_rp12 t1_rp13
- local packets_rp12 packets_rp13
-
- ip route replace 2001:db8:2::/64 vrf vrf-r1 \
- nexthop via fe80:2::22 dev $rp12 weight $weight_rp12 \
- nexthop via fe80:3::23 dev $rp13 weight $weight_rp13
-
- t0_rp12=$(link_stats_tx_packets_get $rp12)
- t0_rp13=$(link_stats_tx_packets_get $rp13)
-
- # Generate 16384 echo requests, each with a random flow label.
- for _ in $(seq 1 16384); do
- ip vrf exec vrf-h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q &> /dev/null
- done
-
- t1_rp12=$(link_stats_tx_packets_get $rp12)
- t1_rp13=$(link_stats_tx_packets_get $rp13)
-
- let "packets_rp12 = $t1_rp12 - $t0_rp12"
- let "packets_rp13 = $t1_rp13 - $t0_rp13"
- multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
-
- ip route replace 2001:db8:2::/64 vrf vrf-r1 \
- nexthop via fe80:2::22 dev $rp12 \
- nexthop via fe80:3::23 dev $rp13
-}
-
multipath_test()
{
log_info "Running IPv4 multipath tests"
@@ -275,11 +243,6 @@ multipath_test()
multipath6_test "ECMP" 1 1
multipath6_test "Weighted MP 2:1" 2 1
multipath6_test "Weighted MP 11:45" 11 45
-
- log_info "Running IPv6 L4 hash multipath tests"
- multipath6_l4_test "ECMP" 1 1
- multipath6_l4_test "Weighted MP 2:1" 2 1
- multipath6_l4_test "Weighted MP 11:45" 11 45
}
setup_prepare()
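What the deleted multipath6_test bodies did with 16384 random-flow-label pings, the surviving tests now do with a single mausezahn sweep under L4 hashing; the sysctl selects the hash input. A sketch, under the assumption that policy 0 hashes on L3 fields (addresses plus flow label) and policy 1 on the L4 five-tuple:

    sysctl -w net.ipv6.fib_multipath_hash_policy=1   # hash on the L4 five-tuple
    # One invocation drives many flows by sweeping the UDP destination port:
    $MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
            -d $MZ_DELAY -t udp "sp=1024,dp=0-32768"
    sysctl -w net.ipv6.fib_multipath_hash_policy=0   # restore L3 hashing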
diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
index b0f5e55d2d0b..589629636502 100755
--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
+++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
@@ -235,9 +235,6 @@ mirred_egress_to_ingress_tcp_test()
check_err $? "didn't mirred redirect ICMP"
tc_check_packets "dev $h1 ingress" 102 10
check_err $? "didn't drop mirred ICMP"
- local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
- test ${overlimits} = 10
- check_err $? "wrong overlimits, expected 10 got ${overlimits}"
tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
diff --git a/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
index 20a7cb7222b8..c2420bb72c12 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower_l2_miss.sh
@@ -209,14 +209,17 @@ test_l2_miss_multicast()
# both registered and unregistered multicast traffic.
bridge link set dev $swp2 mcast_router 2
+ # Set the Max Response Delay to 100 centiseconds (1 second) so that the
+ # bridge will start forwarding according to its MDB soon after a
+ # multicast querier is enabled.
+ ip link set dev br1 type bridge mcast_query_response_interval 100
+
# Forwarding according to MDB entries only takes place when the bridge
# detects that there is a valid querier in the network. Set the bridge
# as the querier and assign it a valid IPv6 link-local address to be
# used as the source address for MLD queries.
ip link set dev br1 type bridge mcast_querier 1
ip -6 address add fe80::1/64 nodad dev br1
- # Wait the default Query Response Interval (10 seconds) for the bridge
- # to determine that there are no other queriers in the network.
sleep 10
test_l2_miss_multicast_ipv4
@@ -224,6 +227,7 @@ test_l2_miss_multicast()
ip -6 address del fe80::1/64 dev br1
ip link set dev br1 type bridge mcast_querier 0
+ ip link set dev br1 type bridge mcast_query_response_interval 1000
bridge link set dev $swp2 mcast_router 1
}
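Bridge timer options passed through iproute2 are expressed in centiseconds, which is why 100 above means one second and why the teardown restores 1000, the 10-second default. A sketch of the pattern:

    ip link set dev br1 type bridge mcast_query_response_interval 100   # 1 s
    # ... enable the querier and run the multicast tests ...
    ip link set dev br1 type bridge mcast_query_response_interval 1000  # default, 10 s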
diff --git a/tools/testing/selftests/net/forwarding/tc_police.sh b/tools/testing/selftests/net/forwarding/tc_police.sh
index 0a51eef21b9e..5103f64a71d6 100755
--- a/tools/testing/selftests/net/forwarding/tc_police.sh
+++ b/tools/testing/selftests/net/forwarding/tc_police.sh
@@ -140,7 +140,7 @@ police_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
@@ -157,7 +157,7 @@ police_rx_test()
# Rule to police traffic destined to $h2 on ingress of $rp1
tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/ok
+ action police rate 10mbit burst 16k conform-exceed drop/ok
police_common_test "police on rx"
@@ -169,7 +169,7 @@ police_tx_test()
# Rule to police traffic destined to $h2 on egress of $rp2
tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/ok
+ action police rate 10mbit burst 16k conform-exceed drop/ok
police_common_test "police on tx"
@@ -190,7 +190,7 @@ police_shared_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
@@ -211,7 +211,7 @@ police_shared_test()
# Rule to police traffic destined to $h2 on ingress of $rp1
tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/ok \
+ action police rate 10mbit burst 16k conform-exceed drop/ok \
index 10
# Rule to police a different flow destined to $h2 on egress of $rp2
@@ -250,7 +250,7 @@ police_mirror_common_test()
# Rule to police traffic destined to $h2 and mirror to $h3
tc filter add dev $pol_if $dir protocol ip pref 1 handle 101 flower \
dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
- action police rate 80mbit burst 16k conform-exceed drop/pipe \
+ action police rate 10mbit burst 16k conform-exceed drop/pipe \
action mirred egress mirror dev $rp3
mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
@@ -260,7 +260,7 @@ police_mirror_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
@@ -270,7 +270,7 @@ police_mirror_common_test()
sleep 10
local t1=$(tc_rule_stats_get $h3 1 ingress .bytes)
- local er=$((80 * 1000 * 1000))
+ local er=$((10 * 1000 * 1000))
local nr=$(rate $t0 $t1 10)
local nr_pct=$((100 * (nr - er) / er))
((-10 <= nr_pct && nr_pct <= 10))
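Every hunk above retunes the policer from 80 to 10 Mbit/s; the acceptance check itself is untouched and compact enough to restate. A sketch, assuming the library rate() helper turns a byte-counter delta over the 10-second window into bits per second:

    er=$((10 * 1000 * 1000))            # expected rate after policing: 10 Mbit/s
    nr=$(rate "$t0" "$t1" 10)           # measured rate over the 10 s interval
    nr_pct=$((100 * (nr - er) / er))    # signed deviation, in percent
    ((-10 <= nr_pct && nr_pct <= 10))   # pass iff within +/-10% of the target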
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index eb307ca37bfa..6f0a2e452ba1 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -495,7 +495,7 @@ vxlan_ping_test()
local delta=$((t1 - t0))
# Tolerate a couple stray extra packets.
- ((expect <= delta && delta <= expect + 2))
+ ((expect <= delta && delta <= expect + 5))
check_err $? "$capture_dev: Expected to capture $expect packets, got $delta."
}
@@ -532,7 +532,7 @@ __test_ecn_encap()
RET=0
tc filter add dev v1 egress pref 77 prot ip \
- flower ip_tos $tos action pass
+ flower ip_tos $tos ip_proto udp dst_port $VXPORT action pass
sleep 1
vxlan_ping_test $h1 192.0.2.3 "-Q $q" v1 egress 77 10
tc filter del dev v1 egress pref 77 prot ip
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
index ac97f07e5ce8..a0bb4524e1e9 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
@@ -616,7 +616,7 @@ vxlan_ping_test()
local delta=$((t1 - t0))
# Tolerate a couple stray extra packets.
- ((expect <= delta && delta <= expect + 2))
+ ((expect <= delta && delta <= expect + 5))
check_err $? "$capture_dev: Expected to capture $expect packets, got $delta."
}
@@ -653,7 +653,7 @@ __test_ecn_encap()
RET=0
tc filter add dev v1 egress pref 77 protocol ipv6 \
- flower ip_tos $tos action pass
+ flower ip_tos $tos ip_proto udp dst_port $VXPORT action pass
sleep 1
vxlan_ping_test $h1 2001:db8:1::3 "-Q $q" v1 egress 77 10
tc filter del dev v1 egress pref 77 protocol ipv6
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
index a596bbf3ed6a..fb9a34cb50c6 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
@@ -750,7 +750,7 @@ __test_learning()
expects[0]=0; expects[$idx1]=10; expects[$idx2]=0
vxlan_flood_test $mac $dst $vid "${expects[@]}"
- sleep 20
+ sleep 60
bridge fdb show brport $vx | grep $mac | grep -q self
check_fail $?
@@ -796,11 +796,11 @@ test_learning()
local dst=192.0.2.100
local vid=10
- # Enable learning on the VxLAN devices and set ageing time to 10 seconds
- ip link set dev br1 type bridge ageing_time 1000
- ip link set dev vx10 type vxlan ageing 10
+ # Enable learning on the VxLAN devices and set ageing time to 30 seconds
+ ip link set dev br1 type bridge ageing_time 3000
+ ip link set dev vx10 type vxlan ageing 30
ip link set dev vx10 type vxlan learning
- ip link set dev vx20 type vxlan ageing 10
+ ip link set dev vx20 type vxlan ageing 30
ip link set dev vx20 type vxlan learning
reapply_config
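The ageing arithmetic mixes two units: bridge ageing_time is in centiseconds (3000 = 30 s) while the vxlan ageing parameter is in seconds, so both ends now expire learned entries after 30 seconds and the test sleeps 60 seconds to be safely past that. A sketch, with a hypothetical $mac:

    ip link set dev br1 type bridge ageing_time 3000   # 30 s, in centiseconds
    ip link set dev vx10 type vxlan ageing 30          # 30 s, in seconds
    sleep 60   # > ageing time, so the learned FDB entry must be gone
    bridge fdb show brport vx10 | grep -q "$mac" && echo "entry did not age out"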
diff --git a/tools/testing/selftests/net/fq_band_pktlimit.sh b/tools/testing/selftests/net/fq_band_pktlimit.sh
index 24b77bdf41ff..977070ed42b3 100755
--- a/tools/testing/selftests/net/fq_band_pktlimit.sh
+++ b/tools/testing/selftests/net/fq_band_pktlimit.sh
@@ -8,7 +8,7 @@
# 3. send 20 pkts on band A: verify that 0 are queued, 20 dropped
# 4. send 20 pkts on band B: verify that 10 are queued, 10 dropped
#
-# Send packets with a 100ms delay to ensure that previously sent
+# Send packets with a delay to ensure that previously sent
# packets are still queued when later ones are sent.
# Use SO_TXTIME for this.
@@ -29,19 +29,21 @@ ip -6 addr add fdaa::1/128 dev dummy0
ip -6 route add fdaa::/64 dev dummy0
tc qdisc replace dev dummy0 root handle 1: fq quantum 1514 initial_quantum 1514 limit 10
-./cmsg_sender -6 -p u -d 100000 -n 20 fdaa::2 8000
+DELAY=400000
+
+./cmsg_sender -6 -p u -d "${DELAY}" -n 20 fdaa::2 8000
OUT1="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
-./cmsg_sender -6 -p u -d 100000 -n 20 fdaa::2 8000
+./cmsg_sender -6 -p u -d "${DELAY}" -n 20 fdaa::2 8000
OUT2="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
-./cmsg_sender -6 -p u -d 100000 -n 20 -P 7 fdaa::2 8000
+./cmsg_sender -6 -p u -d "${DELAY}" -n 20 -P 7 fdaa::2 8000
OUT3="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
# Initial stats will report zero sent, as all packets are still
-# queued in FQ. Sleep for the delay period (100ms) and see that
+# queued in FQ. Sleep for at least the delay period and see that
# twenty are now sent.
-sleep 0.1
+sleep 0.6
OUT4="$(tc -s qdisc show dev dummy0 | grep '^\ Sent')"
# Log the output after the test
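cmsg_sender's -d argument is the SO_TXTIME offset, assumed here to be in microseconds (the original comment paired -d 100000 with "100ms"), so DELAY=400000 schedules each batch 400 ms into the future and the final sleep must exceed that before the queue drains. A sketch of the timing relation:

    DELAY=400000                     # SO_TXTIME offset: 400000 us = 0.4 s
    ./cmsg_sender -6 -p u -d "${DELAY}" -n 20 fdaa::2 8000
    sleep 0.6                        # > 0.4 s, so all released packets are sent
    tc -s qdisc show dev dummy0 | grep '^\ Sent'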
diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh
index 19352f106c1d..02c21ff4ca81 100755
--- a/tools/testing/selftests/net/gro.sh
+++ b/tools/testing/selftests/net/gro.sh
@@ -31,6 +31,11 @@ run_test() {
1>>log.txt
wait "${server_pid}"
exit_code=$?
+ if [[ ${test} == "large" && -n "${KSFT_MACHINE_SLOW}" && \
+ ${exit_code} -ne 0 ]]; then
+ echo "Ignoring errors due to slow environment" 1>&2
+ exit_code=0
+ fi
if [[ "${exit_code}" -eq 0 ]]; then
break;
fi
diff --git a/tools/testing/selftests/net/ioam6.sh b/tools/testing/selftests/net/ioam6.sh
index fe59ca3e5596..12491850ae98 100755
--- a/tools/testing/selftests/net/ioam6.sh
+++ b/tools/testing/selftests/net/ioam6.sh
@@ -367,14 +367,12 @@ run_test()
local desc=$2
local node_src=$3
local node_dst=$4
- local ip6_src=$5
- local ip6_dst=$6
- local if_dst=$7
- local trace_type=$8
- local ioam_ns=$9
-
- ip netns exec $node_dst ./ioam6_parser $if_dst $name $ip6_src $ip6_dst \
- $trace_type $ioam_ns &
+ local ip6_dst=$5
+ local trace_type=$6
+ local ioam_ns=$7
+ local type=$8
+
+ ip netns exec $node_dst ./ioam6_parser $name $trace_type $ioam_ns $type &
local spid=$!
sleep 0.1
@@ -489,7 +487,7 @@ out_undef_ns()
trace prealloc type 0x800000 ns 0 size 4 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
- db01::2 db01::1 veth0 0x800000 0
+ db01::1 0x800000 0 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
}
@@ -509,7 +507,7 @@ out_no_room()
trace prealloc type 0xc00000 ns 123 size 4 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
- db01::2 db01::1 veth0 0xc00000 123
+ db01::1 0xc00000 123 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
}
@@ -543,14 +541,14 @@ out_bits()
if [ $cmd_res != 0 ]
then
npassed=$((npassed+1))
- log_test_passed "$descr"
+ log_test_passed "$descr ($1 mode)"
else
nfailed=$((nfailed+1))
- log_test_failed "$descr"
+ log_test_failed "$descr ($1 mode)"
fi
else
run_test "out_bit$i" "$descr ($1 mode)" $ioam_node_alpha \
- $ioam_node_beta db01::2 db01::1 veth0 ${bit2type[$i]} 123
+ $ioam_node_beta db01::1 ${bit2type[$i]} 123 $1
fi
done
@@ -574,7 +572,7 @@ out_full_supp_trace()
trace prealloc type 0xfff002 ns 123 size 100 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
- db01::2 db01::1 veth0 0xfff002 123
+ db01::1 0xfff002 123 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
}
@@ -604,7 +602,7 @@ in_undef_ns()
trace prealloc type 0x800000 ns 0 size 4 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
- db01::2 db01::1 veth0 0x800000 0
+ db01::1 0x800000 0 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
}
@@ -624,7 +622,7 @@ in_no_room()
trace prealloc type 0xc00000 ns 123 size 4 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
- db01::2 db01::1 veth0 0xc00000 123
+ db01::1 0xc00000 123 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
}
@@ -651,7 +649,7 @@ in_bits()
dev veth0
run_test "in_bit$i" "${desc/<n>/$i} ($1 mode)" $ioam_node_alpha \
- $ioam_node_beta db01::2 db01::1 veth0 ${bit2type[$i]} 123
+ $ioam_node_beta db01::1 ${bit2type[$i]} 123 $1
done
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
@@ -679,7 +677,7 @@ in_oflag()
trace prealloc type 0xc00000 ns 123 size 4 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
- db01::2 db01::1 veth0 0xc00000 123
+ db01::1 0xc00000 123 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
@@ -703,7 +701,7 @@ in_full_supp_trace()
trace prealloc type 0xfff002 ns 123 size 80 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_beta \
- db01::2 db01::1 veth0 0xfff002 123
+ db01::1 0xfff002 123 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_beta link set ip6tnl0 down
}
@@ -731,7 +729,7 @@ fwd_full_supp_trace()
trace prealloc type 0xfff002 ns 123 size 244 via db01::1 dev veth0
run_test ${FUNCNAME[0]} "${desc} ($1 mode)" $ioam_node_alpha $ioam_node_gamma \
- db01::2 db02::2 veth0 0xfff002 123
+ db02::2 0xfff002 123 $1
[ "$1" = "encap" ] && ip -netns $ioam_node_gamma link set ip6tnl0 down
}
diff --git a/tools/testing/selftests/net/ioam6_parser.c b/tools/testing/selftests/net/ioam6_parser.c
index d9d1d4190126..895e5bb5044b 100644
--- a/tools/testing/selftests/net/ioam6_parser.c
+++ b/tools/testing/selftests/net/ioam6_parser.c
@@ -8,7 +8,6 @@
#include <errno.h>
#include <limits.h>
#include <linux/const.h>
-#include <linux/if_ether.h>
#include <linux/ioam6.h>
#include <linux/ipv6.h>
#include <stdlib.h>
@@ -512,14 +511,6 @@ static int str2id(const char *tname)
return -1;
}
-static int ipv6_addr_equal(const struct in6_addr *a1, const struct in6_addr *a2)
-{
- return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
- (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
- (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
- (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
-}
-
static int get_u32(__u32 *val, const char *arg, int base)
{
unsigned long res;
@@ -603,70 +594,80 @@ static int (*func[__TEST_MAX])(int, struct ioam6_trace_hdr *, __u32, __u16) = {
int main(int argc, char **argv)
{
- int fd, size, hoplen, tid, ret = 1;
- struct in6_addr src, dst;
+ int fd, size, hoplen, tid, ret = 1, on = 1;
struct ioam6_hdr *opt;
- struct ipv6hdr *ip6h;
- __u8 buffer[400], *p;
- __u16 ioam_ns;
+ struct cmsghdr *cmsg;
+ struct msghdr msg;
+ struct iovec iov;
+ __u8 buffer[512];
__u32 tr_type;
+ __u16 ioam_ns;
+ __u8 *ptr;
- if (argc != 7)
+ if (argc != 5)
goto out;
- tid = str2id(argv[2]);
+ tid = str2id(argv[1]);
if (tid < 0 || !func[tid])
goto out;
- if (inet_pton(AF_INET6, argv[3], &src) != 1 ||
- inet_pton(AF_INET6, argv[4], &dst) != 1)
+ if (get_u32(&tr_type, argv[2], 16) ||
+ get_u16(&ioam_ns, argv[3], 0))
goto out;
- if (get_u32(&tr_type, argv[5], 16) ||
- get_u16(&ioam_ns, argv[6], 0))
+ fd = socket(PF_INET6, SOCK_RAW,
+ !strcmp(argv[4], "encap") ? IPPROTO_IPV6 : IPPROTO_ICMPV6);
+ if (fd < 0)
goto out;
- fd = socket(AF_PACKET, SOCK_DGRAM, __cpu_to_be16(ETH_P_IPV6));
- if (!fd)
- goto out;
+ setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPOPTS, &on, sizeof(on));
- if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
- argv[1], strlen(argv[1])))
+ iov.iov_len = 1;
+ iov.iov_base = malloc(CMSG_SPACE(sizeof(buffer)));
+ if (!iov.iov_base)
goto close;
-
recv:
- size = recv(fd, buffer, sizeof(buffer), 0);
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = buffer;
+ msg.msg_controllen = CMSG_SPACE(sizeof(buffer));
+
+ size = recvmsg(fd, &msg, 0);
if (size <= 0)
goto close;
- ip6h = (struct ipv6hdr *)buffer;
+ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ if (cmsg->cmsg_level != IPPROTO_IPV6 ||
+ cmsg->cmsg_type != IPV6_HOPOPTS ||
+ cmsg->cmsg_len < sizeof(struct ipv6_hopopt_hdr))
+ continue;
- if (!ipv6_addr_equal(&ip6h->saddr, &src) ||
- !ipv6_addr_equal(&ip6h->daddr, &dst))
- goto recv;
+ ptr = (__u8 *)CMSG_DATA(cmsg);
- if (ip6h->nexthdr != IPPROTO_HOPOPTS)
- goto close;
+ hoplen = (ptr[1] + 1) << 3;
+ ptr += sizeof(struct ipv6_hopopt_hdr);
- p = buffer + sizeof(*ip6h);
- hoplen = (p[1] + 1) << 3;
- p += sizeof(struct ipv6_hopopt_hdr);
+ while (hoplen > 0) {
+ opt = (struct ioam6_hdr *)ptr;
- while (hoplen > 0) {
- opt = (struct ioam6_hdr *)p;
+ if (opt->opt_type == IPV6_TLV_IOAM &&
+ opt->type == IOAM6_TYPE_PREALLOC) {
+ ptr += sizeof(*opt);
+ ret = func[tid](tid,
+ (struct ioam6_trace_hdr *)ptr,
+ tr_type, ioam_ns);
+ goto close;
+ }
- if (opt->opt_type == IPV6_TLV_IOAM &&
- opt->type == IOAM6_TYPE_PREALLOC) {
- p += sizeof(*opt);
- ret = func[tid](tid, (struct ioam6_trace_hdr *)p,
- tr_type, ioam_ns);
- break;
+ ptr += opt->opt_len + 2;
+ hoplen -= opt->opt_len + 2;
}
-
- p += opt->opt_len + 2;
- hoplen -= opt->opt_len + 2;
}
+
+ goto recv;
close:
+ free(iov.iov_base);
close(fd);
out:
return ret;
diff --git a/tools/testing/selftests/net/ip_local_port_range.c b/tools/testing/selftests/net/ip_local_port_range.c
index 0f217a1cc837..193b82745fd8 100644
--- a/tools/testing/selftests/net/ip_local_port_range.c
+++ b/tools/testing/selftests/net/ip_local_port_range.c
@@ -16,6 +16,10 @@
#define IP_LOCAL_PORT_RANGE 51
#endif
+#ifndef IPPROTO_MPTCP
+#define IPPROTO_MPTCP 262
+#endif
+
static __u32 pack_port_range(__u16 lo, __u16 hi)
{
return (hi << 16) | (lo << 0);
@@ -361,9 +365,6 @@ TEST_F(ip_local_port_range, late_bind)
__u32 range;
__u16 port;
- if (variant->so_protocol == IPPROTO_SCTP)
- SKIP(return, "SCTP doesn't support IP_BIND_ADDRESS_NO_PORT");
-
fd = socket(variant->so_domain, variant->so_type, 0);
ASSERT_GE(fd, 0) TH_LOG("socket failed");
@@ -410,6 +411,9 @@ TEST_F(ip_local_port_range, late_bind)
ASSERT_TRUE(!err) TH_LOG("close failed");
}
+XFAIL_ADD(ip_local_port_range, ip4_stcp, late_bind);
+XFAIL_ADD(ip_local_port_range, ip6_stcp, late_bind);
+
TEST_F(ip_local_port_range, get_port_range)
{
__u16 lo, hi;
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index dca549443801..f9fe182dfbd4 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -4,6 +4,9 @@
##############################################################################
# Defines
+WAIT_TIMEOUT=${WAIT_TIMEOUT:=20}
+BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
# namespace list created by setup_ns
@@ -48,7 +51,7 @@ cleanup_ns()
for ns in "$@"; do
ip netns delete "${ns}" &> /dev/null
- if ! busywait 2 ip netns list \| grep -vq "^$ns$" &> /dev/null; then
+ if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
echo "Warn: Failed to remove namespace $ns"
ret=1
fi
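busywait() takes its timeout in milliseconds, so the new BUSYWAIT_TIMEOUT is derived from the seconds-based WAIT_TIMEOUT; the old literal 2 gave the namespace cleanup only 2 ms to converge. A sketch, with a hypothetical namespace name in $ns:

    WAIT_TIMEOUT=${WAIT_TIMEOUT:=20}            # seconds, caller-overridable
    BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000))   # busywait() expects milliseconds
    busywait "$BUSYWAIT_TIMEOUT" ip netns list \| grep -vq "^${ns}$"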
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index e317c2e44dae..4f80014cae49 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -22,8 +22,11 @@ CONFIG_NFT_TPROXY=m
CONFIG_NFT_SOCKET=m
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IP6_NF_FILTER=m
CONFIG_NET_ACT_CSUM=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_CLS_ACT=y
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 04fcb8a077c9..afe862895946 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -1,14 +1,16 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept the existing style here: there were too many occurrences
+# to fix before addressing all the other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns="ns1-$rndh"
-ksft_skip=4
+ns=""
test_cnt=1
-timeout_poll=100
+timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
ret=0
@@ -20,31 +22,23 @@ flush_pids()
ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
- for _ in $(seq 10); do
+ for _ in $(seq $((timeout_poll * 10))); do
[ -z "$(ip netns pids "${ns}")" ] && break
sleep 0.1
done
}
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
- ip netns del $ns
+ mptcp_lib_ns_exit "${ns}"
}
mptcp_lib_check_mptcp
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
-ss -h | grep -q MPTCP
-if [ $? -ne 0 ];then
- echo "SKIP: ss tool does not support MPTCP"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip ss
get_msk_inuse()
{
@@ -62,14 +56,14 @@ __chk_nr()
nr=$(eval $command)
printf "%-50s" "$msg"
- if [ $nr != $expected ]; then
- if [ $nr = "$skip" ] && ! mptcp_lib_expect_all_features; then
+ if [ "$nr" != "$expected" ]; then
+ if [ "$nr" = "$skip" ] && ! mptcp_lib_expect_all_features; then
echo "[ skip ] Feature probably not supported"
mptcp_lib_result_skip "${msg}"
else
echo "[ fail ] expected $expected found $nr"
mptcp_lib_result_fail "${msg}"
- ret=$test_cnt
+ ret=${KSFT_FAIL}
fi
else
echo "[ ok ]"
@@ -91,6 +85,15 @@ chk_msk_nr()
__chk_msk_nr "grep -c token:" "$@"
}
+chk_listener_nr()
+{
+ local expected=$1
+ local msg="$2"
+
+ __chk_nr "ss -nlHMON $ns | wc -l" "$expected" "$msg - mptcp" 0
+ __chk_nr "ss -nlHtON $ns | wc -l" "$expected" "$msg - subflows"
+}
+
wait_msk_nr()
{
local condition="grep -c token:"
@@ -115,11 +118,11 @@ wait_msk_nr()
if [ $i -ge $timeout ]; then
echo "[ fail ] timeout while expecting $expected max $max last $nr"
mptcp_lib_result_fail "${msg} # timeout"
- ret=$test_cnt
+ ret=${KSFT_FAIL}
elif [ $nr != $expected ]; then
echo "[ fail ] expected $expected found $nr"
mptcp_lib_result_fail "${msg} # unexpected result"
- ret=$test_cnt
+ ret=${KSFT_FAIL}
else
echo "[ ok ]"
mptcp_lib_result_pass "${msg}"
@@ -166,29 +169,38 @@ chk_msk_listen()
chk_msk_inuse()
{
local expected=$1
- local msg="$2"
+ local msg="....chk ${2:-${expected}} msk in use"
local listen_nr
+ if [ "${expected}" -eq 0 ]; then
+ msg+=" after flush"
+ fi
+
listen_nr=$(ss -N "${ns}" -Ml | grep -c LISTEN)
expected=$((expected + listen_nr))
for _ in $(seq 10); do
- if [ $(get_msk_inuse) -eq $expected ];then
+ if [ "$(get_msk_inuse)" -eq $expected ]; then
break
fi
sleep 0.1
done
- __chk_nr get_msk_inuse $expected "$msg" 0
+ __chk_nr get_msk_inuse $expected "${msg}" 0
}
# $1: cestab nr
chk_msk_cestab()
{
- local cestab=$1
+ local expected=$1
+ local msg="....chk ${2:-${expected}} cestab"
+
+ if [ "${expected}" -eq 0 ]; then
+ msg+=" after flush"
+ fi
__chk_nr "mptcp_lib_get_counter ${ns} MPTcpExtMPCurrEstab" \
- "${cestab}" "....chk ${cestab} cestab" ""
+ "${expected}" "${msg}" ""
}
wait_connected()
@@ -206,8 +218,7 @@ wait_connected()
}
trap cleanup EXIT
-ip netns add $ns
-ip -n $ns link set dev lo up
+mptcp_lib_ns_init ns
echo "a" | \
timeout ${timeout_test} \
@@ -227,12 +238,12 @@ wait_connected $ns 10000
chk_msk_nr 2 "after MPC handshake "
chk_msk_remote_key_nr 2 "....chk remote_key"
chk_msk_fallback_nr 0 "....chk no fallback"
-chk_msk_inuse 2 "....chk 2 msk in use"
+chk_msk_inuse 2
chk_msk_cestab 2
flush_pids
-chk_msk_inuse 0 "....chk 0 msk in use after flush"
-chk_msk_cestab 0
+chk_msk_inuse 0 "2->0"
+chk_msk_cestab 0 "2->0"
echo "a" | \
timeout ${timeout_test} \
@@ -247,15 +258,15 @@ echo "b" | \
127.0.0.1 >/dev/null &
wait_connected $ns 10001
chk_msk_fallback_nr 1 "check fallback"
-chk_msk_inuse 1 "....chk 1 msk in use"
+chk_msk_inuse 1
chk_msk_cestab 1
flush_pids
-chk_msk_inuse 0 "....chk 0 msk in use after flush"
-chk_msk_cestab 0
+chk_msk_inuse 0 "1->0"
+chk_msk_cestab 0 "1->0"
NR_CLIENTS=100
-for I in `seq 1 $NR_CLIENTS`; do
+for I in $(seq 1 $NR_CLIENTS); do
echo "a" | \
timeout ${timeout_test} \
ip netns exec $ns \
@@ -264,7 +275,7 @@ for I in `seq 1 $NR_CLIENTS`; do
done
mptcp_lib_wait_local_port_listen $ns $((NR_CLIENTS + 10001))
-for I in `seq 1 $NR_CLIENTS`; do
+for I in $(seq 1 $NR_CLIENTS); do
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
@@ -273,12 +284,28 @@ for I in `seq 1 $NR_CLIENTS`; do
done
wait_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
-chk_msk_inuse $((NR_CLIENTS*2)) "....chk many msk in use"
-chk_msk_cestab $((NR_CLIENTS*2))
+chk_msk_inuse $((NR_CLIENTS*2)) "many"
+chk_msk_cestab $((NR_CLIENTS*2)) "many"
flush_pids
-chk_msk_inuse 0 "....chk 0 msk in use after flush"
-chk_msk_cestab 0
+chk_msk_inuse 0 "many->0"
+chk_msk_cestab 0 "many->0"
+
+chk_listener_nr 0 "no listener sockets"
+NR_SERVERS=100
+for I in $(seq 1 $NR_SERVERS); do
+ ip netns exec $ns ./mptcp_connect -p $((I + 20001)) \
+ -t ${timeout_poll} -l 0.0.0.0 >/dev/null 2>&1 &
+done
+mptcp_lib_wait_local_port_listen $ns $((NR_SERVERS + 20001))
+
+chk_listener_nr $NR_SERVERS "many listener sockets"
+
+# graceful termination
+for I in $(seq 1 $NR_SERVERS); do
+ echo a | ip netns exec $ns ./mptcp_connect -p $((I + 20001)) 127.0.0.1 >/dev/null 2>&1 &
+done
+flush_pids
mptcp_lib_result_print_all_tap
exit $ret
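The chk_listener_nr helper added above compares two listener counts per namespace; decoding the terse ss invocations, assuming the usual iproute2 option meanings (-n numeric, -l listening, -H no header, -O one line per socket, -N switch to the named netns, -M MPTCP sockets, -t TCP sockets):

    ss -nlHMON "$ns" | wc -l   # MPTCP listening sockets in $ns
    ss -nlHtON "$ns" | wc -l   # plain TCP listeners backing the subflows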
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 7898d62fce0b..0ca2960c9099 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -1,6 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept the existing style here: there were too many occurrences
+# to fix before addressing all the other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
time_start=$(date +%s)
@@ -13,7 +18,6 @@ sout=""
cin_disconnect=""
cin=""
cout=""
-ksft_skip=4
capture=false
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
@@ -121,16 +125,16 @@ while getopts "$optstring" option;do
esac
done
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
-ns3="ns3-$rndh"
-ns4="ns4-$rndh"
+ns1=""
+ns2=""
+ns3=""
+ns4=""
TEST_COUNT=0
TEST_GROUP=""
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
rm -f "$cin_disconnect" "$cout_disconnect"
@@ -138,21 +142,12 @@ cleanup()
rm -f "$sin" "$sout"
rm -f "$capout"
- local netns
- for netns in "$ns1" "$ns2" "$ns3" "$ns4";do
- ip netns del $netns
- rm -f /tmp/$netns.{nstat,out}
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns3}" "${ns4}"
}
mptcp_lib_check_mptcp
mptcp_lib_check_kallsyms
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip
sin=$(mktemp)
sout=$(mktemp)
@@ -163,10 +158,7 @@ cin_disconnect="$cin".disconnect
cout_disconnect="$cout".disconnect
trap cleanup EXIT
-for i in "$ns1" "$ns2" "$ns3" "$ns4";do
- ip netns add $i || exit $ksft_skip
- ip -net $i link set lo up
-done
+mptcp_lib_ns_init ns1 ns2 ns3 ns4
# "$ns1" ns2 ns3 ns4
# ns1eth2 ns2eth1 ns2eth3 ns3eth2 ns3eth4 ns4eth3
@@ -225,8 +217,9 @@ set_ethtool_flags() {
local dev="$2"
local flags="$3"
- ip netns exec $ns ethtool -K $dev $flags 2>/dev/null
- [ $? -eq 0 ] && echo "INFO: set $ns dev $dev: ethtool -K $flags"
+ if ip netns exec $ns ethtool -K $dev $flags 2>/dev/null; then
+ echo "INFO: set $ns dev $dev: ethtool -K $flags"
+ fi
}
set_random_ethtool_flags() {
@@ -256,8 +249,8 @@ fi
check_mptcp_disabled()
{
- local disabled_ns="ns_disabled-$rndh"
- ip netns add ${disabled_ns} || exit $ksft_skip
+ local disabled_ns
+ mptcp_lib_ns_init disabled_ns
# net.mptcp.enabled should be enabled by default
if [ "$(ip netns exec ${disabled_ns} sysctl net.mptcp.enabled | awk '{ print $3 }')" -ne 1 ]; then
@@ -271,7 +264,7 @@ check_mptcp_disabled()
local err=0
LC_ALL=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
grep -q "^socket: Protocol not available$" && err=1
- ip netns delete ${disabled_ns}
+ mptcp_lib_ns_exit "${disabled_ns}"
if [ ${err} -eq 0 ]; then
echo -e "New MPTCP socket cannot be blocked via sysctl\t\t[ FAIL ]"
@@ -321,7 +314,7 @@ do_transfer()
local extra_args="$7"
local port
- port=$((10000+$TEST_COUNT))
+ port=$((10000+TEST_COUNT))
TEST_COUNT=$((TEST_COUNT+1))
if [ "$rcvbuf" -gt 0 ]; then
@@ -353,6 +346,7 @@ do_transfer()
if $capture; then
local capuser
+ local rndh="${connector_ns:4}"
if [ -z $SUDO_USER ] ; then
capuser=""
else
@@ -378,12 +372,18 @@ do_transfer()
nstat -n
fi
- local stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- local stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- local stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- local stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- local stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- local stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+ local stat_synrx_last_l
+ local stat_ackrx_last_l
+ local stat_cookietx_last
+ local stat_cookierx_last
+ local stat_csum_err_s
+ local stat_csum_err_c
+ stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+ stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
timeout ${timeout_test} \
ip netns exec ${listener_ns} \
@@ -446,11 +446,16 @@ do_transfer()
mptcp_lib_check_transfer $cin $sout "file received by server"
rets=$?
- local stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
- local stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
- local stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
- local stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
- local stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+ local stat_synrx_now_l
+ local stat_ackrx_now_l
+ local stat_cookietx_now
+ local stat_cookierx_now
+ local stat_ooo_now
+ stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+ stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+ stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
+ stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+ stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
expect_synrx=$((stat_synrx_last_l))
expect_ackrx=$((stat_ackrx_last_l))
@@ -459,8 +464,8 @@ do_transfer()
cookies=${cookies##*=}
if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then
- expect_synrx=$((stat_synrx_last_l+$connect_per_transfer))
- expect_ackrx=$((stat_ackrx_last_l+$connect_per_transfer))
+ expect_synrx=$((stat_synrx_last_l+connect_per_transfer))
+ expect_ackrx=$((stat_ackrx_last_l+connect_per_transfer))
fi
if [ ${stat_synrx_now_l} -lt ${expect_synrx} ]; then
@@ -468,7 +473,7 @@ do_transfer()
"${stat_synrx_now_l}" "${expect_synrx}" 1>&2
retc=1
fi
- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} -a ${stat_ooo_now} -eq 0 ]; then
+ if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ] && [ ${stat_ooo_now} -eq 0 ]; then
if [ ${stat_ooo_now} -eq 0 ]; then
printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
"${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
@@ -479,18 +484,20 @@ do_transfer()
fi
if $checksum; then
- local csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
- local csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+ local csum_err_s
+ local csum_err_c
+ csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+ csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
if [ $csum_err_s_nr -gt 0 ]; then
- printf "[ FAIL ]\nserver got $csum_err_s_nr data checksum error[s]"
+ printf "[ FAIL ]\nserver got %d data checksum error[s]" ${csum_err_s_nr}
rets=1
fi
local csum_err_c_nr=$((csum_err_c - stat_csum_err_c))
if [ $csum_err_c_nr -gt 0 ]; then
- printf "[ FAIL ]\nclient got $csum_err_c_nr data checksum error[s]"
+ printf "[ FAIL ]\nclient got %d data checksum error[s]" ${csum_err_c_nr}
retc=1
fi
fi
@@ -658,7 +665,7 @@ run_test_transparent()
return
fi
-ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
+ if ! ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF"
flush ruleset
table inet mangle {
chain divert {
@@ -669,7 +676,7 @@ table inet mangle {
}
}
EOF
- if [ $? -ne 0 ]; then
+ then
echo "SKIP: $msg, could not load nft ruleset"
mptcp_lib_fail_if_expected_feature "nft rules"
mptcp_lib_result_skip "${TEST_GROUP}"
@@ -684,8 +691,7 @@ EOF
local_addr="0.0.0.0"
fi
- ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100
- if [ $? -ne 0 ]; then
+ if ! ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100; then
ip netns exec "$listener_ns" nft flush ruleset
echo "SKIP: $msg, ip $r6flag rule failed"
mptcp_lib_fail_if_expected_feature "ip rule"
@@ -693,8 +699,7 @@ EOF
return
fi
- ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100
- if [ $? -ne 0 ]; then
+ if ! ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100; then
ip netns exec "$listener_ns" nft flush ruleset
ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
echo "SKIP: $msg, ip route add local $local_addr failed"
@@ -857,7 +862,7 @@ stop_if_error "Could not even run ping tests"
echo -n "INFO: Using loss of $tc_loss "
test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms "
-reorder_delay=$(($tc_delay / 4))
+reorder_delay=$((tc_delay / 4))
if [ -z "${tc_reorder}" ]; then
reorder1=$((RANDOM%10))
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 3a5b63026191..1df2d24979a0 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -21,6 +21,7 @@ cinfail=""
cinsent=""
tmpfile=""
cout=""
+err=""
capout=""
ns1=""
ns2=""
@@ -29,11 +30,11 @@ iptables="iptables"
ip6tables="ip6tables"
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
-capture=0
-checksum=0
+capture=false
+checksum=false
ip_mptcp=0
check_invert=0
-validate_checksum=0
+validate_checksum=false
init=0
evts_ns1=""
evts_ns2=""
@@ -85,22 +86,12 @@ init_partial()
{
capout=$(mktemp)
- local sec rndh
- sec=$(date +%s)
- rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-
- ns1="ns1-$rndh"
- ns2="ns2-$rndh"
+ mptcp_lib_ns_init ns1 ns2
local netns
for netns in "$ns1" "$ns2"; do
- ip netns add $netns || exit $ksft_skip
- ip -net $netns link set lo up
- ip netns exec $netns sysctl -q net.mptcp.enabled=1
ip netns exec $netns sysctl -q net.mptcp.pm_type=0 2>/dev/null || true
- ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
- ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
- if [ $checksum -eq 1 ]; then
+ if $checksum; then
ip netns exec $netns sysctl -q net.mptcp.checksum_enabled=1
fi
done
@@ -144,46 +135,22 @@ cleanup_partial()
{
rm -f "$capout"
- local netns
- for netns in "$ns1" "$ns2"; do
- ip netns del $netns
- rm -f /tmp/$netns.{nstat,out}
- done
-}
-
-check_tools()
-{
- mptcp_lib_check_mptcp
- mptcp_lib_check_kallsyms
-
- if ! ip -Version &> /dev/null; then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
- fi
-
- # Use the legacy version if available to support old kernel versions
- if iptables-legacy -V &> /dev/null; then
- iptables="iptables-legacy"
- ip6tables="ip6tables-legacy"
- elif ! iptables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without iptables tool"
- exit $ksft_skip
- elif ! ip6tables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without ip6tables tool"
- exit $ksft_skip
- fi
+ mptcp_lib_ns_exit "${ns1}" "${ns2}"
}
init() {
init=1
- check_tools
+ mptcp_lib_check_mptcp
+ mptcp_lib_check_kallsyms
+ mptcp_lib_check_tools ip ss "${iptables}" "${ip6tables}"
sin=$(mktemp)
sout=$(mktemp)
cin=$(mktemp)
cinsent=$(mktemp)
cout=$(mktemp)
+ err=$(mktemp)
evts_ns1=$(mktemp)
evts_ns2=$(mktemp)
@@ -199,6 +166,7 @@ cleanup()
rm -f "$sin" "$sout" "$cinsent" "$cinfail"
rm -f "$tmpfile"
rm -rf $evts_ns1 $evts_ns2
+ rm -f "$err"
cleanup_partial
}
@@ -380,7 +348,7 @@ reset_with_checksum()
ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=$ns1_enable
ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=$ns2_enable
- validate_checksum=1
+ validate_checksum=true
}
reset_with_allow_join_id0()
@@ -413,7 +381,7 @@ reset_with_allow_join_id0()
setup_fail_rules()
{
check_invert=1
- validate_checksum=1
+ validate_checksum=true
local i="$1"
local ip="${2:-4}"
local tables
@@ -462,12 +430,8 @@ reset_with_events()
{
reset "${1}" || return 1
- :> "$evts_ns1"
- :> "$evts_ns2"
- ip netns exec $ns1 ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
- evts_ns1_pid=$!
- ip netns exec $ns2 ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
- evts_ns2_pid=$!
+ mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
+ mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
}
reset_with_tcp_filter()
@@ -640,14 +604,9 @@ wait_mpj()
kill_events_pids()
{
mptcp_lib_kill_wait $evts_ns1_pid
+ evts_ns1_pid=0
mptcp_lib_kill_wait $evts_ns2_pid
-}
-
-kill_tests_wait()
-{
- #shellcheck disable=SC2046
- kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
- wait
+ evts_ns2_pid=0
}
pm_nl_set_limits()
@@ -1024,7 +983,7 @@ do_transfer()
:> "$sout"
:> "$capout"
- if [ $capture -eq 1 ]; then
+ if $capture; then
local capuser
if [ -z $SUDO_USER ] ; then
capuser=""
@@ -1126,7 +1085,7 @@ do_transfer()
wait $spid
local rets=$?
- if [ $capture -eq 1 ]; then
+ if $capture; then
sleep 1
kill $cappid
fi
@@ -1514,7 +1473,7 @@ chk_join_nr()
else
print_ok
fi
- if [ $validate_checksum -eq 1 ]; then
+ if $validate_checksum; then
chk_csum_nr $csum_ns1 $csum_ns2
chk_fail_nr $fail_nr $fail_nr
chk_rst_nr $rst_nr $rst_nr
@@ -3340,16 +3299,17 @@ userspace_pm_rm_sf()
{
local evts=$evts_ns1
local t=${3:-1}
- local ip=4
+ local ip
local tk da dp sp
local cnt
[ "$1" == "$ns2" ] && evts=$evts_ns2
- if mptcp_lib_is_v6 $2; then ip=6; fi
+ [ -n "$(mptcp_lib_evts_get_info "saddr4" "$evts" $t)" ] && ip=4
+ [ -n "$(mptcp_lib_evts_get_info "saddr6" "$evts" $t)" ] && ip=6
tk=$(mptcp_lib_evts_get_info token "$evts")
- da=$(mptcp_lib_evts_get_info "daddr$ip" "$evts" $t)
- dp=$(mptcp_lib_evts_get_info dport "$evts" $t)
- sp=$(mptcp_lib_evts_get_info sport "$evts" $t)
+ da=$(mptcp_lib_evts_get_info "daddr$ip" "$evts" $t $2)
+ dp=$(mptcp_lib_evts_get_info dport "$evts" $t $2)
+ sp=$(mptcp_lib_evts_get_info sport "$evts" $t $2)
cnt=$(rm_sf_count ${1})
ip netns exec $1 ./pm_nl_ctl dsf lip $2 lport $sp \
@@ -3357,6 +3317,77 @@ userspace_pm_rm_sf()
wait_rm_sf $1 "${cnt}"
}
+check_output()
+{
+ local cmd="$1"
+ local expected="$2"
+ local msg="$3"
+ local rc=0
+
+ mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?}
+ if [ ${rc} -eq 2 ]; then
+ fail_test "fail to check output # error ${rc}"
+ elif [ ${rc} -eq 0 ]; then
+ print_ok
+ elif [ ${rc} -eq 1 ]; then
+ fail_test "fail to check output # different output"
+ fi
+}
+
+# $1: ns
+userspace_pm_dump()
+{
+ local evts=$evts_ns1
+ local tk
+
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+
+ ip netns exec $1 ./pm_nl_ctl dump token $tk
+}
+
+# $1: ns ; $2: id
+userspace_pm_get_addr()
+{
+ local evts=$evts_ns1
+ local tk
+
+ [ "$1" == "$ns2" ] && evts=$evts_ns2
+ tk=$(mptcp_lib_evts_get_info token "$evts")
+
+ ip netns exec $1 ./pm_nl_ctl get $2 token $tk
+}
+
+userspace_pm_chk_dump_addr()
+{
+ local ns="${1}"
+ local exp="${2}"
+ local check="${3}"
+
+ print_check "dump addrs ${check}"
+
+ if mptcp_lib_kallsyms_has "mptcp_userspace_pm_dump_addr$"; then
+ check_output "userspace_pm_dump ${ns}" "${exp}"
+ else
+ print_skip
+ fi
+}
+
+userspace_pm_chk_get_addr()
+{
+ local ns="${1}"
+ local id="${2}"
+ local exp="${3}"
+
+ print_check "get id ${id} addr"
+
+ if mptcp_lib_kallsyms_has "mptcp_userspace_pm_get_addr$"; then
+ check_output "userspace_pm_get_addr ${ns} ${id}" "${exp}"
+ else
+ print_skip
+ fi
+}
+
userspace_tests()
{
# userspace pm type prevents add_addr
@@ -3436,24 +3467,35 @@ userspace_tests()
if reset_with_events "userspace pm add & remove address" &&
continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
set_userspace_pm $ns1
- pm_nl_set_limits $ns2 1 1
+ pm_nl_set_limits $ns2 2 2
speed=5 \
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
wait_mpj $ns1
userspace_pm_add_addr $ns1 10.0.2.1 10
- chk_join_nr 1 1 1
- chk_add_nr 1 1
- chk_mptcp_info subflows 1 subflows 1
- chk_subflows_total 2 2
- chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
+ userspace_pm_add_addr $ns1 10.0.3.1 20
+ chk_join_nr 2 2 2
+ chk_add_nr 2 2
+ chk_mptcp_info subflows 2 subflows 2
+ chk_subflows_total 3 3
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
+ userspace_pm_chk_dump_addr "${ns1}" \
+ $'id 10 flags signal 10.0.2.1\nid 20 flags signal 10.0.3.1' \
+ "signal"
+ userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1"
+ userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1"
userspace_pm_rm_addr $ns1 10
userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $SUB_ESTABLISHED
- chk_rm_nr 1 1 invert
+ userspace_pm_chk_dump_addr "${ns1}" \
+ "id 20 flags signal 10.0.3.1" "after rm_addr 10"
+ userspace_pm_rm_addr $ns1 20
+ userspace_pm_rm_sf $ns1 10.0.3.1 $SUB_ESTABLISHED
+ userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20"
+ chk_rm_nr 2 2 invert
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
kill_events_pids
- wait $tests_pid
+ mptcp_lib_kill_wait $tests_pid
fi
# userspace pm create destroy subflow
@@ -3469,13 +3511,20 @@ userspace_tests()
chk_join_nr 1 1 1
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 2 2
+ userspace_pm_chk_dump_addr "${ns2}" \
+ "id 20 flags subflow 10.0.3.2" \
+ "subflow"
+ userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2"
userspace_pm_rm_addr $ns2 20
userspace_pm_rm_sf $ns2 10.0.3.2 $SUB_ESTABLISHED
+ userspace_pm_chk_dump_addr "${ns2}" \
+ "" \
+ "after rm_addr 20"
chk_rm_nr 1 1
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
kill_events_pids
- wait $tests_pid
+ mptcp_lib_kill_wait $tests_pid
fi
# userspace pm create id 0 subflow
@@ -3490,11 +3539,13 @@ userspace_tests()
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
userspace_pm_add_sf $ns2 10.0.3.2 0
+ userspace_pm_chk_dump_addr "${ns2}" \
+ "id 0 flags subflow 10.0.3.2" "id 0 subflow"
chk_join_nr 1 1 1
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 2 2
kill_events_pids
- wait $tests_pid
+ mptcp_lib_kill_wait $tests_pid
fi
# userspace pm remove initial subflow
@@ -3518,7 +3569,7 @@ userspace_tests()
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 1 1
kill_events_pids
- wait $tests_pid
+ mptcp_lib_kill_wait $tests_pid
fi
# userspace pm send RM_ADDR for ID 0
@@ -3544,7 +3595,7 @@ userspace_tests()
chk_mptcp_info subflows 1 subflows 1
chk_subflows_total 1 1
kill_events_pids
- wait $tests_pid
+ mptcp_lib_kill_wait $tests_pid
fi
}
@@ -3558,7 +3609,8 @@ endpoint_tests()
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
speed=slow \
- run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
wait_mpj $ns1
pm_nl_check_endpoint "creation" \
@@ -3573,7 +3625,7 @@ endpoint_tests()
pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
pm_nl_check_endpoint "modif is allowed" \
$ns2 10.0.2.2 id 1 flags signal
- kill_tests_wait
+ mptcp_lib_kill_wait $tests_pid
fi
if reset "delete and re-add" &&
@@ -3582,7 +3634,8 @@ endpoint_tests()
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
test_linkfail=4 speed=20 \
- run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
wait_mpj $ns2
chk_subflow_nr "before delete" 2
@@ -3597,7 +3650,7 @@ endpoint_tests()
wait_mpj $ns2
chk_subflow_nr "after re-add" 2
chk_mptcp_info subflows 1 subflows 1
- kill_tests_wait
+ mptcp_lib_kill_wait $tests_pid
fi
}
@@ -3669,10 +3722,10 @@ while getopts "${all_tests_args}cCih" opt; do
tests+=("${all_tests[${opt}]}")
;;
c)
- capture=1
+ capture=true
;;
C)
- checksum=1
+ checksum=true
;;
i)
ip_mptcp=1
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 022262a2cfe0..5d071b6eb780 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -6,9 +6,10 @@ readonly KSFT_FAIL=1
readonly KSFT_SKIP=4
# shellcheck disable=SC2155 # declare and assign separately
-readonly KSFT_TEST=$(basename "${0}" | sed 's/\.sh$//g')
+readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}"
MPTCP_LIB_SUBTESTS=()
+MPTCP_LIB_SUBTESTS_DUPLICATED=0
# only if supported (or forced) and not disabled, see no-color.org
if { [ -t 1 ] || [ "${SELFTESTS_MPTCP_LIB_COLOR_FORCE:-}" = "1" ]; } &&
@@ -146,12 +147,26 @@ mptcp_lib_kversion_ge() {
mptcp_lib_fail_if_expected_feature "kernel version ${1} lower than ${v}"
}
+__mptcp_lib_result_check_duplicated() {
+ local subtest
+
+ for subtest in "${MPTCP_LIB_SUBTESTS[@]}"; do
+ if [[ "${subtest}" == *" - ${KSFT_TEST}: ${*%% #*}" ]]; then
+ MPTCP_LIB_SUBTESTS_DUPLICATED=1
+ mptcp_lib_print_err "Duplicated entry: ${*}"
+ break
+ fi
+ done
+}
+
__mptcp_lib_result_add() {
local result="${1}"
shift
local id=$((${#MPTCP_LIB_SUBTESTS[@]} + 1))
+ __mptcp_lib_result_check_duplicated "${*}"
+
MPTCP_LIB_SUBTESTS+=("${result} ${id} - ${KSFT_TEST}: ${*}")
}
@@ -206,6 +221,12 @@ mptcp_lib_result_print_all_tap() {
for subtest in "${MPTCP_LIB_SUBTESTS[@]}"; do
printf "%s\n" "${subtest}"
done
+
+ if [ "${MPTCP_LIB_SUBTESTS_DUPLICATED}" = 1 ] &&
+ mptcp_lib_expect_all_features; then
+ mptcp_lib_print_err "Duplicated test entries"
+ exit ${KSFT_FAIL}
+ fi
}
# get the value of keyword $1 in the line marked by keyword $2
@@ -213,9 +234,9 @@ mptcp_lib_get_info_value() {
grep "${2}" | sed -n 's/.*\('"${1}"':\)\([0-9a-f:.]*\).*$/\2/p;q'
}
-# $1: info name ; $2: evts_ns ; $3: event type
+# $1: info name ; $2: evts_ns ; [$3: event type; [$4: addr]]
mptcp_lib_evts_get_info() {
- mptcp_lib_get_info_value "${1}" "^type:${3:-1}," < "${2}"
+ grep "${4:-}" "${2}" | mptcp_lib_get_info_value "${1}" "^type:${3:-1},"
}
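The optional fourth argument pre-filters the events file with grep before the usual "^type:N," lookup, which is what lets userspace_pm_rm_sf() above pick the subflow matching a given address when several events share the same type. A sketch against an illustrative, hand-written events file (the type number and fields are made up for the example; real event dumps may differ):

evts=$(mktemp)
cat > "${evts}" <<'EOF'
type:10,token:1234,daddr4:10.0.2.1,dport:443
type:10,token:1234,daddr4:10.0.3.1,dport:8443
EOF
# with mptcp_lib.sh sourced:
mptcp_lib_evts_get_info dport "${evts}" 10 10.0.3.1	# -> 8443
rm -f "${evts}"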
# $1: PID
@@ -298,3 +319,94 @@ mptcp_lib_wait_local_port_listen() {
sleep 0.1
done
}
+
+mptcp_lib_check_output() {
+ local err="${1}"
+ local cmd="${2}"
+ local expected="${3}"
+ local cmd_ret=0
+ local out
+
+ if ! out=$(${cmd} 2>"${err}"); then
+ cmd_ret=${?}
+ fi
+
+ if [ ${cmd_ret} -ne 0 ]; then
+ mptcp_lib_print_err "[FAIL] command execution '${cmd}' stderr"
+ cat "${err}"
+ return 2
+ elif [ "${out}" = "${expected}" ]; then
+ return 0
+ else
+ mptcp_lib_print_err "[FAIL] expected '${expected}' got '${out}'"
+ return 1
+ fi
+}
+
+mptcp_lib_check_tools() {
+ local tool
+
+ for tool in "${@}"; do
+ case "${tool}" in
+ "ip")
+ if ! ip -Version &> /dev/null; then
+ mptcp_lib_print_warn "SKIP: Could not run test without ip tool"
+ exit ${KSFT_SKIP}
+ fi
+ ;;
+ "ss")
+ if ! ss -h | grep -q MPTCP; then
+ mptcp_lib_print_warn "SKIP: ss tool does not support MPTCP"
+ exit ${KSFT_SKIP}
+ fi
+ ;;
+ "iptables"* | "ip6tables"*)
+ if ! "${tool}" -V &> /dev/null; then
+ mptcp_lib_print_warn "SKIP: Could not run all tests without ${tool}"
+ exit ${KSFT_SKIP}
+ fi
+ ;;
+ *)
+ mptcp_lib_print_err "Internal error: unsupported tool: ${tool}"
+ exit ${KSFT_FAIL}
+ ;;
+ esac
+ done
+}
+
+mptcp_lib_ns_init() {
+ local sec rndh
+
+ sec=$(date +%s)
+ rndh=$(printf %x "${sec}")-$(mktemp -u XXXXXX)
+
+ local netns
+ for netns in "${@}"; do
+ eval "${netns}=${netns}-${rndh}"
+
+ ip netns add "${!netns}" || exit ${KSFT_SKIP}
+ ip -net "${!netns}" link set lo up
+ ip netns exec "${!netns}" sysctl -q net.mptcp.enabled=1
+ ip netns exec "${!netns}" sysctl -q net.ipv4.conf.all.rp_filter=0
+ ip netns exec "${!netns}" sysctl -q net.ipv4.conf.default.rp_filter=0
+ done
+}
+
+mptcp_lib_ns_exit() {
+ local netns
+ for netns in "${@}"; do
+ ip netns del "${netns}"
+ rm -f /tmp/"${netns}".{nstat,out}
+ done
+}
+
+mptcp_lib_events() {
+ local ns="${1}"
+ local evts="${2}"
+ declare -n pid="${3}"
+
+ :>"${evts}"
+
+ mptcp_lib_kill_wait "${pid:-0}"
+ ip netns exec "${ns}" ./pm_nl_ctl events >> "${evts}" 2>&1 &
+ pid=$!
+}
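Taken together these helpers factor out the boilerplate every MPTCP script used to repeat: mptcp_lib_ns_init() takes variable *names* (resolved via ${!netns}), assigns each a randomized namespace name and creates it; mptcp_lib_ns_exit() deletes the namespaces and their /tmp leftovers; mptcp_lib_events() (re)starts a pm_nl_ctl events listener, returning its PID through a nameref. A minimal sketch, assuming mptcp_lib.sh and ./pm_nl_ctl are available in the current directory:

#!/bin/bash
. "$(dirname "${0}")/mptcp_lib.sh"

ns1=""
evts=$(mktemp)
evts_pid=0

trap 'mptcp_lib_kill_wait ${evts_pid}; mptcp_lib_ns_exit "${ns1}"; rm -f "${evts}"' EXIT

mptcp_lib_ns_init ns1				# ns1 becomes "ns1-<sec>-<rand>"
mptcp_lib_events "${ns1}" "${evts}" evts_pid	# listener PID lands in evts_pid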
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index c643872ddf47..6ed4aa32222f 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -1,6 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept their absence here: there were too many occurrences to
+# fix before addressing all the other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
ret=0
@@ -8,17 +13,14 @@ sin=""
sout=""
cin=""
cout=""
-ksft_skip=4
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
iptables="iptables"
ip6tables="ip6tables"
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
-ns_sbox="ns_sbox-$rndh"
+ns1=""
+ns2=""
+ns_sbox=""
add_mark_rules()
{
@@ -40,17 +42,10 @@ add_mark_rules()
init()
{
- local netns
- for netns in "$ns1" "$ns2" "$ns_sbox";do
- ip netns add $netns || exit $ksft_skip
- ip -net $netns link set lo up
- ip netns exec $netns sysctl -q net.mptcp.enabled=1
- ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
- ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
- done
+ mptcp_lib_ns_init ns1 ns2 ns_sbox
local i
- for i in `seq 1 4`; do
+ for i in $(seq 1 4); do
ip link add ns1eth$i netns "$ns1" type veth peer name ns2eth$i netns "$ns2"
ip -net "$ns1" addr add 10.0.$i.1/24 dev ns1eth$i
ip -net "$ns1" addr add dead:beef:$i::1/64 dev ns1eth$i nodad
@@ -77,36 +72,18 @@ init()
add_mark_rules $ns2 2
}
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
- local netns
- for netns in "$ns1" "$ns2" "$ns_sbox"; do
- ip netns del $netns
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns_sbox}"
rm -f "$cin" "$cout"
rm -f "$sin" "$sout"
}
mptcp_lib_check_mptcp
mptcp_lib_check_kallsyms
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
-
-# Use the legacy version if available to support old kernel versions
-if iptables-legacy -V &> /dev/null; then
- iptables="iptables-legacy"
- ip6tables="ip6tables-legacy"
-elif ! iptables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without iptables tool"
- exit $ksft_skip
-elif ! ip6tables -V &> /dev/null; then
- echo "SKIP: Could not run all tests without ip6tables tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip "${iptables}" "${ip6tables}"
check_mark()
{
@@ -286,12 +263,12 @@ do_tcpinq_test()
local lret=$?
if [ $lret -ne 0 ];then
ret=$lret
- echo "FAIL: mptcp_inq $@" 1>&2
+ echo "FAIL: mptcp_inq $*" 1>&2
mptcp_lib_result_fail "TCP_INQ: $*"
return $lret
fi
- echo "PASS: TCP_INQ cmsg/ioctl $@"
+ echo "PASS: TCP_INQ cmsg/ioctl $*"
mptcp_lib_result_pass "TCP_INQ: $*"
return $lret
}
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index 8f4ff123a7eb..427fc5c70b3c 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -1,16 +1,20 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept their absence here: there were too many occurrences to
+# fix before addressing all the other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
-ksft_skip=4
ret=0
usage() {
echo "Usage: $0 [ -h ]"
}
-
+optstring=h
while getopts "$optstring" option;do
case "$option" in
"h")
@@ -24,52 +28,40 @@ while getopts "$optstring" option;do
esac
done
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
+ns1=""
err=$(mktemp)
-ret=0
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
rm -f $err
- ip netns del $ns1
+ mptcp_lib_ns_exit "${ns1}"
}
mptcp_lib_check_mptcp
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip
trap cleanup EXIT
-ip netns add $ns1 || exit $ksft_skip
-ip -net $ns1 link set lo up
-ip netns exec $ns1 sysctl -q net.mptcp.enabled=1
+mptcp_lib_ns_init ns1
check()
{
local cmd="$1"
local expected="$2"
local msg="$3"
- local out=`$cmd 2>$err`
- local cmd_ret=$?
+ local rc=0
printf "%-50s" "$msg"
- if [ $cmd_ret -ne 0 ]; then
- echo "[FAIL] command execution '$cmd' stderr "
- cat $err
- mptcp_lib_result_fail "${msg} # error ${cmd_ret}"
+ mptcp_lib_check_output "${err}" "${cmd}" "${expected}" || rc=${?}
+ if [ ${rc} -eq 2 ]; then
+ mptcp_lib_result_fail "${msg} # error ${rc}"
ret=1
- elif [ "$out" = "$expected" ]; then
- echo "[ OK ]"
+ elif [ ${rc} -eq 0 ]; then
+ mptcp_lib_print_ok "[ OK ]"
mptcp_lib_result_pass "${msg}"
- else
- echo -n "[FAIL] "
- echo "expected '$expected' got '$out'"
+ elif [ ${rc} -eq 1 ]; then
mptcp_lib_result_fail "${msg} # different output"
ret=1
fi
@@ -105,14 +97,14 @@ check "ip netns exec $ns1 ./pm_nl_ctl get 4" "" "duplicate addr"
ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.4 flags signal
check "ip netns exec $ns1 ./pm_nl_ctl get 4" "id 4 flags signal 10.0.1.4" "id addr increment"
-for i in `seq 5 9`; do
+for i in $(seq 5 9); do
ip netns exec $ns1 ./pm_nl_ctl add 10.0.1.$i flags signal >/dev/null 2>&1
done
check "ip netns exec $ns1 ./pm_nl_ctl get 9" "id 9 flags signal 10.0.1.9" "hard addr limit"
check "ip netns exec $ns1 ./pm_nl_ctl get 10" "" "above hard addr limit"
ip netns exec $ns1 ./pm_nl_ctl del 9
-for i in `seq 10 255`; do
+for i in $(seq 10 255); do
ip netns exec $ns1 ./pm_nl_ctl add 10.0.0.9 id $i
ip netns exec $ns1 ./pm_nl_ctl del $i
done
@@ -183,7 +175,7 @@ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
subflow 10.0.1.1" " (nobackup)"
# fullmesh support has been added later
-ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh
+ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh 2>/dev/null
if ip netns exec $ns1 ./pm_nl_ctl dump | grep -q "fullmesh" ||
mptcp_lib_expect_all_features; then
check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
@@ -194,6 +186,12 @@ subflow 10.0.1.1" " (nofullmesh)"
ip netns exec $ns1 ./pm_nl_ctl set id 1 flags backup,fullmesh
check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
subflow,backup,fullmesh 10.0.1.1" " (backup,fullmesh)"
+else
+ for st in fullmesh nofullmesh backup,fullmesh; do
+ st=" (${st})"
+ printf "%-50s%s\n" "${st}" "[SKIP]"
+ mptcp_lib_result_skip "${st}"
+ done
fi
mptcp_lib_result_print_all_tap
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index 49369c4a5f26..7426a2cbd4a0 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -453,6 +453,7 @@ int csf(int fd, int pm_family, int argc, char *argv[])
char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
1024];
+ u_int32_t flags = MPTCP_PM_ADDR_FLAG_SUBFLOW;
const char *params[5];
struct nlmsghdr *nh;
struct rtattr *addr;
@@ -558,6 +559,13 @@ int csf(int fd, int pm_family, int argc, char *argv[])
off += NLMSG_ALIGN(rta->rta_len);
}
+ /* addr flags */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &flags, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
addr->rta_len = off - addr_start;
}
@@ -1079,6 +1087,7 @@ int get_addr(int fd, int pm_family, int argc, char *argv[])
1024];
struct rtattr *rta, *nest;
struct nlmsghdr *nh;
+ u_int32_t token = 0;
int nest_start;
u_int8_t id;
int off = 0;
@@ -1089,10 +1098,12 @@ int get_addr(int fd, int pm_family, int argc, char *argv[])
MPTCP_PM_VER);
/* the only argument is the address id */
- if (argc != 3)
+ if (argc != 3 && argc != 5)
syntax(argv);
id = atoi(argv[2]);
+ if (argc == 5 && !strcmp(argv[3], "token"))
+ token = strtoul(argv[4], NULL, 10);
nest_start = off;
nest = (void *)(data + off);
@@ -1108,6 +1119,15 @@ int get_addr(int fd, int pm_family, int argc, char *argv[])
off += NLMSG_ALIGN(rta->rta_len);
nest->rta_len = off - nest_start;
+ /* token */
+ if (token) {
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
return 0;
}
@@ -1119,8 +1139,16 @@ int dump_addrs(int fd, int pm_family, int argc, char *argv[])
1024];
pid_t pid = getpid();
struct nlmsghdr *nh;
+ u_int32_t token = 0;
+ struct rtattr *rta;
int off = 0;
+ if (argc != 2 && argc != 4)
+ syntax(argv);
+
+ if (argc == 4 && !strcmp(argv[2], "token"))
+ token = strtoul(argv[3], NULL, 10);
+
memset(data, 0, sizeof(data));
nh = (void *)data;
off = init_genl_req(data, pm_family, MPTCP_PM_CMD_GET_ADDR,
@@ -1130,6 +1158,15 @@ int dump_addrs(int fd, int pm_family, int argc, char *argv[])
nh->nlmsg_pid = pid;
nh->nlmsg_len = off;
+ /* token */
+ if (token) {
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
print_addrs(nh, pm_family, do_nl_req(fd, nh, off, sizeof(data)));
return 0;
}
diff --git a/tools/testing/selftests/net/mptcp/settings b/tools/testing/selftests/net/mptcp/settings
index 79b65bdf05db..abc5648b59ab 100644
--- a/tools/testing/selftests/net/mptcp/settings
+++ b/tools/testing/selftests/net/mptcp/settings
@@ -1 +1 @@
-timeout=1200
+timeout=1800
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index ae8ad5d6fb9d..365fb3d6ef55 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -1,21 +1,29 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Double quotes to prevent globbing and word splitting are recommended in new
+# code, but we accept their absence here: there were too many occurrences to
+# fix before addressing all the other issues detected by shellcheck.
+#shellcheck disable=SC2086
+
. "$(dirname "${0}")/mptcp_lib.sh"
-sec=$(date +%s)
-rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
-ns3="ns3-$rndh"
+ns1=""
+ns2=""
+ns3=""
capture=false
-ksft_skip=4
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
test_cnt=1
ret=0
bail=0
slack=50
+large=""
+small=""
+sout=""
+cout=""
+capout=""
+size=0
usage() {
echo "Usage: $0 [ -b ] [ -c ] [ -d ]"
@@ -24,25 +32,19 @@ usage() {
echo -e "\t-d: debug this script"
}
+# This function is used in the cleanup trap
+#shellcheck disable=SC2317
cleanup()
{
rm -f "$cout" "$sout"
rm -f "$large" "$small"
rm -f "$capout"
- local netns
- for netns in "$ns1" "$ns2" "$ns3";do
- ip netns del $netns
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns3}"
}
mptcp_lib_check_mptcp
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
- echo "SKIP: Could not run test without ip tool"
- exit $ksft_skip
-fi
+mptcp_lib_check_tools ip
# "$ns1" ns2 ns3
# ns1eth1 ns2eth1 ns2eth3 ns3eth1
@@ -64,12 +66,7 @@ setup()
trap cleanup EXIT
- for i in "$ns1" "$ns2" "$ns3";do
- ip netns add $i || exit $ksft_skip
- ip -net $i link set lo up
- ip netns exec $i sysctl -q net.ipv4.conf.all.rp_filter=0
- ip netns exec $i sysctl -q net.ipv4.conf.default.rp_filter=0
- done
+ mptcp_lib_ns_init ns1 ns2 ns3
ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth2 netns "$ns2"
@@ -129,7 +126,7 @@ do_transfer()
local sin=$2
local max_time=$3
local port
- port=$((10000+$test_cnt))
+ port=$((10000+test_cnt))
test_cnt=$((test_cnt+1))
:> "$cout"
@@ -138,6 +135,7 @@ do_transfer()
if $capture; then
local capuser
+ local rndh="${ns1:4}"
if [ -z $SUDO_USER ] ; then
capuser=""
else
@@ -250,7 +248,8 @@ run_test()
[ $bail -eq 0 ] || exit $ret
fi
- printf "%-60s" "$msg - reverse direction"
+ msg+=" - reverse direction"
+ printf "%-60s" "${msg}"
do_transfer $large $small $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
@@ -284,12 +283,12 @@ done
setup
run_test 10 10 0 0 "balanced bwidth"
-run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
+run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
# we still need some additional infrastructure to pass the following test-cases
-run_test 30 10 0 0 "unbalanced bwidth"
-run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
-run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
+run_test 10 3 0 0 "unbalanced bwidth"
+run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
+run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
mptcp_lib_result_print_all_tap
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 6167837f48e1..b0cce8f065d8 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -17,11 +17,7 @@ if ! mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
echo "userspace pm tests are not supported by the kernel: SKIP"
exit ${KSFT_SKIP}
fi
-
-if ! ip -Version &> /dev/null; then
- echo "SKIP: Cannot not run test without ip tool"
- exit ${KSFT_SKIP}
-fi
+mptcp_lib_check_tools ip
ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
REMOVED=7 # MPTCP_EVENT_REMOVED
@@ -54,10 +50,8 @@ app6_port=50004
client_addr_id=${RANDOM:0:2}
server_addr_id=${RANDOM:0:2}
-sec=$(date +%s)
-rndh=$(printf %x "$sec")-$(mktemp -u XXXXXX)
-ns1="ns1-$rndh"
-ns2="ns2-$rndh"
+ns1=""
+ns2=""
ret=0
test_name=""
@@ -75,7 +69,7 @@ print_test()
{
test_name="${1}"
- _printf "%-63s" "${test_name}"
+ _printf "%-68s" "${test_name}"
}
print_results()
@@ -122,10 +116,7 @@ cleanup()
mptcp_lib_kill_wait $pid
done
- local netns
- for netns in "$ns1" "$ns2" ;do
- ip netns del "$netns"
- done
+ mptcp_lib_ns_exit "${ns1}" "${ns2}"
rm -rf $file $client_evts $server_evts
@@ -135,10 +126,8 @@ cleanup()
trap cleanup EXIT
# Create and configure network namespaces for testing
+mptcp_lib_ns_init ns1 ns2
for i in "$ns1" "$ns2" ;do
- ip netns add "$i" || exit 1
- ip -net "$i" link set lo up
- ip netns exec "$i" sysctl -q net.mptcp.enabled=1
ip netns exec "$i" sysctl -q net.mptcp.pm_type=1
done
@@ -160,17 +149,23 @@ ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth1 nodad
ip -net "$ns2" link set ns2eth1 up
+file=$(mktemp)
+mptcp_lib_make_file "$file" 2 1
+
+# Capture netlink events over the two network namespaces running
+# the MPTCP client and server
+client_evts=$(mktemp)
+mptcp_lib_events "${ns2}" "${client_evts}" client_evts_pid
+server_evts=$(mktemp)
+mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid
+sleep 0.5
+
print_title "Init"
print_test "Created network namespaces ns1, ns2"
test_pass
make_connection()
{
- if [ -z "$file" ]; then
- file=$(mktemp)
- fi
- mptcp_lib_make_file "$file" 2 1
-
local is_v6=$1
local app_port=$app4_port
local connect_addr="10.0.1.1"
@@ -184,27 +179,8 @@ make_connection()
is_v6="v4"
fi
- # Capture netlink events over the two network namespaces running
- # the MPTCP client and server
- if [ -z "$client_evts" ]; then
- client_evts=$(mktemp)
- fi
:>"$client_evts"
- if [ $client_evts_pid -ne 0 ]; then
- mptcp_lib_kill_wait $client_evts_pid
- fi
- ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
- client_evts_pid=$!
- if [ -z "$server_evts" ]; then
- server_evts=$(mktemp)
- fi
:>"$server_evts"
- if [ $server_evts_pid -ne 0 ]; then
- mptcp_lib_kill_wait $server_evts_pid
- fi
- ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
- server_evts_pid=$!
- sleep 0.5
# Run the server
ip netns exec "$ns1" \
@@ -542,7 +518,7 @@ verify_subflow_events()
local remid
local info
- info="${e_saddr} (${e_from}) => ${e_daddr} (${e_to})"
+ info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})"
if [ "$e_type" = "$SUB_ESTABLISHED" ]
then
diff --git a/tools/testing/selftests/net/net_helper.sh b/tools/testing/selftests/net/net_helper.sh
index 4fe0befa13fb..6596fe03c77f 100755..100644
--- a/tools/testing/selftests/net/net_helper.sh
+++ b/tools/testing/selftests/net/net_helper.sh
@@ -8,13 +8,16 @@ wait_local_port_listen()
local listener_ns="${1}"
local port="${2}"
local protocol="${3}"
- local port_hex
+ local pattern
local i
- port_hex="$(printf "%04X" "${port}")"
+ pattern=":$(printf "%04X" "${port}") "
+
+ # for tcp protocol additionally check the socket state
+ [ ${protocol} = "tcp" ] && pattern="${pattern}0A"
for i in $(seq 10); do
- if ip netns exec "${listener_ns}" cat /proc/net/"${protocol}"* | \
- grep -q "${port_hex}"; then
+ if ip netns exec "${listener_ns}" awk '{print $2" "$4}' \
+ /proc/net/"${protocol}"* | grep -q "${pattern}"; then
break
fi
sleep 0.1
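The rewritten matcher is stricter on two counts: awk narrows each line of /proc/net/tcp* to the local_address ($2) and st ($4) columns, and for TCP the state must additionally be 0A (TCP_LISTEN), so a stray match on a remote port or an inode can no longer end the wait early. How the pattern is composed for TCP port 50000:

port=50000
pattern=":$(printf "%04X" "${port}") 0A"
echo "${pattern}"	# -> ":C350 0A"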
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index f8499d4c87f3..5cae53543849 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -17,6 +17,7 @@ tests="
ct_connect_v4 ip4-ct-xon: Basic ipv4 tcp connection using ct
connect_v4 ip4-xon: Basic ipv4 ping between two NS
nat_connect_v4 ip4-nat-xon: Basic ipv4 tcp connection via NAT
+ nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT
netlink_checks ovsnl: validate netlink attrs and settings
upcall_interfaces ovs: test the upcall interfaces
drop_reason drop: test drop reasons are emitted"
@@ -473,6 +474,67 @@ test_nat_connect_v4 () {
return 0
}
+# nat_related_v4 test
+# - client->server ip packets go via SNAT
+# - client solicits ICMP destination unreachable packet from server
+# - undo NAT for ICMP reply and test dst ip has been updated
+test_nat_related_v4 () {
+ which nc >/dev/null 2>/dev/null || return $ksft_skip
+
+ sbx_add "test_nat_related_v4" || return $?
+
+ ovs_add_dp "test_nat_related_v4" natrelated4 || return 1
+ info "create namespaces"
+ for ns in client server; do
+ ovs_add_netns_and_veths "test_nat_related_v4" "natrelated4" "$ns" \
+ "${ns:0:1}0" "${ns:0:1}1" || return 1
+ done
+
+ ip netns exec client ip addr add 172.31.110.10/24 dev c1
+ ip netns exec client ip link set c1 up
+ ip netns exec server ip addr add 172.31.110.20/24 dev s1
+ ip netns exec server ip link set s1 up
+
+ ip netns exec server ip route add 192.168.0.20/32 via 172.31.110.10
+
+ # Allow ARP
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "in_port(1),eth(),eth_type(0x0806),arp()" "2" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "in_port(2),eth(),eth_type(0x0806),arp()" "1" || return 1
+
+ # Allow IP traffic from client->server, rewrite source IP with SNAT to 192.168.0.20
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "ct_state(-trk),in_port(1),eth(),eth_type(0x0800),ipv4(dst=172.31.110.20)" \
+ "ct(commit,nat(src=192.168.0.20)),recirc(0x1)" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "recirc_id(0x1),ct_state(+trk-inv),in_port(1),eth(),eth_type(0x0800),ipv4()" \
+ "2" || return 1
+
+ # Allow related ICMP responses back from server and undo NAT to restore original IP
+ # Drop any ICMP related packets where dst ip hasn't been restored back to original IP
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "ct_state(-trk),in_port(2),eth(),eth_type(0x0800),ipv4()" \
+ "ct(commit,nat),recirc(0x2)" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "recirc_id(0x2),ct_state(+rel+trk),in_port(2),eth(),eth_type(0x0800),ipv4(src=172.31.110.20,dst=172.31.110.10,proto=1),icmp()" \
+ "1" || return 1
+ ovs_add_flow "test_nat_related_v4" natrelated4 \
+ "recirc_id(0x2),ct_state(+rel+trk),in_port(2),eth(),eth_type(0x0800),ipv4(dst=192.168.0.20,proto=1),icmp()" \
+ "drop" || return 1
+
+ # Solicit destination unreachable response from server
+ ovs_sbx "test_nat_related_v4" ip netns exec client \
+ bash -c "echo a | nc -u -w 1 172.31.110.20 10000"
+
+ # Check to make sure no packets matched the drop rule with incorrect dst ip
+ python3 "$ovs_base/ovs-dpctl.py" dump-flows natrelated4 \
+ | grep "drop" | grep "packets:0" >/dev/null || return 1
+
+ info "done..."
+ return 0
+}
+
# netlink_validation
# - Create a dp
# - check no warning with "old version" simulation
@@ -502,9 +564,22 @@ test_netlink_checks () {
wc -l) == 2 ] || \
return 1
+ info "Checking clone depth"
ERR_MSG="Flow actions may not be safe on all matching packets"
PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
ovs_add_flow "test_netlink_checks" nv0 \
+ 'in_port(1),eth(),eth_type(0x800),ipv4()' \
+ 'clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(drop)))))))))))))))))' \
+ >/dev/null 2>&1 && return 1
+ POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
+
+ if [ "$PRE_TEST" == "$POST_TEST" ]; then
+ info "failed - clone depth too large"
+ return 1
+ fi
+
+ PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
+ ovs_add_flow "test_netlink_checks" nv0 \
'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(0),2' \
&> /dev/null && return 1
POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index b97e621face9..5e0e539a323d 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -299,7 +299,7 @@ class ovsactions(nla):
("OVS_ACTION_ATTR_PUSH_NSH", "none"),
("OVS_ACTION_ATTR_POP_NSH", "flag"),
("OVS_ACTION_ATTR_METER", "none"),
- ("OVS_ACTION_ATTR_CLONE", "none"),
+ ("OVS_ACTION_ATTR_CLONE", "recursive"),
("OVS_ACTION_ATTR_CHECK_PKT_LEN", "none"),
("OVS_ACTION_ATTR_ADD_MPLS", "none"),
("OVS_ACTION_ATTR_DEC_TTL", "none"),
@@ -465,29 +465,42 @@ class ovsactions(nla):
print_str += "pop_mpls"
else:
datum = self.get_attr(field[0])
- print_str += datum.dpstr(more)
+ if field[0] == "OVS_ACTION_ATTR_CLONE":
+ print_str += "clone("
+ print_str += datum.dpstr(more)
+ print_str += ")"
+ else:
+ print_str += datum.dpstr(more)
return print_str
def parse(self, actstr):
+ totallen = len(actstr)
while len(actstr) != 0:
parsed = False
+ parencount = 0
if actstr.startswith("drop"):
# If no reason is provided, the implicit drop is used (i.e no
# action). If some reason is given, an explicit action is used.
- actstr, reason = parse_extract_field(
- actstr,
- "drop(",
- "([0-9]+)",
- lambda x: int(x, 0),
- False,
- None,
- )
+ reason = None
+ if actstr.startswith("drop("):
+ parencount += 1
+
+ actstr, reason = parse_extract_field(
+ actstr,
+ "drop(",
+ "([0-9]+)",
+ lambda x: int(x, 0),
+ False,
+ None,
+ )
+
if reason is not None:
self["attrs"].append(["OVS_ACTION_ATTR_DROP", reason])
parsed = True
else:
- return
+ actstr = actstr[len("drop"): ]
+ return (totallen - len(actstr))
elif parse_starts_block(actstr, "^(\d+)", False, True):
actstr, output = parse_extract_field(
@@ -504,6 +517,7 @@ class ovsactions(nla):
False,
0,
)
+ parencount += 1
self["attrs"].append(["OVS_ACTION_ATTR_RECIRC", recircid])
parsed = True
@@ -516,12 +530,22 @@ class ovsactions(nla):
for flat_act in parse_flat_map:
if parse_starts_block(actstr, flat_act[0], False):
- actstr += len(flat_act[0])
+ actstr = actstr[len(flat_act[0]):]
self["attrs"].append([flat_act[1]])
actstr = actstr[strspn(actstr, ", ") :]
parsed = True
- if parse_starts_block(actstr, "ct(", False):
+ if parse_starts_block(actstr, "clone(", False):
+ parencount += 1
+ subacts = ovsactions()
+ actstr = actstr[len("clone("):]
+ parsedLen = subacts.parse(actstr)
+ self["attrs"].append(("OVS_ACTION_ATTR_CLONE", subacts))
+ actstr = actstr[parsedLen:]
+ parsed = True
+ elif parse_starts_block(actstr, "ct(", False):
+ parencount += 1
actstr = actstr[len("ct(") :]
ctact = ovsactions.ctact()
@@ -553,6 +577,7 @@ class ovsactions(nla):
natact = ovsactions.ctact.natattr()
if actstr.startswith("("):
+ parencount += 1
t = None
actstr = actstr[1:]
if actstr.startswith("src"):
@@ -607,15 +632,29 @@ class ovsactions(nla):
actstr = actstr[strspn(actstr, ", ") :]
ctact["attrs"].append(["OVS_CT_ATTR_NAT", natact])
- actstr = actstr[strspn(actstr, ",) ") :]
+ actstr = actstr[strspn(actstr, ", ") :]
self["attrs"].append(["OVS_ACTION_ATTR_CT", ctact])
parsed = True
- actstr = actstr[strspn(actstr, "), ") :]
+ actstr = actstr[strspn(actstr, ", ") :]
+ while parencount > 0:
+ parencount -= 1
+ actstr = actstr[strspn(actstr, " "):]
+ if len(actstr) and actstr[0] != ")":
+ raise ValueError("Action str: '%s' unbalanced" % actstr)
+ actstr = actstr[1:]
+
+ if len(actstr) and actstr[0] == ")":
+ return (totallen - len(actstr))
+
+ actstr = actstr[strspn(actstr, ", ") :]
+
if not parsed:
raise ValueError("Action str: '%s' not supported" % actstr)
+ return (totallen - len(actstr))
+
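parse() now tracks nesting explicitly: every opening construct (drop(, recirc(...), ct(, nat(, clone() increments parencount, clone() recurses into a fresh ovsactions and returns how many characters it consumed so the caller can skip past the nested body, and trailing )s are consumed one per pending level, raising ValueError on imbalance. The same balanced-scan idea in a few lines of shell, illustrative only (not the selftest parser):

s='clone(clone(drop))'
depth=0
while [ -n "${s}" ]; do
	c=${s:0:1}; s=${s:1}
	case "${c}" in
		"(") depth=$((depth + 1)) ;;
		")") depth=$((depth - 1)) ;;
	esac
done
[ "${depth}" -eq 0 ] && echo balanced || echo "unbalanced: ${depth}"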
class ovskey(nla):
nla_flags = NLA_F_NESTED
@@ -2111,6 +2150,8 @@ def main(argv):
ovsflow = OvsFlow()
ndb = NDB()
+ sys.setrecursionlimit(100000)
+
if hasattr(args, "showdp"):
found = False
for iface in ndb.interfaces:
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index f10879788f61..cfc84958025a 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -199,6 +199,7 @@
# Same as above but with IPv6
source lib.sh
+source net_helper.sh
PAUSE_ON_FAIL=no
VERBOSE=0
@@ -707,23 +708,23 @@ setup_xfrm6() {
}
setup_xfrm4udp() {
- setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0"
- setup_nettest_xfrm 4 4500
+ setup_xfrm 4 ${veth4_a_addr} ${veth4_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
+ setup_nettest_xfrm 4 4500
}
setup_xfrm6udp() {
- setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0"
- setup_nettest_xfrm 6 4500
+ setup_xfrm 6 ${veth6_a_addr} ${veth6_b_addr} "encap espinudp 4500 4500 0.0.0.0" && \
+ setup_nettest_xfrm 6 4500
}
setup_xfrm4udprouted() {
- setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0"
- setup_nettest_xfrm 4 4500
+ setup_xfrm 4 ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "encap espinudp 4500 4500 0.0.0.0" && \
+ setup_nettest_xfrm 4 4500
}
setup_xfrm6udprouted() {
- setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0"
- setup_nettest_xfrm 6 4500
+ setup_xfrm 6 ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "encap espinudp 4500 4500 0.0.0.0" && \
+ setup_nettest_xfrm 6 4500
}
setup_routing_old() {
@@ -1335,12 +1336,14 @@ test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception() {
else
TCPDST="TCP:[${dst}]:50000"
fi
- ${ns_b} socat -T 3 -u -6 TCP-LISTEN:50000 STDOUT > $tmpoutfile &
+ ${ns_b} socat -T 3 -u -6 TCP-LISTEN:50000,reuseaddr STDOUT > $tmpoutfile &
+ local socat_pid=$!
- sleep 1
+ wait_local_port_listen ${NS_B} 50000 tcp
- dd if=/dev/zero of=/dev/stdout status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3
+ dd if=/dev/zero status=none bs=1M count=1 | ${target} socat -T 3 -u STDIN $TCPDST,connect-timeout=3
+ wait ${socat_pid}
size=$(du -sb $tmpoutfile)
size=${size%%/tmp/*}
@@ -1954,6 +1957,13 @@ check_command() {
return 0
}
+check_running() {
+ pid=${1}
+ cmd=${2}
+
+	[ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "${cmd}" ]
+}
+
test_cleanup_vxlanX_exception() {
outer="${1}"
encap="vxlan"
@@ -1984,11 +1994,12 @@ test_cleanup_vxlanX_exception() {
${ns_a} ip link del dev veth_A-R1 &
iplink_pid=$!
- sleep 1
- if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
- err " can't delete veth device in a timely manner, PMTU dst likely leaked"
- return 1
- fi
+ for i in $(seq 1 20); do
+ check_running ${iplink_pid} "iplinkdeldevveth_A-R1" || return 0
+ sleep 0.1
+ done
+ err " can't delete veth device in a timely manner, PMTU dst likely leaked"
+ return 1
}
test_cleanup_ipv6_exception() {
diff --git a/tools/testing/selftests/net/rps_default_mask.sh b/tools/testing/selftests/net/rps_default_mask.sh
index a26c5624429f..4287a8529890 100755
--- a/tools/testing/selftests/net/rps_default_mask.sh
+++ b/tools/testing/selftests/net/rps_default_mask.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
readonly ksft_skip=4
@@ -33,6 +33,10 @@ chk_rps() {
rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus)
printf "%-60s" "$msg"
+
+ # In case there is more than 32 CPUs we need to remove commas from masks
+ rps_mask=${rps_mask//,}
+ expected_rps_mask=${expected_rps_mask//,}
if [ $rps_mask -eq $expected_rps_mask ]; then
echo "[ ok ]"
else
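On machines with more than 32 CPUs the kernel formats rps_cpus as comma-separated 32-bit groups, which would make the numeric [ -eq ] comparison choke; deleting the commas first normalizes both sides. The expansion on its own:

rps_mask="00000000,00000001"	# CPU 0 set, >32-CPU formatting
echo "${rps_mask//,}"		# -> "0000000000000001"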
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 4667d74579d1..bdf6f10d0558 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -440,7 +440,6 @@ kci_test_encap_vxlan()
local ret=0
vxlan="test-vxlan0"
vlan="test-vlan0"
- testns="$1"
run_cmd ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
dev "$devdummy" dstport 4789
if [ $? -ne 0 ]; then
@@ -485,7 +484,6 @@ kci_test_encap_fou()
{
local ret=0
name="test-fou"
- testns="$1"
run_cmd_grep 'Usage: ip fou' ip fou help
if [ $? -ne 0 ];then
end_test "SKIP: fou: iproute2 too old"
@@ -526,8 +524,8 @@ kci_test_encap()
run_cmd ip -netns "$testns" link set lo up
run_cmd ip -netns "$testns" link add name "$devdummy" type dummy
run_cmd ip -netns "$testns" link set "$devdummy" up
- run_cmd kci_test_encap_vxlan "$testns"
- run_cmd kci_test_encap_fou "$testns"
+ run_cmd kci_test_encap_vxlan
+ run_cmd kci_test_encap_fou
ip netns del "$testns"
return $ret
@@ -803,6 +801,8 @@ kci_test_ipsec_offload()
end_test "FAIL: ipsec_offload SA offload missing from list output"
fi
+ # we didn't create a peer, make sure we can Tx
+ ip neigh add $dstip dev $dev lladdr 00:11:22:33:44:55
# use ping to exercise the Tx path
ping -I $dev -c 3 -W 1 -i 0 $dstip >/dev/null
diff --git a/tools/testing/selftests/net/setup_loopback.sh b/tools/testing/selftests/net/setup_loopback.sh
index 2070b57849de..2070b57849de 100755..100644
--- a/tools/testing/selftests/net/setup_loopback.sh
+++ b/tools/testing/selftests/net/setup_loopback.sh
diff --git a/tools/testing/selftests/net/setup_veth.sh b/tools/testing/selftests/net/setup_veth.sh
index a9a1759e035c..1f78a87f6f37 100644
--- a/tools/testing/selftests/net/setup_veth.sh
+++ b/tools/testing/selftests/net/setup_veth.sh
@@ -11,7 +11,7 @@ setup_veth_ns() {
local -r ns_mac="$4"
[[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}"
- echo 100000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
+ echo 1000000 > "/sys/class/net/${ns_dev}/gro_flush_timeout"
ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535
ip -netns "${ns_name}" link set dev "${ns_dev}" up
diff --git a/tools/testing/selftests/net/so_incoming_cpu.c b/tools/testing/selftests/net/so_incoming_cpu.c
index a14818164102..e9fa14e10732 100644
--- a/tools/testing/selftests/net/so_incoming_cpu.c
+++ b/tools/testing/selftests/net/so_incoming_cpu.c
@@ -3,19 +3,16 @@
#define _GNU_SOURCE
#include <sched.h>
+#include <fcntl.h>
+
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/sysinfo.h>
#include "../kselftest_harness.h"
-#define CLIENT_PER_SERVER 32 /* More sockets, more reliable */
-#define NR_SERVER self->nproc
-#define NR_CLIENT (CLIENT_PER_SERVER * NR_SERVER)
-
FIXTURE(so_incoming_cpu)
{
- int nproc;
int *servers;
union {
struct sockaddr addr;
@@ -56,12 +53,47 @@ FIXTURE_VARIANT_ADD(so_incoming_cpu, after_all_listen)
.when_to_set = AFTER_ALL_LISTEN,
};
+static void write_sysctl(struct __test_metadata *_metadata,
+ char *filename, char *string)
+{
+ int fd, len, ret;
+
+ fd = open(filename, O_WRONLY);
+ ASSERT_NE(fd, -1);
+
+ len = strlen(string);
+ ret = write(fd, string, len);
+ ASSERT_EQ(ret, len);
+}
+
+static void setup_netns(struct __test_metadata *_metadata)
+{
+ ASSERT_EQ(unshare(CLONE_NEWNET), 0);
+ ASSERT_EQ(system("ip link set lo up"), 0);
+
+ write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001");
+ write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0");
+}
+
+#define NR_PORT (60001 - 10000 - 1)
+#define NR_CLIENT_PER_SERVER_DEFAULT 32
+static int nr_client_per_server, nr_server, nr_client;
+
FIXTURE_SETUP(so_incoming_cpu)
{
- self->nproc = get_nprocs();
- ASSERT_LE(2, self->nproc);
+ setup_netns(_metadata);
+
+ nr_server = get_nprocs();
+ ASSERT_LE(2, nr_server);
+
+ if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT)
+ nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT;
+ else
+ nr_client_per_server = NR_PORT / nr_server;
+
+ nr_client = nr_client_per_server * nr_server;
- self->servers = malloc(sizeof(int) * NR_SERVER);
+ self->servers = malloc(sizeof(int) * nr_server);
ASSERT_NE(self->servers, NULL);
self->in_addr.sin_family = AF_INET;
@@ -74,7 +106,7 @@ FIXTURE_TEARDOWN(so_incoming_cpu)
{
int i;
- for (i = 0; i < NR_SERVER; i++)
+ for (i = 0; i < nr_server; i++)
close(self->servers[i]);
free(self->servers);
@@ -110,10 +142,10 @@ int create_server(struct __test_metadata *_metadata,
if (variant->when_to_set == BEFORE_LISTEN)
set_so_incoming_cpu(_metadata, fd, cpu);
- /* We don't use CLIENT_PER_SERVER here not to block
+ /* We don't use nr_client_per_server here not to block
* this test at connect() if SO_INCOMING_CPU is broken.
*/
- ret = listen(fd, NR_CLIENT);
+ ret = listen(fd, nr_client);
ASSERT_EQ(ret, 0);
if (variant->when_to_set == AFTER_LISTEN)
@@ -128,7 +160,7 @@ void create_servers(struct __test_metadata *_metadata,
{
int i, ret;
- for (i = 0; i < NR_SERVER; i++) {
+ for (i = 0; i < nr_server; i++) {
self->servers[i] = create_server(_metadata, self, variant, i);
if (i == 0) {
@@ -138,7 +170,7 @@ void create_servers(struct __test_metadata *_metadata,
}
if (variant->when_to_set == AFTER_ALL_LISTEN) {
- for (i = 0; i < NR_SERVER; i++)
+ for (i = 0; i < nr_server; i++)
set_so_incoming_cpu(_metadata, self->servers[i], i);
}
}
@@ -149,7 +181,7 @@ void create_clients(struct __test_metadata *_metadata,
cpu_set_t cpu_set;
int i, j, fd, ret;
- for (i = 0; i < NR_SERVER; i++) {
+ for (i = 0; i < nr_server; i++) {
CPU_ZERO(&cpu_set);
CPU_SET(i, &cpu_set);
@@ -162,7 +194,7 @@ void create_clients(struct __test_metadata *_metadata,
ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
ASSERT_EQ(ret, 0);
- for (j = 0; j < CLIENT_PER_SERVER; j++) {
+ for (j = 0; j < nr_client_per_server; j++) {
fd = socket(AF_INET, SOCK_STREAM, 0);
ASSERT_NE(fd, -1);
@@ -180,8 +212,8 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
int i, j, fd, cpu, ret, total = 0;
socklen_t len = sizeof(int);
- for (i = 0; i < NR_SERVER; i++) {
- for (j = 0; j < CLIENT_PER_SERVER; j++) {
+ for (i = 0; i < nr_server; i++) {
+ for (j = 0; j < nr_client_per_server; j++) {
/* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
fd = accept(self->servers[i], &self->addr, &self->addrlen);
ASSERT_NE(fd, -1);
@@ -195,7 +227,7 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
}
}
- ASSERT_EQ(total, NR_CLIENT);
+ ASSERT_EQ(total, nr_client);
TH_LOG("SO_INCOMING_CPU is very likely to be "
"working correctly with %d sockets.", total);
}
diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
index 2672ac0b6d1f..8457b7ccbc09 100644
--- a/tools/testing/selftests/net/so_txtime.c
+++ b/tools/testing/selftests/net/so_txtime.c
@@ -134,8 +134,11 @@ static void do_recv_one(int fdr, struct timed_send *ts)
if (rbuf[0] != ts->data)
error(1, 0, "payload mismatch. expected %c", ts->data);
- if (llabs(tstop - texpect) > cfg_variance_us)
- error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
+ if (llabs(tstop - texpect) > cfg_variance_us) {
+ fprintf(stderr, "exceeds variance (%d us)\n", cfg_variance_us);
+ if (!getenv("KSFT_MACHINE_SLOW"))
+ exit(1);
+ }
}
static void do_recv_verify_empty(int fdr)
diff --git a/tools/testing/selftests/net/so_txtime.sh b/tools/testing/selftests/net/so_txtime.sh
index 3f06f4d286a9..5e861ad32a42 100755
--- a/tools/testing/selftests/net/so_txtime.sh
+++ b/tools/testing/selftests/net/so_txtime.sh
@@ -5,6 +5,7 @@
set -e
+readonly ksft_skip=4
readonly DEV="veth0"
readonly BIN="./so_txtime"
@@ -46,7 +47,7 @@ ip -netns "${NS2}" addr add 192.168.1.2/24 dev "${DEV}"
ip -netns "${NS1}" addr add fd::1/64 dev "${DEV}" nodad
ip -netns "${NS2}" addr add fd::2/64 dev "${DEV}" nodad
-do_test() {
+run_test() {
local readonly IP="$1"
local readonly CLOCK="$2"
local readonly TXARGS="$3"
@@ -64,12 +65,25 @@ do_test() {
fi
local readonly START="$(date +%s%N --date="+ 0.1 seconds")"
+
ip netns exec "${NS2}" "${BIN}" -"${IP}" -c "${CLOCK}" -t "${START}" -S "${SADDR}" -D "${DADDR}" "${RXARGS}" -r &
ip netns exec "${NS1}" "${BIN}" -"${IP}" -c "${CLOCK}" -t "${START}" -S "${SADDR}" -D "${DADDR}" "${TXARGS}"
wait "$!"
}
+do_test() {
+ run_test $@
+ [ $? -ne 0 ] && ret=1
+}
+
+do_fail_test() {
+ run_test $@
+ [ $? -eq 0 ] && ret=1
+}
+
ip netns exec "${NS1}" tc qdisc add dev "${DEV}" root fq
+set +e
+ret=0
do_test 4 mono a,-1 a,-1
do_test 6 mono a,0 a,0
do_test 6 mono a,10 a,10
@@ -77,13 +91,20 @@ do_test 4 mono a,10,b,20 a,10,b,20
do_test 6 mono a,20,b,10 b,20,a,20
if ip netns exec "${NS1}" tc qdisc replace dev "${DEV}" root etf clockid CLOCK_TAI delta 400000; then
- ! do_test 4 tai a,-1 a,-1
- ! do_test 6 tai a,0 a,0
+ do_fail_test 4 tai a,-1 a,-1
+ do_fail_test 6 tai a,0 a,0
do_test 6 tai a,10 a,10
do_test 4 tai a,10,b,20 a,10,b,20
do_test 6 tai a,20,b,10 b,10,a,20
else
echo "tc ($(tc -V)) does not support qdisc etf. skipping"
+ [ $ret -eq 0 ] && ret=$ksft_skip
fi
-echo OK. All tests passed
+if [ $ret -eq 0 ]; then
+ echo OK. All tests passed
+elif [[ $ret -ne $ksft_skip && -n "$KSFT_MACHINE_SLOW" ]]; then
+ echo "Ignoring errors due to slow environment" 1>&2
+ ret=0
+fi
+exit $ret
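Both the binary (so_txtime.c above) and this wrapper now consult KSFT_MACHINE_SLOW, the environment variable kselftest runners may export on slow (e.g. virtualized or debug-config) machines: scheduling-variance overruns are still reported, but no longer fail the run there. Hypothetical invocation:

# tolerate ETF/fq scheduling jitter on a slow runner:
KSFT_MACHINE_SLOW=yes ./so_txtime.sh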
diff --git a/tools/testing/selftests/net/tcp_ao/config b/tools/testing/selftests/net/tcp_ao/config
new file mode 100644
index 000000000000..d3277a9de987
--- /dev/null
+++ b/tools/testing/selftests/net/tcp_ao/config
@@ -0,0 +1,10 @@
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_RMD160=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_VRF=y
+CONFIG_TCP_AO=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_VETH=m
diff --git a/tools/testing/selftests/net/tcp_ao/key-management.c b/tools/testing/selftests/net/tcp_ao/key-management.c
index c48b4970ca17..24e62120b792 100644
--- a/tools/testing/selftests/net/tcp_ao/key-management.c
+++ b/tools/testing/selftests/net/tcp_ao/key-management.c
@@ -417,9 +417,9 @@ struct test_key {
matches_vrf : 1,
is_current : 1,
is_rnext : 1,
- used_on_handshake : 1,
- used_after_accept : 1,
- used_on_client : 1;
+ used_on_server_tx : 1,
+ used_on_client_tx : 1,
+ skip_counters_checks : 1;
};
struct key_collection {
@@ -609,16 +609,14 @@ static int key_collection_socket(bool server, unsigned int port)
addr = &this_ip_dest;
sndid = key->client_keyid;
rcvid = key->server_keyid;
- set_current = key->is_current;
- set_rnext = key->is_rnext;
+ key->used_on_client_tx = set_current = key->is_current;
+ key->used_on_server_tx = set_rnext = key->is_rnext;
}
if (test_add_key_cr(sk, key->password, key->len,
*addr, vrf, sndid, rcvid, key->maclen,
key->alg, set_current, set_rnext))
test_key_error("setsockopt(TCP_AO_ADD_KEY)", key);
- if (set_current || set_rnext)
- key->used_on_handshake = 1;
#ifdef DEBUG
test_print("%s [%u/%u] key: { %s, %u:%u, %u, %u:%u:%u:%u (%u)}",
server ? "server" : "client", i, collection.nr_keys,
@@ -640,22 +638,22 @@ static void verify_counters(const char *tst_name, bool is_listen_sk, bool server
for (i = 0; i < collection.nr_keys; i++) {
struct test_key *key = &collection.keys[i];
uint8_t sndid, rcvid;
- bool was_used;
+ bool rx_cnt_expected;
+ if (key->skip_counters_checks)
+ continue;
if (server) {
sndid = key->server_keyid;
rcvid = key->client_keyid;
- if (is_listen_sk)
- was_used = key->used_on_handshake;
- else
- was_used = key->used_after_accept;
+ rx_cnt_expected = key->used_on_client_tx;
} else {
sndid = key->client_keyid;
rcvid = key->server_keyid;
- was_used = key->used_on_client;
+ rx_cnt_expected = key->used_on_server_tx;
}
- test_tcp_ao_key_counters_cmp(tst_name, a, b, was_used,
+ test_tcp_ao_key_counters_cmp(tst_name, a, b,
+ rx_cnt_expected ? TEST_CNT_KEY_GOOD : 0,
sndid, rcvid);
}
test_tcp_ao_counters_free(a);
@@ -843,7 +841,7 @@ static void end_server(const char *tst_name, int sk,
synchronize_threads(); /* 4: verified => closed */
close(sk);
- verify_counters(tst_name, true, false, begin, &end);
+ verify_counters(tst_name, false, true, begin, &end);
synchronize_threads(); /* 5: counters */
}
@@ -916,9 +914,8 @@ static int run_client(const char *tst_name, unsigned int port,
current_index = nr_keys - 1;
if (rnext_index < 0)
rnext_index = nr_keys - 1;
- collection.keys[current_index].used_on_handshake = 1;
- collection.keys[rnext_index].used_after_accept = 1;
- collection.keys[rnext_index].used_on_client = 1;
+ collection.keys[current_index].used_on_client_tx = 1;
+ collection.keys[rnext_index].used_on_server_tx = 1;
synchronize_threads(); /* 3: accepted => send data */
if (test_client_verify(sk, msg_sz, msg_nr, TEST_TIMEOUT_SEC)) {
@@ -1059,7 +1056,16 @@ static void check_current_back(const char *tst_name, unsigned int port,
test_error("Can't change the current key");
if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC))
test_fail("verify failed");
- collection.keys[rotate_to_index].used_after_accept = 1;
+ /* There is a race here: between setting the current_key with
+ * setsockopt(TCP_AO_INFO) and starting to send some data - there
+ * might have been a segment received with the desired
+ * RNext_key set. In turn that would mean that the first outgoing
+ * segment will have the desired current_key (flipped back).
+ * Which is what the user/test wants. As it's racy, skip checking
+ * the counters, yet check what are the resulting current/rnext
+ * keys on both sides.
+ */
+ collection.keys[rotate_to_index].skip_counters_checks = 1;
end_client(tst_name, sk, nr_keys, current_index, rnext_index, &tmp);
}
@@ -1089,7 +1095,7 @@ static void roll_over_keys(const char *tst_name, unsigned int port,
}
verify_current_rnext(tst_name, sk, -1,
collection.keys[i].server_keyid);
- collection.keys[i].used_on_client = 1;
+ collection.keys[i].used_on_server_tx = 1;
synchronize_threads(); /* verify current/rnext */
}
end_client(tst_name, sk, nr_keys, current_index, rnext_index, &tmp);
diff --git a/tools/testing/selftests/net/tcp_ao/lib/sock.c b/tools/testing/selftests/net/tcp_ao/lib/sock.c
index c75d82885a2e..15aeb0963058 100644
--- a/tools/testing/selftests/net/tcp_ao/lib/sock.c
+++ b/tools/testing/selftests/net/tcp_ao/lib/sock.c
@@ -62,7 +62,9 @@ int test_wait_fd(int sk, time_t sec, bool write)
return -ETIMEDOUT;
}
- if (getsockopt(sk, SOL_SOCKET, SO_ERROR, &ret, &slen) || ret)
+ if (getsockopt(sk, SOL_SOCKET, SO_ERROR, &ret, &slen))
+ return -errno;
+ if (ret)
return -ret;
return 0;
}
@@ -584,9 +586,11 @@ int test_client_verify(int sk, const size_t msg_len, const size_t nr,
{
size_t buf_sz = msg_len * nr;
char *buf = alloca(buf_sz);
+ ssize_t ret;
randomize_buffer(buf, buf_sz);
- if (test_client_loop(sk, buf, buf_sz, msg_len, timeout_sec) != buf_sz)
- return -1;
- return 0;
+ ret = test_client_loop(sk, buf, buf_sz, msg_len, timeout_sec);
+ if (ret < 0)
+ return (int)ret;
+ return ret != buf_sz ? -1 : 0;
}
diff --git a/tools/testing/selftests/net/tcp_ao/rst.c b/tools/testing/selftests/net/tcp_ao/rst.c
index ac06009a7f5f..7df8b8700e39 100644
--- a/tools/testing/selftests/net/tcp_ao/rst.c
+++ b/tools/testing/selftests/net/tcp_ao/rst.c
@@ -1,10 +1,33 @@
// SPDX-License-Identifier: GPL-2.0
-/* Author: Dmitry Safonov <dima@arista.com> */
+/*
+ * The test checks that both active and passive reset have correct TCP-AO
+ * signature. An "active" reset (abort) here is procured from closing
+ * listen() socket with non-accepted connections in the queue:
+ * inet_csk_listen_stop() => inet_child_forget() =>
+ * => tcp_disconnect() => tcp_send_active_reset()
+ *
+ * The passive reset is quite hard to get on established TCP connections.
+ * It could be procured from non-established states, but the userspace
+ * synchronization needed to reliably trigger an RST there is not easy.
+ * So, instead it's procured by corrupting the SEQ number in TIME-WAIT state.
+ *
+ * It's important to test both passive and active RST as they go through
+ * different code-paths:
+ * - tcp_send_active_reset() makes no-data skb, sends it with tcp_transmit_skb()
+ * - tcp_v*_send_reset() create their reply skbs and send them with
+ * ip_send_unicast_reply()
+ *
+ * In both cases TCP-AO signatures have to be correct, which is verified by
+ * (1) checking that the TCP-AO connection was reset and (2) TCP-AO counters.
+ *
+ * Author: Dmitry Safonov <dima@arista.com>
+ */
#include <inttypes.h>
#include "../../../../include/linux/kernel.h"
#include "aolib.h"
const size_t quota = 1000;
+const size_t packet_sz = 100;
/*
* Backlog == 0 means 1 connection in queue, see:
* commit 64a146513f8f ("[NET]: Revert incorrect accept queue...")
@@ -59,26 +82,6 @@ static void close_forced(int sk)
close(sk);
}
-static int test_wait_for_exception(int sk, time_t sec)
-{
- struct timeval tv = { .tv_sec = sec };
- struct timeval *ptv = NULL;
- fd_set efds;
- int ret;
-
- FD_ZERO(&efds);
- FD_SET(sk, &efds);
-
- if (sec)
- ptv = &tv;
-
- errno = 0;
- ret = select(sk + 1, NULL, NULL, &efds, ptv);
- if (ret < 0)
- return -errno;
- return ret ? sk : 0;
-}
-
static void test_server_active_rst(unsigned int port)
{
struct tcp_ao_counters cnt1, cnt2;
@@ -155,17 +158,16 @@ static void test_server_passive_rst(unsigned int port)
test_fail("server returned %zd", bytes);
}
- synchronize_threads(); /* 3: chekpoint/restore the connection */
+ synchronize_threads(); /* 3: checkpoint the client */
+ synchronize_threads(); /* 4: close the server, creating twsk */
if (test_get_tcp_ao_counters(sk, &ao2))
test_error("test_get_tcp_ao_counters()");
-
- synchronize_threads(); /* 4: terminate server + send more on client */
- bytes = test_server_run(sk, quota, TEST_RETRANSMIT_SEC);
close(sk);
+
+ synchronize_threads(); /* 5: restore the socket, send more data */
test_tcp_ao_counters_cmp("passive RST server", &ao1, &ao2, TEST_CNT_GOOD);
- synchronize_threads(); /* 5: verified => closed */
- close(sk);
+ synchronize_threads(); /* 6: server exits */
}
static void *server_fn(void *arg)
@@ -284,7 +286,7 @@ static void test_client_active_rst(unsigned int port)
test_error("test_wait_fds(): %d", err);
synchronize_threads(); /* 3: close listen socket */
- if (test_client_verify(sk[0], 100, quota / 100, TEST_TIMEOUT_SEC))
+ if (test_client_verify(sk[0], packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC))
test_fail("Failed to send data on connected socket");
else
test_ok("Verified established tcp connection");
@@ -323,7 +325,6 @@ static void test_client_passive_rst(unsigned int port)
struct tcp_sock_state img;
sockaddr_af saddr;
int sk, err;
- socklen_t slen = sizeof(err);
sk = socket(test_family, SOCK_STREAM, IPPROTO_TCP);
if (sk < 0)
@@ -337,18 +338,51 @@ static void test_client_passive_rst(unsigned int port)
test_error("failed to connect()");
synchronize_threads(); /* 2: accepted => send data */
- if (test_client_verify(sk, 100, quota / 100, TEST_TIMEOUT_SEC))
+ if (test_client_verify(sk, packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC))
test_fail("Failed to send data on connected socket");
else
test_ok("Verified established tcp connection");
- synchronize_threads(); /* 3: chekpoint/restore the connection */
+ synchronize_threads(); /* 3: checkpoint the client */
test_enable_repair(sk);
test_sock_checkpoint(sk, &img, &saddr);
test_ao_checkpoint(sk, &ao_img);
- test_kill_sk(sk);
+ test_disable_repair(sk);
- img.out.seq += quota;
+ synchronize_threads(); /* 4: close the server, creating twsk */
+
+ /*
+ * The "corruption" in SEQ has to be small enough to fit into TCP
+ * window, see tcp_timewait_state_process() for out-of-window
+ * segments.
+ */
+ img.out.seq += 5; /* 5 is more noticeable in tcpdump than 1 */
+
+ /*
+ * FIXME: This is kind-of ugly and dirty, but it works.
+ *
+ * At this moment, the server has close'ed(sk).
+ * The passive RST that is being targeted here is new data after
+ * half-duplex close, see tcp_timewait_state_process() => TCP_TW_RST
+ *
+ * What is needed here is:
+ * (1) wait for FIN from the server
+ * (2) make sure that the ACK from the client went out
+ * (3) make sure that the ACK was received and processed by the server
+ *
+ * Otherwise, the data that will be sent from "repaired" socket
+ * post SEQ corruption may get to the server before it's in
+ * TCP_FIN_WAIT2.
+ *
+ * (1) is easy with select()/poll()
+ * (2) is possible by polling tcpi_state from TCP_INFO
+ * (3) is quite complex: as server's socket was already closed,
+ * probably the way to do it would be tcp-diag.
+ */
+ sleep(TEST_RETRANSMIT_SEC);
+
+ synchronize_threads(); /* 5: restore the socket, send more data */
+ test_kill_sk(sk);
sk = socket(test_family, SOCK_STREAM, IPPROTO_TCP);
if (sk < 0)
@@ -366,25 +400,33 @@ static void test_client_passive_rst(unsigned int port)
test_disable_repair(sk);
test_sock_state_free(&img);
- synchronize_threads(); /* 4: terminate server + send more on client */
- if (test_client_verify(sk, 100, quota / 100, 2 * TEST_TIMEOUT_SEC))
- test_ok("client connection broken post-seq-adjust");
- else
- test_fail("client connection still works post-seq-adjust");
-
- test_wait_for_exception(sk, TEST_TIMEOUT_SEC);
-
- if (getsockopt(sk, SOL_SOCKET, SO_ERROR, &err, &slen))
- test_error("getsockopt()");
- if (err != ECONNRESET && err != EPIPE)
- test_fail("client connection was not reset: %d", err);
+ /*
+ * This is how "passive reset" is acquired in this test from TCP_TW_RST:
+ *
+ * IP 10.0.254.1.7011 > 10.0.1.1.59772: Flags [P.], seq 901:1001, ack 1001, win 249,
+ * options [tcp-ao keyid 100 rnextkeyid 100 mac 0x10217d6c36a22379086ef3b1], length 100
+ * IP 10.0.254.1.7011 > 10.0.1.1.59772: Flags [F.], seq 1001, ack 1001, win 249,
+ * options [tcp-ao keyid 100 rnextkeyid 100 mac 0x104ffc99b98c10a5298cc268], length 0
+ * IP 10.0.1.1.59772 > 10.0.254.1.7011: Flags [.], ack 1002, win 251,
+ * options [tcp-ao keyid 100 rnextkeyid 100 mac 0xe496dd4f7f5a8a66873c6f93,nop,nop,sack 1 {1001:1002}], length 0
+ * IP 10.0.1.1.59772 > 10.0.254.1.7011: Flags [P.], seq 1006:1106, ack 1001, win 251,
+ * options [tcp-ao keyid 100 rnextkeyid 100 mac 0x1b5f3330fb23fbcd0c77d0ca], length 100
+ * IP 10.0.254.1.7011 > 10.0.1.1.59772: Flags [R], seq 3215596252, win 0,
+ * options [tcp-ao keyid 100 rnextkeyid 100 mac 0x0bcfbbf497bce844312304b2], length 0
+ */
+ err = test_client_verify(sk, packet_sz, quota / packet_sz, 2 * TEST_TIMEOUT_SEC);
+ /* Make sure that the connection was reset, not timed out */
+ if (err && err == -ECONNRESET)
+ test_ok("client sock was passively reset post-seq-adjust");
+ else if (err)
+ test_fail("client sock was not reset post-seq-adjust: %d", err);
else
- test_ok("client connection was reset");
+ test_fail("client sock is yet connected post-seq-adjust");
if (test_get_tcp_ao_counters(sk, &ao2))
test_error("test_get_tcp_ao_counters()");
- synchronize_threads(); /* 5: verified => closed */
+ synchronize_threads(); /* 6: server exits */
close(sk);
test_tcp_ao_counters_cmp("client passive RST", &ao1, &ao2, TEST_CNT_GOOD);
}
@@ -410,6 +452,6 @@ static void *client_fn(void *arg)
int main(int argc, char *argv[])
{
- test_init(15, server_fn, client_fn);
+ test_init(14, server_fn, client_fn);
return 0;
}
diff --git a/tools/testing/selftests/net/tcp_ao/settings b/tools/testing/selftests/net/tcp_ao/settings
new file mode 100644
index 000000000000..6091b45d226b
--- /dev/null
+++ b/tools/testing/selftests/net/tcp_ao/settings
@@ -0,0 +1 @@
+timeout=120
diff --git a/tools/testing/selftests/net/tcp_ao/unsigned-md5.c b/tools/testing/selftests/net/tcp_ao/unsigned-md5.c
index c5b568cd7d90..6b59a652159f 100644
--- a/tools/testing/selftests/net/tcp_ao/unsigned-md5.c
+++ b/tools/testing/selftests/net/tcp_ao/unsigned-md5.c
@@ -110,9 +110,9 @@ static void try_accept(const char *tst_name, unsigned int port,
test_tcp_ao_counters_cmp(tst_name, &ao_cnt1, &ao_cnt2, cnt_expected);
out:
- synchronize_threads(); /* close() */
+ synchronize_threads(); /* test_kill_sk() */
if (sk > 0)
- close(sk);
+ test_kill_sk(sk);
}
static void server_add_routes(void)
@@ -302,10 +302,10 @@ static void try_connect(const char *tst_name, unsigned int port,
test_ok("%s: connected", tst_name);
out:
- synchronize_threads(); /* close() */
+ synchronize_threads(); /* test_kill_sk() */
/* _test_connect_socket() cleans up on failure */
if (ret > 0)
- close(sk);
+ test_kill_sk(sk);
}
#define PREINSTALL_MD5_FIRST BIT(0)
@@ -486,10 +486,10 @@ static void try_to_add(const char *tst_name, unsigned int port,
}
out:
- synchronize_threads(); /* close() */
+ synchronize_threads(); /* test_kill_sk() */
/* _test_connect_socket() cleans up on failure */
if (ret > 0)
- close(sk);
+ test_kill_sk(sk);
}
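
These hunks replace plain close() with test_kill_sk() so each iteration tears the socket down without leaving a TIME_WAIT entry behind. The helper comes from the tcp_ao selftest library and is not shown in this diff; the classic userspace way to get an abortive close is SO_LINGER with a zero timeout, roughly as below (a sketch under that assumption, not the library code):

	#include <sys/socket.h>
	#include <unistd.h>

	/* Sketch only: abortive close, sends an RST and skips TIME_WAIT. */
	static void kill_sk_sketch(int sk)
	{
		struct linger l = { .l_onoff = 1, .l_linger = 0 };

		setsockopt(sk, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
		close(sk);
	}
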
static void client_add_ip(union tcp_addr *client, const char *ip)
diff --git a/tools/testing/selftests/net/test_bridge_backup_port.sh b/tools/testing/selftests/net/test_bridge_backup_port.sh
index 70a7d87ba2d2..1b3f89e2b86e 100755
--- a/tools/testing/selftests/net/test_bridge_backup_port.sh
+++ b/tools/testing/selftests/net/test_bridge_backup_port.sh
@@ -124,6 +124,16 @@ tc_check_packets()
[[ $pkts == $count ]]
}
+bridge_link_check()
+{
+ local ns=$1; shift
+ local dev=$1; shift
+ local state=$1; shift
+
+ bridge -n $ns -d -j link show dev $dev | \
+ jq -e ".[][\"state\"] == \"$state\"" &> /dev/null
+}
+
################################################################################
# Setup
@@ -259,6 +269,7 @@ backup_port()
log_test $? 0 "No forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -268,6 +279,7 @@ backup_port()
log_test $? 0 "No forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier on"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
log_test $? 0 "swp1 carrier on"
# Configure vx0 as the backup port of swp1 and check that packets are
@@ -284,6 +296,7 @@ backup_port()
log_test $? 0 "No forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -293,6 +306,7 @@ backup_port()
log_test $? 0 "Forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier on"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
log_test $? 0 "swp1 carrier on"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -314,6 +328,7 @@ backup_port()
log_test $? 0 "No forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -369,6 +384,7 @@ backup_nhid()
log_test $? 0 "No forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -382,6 +398,7 @@ backup_nhid()
log_test $? 0 "Forwarding using VXLAN FDB entry"
run_cmd "ip -n $sw1 link set dev swp1 carrier on"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
log_test $? 0 "swp1 carrier on"
# Configure nexthop ID 10 as the backup nexthop ID of swp1 and check
@@ -398,6 +415,7 @@ backup_nhid()
log_test $? 0 "No forwarding out of vx0"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -411,6 +429,7 @@ backup_nhid()
log_test $? 0 "No forwarding using VXLAN FDB entry"
run_cmd "ip -n $sw1 link set dev swp1 carrier on"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 forwarding
log_test $? 0 "swp1 carrier on"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -441,6 +460,7 @@ backup_nhid()
log_test $? 0 "No forwarding using VXLAN FDB entry"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -497,6 +517,7 @@ backup_nhid_invalid()
log_test $? 0 "Valid nexthop as backup nexthop"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
log_test $? 0 "swp1 carrier off"
run_cmd "ip netns exec $sw1 mausezahn br0.10 -a $smac -b $dmac -A 198.51.100.1 -B 198.51.100.2 -t ip -p 100 -q -c 1"
@@ -604,7 +625,9 @@ backup_nhid_ping()
run_cmd "bridge -n $sw2 link set dev swp1 backup_nhid 10"
run_cmd "ip -n $sw1 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw1 swp1 disabled
run_cmd "ip -n $sw2 link set dev swp1 carrier off"
+ busywait $BUSYWAIT_TIMEOUT bridge_link_check $sw2 swp1 disabled
run_cmd "ip netns exec $sw1 ping -i 0.1 -c 10 -w $PING_TIMEOUT 192.0.2.66"
log_test $? 0 "Ping with backup nexthop ID"
diff --git a/tools/testing/selftests/net/test_vxlan_mdb.sh b/tools/testing/selftests/net/test_vxlan_mdb.sh
index 84a05a9e46d8..74ff9fb2a6f0 100755
--- a/tools/testing/selftests/net/test_vxlan_mdb.sh
+++ b/tools/testing/selftests/net/test_vxlan_mdb.sh
@@ -1014,10 +1014,10 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 port vx0"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010"
- log_test $? 254 "Flush by port"
+ log_test $? 254 "Flush by port - matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 port veth0"
- log_test $? 255 "Flush by wrong port"
+ log_test $? 255 "Flush by port - non-matching"
# Check that when flushing by source VNI only entries programmed with
# the specified source VNI are flushed and the rest are not.
@@ -1030,9 +1030,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 src_vni 10010"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010"
- log_test $? 254 "Flush by specified source VNI"
+ log_test $? 254 "Flush by source VNI - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10011"
- log_test $? 0 "Flush by unspecified source VNI"
+ log_test $? 0 "Flush by source VNI - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1058,9 +1058,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 proto bgp"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"proto bgp\""
- log_test $? 1 "Flush by specified routing protocol"
+ log_test $? 1 "Flush by routing protocol - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"proto zebra\""
- log_test $? 0 "Flush by unspecified routing protocol"
+ log_test $? 0 "Flush by routing protocol - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1075,9 +1075,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst 198.51.100.2"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
- log_test $? 1 "Flush by specified destination IP - IPv4"
+ log_test $? 1 "Flush by IPv4 destination IP - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
- log_test $? 0 "Flush by unspecified destination IP - IPv4"
+ log_test $? 0 "Flush by IPv4 destination IP - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1089,9 +1089,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst 2001:db8:1000::2"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 2001:db8:1000::2"
- log_test $? 1 "Flush by specified destination IP - IPv6"
+ log_test $? 1 "Flush by IPv6 destination IP - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 2001:db8:1000::1"
- log_test $? 0 "Flush by unspecified destination IP - IPv6"
+ log_test $? 0 "Flush by IPv6 destination IP - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1104,9 +1104,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst_port 11111"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"dst_port 11111\""
- log_test $? 1 "Flush by specified UDP destination port"
+ log_test $? 1 "Flush by UDP destination port - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"dst_port 22222\""
- log_test $? 0 "Flush by unspecified UDP destination port"
+ log_test $? 0 "Flush by UDP destination port - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1121,9 +1121,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst_port 4789"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
- log_test $? 1 "Flush by device's UDP destination port"
+ log_test $? 1 "Flush by device's UDP destination port - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
- log_test $? 0 "Flush by unspecified UDP destination port"
+ log_test $? 0 "Flush by device's UDP destination port - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1136,9 +1136,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 vni 20010"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \" vni 20010\""
- log_test $? 1 "Flush by specified destination VNI"
+ log_test $? 1 "Flush by destination VNI - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \" vni 20011\""
- log_test $? 0 "Flush by unspecified destination VNI"
+ log_test $? 0 "Flush by destination VNI - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
@@ -1153,9 +1153,9 @@ flush()
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 vni 10010"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
- log_test $? 1 "Flush by destination VNI equal to source VNI"
+ log_test $? 1 "Flush by destination VNI equal to source VNI - matching"
run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
- log_test $? 0 "Flush by unspecified destination VNI"
+ log_test $? 0 "Flush by destination VNI equal to source VNI - non-matching"
run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 7799e042a971..c6eda21cefb6 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -1002,12 +1002,12 @@ TEST_F(tls, recv_partial)
memset(recv_mem, 0, sizeof(recv_mem));
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
- MSG_WAITALL), -1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first),
+ MSG_WAITALL), strlen(test_str_first));
EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
memset(recv_mem, 0, sizeof(recv_mem));
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
- MSG_WAITALL), -1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second),
+ MSG_WAITALL), strlen(test_str_second));
EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
0);
}
@@ -1485,6 +1485,51 @@ TEST_F(tls, control_msg)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST_F(tls, control_msg_nomerge)
+{
+ char *rec1 = "1111";
+ char *rec2 = "2222";
+ int send_len = 5;
+ char buf[15];
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, rec1, send_len, 0), send_len);
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, rec2, send_len, 0), send_len);
+
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), MSG_PEEK), send_len);
+ EXPECT_EQ(memcmp(buf, rec1, send_len), 0);
+
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), MSG_PEEK), send_len);
+ EXPECT_EQ(memcmp(buf, rec1, send_len), 0);
+
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), 0), send_len);
+ EXPECT_EQ(memcmp(buf, rec1, send_len), 0);
+
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, 100, buf, sizeof(buf), 0), send_len);
+ EXPECT_EQ(memcmp(buf, rec2, send_len), 0);
+}
+
+TEST_F(tls, data_control_data)
+{
+ char *rec1 = "1111";
+ char *rec2 = "2222";
+ char *rec3 = "3333";
+ int send_len = 5;
+ char buf[15];
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ EXPECT_EQ(send(self->fd, rec1, send_len, 0), send_len);
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, rec2, send_len, 0), send_len);
+ EXPECT_EQ(send(self->fd, rec3, send_len, 0), send_len);
+
+ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_PEEK), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, sizeof(buf), MSG_PEEK), send_len);
+}
+
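Both new tests drive tls_send_cmsg()/tls_recv_cmsg(), helpers defined earlier in this file, to exercise record-type handling: control records must not be merged, and a data record must not be coalesced across an intervening control record. For context, a kTLS record type is set through SOL_TLS ancillary data; a minimal sketch of the send side (the shape is assumed here, not copied from the helpers):

	#include <linux/tls.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	#ifndef SOL_TLS
	#define SOL_TLS 282
	#endif

	/* Sketch: send one TLS record with an explicit record type (e.g. 100). */
	static ssize_t send_tls_record(int fd, unsigned char record_type,
				       const void *data, size_t len)
	{
		char cbuf[CMSG_SPACE(sizeof(record_type))] = {};
		struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		cmsg->cmsg_level = SOL_TLS;
		cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
		cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
		*CMSG_DATA(cmsg) = record_type;

		return sendmsg(fd, &msg, 0);
	}
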
TEST_F(tls, shutdown)
{
char const *test_str = "test_read";
@@ -1874,15 +1919,15 @@ TEST_F(tls_err, poll_partial_rec_async)
/* Child should sleep in poll(), never get a wake */
pfd.fd = self->cfd2;
pfd.events = POLLIN;
- EXPECT_EQ(poll(&pfd, 1, 5), 0);
+ EXPECT_EQ(poll(&pfd, 1, 20), 0);
EXPECT_EQ(write(p[1], &token, 1), 1); /* Barrier #1 */
pfd.fd = self->cfd2;
pfd.events = POLLIN;
- EXPECT_EQ(poll(&pfd, 1, 5), 1);
+ EXPECT_EQ(poll(&pfd, 1, 20), 1);
- exit(!_metadata->passed);
+ exit(!__test_passed(_metadata));
}
}
diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c
index 10f2fde3686b..ec60a16c9307 100644
--- a/tools/testing/selftests/net/txtimestamp.c
+++ b/tools/testing/selftests/net/txtimestamp.c
@@ -163,7 +163,8 @@ static void validate_timestamp(struct timespec *cur, int min_delay)
if (cur64 < start64 + min_delay || cur64 > start64 + max_delay) {
fprintf(stderr, "ERROR: %" PRId64 " us expected between %d and %d\n",
cur64 - start64, min_delay, max_delay);
- test_failed = true;
+ if (!getenv("KSFT_MACHINE_SLOW"))
+ test_failed = true;
}
}
diff --git a/tools/testing/selftests/net/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh
index 31637769f59f..25baca4b148e 100755
--- a/tools/testing/selftests/net/txtimestamp.sh
+++ b/tools/testing/selftests/net/txtimestamp.sh
@@ -8,13 +8,13 @@ set -e
setup() {
# set 1ms delay on lo egress
- tc qdisc add dev lo root netem delay 1ms
+ tc qdisc add dev lo root netem delay 10ms
# set 2ms delay on ifb0 egress
modprobe ifb
ip link add ifb_netem0 type ifb
ip link set dev ifb_netem0 up
- tc qdisc add dev ifb_netem0 root netem delay 2ms
+ tc qdisc add dev ifb_netem0 root netem delay 20ms
# redirect lo ingress through ifb0 egress
tc qdisc add dev lo handle ffff: ingress
@@ -24,9 +24,11 @@ setup() {
}
run_test_v4v6() {
- # SND will be delayed 1000us
- # ACK will be delayed 6000us: 1 + 2 ms round-trip
- local -r args="$@ -v 1000 -V 6000"
+ # SND will be delayed 10ms
+ # ACK will be delayed 60ms: 10 + 20 ms round-trip
+ # allow +/- tolerance of 8ms
+ # wait for ACK to be queued
+ local -r args="$@ -v 10000 -V 60000 -t 8000 -S 80000"
./txtimestamp ${args} -4 -L 127.0.0.1
./txtimestamp ${args} -6 -L ::1
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index af5dc57c8ce9..8802604148dd 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -7,7 +7,7 @@ source net_helper.sh
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
# set global exit status, but never reset nonzero one.
check_err()
@@ -197,7 +197,7 @@ run_all() {
}
if [ ! -f ${BPF_FILE} ]; then
- echo "Missing ${BPF_FILE}. Build bpf selftest first"
+ echo "Missing ${BPF_FILE}. Run 'make' first"
exit -1
fi
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
index cb664679b434..7080eae5312b 100755
--- a/tools/testing/selftests/net/udpgro_bench.sh
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -7,7 +7,7 @@ source net_helper.sh
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
cleanup() {
local -r jobs="$(jobs -p)"
@@ -84,7 +84,7 @@ run_all() {
}
if [ ! -f ${BPF_FILE} ]; then
- echo "Missing ${BPF_FILE}. Build bpf selftest first"
+ echo "Missing ${BPF_FILE}. Run 'make' first"
exit -1
fi
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
index dd47fa96f6b3..e1ff645bd3d1 100755
--- a/tools/testing/selftests/net/udpgro_frglist.sh
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -7,7 +7,7 @@ source net_helper.sh
readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
cleanup() {
local -r jobs="$(jobs -p)"
@@ -85,12 +85,12 @@ run_all() {
}
if [ ! -f ${BPF_FILE} ]; then
- echo "Missing ${BPF_FILE}. Build bpf selftest first"
+ echo "Missing ${BPF_FILE}. Run 'make' first"
exit -1
fi
if [ ! -f nat6to4.o ]; then
- echo "Missing nat6to4 helper. Build bpf nat6to4.o selftest first"
+ echo "Missing nat6to4 helper. Run 'make' first"
exit -1
fi
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index c079565add39..9cd5e885e91f 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -1,7 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+source net_helper.sh
+
+BPF_FILE="xdp_dummy.o"
readonly BASE="ns-$(mktemp -u XXXXXX)"
readonly SRC=2
readonly DST=1
@@ -37,6 +39,10 @@ create_ns() {
for ns in $NS_SRC $NS_DST; do
ip netns add $ns
ip -n $ns link set dev lo up
+
+ # disable router solicitations to decrease 'noise' traffic
+ ip netns exec $ns sysctl -qw net.ipv6.conf.default.router_solicitations=0
+ ip netns exec $ns sysctl -qw net.ipv6.conf.all.router_solicitations=0
done
ip link add name veth$SRC type veth peer name veth$DST
@@ -78,6 +84,12 @@ create_vxlan_pair() {
create_vxlan_endpoint $BASE$ns veth$ns $BM_NET_V6$((3 - $ns)) vxlan6$ns 6
ip -n $BASE$ns addr add dev vxlan6$ns $OL_NET_V6$ns/24 nodad
done
+
+ # preload the neighbour cache, to avoid some noisy traffic
+ local addr_dst=$(ip -j -n $BASE$DST link show dev vxlan6$DST |jq -r '.[]["address"]')
+ local addr_src=$(ip -j -n $BASE$SRC link show dev vxlan6$SRC |jq -r '.[]["address"]')
+ ip -n $BASE$DST neigh add dev vxlan6$DST lladdr $addr_src $OL_NET_V6$SRC
+ ip -n $BASE$SRC neigh add dev vxlan6$SRC lladdr $addr_dst $OL_NET_V6$DST
}
is_ipv6() {
@@ -117,9 +129,9 @@ run_test() {
# not enable GRO
ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 4789
ip netns exec $NS_DST $ipt -A INPUT -p udp --dport 8000
- ip netns exec $NS_DST ./udpgso_bench_rx -C 1000 -R 10 -n 10 -l 1300 $rx_args &
+ ip netns exec $NS_DST ./udpgso_bench_rx -C 2000 -R 100 -n 10 -l 1300 $rx_args &
local spid=$!
- sleep 0.1
+ wait_local_port_listen "$NS_DST" 8000 udp
ip netns exec $NS_SRC ./udpgso_bench_tx $family -M 1 -s 13000 -S 1300 -D $dst
local retc=$?
wait $spid
@@ -166,9 +178,9 @@ run_bench() {
# bind the sender and the receiver to different CPUs to try
# get reproducible results
ip netns exec $NS_DST bash -c "echo 2 > /sys/class/net/veth$DST/queues/rx-0/rps_cpus"
- ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 1000 -R 10 &
+ ip netns exec $NS_DST taskset 0x2 ./udpgso_bench_rx -C 2000 -R 100 &
local spid=$!
- sleep 0.1
+ wait_local_port_listen "$NS_DST" 8000 udp
ip netns exec $NS_SRC taskset 0x1 ./udpgso_bench_tx $family -l 3 -S 1300 -D $dst
local retc=$?
wait $spid
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index 7badaf215de2..1d975bf52af3 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -56,7 +56,6 @@ static bool cfg_do_msgmore;
static bool cfg_do_setsockopt;
static int cfg_specific_test_id = -1;
-static const char cfg_ifname[] = "lo";
static unsigned short cfg_port = 9000;
static char buf[ETH_MAX_MTU];
@@ -69,8 +68,13 @@ struct testcase {
int r_len_last; /* recv(): size of last non-mss dgram, if any */
};
-const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;
-const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
+const struct in6_addr addr6 = {
+ { { 0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } }, /* fd00::1 */
+};
+
+const struct in_addr addr4 = {
+ __constant_htonl(0x0a000001), /* 10.0.0.1 */
+};
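
The loopback constants are replaced because the test now runs against dedicated addresses (fd00::1 and 10.0.0.1) configured by udpgso.sh. If in doubt about the hand-rolled byte initializers, they can be cross-checked against inet_pton(); a throwaway sketch, not part of the patch:

	#include <arpa/inet.h>
	#include <assert.h>
	#include <string.h>

	/* Sanity-check the initializers above against their textual forms. */
	static void check_addr_constants(void)
	{
		struct in6_addr v6;
		struct in_addr v4;

		assert(inet_pton(AF_INET6, "fd00::1", &v6) == 1);
		assert(!memcmp(&v6, &addr6, sizeof(v6)));

		assert(inet_pton(AF_INET, "10.0.0.1", &v4) == 1);
		assert(!memcmp(&v4, &addr4, sizeof(v4)));
	}
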
struct testcase testcases_v4[] = {
{
@@ -274,48 +278,6 @@ struct testcase testcases_v6[] = {
}
};
-static unsigned int get_device_mtu(int fd, const char *ifname)
-{
- struct ifreq ifr;
-
- memset(&ifr, 0, sizeof(ifr));
-
- strcpy(ifr.ifr_name, ifname);
-
- if (ioctl(fd, SIOCGIFMTU, &ifr))
- error(1, errno, "ioctl get mtu");
-
- return ifr.ifr_mtu;
-}
-
-static void __set_device_mtu(int fd, const char *ifname, unsigned int mtu)
-{
- struct ifreq ifr;
-
- memset(&ifr, 0, sizeof(ifr));
-
- ifr.ifr_mtu = mtu;
- strcpy(ifr.ifr_name, ifname);
-
- if (ioctl(fd, SIOCSIFMTU, &ifr))
- error(1, errno, "ioctl set mtu");
-}
-
-static void set_device_mtu(int fd, int mtu)
-{
- int val;
-
- val = get_device_mtu(fd, cfg_ifname);
- fprintf(stderr, "device mtu (orig): %u\n", val);
-
- __set_device_mtu(fd, cfg_ifname, mtu);
- val = get_device_mtu(fd, cfg_ifname);
- if (val != mtu)
- error(1, 0, "unable to set device mtu to %u\n", val);
-
- fprintf(stderr, "device mtu (test): %u\n", val);
-}
-
static void set_pmtu_discover(int fd, bool is_ipv4)
{
int level, name, val;
@@ -354,81 +316,6 @@ static unsigned int get_path_mtu(int fd, bool is_ipv4)
return mtu;
}
-/* very wordy version of system("ip route add dev lo mtu 1500 127.0.0.3/32") */
-static void set_route_mtu(int mtu, bool is_ipv4)
-{
- struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
- struct nlmsghdr *nh;
- struct rtattr *rta;
- struct rtmsg *rt;
- char data[NLMSG_ALIGN(sizeof(*nh)) +
- NLMSG_ALIGN(sizeof(*rt)) +
- NLMSG_ALIGN(RTA_LENGTH(sizeof(addr6))) +
- NLMSG_ALIGN(RTA_LENGTH(sizeof(int))) +
- NLMSG_ALIGN(RTA_LENGTH(0) + RTA_LENGTH(sizeof(int)))];
- int fd, ret, alen, off = 0;
-
- alen = is_ipv4 ? sizeof(addr4) : sizeof(addr6);
-
- fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
- if (fd == -1)
- error(1, errno, "socket netlink");
-
- memset(data, 0, sizeof(data));
-
- nh = (void *)data;
- nh->nlmsg_type = RTM_NEWROUTE;
- nh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
- off += NLMSG_ALIGN(sizeof(*nh));
-
- rt = (void *)(data + off);
- rt->rtm_family = is_ipv4 ? AF_INET : AF_INET6;
- rt->rtm_table = RT_TABLE_MAIN;
- rt->rtm_dst_len = alen << 3;
- rt->rtm_protocol = RTPROT_BOOT;
- rt->rtm_scope = RT_SCOPE_UNIVERSE;
- rt->rtm_type = RTN_UNICAST;
- off += NLMSG_ALIGN(sizeof(*rt));
-
- rta = (void *)(data + off);
- rta->rta_type = RTA_DST;
- rta->rta_len = RTA_LENGTH(alen);
- if (is_ipv4)
- memcpy(RTA_DATA(rta), &addr4, alen);
- else
- memcpy(RTA_DATA(rta), &addr6, alen);
- off += NLMSG_ALIGN(rta->rta_len);
-
- rta = (void *)(data + off);
- rta->rta_type = RTA_OIF;
- rta->rta_len = RTA_LENGTH(sizeof(int));
- *((int *)(RTA_DATA(rta))) = 1; //if_nametoindex("lo");
- off += NLMSG_ALIGN(rta->rta_len);
-
- /* MTU is a subtype in a metrics type */
- rta = (void *)(data + off);
- rta->rta_type = RTA_METRICS;
- rta->rta_len = RTA_LENGTH(0) + RTA_LENGTH(sizeof(int));
- off += NLMSG_ALIGN(rta->rta_len);
-
- /* now fill MTU subtype. Note that it fits within above rta_len */
- rta = (void *)(((char *) rta) + RTA_LENGTH(0));
- rta->rta_type = RTAX_MTU;
- rta->rta_len = RTA_LENGTH(sizeof(int));
- *((int *)(RTA_DATA(rta))) = mtu;
-
- nh->nlmsg_len = off;
-
- ret = sendto(fd, data, off, 0, (void *)&nladdr, sizeof(nladdr));
- if (ret != off)
- error(1, errno, "send netlink: %uB != %uB\n", ret, off);
-
- if (close(fd))
- error(1, errno, "close netlink");
-
- fprintf(stderr, "route mtu (test): %u\n", mtu);
-}
-
static bool __send_one(int fd, struct msghdr *msg, int flags)
{
int ret;
@@ -591,15 +478,10 @@ static void run_test(struct sockaddr *addr, socklen_t alen)
/* Do not fragment these datagrams: only succeed if GSO works */
set_pmtu_discover(fdt, addr->sa_family == AF_INET);
- if (cfg_do_connectionless) {
- set_device_mtu(fdt, CONST_MTU_TEST);
+ if (cfg_do_connectionless)
run_all(fdt, fdr, addr, alen);
- }
if (cfg_do_connected) {
- set_device_mtu(fdt, CONST_MTU_TEST + 100);
- set_route_mtu(CONST_MTU_TEST, addr->sa_family == AF_INET);
-
if (connect(fdt, addr, alen))
error(1, errno, "connect");
diff --git a/tools/testing/selftests/net/udpgso.sh b/tools/testing/selftests/net/udpgso.sh
index fec24f584fe9..6c63178086b0 100755
--- a/tools/testing/selftests/net/udpgso.sh
+++ b/tools/testing/selftests/net/udpgso.sh
@@ -3,27 +3,56 @@
#
# Run a series of udpgso regression tests
+set -o errexit
+set -o nounset
+
+setup_loopback() {
+ ip addr add dev lo 10.0.0.1/32
+ ip addr add dev lo fd00::1/128 nodad noprefixroute
+}
+
+test_dev_mtu() {
+ setup_loopback
+ # Reduce loopback MTU
+ ip link set dev lo mtu 1500
+}
+
+test_route_mtu() {
+ setup_loopback
+ # Remove default local routes
+ ip route del local 10.0.0.1/32 table local dev lo
+ ip route del local fd00::1/128 table local dev lo
+ # Install local routes with reduced MTU
+ ip route add local 10.0.0.1/32 table local dev lo mtu 1500
+ ip route add local fd00::1/128 table local dev lo mtu 1500
+}
+
+if [ "$#" -gt 0 ]; then
+ "$1"
+ shift 2 # pop "test_*" arg and "--" delimiter
+ exec "$@"
+fi
+
echo "ipv4 cmsg"
-./in_netns.sh ./udpgso -4 -C
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -4 -C
echo "ipv4 setsockopt"
-./in_netns.sh ./udpgso -4 -C -s
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -4 -C -s
echo "ipv6 cmsg"
-./in_netns.sh ./udpgso -6 -C
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C
echo "ipv6 setsockopt"
-./in_netns.sh ./udpgso -6 -C -s
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C -s
echo "ipv4 connected"
-./in_netns.sh ./udpgso -4 -c
+./in_netns.sh "$0" test_route_mtu -- ./udpgso -4 -c
-# blocked on 2nd loopback address
-# echo "ipv6 connected"
-# ./in_netns.sh ./udpgso -6 -c
+echo "ipv6 connected"
+./in_netns.sh "$0" test_route_mtu -- ./udpgso -6 -c
echo "ipv4 msg_more"
-./in_netns.sh ./udpgso -4 -C -m
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -4 -C -m
echo "ipv6 msg_more"
-./in_netns.sh ./udpgso -6 -C -m
+./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C -m
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index f35a924d4a30..1cbadd267c96 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -375,7 +375,7 @@ static void do_recv(void)
do_flush_udp(fd);
tnow = gettimeofday_ms();
- if (tnow > treport) {
+ if (!cfg_expected_pkt_nr && tnow > treport) {
if (packets)
fprintf(stderr,
"%s rx: %6lu MB/s %8lu calls/s\n",
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
index 2d073595c620..5ae85def0739 100755
--- a/tools/testing/selftests/net/veth.sh
+++ b/tools/testing/selftests/net/veth.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-BPF_FILE="../bpf/xdp_dummy.bpf.o"
+BPF_FILE="xdp_dummy.o"
readonly STATS="$(mktemp -p /tmp ns-XXXXXX)"
readonly BASE=`basename $STATS`
readonly SRC=2
@@ -218,7 +218,7 @@ while getopts "hs:" option; do
done
if [ ! -f ${BPF_FILE} ]; then
- echo "Missing ${BPF_FILE}. Build bpf selftest first"
+ echo "Missing ${BPF_FILE}. Run 'make' first"
exit 1
fi
@@ -247,6 +247,20 @@ chk_gro " - aggregation with TSO off" 1
cleanup
create_ns
+ip -n $NS_DST link set dev veth$DST up
+ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp
+chk_gro_flag "gro vs xdp while down - gro flag on" $DST on
+ip -n $NS_DST link set dev veth$DST down
+chk_gro_flag " - after down" $DST on
+ip -n $NS_DST link set dev veth$DST xdp off
+chk_gro_flag " - after xdp off" $DST off
+ip -n $NS_DST link set dev veth$DST up
+chk_gro_flag " - after up" $DST off
+ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp
+chk_gro_flag " - after peer xdp" $DST off
+cleanup
+
+create_ns
chk_channels "default channels" $DST 1 1
ip -n $NS_DST link set dev veth$DST down
diff --git a/tools/testing/selftests/net/xdp_dummy.c b/tools/testing/selftests/net/xdp_dummy.c
new file mode 100644
index 000000000000..d988b2e0cee8
--- /dev/null
+++ b/tools/testing/selftests/net/xdp_dummy.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KBUILD_MODNAME "xdp_dummy"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index db27153eb4a0..936c3085bb83 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -7,7 +7,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
- conntrack_sctp_collision.sh xt_string.sh
+ conntrack_sctp_collision.sh xt_string.sh \
+ bridge_netfilter.sh
HOSTPKG_CONFIG := pkg-config
diff --git a/tools/testing/selftests/netfilter/bridge_netfilter.sh b/tools/testing/selftests/netfilter/bridge_netfilter.sh
new file mode 100644
index 000000000000..659b3ab02c8b
--- /dev/null
+++ b/tools/testing/selftests/netfilter/bridge_netfilter.sh
@@ -0,0 +1,188 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test bridge netfilter + conntrack, a combination that doesn't really work,
+# with multicast/broadcast packets racing for hash table insertion.
+
+#           eth0    br0     eth0
+# setup is: ns1 <->,ns0 <-> ns3
+#           ns2 <-'    `'-> ns4
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns0="ns0-$sfx"
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+ns3="ns3-$sfx"
+ns4="ns4-$sfx"
+
+ebtables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ebtables"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+for i in $(seq 0 4); do
+ eval ip netns add \$ns$i
+done
+
+cleanup() {
+ for i in $(seq 0 4); do eval ip netns del \$ns$i;done
+}
+
+trap cleanup EXIT
+
+do_ping()
+{
+ fromns="$1"
+ dstip="$2"
+
+ ip netns exec $fromns ping -c 1 -q $dstip > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "ERROR: ping from $fromns to $dstip"
+ ip netns exec ${ns0} nft list ruleset
+ ret=1
+ fi
+}
+
+bcast_ping()
+{
+ fromns="$1"
+ dstip="$2"
+
+ for i in $(seq 1 1000); do
+ ip netns exec $fromns ping -q -f -b -c 1 $dstip > /dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ echo "ERROR: ping -b from $fromns to $dstip"
+ ip netns exec ${ns0} nft list ruleset
+ fi
+ done
+}
+
+ip link add veth1 netns ${ns0} type veth peer name eth0 netns ${ns1}
+if [ $? -ne 0 ]; then
+ echo "SKIP: Can't create veth device"
+ exit $ksft_skip
+fi
+
+ip link add veth2 netns ${ns0} type veth peer name eth0 netns $ns2
+ip link add veth3 netns ${ns0} type veth peer name eth0 netns $ns3
+ip link add veth4 netns ${ns0} type veth peer name eth0 netns $ns4
+
+ip -net ${ns0} link set lo up
+
+for i in $(seq 1 4); do
+ ip -net ${ns0} link set veth$i up
+done
+
+ip -net ${ns0} link add br0 type bridge stp_state 0 forward_delay 0 nf_call_iptables 1 nf_call_ip6tables 1 nf_call_arptables 1
+if [ $? -ne 0 ]; then
+ echo "SKIP: Can't create bridge br0"
+ exit $ksft_skip
+fi
+
+# make veth1, veth2 and veth3 part of the bridge.
+for i in $(seq 1 3); do
+ ip -net ${ns0} link set veth$i master br0
+done
+
+# add a macvlan on top of the bridge.
+MACVLAN_ADDR=ba:f3:13:37:42:23
+ip -net ${ns0} link add link br0 name macvlan0 type macvlan mode private
+ip -net ${ns0} link set macvlan0 address ${MACVLAN_ADDR}
+ip -net ${ns0} link set macvlan0 up
+ip -net ${ns0} addr add 10.23.0.1/24 dev macvlan0
+
+# add a macvlan on top of veth4.
+MACVLAN_ADDR=ba:f3:13:37:42:24
+ip -net ${ns0} link add link veth4 name macvlan4 type macvlan mode vepa
+ip -net ${ns0} link set macvlan4 address ${MACVLAN_ADDR}
+ip -net ${ns0} link set macvlan4 up
+
+# make the macvlan part of the bridge.
+# veth4 is not a bridge port, only the macvlan on top of it.
+ip -net ${ns0} link set macvlan4 master br0
+
+ip -net ${ns0} link set br0 up
+ip -net ${ns0} addr add 10.0.0.1/24 dev br0
+ip netns exec ${ns0} sysctl -q net.bridge.bridge-nf-call-iptables=1
+ret=$?
+if [ $ret -ne 0 ] ; then
+ echo "SKIP: bridge netfilter not available"
+ ret=$ksft_skip
+fi
+
+# for testing, so that the namespaces will reply to ping -b probes.
+ip netns exec ${ns0} sysctl -q net.ipv4.icmp_echo_ignore_broadcasts=0
+
+# enable conntrack in ns0 and drop broadcast packets in forward to
+# keep them from getting confirmed in the postrouting hook before
+# the cloned skb is passed up the stack.
+ip netns exec ${ns0} nft -f - <<EOF
+table ip filter {
+ chain input {
+ type filter hook input priority 1; policy accept
+ iifname br0 counter
+ ct state new accept
+ }
+}
+
+table bridge filter {
+ chain forward {
+ type filter hook forward priority 0; policy accept
+ meta pkttype broadcast ip protocol icmp counter drop
+ }
+}
+EOF
+
+# place 1, 2 & 3 in same subnet, connected via ns0:br0.
+# ns4 is placed in same subnet as well, but its not
+# part of the bridge: the corresponding veth4 is not
+# part of the bridge, only its macvlan interface.
+for i in $(seq 1 4); do
+ eval ip -net \$ns$i link set lo up
+ eval ip -net \$ns$i link set eth0 up
+done
+for i in $(seq 1 2); do
+ eval ip -net \$ns$i addr add 10.0.0.1$i/24 dev eth0
+done
+
+ip -net ${ns3} addr add 10.23.0.13/24 dev eth0
+ip -net ${ns4} addr add 10.23.0.14/24 dev eth0
+
+# test basic connectivity
+do_ping ${ns1} 10.0.0.12
+do_ping ${ns3} 10.23.0.1
+do_ping ${ns4} 10.23.0.1
+
+if [ $ret -eq 0 ];then
+ echo "PASS: netns connectivity: ns1 can reach ns2, ns3 and ns4 can reach ns0"
+fi
+
+bcast_ping ${ns1} 10.0.0.255
+
+# This should deliver broadcast to macvlan0, which is on top of ns0:br0.
+bcast_ping ${ns3} 10.23.0.255
+
+# same, this time via veth4:macvlan4.
+bcast_ping ${ns4} 10.23.0.255
+
+read t < /proc/sys/kernel/tainted
+
+if [ $t -eq 0 ];then
+ echo PASS: kernel not tainted
+else
+ echo ERROR: kernel is tainted
+ ret=1
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/conntrack_dump_flush.c b/tools/testing/selftests/netfilter/conntrack_dump_flush.c
index f18c6db13bbf..b11ea8ee6719 100644
--- a/tools/testing/selftests/netfilter/conntrack_dump_flush.c
+++ b/tools/testing/selftests/netfilter/conntrack_dump_flush.c
@@ -13,7 +13,7 @@
#include "../kselftest_harness.h"
#define TEST_ZONE_ID 123
-#define CTA_FILTER_F_CTA_TUPLE_ZONE (1 << 2)
+#define NF_CT_DEFAULT_ZONE_ID 0
static int reply_counter;
@@ -336,6 +336,9 @@ FIXTURE_SETUP(conntrack_dump_flush)
ret = conntrack_data_generate_v4(self->sock, 0xf4f4f4f4, 0xf5f5f5f5,
TEST_ZONE_ID + 2);
EXPECT_EQ(ret, 0);
+ ret = conntrack_data_generate_v4(self->sock, 0xf6f6f6f6, 0xf7f7f7f7,
+ NF_CT_DEFAULT_ZONE_ID);
+ EXPECT_EQ(ret, 0);
src = (struct in6_addr) {{
.__u6_addr32 = {
@@ -395,6 +398,26 @@ FIXTURE_SETUP(conntrack_dump_flush)
TEST_ZONE_ID + 2);
EXPECT_EQ(ret, 0);
+ src = (struct in6_addr) {{
+ .__u6_addr32 = {
+ 0xb80d0120,
+ 0x00000000,
+ 0x00000000,
+ 0x07000000
+ }
+ }};
+ dst = (struct in6_addr) {{
+ .__u6_addr32 = {
+ 0xb80d0120,
+ 0x00000000,
+ 0x00000000,
+ 0x08000000
+ }
+ }};
+ ret = conntrack_data_generate_v6(self->sock, src, dst,
+ NF_CT_DEFAULT_ZONE_ID);
+ EXPECT_EQ(ret, 0);
+
ret = conntracK_count_zone(self->sock, TEST_ZONE_ID);
EXPECT_GE(ret, 2);
if (ret > 2)
@@ -425,6 +448,24 @@ TEST_F(conntrack_dump_flush, test_flush_by_zone)
EXPECT_EQ(ret, 2);
ret = conntracK_count_zone(self->sock, TEST_ZONE_ID + 2);
EXPECT_EQ(ret, 2);
+ ret = conntracK_count_zone(self->sock, NF_CT_DEFAULT_ZONE_ID);
+ EXPECT_EQ(ret, 2);
+}
+
+TEST_F(conntrack_dump_flush, test_flush_by_zone_default)
+{
+ int ret;
+
+ ret = conntrack_flush_zone(self->sock, NF_CT_DEFAULT_ZONE_ID);
+ EXPECT_EQ(ret, 0);
+ ret = conntracK_count_zone(self->sock, TEST_ZONE_ID);
+ EXPECT_EQ(ret, 2);
+ ret = conntracK_count_zone(self->sock, TEST_ZONE_ID + 1);
+ EXPECT_EQ(ret, 2);
+ ret = conntracK_count_zone(self->sock, TEST_ZONE_ID + 2);
+ EXPECT_EQ(ret, 2);
+ ret = conntracK_count_zone(self->sock, NF_CT_DEFAULT_ZONE_ID);
+ EXPECT_EQ(ret, 0);
}
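
The added fixture entries use zone 0 (NF_CT_DEFAULT_ZONE_ID) so that test_flush_by_zone_default can assert that a zone-filtered flush removes only matching entries and leaves the other zones untouched. For reference, the zone travels in the CTA_ZONE netlink attribute, a 16-bit big-endian value; a rough libmnl sketch (the helper name is assumed, and the file's real generate/flush helpers may build the message differently):

	#include <arpa/inet.h>
	#include <libmnl/libmnl.h>
	#include <linux/netfilter/nfnetlink_conntrack.h>
	#include <stdint.h>

	/* Sketch: tag a conntrack netlink request with a zone. */
	static void put_zone(struct nlmsghdr *nlh, uint16_t zone)
	{
		/* CTA_ZONE is a 16-bit attribute in network byte order. */
		mnl_attr_put_u16(nlh, CTA_ZONE, htons(zone));
	}
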
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/powerpc/math/fpu_signal.c b/tools/testing/selftests/powerpc/math/fpu_signal.c
index 7b1addd50420..8a64f63e37ce 100644
--- a/tools/testing/selftests/powerpc/math/fpu_signal.c
+++ b/tools/testing/selftests/powerpc/math/fpu_signal.c
@@ -18,6 +18,7 @@
#include <pthread.h>
#include "utils.h"
+#include "fpu.h"
/* Number of times each thread should receive the signal */
#define ITERATIONS 10
@@ -27,9 +28,7 @@
*/
#define THREAD_FACTOR 8
-__thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
- 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0,
- 2.1};
+__thread double darray[32];
bool bad_context;
int threads_starting;
@@ -43,9 +42,9 @@ void signal_fpu_sig(int sig, siginfo_t *info, void *context)
ucontext_t *uc = context;
mcontext_t *mc = &uc->uc_mcontext;
- /* Only the non volatiles were loaded up */
- for (i = 14; i < 32; i++) {
- if (mc->fp_regs[i] != darray[i - 14]) {
+ // Don't check f30/f31, they're used as scratches in check_all_fprs()
+ for (i = 0; i < 30; i++) {
+ if (mc->fp_regs[i] != darray[i]) {
bad_context = true;
break;
}
@@ -54,7 +53,6 @@ void signal_fpu_sig(int sig, siginfo_t *info, void *context)
void *signal_fpu_c(void *p)
{
- int i;
long rc;
struct sigaction act;
act.sa_sigaction = signal_fpu_sig;
@@ -64,9 +62,7 @@ void *signal_fpu_c(void *p)
return p;
srand(pthread_self());
- for (i = 0; i < 21; i++)
- darray[i] = rand();
-
+ randomise_darray(darray, ARRAY_SIZE(darray));
rc = preempt_fpu(darray, &threads_starting, &running);
return (void *) rc;
diff --git a/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c b/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
index 98cbb9109ee6..505294da1b9f 100644
--- a/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
+++ b/tools/testing/selftests/powerpc/papr_vpd/papr_vpd.c
@@ -263,10 +263,10 @@ static int papr_vpd_system_loc_code(void)
off_t size;
int fd;
- SKIP_IF_MSG(get_system_loc_code(&lc),
- "Cannot determine system location code");
SKIP_IF_MSG(devfd < 0 && errno == ENOENT,
DEVPATH " not present");
+ SKIP_IF_MSG(get_system_loc_code(&lc),
+ "Cannot determine system location code");
FAIL_IF(devfd < 0);
diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c
index 50a2cc8aef38..c537d52fafc5 100644
--- a/tools/testing/selftests/riscv/hwprobe/cbo.c
+++ b/tools/testing/selftests/riscv/hwprobe/cbo.c
@@ -36,16 +36,14 @@ static void sigill_handler(int sig, siginfo_t *info, void *context)
regs[0] += 4;
}
-static void cbo_insn(char *base, int fn)
-{
- uint32_t insn = MK_CBO(fn);
-
- asm volatile(
- "mv a0, %0\n"
- "li a1, %1\n"
- ".4byte %2\n"
- : : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory");
-}
+#define cbo_insn(base, fn) \
+({ \
+ asm volatile( \
+ "mv a0, %0\n" \
+ "li a1, %1\n" \
+ ".4byte %2\n" \
+ : : "r" (base), "i" (fn), "i" (MK_CBO(fn)) : "a0", "a1", "memory"); \
+})
static void cbo_inval(char *base) { cbo_insn(base, 0); }
static void cbo_clean(char *base) { cbo_insn(base, 1); }
@@ -97,7 +95,7 @@ static void test_zicboz(void *arg)
block_size = pair.value;
ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE &&
is_power_of_2(block_size), "Zicboz block size\n");
- ksft_print_msg("Zicboz block size: %ld\n", block_size);
+ ksft_print_msg("Zicboz block size: %llu\n", block_size);
illegal_insn = false;
cbo_zero(&mem[block_size]);
@@ -121,7 +119,7 @@ static void test_zicboz(void *arg)
for (j = 0; j < block_size; ++j) {
if (mem[i * block_size + j] != expected) {
ksft_test_result_fail("cbo.zero check\n");
- ksft_print_msg("cbo.zero check: mem[%d] != 0x%x\n",
+ ksft_print_msg("cbo.zero check: mem[%llu] != 0x%x\n",
i * block_size + j, expected);
return;
}
@@ -201,7 +199,7 @@ int main(int argc, char **argv)
pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, 0);
if (rc < 0)
- ksft_exit_fail_msg("hwprobe() failed with %d\n", rc);
+ ksft_exit_fail_msg("hwprobe() failed with %ld\n", rc);
assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) {
diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.c b/tools/testing/selftests/riscv/hwprobe/hwprobe.c
index d53e0889b59e..fd73c87804f3 100644
--- a/tools/testing/selftests/riscv/hwprobe/hwprobe.c
+++ b/tools/testing/selftests/riscv/hwprobe/hwprobe.c
@@ -29,7 +29,7 @@ int main(int argc, char **argv)
/* Fail if the kernel claims not to recognize a base key. */
if ((i < 4) && (pairs[i].key != i))
ksft_exit_fail_msg("Failed to recognize base key: key != i, "
- "key=%ld, i=%ld\n", pairs[i].key, i);
+ "key=%lld, i=%ld\n", pairs[i].key, i);
if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
continue;
@@ -37,7 +37,7 @@ int main(int argc, char **argv)
if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
continue;
- ksft_exit_fail_msg("Unexpected pair: (%ld, %ld)\n", pairs[i].key, pairs[i].value);
+ ksft_exit_fail_msg("Unexpected pair: (%lld, %llu)\n", pairs[i].key, pairs[i].value);
}
out = riscv_hwprobe(pairs, 8, 0, 0, 0);
diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h
index 9b8434f62f57..2e0db9c5be6c 100644
--- a/tools/testing/selftests/riscv/mm/mmap_test.h
+++ b/tools/testing/selftests/riscv/mm/mmap_test.h
@@ -18,6 +18,8 @@ struct addresses {
int *on_56_addr;
};
+// Only works on 64-bit
+#if __riscv_xlen == 64
static inline void do_mmaps(struct addresses *mmap_addresses)
{
/*
@@ -50,6 +52,7 @@ static inline void do_mmaps(struct addresses *mmap_addresses)
mmap_addresses->on_56_addr =
mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0);
}
+#endif /* __riscv_xlen == 64 */
static inline int memory_layout(void)
{
diff --git a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
index 66764edb0d52..1dd94197da30 100644
--- a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
+++ b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
@@ -27,7 +27,7 @@ int main(void)
datap = malloc(MAX_VSIZE);
if (!datap) {
- ksft_test_result_fail("fail to allocate memory for size = %lu\n", MAX_VSIZE);
+ ksft_test_result_fail("fail to allocate memory for size = %d\n", MAX_VSIZE);
exit(-1);
}
diff --git a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c
index 2c0d2b1126c1..1f9969bed235 100644
--- a/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c
+++ b/tools/testing/selftests/riscv/vector/vstate_exec_nolibc.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/wait.h>
+
#define THIS_PROGRAM "./vstate_exec_nolibc"
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c
index 8dcd399ef7fc..27668fb3b6d0 100644
--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c
+++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c
@@ -60,7 +60,7 @@ int test_and_compare_child(long provided, long expected, int inherit)
}
rc = launch_test(inherit);
if (rc != expected) {
- ksft_test_result_fail("Test failed, check %d != %d\n", rc,
+ ksft_test_result_fail("Test failed, check %d != %ld\n", rc,
expected);
return -2;
}
@@ -79,7 +79,7 @@ int main(void)
pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
rc = riscv_hwprobe(&pair, 1, 0, NULL, 0);
if (rc < 0) {
- ksft_test_result_fail("hwprobe() failed with %d\n", rc);
+ ksft_test_result_fail("hwprobe() failed with %ld\n", rc);
return -1;
}
diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
index 887542961968..2348d2c20d0a 100644
--- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -24,6 +24,11 @@ bool rseq_validate_cpu_id(void)
{
return rseq_mm_cid_available();
}
+static
+bool rseq_use_cpu_index(void)
+{
+ return false; /* Use mm_cid */
+}
#else
# define RSEQ_PERCPU RSEQ_PERCPU_CPU_ID
static
@@ -36,6 +41,11 @@ bool rseq_validate_cpu_id(void)
{
return rseq_current_cpu_raw() >= 0;
}
+static
+bool rseq_use_cpu_index(void)
+{
+ return true; /* Use cpu_id as index. */
+}
#endif
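
The new rseq_use_cpu_index() helper captures why the CPU_ISSET() filtering in the hunks below only applies in cpu_id mode: mm_cid values are compact per-process concurrency IDs in [0, n), not raw CPU numbers, so masking them against the affinity set would wrongly skip valid slots. A small illustration of the check the hunks add inline, reusing the helper defined just above:

	#define _GNU_SOURCE
	#include <sched.h>

	/* cpu_id mode: index i is a raw CPU number, so honour affinity.
	 * mm_cid mode: index i is a compact concurrency ID; every slot
	 * may be used regardless of which CPUs are allowed.
	 */
	static int percpu_index_usable(int i, const cpu_set_t *allowed)
	{
		if (rseq_use_cpu_index())
			return CPU_ISSET(i, allowed);
		return 1;
	}
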
struct percpu_lock_entry {
@@ -274,7 +284,7 @@ void test_percpu_list(void)
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
for (i = 0; i < CPU_SETSIZE; i++) {
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
for (j = 1; j <= 100; j++) {
struct percpu_list_node *node;
@@ -299,7 +309,7 @@ void test_percpu_list(void)
for (i = 0; i < CPU_SETSIZE; i++) {
struct percpu_list_node *node;
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
while ((node = __percpu_list_pop(&list, i))) {
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 20403d58345c..2f37961240ca 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -288,6 +288,11 @@ bool rseq_validate_cpu_id(void)
{
return rseq_mm_cid_available();
}
+static
+bool rseq_use_cpu_index(void)
+{
+ return false; /* Use mm_cid */
+}
# ifdef TEST_MEMBARRIER
/*
* Membarrier does not currently support targeting a mm_cid, so
@@ -312,6 +317,11 @@ bool rseq_validate_cpu_id(void)
{
return rseq_current_cpu_raw() >= 0;
}
+static
+bool rseq_use_cpu_index(void)
+{
+ return true; /* Use cpu_id as index. */
+}
# ifdef TEST_MEMBARRIER
static
int rseq_membarrier_expedited(int cpu)
@@ -715,7 +725,7 @@ void test_percpu_list(void)
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
for (i = 0; i < CPU_SETSIZE; i++) {
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
for (j = 1; j <= 100; j++) {
struct percpu_list_node *node;
@@ -752,7 +762,7 @@ void test_percpu_list(void)
for (i = 0; i < CPU_SETSIZE; i++) {
struct percpu_list_node *node;
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
while ((node = __percpu_list_pop(&list, i))) {
@@ -902,7 +912,7 @@ void test_percpu_buffer(void)
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
for (i = 0; i < CPU_SETSIZE; i++) {
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
/* Worse-case is every item in same CPU. */
buffer.c[i].array =
@@ -952,7 +962,7 @@ void test_percpu_buffer(void)
for (i = 0; i < CPU_SETSIZE; i++) {
struct percpu_buffer_node *node;
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
while ((node = __percpu_buffer_pop(&buffer, i))) {
@@ -1113,7 +1123,7 @@ void test_percpu_memcpy_buffer(void)
/* Generate list entries for every usable cpu. */
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
for (i = 0; i < CPU_SETSIZE; i++) {
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
/* Worse-case is every item in same CPU. */
buffer.c[i].array =
@@ -1160,7 +1170,7 @@ void test_percpu_memcpy_buffer(void)
for (i = 0; i < CPU_SETSIZE; i++) {
struct percpu_memcpy_buffer_node item;
- if (!CPU_ISSET(i, &allowed_cpus))
+ if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
continue;
while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index 5b5c9d558dee..97b86980b768 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -38,10 +38,10 @@ unsigned long long timing(clockid_t clk_id, unsigned long long samples)
i *= 1000000000ULL;
i += finish.tv_nsec - start.tv_nsec;
- printf("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
- finish.tv_sec, finish.tv_nsec,
- start.tv_sec, start.tv_nsec,
- i, (double)i / 1000000000.0);
+ ksft_print_msg("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
+ finish.tv_sec, finish.tv_nsec,
+ start.tv_sec, start.tv_nsec,
+ i, (double)i / 1000000000.0);
return i;
}
@@ -53,7 +53,7 @@ unsigned long long calibrate(void)
pid_t pid, ret;
int seconds = 15;
- printf("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
+ ksft_print_msg("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
samples = 0;
pid = getpid();
@@ -98,24 +98,36 @@ bool le(int i_one, int i_two)
}
long compare(const char *name_one, const char *name_eval, const char *name_two,
- unsigned long long one, bool (*eval)(int, int), unsigned long long two)
+ unsigned long long one, bool (*eval)(int, int), unsigned long long two,
+ bool skip)
{
bool good;
- printf("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
- (long long)one, name_eval, (long long)two);
+ if (skip) {
+ ksft_test_result_skip("%s %s %s\n", name_one, name_eval,
+ name_two);
+ return 0;
+ }
+
+ ksft_print_msg("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
+ (long long)one, name_eval, (long long)two);
if (one > INT_MAX) {
- printf("Miscalculation! Measurement went negative: %lld\n", (long long)one);
- return 1;
+ ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)one);
+ good = false;
+ goto out;
}
if (two > INT_MAX) {
- printf("Miscalculation! Measurement went negative: %lld\n", (long long)two);
- return 1;
+ ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)two);
+ good = false;
+ goto out;
}
good = eval(one, two);
printf("%s\n", good ? "✔️" : "❌");
+out:
+ ksft_test_result(good, "%s %s %s\n", name_one, name_eval, name_two);
+
return good ? 0 : 1;
}
@@ -142,15 +154,22 @@ int main(int argc, char *argv[])
unsigned long long samples, calc;
unsigned long long native, filter1, filter2, bitmap1, bitmap2;
unsigned long long entry, per_filter1, per_filter2;
+ bool skip = false;
setbuf(stdout, NULL);
- printf("Running on:\n");
+ ksft_print_header();
+ ksft_set_plan(7);
+
+ ksft_print_msg("Running on:\n");
+ ksft_print_msg("");
system("uname -a");
- printf("Current BPF sysctl settings:\n");
+ ksft_print_msg("Current BPF sysctl settings:\n");
/* Avoid using "sysctl" which may not be installed. */
+ ksft_print_msg("");
system("grep -H . /proc/sys/net/core/bpf_jit_enable");
+ ksft_print_msg("");
system("grep -H . /proc/sys/net/core/bpf_jit_harden");
if (argc > 1)
@@ -158,11 +177,11 @@ int main(int argc, char *argv[])
else
samples = calibrate();
- printf("Benchmarking %llu syscalls...\n", samples);
+ ksft_print_msg("Benchmarking %llu syscalls...\n", samples);
/* Native call */
native = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
- printf("getpid native: %llu ns\n", native);
+ ksft_print_msg("getpid native: %llu ns\n", native);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
assert(ret == 0);
@@ -172,35 +191,37 @@ int main(int argc, char *argv[])
assert(ret == 0);
bitmap1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
- printf("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
+ ksft_print_msg("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
/* Second filter resulting in a bitmap */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
assert(ret == 0);
bitmap2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
- printf("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
+ ksft_print_msg("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
/* Third filter, can no longer be converted to bitmap */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
assert(ret == 0);
filter1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
- printf("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
+ ksft_print_msg("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
/* Fourth filter, can not be converted to bitmap because of filter 3 */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
assert(ret == 0);
filter2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
- printf("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
+ ksft_print_msg("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
/* Estimations */
#define ESTIMATE(fmt, var, what) do { \
var = (what); \
- printf("Estimated " fmt ": %llu ns\n", var); \
- if (var > INT_MAX) \
- goto more_samples; \
+ ksft_print_msg("Estimated " fmt ": %llu ns\n", var); \
+ if (var > INT_MAX) { \
+ skip = true; \
+ ret |= 1; \
+ } \
} while (0)
ESTIMATE("total seccomp overhead for 1 bitmapped filter", calc,
@@ -218,31 +239,34 @@ int main(int argc, char *argv[])
ESTIMATE("seccomp per-filter overhead (filters / 4)", per_filter2,
(filter2 - native - entry) / 4);
- printf("Expectations:\n");
- ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1);
- bits = compare("native", "≤", "1 filter", native, le, filter1);
+ ksft_print_msg("Expectations:\n");
+ ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1,
+ skip);
+ bits = compare("native", "≤", "1 filter", native, le, filter1,
+ skip);
if (bits)
- goto more_samples;
+ skip = true;
ret |= compare("per-filter (last 2 diff)", "≈", "per-filter (filters / 4)",
- per_filter1, approx, per_filter2);
+ per_filter1, approx, per_filter2, skip);
bits = compare("1 bitmapped", "≈", "2 bitmapped",
- bitmap1 - native, approx, bitmap2 - native);
+ bitmap1 - native, approx, bitmap2 - native, skip);
if (bits) {
- printf("Skipping constant action bitmap expectations: they appear unsupported.\n");
- goto out;
+ ksft_print_msg("Skipping constant action bitmap expectations: they appear unsupported.\n");
+ skip = true;
}
- ret |= compare("entry", "≈", "1 bitmapped", entry, approx, bitmap1 - native);
- ret |= compare("entry", "≈", "2 bitmapped", entry, approx, bitmap2 - native);
+ ret |= compare("entry", "≈", "1 bitmapped", entry, approx,
+ bitmap1 - native, skip);
+ ret |= compare("entry", "≈", "2 bitmapped", entry, approx,
+ bitmap2 - native, skip);
ret |= compare("native + entry + (per filter * 4)", "≈", "4 filters total",
- entry + (per_filter1 * 4) + native, approx, filter2);
- if (ret == 0)
- goto out;
+ entry + (per_filter1 * 4) + native, approx, filter2,
+ skip);
-more_samples:
- printf("Saw unexpected benchmark result. Try running again with more samples?\n");
-out:
- return 0;
+ if (ret)
+ ksft_print_msg("Saw unexpected benchmark result. Try running again with more samples?\n");
+
+ ksft_finished();
}
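For reference, the helpers used above come from tools/testing/selftests/kselftest.h and emit KTAP-conformant output. A minimal sketch of the pattern (the check itself is hypothetical, not from this patch):

	#include "kselftest.h"

	int main(void)
	{
		ksft_print_header();	/* KTAP version line */
		ksft_set_plan(1);	/* "1..1" */

		/* Diagnostics get a "# " prefix so TAP parsers skip them. */
		ksft_print_msg("running a hypothetical check\n");

		if (2 + 2 == 4)
			ksft_test_result_pass("arithmetic\n");
		else
			ksft_test_result_fail("arithmetic\n");

		/* Exits with KSFT_PASS or KSFT_FAIL based on the results. */
		ksft_finished();
	}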
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 38f651469968..1027e6170186 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1576,7 +1576,7 @@ void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
ASSERT_EQ(0, ret);
}
/* Directly report the status of our test harness results. */
- syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+ syscall(__NR_exit, _metadata->exit_code);
}
/* Common tracer setup/teardown functions. */
@@ -1623,7 +1623,7 @@ void teardown_trace_fixture(struct __test_metadata *_metadata,
ASSERT_EQ(0, kill(tracer, SIGUSR1));
ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
if (WEXITSTATUS(status))
- _metadata->passed = 0;
+ _metadata->exit_code = KSFT_FAIL;
}
}
@@ -3088,8 +3088,7 @@ TEST(syscall_restart)
}
/* Directly report the status of our test harness results. */
- syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
- : EXIT_FAILURE);
+ syscall(__NR_exit, _metadata->exit_code);
}
EXPECT_EQ(0, close(pipefd[0]));
@@ -3174,7 +3173,7 @@ TEST(syscall_restart)
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
if (WIFSIGNALED(status) || WEXITSTATUS(status))
- _metadata->passed = 0;
+ _metadata->exit_code = KSFT_FAIL;
}
TEST_SIGNAL(filter_flag_log, SIGSYS)
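The _metadata->exit_code conversion above reflects the kselftest_harness.h rework that replaced the boolean "passed" with a KSFT_* code. A hedged sketch of the fork-and-report pattern, assuming that harness (test name hypothetical):

	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <unistd.h>
	#include "kselftest_harness.h"

	TEST(report_from_child)
	{
		int status;
		pid_t child = fork();

		ASSERT_LE(0, child);
		if (child == 0) {
			/* ASSERT and EXPECT macros update
			 * _metadata->exit_code; report it directly,
			 * skipping harness teardown.
			 */
			syscall(__NR_exit, _metadata->exit_code);
		}
		ASSERT_EQ(child, waitpid(child, &status, 0));
		if (WEXITSTATUS(status))
			_metadata->exit_code = KSFT_FAIL;
	}

	TEST_HARNESS_MAIN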
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index c60acba951c2..db176fe7d0c3 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -8,6 +8,7 @@ CONFIG_VETH=y
#
# Core Netfilter Configuration
#
+CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_MARK=y
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index b53d12909962..b73bd255ea36 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -649,5 +649,408 @@
"teardown": [
"$TC actions flush action mirred"
]
+ },
+ {
+ "id": "456d",
+ "name": "Add mirred mirror to egress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 egress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress mirror index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "mirror",
+ "direction": "egress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "pipe"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 egress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "2358",
+ "name": "Add mirred mirror to ingress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress mirror index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "mirror",
+ "direction": "ingress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "pipe"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "fdb1",
+ "name": "Add mirred redirect to egress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred egress redirect index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "redirect",
+ "direction": "egress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "stolen"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "20cc",
+ "name": "Add mirred redirect to ingress block action",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress redirect index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "redirect",
+ "direction": "ingress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "stolen"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "e739",
+ "name": "Try to add mirred action with both dev and block",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress redirect index 1 blockid 21 dev $DEV1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC -j actions list action mirred",
+ "matchJSON": [],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "2f47",
+        "name": "Try to add mirred action without specifying either dev or block",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action mirred ingress redirect index 1",
+ "expExitCode": "255",
+ "verifyCmd": "$TC -j actions list action mirred",
+ "matchJSON": [],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "3188",
+ "name": "Replace mirred redirect to dev action with redirect to block",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ],
+ [
+ "$TC actions add action mirred ingress redirect index 1 dev $DEV1",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions replace action mirred egress redirect index 1 blockid 21",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "redirect",
+ "direction": "egress",
+ "to_blockid": 21,
+ "control_action": {
+ "type": "stolen"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
+ },
+ {
+ "id": "83cc",
+ "name": "Replace mirred redirect to block action with mirror to dev",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "dependsOn": "$TC actions add action mirred help 2>&1 | grep -q blockid",
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ [
+ "$TC qdisc add dev $DEV1 ingress_block 21 clsact",
+ 0
+ ],
+ [
+ "$TC actions add action mirred egress redirect index 1 blockid 21",
+ 0
+ ]
+ ],
+ "cmdUnderTest": "$TC actions replace action mirred ingress mirror index 1 dev lo",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -j actions get action mirred index 1",
+ "matchJSON": [
+ {
+ "total acts": 0
+ },
+ {
+ "actions": [
+ {
+ "order": 1,
+ "kind": "mirred",
+ "mirred_action": "mirror",
+ "direction": "ingress",
+ "to_dev": "lo",
+ "control_action": {
+ "type": "pipe"
+ },
+ "index": 1,
+ "ref": 1,
+ "bind": 0,
+ "not_in_hw": true
+ }
+ ]
+ }
+ ],
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress_block 21 clsact",
+ "$TC actions flush action mirred"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
index be293e7c6d18..3a537b2ec4c9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
@@ -77,7 +77,7 @@
"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq quantum 9000",
"expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $DUMMY",
- "matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p buckets.*orphan_mask 1023 quantum 9000b",
+ "matchPattern": "qdisc fq 1: root refcnt [0-9]+ limit 10000p flow_limit 100p.*quantum 9000b",
"matchCount": "1",
"teardown": [
"$TC qdisc del dev $DUMMY handle 1: root"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
index 2d603ef2e375..12da0a939e3e 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
@@ -167,6 +167,7 @@
"plugins": {
"requires": "nsPlugin"
},
+ "dependsOn": "echo '' | jq",
"setup": [
"echo \"1 1 8\" > /sys/bus/netdevsim/new_device",
"$TC qdisc replace dev $ETH handle 8001: parent root stab overhead 24 taprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 sched-entry S ff 20000000 clockid CLOCK_TAI",
@@ -192,6 +193,7 @@
"plugins": {
"requires": "nsPlugin"
},
+ "dependsOn": "echo '' | jq",
"setup": [
"echo \"1 1 8\" > /sys/bus/netdevsim/new_device",
"$TC qdisc replace dev $ETH handle 8001: parent root stab overhead 24 taprio num_tc 8 map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 base-time 0 sched-entry S ff 20000000 flags 0x2",
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index caeacc691587..ee349187636f 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -541,7 +541,7 @@ def test_runner(pm, args, filtered_tests):
message = pmtf.message
output = pmtf.output
res = TestResult(tidx['id'], tidx['name'])
- res.set_result(ResultState.skip)
+ res.set_result(ResultState.fail)
res.set_errormsg(pmtf.message)
res.set_failmsg(pmtf.output)
tsr.add_resultdata(res)
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
index c53ede8b730d..cddff1772e10 100755
--- a/tools/testing/selftests/tc-testing/tdc.sh
+++ b/tools/testing/selftests/tc-testing/tdc.sh
@@ -63,5 +63,4 @@ try_modprobe sch_hfsc
try_modprobe sch_hhf
try_modprobe sch_htb
try_modprobe sch_teql
-./tdc.py -J`nproc` -c actions
-./tdc.py -J`nproc` -c qdisc
+./tdc.py -J`nproc`
diff --git a/tools/testing/vsock/vsock_diag_test.c b/tools/testing/vsock/vsock_diag_test.c
index 9d61b1f1c4c3..081e045f4696 100644
--- a/tools/testing/vsock/vsock_diag_test.c
+++ b/tools/testing/vsock/vsock_diag_test.c
@@ -39,6 +39,8 @@ static const char *sock_type_str(int type)
return "DGRAM";
case SOCK_STREAM:
return "STREAM";
+ case SOCK_SEQPACKET:
+ return "SEQPACKET";
default:
return "INVALID TYPE";
}
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 2456a399eb9a..afd18c678ff5 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -28,10 +28,15 @@ FOPTS := -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
-fasynchronous-unwind-tables -fstack-clash-protection
WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
+ifeq ($(CC),clang)
+ FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS))
+ WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS))
+endif
+
TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs)
CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS)
-LDFLAGS := -ggdb $(EXTRA_LDFLAGS)
+LDFLAGS := -flto=auto -ggdb $(EXTRA_LDFLAGS)
LIBS := $$($(PKG_CONFIG) --libs libtracefs)
SRC := $(wildcard src/*.c)
diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
index 8f81fa007364..01870d50942a 100644
--- a/tools/tracing/rtla/src/osnoise_hist.c
+++ b/tools/tracing/rtla/src/osnoise_hist.c
@@ -135,8 +135,7 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
if (params->output_divisor)
duration = duration / params->output_divisor;
- if (data->bucket_size)
- bucket = duration / data->bucket_size;
+ bucket = duration / data->bucket_size;
total_duration = duration * count;
@@ -480,7 +479,11 @@ static void osnoise_hist_usage(char *usage)
for (i = 0; msg[i]; i++)
fprintf(stderr, "%s\n", msg[i]);
- exit(1);
+
+ if (usage)
+ exit(EXIT_FAILURE);
+
+ exit(EXIT_SUCCESS);
}
/*
diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
index f7c959be8677..457360db0767 100644
--- a/tools/tracing/rtla/src/osnoise_top.c
+++ b/tools/tracing/rtla/src/osnoise_top.c
@@ -331,7 +331,11 @@ static void osnoise_top_usage(struct osnoise_top_params *params, char *usage)
for (i = 0; msg[i]; i++)
fprintf(stderr, "%s\n", msg[i]);
- exit(1);
+
+ if (usage)
+ exit(EXIT_FAILURE);
+
+ exit(EXIT_SUCCESS);
}
/*
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index 47d3d8b53cb2..dbf154082f95 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -178,8 +178,7 @@ timerlat_hist_update(struct osnoise_tool *tool, int cpu,
if (params->output_divisor)
latency = latency / params->output_divisor;
- if (data->bucket_size)
- bucket = latency / data->bucket_size;
+ bucket = latency / data->bucket_size;
if (!context) {
hist = data->hist[cpu].irq;
@@ -546,7 +545,11 @@ static void timerlat_hist_usage(char *usage)
for (i = 0; msg[i]; i++)
fprintf(stderr, "%s\n", msg[i]);
- exit(1);
+
+ if (usage)
+ exit(EXIT_FAILURE);
+
+ exit(EXIT_SUCCESS);
}
/*
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 1640f121baca..3e9af2c38688 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -375,7 +375,11 @@ static void timerlat_top_usage(char *usage)
for (i = 0; msg[i]; i++)
fprintf(stderr, "%s\n", msg[i]);
- exit(1);
+
+ if (usage)
+ exit(EXIT_FAILURE);
+
+ exit(EXIT_SUCCESS);
}
/*
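All four rtla usage() fixes above implement the same convention: usage requested via -h/--help is a successful exit, while usage printed in response to a bad option is a failure. Roughly (names hypothetical):

	#include <stdio.h>
	#include <stdlib.h>

	static void tool_usage(const char *error)
	{
		if (error)
			fprintf(stderr, "error: %s\n", error);
		fprintf(stderr, "usage: rtla-tool [options]\n");

		/* --help succeeds; usage after a parse error does not */
		exit(error ? EXIT_FAILURE : EXIT_SUCCESS);
	}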
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index c769d7b3842c..9ac71a66840c 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -238,12 +238,6 @@ static inline int sched_setattr(pid_t pid, const struct sched_attr *attr,
return syscall(__NR_sched_setattr, pid, attr, flags);
}
-static inline int sched_getattr(pid_t pid, struct sched_attr *attr,
- unsigned int size, unsigned int flags)
-{
- return syscall(__NR_sched_getattr, pid, attr, size, flags);
-}
-
int __set_sched_attr(int pid, struct sched_attr *attr)
{
int flags = 0;
@@ -479,13 +473,13 @@ int parse_prio(char *arg, struct sched_attr *sched_param)
if (prio == INVALID_VAL)
return -1;
- if (prio < sched_get_priority_min(SCHED_OTHER))
+ if (prio < MIN_NICE)
return -1;
- if (prio > sched_get_priority_max(SCHED_OTHER))
+ if (prio > MAX_NICE)
return -1;
sched_param->sched_policy = SCHED_OTHER;
- sched_param->sched_priority = prio;
+ sched_param->sched_nice = prio;
break;
default:
return -1;
@@ -536,7 +530,7 @@ int set_cpu_dma_latency(int32_t latency)
*/
static const int find_mount(const char *fs, char *mp, int sizeof_mp)
{
- char mount_point[MAX_PATH];
+ char mount_point[MAX_PATH+1];
char type[100];
int found = 0;
FILE *fp;
diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h
index 04ed1e650495..d44513e6c66a 100644
--- a/tools/tracing/rtla/src/utils.h
+++ b/tools/tracing/rtla/src/utils.h
@@ -9,6 +9,8 @@
*/
#define BUFF_U64_STR_SIZE 24
#define MAX_PATH 1024
+#define MAX_NICE 20
+#define MIN_NICE -19
#define container_of(ptr, type, member)({ \
const typeof(((type *)0)->member) *__mptr = (ptr); \
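For SCHED_OTHER the tunable is now the nice value carried in sched_attr.sched_nice, applied through the raw sched_setattr(2) syscall (glibc has no wrapper); note the kernel ABI itself accepts nice values in -20..19. A self-contained sketch under those assumptions, with a hypothetical helper name and a locally defined struct mirroring the kernel layout:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Mirrors the kernel's struct sched_attr (no glibc type exists). */
	struct k_sched_attr {
		uint32_t size;
		uint32_t sched_policy;
		uint64_t sched_flags;
		int32_t  sched_nice;		/* SCHED_OTHER/SCHED_BATCH */
		uint32_t sched_priority;	/* SCHED_FIFO/SCHED_RR */
		uint64_t sched_runtime;		/* SCHED_DEADLINE fields */
		uint64_t sched_deadline;
		uint64_t sched_period;
	};

	static int set_other_nice(pid_t pid, int nice_val)
	{
		struct k_sched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_policy = SCHED_OTHER;
		attr.sched_nice = nice_val;	/* not sched_priority */

		return syscall(__NR_sched_setattr, pid, &attr, 0);
	}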
diff --git a/tools/verification/rv/Makefile b/tools/verification/rv/Makefile
index 3d0f3888a58c..485f8aeddbe0 100644
--- a/tools/verification/rv/Makefile
+++ b/tools/verification/rv/Makefile
@@ -28,10 +28,15 @@ FOPTS := -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
-fasynchronous-unwind-tables -fstack-clash-protection
WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
+ifeq ($(CC),clang)
+ FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS))
+ WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS))
+endif
+
TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs)
CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS) -I include
-LDFLAGS := -ggdb $(EXTRA_LDFLAGS)
+LDFLAGS := -flto=auto -ggdb $(EXTRA_LDFLAGS)
LIBS := $$($(PKG_CONFIG) --libs libtracefs)
SRC := $(wildcard src/*.c)
diff --git a/tools/verification/rv/src/in_kernel.c b/tools/verification/rv/src/in_kernel.c
index ad28582bcf2b..f04479ecc96c 100644
--- a/tools/verification/rv/src/in_kernel.c
+++ b/tools/verification/rv/src/in_kernel.c
@@ -210,9 +210,9 @@ static char *ikm_read_reactor(char *monitor_name)
static char *ikm_get_current_reactor(char *monitor_name)
{
char *reactors = ikm_read_reactor(monitor_name);
+ char *curr_reactor = NULL;
char *start;
char *end;
- char *curr_reactor;
if (!reactors)
return NULL;
diff --git a/tools/virtio/.gitignore b/tools/virtio/.gitignore
index 9934d48d9a55..7e47b281c442 100644
--- a/tools/virtio/.gitignore
+++ b/tools/virtio/.gitignore
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
*.d
virtio_test
+vhost_net_test
vringh_test
virtio-trace/trace-agent
diff --git a/tools/virtio/Makefile b/tools/virtio/Makefile
index d128925980e0..e25e99c1c3b7 100644
--- a/tools/virtio/Makefile
+++ b/tools/virtio/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
all: test mod
-test: virtio_test vringh_test
+test: virtio_test vringh_test vhost_net_test
virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o
+vhost_net_test: virtio_ring.o vhost_net_test.o
try-run = $(shell set -e; \
if ($(1)) >/dev/null 2>&1; \
@@ -49,6 +50,7 @@ oot-clean: OOT_BUILD+=clean
.PHONY: all test mod clean vhost oot oot-clean oot-build
clean:
- ${RM} *.o vringh_test virtio_test vhost_test/*.o vhost_test/.*.cmd \
- vhost_test/Module.symvers vhost_test/modules.order *.d
+ ${RM} *.o vringh_test virtio_test vhost_net_test vhost_test/*.o \
+ vhost_test/.*.cmd vhost_test/Module.symvers \
+ vhost_test/modules.order *.d
-include *.d
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 2a8a70e2a950..42a564f22f2d 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_VIRTIO_CONFIG_H
+#define LINUX_VIRTIO_CONFIG_H
#include <linux/virtio_byteorder.h>
#include <linux/virtio.h>
#include <uapi/linux/virtio_config.h>
@@ -95,3 +97,5 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
+
+#endif
diff --git a/tools/virtio/vhost_net_test.c b/tools/virtio/vhost_net_test.c
new file mode 100644
index 000000000000..389d99a6d7c7
--- /dev/null
+++ b/tools/virtio/vhost_net_test.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <getopt.h>
+#include <limits.h>
+#include <string.h>
+#include <poll.h>
+#include <sys/eventfd.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <linux/vhost.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#include <linux/in.h>
+#include <linux/if_packet.h>
+#include <linux/virtio_net.h>
+#include <netinet/ether.h>
+
+#define HDR_LEN sizeof(struct virtio_net_hdr_mrg_rxbuf)
+#define TEST_BUF_LEN 256
+#define TEST_PTYPE ETH_P_LOOPBACK
+#define DESC_NUM 256
+
+/* Used by implementation of kmalloc() in tools/virtio/linux/kernel.h */
+void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
+
+struct vq_info {
+ int kick;
+ int call;
+ int idx;
+ long started;
+ long completed;
+ struct pollfd fds;
+ void *ring;
+ /* copy used for control */
+ struct vring vring;
+ struct virtqueue *vq;
+};
+
+struct vdev_info {
+ struct virtio_device vdev;
+ int control;
+ struct vq_info vqs[2];
+ int nvqs;
+ void *buf;
+ size_t buf_size;
+ char *test_buf;
+ char *res_buf;
+ struct vhost_memory *mem;
+ int sock;
+ int ifindex;
+ unsigned char mac[ETHER_ADDR_LEN];
+};
+
+static int tun_alloc(struct vdev_info *dev, char *tun_name)
+{
+ struct ifreq ifr;
+ int len = HDR_LEN;
+ int fd, e;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (fd < 0) {
+ perror("Cannot open /dev/net/tun");
+ return fd;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
+ strncpy(ifr.ifr_name, tun_name, IFNAMSIZ);
+
+ e = ioctl(fd, TUNSETIFF, &ifr);
+ if (e < 0) {
+ perror("ioctl[TUNSETIFF]");
+ close(fd);
+ return e;
+ }
+
+ e = ioctl(fd, TUNSETVNETHDRSZ, &len);
+ if (e < 0) {
+ perror("ioctl[TUNSETVNETHDRSZ]");
+ close(fd);
+ return e;
+ }
+
+ e = ioctl(fd, SIOCGIFHWADDR, &ifr);
+ if (e < 0) {
+ perror("ioctl[SIOCGIFHWADDR]");
+ close(fd);
+ return e;
+ }
+
+ memcpy(dev->mac, &ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
+ return fd;
+}
+
+static void vdev_create_socket(struct vdev_info *dev, char *tun_name)
+{
+ struct ifreq ifr;
+
+ dev->sock = socket(AF_PACKET, SOCK_RAW, htons(TEST_PTYPE));
+ assert(dev->sock != -1);
+
+ strncpy(ifr.ifr_name, tun_name, IFNAMSIZ);
+ assert(ioctl(dev->sock, SIOCGIFINDEX, &ifr) >= 0);
+
+ dev->ifindex = ifr.ifr_ifindex;
+
+ /* Set the flags that bring the device up */
+ assert(ioctl(dev->sock, SIOCGIFFLAGS, &ifr) >= 0);
+ ifr.ifr_flags |= (IFF_UP | IFF_RUNNING);
+ assert(ioctl(dev->sock, SIOCSIFFLAGS, &ifr) >= 0);
+}
+
+static void vdev_send_packet(struct vdev_info *dev)
+{
+ char *sendbuf = dev->test_buf + HDR_LEN;
+ struct sockaddr_ll saddrll = {0};
+ int sockfd = dev->sock;
+ int ret;
+
+ saddrll.sll_family = PF_PACKET;
+ saddrll.sll_ifindex = dev->ifindex;
+ saddrll.sll_halen = ETH_ALEN;
+ saddrll.sll_protocol = htons(TEST_PTYPE);
+
+ ret = sendto(sockfd, sendbuf, TEST_BUF_LEN, 0,
+ (struct sockaddr *)&saddrll,
+ sizeof(struct sockaddr_ll));
+ assert(ret >= 0);
+}
+
+static bool vq_notify(struct virtqueue *vq)
+{
+ struct vq_info *info = vq->priv;
+ unsigned long long v = 1;
+ int r;
+
+ r = write(info->kick, &v, sizeof(v));
+ assert(r == sizeof(v));
+
+ return true;
+}
+
+static void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
+{
+ struct vhost_vring_addr addr = {
+ .index = info->idx,
+ .desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,
+ .avail_user_addr = (uint64_t)(unsigned long)info->vring.avail,
+ .used_user_addr = (uint64_t)(unsigned long)info->vring.used,
+ };
+ struct vhost_vring_state state = { .index = info->idx };
+ struct vhost_vring_file file = { .index = info->idx };
+ int r;
+
+ state.num = info->vring.num;
+ r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
+ assert(r >= 0);
+
+ state.num = 0;
+ r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
+ assert(r >= 0);
+
+ r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
+ assert(r >= 0);
+
+ file.fd = info->kick;
+ r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
+ assert(r >= 0);
+}
+
+static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
+{
+ if (info->vq)
+ vring_del_virtqueue(info->vq);
+
+ memset(info->ring, 0, vring_size(num, 4096));
+ vring_init(&info->vring, num, info->ring, 4096);
+ info->vq = vring_new_virtqueue(info->idx, num, 4096, vdev, true, false,
+ info->ring, vq_notify, NULL, "test");
+ assert(info->vq);
+ info->vq->priv = info;
+}
+
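+/* Wire one virtqueue into vhost: create the kick eventfd, allocate the
+ * page-aligned ring, program the vring layout into the kernel, then
+ * attach the tap fd as the queue's backend.
+ */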
+static void vq_info_add(struct vdev_info *dev, int idx, int num, int fd)
+{
+ struct vhost_vring_file backend = { .index = idx, .fd = fd };
+ struct vq_info *info = &dev->vqs[idx];
+ int r;
+
+ info->idx = idx;
+ info->kick = eventfd(0, EFD_NONBLOCK);
+ r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
+ assert(r >= 0);
+ vq_reset(info, num, &dev->vdev);
+ vhost_vq_setup(dev, info);
+
+ r = ioctl(dev->control, VHOST_NET_SET_BACKEND, &backend);
+ assert(!r);
+}
+
+static void vdev_info_init(struct vdev_info *dev, unsigned long long features)
+{
+ struct ether_header *eh;
+ int i, r;
+
+ dev->vdev.features = features;
+ INIT_LIST_HEAD(&dev->vdev.vqs);
+ spin_lock_init(&dev->vdev.vqs_list_lock);
+
+ dev->buf_size = (HDR_LEN + TEST_BUF_LEN) * 2;
+ dev->buf = malloc(dev->buf_size);
+ assert(dev->buf);
+ dev->test_buf = dev->buf;
+ dev->res_buf = dev->test_buf + HDR_LEN + TEST_BUF_LEN;
+
+ memset(dev->test_buf, 0, HDR_LEN + TEST_BUF_LEN);
+ eh = (struct ether_header *)(dev->test_buf + HDR_LEN);
+ eh->ether_type = htons(TEST_PTYPE);
+ memcpy(eh->ether_dhost, dev->mac, ETHER_ADDR_LEN);
+ memcpy(eh->ether_shost, dev->mac, ETHER_ADDR_LEN);
+
+ for (i = sizeof(*eh); i < TEST_BUF_LEN; i++)
+ dev->test_buf[i + HDR_LEN] = (char)i;
+
+ dev->control = open("/dev/vhost-net", O_RDWR);
+ assert(dev->control >= 0);
+
+ r = ioctl(dev->control, VHOST_SET_OWNER, NULL);
+ assert(r >= 0);
+
+ dev->mem = malloc(offsetof(struct vhost_memory, regions) +
+ sizeof(dev->mem->regions[0]));
+ assert(dev->mem);
+ memset(dev->mem, 0, offsetof(struct vhost_memory, regions) +
+ sizeof(dev->mem->regions[0]));
+ dev->mem->nregions = 1;
+ dev->mem->regions[0].guest_phys_addr = (long)dev->buf;
+ dev->mem->regions[0].userspace_addr = (long)dev->buf;
+ dev->mem->regions[0].memory_size = dev->buf_size;
+
+ r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
+ assert(r >= 0);
+
+ r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
+ assert(r >= 0);
+
+ dev->nvqs = 2;
+}
+
+static void wait_for_interrupt(struct vq_info *vq)
+{
+ unsigned long long val;
+
+ poll(&vq->fds, 1, 100);
+
+ if (vq->fds.revents & POLLIN)
+ read(vq->fds.fd, &val, sizeof(val));
+}
+
+static void verify_res_buf(char *res_buf)
+{
+ int i;
+
+ for (i = ETHER_HDR_LEN; i < TEST_BUF_LEN; i++)
+ assert(res_buf[i] == (char)i);
+}
+
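+/* Drive the TX queue: add outbufs carrying the test frame, kick vhost,
+ * and check that each completion shows up intact on the packet socket.
+ */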
+static void run_tx_test(struct vdev_info *dev, struct vq_info *vq,
+ bool delayed, int bufs)
+{
+ long long spurious = 0;
+ struct scatterlist sl;
+ unsigned int len;
+ int r;
+
+ for (;;) {
+ long started_before = vq->started;
+ long completed_before = vq->completed;
+
+ virtqueue_disable_cb(vq->vq);
+ do {
+ while (vq->started < bufs &&
+ (vq->started - vq->completed) < 1) {
+ sg_init_one(&sl, dev->test_buf, HDR_LEN + TEST_BUF_LEN);
+ r = virtqueue_add_outbuf(vq->vq, &sl, 1,
+ dev->test_buf + vq->started,
+ GFP_ATOMIC);
+ if (unlikely(r != 0))
+ break;
+
+ ++vq->started;
+
+ if (unlikely(!virtqueue_kick(vq->vq))) {
+ r = -1;
+ break;
+ }
+ }
+
+ if (vq->started >= bufs)
+ r = -1;
+
+ /* Flush out completed bufs if any */
+ while (virtqueue_get_buf(vq->vq, &len)) {
+ int n;
+
+ n = recvfrom(dev->sock, dev->res_buf, TEST_BUF_LEN, 0, NULL, NULL);
+ assert(n == TEST_BUF_LEN);
+ verify_res_buf(dev->res_buf);
+
+ ++vq->completed;
+ r = 0;
+ }
+ } while (r == 0);
+
+ if (vq->completed == completed_before && vq->started == started_before)
+ ++spurious;
+
+ assert(vq->completed <= bufs);
+ assert(vq->started <= bufs);
+ if (vq->completed == bufs)
+ break;
+
+ if (delayed) {
+ if (virtqueue_enable_cb_delayed(vq->vq))
+ wait_for_interrupt(vq);
+ } else {
+ if (virtqueue_enable_cb(vq->vq))
+ wait_for_interrupt(vq);
+ }
+ }
+ printf("TX spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+ spurious, vq->started, vq->completed);
+}
+
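+/* Drive the RX queue: post inbufs, inject matching frames through the
+ * packet socket, and verify the payload of each completed buffer.
+ */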
+static void run_rx_test(struct vdev_info *dev, struct vq_info *vq,
+ bool delayed, int bufs)
+{
+ long long spurious = 0;
+ struct scatterlist sl;
+ unsigned int len;
+ int r;
+
+ for (;;) {
+ long started_before = vq->started;
+ long completed_before = vq->completed;
+
+ do {
+ while (vq->started < bufs &&
+ (vq->started - vq->completed) < 1) {
+ sg_init_one(&sl, dev->res_buf, HDR_LEN + TEST_BUF_LEN);
+
+ r = virtqueue_add_inbuf(vq->vq, &sl, 1,
+ dev->res_buf + vq->started,
+ GFP_ATOMIC);
+ if (unlikely(r != 0))
+ break;
+
+ ++vq->started;
+
+ vdev_send_packet(dev);
+
+ if (unlikely(!virtqueue_kick(vq->vq))) {
+ r = -1;
+ break;
+ }
+ }
+
+ if (vq->started >= bufs)
+ r = -1;
+
+ /* Flush out completed bufs if any */
+ while (virtqueue_get_buf(vq->vq, &len)) {
+ struct ether_header *eh;
+
+ eh = (struct ether_header *)(dev->res_buf + HDR_LEN);
+
+				/* The tun netdev is up and running; only
+				 * handle the TEST_PTYPE packet.
+				 */
+ if (eh->ether_type == htons(TEST_PTYPE)) {
+ assert(len == TEST_BUF_LEN + HDR_LEN);
+ verify_res_buf(dev->res_buf + HDR_LEN);
+ }
+
+ ++vq->completed;
+ r = 0;
+ }
+ } while (r == 0);
+
+ if (vq->completed == completed_before && vq->started == started_before)
+ ++spurious;
+
+ assert(vq->completed <= bufs);
+ assert(vq->started <= bufs);
+ if (vq->completed == bufs)
+ break;
+ }
+
+ printf("RX spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+ spurious, vq->started, vq->completed);
+}
+
+static const char optstring[] = "h";
+static const struct option longopts[] = {
+ {
+ .name = "help",
+ .val = 'h',
+ },
+ {
+ .name = "event-idx",
+ .val = 'E',
+ },
+ {
+ .name = "no-event-idx",
+ .val = 'e',
+ },
+ {
+ .name = "indirect",
+ .val = 'I',
+ },
+ {
+ .name = "no-indirect",
+ .val = 'i',
+ },
+ {
+ .name = "virtio-1",
+ .val = '1',
+ },
+ {
+ .name = "no-virtio-1",
+ .val = '0',
+ },
+ {
+ .name = "delayed-interrupt",
+ .val = 'D',
+ },
+ {
+ .name = "no-delayed-interrupt",
+ .val = 'd',
+ },
+ {
+ .name = "buf-num",
+ .val = 'n',
+ .has_arg = required_argument,
+ },
+ {
+ .name = "batch",
+ .val = 'b',
+ .has_arg = required_argument,
+ },
+ {
+ }
+};
+
+static void help(int status)
+{
+ fprintf(stderr, "Usage: vhost_net_test [--help]"
+ " [--no-indirect]"
+ " [--no-event-idx]"
+ " [--no-virtio-1]"
+ " [--delayed-interrupt]"
+ " [--buf-num]"
+ "\n");
+
+ exit(status);
+}
+
+int main(int argc, char **argv)
+{
+ unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+ char tun_name[IFNAMSIZ];
+ long nbufs = 0x100000;
+ struct vdev_info dev;
+ bool delayed = false;
+ int o, fd;
+
+ for (;;) {
+ o = getopt_long(argc, argv, optstring, longopts, NULL);
+ switch (o) {
+ case -1:
+ goto done;
+ case '?':
+ help(2);
+ case 'e':
+ features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX);
+ break;
+ case 'h':
+ help(0);
+ case 'i':
+ features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC);
+ break;
+ case '0':
+ features &= ~(1ULL << VIRTIO_F_VERSION_1);
+ break;
+ case 'D':
+ delayed = true;
+ break;
+ case 'n':
+ nbufs = strtol(optarg, NULL, 10);
+ assert(nbufs > 0);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ }
+
+done:
+ memset(&dev, 0, sizeof(dev));
+ snprintf(tun_name, IFNAMSIZ, "tun_%d", getpid());
+
+ fd = tun_alloc(&dev, tun_name);
+ assert(fd >= 0);
+
+ vdev_info_init(&dev, features);
+ vq_info_add(&dev, 0, DESC_NUM, fd);
+ vq_info_add(&dev, 1, DESC_NUM, fd);
+ vdev_create_socket(&dev, tun_name);
+
+ run_rx_test(&dev, &dev.vqs[0], delayed, nbufs);
+ run_tx_test(&dev, &dev.vqs[1], delayed, nbufs);
+
+ return 0;
+}
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index 61230532fef1..edcdb8abfa31 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -27,6 +27,7 @@
static unsigned int offset;
static unsigned int ino = 721;
static time_t default_mtime;
+static bool do_file_mtime;
static bool do_csum = false;
struct file_handler {
@@ -329,6 +330,7 @@ static int cpio_mkfile(const char *name, const char *location,
int file;
int retval;
int rc = -1;
+ time_t mtime;
int namesize;
unsigned int i;
uint32_t csum = 0;
@@ -347,16 +349,21 @@ static int cpio_mkfile(const char *name, const char *location,
goto error;
}
- if (buf.st_mtime > 0xffffffff) {
- fprintf(stderr, "%s: Timestamp exceeds maximum cpio timestamp, clipping.\n",
- location);
- buf.st_mtime = 0xffffffff;
- }
+ if (do_file_mtime) {
+ mtime = default_mtime;
+ } else {
+ mtime = buf.st_mtime;
+ if (mtime > 0xffffffff) {
+ fprintf(stderr, "%s: Timestamp exceeds maximum cpio timestamp, clipping.\n",
+ location);
+ mtime = 0xffffffff;
+ }
- if (buf.st_mtime < 0) {
- fprintf(stderr, "%s: Timestamp negative, clipping.\n",
- location);
- buf.st_mtime = 0;
+ if (mtime < 0) {
+ fprintf(stderr, "%s: Timestamp negative, clipping.\n",
+ location);
+ mtime = 0;
+ }
}
if (buf.st_size > 0xffffffff) {
@@ -387,7 +394,7 @@ static int cpio_mkfile(const char *name, const char *location,
(long) uid, /* uid */
(long) gid, /* gid */
nlinks, /* nlink */
- (long) buf.st_mtime, /* mtime */
+ (long) mtime, /* mtime */
size, /* filesize */
3, /* major */
1, /* minor */
@@ -536,8 +543,9 @@ static void usage(const char *prog)
"file /sbin/kinit /usr/src/klibc/kinit/kinit 0755 0 0\n"
"\n"
"<timestamp> is time in seconds since Epoch that will be used\n"
- "as mtime for symlinks, special files and directories. The default\n"
- "is to use the current time for these entries.\n"
+ "as mtime for symlinks, directories, regular and special files.\n"
+		"The default is to use the current time for symlinks,\n"
+		"directories and special files, and to preserve the\n"
+		"modification time of regular files.\n"
"-c: calculate and store 32-bit checksums for file data.\n",
prog);
}
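The clipping above exists because the cpio "newc" header stores mtime as an unsigned 32-bit field. A minimal sketch of the rule (helper name hypothetical):

	#include <stdint.h>
	#include <time.h>

	static uint32_t cpio_clamp_mtime(time_t mtime)
	{
		if (mtime > 0xffffffff)
			return 0xffffffff;	/* clip post-2106 times */
		if (mtime < 0)
			return 0;		/* clip pre-epoch times */
		return (uint32_t)mtime;
	}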
@@ -594,6 +602,7 @@ int main (int argc, char *argv[])
usage(argv[0]);
exit(1);
}
+ do_file_mtime = true;
break;
case 'c':
do_csum = true;